content: string (lengths 35 – 416k)
sha1: string (length 40)
id: int64 (0 – 710k)
def unflatten_verifications(old_verifications):
    """Convert verifications from v2 to v1."""
    new_verifications = {}
    for verification in old_verifications:
        key = verification['key']
        del verification['key']
        new_verifications[key] = verification
    return new_verifications
a3ce1af2991304576b5e3dd3459b86b9e6b6557c
29,425
import os


def load_smoothing_parameter_file(file_name):
    """Reads and parses the smoothing parameter file."""
    par = {}
    # 'r' instead of the deprecated 'rU' mode (removed in Python 3.11)
    with open(file_name, 'r') as handle:
        for line in handle:
            if line:
                fields = line.rstrip(os.linesep).split()
                try:
                    v = float(fields[0])
                    w = float(fields[1])
                    par[v] = w
                except (ValueError, IndexError):
                    raise Exception("Reading error in smoothing parameter file {}".format(file_name))
    return par
29ec17fcaaceeeba2a3dd9b0b7f346f32925e8b7
29,427
def clean_advance_reservation(text):
    """
    The advance reservation field identifies whether or not advance
    reservations are required to use these facilities (day use areas).
    If there is no recorded answer (blank) we default to 'no reservation
    required'.

    Returns True if a reservation is required, False otherwise.
    """
    text = text.lower()
    if text in ['yes']:
        return True
    if 'registration required' in text:
        return True
    if 'tour request form' in text:
        return True
    if 'call ahead' in text:
        return True
    return False
8f707074f62ff605935737e3a6fa4cf2dc492de2
29,428
import copy


def transform(N, max_r=None):
    """(Nondestructively) transform the list of values by averaging them
    with neighboring zeros.

    The maximum r value is assumed to be len(N) unless specified otherwise
    with max_r. This enables use of defaultdict instead of list to specify
    N vals.

    Identifiers are chosen to match those used by W. Gale. Good-Turing
    smoothing without tears. Journal of Quantitative Linguistics, 2:217-37,
    1995.
    """
    size = len(N) if max_r is None else max_r + 1
    Z = copy.copy(N)
    for r in (r for r in range(1, size) if N[r] != 0):
        q = r - 1
        while q > 0 and N[q] == 0:
            q -= 1
        t = r + 1
        while t < size and N[t] == 0:
            t += 1
        # The paper doesn't really specify a way to handle the edge case
        # where there is no defined q or t, because this is the lowest or
        # highest r. I am choosing to handle the lower case by setting q to
        # 0 (which falls out of the above code naturally), and the higher
        # case by duplicating their code, which just sets t = r + (r - q).
        # I originally proceeded by assuming that t/r likely equals r/q.
        # This would be correct for the case where the log-log slope is -1,
        # which won't really be true, but it'd be an okay guess. But Gale
        # didn't do that, so we're not doing it.
        if t == size and q != 0:
            t = 2 * r - q
        Z[r] = N[r] * 2 / (t - q)
    return Z
c73d791c41428e22d486e2fe1dca9d9b29b3d3c6
29,429
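A minimal usage sketch for `transform` above: `N` holds counts-of-counts (index 0 unused), and each nonzero count is averaged over the zero-gap around it:

# Counts-of-counts: 10 species seen once, 4 seen twice, 1 seen five times.
N = [0, 10, 4, 0, 0, 1]
Z = transform(N)
print(Z)  # [0, 10.0, 2.0, 0, 0, 0.333...]; e.g. Z[2] = 4 * 2 / (5 - 1)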
def checksubstate(repo, baserev=None):
    """return list of subrepos at a different revision than substate.
    Abort if any subrepos have uncommitted changes."""
    inclsubs = []
    wctx = repo[None]
    if baserev:
        bctx = repo[baserev]
    else:
        bctx = wctx.p1()
    for s in sorted(wctx.substate):
        wctx.sub(s).bailifchanged(True)
        if s not in bctx.substate or bctx.sub(s).dirty():
            inclsubs.append(s)
    return inclsubs
26efd3a222905352bba78ecd906d4eaeb509f586
29,430
def _set_kwargs_quiet(kwargs, quiet=True):
    """
    Enable quiet switch in keyword cmd_args
    :param kwargs: keyword arguments dict to modify in place
    :param quiet: value to set for the 'quiet' switch
    :return: the modified kwargs dict
    """
    kwargs['quiet'] = quiet
    return kwargs
e7865d35a1d8a81ae6e0c3a1753b65c85a30d55e
29,431
def _get_question_features(conv_id, QUESTIONS):
    """
    Given a conversation id and a dictionary QUESTIONS,
    returns a one-hot encoding as a dict of 8 features, the Xth entry
    being 1 iff the conversation contains question type X.
    """
    ret = {}
    for ind in range(8):
        ret['question_type%d' % (ind)] = 0
    if conv_id in QUESTIONS:
        for pair in QUESTIONS[conv_id]:
            ret['question_type%d' % (pair['question_type'])] = 1
    return ret
7e81b6aed3f7fdd4aae1cd9e8cafdb43ebea2586
29,433
def get_mirror_type(should_mirror_in, should_mirror_out):
    """
    This function returns the type of mirror to perform on a Jira incident.
    NOTE: in order to not mirror an incident, the type should be None.
    :param should_mirror_in: demisto.params().get("incoming_mirror")
    :param should_mirror_out: demisto.params().get('outgoing_mirror')
    :return: The mirror type
    """
    # Adding mirroring details
    mirror_type = None
    if should_mirror_in and should_mirror_out:
        mirror_type = 'Both'
    elif should_mirror_in:
        mirror_type = 'In'
    elif should_mirror_out:
        mirror_type = 'Out'
    return mirror_type
2fe47a6ad13305c0c07e91bfc86cbb4dd64ab450
29,434
import re


def escape_json_string(string: str) -> str:
    """
    Makes any singly escaped double quotes doubly escaped.
    """
    def escape_underescaped_slash(matchobj):
        """
        Return adjacent character + extra escaped double quote.
        """
        return matchobj.group(1) + "\\\""

    # This regex means: match .\" except \\\" while capturing `.`
    return re.sub('([^\\\\])\\\"', escape_underescaped_slash, string)
a241845cdf46a96d79d38a410c497cadfd615205
29,435
def secant(fp, a, b, epsilon=1e-5):
    """secant(fp, a, b, epsilon).

    Root-finding method that locates an optimum of a function f by applying
    the secant method to its first derivative fp (i.e. it finds a root of fp).

    Parameters:
        fp (function): the first derivative of the function to minimize
        a (float): lower bound of the initial uncertainty interval
        b (float): upper bound of the initial uncertainty interval
        epsilon (float): convergence tolerance

    Returns:
        c: optimum value
    """
    c = a - fp(a) * (b - a) / (fp(b) - fp(a))
    prev = a
    while abs(c - prev) > epsilon:
        if fp(a) * fp(c) > 0:
            a = c
        else:
            b = c
        prev = c
        c = a - fp(a) * (b - a) / (fp(b) - fp(a))
    return c
44276e18dc3db30521a5173eb7824da38d3e65e0
29,437
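A quick usage sketch for `secant` above, minimizing f(x) = (x - 2)**2 via the root of its derivative:

fp = lambda x: 2 * (x - 2)  # first derivative of f(x) = (x - 2)**2
print(secant(fp, a=0.0, b=5.0))  # 2.0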
def validate_csv(row0, required_fields):
    """
    :param row0: the CSV header row (list of field names)
    :param required_fields: field names that must be present in row0
    :return: a tuple, True if all requirements are met and a dictionary
        with the index for all the fields
    """
    for field in required_fields:
        if field not in row0:
            return False, {}
    field_pos = {}
    for idx, field in enumerate(row0):
        field_pos[field] = idx
    return True, field_pos
6a126f22a86fbcfa4192ec4f9104faab7efd6b3f
29,439
from typing import Sequence
from typing import Callable
from typing import Any


def batch_by_property(
    items: Sequence, property_func: Callable
) -> list[tuple[list, Any]]:
    """Takes in a Sequence, and returns a list of tuples, (batch, prop)
    such that all items in a batch have the same output when put into the
    Callable property_func, and such that chaining all these batches
    together would give the original Sequence (i.e. order is preserved).

    Examples
    --------
    Normal usage::

        batch_by_property([(1, 2), (3, 4), (5, 6, 7), (8, 9)], len)
        # returns [([(1, 2), (3, 4)], 2), ([(5, 6, 7)], 3), ([(8, 9)], 2)]
    """
    batch_prop_pairs = []
    curr_batch = []
    curr_prop = None
    for item in items:
        prop = property_func(item)
        if prop != curr_prop:
            # Add current batch
            if len(curr_batch) > 0:
                batch_prop_pairs.append((curr_batch, curr_prop))
            # Redefine curr
            curr_prop = prop
            curr_batch = [item]
        else:
            curr_batch.append(item)
    if len(curr_batch) > 0:
        batch_prop_pairs.append((curr_batch, curr_prop))
    return batch_prop_pairs
733dae3d31219dafbe115f12263a3da735409c30
29,440
def recipeParameter(func):
    """
    Internal decorator for all recipe parameters.
    """
    def wrapper(self):
        if not self._gotMeta:
            self.getMeta()
        try:
            return func(self)
        except KeyError:
            return None
    return wrapper
8f7f4db14df06107774c2ec5be397603d0f04bc8
29,441
import os
import ast


def generate_ast(path):
    """Generate an Abstract Syntax Tree using the ast module."""
    if os.path.isfile(path):
        with open(path, 'r') as f:
            return ast.parse(f.read())
    raise IOError('Input needs to be a file. Path: ' + path)
1573464fe61e345219e3affc45adf2202c0cccb7
29,442
def fix_short_sentences(summary_content):
    """
    Merge short sentences with previous sentences
    :param summary_content: summary text
    """
    LEN_95_PERCENTILE = 20
    # merge short sentences
    ix = 0
    fixed_content = []
    while ix < len(summary_content):
        sentence = summary_content[ix]
        if len(sentence) < LEN_95_PERCENTILE:
            if fixed_content and sentence[0].islower():
                fixed_content[-1] = fixed_content[-1] + " " + sentence
                ix += 1
            elif ix + 1 < len(summary_content):
                fixed_content.append(sentence + " " + summary_content[ix + 1])
                ix += 2
            else:
                try:
                    fixed_content[-1] = fixed_content[-1] + " " + sentence
                except IndexError:
                    print("sentence: ", sentence)
                    print("summary_content: ", summary_content)
                    print("fixed_content: ", fixed_content)
                ix += 1
        else:
            fixed_content.append(sentence)
            ix += 1
    return fixed_content
6d093eb1d3fd865098f486bc135990a921879909
29,444
def float01(flt):
    """Float value between 0 and 1"""
    flt = float(flt)
    if flt < 0 or flt > 1:
        raise ValueError("Input must be between 0 and 1!")
    return flt
952f7753ad2c2b227cb1fc4e677f8c4a9dc3ae6a
29,445
import warnings


def reassign_topology(structures, new):
    """Take Packmol created Universe and add old topology features back in

    Attempts to reassign:
     - types
     - names
     - charges
     - masses
     - bonds
     - angles
     - torsions
     - impropers
     - resnames

    Parameters
    ----------
    structures : list
        list of Packmol structures used to create the Packmol Universe
    new : Universe
        the raw output of Packmol

    Returns
    -------
    new : Universe
        the raw output modified to best match the templates given to it
    """
    index = 0
    bonds = []
    angles = []
    dihedrals = []
    impropers = []

    # add required attributes
    for attr in ['types', 'names', 'charges', 'masses']:
        if any(hasattr(pms.ag, attr) for pms in structures):
            new.add_TopologyAttr(attr)
            if not all(hasattr(pms.ag, attr) for pms in structures):
                warnings.warn("added attribute which not all templates had")

    while index < len(new.atoms):
        # first atom we haven't dealt with yet
        start = new.atoms[index]
        # the resname was altered to give a hint to what template it was from
        template = structures[int(start.resname[1:])].ag
        # grab atomgroup which matches template
        to_change = new.atoms[index:index + len(template.atoms)]

        # Update residue names
        nres = len(template.residues)
        new.residues[start.resindex:start.resindex + nres].resnames = template.residues.resnames

        # atom attributes
        for attr in ['types', 'names', 'charges', 'masses']:
            if hasattr(template.atoms, attr):
                setattr(to_change, attr, getattr(template.atoms, attr))

        # bonds/angles/torsions
        if hasattr(template, 'bonds'):
            bonds.extend((template.bonds.to_indices() + index).tolist())
        if hasattr(template, 'angles'):
            angles.extend((template.angles.to_indices() + index).tolist())
        if hasattr(template, 'dihedrals'):
            dihedrals.extend((template.dihedrals.to_indices() + index).tolist())
        if hasattr(template, 'impropers'):
            impropers.extend((template.impropers.to_indices() + index).tolist())

        # update the index pointer to be on next unknown atom
        index += len(template.atoms)

    if bonds:
        # convert to tuples for hashability
        bonds = [tuple(val) for val in bonds]
        new.add_TopologyAttr('bonds', values=bonds)
    if angles:
        angles = [tuple(val) for val in angles]
        new.add_TopologyAttr('angles', values=angles)
    if dihedrals:
        dihedrals = [tuple(val) for val in dihedrals]
        new.add_TopologyAttr('dihedrals', values=dihedrals)
    if impropers:
        impropers = [tuple(val) for val in impropers]
        new.add_TopologyAttr('impropers', values=impropers)

    return new
57c158df54604788cfbd638569bd338f4ee97cae
29,446
import json


def parse_behavior(risky_flows):
    """build incidents from risky flows"""
    incidents = []
    for flow in risky_flows['data']:
        incident = {
            'name': "{rule} {int_}:{int_port} : {ext}:{ext_port}".format(
                rule=flow['riskRule']['name'],
                int_=flow['internalAddress'],
                int_port=flow['internalPort'],
                ext=flow['externalAddress'],
                ext_port=flow['externalPort']
            ),
            'occurred': flow['observationTimestamp'],
            'rawJSON': json.dumps(flow),
            'type': 'Expanse Behavior',
            'CustomFields': {
                'expanserawjsonevent': json.dumps(flow)
            },
            # All behavior is cast to a warning; we can revisit if
            # criticality is added to flow data
            'severity': 2
        }
        incidents.append(incident)
    return incidents
68ac8b0c8312a24deecabed793f87652500a907c
29,448
import os
import random


def generate_splits(path_input, val_ratio, test_products):
    """Generate train and validation splits according to the ratio, and the
    test split read from the test products file."""
    all_indices = [f for f in os.listdir(path_input)
                   if os.path.isfile(os.path.join(path_input, f))
                   and os.path.join(path_input, f).endswith('.nc')]
    all_indices_fullname = [os.path.join(path_input, index) for index in all_indices]
    test_indices = []
    train_val_indices = []
    for filename in all_indices:
        filename_spl = filename.split("/")[-1]
        filename_sub = filename_spl.split("_")
        filename_sub = filename_sub[0] + "_" + filename_sub[1]
        if filename_sub in test_products:
            test_indices.append(os.path.join(path_input, filename))
        elif filename_sub not in test_products:
            train_val_indices.append(os.path.join(path_input, filename))
    total_length = len(train_val_indices)
    validation_test_number = int(val_ratio * total_length)
    val_test_indices = random.sample(train_val_indices, validation_test_number)
    val_indices = list(set(val_test_indices))
    train_indices = list(set(train_val_indices) - set(val_test_indices))
    dictionary_out = {
        'total': total_length,
        'filepaths': all_indices_fullname,
        'test': test_indices,
        'val': val_indices,
        'train': train_indices
    }
    return dictionary_out
6abff73d51d093de997e6c2d55e8f39f6ebb1d5e
29,449
def transpose_func(classes, table):
    """
    Transpose table.

    :param classes: confusion matrix classes
    :type classes: list
    :param table: input confusion matrix
    :type table: dict
    :return: transposed table as dict
    """
    transposed_table = {k: table[k].copy() for k in classes}
    for i, item1 in enumerate(classes):
        for j, item2 in enumerate(classes):
            if i > j:
                temp = transposed_table[item1][item2]
                transposed_table[item1][item2] = transposed_table[item2][item1]
                transposed_table[item2][item1] = temp
    return transposed_table
6c59feef2b735076c5768e086ef2e91331b78a73
29,450
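A short usage sketch for `transpose_func` above with a two-class confusion matrix stored as a nested dict:

classes = ['cat', 'dog']
table = {'cat': {'cat': 5, 'dog': 2},
         'dog': {'cat': 1, 'dog': 7}}
print(transpose_func(classes, table))
# {'cat': {'cat': 5, 'dog': 1}, 'dog': {'cat': 2, 'dog': 7}}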
from functools import reduce


def traverse_dict_children(data, *keys, fallback=None):
    """attempts to retrieve the config value under the given nested keys"""
    value = reduce(lambda d, k: d.get(k, None) or {}, keys, data)
    return value or fallback
2c6f28259136bd8248306b7e759540fc3877d46f
29,451
def dov(init):
    """Usage:

        dictionaryOfVariables = {....}

        @dov
        def __init__(self, ...):

    Decorates __init__ so that it takes a STATIC dictionary of variables
    and computes dynamic mandatoryVariables and optionalVariables
    dictionaries. Obviously easy to rewrite to take a dynamic
    dictionaryOfVariables. Nevertheless, it should be a class decorator
    that only handles static variables. That's TBD.
    """
    def constructor(self, *args, **kwargs):
        self.descriptionOfVariables = {}
        self.mandatoryVariables = []
        self.optionalVariables = []
        typePos = 2
        for key, val in self.dictionaryOfVariables.items():
            value = val[typePos]
            if value is True or value == 'mandatory':
                self.mandatoryVariables.append(key)
            elif value is False or value == 'optional':
                self.optionalVariables.append(key)
            else:
                raise ValueError(
                    'Error. Variable can only be "optional"/False or "mandatory"/True'
                )
        return init(self, *args, **kwargs)
    return constructor
a3e8d3e82d951200e60adf15efb6d5a9b76e34f6
29,452
def get_idxs_in_correct_order(idx1, idx2):
    """First idx must be smaller than second when using upper-triangular
    arrays (matches, keypoints)."""
    if idx1 < idx2:
        return idx1, idx2
    else:
        return idx2, idx1
05963c4f4b692b362980fde3cf6bb1de1b8a21e0
29,453
def filename_handler_ignore_directive(fname):
    """A filename handler that removes anything before (and including) '://'.

    Args:
        fname (str): A file name.

    Returns:
        str: The file name without the prefix.
    """
    # Split on the literal '://' so behavior matches the docstring on every
    # platform (building the separator from os.path.sep only yields '://'
    # on POSIX).
    return fname.split("://")[-1]
555cfeae0017600bc0096693b845f7b502d994a7
29,454
from typing import Dict
from typing import Any


def source_startswith(cell: Dict[str, Any], key: str) -> bool:
    """Return True if cell source starts with the key."""
    source = cell.get("source", [])
    return len(source) > 0 and source[0].startswith(key)
a3dab1e72488a5075832432f36c6fc10d9808a7f
29,455
def categories_to_columns(categories, prefix_sep=' is '):
    """Constructs and returns the categories_col dict from the categories dict."""
    categories_col = {}
    for k, v in categories.items():
        val = [k + prefix_sep + vi for vi in v]
        categories_col[k] = val
    return categories_col
f513fda8f369e7ac48de9306bda9e98889ed06d0
29,457
import os
import sys


def default_python_path(work_dir="."):
    """Set and return the Python path by appending this work_dir to it."""
    work_dir = os.path.abspath(work_dir)
    python_path = os.pathsep.join(sys.path)
    python_path += os.pathsep + work_dir
    return python_path
6809cbcc6157377802178239892438e3f9de2deb
29,458
from bs4 import BeautifulSoup
import re
import json


def parse_rune_links(html: str) -> dict:
    """A function which parses the main Runeforge website into dict format.

    Parameters
    ----------
    html : str
        The string representation of the html obtained via a GET request.

    Returns
    -------
    dict
        The nested rune_links champ rune pages from runeforge.
    """
    soup = BeautifulSoup(html, 'lxml')
    # Champs with only a single runepage
    single_page_raw = soup.find_all('li', class_='champion')
    single_page = {re.split(r'\W+', x.a.div.div['style'])[-3].lower(): [x.a['href']]
                   for x in single_page_raw if x.a is not None}
    # Champs with two (or more) runepages
    double_page_raw = soup.find_all('div', class_='champion-modal-open')
    # This is JSON data which just needs to be decoded
    double_page_decode = [json.loads(x['data-loadouts']) for x in double_page_raw]
    # This lowers the champ name in the structure,
    # and pulls out the champ links, after it's been decoded
    double_page = {re.sub('[^A-Za-z0-9]+', '', x[0]['champion'].lower()):
                   [x[0]['link'], x[1]['link']]
                   for x in double_page_decode}
    # Combine the two dicts
    champs_combined = {**single_page, **double_page}
    return champs_combined
cea14b4d572cad7ca42b3b4ff7bd1b6c52a1e608
29,460
def format_path(path):
    """Attempt to pretty print paths"""
    build = ""
    for rule in path:
        if not build:
            build += str(rule) + '\n'
        else:
            string = str(rule)
            lines = string.split('\n')
            # integer division so the marker comparison can actually match
            halfway = len(lines) // 2
            build += "\n".join([('\t--> ' + line if i == halfway else '\t' + line)
                                for i, line in enumerate(lines)])
            build += "\n"
    return build
360598645f42f0a2bf745eda573ca66fab085427
29,463
import os
import imp


def load_module(filename):
    """Load a Python module from a filename or qualified module name.

    If filename ends with ``.py``, the module is loaded from the given file.
    Otherwise it is taken to be a module name reachable from the path.

    Example:

    .. code-block:: python

        pb_util = load_module('problog.util')
        pb_util = load_module('problog/util.py')

    :param filename: location of the module
    :type filename: str
    :return: loaded module
    :rtype: module
    """
    if filename.endswith(".py"):
        filename = os.path.abspath(os.path.join(os.path.dirname(__file__), filename))
        (path, name) = os.path.split(filename)
        (name, ext) = os.path.splitext(name)
        (extfile, filename, data) = imp.find_module(name, [path])
        return imp.load_module(name, extfile, filename, data)
    else:
        mod = __import__(filename)
        components = filename.split(".")
        for c in components[1:]:
            mod = getattr(mod, c)
        return mod
c274ba83c5c0745fc66d5458a40ee5285699948d
29,464
def _sabr_implied_vol_hagan_A4(
        underlying, strike, maturity, alpha, beta, rho, nu):
    r"""_sabr_implied_vol_hagan_A4
    One of the factors in the Hagan formula.
    See :py:func:`sabr_implied_vol_hagan`.

    .. math::
        A_{4}(K, S; T)
        := 1 + \left(
            \frac{(1 - \beta)^{2}}{24} \frac{\alpha^{2}}{(SK)^{1-\beta}}
            + \frac{1}{4} \frac{\rho\beta\nu\alpha}{(SK)^{(1-\beta)/2}}
            + \frac{2 - 3\rho^{2}}{24} \nu^{2}
        \right) T

    :param float underlying:
    :param float strike:
    :param float maturity:
    :param float alpha:
    :param float beta:
    :param float rho:
    :param float nu:
    :return: value of factor.
    :rtype: float.
    """
    one_minus_beta = 1.0 - beta
    one_minus_beta_half = one_minus_beta / 2.0
    one_minus_beta2 = one_minus_beta ** 2
    numerator1 = one_minus_beta2 * alpha * alpha
    denominator1 = 24.0 * ((underlying * strike) ** one_minus_beta)
    term1 = numerator1 / denominator1
    numerator2 = rho * beta * nu * alpha
    denominator2 = 4.0 * ((underlying * strike) ** one_minus_beta_half)
    term2 = numerator2 / denominator2
    term3 = (2.0 - 3.0 * rho * rho) * nu * nu / 24.0
    return 1.0 + (term1 + term2 + term3) * maturity
704f4db60570cc18ad16b17d9b6ba93747d373d4
29,466
import numpy as np


def tif_stitch(tiles, saved_locs, im_shape):
    """Creates a background mask the size of the original image shape and
    uses it to stitch the tiles back together.
    """
    stitched_im = np.zeros(im_shape)
    for key in saved_locs.keys():
        location = saved_locs[key]
        im = tiles[key]
        stitched_im[location[0]:location[1], location[2]:location[3]] = im
    return stitched_im
d7340fc614e6bbfc3b932367892009d4e711202b
29,467
def get_number_from_numerical_spec(spec):
    """
    Return the number from a numerical spec.
    """
    assert isinstance(spec, int) or isinstance(spec, str), \
        f"'spec' has invalid type {type(spec)}."
    if isinstance(spec, str):
        try:
            return int(spec)
        except ValueError:
            raise ValueError("'spec' does not contain an int.")
    return spec
68b133de6e6090774262aec6fcbaccc8f90132eb
29,468
def make_body_section_text(avl_body_section):
    """This function writes the body text using the template required
    for the AVL executable to read

    Assumptions:
        None

    Source:
        None

    Inputs:
        avl_body_section.origin             [meters]
        avl_body_section.chord              [meters]
        avl_body_section.twist              [radians]
        avl_body_section.airfoil_coord_file [-]

    Outputs:
        body_section_text

    Properties Used:
        N/A
    """
    section_base = \
'''
SECTION
#Xle    Yle    Zle    Chord    Ainc    Nspanwise    Sspace
{0}    {1}    {2}    {3}    {4}    1    0
'''
    airfoil_base = \
'''AFILE
{}
'''

    # Unpack inputs
    x_le = avl_body_section.origin[0]
    y_le = avl_body_section.origin[1]
    z_le = avl_body_section.origin[2]
    chord = avl_body_section.chord
    ainc = avl_body_section.twist
    airfoil = avl_body_section.airfoil_coord_file

    body_section_text = section_base.format(round(x_le, 4), round(y_le, 4),
                                            round(z_le, 4), round(chord, 4),
                                            round(ainc, 4))
    if airfoil:
        body_section_text = body_section_text + airfoil_base.format(airfoil)

    return body_section_text
4896c556c0f543cafc2141599e8c5ab6b6493d28
29,473
import glob


def get_sorted_files(files):
    """Given a Unix-style pathname pattern, returns a sorted list of files
    matching that pattern."""
    files = glob.glob(files)
    files.sort()
    return files
9bed6be4c57846c39290d4b72dba1577f9d76396
29,474
import os


def import_plants_csv(result_dir, structures=None, files=('features.csv', 'ranking.csv')):
    """
    Import PLANTS results csv files

    :param result_dir: PLANTS results directory
    :type result_dir:  :py:str
    :param structures: only import files in structure selection, import all
                       by default
    :type structures:  :py:list
    :param files:      CSV files to import
    :type files:       :py:list, py:tuple

    :return: docking results
    :rtype:  :py:dict
    """
    results = {}
    docking_dir_name = os.path.basename(result_dir)
    for resultcsv in files:
        resultcsv = os.path.join(result_dir, resultcsv)
        if os.path.isfile(resultcsv):
            header = []
            with open(resultcsv, 'r') as csv_file:
                for line in csv_file.readlines():
                    line = line.strip().split(',')
                    if not header:
                        header = line
                        continue
                    # Only import structure selection if needed
                    mol2 = line[0]
                    path = os.path.join(result_dir, '{0}.mol2'.format(mol2))
                    if structures is not None and path not in structures:
                        continue
                    row = {}
                    for i, val in enumerate(line[1:]):
                        if not len(val):
                            row[header[i]] = None
                        elif '.' in val:
                            row[header[i]] = float(val)
                        else:
                            row[header[i]] = int(val)
                    row['PATH'] = os.path.join(docking_dir_name, '{0}.mol2'.format(mol2))
                    results[mol2] = row
            break
    return results
b83d61946c3ffdfbee85167a0c4b17850505ed87
29,475
def removeDuplicates(nums):
    """
    :type nums: List[int]
    :rtype: int
    """
    i = 0  # current check point
    for val in nums:
        if val != nums[i]:
            i += 1
            nums[i] = val
    return i + 1
49a717e0133d58b097811f930d9b47d1188f6825
29,476
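A usage sketch for `removeDuplicates` above — the classic in-place dedup of a sorted list, returning the length of the unique prefix:

nums = [1, 1, 2, 3, 3, 3, 4]
k = removeDuplicates(nums)
print(k, nums[:k])  # 4 [1, 2, 3, 4]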
import sys


def current_version():
    """Current system python version"""
    return sys.version_info
e1efe2447b9544c0b9723e9d050869464f032096
29,477
from typing import Dict
from typing import Any


def modify_namelist_template(
        namelist_template: str,
        placeholder_dict: Dict[str, Any]
):
    """
    Placeholders in the form of %PLACEHOLDER_NAME% are replaced within the
    given namelist template.

    Parameters
    ----------
    namelist_template : str
        Placeholders within this template are replaced by the given value.
    placeholder_dict : Dict[str, Any]
        This is the dictionary with the placeholder values. A placeholder
        is skipped if it is not found within the given namelist template.
        The values have to be castable to string type.

    Returns
    -------
    namelist_template : str
        The namelist where the placeholders are replaced with given values.
    """
    for placeholder, value in placeholder_dict.items():
        namelist_template = namelist_template.replace(placeholder, str(value))
    return namelist_template
2a872b20caf871ff6406e04a91d69ee2f33ba66c
29,479
from typing import Optional
import subprocess


def process(stdin: str, capture_output=True) -> Optional[str]:
    """
    Wrapper for subprocess.

    :param capture_output: whether to capture stdout/stderr
    :param stdin: the command string to run
    :return: stdout on success, stderr on failure (None if not captured)
    """
    p = subprocess.run(stdin.split(), capture_output=capture_output)
    if capture_output:
        stdout = p.stdout.decode("utf-8") if p.returncode == 0 else p.stderr.decode("utf-8")
        return stdout
e3e0ad01fda3b836e73f77c86834a3b45d9b4c3e
29,480
import pickle


def load_results(path, filename):
    """
    Load result from pickle files.

    args:
        path: path to directory in which file is located
        filename: name of the file (without pickle extension)
    """
    with open(path + filename + '.pickle', 'rb') as handle:
        return pickle.load(handle)
6f76914cdbb25c4c5f81bc2bd61b61da5b34777a
29,481
def _interaction_df_to_edge_weight_list(interaction_table, threshold=0.0):
    """Convert from df to edge_list."""
    edge_weight_list = [
        tuple(sorted([row['e1'], row['e2']]) + [row['intensity']])
        for idx, row in interaction_table.iterrows()
        if row['intensity'] > threshold
    ]
    return edge_weight_list
24f2798edd940165c6c4e4abd81327ade3f7a8fc
29,483
import itertools


def generateSubSequences(k, ch):
    """Generates all subsequences of ch with length k."""
    # discussion about the best way to do this:
    # https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings
    seq = ["".join(c) for c in itertools.product(ch, repeat=k)]
    return seq
e2f43a197cd0f4beb46c704afe3713765f99e8f3
29,484
from pathlib import Path


def create_target_absolute_file_path(file_path, source_absolute_root,
                                     target_path_root, target_suffix):
    """Create an absolute path to a file.

    Create an absolute path to a file replacing the source_absolute_root part
    of the path with the target_path_root path if file_path is on the
    source_absolute_root path and replace the extension with target_suffix.

    If file_path is not on source_absolute_root return file_path with the
    target_suffix

    Parameters
    ----------
    file_path : str or Path
        Path to a file
    source_absolute_root : str or Path
        a path that may be the start of file_path
    target_path_root : str or Path
        A path that will become the start of the path to the returned target path
    target_suffix : str
        The suffix to use on the returned target path

    Returns
    -------
    Path
        The new absolute path to the file that was on file_path
    """
    if Path(file_path).is_relative_to(source_absolute_root):
        target_relative_path_to_source_root = Path(file_path).relative_to(source_absolute_root)
        # target_relative_path_to_source_root is the relative path that will
        # be added onto the export folder path
        return Path(target_path_root, target_relative_path_to_source_root).with_suffix(target_suffix)

    if not Path(file_path).is_absolute():
        # path is relative, add target path root and new suffix
        return Path(target_path_root, file_path).with_suffix(target_suffix)

    # file is not in the source path, return the file path with the new suffix
    return Path(file_path).with_suffix(target_suffix)
0d24f8544752967815bd8ddceea15b371076c500
29,485
def query_prefix_transform(query_term):
    """str -> (str, bool)

    Return (query-term, is-prefix) tuple.
    """
    is_prefix = query_term.endswith('*')
    query_term = query_term[:-1] if is_prefix else query_term
    return (query_term, is_prefix)
d33dd4222f53cba471be349b61e969838f6188d5
29,486
import re
import copy
import json


def test_for_data(_):
    """Test case."""
    data = list()
    # Company name contains a date
    res_date = re.search(r"\d{2,4}-\d{1,2}-\d{1,2}", _["org_name"])
    if res_date:
        # Invalidate the original record, then add a new one
        new_data = copy.deepcopy(_)
        _["valid"] = 0
        data.append(_)
        _ = new_data
        # Compare where the date appears with where the bracket appears
        date_pos = _["org_name"].index(res_date.group())
        left_bracket = _["org_name"].find(u"(")
        # If the date is inside the brackets, strip the bracketed part of
        # org_name; if the date is left of the bracket, look for "公司"
        # (company): keep up to and including it if present, otherwise cut
        # from the date onwards
        if (left_bracket > -1) and (left_bracket < date_pos):
            _["org_name"] = _["org_name"][:left_bracket]
        else:
            _["org_name"] = _["org_name"][:date_pos]
            word_pos = _["org_name"].rfind(u"公司")
            if word_pos > -1:
                _["org_name"] = _["org_name"][:word_pos + 2]
    # Company name ends with "公", missing the "司" character
    if re.match(r"^.+公$", _["org_name"]):
        # Invalidate the original record and add a new one with "司"
        # appended to org_name
        if not data:
            new_data = copy.deepcopy(_)
            _["valid"] = 0
            data.append(json.dumps(_))
            _ = new_data
        _["org_name"] = _["org_name"] + u"司"
    data.append(_)
    return data
2710a4842ce671bc17347fc0586ef1c8c0628fec
29,487
def consolidate_conversion(x):
    """
    Collapses the conversion status to Yes or No.
    """
    xl = x.str.lower()
    prefixes = ('purchase', 'converted', 'y', 'processed', 'payment_completed',
                'initiated', 'pdf_error', 'toprocess', 'delivered')
    if any(any(xl.str.startswith(p)) for p in prefixes):
        return 'Y'
    else:
        return 'N'
fa9e8c93705d9d1941c12aeae293dc32750853d2
29,489
def combination_sum_bottom_up(nums, target):
    """Find number of possible combinations in nums that add up to target,
    in bottom-up manner.

    Keyword arguments:
    nums -- positive integer array without duplicates
    target -- integer describing what a valid combination should add to
    """
    combs = [0] * (target + 1)
    combs[0] = 1
    for i in range(0, len(combs)):
        for num in nums:
            if i - num >= 0:
                combs[i] += combs[i - num]
    return combs[target]
00feff267ff21f5132d98ac9c3171869fc150bfa
29,490
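A quick check of `combination_sum_bottom_up` above; note that it counts ordered sequences, so 1+3 and 3+1 are distinct:

print(combination_sum_bottom_up([1, 2, 3], 4))  # 7
# 1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2, 1+3, 3+1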
def fix_msg(msg, name):
    """Return msg with name inserted in square brackets"""
    b1 = msg.index('[') + 1
    b2 = msg.index(']')
    return msg[:b1] + name + msg[b2:]
66544f811a9cead0a40b6685f60a7de8f219e254
29,491
def reshape_as_vectors(tensor):
    """Reshape from (b, c, h, w) to (b, h*w, c)."""
    b, c = tensor.shape[:2]
    return tensor.reshape(b, c, -1).permute(0, 2, 1)
79044cc2061d0a4368922c3dc6c1324eb5fe315b
29,493
from functools import wraps


def memoized(function):
    """
    Caches the output of a function in memory to increase performance.

    Returns
    -------
    f : decorated function

    Notes
    -----
    This decorator speeds up slow calculations that you need over and over
    in a script. If you want to keep the results of a slow function for
    several script executions, use the "cached" decorator instead (which
    also allows mutable arguments).

    Limitations
    -----------
    Use this decorator only for functions with immutable arguments, like
    numbers, tuples, and strings. The decorator is intended for simple
    mathematical functions and optimized for performance.
    """
    cache = {}

    @wraps(function)
    def decorated_function(*args):
        if args in cache:
            output = cache[args]
        else:
            output = function(*args)
            cache[args] = output
        return output

    return decorated_function
885999e6cdcbb165570beae901ed6701df966f64
29,494
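A usage sketch for `memoized` above, applied to a naive recursive Fibonacci that would otherwise take exponential time:

@memoized
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(60))  # 1548008755920 — instant, since each fib(n) is computed once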
def get_common_items(list1, list2):
    """
    Compares two lists and returns the common items in a new list. Used
    internally.

    Example
    -------
    >>> list1 = [1, 2, 3, 4]
    >>> list2 = [2, 3, 4, 5]
    >>> common = get_common_items(list1, list2)

    list1: list
    list2: list
    returns: list
    """
    common = [value for value in list1 if value in list2]
    return common
4978429d7d255270b4e4e52e44159e934cbd5ba5
29,497
def validate_eyr(value: str) -> bool:
    """Expiration must be between 2020 and 2030, inclusive"""
    try:
        return int(value) in range(2020, 2031)
    except (TypeError, ValueError):
        return False
8b9cacda3eafeba6e26779430bb80218051f2f24
29,498
from typing import List


def cut_list_with_overlap(input_list: list, norm_seg_size: int,
                          overlap: int, last_prop: float) -> List[list]:
    """Cut the split list of text into a list that contains sub-lists.

    This function takes care of both overlap and last proportion with the
    input list and the segment size. The function calculates the number of
    segments with the overlap value and then uses it as indexing to capture
    all the sub-lists with the get_single_seg helper function.

    :param last_prop: the last segment size / other segment size.
    :param input_list: the segment list that split the contents of the file.
    :param norm_seg_size: the size of the segment.
    :param overlap: the number of items shared by two adjacent segments.
    :return: a list of lists (segments) that the text has been cut into,
        which has not gone through the last proportion size calculation.
    """
    # get the distance between starts of each two adjacent segments
    seg_start_distance = norm_seg_size - overlap

    # the length of the list excluding the last segment
    length_exclude_last = len(input_list) - norm_seg_size * last_prop

    # the total number of segments after cut
    # the `+ 1` is to add back the last segment
    num_segment = int(length_exclude_last / seg_start_distance) + 1

    # need at least one segment
    if num_segment < 1:
        num_segment = 1

    def get_single_seg(index: int, is_last_prop: bool) -> list:
        """Helper to get one single segment with index.

        This function first evaluates whether the segment is the last one
        and grabs a different segment according to the result, and returns
        sub-lists while index is in the range of the number of segments.

        :param is_last_prop: the bool value that determines whether the
            segment is the last one.
        :param index: the index of the segment in the final segment list.
        :return: single segment in the input_list based on index.
        """
        # define current segment size based on whether it is the last segment
        if is_last_prop:
            return input_list[seg_start_distance * index:]
        else:
            return input_list[seg_start_distance * index:
                              seg_start_distance * index + norm_seg_size]

    # return the whole list of segments while evaluating whether each is last
    return [get_single_seg(
        index=index,
        is_last_prop=True if index == num_segment - 1 else False
    ) for index in range(num_segment)]
5f3b912907bcfbb41bae1df048601def5c6dbef6
29,499
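A quick usage sketch for `cut_list_with_overlap` above, cutting ten tokens into segments of four with an overlap of two:

tokens = list(range(10))
print(cut_list_with_overlap(tokens, norm_seg_size=4, overlap=2, last_prop=1.0))
# [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]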
def craft_post_header(length=0, content_length=True):
    """Returns header with 'Content-Length' set to `length`."""
    if content_length:
        header = b"POST /jsproxy HTTP/1.1\r\nContent-Length: "
        header += "{}\r\n\r\n".format(str(length)).encode()
    else:
        header = b"POST /jsproxy HTTP/1.1\r\n\r\n"
    return header
4d556541ccd9bce18dad2bebffaaab7851673ef8
29,500
def Powerlaw(x, amp, index, xp):
    """
    Differential spectrum of a simple power law normalized at xp.

    :param x: Energies where to calculate differential spectrum
    :param amp: Amplitude of pl
    :param index: Index of pl
    :param xp: Normalization energy
    :return: differential spectrum of a Powerlaw at energies x
    """
    return amp * (x / xp) ** index
6e31d7a85998f762ff27305a38535487db6592fb
29,501
def set_mates(aset):
    """Set the mates in a set of alignments."""
    def mate_sorter(aln):
        """Sort the alignments in order of mate."""
        return (
            aln.supplementary_alignment,
            aln.secondary_alignment,
            not aln.first_in_pair)

    aset.sort(key=mate_sorter)
    if len(aset) == 1:
        return aset
    # consider paired alignments differently
    if aset[0].paired:
        if not aset[1].last_in_pair:
            raise ValueError("second alignment should be last in pair")
        if aset[1].supplementary_alignment or aset[1].secondary_alignment:
            raise ValueError("second alignment should be primary alignment")
        aset[0].mate = aset[1]
        aset[1].mate = aset[0]
        if len(aset) > 2:
            for idx in range(2, len(aset)):
                if aset[idx].first_in_pair:
                    aset[idx].mate = aset[0]
                else:
                    aset[idx].mate = aset[1]
    # consider single read alignments
    else:
        if len(aset) > 1:
            for idx in range(1, len(aset)):
                aset[idx].mate = aset[0]
    # return aset
50b499e8d03e049fefd8a9e29db4b267d7e1399f
29,502
import math


def number_of_combinations(n, i):
    """This function gets the binomial coefficients n and i.
    Returns the number of ways the i objects can be chosen from among
    n objects."""
    return int((math.factorial(n)) / (math.factorial(i) * math.factorial(n - i)))
cf78abaa64948732965ee912a5247610a7c8d909
29,503
def get_by_tag_name(element, tag):
    """
    Get elements by tag name.
    :param element: element
    :param tag: tag string
    :return: list of elements that matches tag
    """
    results = []
    for child in element.findall('.//'):
        if tag in child.tag:
            results.append(child)
    return results
d1e2d196cae5c3a0de00c624f5df28c988eaa088
29,504
def clamp(value, min=0, max=255):
    """
    Clamps a value to be within the specified range.

    Since led shop uses bytes for most data, the defaults are 0-255.
    """
    if value > max:
        return max
    elif value < min:
        return min
    else:
        return value
2d6e01daddc3603527021e4dbab261f82b3f8862
29,505
def clamp_value(value, minimum, maximum):
    """Clamp a value to fit within a range.

    * If `value` is less than `minimum`, return `minimum`.
    * If `value` is greater than `maximum`, return `maximum`
    * Otherwise, return `value`

    Args:
        value (number or Unit): The value to clamp
        minimum (number or Unit): The lower bound
        maximum (number or Unit): The upper bound

    Returns:
        int or float: The clamped value

    Example:
        >>> clamp_value(-1, 2, 10)
        2
        >>> clamp_value(4.0, 2, 10)
        4.0
        >>> clamp_value(12, 2, 10)
        10
    """
    if value < minimum:
        return minimum
    elif value > maximum:
        return maximum
    else:
        return value
1224b7ed098781d66721dceef5c837470f1195a6
29,507
import os
import yaml


def _load_config():
    """Load the Helm chart configuration used to render the Helm templates
    of the chart from a mounted k8s Secret."""
    path = "/etc/jupyterhub/secret/values.yaml"
    if os.path.exists(path):
        print(f"Loading {path}")
        with open(path) as f:
            return yaml.safe_load(f)
    else:
        raise Exception(f"{path} not found!")
658284176edb6327be4615c0ec347f13606fc8b3
29,508
def earlyexit_loss(output, target, criterion, args):
    """Compute the weighted sum of the exits' losses.

    Note that the last exit is the original exit of the model (i.e. the
    exit that traverses the entire network).
    """
    weighted_loss = 0
    sum_lossweights = sum(args.earlyexit_lossweights)
    assert sum_lossweights < 1
    for exitnum in range(args.num_exits - 1):
        if output[exitnum] is None:
            continue
        exit_loss = criterion(output[exitnum], target)
        weighted_loss += args.earlyexit_lossweights[exitnum] * exit_loss
        args.exiterrors[exitnum].add(output[exitnum].detach(), target)
    # handle final exit
    weighted_loss += (1.0 - sum_lossweights) * criterion(output[args.num_exits - 1], target)
    args.exiterrors[args.num_exits - 1].add(output[args.num_exits - 1].detach(), target)
    return weighted_loss
49eb0fd896621dc678eb4de1d482a0e80eb00d8e
29,509
def make_signatures_with_minhash(family, seqs):
    """Construct a signature using MinHash for each sequence.

    Args:
        family: lsh.MinHashFamily object
        seqs: dict mapping sequence header to sequences

    Returns:
        dict mapping sequence header to signature
    """
    # Construct a single hash function; use the same for all sequences
    h = family.make_h()
    signatures = {}
    for name, seq in seqs.items():
        signatures[name] = h(seq)
    return signatures
c6366811536f1e32c29e3a00c91267bde85969fa
29,510
def promote_window(gate_gen1, gate_item2):
    """
    :param gate_gen1: the generator of the gates objects
    :param gate_item2: the current gate 2 object
    :return: promoted gate 1, promoted gate 2, new (empty) middle list
    """
    try:
        temp_gate = next(gate_gen1)
    except StopIteration:  # next() on an exhausted generator raises StopIteration
        return gate_item2, [], []
    gate_item1 = gate_item2  # runs over the list
    gate_item2 = temp_gate
    middle_list = list()
    return gate_item1, gate_item2, middle_list
e69ff90a31fb3de3335dedbd3f17cd6f24fe44bf
29,512
def equivalent(m1, m2):
    """Test whether two DFAs are equivalent, using the Hopcroft-Karp
    algorithm."""
    if not (m1.is_finite() and m1.is_deterministic()):
        raise TypeError("machine must be a deterministic finite automaton")
    if not (m2.is_finite() and m2.is_deterministic()):
        raise TypeError("machine must be a deterministic finite automaton")

    # Index transitions. We use tuples (1,q) and (2,q) to rename apart state sets
    alphabet = set()
    d = {}
    for t in m1.get_transitions():
        [[q], a] = t.lhs
        [[r]] = t.rhs
        alphabet.add(a)
        d[(1, q), a] = (1, r)
    for t in m2.get_transitions():
        [[q], a] = t.lhs
        [[r]] = t.rhs
        alphabet.add(a)
        d[(2, q), a] = (2, r)

    # Naive union find data structure
    u = {}

    def union(x, y):
        for z in u:
            if u[z] == x:
                u[z] = y

    for q in m1.states:
        u[1, q] = (1, q)
    for q in m2.states:
        u[2, q] = (2, q)

    s = []
    s1 = (1, m1.get_start_state())
    s2 = (2, m2.get_start_state())
    union(s1, s2)
    s.append((s1, s2))
    while len(s) > 0:
        q1, q2 = s.pop()
        for a in alphabet:
            r1 = u[d[q1, a]]
            r2 = u[d[q2, a]]
            if r1 != r2:
                union(r1, r2)
                s.append((r1, r2))

    cls = {}
    f = (
        {(1, q) for q in m1.get_accept_states()}
        | {(2, q) for q in m2.get_accept_states()}
    )
    for q in u:
        if u[q] not in cls:
            cls[u[q]] = q in f
        elif (q in f) != cls[u[q]]:
            return False
    return True
ad0c67b167de8987d90fe9d41cec25f8deea81ea
29,514
def divide_no_nan(a, b):
    """Auxiliary function to handle divide by 0."""
    div = a / b
    div[div != div] = 0.0          # NaN != NaN, so this zeroes out NaNs
    div[div == float('inf')] = 0.0
    return div
e037bd1f7d5ed6e2aaf545e1fc5454dbcf719c38
29,515
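A quick check of `divide_no_nan` above; it assumes array-like inputs that support boolean-mask assignment, e.g. NumPy arrays or torch tensors:

import numpy as np

a = np.array([1.0, 0.0, 2.0])
b = np.array([0.0, 0.0, 4.0])
with np.errstate(divide='ignore', invalid='ignore'):  # silence 0-division warnings
    print(divide_no_nan(a, b))  # [0.  0.  0.5]

Note that only +inf is zeroed, so a negative value divided by zero still comes back as -inf.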
import socket


def resolve_fqdn_ip(fqdn):
    """Attempt to retrieve the IPv4 or IPv6 address to a given hostname."""
    try:
        return socket.inet_ntop(
            socket.AF_INET,
            socket.inet_pton(socket.AF_INET, socket.gethostbyname(fqdn)))
    except Exception:
        # Probably using ipv6
        pass
    try:
        return socket.inet_ntop(
            socket.AF_INET6,
            socket.inet_pton(
                socket.AF_INET6,
                socket.getaddrinfo(fqdn, None, socket.AF_INET6)[0][4][0]))
    except Exception:
        return ''
2e7680fe3a4ea174f3f7b22e4991a3a65e5e4995
29,516
def personalizar_url(termo_de_pesquisa):
    """Search for products and build the personalized search URL.

    IN:  'smart tv 4k': str
    OUT: https://www.magazineluiza.com.br/busca/smart+tv+4k/?page={}
    """
    template = 'https://www.magazineluiza.com.br/busca/{}/'
    # Replace spaces so the search term is URL-friendly
    termo_de_pesquisa = termo_de_pesquisa.replace(' ', '+')
    # Add the search query string to the URL
    url = template.format(termo_de_pesquisa)
    # Add the page query string to the URL
    url += '?page={}'
    return url
92c3fa9a6f79cfef5d43d8eef1d6aadbc88b814f
29,517
def empty_rule(*args):
    """An empty rule for providing structure"""
    return {}
e42c8ecbfc1dd47f366734ad0a4294733954cfe5
29,518
from pathlib import Path


def field_name_from_path(path):
    """Extract field name from file path."""
    parts = Path(path).stem.split('.')
    field_name = parts[-1]
    if len(parts) > 1:
        if parts[-1] in ('bam', 'sam', 'cram'):
            field_name = parts[-2]
    return field_name
9d21b760bccb6557f5450921482d76917ba13981
29,519
import sys


def import_class(full_name, subclassof=None):
    """
    Import the Python class `full_name` given in full Python package format,
    e.g. package.another_package.class_name

    Return the imported class. Optionally, if `subclassof` is not None
    and is a Python class, make sure that the imported class is a
    subclass of `subclassof`.
    """
    # Understand which class we need to instantiate. The class name is given in
    # full Python package notation, e.g.
    #   package.subPackage.subsubpackage.className
    # in the input parameter `full_name`. This means that
    #   1. We HAVE to be able to say
    #      from package.subPackage.subsubpackage import className
    #   2. If `subclassof` is defined, the newly imported Python class MUST be
    #      a subclass of `subclassof`, which HAS to be a Python class.
    full_name = full_name.strip()
    package_name, class_name = str(full_name).rsplit('.', 1)
    imported = __import__(package_name, globals(), locals(), [class_name, ])

    # Now we can have two situations and we try and support both:
    #   1. What we call imported is really a module in which class_name is
    #      defined.
    #   2. imported, by virtue of whatever module __init__.py magic, is
    #      already the Python class we want.
    # Is imported a module?
    if isinstance(imported, type(sys)):
        step_class = getattr(imported, class_name)
    else:
        step_class = imported

    if subclassof and not issubclass(step_class, subclassof):
        msg = 'Class %s from package %s is not a subclass of %s' \
              % (class_name, package_name, subclassof.__name__)
        raise NotImplementedError(msg)

    return step_class
2b0d3acc462e016e4d6d9bdc65210bb126ddc756
29,520
def binaryStringDigitDiff(binstr1, binstr2):
    """
    Count the number of digits that differ between two
    same-length binary strings.

    Parameters
    ----------
    binstr1 : string
        A binary (base-2) numeric string.
    binstr2 : string
        A binary (base-2) numeric string.

    Returns
    -------
    An integer, the number of digits that differ between the
    two strings.
    """
    digitDiff = 0
    if not len(binstr1) == len(binstr2):
        raise Exception("binaryStringDigitDiff requires arguments to have same length")
    for n in range(0, len(binstr1)):
        if binstr1[n] != binstr2[n]:
            digitDiff += 1
    return digitDiff
be39c7dbc30f6e49263a880254a021ff6873d240
29,521
import os


def find_all_event_files(dir_path):
    """find all event files in directory `dir_path`.

    :param dir_path: directory path
    :type dir_path: str
    :return: list of file paths.
    """
    file_path_list = []
    for root, dirs, files in os.walk(dir_path):
        for file_name in files:
            if "events" in file_name:
                file_path_list.append(os.path.join(root, file_name))
    return file_path_list
822efba29bbd5c7ad5050f73f8859b2824f1789e
29,522
def nodes(topology):
    """Get the nodes in the topology.

    This function returns a list containing all nodes in the topology.
    When iterating through all nodes in the list, nodes_iter is preferable.
    """
    return topology.nodes()
ec631e7ff22d993d3e21974ffbe035e2a03f99dc
29,523
from datetime import datetime


def timestamp_to_datetime(timestamp):
    """Generate datetime string formatted for the ontology timeseries.

    Args:
        timestamp (float, timestamp): Timestamp value as float

    Returns:
        str: Returns the timestamp with the format necessary to insert in
        the Ontology
    """
    return datetime.fromtimestamp(timestamp).strftime("%Y-%m-%dT%H:%M:%SZ")
b894c3610842bf62387cbed77047fe4cc4d577a5
29,524
def rem(args):
    """MODIFICATION

    Removes sub-string (requires one argument)
    """
    f = args.split()[1]

    def _f(input):
        return [i.replace(f, '') for i in input]

    return _f
b46f626ad767693735fde87ade3aee8a37a13e0a
29,525
def setdiag(G, val):
    """
    Sets the diagonal elements of a matrix to a specified value.

    Parameters
    ----------
    G: A 2D matrix or array.
        The matrix to modify.
    val: Int or float
        The value to which the diagonal of 'G' will be set.
    """
    n = G.shape[0]
    for i in range(0, n):
        G[i, i] = val
    return G
74a1e2899a8575f04692f74ad01e122b3639d2b1
29,526
import re


def list_nameservers():
    """Return the nameserver entries from /etc/resolv.conf."""
    with open('/etc/resolv.conf', 'r') as dnsservers:
        entries = dnsservers.readlines()
    dns = []
    for entry in entries:
        if re.match(r"\s*nameserver", entry) is not None:
            dns.append(entry.split()[1])
    return dns
1fef764c7652d50387c828539d962049ad4195c6
29,527
def forest_without(root, vals):
    """Removes nodes with specified values. Yields the remaining roots."""
    def dfs_root(node):
        if not node:
            return
        if node.val in vals:
            yield from dfs_root(node.left)
            yield from dfs_root(node.right)
        else:
            yield node
            yield from dfs_left(node)
            yield from dfs_right(node)

    def dfs_left(parent):
        child = parent.left
        if not child:
            return
        if child.val in vals:
            parent.left = None
            yield from dfs_root(child)
        else:
            yield from dfs_left(child)
            yield from dfs_right(child)

    def dfs_right(parent):
        child = parent.right
        if not child:
            return
        if child.val in vals:
            parent.right = None
            yield from dfs_root(child)
        else:
            yield from dfs_left(child)
            yield from dfs_right(child)

    return dfs_root(root)
5fd3badc8d1e55d352be2fecd12535b643fa34e2
29,528
def get_operation_full_job_id(op):
    """Returns the job-id or job-id.task-id for the operation."""
    job_id = op.get_field('job-id')
    task_id = op.get_field('task-id')
    if task_id:
        return '%s.%s' % (job_id, task_id)
    else:
        return job_id
6c0258d5c2053a5320d78340979fe60c8ea88980
29,530
def add_to_list(api_call):
    """Extract the list of replays from an API response.

    :param api_call: Request object
    :return: list of replays.
    """
    the_list = api_call.json()['list']
    return the_list
5d742d44f96df1cb0b1b422468d220abaaab6fbe
29,531
def make_too_big():
    """
    Returns a string too big to store as custom data (> 20MB).
    """
    return "12" * 20 * 1024 * 1024
db5177357d9e6c4b0b22b35cdded1d8f7a6269ab
29,532
def get_conditions(worksheet):
    """
    Get the conditions displayed on a worksheet.

    args:
        worksheet (seeq.spy.workbooks._worksheet.AnalysisWorksheet): Worksheet

    returns:
        conditions (pandas.DataFrame): Displayed Conditions
    """
    display_items = worksheet.display_items
    if len(display_items) == 0:
        raise ValueError('No items (signals, conditions, etc) are displayed in this worksheet.')
    return display_items.query("Type == 'Condition'")
f3fe0bd58f9a0344f047e3ae6eef6bdefa325c21
29,534
def TailFile(fname, lines=20):
    """Return the last lines from a file.

    @note: this function will only read and parse the last 4KB of the file;
        if the lines are very long, it could be that less than the requested
        number of lines are returned

    @param fname: the file name
    @type lines: int
    @param lines: the (maximum) number of lines to return
    """
    fd = open(fname, "r")
    try:
        fd.seek(0, 2)
        pos = fd.tell()
        pos = max(0, pos - 4096)
        fd.seek(pos, 0)
        raw_data = fd.read()
    finally:
        fd.close()
    rows = raw_data.splitlines()
    return rows[-lines:]
39f2f07421f150df16061219790e49f222f284e7
29,535
import argparse


def parse_args():
    """Parse CLI arguments"""
    ocm_cli_binary_url = ("https://github.com/openshift-online/ocm-cli/"
                          "releases/download/v0.1.55/ocm-linux-amd64")
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Script to generate test config file')

    parser.add_argument("-i", "--awsaccountid",
                        help="aws account id",
                        action="store", dest="aws_account_id", required=True)
    parser.add_argument("-a", "--awsaccesskeyid",
                        help="aws access key id",
                        action="store", dest="aws_access_key_id", required=True)
    parser.add_argument("-k", "--awssecretaccesskey",
                        help="aws secret access key",
                        action="store", dest="aws_secret_access_key", required=True)
    parser.add_argument("-l", "--logintoken",
                        help="openshift token for login",
                        action="store", dest="login_token", required=True)
    parser.add_argument("-p", "--testingplatform",
                        help="testing platform. 'prod' or 'stage'",
                        action="store", dest="testing_platform", default="stage")
    parser.add_argument("-e", "--clustername",
                        help="osd cluster name",
                        action="store", dest="cluster_name", default="osd-qe-1")
    parser.add_argument("-r", "--awsregion",
                        help="aws region",
                        action="store", dest="aws_region", default="us-east-1")
    parser.add_argument("-t", "--awsinstancetype",
                        help="aws instance type",
                        action="store", dest="aws_instance_type", default="m5.2xlarge")
    parser.add_argument("-c", "--numcomputenodes",
                        help="Number of compute nodes",
                        action="store", dest="num_compute_nodes", default="3")
    parser.add_argument("--openshift-version",
                        help="Openshift Version",
                        action="store", dest="openshift_version", default="")
    parser.add_argument("-j", "--htpasswd-cluster-admin",
                        help="Cluster admin user of idp type htpasswd",
                        action="store", dest="htpasswd_cluster_admin",
                        default="htpasswd-cluster-admin-user")
    parser.add_argument("-y", "--htpasswd-cluster-password",
                        help="htpasswd Cluster admin password",
                        action="store", dest="htpasswd_cluster_password",
                        default="rhodsPW#123456")
    parser.add_argument("-u", "--ldap-url",
                        help="Ldap url",
                        action="store", dest="ldap_url",
                        default=("ldap://openldap.openldap.svc."
                                 "cluster.local:1389"
                                 "/dc=example,dc=org?uid"))
    parser.add_argument("-b", "--ldap-bind-dn",
                        help="Ldap bind dn",
                        action="store", dest="ldap_bind_dn",
                        default="cn=admin,dc=example,dc=org")
    parser.add_argument("-w", "--ldap-bind-password",
                        help="Ldap bind password",
                        action="store", dest="ldap_bind_password",
                        default="adminpassword")
    parser.add_argument("-z", "--num-users-to-create-per-group",
                        help="Number of users to create per group",
                        action="store", dest="num_users_to_create_per_group",
                        default="20")
    parser.add_argument("-s", "--skip-cluster-creation",
                        help="skip osd cluster creation",
                        action="store_true", dest="skip_cluster_creation")
    parser.add_argument("-x", "--skip-rhods-installation",
                        help="skip rhods installation",
                        action="store_true", dest="skip_rhods_installation")
    parser.add_argument("-m", "--create-cluster-admin-user",
                        help="create cluster admin user for login",
                        action="store_true", dest="create_cluster_admin_user")
    parser.add_argument("-q", "--create-ldap-idp",
                        help="create ldap idp and add users to rhods groups",
                        action="store_true", dest="create_ldap_idp")
    parser.add_argument("-d", "--delete-ldap-idp",
                        help="delete ldap idp",
                        action="store_true", dest="delete_ldap_idp")
    parser.add_argument("-g", "--delete-cluster",
                        help="delete osd cluster",
                        action="store_true", dest="delete_cluster")
    parser.add_argument("--uninstall-rhods",
                        help="Uninstall rhods",
                        action="store_true", dest="uninstall_rhods")
    parser.add_argument("-o", "--ocmclibinaryurl",
                        help="ocm cli binary url",
                        action="store", dest="ocm_cli_binary_url",
                        default=ocm_cli_binary_url)
    return parser.parse_args()
b278cdd1ebc43088eec940fc80dfe746f6a9f744
29,536
def assign_x_in_same_rows(sorted_df):
    """
    :param sorted_df: dataframe sorted on min_x values
    :return: dataframe assigning squares in the same x row
    """
    df = sorted_df.reset_index()
    x_assignments = []
    group = 0
    # .ix was removed in pandas 1.0; .loc works because reset_index gives a
    # 0..n-1 integer index
    min_x = df.loc[0, 'min_x']
    for row in sorted_df.iterrows():
        x = row[1]['min_x']
        diff_x = abs(min_x - x)
        if diff_x < 40:
            x_assignments.append(group)
        else:
            group = group + 1
            x_assignments.append(group)
            min_x = x
    df['x_groups'] = x_assignments
    return df.sort_values(by=['x_groups', 'min_y'])
d9877c45ca12f12cfebc6a88a257a28594ea70ca
29,537
def get_df_subset(df, column_one, value_one, column_two, value_two):
    """
    Takes a dataframe and filters it based on two columns and their
    matching values.
    """
    return df.loc[(df[column_one] == value_one) & (df[column_two] == value_two), :]
4971af24f33b12d00a2385b7c5ee295630f385de
29,538
import os


def get_dir(instr, pathsup):
    """
    folder = get_dir(instr, pathsup)
    folder = get_dir(s, 1)  # get the innermost folder

    Parameters
    ----------
    instr : STRING
        PATH.
    pathsup : INTEGER
        Defines the number of folders away from the innermost.

    Returns
    -------
    oustr : STRING
        Folder name.
    """
    x = instr.split(os.sep)  # split string based on the OS path separator
    oustr = x[-pathsup]
    return oustr
4ace513dc7fb74f7eafa6f0f091b2c2068826a1d
29,539
def zoom_to_roi(zoom, resolution):
    """Gets region of interest coordinates from x,y,w,h zoom parameters"""
    x1 = int(zoom[0] * resolution[0])
    x2 = int((zoom[0] + zoom[2]) * resolution[0])
    y1 = int(zoom[1] * resolution[1])
    y2 = int((zoom[1] + zoom[3]) * resolution[1])
    return ((x1, y1), (x2, y2))
cd77da9a67713ed7f76f4d41a7b7bee7e2f54a78
29,540
def get_ext(path):
    """Get file extension.

    Finds a file's extension and returns it.

    Parameters
    ----------
    path : str
        The path to a file.

    Returns
    -------
    str
        The file extension including leading "."
    """
    return path[path.rfind("."):]
80ba4fb8686b2a7454240a56aa0c3a7d41636f27
29,541
def _engprop(l):  # {{{1
    """Print the engineering properties as a HTML table."""
    lines = [
        "</tbody>",
        "<tbody>",
        '<tr><td colspan="6" align="center"><strong>Engineering properties</strong></td></tr>',
        '<tr><td colspan="3" align="center"><strong>In-plane</strong></td>',
        '<td colspan="3" align="center"><strong>3D stiffness tensor</strong></td></tr>',
        "<tr>",
        f'<td>E<sub>x</sub></td><td>{l.Ex:.0f}</td><td align="left">MPa</td>',
        f'<td>E<sub>x</sub></td><td>{l.tEx:.0f}</td><td align="left">MPa</td>',
        "</tr>",
        "<tr>",
        f'<td>E<sub>y</sub></td><td>{l.Ey:.0f}</td><td align="left">MPa</td>',
        f'<td>E<sub>y</sub></td><td>{l.tEy:.0f}</td><td align="left">MPa</td>',
        "</tr>",
        "<tr>",
        f'<td>E<sub>z</sub></td><td>{l.Ez:.0f}</td><td align="left">MPa</td>',
        f'<td>E<sub>z</sub></td><td>{l.tEz:.0f}</td><td align="left">MPa</td>',
        "</tr>",
        "<tr>",
        f'<td>G<sub>xy</sub></td><td>{l.Gxy:.0f}</td><td align="left">MPa</td>',
        f'<td>G<sub>xy</sub></td><td>{l.tGxy:.0f}</td><td align="left">MPa</td>',
        "</tr>",
        "<tr>",
        f'<td>G<sub>xz</sub></td><td>{l.Gxz:.0f}</td><td align="left">MPa</td>',
        f'<td>G<sub>xz</sub></td><td>{l.tGxz:.0f}</td><td align="left">MPa</td>',
        "</tr>",
        "<tr>",
        f'<td>G<sub>yz</sub></td><td>{l.Gyz:.0f}</td><td align="left">MPa</td>',
        f'<td>G<sub>yz</sub></td><td>{l.tGyz:.0f}</td><td align="left">MPa</td>',
        "</tr>",
        "<tr>",
        f'<td>&nu;<sub>xy</sub></td><td>{l.νxy:.3f}</td><td align="left">-</td>',
        f'<td>&nu;<sub>xy</sub></td><td>{l.tνxy:.3f}</td><td align="left">-</td>',
        "</tr>",
        "<tr>",
        f'<td>&nu;<sub>yx</sub></td><td>{l.νyx:.3f}</td><td align="left">-</td>',
        f'<td>&nu;<sub>xz</sub></td><td>{l.tνxz:.3f}</td><td align="left">-</td>',
        "</tr>",
        "<tr>",
        f'<td>&alpha;<sub>x</sub></td><td>{l.αx:.3f}</td><td align="left">K<sup>-1</sup></td>',
        f'<td>&nu;<sub>yz</sub></td><td>{l.tνyz:.3f}</td><td align="left">-</td>',
        "</tr>",
        "<tr>",
        f'<td>&alpha;<sub>y</sub></td><td>{l.αy:.3f}</td><td align="left">K<sup>-1</sup></td>',
        "</tr>",
    ]
    return lines
e6bbaa4a39265b2703aa623a99a73f5b4e471023
29,544
def convert_codonlist_to_tuplelist(seq_codons, codon_to_codon_extended):
    """Convert a list of triplets into a list of tuples, using a swaptable.

    The swaptable is a dict of triplet: triplets, and determines the
    allowed swaps.
    """
    codon_extended = [None] * len(seq_codons)
    for i, codon in enumerate(seq_codons):
        codon_extended[i] = codon_to_codon_extended[codon]
    return codon_extended
c4121f51aa0152f8e7683380667d06f0b79903f1
29,547
def _check_model(model):
    """Check model input, return class information and predict function."""
    try:
        n_classes = len(model.classes_)
        predict = model.predict_proba
    except AttributeError:
        n_classes = 0
        predict = model.predict
    return n_classes, predict
9dbf14724f1407c65a7b9e306f32e28eb37b66ee
29,548
import typing
import hashlib


def create_sf_instance(entity_name: str, property_label: str) -> typing.Tuple[str, str]:
    """Creates a slot filling instance by combining a name and a relation.

    Arguments:
        entity_name: ``str`` The name of the AmbER set to fill into the template.
        property_label: ``str`` The property name of the AmbER set tuple.

    Returns:
        sf_input: ``str`` The template with the name slotted in.
        sf_hashlib: ``str`` A MD5 hash of the input.
    """
    sf_input = entity_name + " [SEP] " + property_label
    sf_hashlib = hashlib.md5(sf_input.encode("utf-8")).hexdigest()
    return sf_input, sf_hashlib
02e69115261b148c7fdb270864095c63bbdb2278
29,549
def ensure_keys(dict_obj, *keys):
    """
    Ensure ``dict_obj`` has the hierarchy ``{keys[0]: {keys[1]: {...}}}``

    The innermost key will have ``{}`` as value if it didn't exist already.
    """
    if len(keys) == 0:
        return dict_obj
    else:
        first, rest = keys[0], keys[1:]
        if first not in dict_obj:
            dict_obj[first] = {}
        dict_obj[first] = ensure_keys(dict_obj[first], *rest)
        return dict_obj
e8d87444ed8961d8d650b49c8670dca1496623b1
29,550
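A usage sketch for `ensure_keys` above; existing branches are preserved while missing ones are created:

config = {'a': {'x': 1}}
ensure_keys(config, 'a', 'b', 'c')
print(config)  # {'a': {'x': 1, 'b': {'c': {}}}}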
def to_plist(head):
    """Scans from head onwards, returns a python list."""
    plist = [head.val]
    current = head
    while current.next:
        plist.append(current.next.val)
        current = current.next
    return plist
95cf048cd51a931c6381a90d455b721c4766473f
29,551
def uint_double(k):
    """Substitute for the corresponding C function in the mini_double package.

    The cython extension 'mini_double' is used for code generation.
    Depending on the details of the build process, it may not yet be
    available when the code is generated. So we take this function as a
    substitute for the corresponding function in the ``mini_double``
    package.
    """
    return 2 * k
760d9b1dbbd6975879b97bdc3d808cda7980c93e
29,553
import re


def middle_coord(text):
    """Get the middle coordinate from a coordinate range.

    The input is in the form <start> to <end> with both extents being in
    degrees and minutes as N/S/W/E. S and W thus need to be negated as we
    only care about N/E.
    """
    def numbers_from_string(s):
        """Find all numbers in a string."""
        return [float(n) for n in re.findall(r'[\d.]*\d[\d.]*', s)]

    def tuple_to_float(numbers):
        divisor = 1
        result = 0
        for num in numbers:
            result += float(num) / divisor
            divisor = divisor * 60
        return result

    if text is None:
        return None
    pieces = text.split(' to ', 1)
    start, end = map(numbers_from_string, pieces)
    start = tuple_to_float(start)
    end = tuple_to_float(end)
    if pieces[0][-1] in ('S', 'W'):
        start = -start
    if pieces[1][-1] in ('S', 'W'):
        end = -end
    return (start + end) / 2
4e8148102ff04969279690922ddce5bd767fed55
29,554
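A quick check of `middle_coord` above with a degrees-and-minutes range:

print(middle_coord('51 30 N to 52 30 N'))  # 52.0, midway between 51.5 and 52.5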
def fakefunction(*args, **kwargs):
    """
    :dsc:
        Simple wrapper to attach this to classes of a module when we end
        up with a fake attribute
    """
    return None
bd3d270395d8a7de4185d88f01dd76c1c28ec9ed
29,555