Column schema for the rows below: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k).
def _flatten(d):
    """
    Pack a hierarchical dictionary of variables into a list

    Sorting is important as it ensures the function is called with the
    inputs in the same order each time!
    """
    l = []
    # This sorting is important!
    for (k, v) in sorted(d.items(), key=lambda t: t[0]):
        if isinstance(v, dict):
            lv = _flatten(v)
            for v2 in lv:
                l.append(v2)
        else:
            l.append(v)
    return l
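A quick usage sketch for the row above (hypothetical input): nested dicts are flattened depth-first in sorted key order.

    nested = {'b': 2, 'a': {'y': 1, 'x': 0}}
    assert _flatten(nested) == [0, 1, 2]  # a.x, a.y, b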
d1aea2b85e161747262ec424e9b63fb336967236
22,093
def gwas(batch, vcf, phenotypes):
    """
    QC data and get association test statistics
    """
    cores = 2
    g = batch.new_job(name='run-gwas')
    g.image('gcr.io/<MY_PROJECT>/1kg-gwas:latest')
    g.cpu(cores)
    g.declare_resource_group(ofile={
        'bed': '{root}.bed',
        'bim': '{root}.bim',
        'fam': '{root}.fam',
        'assoc': '{root}.assoc'
    })
    g.command(f'''
python3 /run_gwas.py \
    --vcf {vcf} \
    --phenotypes {phenotypes} \
    --output-file {g.ofile} \
    --cores {cores}
''')
    return g
0f0b523a9a5976119219c7e746b602470f56be1d
22,094
def mod_sqrt(n: int, p: int, is_odd: bool) -> int:
    """
    Find Square Root under Modulo p

    Given a number 'n' and a prime 'p', find square root of n under
    modulo p if it exists.
    https://www.geeksforgeeks.org/find-square-root-under-modulo-p-set-1-when-p-is-in-form-of-4i-3/
    """
    n %= p
    y = pow(n, (p + 1) // 4, p)
    y_odd = bool(y & 0x01)
    if y_odd != is_odd:
        y = p - y
    assert (y * y) % p == n
    return y
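A usage sketch for the row above (p must be a prime with p ≡ 3 mod 4; the is_odd flag picks between the two roots y and p - y):

    assert mod_sqrt(5, 11, is_odd=False) == 4  # 4*4 = 16 ≡ 5 (mod 11)
    assert mod_sqrt(5, 11, is_odd=True) == 7   # 7*7 = 49 ≡ 5 (mod 11)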
9a04db0965f5db32fab29ba78e9963c25c72eefc
22,095
def dealer_options(dealer_score):
    """
    This function allows the dealer to hit or stay.
    """
    # Standard dealer rule: hit (True) below 17, otherwise stay (False).
    return dealer_score < 17
5c208d5ab330102ecd11ad6266c8a155e8220e9c
22,096
def get_logging_options_string(args):
    """ This function extracts the flags and options specified for logging options
        added with add_logging_options. Presumably, this is used in "process-all"
        scripts where we need to pass the logging options to the "process" script.

        Args:
            args (namespace): a namespace with the arguments added by add_logging_options

        Returns:
            string: a string containing all logging flags and options
    """
    args_dict = vars(args)

    # first, pull out the text arguments
    logging_options = ['log_file', 'logging_level', 'file_logging_level',
                       'stdout_logging_level', 'stderr_logging_level']

    # create a new dictionary mapping from the flag to the value
    logging_flags_and_vals = {'--{}'.format(o.replace('_', '-')): args_dict[o]
                              for o in logging_options if len(args_dict[o]) > 0}

    s = ' '.join("{} {}".format(k, v) for k, v in logging_flags_and_vals.items())

    # and check the flags
    if args.log_stdout:
        s = "--log-stdout {}".format(s)

    if args.no_log_stderr:
        s = "--no-log-stderr {}".format(s)

    return s
070ed0cd906845abf784bd566118a1959af875f2
22,097
def to_bytes(text, encoding='utf-8'):
    """Make sure text is bytes type."""
    if not text:
        return text
    if not isinstance(text, bytes):
        text = text.encode(encoding)
    return text
367da58c31dddd4c243c56a9780b47ef6682bee2
22,098
def email_get_unread(imap, from_email_address):
    """Returns (status, list of UIDs) of unread emails from a sending email address.
    """
    search = '(UNSEEN UNFLAGGED FROM "{}")'.format(from_email_address)
    status, response = imap.search(None, search)
    if status != 'OK':
        return status, response
    # Return status and list of unread email UIDs.
    return status, response[0].split()
48c4cf036e24acadec425bb7bb8ac9395488229a
22,099
def _lambda_risk_mapper(risk_level: int) -> str:
    """Helper method mapping a numeric risk level to its label.

    Parameters
    ----------
    risk_level: int
        number from range 0-4 represents risk factor for given vault

    Returns
    -------
    string: text representation of risk
    """
    mappings = {0: "Non Eligible", 1: "Least", 2: "Low", 3: "Medium", 4: "High"}
    return mappings.get(risk_level, "Non Eligible")
e19ef85b82d4b36bb6e0dfdcae373f25a894dbff
22,100
import math


def torsion_angle(c1, c2, c3, c4):
    """
    float <- torsion_angle(a, b, c, d)
    returns the torsion angle in degrees between 3D pts a,b,c,d
    """
    v1 = (c1[0] - c2[0], c1[1] - c2[1], c1[2] - c2[2])
    v2 = (c2[0] - c3[0], c2[1] - c3[1], c2[2] - c3[2])
    v3 = (c3[0] - c4[0], c3[1] - c4[1], c3[2] - c4[2])
    # p and q are the (cross-product) normals of the planes (v1, v2) and (v2, v3).
    p = (v2[1] * v1[2] - v1[1] * v2[2],
         v1[0] * v2[2] - v2[0] * v1[2],
         v2[0] * v1[1] - v1[0] * v2[1])
    q = (v3[1] * v2[2] - v2[1] * v3[2],
         v2[0] * v3[2] - v3[0] * v2[2],
         v3[0] * v2[1] - v2[0] * v3[1])
    n = 1.0 / math.sqrt(p[0] * p[0] + p[1] * p[1] + p[2] * p[2])
    p = (p[0] * n, p[1] * n, p[2] * n)
    n = 1.0 / math.sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2])
    q = (q[0] * n, q[1] * n, q[2] * n)
    xtheta = p[0] * q[0] + p[1] * q[1] + p[2] * q[2]
    # Clamp to [-1, 1] to guard against floating-point drift before acos.
    if xtheta > 1.0:
        xtheta = 1.0
    if xtheta < -1.0:
        xtheta = -1.0
    theta = math.acos(xtheta) * 57.29578  # radians -> degrees
    absth = math.fabs(theta)
    if absth < 0.001:
        return 0.0
    elif math.fabs(absth - 180.0) < 0.001:
        return 180.0
    s = v1[0] * q[0] + v1[1] * q[1] + v1[2] * q[2]
    if s < 0.0:
        theta = 360.0 - theta
    if theta > 180.0:
        theta = theta - 360.0
    return theta
32eb380fd8e4d0645481ab816a32e01144fb3c7b
22,101
import argparse


def arg_parse():
    """
    Parse command line arguments to the detector
    """
    parser = argparse.ArgumentParser(description="Yolo V3 neural network")
    parser.add_argument('--images', dest='images',
                        help='Directory containing the images to process',
                        default='imgs', type=str)
    parser.add_argument('--dest', dest='dest',
                        help='Directory in which to store detections',
                        default='dest', type=str)
    parser.add_argument('--bs', dest='bs', help='Batch size', default=1)
    parser.add_argument('--conf', dest='confidence',
                        help='Confidence threshold for filtering detections',
                        default=0.5)
    parser.add_argument('--nms_thresh', dest='nms_thresh',
                        help='Threshold for non-maximum suppression',
                        default=0.4)
    parser.add_argument('--cfg', dest='cfgfile',
                        help='Yolo .cfg configuration file',
                        default='cfg/yolov3.cfg', type=str)
    parser.add_argument('--weights', dest='weightsfile',
                        help='.weights file with the Yolo network weights',
                        default='backup/yolov3.weights', type=str)
    parser.add_argument('--res', dest='res',
                        help='Input resolution for the network. Higher resolution '
                             'means better accuracy but lower speed',
                        default='416', type=str)
    return parser.parse_args()
10a9880636c9569fa09094767128a5e53e69917a
22,103
def accumulate(instructions: list) -> dict:
    """
    Read and execute instructions until an infinite loop is found:
        acc +12 -> add 12 to 'acc'
        jmp +48 -> jump to the instruction located at 'index' + 48
        nop -56 -> do nothing & go to the next instruction
    If an instruction has already been executed, stop and return the
    current 'acc' value.
    """
    acc = 0
    index = 0
    seen = []
    while index < len(instructions):
        if index in seen:
            return {'acc': acc, 'infinite': True}
        seen.append(index)
        op, val = instructions[index].split(' ')
        if op == 'acc':
            acc += int(val)
        elif op == 'jmp':
            index += int(val)
            continue
        index += 1
    return {'acc': acc, 'infinite': False}
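A usage sketch for the row above (hypothetical program): the jmp sends execution back to an already-seen instruction, so the loop is detected.

    program = ['nop +0', 'acc +1', 'jmp -2']
    assert accumulate(program) == {'acc': 1, 'infinite': True}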
bf5a5bb278e71e783968eafed6a790ff2bdf77c1
22,105
from math import sqrt


def library_sqrt(x):
    """Uses math library"""
    return sqrt(x)
e1b3df0e6fe6d5b62de8efea1bcc3df7ae994d1d
22,106
def build_sampler(sampler_class, pe_method, force_method, T=1.0e-4,
                  dt=1.0e-1, traj_len=100, absxmax=1.0e2, dt_max=None,
                  min_rate=0.6, max_rate=0.7, gaussianprior_std=None):
    """Builds a sampling.Sampler class object of type sampler_class.

    Args:
        sampler_class : Sampler class from module sampling. Eg. sampling.Hmc
        pe_method : A method for evaluating the potential energy.
        force_method : A method for evaluating the forces.
        T (float) : Dimensionless temperature of the system: T=1/beta.
            (Default 1.0e-4.)
        dt (float) : Initial time step (or step size). This will be updated
            algorithmically, but a good starting point saves time.
            (Default 1.0e-1.)
        traj_len (int) : The number of time steps in a single trajectory.
            (Default 100.)
        absxmax (single float or numpy array of floats, with length 1 or
            length numdim) : During the main calculation, the sampler is
            restricted to a region x in [-absxmax, absxmax]. (Default: 1.0e2.)
        dt_max (float) : maximum step size (time step).
            (Default: median(absxmax), which is set in module sampling.)
        min_rate (float) : minimum acceptance rate of trajectories. Used for
            setting step size (time step). (Default: 0.6. The optimal
            acceptance rate for HMC on a multivariate Gaussian is 0.65,
            http://www.mcmchandbook.net/HandbookChapter5.pdf, section 5.4.4.3.)
        max_rate (float) : maximum acceptance rate of trajectories. Used for
            setting step size (time step). (Default 0.7. See min_rate.)
        gaussianprior_std (single float or numpy array of floats, with
            length 1 or length numdim) : If this is set to a real value then
            an additional term is applied to (H)MC acceptance/rejection,
            such that the target distribution is proportional to a
            multivariate Gaussian with this standard deviation for each
            dimension. (Default: None.)

    Return:
        sampling.Sampler class object of type sampler_class.
    """
    sampler = sampler_class(pe_method, force_method, dt, traj_len, absxmax,
                            dt_max, 1.0 / T, min_rate, max_rate,
                            gaussianprior_std)
    return sampler
528966ec88dd8d4a910753290e19849f7919cf22
22,107
def codegen_reload_data():
    """Parameters to codegen used to generate the fn_sep package"""
    reload_params = {
        "package": u"fn_sep",
        "incident_fields": [],
        "action_fields": [u"sep_artifact_type_scan_results", u"sep_domain_name", u"sep_fingerprintlist_name", u"sep_fullpathname", u"sep_scan_type", u"sep_source", u"sep_target_endpoints"],
        "function_params": [u"sep_commandid", u"sep_computer_ids", u"sep_computername", u"sep_description", u"sep_domain", u"sep_domainid", u"sep_file_id", u"sep_file_path", u"sep_fingerprintlist_id", u"sep_fingerprintlist_name", u"sep_fullpathname", u"sep_group_ids", u"sep_groupid", u"sep_hardwarekey", u"sep_hash_value", u"sep_incident_id", u"sep_lastupdate", u"sep_matching_endpoint_ids", u"sep_md5", u"sep_mode", u"sep_order", u"sep_os", u"sep_pageindex", u"sep_pagesize", u"sep_scan_action", u"sep_scan_date", u"sep_scan_type", u"sep_sha1", u"sep_sha256", u"sep_sort", u"sep_source", u"sep_status", u"sep_status_details", u"sep_status_type", u"sep_undo"],
        "datatables": [u"sep_endpoint_details", u"sep_endpoint_status_summary", u"sep_endpoints_non_compliant_details", u"sep_eoc_scan_results", u"sep_fingerprint_lists", u"sep_groups"],
        "message_destinations": [u"fn_sep"],
        "functions": [u"fn_sep_add_fingerprint_list", u"fn_sep_assign_fingerprint_list_to_group", u"fn_sep_delete_fingerprint_list", u"fn_sep_get_command_status", u"fn_sep_get_computers", u"fn_sep_get_domains", u"fn_sep_get_file_content_as_base64", u"fn_sep_get_fingerprint_list", u"fn_sep_get_groups", u"fn_sep_move_endpoint", u"fn_sep_quarantine_endpoints", u"fn_sep_scan_endpoints", u"fn_sep_update_fingerprint_list", u"fn_sep_upload_file_to_sepm"],
        "phases": [],
        "automatic_tasks": [],
        "scripts": [u"scr_sep_add_artifact_from_scan_results", u"scr_sep_parse_email_notification"],
        "workflows": [u"wf_sep_add_fingerprint_list", u"wf_sep_assign_fingerprint_list_to_lockdown_group", u"wf_sep_delete_fingerprint_list", u"wf_sep_delete_hash_from_fingerprint_list", u"wf_sep_get_blacklist_information", u"wf_sep_get_endpoint_details", u"wf_sep_get_endpoint_details_for_artifact", u"wf_sep_get_endpoints_status", u"wf_sep_get_endpoints_status_details", u"wf_sep_get_endpoints_status_refresh", u"wf_sep_get_file_content_as_base64_string", u"wf_sep_get_groups_information", u"wf_sep_get_quarantine_status", u"wf_sep_get_remediation_status", u"wf_sep_get_scan_results", u"wf_sep_get_upload_status", u"wf_sep_initiate_eoc_scan_for_artifact", u"wf_sep_move_endpoint", u"wf_sep_quarantine_endpoint", u"wf_sep_remediate_artifact_on_endpoint", u"wf_sep_upload_file_to_sepm"],
        "actions": [u"Example: SEP - Add Artifact from Scan Result", u"Example: SEP - Add Hash to Blacklist", u"Example: SEP - Assign Blacklist to lockdown group", u"Example: SEP - Delete Blacklist", u"Example: SEP - Delete Hash from Blacklist", u"Example: SEP - Get Blacklist information", u"Example: SEP - Get Endpoint Details", u"Example: SEP - Get Endpoint Details for artifact", u"Example: SEP - Get Endpoints status summary", u"Example: SEP - Get Endpoints status summary (refresh)", u"Example: SEP - Get File Content as Base64 string", u"Example: SEP - Get Groups information", u"Example: SEP - Get Non-Compliant Endpoints status details", u"Example: SEP - Get Quarantine status", u"Example: SEP - Get Remediation status", u"Example: SEP - Get Scan results", u"Example: SEP - Get Upload status", u"Example: SEP - Initiate EOC Scan for Artifact", u"Example: SEP - Move Endpoint", u"Example: SEP - Parse notification", u"Example: SEP - Quarantine Endpoint", u"Example: SEP - Remediate Artifact on Endpoint", u"Example: SEP - Un-Quarantine Endpoint", u"Example: SEP - Upload file to SEPM server"],
        "incident_artifact_types": []
    }
    return reload_params
dcb34b8533c0356a12b6efcf2ae97e4e39dcf752
22,108
import time
from datetime import datetime


def SecondsToZuluTS(secs=None):
    """Returns Zulu TS from unix time seconds.

    If secs is not provided will convert the current time.
    """
    if not secs:
        secs = int(time.time())
    return datetime.utcfromtimestamp(secs).strftime("%Y-%m-%dT%H:%M:%SZ")
2ca96ed779020037eb360a1250243dfe628f6195
22,109
def _LookupMeOrUsername(cnxn, username, services, user_id):
    """Handle the 'me' syntax or lookup a user's user ID."""
    if username.lower() == 'me':
        return user_id
    return services.user.LookupUserID(cnxn, username)
68ef5ea6d6c3076717660848a0b8a9c3cb4847d4
22,111
def get_max_df(df, column):
    """
    Get the row of a dataframe at which a column reaches its maximum.

    Parameters:
    ----------
    df: pandas dataframe
        A data frame with multiple columns
    column: str
        Name of the column to get the maximum

    Returns:
    -------
    pandas Series
        The row of the dataframe where `column` is maximal.
    """
    idx_max = df[column].idxmax()
    return df.loc[idx_max, :]
6511dda522f25d5cc69b6f8dd8ac37791015936d
22,114
import torch


def attn_norm(weights, self_loop=False):
    """
    weights: aggregation weights from a node's neighbours
    self_loop: whether to add a self-loop (identity) to each node
    """
    weights = weights.t()
    # Zero out the diagonal (remove any existing self-loops).
    weights = weights * (1 - torch.eye(weights.shape[0])).type_as(weights)
    if self_loop:
        weights = weights + torch.eye(weights.shape[0]).type_as(weights)
    # Row-normalise by the inverse degree.
    degree = weights.sum(dim=1)
    degree_inversed = degree.pow(-1)
    degree_inversed[degree_inversed == float('inf')] = 0
    degree_inversed = degree_inversed * torch.eye(weights.shape[0]).type_as(weights)
    weights = (degree_inversed @ weights).t()
    return weights
156423493950bbbd369201fe9f0f9a6bdfb8844d
22,117
import hashlib
import ctypes


def size_t_hash(key):
    """Hash the key using size_t.

    Args:
        key (str): The key to hash.

    Returns:
        str: The hashed key.
    """
    hash_digest = hashlib.blake2b(key.encode()).hexdigest()  # pylint: disable=no-member
    return '%u' % ctypes.c_size_t(int(hash_digest, 16)).value
19aa7ec430c9c0fbbba45baa9812f755d19a4cfe
22,120
def transpose(matrix):
    """Transpose a matrix (list of lists).

    :param matrix: matrix list
    :return: transpose matrix list

    >>> matrix = [[1, 1, 2], [2, 2, 1], [3, 3, 2], [3, 4, 5]]
    >>> transpose(matrix)
    [[1, 2, 3, 3], [1, 2, 3, 4], [2, 1, 2, 5]]
    """
    # Wrap in list() so the doctest's list output holds on Python 3,
    # where map() returns an iterator.
    return list(map(list, zip(*matrix)))
5a7798146f434eeb1c3b61c67b134499ff02030e
22,121
import json


def get_expected_legacy_permit_list():
    """returns legacy expected permit list from mock"""
    return json.loads("""{"TESTBLAH P-2187964": {"application_id": "P-2187964", "dba_name": "TESTblah", "address": "420 Bud St, SF, California 94102", "parcel": "no idea", "activities": "retailer (medical and adult use)", "referring_dept": "Planning Department, Department of Public Health, Mayor's Office of Disability", "status": "Under Construction"}, "BESTEST BUDDY BUD BUD TESTBLAH2": {"application_id": "TESTblah2", "dba_name": "Bestest Buddy Bud Bud", "address": "421 Bestest Bud St, San Francisco, California 94103", "parcel": "no idea", "activities": "retailer (medical and adult use), delivery only retailer (medical and adult use), medicinal cannabis retailer (medical only)", "referring_dept": "", "status": "Submitted"}}""")
882fc81bf62bbb4abd3c9f0c86270767ceabf701
22,126
def check_scenarios(scenes):
    """
    Make sure all scenarios have unique case insensitive names
    """
    assert len(scenes) == len(dict((k.lower(), v) for k, v in scenes))
    return scenes
c9b437d396a4d0ca17c17a85cb99e8574cc78fe3
22,127
def filter_names(name):
    """
    This is just the filter function for the framework search

    :param name: name to normalise
    :return: the lower-cased name
    """
    return name.lower()
dcb3246ad241347c38ff2d049b5f7a5c406a8c10
22,128
def is_even_permutation(seq1, seq2):
    """ Determine whether a permutation of a sequence is even or odd.

    :param seq1: the first sequence
    :param seq2: the second sequence, which must be a permutation of the first
    :returns: True if the permutation is even, False if it is odd
    :rtype: bool
    """
    size = len(seq1)
    assert sorted(seq1) == sorted(seq2) and len(set(seq1)) == size
    perm = [seq2.index(val) for val in seq1]
    # Count the transpositions needed to sort the permutation; each swap
    # flips the sign.
    sgn = 1
    for idx in range(size):
        if perm[idx] != idx:
            sgn *= -1
            swap_idx = perm.index(idx)
            perm[idx], perm[swap_idx] = perm[swap_idx], perm[idx]
    parity = (sgn == 1)
    return parity
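A usage sketch for the row above: the identity is even, a single swap of two elements is odd.

    assert is_even_permutation(['a', 'b', 'c'], ['a', 'b', 'c'])      # identity: even
    assert not is_even_permutation(['a', 'b', 'c'], ['b', 'a', 'c'])  # one swap: odd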
921f8a642241021f6bd41a42e231f92dc0565442
22,130
import re


def get_title(filename: str) -> str:
    """Locate the first title in a HTML document."""
    with open(filename, 'r') as index:
        raw_html = index.read()
    res = re.search('<title>(.*?)</title>', raw_html)
    if res and len(res.groups()):
        return res.group(1)
    return '~somebody'
bd363e0ee12a4c5bd714001c4b0498999fe3e235
22,131
def fix_tract(t):
    """Clean up census tract names.

    :param t: Series of string tract names
    :returns: Series of cleaned tract names
    """
    if type(t) == str:
        return t
    return str(t).rstrip("0").rstrip(".")
e8e2f6bee61596c57bb805717e9a34eb88f1c91e
22,132
def destructure(d, keys):
    """Custom object destructuring"""
    return [d[k] if k in d else None for k in keys]
5fdf0a4f732612005a5528b5f1c569fdb7d52927
22,134
def moving_avg_features(df):
    """
    Function to calculate the exponential moving averages and moving
    averages over different intervals of days

    Input: Dataframe
    Output: Dataframe with new moving average features
    """
    df['EMA_9'] = df['Close'].ewm(9).mean().shift()
    df['EMA_9'] = df['EMA_9'].fillna(0)
    df['SMA_5'] = df['Close'].rolling(5).mean().shift()
    df['SMA_5'] = df['SMA_5'].fillna(0)
    df['SMA_10'] = df['Close'].rolling(10).mean().shift()
    df['SMA_10'] = df['SMA_10'].fillna(0)
    df['SMA_15'] = df['Close'].rolling(15).mean().shift()
    df['SMA_15'] = df['SMA_15'].fillna(0)
    df['SMA_30'] = df['Close'].rolling(30).mean().shift()
    df['SMA_30'] = df['SMA_30'].fillna(0)
    return df
820eb6b0ad42592393d50d93d5718a6a10562907
22,135
def _old_style_nesting(vocab):
    """Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``)."""
    return isinstance(vocab, dict) and \
        any(isinstance(v, list) for v in vocab.values())
bce0ba4ee2fe7de4916f970c6f765b493ccdf0db
22,136
def err_exception(line=""):
    """
    Check the line for known, retryable error markers even if the line
    itself is malformed. Returns True when one is found, None otherwise.
    """
    if "414" in line:
        return True
    if "ValueError" in line:
        return True
    if "TypeError" in line:
        return True
    if '503' in line:
        # twitter service overloaded, try again later
        return True
18963c67e7ddc078dc8040e86b60edb7f66c3e2c
22,137
from datetime import datetime
import re


def twitterTime(t):
    """Return Twitter's time format as isoformat."""
    # Strip the timezone offset (e.g. "+0000 ") before parsing.
    return datetime.strptime(re.sub(r'[+\-][0-9]{4}\s', '', t),
                             '%a %b %d %X %Y').isoformat()
63b5763edd01d78902c97d3a9c4d059e1fb76a23
22,138
import functools


def conditional_decorator(dec, condition, *args, **kwargs):
    """Apply arbitrary decorator to a function if condition is met

    Parameters
    ----------
    dec : decorator
        Decorator to apply
    condition : bool
        condition that must be met for decorator to be applied
    args : tuple, optional
        Arguments to pass to decorator
    kwargs : dict, optional
        Keyword arguments to pass to decorator

    Returns
    -------
    cond_dec : decorator
        Decorator that acts like ``dec`` if ``condition`` holds, and
        leaves the function unchanged otherwise.
    """
    @functools.wraps(dec)
    def decorator(func):
        if not condition:
            # Return the function unchanged, not decorated.
            return func
        return dec(func, *args, **kwargs)
    return decorator
c6854d6fb3d6fd37a5a36ddb0aaf40eadf1bbd22
22,139
from datetime import datetime


def convert_date(date_string):
    """Convert the date_string from dd.mm.YYYY to dd-mm-YYYY format."""
    date = datetime.strptime(date_string, "%d.%m.%Y")
    return date.strftime('%d-%m-%Y')
33a67ba33fed1f195812e9839a811411de1e2987
22,140
def metadata_default_dict_factory_fn():
    """Factory function that can be passed to defaultdict to provide
    default values for metadata entries."""
    return {'passing_sample_ids': set()}
12aef48a549d87b97fb03424ace39aa53022abb4
22,141
def squeeze_whitespace(text):
    """Remove extra whitespace, newline and tab characters from text."""
    return ' '.join(text.split())
ef476f4ed6cd524c1cb115345151e4bc18c616b5
22,142
def iso_string_to_sql_date_sqlite(x: str) -> str:
    """
    Provides SQLite SQL to convert a column to a ``DATE``, just by taking the
    date fields (without any timezone conversion). The argument ``x`` is the
    SQL expression to be converted (such as a column name).
    """
    return f"DATE(SUBSTR({x}, 1, 10))"
349c560589d4f03938538f74dbc188577ac63a2d
22,143
from pathlib import Path


def save_NN_sequential(model, model_name):
    """
    Saving a Neural Network as h5 file

    :param model: sequential model
    :param model_name: name to save the model
    :return: True
    """
    file_name = 'Model_' + model_name
    file_path = Path().joinpath('Pickles', file_name + ".h5")
    model.save(file_path)
    print("The file", file_path, "was saved.")
    return True
a7481ace8971debb1be3af2140f6fdd33b3f679b
22,144
def mask(x):
    """Turn a string into an equal-length string of asterisks"""
    try:
        return len(x) * '*'
    except TypeError:
        # not a string - perhaps None - just return it as-is
        return x
4d17c3c5a7f8745353e01b84a9fe8b048bddd27d
22,145
def get_second_validate_param(tel_num):
    """
    Assemble param for get_second_validate

    :param tel_num: Tel number
    :return: Param in dict
    """
    param_dict = dict()
    param_dict['act'] = '1'
    param_dict['source'] = 'wsyytpop'
    param_dict['telno'] = tel_num
    param_dict['password'] = ''
    param_dict['validcode'] = ''
    param_dict['authLevel'] = ''
    param_dict['decode'] = '1'
    param_dict['iscb'] = '1'
    return param_dict
efbd31c56e3fdd4cb0e75bcae92cf51d996aac5d
22,146
def filter_text(text, letters):
    """Remove letters that are not in the dataset."""
    all_letters = set(text)
    letters_out = set(all_letters).difference(letters)
    letters_out = letters_out.difference(" ")
    for letter in letters_out:
        text = text.replace(letter, "")
    return text
53c47499445508e1d3f2f6a4d60a789cefe38796
22,147
def nautobot_vlan_status(status: str) -> str:
    """Method to return VLAN Status from mapping."""
    statuses = {
        "Active": "ASSIGNED",
        "Deprecated": "UNASSIGNED",
        "Reserved": "RESERVED",
    }
    return statuses[status]
efbc87b350953e3ff4ce379f9299f59e741520f8
22,148
def typeless_equals(entity1, entity2, check_class, check_instance):
    """
    Checks if entities are equal. The check is different whether entities
    are classes or instances, which is specified in corresponding
    parameters. If neither check is specified, True is returned.
    """
    if check_class:
        return isinstance(entity1, entity2)
    if check_instance:
        return entity1 == entity2
    return True
a0f1360509d8f2f1191c158426fa29ec42d3a3c8
22,149
def node_name(node):
    """Return lxml node name without the namespace prefix."""
    try:
        return node.tag.split('}')[-1]
    except AttributeError:
        pass
0e61ae70563784b5c845a73a82c3b2265ce63202
22,150
def get_most_read_or_features(
        amb_sams: list,
        counts: dict) -> list:
    """
    Get the samples that have the most counts (reads of features).

    Parameters
    ----------
    amb_sams : list
        Sample IDs.
    counts : dict
        Count per Sample ID.

    Returns
    -------
    cur_best_samples : list
        Selected samples (with max reads/features).
    """
    cur_counts = dict((amb_sam, counts[amb_sam]) for amb_sam in amb_sams)
    count_max = max(cur_counts.values())
    cur_best_samples = [amb_sam for amb_sam in amb_sams
                        if cur_counts[amb_sam] == count_max]
    return cur_best_samples
147a9e3a1df579f1673a832bf7dd3a7267ac4394
22,151
from collections import Counter


def can_rearrange_to_palindrom(string):
    """ Returns True if string can be rearranged to a palindrome """
    # At most one character may appear an odd number of times.
    occurrences = Counter(string)
    return sum(count % 2 != 0 for count in occurrences.values()) < 2
c562b7e14f93a964d55fc4c1ca903470fa44a95d
22,153
def play_game(player_1: list, player_2: list, part_two: bool = False) -> tuple:
    """Play a game of (Recursive) Combat returning winner and their hand.

    Args:
        player_1: List of player one's card values.
        player_2: List of player two's card values.

    Return:
        Tuple of winner's number (1/2) and their winning card values in order.
    """
    prev_rounds = set()
    while player_1 and player_2:
        one_card = player_1[0]
        two_card = player_2[0]
        # Repeating a previously seen round state means player 1 wins the game.
        if (tuple(player_1), tuple(player_2)) in prev_rounds:
            player_1.extend([one_card, two_card])
            return (1, player_1)
        prev_rounds.add((tuple(player_1), tuple(player_2)))
        if not part_two:
            if one_card > two_card:
                player_1.extend([one_card, two_card])
            else:
                player_2.extend([two_card, one_card])
        else:
            if len(player_1) - 1 >= one_card and len(player_2) - 1 >= two_card:
                # Recurse on copies of the next `card` cards of each hand.
                one_recursive = player_1[1: one_card + 1].copy()
                two_recursive = player_2[1: two_card + 1].copy()
                winner = play_game(one_recursive, two_recursive, True)
                if winner[0] == 1:
                    player_1.extend([one_card, two_card])
                else:
                    player_2.extend([two_card, one_card])
            else:
                if one_card > two_card:
                    player_1.extend([one_card, two_card])
                else:
                    player_2.extend([two_card, one_card])
        player_1 = player_1[1:]
        player_2 = player_2[1:]
    if player_1:
        return (1, player_1)
    else:
        return (2, player_2)
2b20970bd7f1d792ba195d2aab280aca0c3d7ccc
22,154
def find_index_unsafe(val, bin_edges):
    """Find bin index of `val` within binning defined by `bin_edges`.

    Validity of `val` and `bin_edges` is not checked.

    Parameters
    ----------
    val : scalar
        Assumed to be within range of `bin_edges` (including lower and
        upper bin edges)
    bin_edges : array

    Returns
    -------
    index

    See also
    --------
    find_index : includes bounds checking and handling of special cases
    """
    # Initialize to point to left-most edge
    left_edge_idx = 0
    # Initialize to point to right-most edge
    right_edge_idx = len(bin_edges) - 1
    while left_edge_idx < right_edge_idx:
        # See where value falls w.r.t. an edge ~midway between left and right edges
        # ``>> 1``: integer division by 2 (i.e., divide w/ truncation)
        test_edge_idx = (left_edge_idx + right_edge_idx) >> 1
        # ``>=``: bin left edges are inclusive
        if val >= bin_edges[test_edge_idx]:
            left_edge_idx = test_edge_idx + 1
        else:
            right_edge_idx = test_edge_idx
    # break condition of while loop is that left_edge_idx points to the
    # right edge of the bin that `val` is inside of; that is one more than
    # that _bin's_ index
    return left_edge_idx - 1
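A usage sketch for the binary search above (hypothetical edges): 1.5 falls in bin 1, which covers [1, 2).

    edges = [0.0, 1.0, 2.0, 3.0]
    assert find_index_unsafe(1.5, edges) == 1
    assert find_index_unsafe(0.0, edges) == 0  # left edges are inclusive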
08f89f8f6096d930e5d6c043374567d6646e9a37
22,156
from typing import Union
import json


def encode_message(message: Union[str, dict]) -> bytes:
    """
    Encode a message (str or dict) as JSON bytes.
    """
    if isinstance(message, str):
        message = json.loads(message)
    return json.dumps(message).encode()
02ec60f1dc33424d736c58b681e489b0d607dadd
22,157
import time


def func_to_time(x):
    """This sleeps for x seconds, for timing tests."""
    time.sleep(x)
    return 'Slept for {0} second(s)'.format(x)
8698514f6efe4b7b58aab3da5687cb72c2083fd7
22,158
import os
import sys
import shlex


def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed
    nicely and wrapped for 80 char width.

    Args:
        max_width (`int`, `optional`, defaults to 80):
            The width to wrap for.
        full_python_path (`bool`, `optional`, defaults to `False`):
            Whether to replicate the full path or just the last segment
            (i.e. `python`).
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
57f680f223923df605111cd5a75e8845c16d6f87
22,159
def to_float_str(val):
    """to_float_str

    Convert the float to a string with 2 decimal points of precision.

    :param val: float to change to a 2-decimal string
    """
    return str("%0.2f" % float(val))
c15c4e43e788ab416170f21413906fc2d218b345
22,160
import time
import hashlib


def user2cookie(user, max_age, cookie_key):
    """
    Compute an encrypted cookie for the user.

    :param user: user object with `id` and `password` attributes
    :param max_age: cookie lifetime in seconds
    :param cookie_key: server-side secret used in the hash
    :return: cookie string
    """
    # build cookie string by: id-expires-sha1
    expires = str(int(time.time() + max_age))
    s = '%s-%s-%s-%s' % (user.id, user.password, expires, cookie_key)
    l = [user.id, expires, hashlib.sha1(s.encode('utf-8')).hexdigest()]
    return '-'.join(l)
136d122c93c28f3b270af98f961099e118ab0380
22,161
def replicate_filter(compiled, replicate_threshold):
    """Collect only those sequences seen in `replicate_threshold` replicates."""
    df = compiled.copy()
    replicates = df.groupby('Sequence').count()['Proteins']
    rep_sequences = replicates[replicates == replicate_threshold].reset_index()['Sequence']
    return df[df['Sequence'].isin(rep_sequences)]
ded0c1936afb2a3ea616cfb05c7feeff52f36b95
22,162
def find_closest(val1, val2, target):
    """Return whichever of val1 and val2 lies closest to target
    (intended for val1 <= target <= val2)."""
    return val2 if target - val1 >= val2 - target else val1
62b2ee13b0c92d6445819a86d353b848def0239c
22,164
def get_id():
    """
    Returns the SCPI identification query string.
    """
    return "*IDN?"
65e63f1f4e53952264e58684d835bae3c4ee24a7
22,165
def limits2slice(limits):
    """
    Create a set of slice objects given an array of min, max limits.

    Parameters
    ----------
    limits: tuple, (ndarray, ndarray)
        Two tuple consisting of array of the minimum and maximum indices.

    Returns
    -------
    slices : tuple
        Tuple of slice objects which return points between limits.

    See Also
    --------
    find_limits : Find the minimum and maximum limits from a list of points.
    slice2limits : Find a minimum and maximum limits for a list of slices.
    """
    mins, maxs = limits
    return tuple([slice(i, j + 1) for i, j in zip(mins, maxs)])
f796f1e468f560d72e7d037e27a110ee50d3a45d
22,166
def create_clip_xml_info(readlen, adapl, adapr, quall, qualr):
    """Takes the clip values of the read and formats them into XML

    Corrects "wrong" values that might have resulted through simplified
    calculations earlier in the process of conversion (especially during
    splitting of paired-end reads)
    """
    to_print = [""]

    # if right borders are >= to read length, they don't need
    # to be printed
    if adapr >= readlen:
        adapr = 0
    if qualr >= readlen:
        qualr = 0

    # BaCh
    # when called via split_paired_end(), some values may be < 0
    # (when clip values were 0 previously)
    # instead of putting tons of if clauses for different calculations there,
    # I centralise corrective measure here
    # set all values <0 to 0
    if adapr < 0:
        adapr = 0
    if qualr < 0:
        qualr = 0
    if adapl < 0:
        adapl = 0
    if quall < 0:
        quall = 0

    if quall:
        to_print.append(" <clip_quality_left>")
        to_print.append(str(quall))
        to_print.append("</clip_quality_left>\n")
    if qualr:
        to_print.append(" <clip_quality_right>")
        to_print.append(str(qualr))
        to_print.append("</clip_quality_right>\n")
    if adapl:
        to_print.append(" <clip_vector_left>")
        to_print.append(str(adapl))
        to_print.append("</clip_vector_left>\n")
    if adapr:
        to_print.append(" <clip_vector_right>")
        to_print.append(str(adapr))
        to_print.append("</clip_vector_right>\n")
    return "".join(to_print)
d57833595f61cf798bc40816a6097c747dc198db
22,167
def has_clockwise_numbering(coords):
    """ tests if a polygon has clockwise vertex numbering

    approach: Sum over the edges, (x2 − x1)(y2 + y1). If the result is
    positive the curve is clockwise.
    from:
    https://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order

    :param coords: the list of (x,y) coordinates representing the polygon to be tested
    :return: true if the polygon has been given in clockwise numbering
    """
    total_sum = 0.0
    p1 = coords[-1]
    for p2 in coords:
        x1, y1 = p1
        x2, y2 = p2
        total_sum += (x2 - x1) * (y2 + y1)
        p1 = p2
    return total_sum > 0
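A usage sketch for the edge-sum test above (with y increasing upwards): a counter-clockwise unit square gives a negative sum, its reverse a positive one.

    square_ccw = [(0, 0), (1, 0), (1, 1), (0, 1)]
    assert not has_clockwise_numbering(square_ccw)
    assert has_clockwise_numbering(list(reversed(square_ccw)))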
e9d55bfe9c5ef66e4b3e611c579c3ca940ba98d2
22,168
def convert_to_float(value: str) -> float:
    """
    Get the float value from, for example, "R9 323.46".
    """
    # Drop the leading currency symbol and any thousands spaces.
    return float(value[1:].replace(' ', ''))
5ebea176aa6cbfaf6318b246fda73ff443efd092
22,169
def sift(iterable, predicate):
    """
    Sift an iterable into two lists, those which pass the predicate and
    those which don't.

    :param iterable: iterable of objects to test
    :param predicate: function returning True or False for each object
    :return: (True-list, False-list)
    :rtype: tuple[list, list]
    """
    t_list = []
    f_list = []
    for obj in iterable:
        (t_list if predicate(obj) else f_list).append(obj)
    return (t_list, f_list)
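A usage sketch for the row above: partition numbers by parity in a single pass.

    evens, odds = sift(range(6), lambda n: n % 2 == 0)
    assert evens == [0, 2, 4] and odds == [1, 3, 5]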
347ae7cd9f79bccdc6fc6ae1efa1a29b5563322a
22,170
def compare_datetime(date, span):
    """
    Compare information within datetime object with a span

    Parameters
    ----------
    date: datetime
        Datetime to compare.
    span: Span
        Span to compare.

    Returns
    -------
    bool
        True if match.
    """
    return span.text in str(date)
d2bd942d7b6c536ac35d1cd18b10ed57a465622a
22,173
def reduce_list(data_set):
    """ Reduce duplicate items in a list and preserve order """
    seen = set()
    # `seen.add` returns None (falsy), so the membership test does the
    # filtering while the add records the item as seen.
    return [item for item in data_set
            if item not in seen and not seen.add(item)]
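A usage sketch for the row above: first occurrences survive, in their original order.

    assert reduce_list([3, 1, 3, 2, 1]) == [3, 1, 2]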
e84b87b45c8a7aea14beee3b7822f55a0a99151e
22,174
def validate_knot(knot):
    """
    Confirm a knot is in the range [0, 1]

    Parameters
    ----------
    knot : float
        Parameter to verify

    Returns
    -------
    bool
        Whether or not the knot is valid
    """
    return 0.0 <= knot <= 1.0
61ab023d61248268db74febd72df6ecf3ef0c056
22,175
def checkio(array):
    """
    Sums the even-indexed elements and multiplies by the last element.
    """
    if len(array) == 0:
        return 0
    elif len(array) == 1:
        return array[0] ** 2
    return sum(x for i, x in enumerate(array) if i % 2 == 0) * array[-1]
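A usage sketch for the row above: even-indexed elements 0 + 2 = 2, times the last element 3, gives 6.

    assert checkio([0, 1, 2, 3]) == 6
    assert checkio([]) == 0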
7d712f406b959f95ccc97075296a720b2ec65d22
22,176
import subprocess
import click
import os
import shutil


def init():
    """Initialize minion aliases for git repository."""
    # check if git repository
    if subprocess.call(["git", "status"], stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE) != 0:
        click.echo("This is not a git repository!")
        return 1
    if os.path.exists("minion.yml"):
        click.echo("This is already a minion repository!")
        return
    if subprocess.call(["git", "config", "alias.minion", "!minion submit ."]) != 0:
        click.echo("Unable to add git minion alias!")
        return 1
    shutil.copyfile(os.path.join(os.path.dirname(__file__), "templates/minion.yml"),
                    "minion.yml")
    click.echo("Added 'git minion' alias")
e203100a297380e9502b0704a67ca94497068fd4
22,177
def create_forwards_regex(input_symbol: str) -> str:
    """Creates a regular expression pattern to match the forwards symbology.

    To create the regular expression, the function uses the fact that within
    the ICE consolidated feed all the forwards contracts are identified by the
    root symbol (a unique mnemonic based on the exchange ticker or the ISIN,
    where no exchange ticker is available), prefixed with the type and
    optional session indicator; a backslash, followed by a relative term
    indicator that expresses the delivery date relatively to today (for
    example SP characterises a spot contract, 5D a contract with delivery
    date in 5 days, 2Y a contract with delivery date in 2 years etc.).

    The function logic allows the user to pass a complete forward contract
    symbol or to use a wildcard flag. In case a full forward symbol is passed
    to the function, the resulting regex expression will be such that it will
    match only that specific contract (for example, passing R2:GAS\\5D as an
    input will result in the function creating a regular expression matching
    only the forward contract for GAS traded after hours with 5-day delivery).
    Conversely, if it is passed as an input only the root symbol (prefixed
    with the type and optional session indicator) followed by a backslash and
    the * wildcard flag, the function will generate a regular expression that
    can match all the possible relative terms (for example, passing R2:GAS*
    will produce a regular expression that can match all the available
    relative delivery dates of the GAS forward).

    Parameters
    ----------
    input_symbol: str
        Either a forward symbol consisting of the root symbol prefixed with
        the type identifier (R) and the optional session indicator, followed
        by a backslash and the chosen relative delivery term, or the root
        symbol (with all the necessary prefixes) followed by a backslash and
        the * wildcard flag.

    Returns
    -------
    str
        Depending on the input symbol, the function returns a regular
        expression pattern that either matches literally a specific forward
        contract symbol, or one that matches all the possible relative term
        indicators for a specific forward's symbol root.
    """
    if not input_symbol.endswith("*"):
        symbol_components = input_symbol.split("\\")
        return rf"{symbol_components[0]}\\{symbol_components[1]}"
    else:
        symbol_root = input_symbol.split("\\")[0]
        return rf"{symbol_root}\\[A-Z0-9]{{2,4}}"
1404f4757675fc634ed3cecdfa4689c5b63020d2
22,179
def _get_all_deps(*, deps, split_deps_keys = []):
    """Returns a list of all dependencies from a Label list and optional split attribute keys.

    Args:
        deps: Label list of (split) dependencies to traverse.
        split_deps_keys: (optional) List of split attribute keys to use on split deps.

    Returns:
        List of all dependencies. If split_deps_keys is not provided, return deps.
    """
    if type(deps) == "list":
        return deps

    if not split_deps_keys:
        return deps.values()

    all_deps = []
    for split_deps_key in split_deps_keys:
        all_deps += deps[split_deps_key]
    return all_deps
30521b0ce646c3ee51b1d45d06e05aeb71058456
22,180
import csv


def ftt_lookup(organism, experiment=""):
    """Import the ftt file and return its feature rows as a list of lists,
    one entry per feature (header row excluded).
    """
    ftt_list = []
    with open(
        f"results/{experiment}/genome_lookup/{organism}.ftt", newline=""
    ) as csv_file:
        ftt_reader = csv.reader(csv_file, delimiter="\t")
        for line in ftt_reader:
            # ignore header row
            if line[0] != ("Locus"):
                # Locus, Location_Start, Location_End, Strand, Length, PID,
                # Gene, Synonym, Code, COG, Product
                feature_data = line[0:11]
                ftt_list.append(feature_data)
    return ftt_list
4935ec6f3e749d9f84864e79cde73c48a4dfebab
22,181
def get_time_string(codetime):
    """
    Utility function that takes the codetime and converts this to a human
    readable String.

    Args:
        codetime (`float`):
            Code execution time in seconds (usually the difference of two
            time.time() calls)

    Returns:
        `str`: A string indicating the total execution time
    """
    if codetime < 60.0:
        retstr = 'Execution time: {0:.2f}s'.format(codetime)
    elif codetime / 60.0 < 60.0:
        mns = int(codetime / 60.0)
        scs = codetime - 60.0 * mns
        retstr = 'Execution time: {0:d}m {1:.2f}s'.format(mns, scs)
    else:
        hrs = int(codetime / 3600.0)
        mns = int(60.0 * (codetime / 3600.0 - hrs))
        scs = codetime - 60.0 * mns - 3600.0 * hrs
        retstr = 'Execution time: {0:d}h {1:d}m {2:.2f}s'.format(hrs, mns, scs)
    return retstr
2cdc53ba83e06297c3c09b59095553db72d41643
22,184
def remove_duplicates(lst):
    """
    This function removes all duplicate objects from a list.

    :param lst: A list.
    :return: The same list, with all its elements appearing just once.
    """
    if len(lst) == 1:
        return lst
    return [i for n, i in enumerate(lst) if i not in lst[:n]]
bf28a109a1af4760c39e31fdda88ea30b1e55f8a
22,185
def aln_abuts_unknown_bases(tx, fasta):
    """
    Do any exons in this alignment immediately touch Ns?

    :param tx: a GenePredTranscript object
    :param fasta: pyfasta Fasta object for genome
    :return: boolean
    """
    chrom = tx.chromosome
    for exon in tx.exon_intervals:
        if exon.start == 0:  # we are at the edge of the contig
            left_base = None
        else:
            left_base = fasta[chrom][exon.start - 1]
        if exon.stop >= len(fasta[chrom]):  # we are at the edge of the contig
            right_base = None
        else:
            right_base = fasta[chrom][exon.stop]
        if left_base == 'N' or right_base == 'N':
            return True
    return False
ceb31739be0091b3b52f763c0c0d6d15b1aebd19
22,188
import inspect


def typedtuple(*typesig):
    """Return a new function to generate an n-tuple with.

    That works only if the entire type signature is identical when called.

    >>> Somefoo = typedtuple(int, list, float, str)
    >>> foo = Somefoo(3, [1, 2], 0.3, 'hello')

    But this throws an error:

    >>> foo = Somefoo(3, None, 0.3, 'hello')
    """
    def _tuple(*someargs):
        if len(someargs) != len(typesig):
            raise ValueError(
                '{}-tuple requires arity of {}, '
                'but arity of {} was passed'.format(
                    len(typesig), len(typesig), len(someargs)
                ))
        called_typesig = []
        for arg in someargs:
            if inspect.isclass(arg):
                called_typesig.append(object)
            else:
                called_typesig.append(type(arg))
        _typesig = list(typesig)
        if called_typesig != _typesig:
            raise ValueError(
                'Called with type signature "{}" '
                'but expected {}'.format(called_typesig, _typesig)
            )
        return tuple(someargs)
    return _tuple
4bac75b2962b700baa147d08dcd65a51e6470840
22,189
import json
import hashlib


def get_md5(obj, trans_func=None):
    """Get an object's md5. If this obj is not supported by `json.dumps`,
    please provide a trans_func.

    Args:
        obj (object): obj to get md5
        trans_func (function, optional): use this to trans obj to str.
            Defaults to None.
    """
    if trans_func is None:
        trans_func = json.dumps
    obj_str = trans_func(obj)
    hl = hashlib.md5()
    hl.update(obj_str.encode(encoding='utf-8'))
    return hl.hexdigest()
ae7da1f0bab1815a2d357617dd93d26e020a2316
22,190
import re


def extract_courts(s: str) -> list:
    """
    Extract a list of court numbers listed in the statute's text.

    Args:
        s (str): The text of the statute that lists the court numbers.

    Returns:
        (list): A list of court numbers, all cleaned up.
    """
    my_s = re.sub(r'[^0-9\s]', '', s)
    my_s = re.sub(r'\s{2,}', ' ', my_s)
    courts = my_s.strip().split(' ')
    result = []
    for court in courts:
        if court not in result:
            result.append(court)
    return sorted(result)
d15e279ca2368d23fc75c7c2151329cfd60bb43a
22,191
def _remove_trailing_zeros(lst):
    """ Removes any zeros at the end of the list. """
    k = 0
    # Walk the list backwards to find the first non-zero entry.
    for k, value in enumerate(lst[::-1]):
        if value != 0:
            break
    lst_no_trailing_zeroes = lst if k == 0 else lst[:-k]
    return lst_no_trailing_zeroes
7b1d78716138eb25b635a962482b21d4fbb35284
22,192
def fa_attachment(extension):
    """
    Add fontawesome icon if found. Else return normal extension as string.

    :param extension: file extension
    :return: matching fontawesome icon as string
    """
    if extension == 'pdf':
        return "<i class='fa fa-file-pdf-o fa-lg'></i>"
    elif extension == 'jpg' or extension == 'png':
        return "<i class='fa fa-picture-o fa-lg'></i>"
    elif extension == 'doc' or extension == 'docx':
        return "<i class='fa fa-file-word-o fa-lg'></i>"
    elif extension == 'xls' or extension == 'xlsx':
        return "<i class='fa fa-file-excel-o fa-lg'></i>"
    elif extension == 'extern':
        return "<i class='fa fa-external-link'></i>"
    elif extension == 'zip':
        return "<i class='fa fa-file-archive-o fa-lg'></i>"
    else:
        return extension
3add6bf4c177cba893a2242df352fd0ae619ee90
22,194
def get_worksheet_keys(data_dict, result_info_key):
    """Gets sorted keys from the dict, ignoring result_info_key and 'meta' key

    Args:
        data_dict: dict to pull keys from
        result_info_key: key to exclude from the result

    Returns:
        list of keys in the dict other than the result_info_key
    """
    keys = set(data_dict.keys())
    keys.remove(result_info_key)
    if 'meta' in keys:
        keys.remove('meta')
    return sorted(keys)
1092eee46980a5e4f745d3a99ff6abe7d5c9db62
22,196
def add_options(click_options):
    """
    Decorator that adds multiple Click options to the decorated function.
    The list is reversed because of the way Click processes options.

    Note: This function has its origins in the
    https://github.com/pywbem/pywbemtools project (Apache 2.0 license)

    Parameters:
        click_options (list): List of `click.option` objects.
    """
    def _add_options(func):
        """
        Apply the Click options to the function in reversed order.
        """
        for option in reversed(click_options):
            func = option(func)
        return func
    return _add_options
fe6c5bda8f0606cc7fcdec87dd3332d1abb9b695
22,197
import itertools


def _expand(the_set, expand_fn):
    """Returns a concatenation of the expanded sets.

    I.e. Returns a set of all elements returned by the expand_fn function
    for all elements in the_set.

    E.g. With expand_fn = lambda x: (10*x, 100*x) and
    the_set = set([1, 2, 3]) this function returns
    set([10, 100, 20, 200, 30, 300])

    Args:
        the_set: A set of elements.
        expand_fn: Function returning an iterable given some element in
            the_set.

    Returns:
        a concatenation of the expanded sets.
    """
    return set(itertools.chain(*[expand_fn(x) for x in the_set]))
694661c0cc6d2d09d72d65ea63cc1241fc32d4d5
22,198
import re


def ingredients_from_food(food):
    """
    Extract ingredients from food description

    :param food: A string, value from the Food column
    :return: A list of ingredients (strings) or empty list

    "Салат "Папарать-Кветка"(Говядина,ветчина,помидоры,огурцы)" ->
        ["говядина", "ветчина", "помидоры", "огурцы"]
    "Капуста тушеная" -> []
    """
    re_ingr = re.compile(r'\(([^\)]+)\)')
    ingr = re_ingr.findall(food)
    if ingr:
        ingr_parts = ingr[0].split(',')
        ingr_parts = [p.strip().lower() for p in ingr_parts]
        return ingr_parts
    else:
        return []
4314bce50e602f0e3c0d8db4d7c22ef2bd693c2a
22,201
import json


def json_content(directory, file_name):
    """
    This function gets the content of a json file and returns it as a dictionary.

    :param directory: String
        The folder where the json file is located
    :param file_name: String
        The name of the json file
    :return: dict
        The content of the json file as python dictionary
    """
    with open(directory + file_name + '.json', "r") as f:
        contents = json.loads(f.read())
    return contents
98cf97b4a1d6853cae9b4087fd02a1d316afb492
22,202
def check_login(db, usernick, password):
    """returns True if password matches stored"""
    cur = db.cursor()
    # retrieve user information from the database
    rows = cur.execute("SELECT nick, password FROM users")
    for item in rows:
        # the username and password both match the database
        if item[0] == usernick and item[1] == db.crypt(password):
            return True
        # the username matches but the password does not
        elif item[0] == usernick and item[1] != db.crypt(password):
            return False
e4f6d2dbb621699865c21276b6a42d23941aed39
22,203
import math


def rice_list(size=8, approx_ln=False):
    """Return the size x size list of Rice numbers."""
    approx_fn = (lambda x: x) if approx_ln else (lambda x: math.floor(math.log10(x) + 1))
    lines = []
    for y in range(size):
        line = []
        for x in range(size):
            line.append(approx_fn(2 ** (x + size * y)))
        lines.append(line)
    return lines
02051b05465cadec0d7af24a5077d300c37faf52
22,205
def index_dict(d: dict, x: float):
    """Return the value of d whose key position corresponds to x, where x
    ranges over [0, 1] (0 -> first key, 1 -> last key)."""
    keys = list(d.keys())
    index = int(x * (len(keys) - 1))
    return d[keys[index]]
653324369bef1eb9873853c634d27d63ba0675e7
22,208
def get_cai_ptr(X):
    """
    Function gets the pointer from an object that supports the
    __cuda_array_interface__. Raises TypeError if `X` does not support it.
    """
    if hasattr(X, '__cuda_array_interface__'):
        return X.__cuda_array_interface__['data'][0]
    else:
        raise TypeError("X must support `__cuda_array_interface__`")
467e4437b34693e37f3f90e822899dc3a548710e
22,209
def validate_float(s):
    """Convert s to float or raise a ValueError."""
    try:
        return float(s)
    except ValueError:
        raise ValueError('Could not convert {0!r} to float'.format(s))
1559e4b8465e4d380c74784f0dab68aaf7965dbc
22,210
def show_element(elem):
    """
    Return the whole element as it is.
    """
    return elem
bcb8d2ae273c105524a7518a2f4247a5aa48410f
22,212
def exists_icd(db, icd):
    """
    Search bar

    MongoDB:
        db.icd_info.find({'icd': 'RH141'}, fields={'_id': False})
    SciDB:
        aggregate( filter(icd_info, icd = 'RH141'), count(*));
    SciDBnew:
        res = db.get_phenotype_fields(association_set=str(db.list_association_sets()['name'][0]))
        resphe = [res['description'] == icd]
        return bool(resphe.empty)
    """
    res = db.get_phenotype_fields(association_set=str(db.list_association_sets()['name'][0]))
    resphe = res[res['description'] == icd]
    return not bool(resphe.empty)
664858d08bc64efaaa1bed797f1de59d63058747
22,213
import re
import warnings


def parse_psp_name(psp_name):
    """
    Parse the name of vasp's psp

    Parameter
        psp_name: str
            The name of vasp's psp, e.g. GGA, LDA, potpaw_LDA

    Return
        psp_name_norm: str
            The normalized psp name
    """
    psp_name = psp_name.upper()
    psp_name_list = re.split(r'\.|\_|\-|\=|\+|\*|\s', psp_name)
    flag_us = False
    for psp_name_i in psp_name_list:
        if "US" in psp_name_i:
            flag_us = True
            break
    if "LDA" in psp_name_list:
        if "52" in psp_name_list:
            psp_name_norm = "POT_LDA_PAW_52"
        elif "54" in psp_name_list:
            psp_name_norm = "POT_LDA_PAW_54"
        elif flag_us:
            psp_name_norm = "POT_LDA_US"
        else:
            psp_name_norm = "POT_LDA_PAW"
    elif "PBE" in psp_name_list:
        if "52" in psp_name_list:
            psp_name_norm = "POT_GGA_PAW_PBE_52"
        elif "54" in psp_name_list:
            psp_name_norm = "POT_GGA_PAW_PBE_54"
        else:
            psp_name_norm = "POT_GGA_PAW_PBE"
    elif "GGA" in psp_name_list:
        if flag_us:
            psp_name_norm = "POT_GGA_US_PW91"
        else:
            psp_name_norm = "POT_GGA_PAW_PW91"
    else:
        warnings.warn("{} is not a proper name of vasp's pseudopotential, please ref "
                      "https://github.com/PhasesResearchLab/dfttk/blob/master/docs/Configuration.md. "
                      "This folder will be ignored.".format(psp_name))
        psp_name_norm = None
    return psp_name_norm
0b5e025fa503f23fc17101690b978d414003197b
22,214
def _csv_lst_convert(data_repr, csv_lst, var_name):
    """ Convert a list or dict value coming from a csv cell. """
    # Convert list or dict values found in the csv.
    try:
        col_val = csv_lst.strip()
        if col_val != '':
            tmp_eval = eval(csv_lst)
            if isinstance(tmp_eval, list) or isinstance(tmp_eval, dict):
                data_repr = data_repr.replace(
                    '"{}"'.format(var_name), var_name
                ).replace(
                    "'{}'".format(var_name), var_name
                )
            else:
                # list or dict to string
                if col_val[:1] == "'" or col_val[:1] == '"':
                    csv_lst = col_val[1:len(col_val) - 1]
    except NameError:
        pass
    return data_repr, csv_lst
44adc7e04af67e251443421196bd04d39468d5ed
22,215
def _format_list_items(list_items):
    """Generate an indented string out of a list of items."""
    list_string = ''
    if list_items:
        for item in list_items:
            list_string = list_string + " '" + item + "',\n"
        list_string = "[\n {}\n]".format(list_string.strip()[:-1])
    else:
        list_string = '[]'
    return list_string
dd677277650e5d3105c01f6636518b8bbd2a1bff
22,216
import torch
import math


def importance_sampling_cross_validation(logp):
    """Compute the importance-sampling cross validation (ISCV) estimate.

    The ISCV estimates the holdout log-likelihood from just an approximation
    to the posterior predictive log-likelihoods on the training data.

    ### References:
    [1]: Alan E. Gelfand, Dipak K. Dey, Hong Chang. Model determination using
         predictive distributions with implementation via sampling-based
         methods. Technical report No. 462, Department of Statistics,
         Stanford university, 1992.
         http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.860.3702&rep=rep1&type=pdf
    [2]: Aki Vehtari, Andrew Gelman, Jonah Gabry. Practical Bayesian model
         evaluation using leave-one-out cross-validation and WAIC.
         arXiv:1507.04544
         https://arxiv.org/pdf/1507.04544.pdf
    [3]: Sumio Watanabe. Mathematical Theory of Bayesian Statistics.
         CRC Press. 2018
         https://www.crcpress.com/Mathematical-Theory-of-Bayesian-Statistics/Watanabe/p/book/9781482238068

    Args:
        logp: Tensor, shape (B,M,...), containing log p(y_i | x_i, theta_j)
            for i=1,..,B instances and j=1,...,M models.

    Returns:
        iscv_logp: Tensor, (...), the ISCV estimate of the holdout
            log-likelihood.
        iscv_logp_sem: Tensor, (...), the standard error of the mean of
            `iscv_logp`.
    """
    logse = torch.logsumexp(-logp, 1)
    iscv_logp = logse.mean(0)
    iscv_logp_var = logse.std(0)

    m = int(logp.shape[1])
    iscv_logp -= math.log(m)
    iscv_logp = -iscv_logp
    iscv_logp_sem = (iscv_logp_var / float(m)).sqrt()

    return iscv_logp, iscv_logp_sem
9bef3b3c3775e359d52a321a8e72b69d38f0fcb7
22,217
import re


def convert_character(text: str):
    """
    Convert consecutive full-width numbers to half-width numbers.
    Convert a single half-width number into a full-width number.
    Convert half-width English characters to full-width ones.

    Parameters
    ----------
    text : str
        input text

    Returns
    ----------
    output : str
        converted text
    """
    list_text = list(text)
    half_nums = re.findall('[0-9]+', text)
    # Full-width digit range (U+FF10-U+FF19); reconstructed here since the
    # two character classes render alike but are distinct code points.
    full_nums = re.findall('[０-９]+', text)
    c_half_nums = []
    for half_num in half_nums:
        if len(half_num) == 1:
            c_half_nums.append(half_num)
    c_full_nums = []
    for full_num in full_nums:
        if len(full_num) > 1:
            c_full_nums.append(full_num)

    # half to full
    for c_half_num in c_half_nums:
        index = text.find(c_half_num)
        convert = c_half_num.translate(
            str.maketrans({chr(0x0021 + i): chr(0xFF01 + i) for i in range(94)}))
        list_text[index] = convert

    # full to half
    for c_full_num in c_full_nums:
        index = text.find(c_full_num)
        converts = c_full_num.translate(
            str.maketrans({chr(0xFF01 + i): chr(0x21 + i) for i in range(94)}))
        for i, convert in enumerate(converts):
            list_text[index + i] = convert

    output = "".join(list_text)
    return output
f388de9eac9c92daceb96a46fce3efc525ce3eff
22,218
def add_hover_description(df):
    """
    Add the column 'text' with the numbers and description shown on the app
    when selecting one postalcode area.

    :return: the updated data frame
    """
    texts = []
    for _, row in df.iterrows():
        texts.append('<b>' + str(row['Area']) + '</b><br>' +
                     "Density: " + str(row['Density']) + '<br>' +
                     "Average age: " + str(row['Average age of inhabitants']) + '<br>' +
                     "Median income: " + str(row['Median income of inhabitants']) + '<br>' +
                     "Employment rate: " + str(row['Employment rate %']) + '<br>')
    df['text'] = texts
    return df
324efae5c1587d9b86407d1f0b33e7d0b8a23c83
22,223
import json


def others(request):
    """
    Display others menu with activities to choose

    :param request: POST request from "Others" dialogflow intent
    :return: Json with others menu
    """
    speech_text_pl = "Wybierz jedną z poniższych opcji, która Cię interesuje"
    display_text_pl = "Która z poniższych opcji Cię interesuje?"
    suggestions_pl = [{"title": "Kontakt"}, {"title": "O firmie"},
                      {"title": "Adres"}, {"title": "Tabor samochodowy"}]
    speech_text_en = "Choose one of the options below that interest you"
    display_text_en = "Choose one of the options below that interest you"
    suggestions_en = [{"title": "Contact"}, {"title": "About us"},
                      {"title": "Address"}, {"title": "Car fleet"}]

    with open('api/response.json') as json_file:
        menu_others = json.load(json_file)

    part_to_modify = menu_others['payload']['google']['richResponse']
    if request.data['queryResult']['languageCode'] == 'pl':
        part_to_modify['items'][0]['simpleResponse']['textToSpeech'] = speech_text_pl
        part_to_modify['items'][0]['simpleResponse']['displayText'] = display_text_pl
        part_to_modify['suggestions'] = suggestions_pl
    else:
        part_to_modify['items'][0]['simpleResponse']['textToSpeech'] = speech_text_en
        part_to_modify['items'][0]['simpleResponse']['displayText'] = display_text_en
        part_to_modify['suggestions'] = suggestions_en
    menu_others['payload']['google']['richResponse'] = part_to_modify
    return menu_others
245546d7b0691b5ffb3ba1794e76cd974bfd67e8
22,225
def _create_html_file_content(translations):
    """Create html string out of translation dict.

    Parameters
    ----------
    translations : dict
        Dictionary of word translations.

    Returns
    -------
    str: html string of translation
    """
    content = []
    for i1, t in enumerate(translations):
        if i1 > 0:
            content.append("<br>")
        content.append('<div class="translation">')
        content.append('<div class="word">')
        for w in t.word:
            content.append("<h2>{word}</h2>".format(word=w))
        content.append("</div>")  # end `word`
        for i2, t2 in enumerate(t.parts_of_speech):
            if i2 > 0:
                content.append("<br>")
            content.append('<div class="part-of-speech">')
            if t2.part is not None:
                content.append('<p class="part-name">[{part}]</p>'.format(part=t2.part))
            content.append("<ol>")
            for m in t2.meanings:
                content.append('<div class="meaning">')
                mng = ["<strong><li>"]
                for i3, mn in enumerate(m.meaning):
                    if i3 > 0:
                        mng.append(", ")
                    mng.append("<span>{meaning}</span>".format(meaning=mn))
                mng.append("</li></strong>")
                content.append("".join(mng))
                content.append('<div class="examples">')
                for e in m.examples:
                    exmpl = "<p><span>{ex}</span>".format(ex=e[0])
                    if e[1]:
                        exmpl += "<br><span>{tr}</span>".format(tr=e[1])
                    exmpl += "</p>"
                    content.append(exmpl)
                content.append("</div>")  # end `examples`
                content.append("</div>")  # end `meaning`
            content.append("</ol>")
            content.append("</div>")  # end `part-of-speech`
        content.append("</div>")  # end `translation`
    return "\n".join(content)
377fee1d5a45e9d6a1ae1c96a858f13f78b8c499
22,226
import time


def retry(func, exc=Exception, tries=3, delay=1):
    """
    Call ``func()`` up to ``tries`` times, exiting only if the function
    returns without an exception. If the function raises an exception on
    the final try that exception is raised.

    If given, ``exc`` can be either an `Exception` or a tuple of
    `Exception`s in which only those exceptions result in a retry, and all
    other exceptions are raised. ``delay`` is the time in seconds between
    each retry, and doubles after each retry.
    """
    while True:
        try:
            return func()
        except exc:
            tries -= 1
            if tries == 0:
                raise
            time.sleep(delay)
            delay *= 2
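A usage sketch for the row above, with a hypothetical flaky callable that fails twice and then succeeds:

    attempts = {'n': 0}

    def flaky():
        attempts['n'] += 1
        if attempts['n'] < 3:
            raise ConnectionError("transient")
        return "ok"

    assert retry(flaky, exc=ConnectionError, tries=5, delay=0) == "ok"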
5384afd77840b77b2cb278502d8fc64890af6be7
22,227
def select_table_level_gmeta_fields(metabase_cur, data_table_id):
    """
    Select metadata at data set and table levels.
    """
    date_format_str = 'YYYY-MM-DD'
    metabase_cur.execute(
        """
        SELECT
            file_table_name AS file_name,
            format AS file_type,
            data_table.data_set_id AS dataset_id,
            -- data_set.title AS title,
            -- data_set.description AS description,
            TO_CHAR(start_date, %(date_format_str)s) AS temporal_coverage_start,
            TO_CHAR(end_date, %(date_format_str)s) AS temporal_coverage_end,
            -- geographical_coverage
            -- geographical_unit
            -- data_set.keywords AS keywords,
            -- data_set.category AS category,
            -- data_set.document_link AS reference_url,
            contact AS data_steward,
            -- data_set.data_set_contact AS data_steward_organization,
            size::FLOAT AS file_size
            -- number_rows AS rows        -- NOTE: not included in the sample file
            -- number_columns AS columns  -- NOTE: not included in the sample file
        FROM metabase.data_table
        -- JOIN metabase.data_set USING (data_set_id)
        WHERE data_table_id = %(data_table_id)s
        """,
        {
            'date_format_str': date_format_str,
            'data_table_id': data_table_id
        },
    )
    # Index by 0 since the result is a list of one dict.
    return metabase_cur.fetchall()[0]
ccc1bd9fd051125113b51f7da2e276722319ea99
22,228
def positionsDictionary():
    """
    Creates a dictionary with corresponding names and their positions.

    Return Value: the ordered dictionary
    """
    stats = open('volleyball_stats.csv', 'r', encoding="utf-8")  # imports csv file
    names = []  # creates empty lists
    positions = []
    line = stats.readline()  # removes headers
    for line in stats:  # iterates through each line of the csv file
        field = line.split(",")  # splits them by commas to read individual data entries
        names.append(field[0])  # appends specific column to assigned list
        positions.append(field[2])
    stats.close()
    Names_Pos = dict(zip(names, positions))  # creates dictionary with parallel lists
    return Names_Pos
c42a9611b9474137b884f35701e4fc593862892a
22,229
import json


def _get_entry_count(doi_file):
    """
    Given a file path, will return the number of entries in that file.
    If reading the file fails, returns None.
    """
    try:
        with open(doi_file) as f:
            content = json.load(f)
            return len(content)
    except:
        return None
fba5a3152811fbc01f4d91b72bdbe5659a65a152
22,230