Columns: content (string, 35 to 416k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
from pathlib import Path
import os


def get_size(path: Path, decimal_places: int = 1) -> float:
    """
    Get file size in MB, rounded to decimal_places.

    :param path: Path to the file.
    :param decimal_places: int, number of digits after the decimal point in the result.
    :return: float, rounded size of the file in MB.
    """
    if not path.exists():
        return -1
    return round(os.path.getsize(path) / 1024**2, decimal_places)
63271b95f353b201a625e76cb4b5c3015d987ae4
700,837
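A minimal usage sketch for get_size above; the file path and the printed size are illustrative, not from the original:

>>> from pathlib import Path
>>> get_size(Path("data/model.bin"), decimal_places=2)  # hypothetical file
12.35
>>> get_size(Path("missing.bin"))
-1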
from datetime import datetime
import time


def formatTimeFromNow(secs=0):
    """ Properly format a time that is `x` seconds in the future

        :param int secs: Seconds to go in the future (`x > 0`) or the past (`x < 0`)
        :return: Properly formatted time for Graphene (`%Y-%m-%dT%H:%M:%S`)
        :rtype: str
    """
    return datetime.utcfromtimestamp(time.time() + int(secs)).strftime('%Y-%m-%dT%H:%M:%S')
9723ab0656ff4017412ab1fbd5465375ec8df1af
700,839
def standardize_parameter_type(original_type):
    """Standardize parameter type descriptions

    Args:
        original_type (str): The original type

    Returns:
        str: The standardized type name
    """
    original_type = original_type.lower()
    if 'unc' in original_type:
        return 'uncertainty'
    elif 'lev' in original_type:
        return 'lever'
    elif 'con' in original_type or 'fix' in original_type:
        return 'constant'
    raise ValueError('cannot decipher parameter ptype')
49a93bebd8ee4918bdf420ee8c285d6574a3d3d0
700,841
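A quick illustrative check of standardize_parameter_type above (the input strings are made up):

>>> standardize_parameter_type("Uncertainty (range)")
'uncertainty'
>>> standardize_parameter_type("FIXED")
'constant'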
from typing import Tuple

import torch
import math


def real_fourier_basis(n: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """Make a Fourier basis.

    Args:
        n: The basis size

    Returns:
        An array of shape `(n_domain, n_funs)` containing the basis functions,
        and an array containing the spectral covariances, of shape `(n_funs,)`.
    """
    assert n > 1
    dc = torch.ones((n,))
    dc_freq = 0
    cosine_basis_vectors = []
    cosine_freqs = []
    sine_basis_vectors = []
    sine_freqs = []
    ts = torch.arange(n)
    for w in range(1, 1 + (n - 1) // 2):
        x = w * (2 * math.pi / n) * ts
        cosine_basis_vectors.append(math.sqrt(2) * torch.cos(x))
        cosine_freqs.append(w)
        sine_basis_vectors.append(-math.sqrt(2) * torch.sin(x))
        sine_freqs.append(w)
    if n % 2 == 0:
        w = n // 2
        x = w * 2 * math.pi * ts / n
        cosine_basis_vectors.append(torch.cos(x))
        cosine_freqs.append(w)
    basis = torch.stack((dc, *cosine_basis_vectors, *sine_basis_vectors[::-1]), -1)
    freqs = torch.cat(
        (
            torch.tensor([dc_freq], dtype=torch.float),
            torch.tensor(cosine_freqs, dtype=torch.float),
            torch.tensor(sine_freqs[::-1], dtype=torch.float),
        )
    )
    return basis / math.sqrt(n), freqs / n
704aacf8e34f2713ee5e01b8c0de2350c8e03ae2
700,844
def _define_names(d_t, d_y, treatment_names, output_names):
    """
    Helper function to get treatment and output names

    Parameters
    ----------
    d_t: tuple of int
        Tuple of the number of treatments (excluding the control in the discrete treatment scenario).
    d_y: tuple of int
        Tuple of the number of outcomes.
    treatment_names: None or list, optional (default=None)
        The names of the treatments. In the discrete treatment scenario, the names should not
        include the name of the baseline treatment (i.e. the control treatment, which by default
        is the alphabetically smaller).
    output_names: None or list, optional (default=None)
        The names of the outcomes.

    Returns
    -------
    d_t: int
    d_y: int
    treatment_names: list
    output_names: list
    """
    d_t = d_t[0] if d_t else 1
    d_y = d_y[0] if d_y else 1
    if treatment_names is None:
        treatment_names = [f"T{i}" for i in range(d_t)]
    if output_names is None:
        output_names = [f"Y{i}" for i in range(d_y)]
    return (d_t, d_y, treatment_names, output_names)
a475968ed70070175f5ef164d1748def62548c9d
700,845
def _as_vw_string(x, y=None):
    """Convert {feature: value} to a line in the format _VW understands

    Parameters
    ----------
    x : dict of {<feature>: <value>}
    y : int or float, optional
        The label, prepended to the feature string.
    """
    result = str(y)
    x = " ".join(["%s:%f" % (key, value) for (key, value) in list(x.items())])
    return result + " | " + x
89e10d3bb8ad47ad4add6baee83280fa700ca65e
700,846
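A short sketch of the Vowpal Wabbit line produced by _as_vw_string above (feature names are illustrative):

>>> _as_vw_string({"age": 25, "height": 1.8}, y=1)
'1 | age:25.000000 height:1.800000'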
from numpy import where


def qinit(x, y):
    """
    Dam break
    """
    eta = where(x < 10, 40., 0.)
    return eta
aa81ca9782917ce730ec70f4281a7b708c368308
700,848
import sys


def in_notebook():
    """
    Returns ``True`` if the module is running in an IPython kernel,
    ``False`` if in an IPython shell or another Python shell.
    """
    return "ipykernel" in sys.modules
ca8d703a399dfa8ee0bb0bdb42d9af8e657f5549
700,849
def _try_parse_int(value: str):
    """
    Try to parse an integer from a string.

    :param value: string to parse
    :return: int
    """
    try:
        return int(value)
    except ValueError:
        raise ValueError(f"Cannot parse int from string: {value!r}")
50b4f5c3d3e703c2c6c329e2f1abb25a948e488d
700,850
def index_settings(shards=5, refresh_interval=None):
    """Configure an index in ES with support for text transliteration."""
    return {
        "index": {
            "number_of_shards": shards,
            "refresh_interval": refresh_interval,
            "analysis": {
                "analyzer": {
                    "icu_latin": {
                        "tokenizer": "lowercase",
                        "filter": ["latinize"]
                    }
                },
                "filter": {
                    "latinize": {
                        "type": "icu_transform",
                        "id": "Any-Latin; NFD; [:Nonspacing Mark:] Remove; NFC"  # noqa
                    }
                }
            }
        }
    }
29028b545da2e5ee2b0239029d34f863d1d9d943
700,851
import time


def test_approach(approach: str, sample_no: int, func, args: tuple) -> str:
    """
    For a given sample #sample_no, evaluates an approach by running func
    with the provided args and logs the run time.
    """
    res = f"{approach.capitalize()} Programming Approach for the Example #{sample_no}\n"
    start = time.time()
    soln = func(*args)
    res += '%d\nTotal run time %.5f\n' % (soln, time.time() - start)
    return res
ef34e7552a887fd4bee221a5d80bb3d5ab0003a9
700,852
def walk(i, j):
    """
    Walks through the string without crossing the boundaries,
    making sure that i < j - 1.
    """
    if i < j - 1:
        i += 1
    # It is necessary to check again if i < j - 1, after we incremented i
    if i < j - 1:
        j -= 1
    return i, j
1597c41408179f5a371ec14e36494ec5ee3a7aa0
700,853
import uuid


def converter_spark_em_pandas(df):
    """
    Converts the Spark dataframe to Pandas, adding a deterministic,
    unique identifier column for each row of the table.
    """
    data = df.toPandas()
    data["ID_REGISTRO_TABELA"] = data.apply(
        lambda row: str(uuid.uuid5(
            uuid.UUID('a658b648-167e-4d4c-8e09-6dfe7a798204'),
            "".join(map(str, row.values)))),
        axis=1)
    return data
99febbc364025afb39772fa6008f5b4217f394ae
700,854
def kwargs_to_str(kwargs):
    """
    Returns a string of the form '(kw1=val1, kw2=val2)'.
    """
    if len(kwargs) == 0:
        return ""
    else:
        return "(" + ", ".join(f"{k}={v}" for k, v in kwargs.items()) + ")"
39d50d77620061b99861fb7a1fea77ae2a2dc376
700,855
def add_CRUD_pset(pset, sm, model_name):
    """Adds the ServerModel CRUD functions to the Primitive Set

    Parameters
    ----------
    pset : PrimitiveSet
        the primitive set for adding the controller functions
    sm : ServerModel
        the ServerModel with the CRUD functions
    model_name : str
        the name of the OpenAPI model referring to its CRUD functions
        in the ServerModel ['cart' | 'pole']

    Returns
    -------
    PrimitiveSet
        the primitive set containing the ServerModel's CRUD functions
    """
    def pset_cart():
        # cart CRUD functions
        pset.addTerminal(sm.create_cart)
        pset.addTerminal(sm.read_cart)
        pset.addTerminal(sm.update_cart)
        pset.addTerminal(sm.delete_cart)

    def pset_pole():
        # pole CRUD functions
        pset.addTerminal(sm.create_pole)
        pset.addTerminal(sm.read_pole)
        pset.addTerminal(sm.update_pole)
        pset.addTerminal(sm.delete_pole)

    options = {
        'cart': pset_cart,
        'pole': pset_pole,
    }
    # add CRUD functions to pset
    options[model_name]()
    return pset
f0960c3b96789adfa2e56039ca7c072819438984
700,856
def to_litres(gallons):
    """Convert US gallons to metric litres"""
    return 3.78541 * gallons
d1a7be6f01c89b848128218cbec19913e76658cd
700,857
import requests


def get_nioshtic_wikidata_mapping():
    """
    Retrieves a mapping between NIOSHTIC and Wikidata identifiers from the
    Wikidata Query Service, query.wikidata.org

    @return dictionary {nioshtic: {identifier_label: value}}
    """
    prefix = 'http://www.wikidata.org/entity/'
    q = (
        'select%20%3Fi%20%3Fn%20%3Fdoi%20%3Fpubmed%20%3Fpmcid%20%3Fisbn10%20'
        '%3Fisbn13%20where%20%7B%3Fi%20wdt%3AP2880%20%3Fn%20.%20optional%20%7B'
        '%20%3Fi%20wdt%3AP356%20%3Fdoi%20%7D%20.%20optional%20%7B%20%3Fi%20wdt'
        '%3AP698%20%3Fpubmed%20%7D%20.%20optional%20%7B%20%3Fi%20wdt%3AP932%20'
        '%3Fpmcid%20%7D%20.%20optional%20%7B%20%3Fi%20wdt%3AP957%20%3Fisbn10'
        '%20%7D%20.%20optional%20%7B%20%3Fi%20wdt%3AP212%20%3Fisbn13%20%7D%20'
        '%7D')
    url = 'https://query.wikidata.org/sparql?format=json&query=' + q
    try:
        query = requests.get(url).json()['results']['bindings']
    except (requests.RequestException, KeyError, ValueError):
        raise Exception("Wikidata query not possible. Try again later.")
    data = {}
    for x in query:
        key = x['n']['value']
        data[key] = {'Wikidata': x['i']['value'].replace(prefix, '')}
        if 'doi' in x:
            data[key]['DOI'] = x['doi']['value']
        if 'pubmed' in x:
            data[key]['PubMed ID'] = x['pubmed']['value']
        if 'pmcid' in x:
            data[key]['PMCID'] = x['pmcid']['value']
        if 'isbn10' in x:
            data[key]['ISBN-10'] = x['isbn10']['value']
        if 'isbn13' in x:
            data[key]['ISBN-13'] = x['isbn13']['value']
    return data
7216f8ed354bb394e39ceb906d41e19d1e41e589
700,858
import random
import copy


def distr_labeldata_unequal(label_data, num_workers):
    """
    Idea:
    1. For each label, distribute a disproportionate allocation to workers.
    2. Apply the workers' allocation to label_data and store it in
       distr_labeldata, where the keys are workers and the values are the
       labeled data with X and Y.

    Inputs:
        label_data  - dict: output of segregate_labels
        num_workers - scalar: number of workers
    """
    # Step 1: Distribute allocation to workers
    distr_propn = dict()  # A dict of dicts: labels and then worker allocations
    labels = label_data.keys()

    # Initial allocation
    for label in labels:
        ndata = len(label_data[label]['X'])  # number of data points for the given label
        remaining = 100  # 100 percent
        workers = list(range(num_workers))
        w = random.choice(workers)  # Pick the first worker to be allocated first
        workers.remove(w)
        propn = list()  # For sanity check. Distributed proportions should sum to 1.
        distr_propn[label] = dict()
        s = int(50 + (50 / len(workers)))
        p = random.randint(1, s)
        distr_propn[label][w] = int(p / 100 * ndata)  # proportion of labeled data
        propn.append(p / 100)

        # Allocation to intermediate workers
        remaining -= p
        while len(workers) > 1:
            w = random.choice(workers)
            workers.remove(w)
            p = random.randint(1, int(remaining / len(workers)))
            distr_propn[label][w] = int(p / 100 * ndata)
            propn.append(p / 100)
            remaining -= p

        # Last allocation
        w = workers.pop()  # last worker to be allocated
        distr_propn[label][w] = int(remaining / 100 * ndata)
        propn.append(remaining / 100)
        assert round(sum(propn), 1) == 1.0, "Allocation of proportions should equal 1"

    # Step 2: Apply the workers' allocation to label_data and store in distr_labeldata
    distr_labeldata = dict()
    for_distr = copy.deepcopy(label_data)
    for worker in range(num_workers):
        distr_labeldata[worker] = dict()
        total_data = 0
        for label in labels:
            distr_labeldata[worker][label] = dict()
            slice_data = distr_propn[label][worker]
            distr_labeldata[worker][label]['X'] = for_distr[label]['X'][:slice_data]
            distr_labeldata[worker][label]['Y'] = for_distr[label]['Y'][:slice_data]
            # Adjust the data still available for distribution
            for_distr[label]['X'] = for_distr[label]['X'][slice_data:]
            for_distr[label]['Y'] = for_distr[label]['Y'][slice_data:]
            total_data += len(distr_labeldata[worker][label]['X'])
    return distr_labeldata
dab112ba98e9d5d68fbb519f1c7e3f2a6ea24e17
700,859
from typing import Sequence


def all_unique(lst):
    """Returns True if all elements of Sequence `lst` are unique.

    False otherwise.
    """
    assert isinstance(lst, Sequence)
    return bool(len(set(lst)) == len(lst))
546d4254d5ca287952eec6af2bda048e60bb6b89
700,860
def generate_average_csv(fname, fields, trait_list):
    """ Generate CSV called fname with fields and trait_list """
    with open(fname, 'w') as csv:
        csv.write(','.join(map(str, fields)) + '\n')
        csv.write(','.join(map(str, trait_list)) + '\n')
    return fname
43195ea054ea537a4860c07c03c96efc263c472f
700,861
def cal_newpath(dis_mat, path_new, cityNum):
    """
    Compute the total distance of every path.

    :param dis_mat: city distance matrix, ndarray
    :param path_new: path matrix, ndarray
    :param cityNum: number of cities, int
    :return: list of total distances, one per path
    """
    dis_list = []
    for each in path_new:
        dis = 0
        for j in range(cityNum - 1):
            dis = dis_mat[each[j]][each[j + 1]] + dis
        dis = dis_mat[each[cityNum - 1]][each[0]] + dis  # return home
        dis_list.append(dis)
    return dis_list
2a4d733e9633a44da3d66d74d54b75efc165d8bf
700,862
def load_input(source):
    """load the input"""
    if isinstance(source, str):  # pragma: no cover
        with open(source, 'r', encoding='utf8') as stream:
            return stream.read()
    else:
        data = source.read()
        if isinstance(data, bytes):
            return data.decode("utf8")
        return data
fa0a9aac1854af59f400bf2344638c8fb4bee96d
700,863
def is_integer(mark_string):
    """Function to check if a supposed pk is an integer."""
    try:
        int(mark_string)
    except ValueError:
        return False
    return True
1df4842906452cf5672584e5781e0e8a2d3b367d
700,864
def _extract_command_with_args(cmd):
    """Parse input command with arguments.

    Parses the input command in such a way that the user may provide
    additional arguments to the command. The format used is this:

        command=arg1,arg2,arg3,...

    All the additional arguments are passed as arguments to the
    target method.
    """
    def _isint(value):
        try:
            int(value)
            return True
        except ValueError:
            return False

    equal_sign = cmd.find('=')
    if equal_sign == -1:
        return cmd, []

    command = cmd[0:equal_sign]
    args = cmd[equal_sign + 1:].split(',')
    converted = [x if not _isint(x) else int(x) for x in args]
    return command, converted
3db8aebab04e32f292e2956412bd81e7a07a471e
700,865
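An illustrative call to _extract_command_with_args above (the command names are made up):

>>> _extract_command_with_args("move=10,fast")
('move', [10, 'fast'])
>>> _extract_command_with_args("stop")
('stop', [])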
def filtered_secondary_files(unfiltered_secondary_files: dict) -> list:
    """
    Remove unprocessed secondary files.

    Interpolated strings and optional inputs in secondary files were added
    to CWL in version 1.1.

    The CWL libraries we call do successfully resolve the interpolated
    strings, but add the resolved fields to the list of unresolved fields,
    so we remove them here after the fact.

    We also remove any secondary files here not containing 'toilfs:', which
    means that it was not successfully imported into the toil jobstore. The
    'required' logic seems to be handled deeper in cwltool.builder.Builder(),
    and correctly determines which files should be imported. Therefore we
    remove the files here, and if this file is SUPPOSED to exist, it will
    still give the appropriate file-does-not-exist error, just a bit further
    down the track.
    """
    intermediate_secondary_files = []
    final_secondary_files = []
    # remove secondary files still containing interpolated strings
    for sf in unfiltered_secondary_files["secondaryFiles"]:
        sf_bn = sf.get("basename", "")
        sf_loc = sf.get("location", "")
        if ("$(" not in sf_bn) and ("${" not in sf_bn):
            if ("$(" not in sf_loc) and ("${" not in sf_loc):
                intermediate_secondary_files.append(sf)
    # remove secondary files that are not present in the filestore
    # i.e. 'file://' only gets converted to 'toilfs:' upon a successful import
    for sf in intermediate_secondary_files:
        sf_loc = sf.get("location", "")
        # directories aren't imported, so don't worry about them
        if sf_loc.startswith("toilfs:") or sf.get("class", "") == "Directory":
            final_secondary_files.append(sf)
    return final_secondary_files
89c0906984ca634b8afc3dd4bdf2e9ccbbad504c
700,866
def get_P_rtd_hs(q_rtd_hs, e_rtd):
    """Rated power consumption (Eq. 4)

    Args:
        q_rtd_hs (float): rated capacity of the hot-water heating heat source unit (W)
        e_rtd (float): efficiency of the water heater

    Returns:
        float: rated power consumption
    """
    return q_rtd_hs / e_rtd
61d4a7c2f26b979891936efd6db0195ca3b083b0
700,867
def cpo(total_cost, total_transactions):
    """Return the CPO (Cost per Order).

    Args:
        total_cost (float): Total cost of marketing.
        total_transactions (int): Total number of transactions.

    Returns:
        cpo (float): Total cost per order.
    """
    return total_cost / total_transactions
aaaaf5a96fcbef65e59591954bded1afa13f8c47
700,868
def rebin_1darray(a, shape, function='sum'):
    """Rebin an array into a new shape by taking the sum or mean."""
    sh = (shape, a.shape[0] // shape)
    if function == 'mean':
        return a.reshape(sh).mean(-1)
    elif function == 'sum':
        return a.reshape(sh).sum(-1)
    else:
        print("WARNING: doing the sum as input function {} "
              "not recognised".format(function))
        return a.reshape(sh).sum(-1)
66de60326e081c27aae471281c79c5e86f5180e3
700,869
def get_ground_truth(obj, image, question):
    """
    Get the ground truth value for the image/question combination in
    reader study obj.
    """
    ground_truths = obj.statistics["ground_truths"]
    return ground_truths[image][question]
522b5550344891e0985bacb9c763c4c52686cb67
700,870
import os


def get_workspace(strOrigPath, strType_='flatten', strSuff_=''):
    """
    Return the TDIS working (flatten) path given a TDIS path.
    """
    if os.path.split(strOrigPath)[1].startswith('TDISm__'):
        strScene = strOrigPath.split(os.sep)[-4].split('_')[0]
        p, f = os.path.split(strOrigPath)
        strWorkPath = p + os.sep + strScene + '_' + f + '_' + strType_ + strSuff_
    else:
        print('Archive style folder name.')
        strWorkPath = strOrigPath + '_flatten'
    return strWorkPath
adb28fe59a436a85329c7f7e88412835e6486a38
700,871
def is_pandas_df(obj):
    """Check if an object is a Pandas dataframe

    The benefit here is that Pandas doesn't have to be included
    in the dependencies for pydeck

    The drawback of course is that the Pandas API might change and
    break this function
    """
    return (obj.__class__.__module__ == 'pandas.core.frame'
            and hasattr(obj, 'to_records') and hasattr(obj, 'to_dict'))
aa226f86d8640903fef4e51121e285ba9c594a3c
700,872
def solve(f, x0):
    """
    Solve the equation f(x) = x using a fixed point iteration.
    x0 is the start value.
    """
    x = x0
    for n in range(10000):  # at most 10000 iterations
        oldX = x
        x = f(x)
        if abs(x - oldX) < 1.0e-15:
            return x
    raise RuntimeError("fixed point iteration did not converge")
d5f5409d689c842c1a8c70b95c621360c9ae7c8c
700,874
def p2_allocateByScore(CCA, nameCat, quota, rank, scorelist, final_dic, CCA_dic):
    """
    Allocates students to a CCA where applicants exceed the quota.
    Returns the final dictionaries.
    """
    cat = ""
    for key, value in nameCat.items():  # theoretically it will all be the same anyway
        cat = value
    quota_int = quota.dic[cat][CCA]

    in_order = []
    for name, score in scorelist.items():  # names in order of score
        if in_order == []:
            in_order.append(name)
        else:
            added = False
            for name2 in in_order:
                if not added and scorelist[name2] < score:
                    in_order.insert(in_order.index(name2), name)
                    added = True
            if not added:  # score is lower than everyone already placed
                in_order.append(name)

    try:  # get number of assigned students
        alras = len(CCA_dic[CCA])
    except KeyError:
        alras = 0

    pos_left = quota_int - alras
    to_add = in_order[0:pos_left]
    for name in to_add:
        if name not in final_dic:
            # Allocation line
            final_dic[name] = {"CCA": CCA, "rank": rank, "score": scorelist[name]}

    CCA_dic = {}  # clear - CCA_dic is based on the most updated final_dic
    for name, CCA in final_dic.items():  # reverse
        try:
            dic = CCA_dic[CCA["CCA"]]
            dic[name] = {"rank": CCA["rank"], "score": CCA["score"]}
            CCA_dic[CCA["CCA"]] = dic
        except KeyError:
            CCA_dic[CCA["CCA"]] = {name: {"rank": CCA["rank"], "score": CCA["score"]}}
    return final_dic, CCA_dic
de0ec96eadf396319a7dfbb3942a6917afc6c84c
700,875
def _total_solves(color_info):
    """
    Return the total number of linear solves required based on the given coloring info.

    Parameters
    ----------
    color_info : dict
        dict['fwd'] = (col_lists, row_maps)
            col_lists is a list of column lists, the first being a list of uncolored columns.
            row_maps is a list of nonzero rows for each column, or None for uncolored columns.
        dict['rev'] = (row_lists, col_maps)
            row_lists is a list of row lists, the first being a list of uncolored rows.
            col_maps is a list of nonzero cols for each row, or None for uncolored rows.
        dict['sparsity'] = a nested dict specifying subjac sparsity for each total derivative.
        dict['J'] = ndarray, the computed boolean jacobian.

    Returns
    -------
    int
        Total number of linear solves required to compute the total Jacobian.
    """
    total_solves = 0

    # lists[0] are the uncolored columns or rows, which are solved individually so
    # we add all of them, along with the number of remaining lists, where each
    # sublist is a bunch of columns or rows that are solved together, to get the
    # total colors (which equals the total number of linear solves).
    if 'fwd' in color_info:
        col_lists, _ = color_info['fwd']
        total_solves += len(col_lists[0]) + len(col_lists[1:])
    if 'rev' in color_info:
        row_lists, _ = color_info['rev']
        total_solves += len(row_lists[0]) + len(row_lists[1:])

    return total_solves
32031f2ba834f7d6ed310b0a71ab43884b424459
700,876
import argparse


def parse_options():
    """Parses command line options"""
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output", help="Output file name (e.g. oui.h)")
    parser.add_argument("-u", "--url", help="Wireshark oui/manuf file url")
    opt = parser.parse_args()
    return opt
f01013440685506ef3bef27e6c0d79147c2b896f
700,877
def gettext(msg, domain='python-apt'):  # real signature unknown; restored from __doc__
    """
    gettext(msg: str[, domain: str = 'python-apt']) -> str

    Translate the given string. This is much faster than Python's version
    and only does translations after setlocale() has been called.
    """
    return ""
a6d83a79110ec233f86878fc6aa9f37f2b3e61fa
700,878
import re


def createPattern(string):
    """Create a bot command pattern."""
    return re.compile(f"/{string}|{string}")
b2b013dc71d5f49f041402ecf98db8b923f4d22a
700,879
def gnomad_filtered_func(raw_value):
    """We use FILTER in Gnomad3 (GRCh38 only) - need to convert back to bool"""
    return raw_value not in (None, "PASS")
6de1d4ef9395e89c11b08cdb43decb045656494b
700,880
import torch


def _project_z(z, project_method='clip'):
    """To be used for projected gradient descent over z."""
    if project_method == 'norm':
        z_p = torch.nn.functional.normalize(z, p=2, dim=-1)
    # note: the default 'clip' branch is not reimplemented yet and will raise
    # elif project_method == 'clip':
    #     z_p = tf.clip_by_value(z, -1, 1)
    else:
        raise ValueError('Unknown project_method: {}'.format(project_method))
    return z_p
6cabb462dcbfff7ffdc065601f4a68671822f93f
700,881
from datetime import datetime


def __dirnames_matching_format(dirnames, format):
    """
    Iterates through dirnames and returns a sorted array of directory
    names that match the provided format.
    """
    matching_dates = []
    for dirname in dirnames:
        try:
            dt = datetime.strptime(dirname, format)
            if dt is not None:
                matching_dates.append(dt)
        except ValueError:
            pass
    matching_dates.sort(reverse=True)
    return [datetime.strftime(dt, format) for dt in matching_dates]
52349f0992a0ac0366d75f5328f674ab179246be
700,882
import re


def solve(s):
    """Capitalize every word in `s`, leaving the delimiters intact."""
    return "".join([w.capitalize() for w in re.split(r"(\W+)", s)])
28f075dd47c3ec57d6bb2a1675ae77815378786e
700,883
def get_fields_by_name(model_cls, *field_names):
    """Return a dict of `models.Field` instances for named fields.

    Supports wildcard fetches using `'*'`.

        >>> get_fields_by_name(User, 'username', 'password')
        {'username': <django.db.models.fields.CharField: username>,
         'password': <django.db.models.fields.CharField: password>}

        >>> get_fields_by_name(User, '*')
        {'username': <django.db.models.fields.CharField: username>,
         ...,
         'date_joined': <django.db.models.fields.DateTimeField: date_joined>}
    """
    if '*' in field_names:
        return dict((field.name, field) for field in model_cls._meta.fields)
    return dict((field_name, model_cls._meta.get_field(field_name))
                for field_name in field_names)
8ce9c845adbff9bb53da50c7d7e208aa8077e718
700,884
def list_of_comments(fname):
    """Returns list of str objects - comment records from file.

    - fname - file name for text file.
    """
    cmts = []
    with open(fname, 'r') as f:
        for rec in f:
            if rec.isspace():
                continue  # ignore empty lines
            elif rec[0] == '#':
                cmts.append(rec.rstrip('\n'))
            else:
                break
    if len(cmts) == 0:
        return None
    return cmts
5aea0668c006b4a4615cab01acd07db8dc1fb2b5
700,885
import re


def minify(source):
    """
    Removes comments from the source code.
    """
    multiline = re.compile(r"(/\*.*?\*/)", re.DOTALL)
    singleline = re.compile(r"//.*?\n")

    remove_multiline = lambda f: re.sub(multiline, "", f)
    remove_singleline = lambda f: re.sub(singleline, "", f)

    return map(remove_multiline, map(remove_singleline, source))
3743e4071485d1ae085037a4bf4cd492f4f81d29
700,886
import logging


def with_logger(cls):
    """Class decorator to add a logger to a class."""
    attr_name = '_logger'
    cls_name = cls.__qualname__
    module = cls.__module__
    if module is not None:
        cls_name = module + '.' + cls_name
    else:
        raise AssertionError
    setattr(cls, attr_name, logging.getLogger(cls_name))
    return cls
77f854ac6d1cbe95ff7804184a1a67a513ac81ae
700,887
from numpy import linspace, meshgrid


def _get_latlons(nlat, nlon):
    """Build a regular global latitude/longitude grid.

    Parameters
    ----------
    nlat : int
        Number of latitude points.
    nlon : int
        Number of longitude points.

    Returns
    -------
    tuple of ndarray
        2-D longitude and latitude arrays as produced by `meshgrid`.
    """
    lon_min = -179.875
    lon_max = -1 * lon_min
    lat_min = -89.875
    lat_max = -1.0 * lat_min
    lons = linspace(lon_min, lon_max, nlon)
    lats = linspace(lat_max, lat_min, nlat)
    lon, lat = meshgrid(lons, lats)
    return lon, lat
f43553b4f758ac2a060f609e2781645b79466cb4
700,888
from typing import Dict


def convert_to_km(distance: float, print_output: bool = True) -> Dict[str, float]:
    """Convert a miles distance into the double of km.

    Args:
        distance: a distance (in miles).
        print_output: if True, prints the progress.

    Returns:
        A dictionary with two keys ('original' and 'converted').

    Raises:
        Exception: if the distance is not a valid value.
    """
    if distance is None:
        raise Exception('distance is not valid')
    if print_output:
        print("calculating ...", "Using ", distance)
    return {
        "original": distance,
        # The constant 2*1.60934 is used as the robot is magic and covers twice
        # the distance if specified in km.
        "converted": distance * 3.21868,
    }
1fb9dbbeb890a348d0feeebaea1a308bc06b039d
700,889
import aiohttp


def aiohttp_socket_timeout(socket_timeout_s):
    """ Return an aiohttp.ClientTimeout object with only socket timeouts set. """
    return aiohttp.ClientTimeout(total=None,
                                 connect=None,
                                 sock_connect=socket_timeout_s,
                                 sock_read=socket_timeout_s)
c17a40a532aee15557b4e507439b0a7b2e98989e
700,890
def getMulitples(indices):
    """Group consecutive entries of `indices` that share the same first
    element; entries without such a neighbour are filtered out."""
    multiples = []
    added = []
    for i in range(0, len(indices) - 1):
        if indices[i][0] == indices[i + 1][0]:
            added.append(indices[i])
        elif added:
            added.append(indices[i])
            multiples.append(added)
            added = []
    if added:  # close a run that extends to the final element
        added.append(indices[-1])
        multiples.append(added)
    return multiples
4846fe1950f7d1a5379595b93e18c3ed0eb2d160
700,891
def do_math(a, b, operator):
    """Helper function that performs a computation between two numbers.

    Note that `b` is the left operand and `a` the right one.
    """
    if operator == "+":
        return int(b) + int(a)
    elif operator == "-":
        return int(b) - int(a)
    elif operator == "*":
        return int(b) * int(a)
    elif operator == "/":
        return int(b) // int(a)
    raise ValueError(f"Unknown operator: {operator}")
5bffd1db1659c9f3420cc66a28525698de3beef5
700,892
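A quick illustrative check of the operand order in do_math above (b is the left operand):

>>> do_math(3, 10, "-")
7
>>> do_math(4, 10, "/")
2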
def deg_to_arcsec(angle: float) -> float:
    """
    Convert degrees to arcseconds.

    Args:
        angle: Angle in units of degrees.

    Returns:
        angle: Angle in units of arcseconds.
    """
    return float(angle) * 3600.
02fd099627970116bf5513af8b8d2d62bdc8ed41
700,893
def set_window_width(image, MIN_BOUND=-1000.0, MAX_BOUND=400.0):
    """Set the window width (clamp intensities to [MIN_BOUND, MAX_BOUND])."""
    image[image > MAX_BOUND] = MAX_BOUND
    image[image < MIN_BOUND] = MIN_BOUND
    return image
0dfa6e858c74cacc2cc8724d33f9b7fd96835d0c
700,894
import json


def update_chart_info(_figure, chart_data_json_str):
    """
    A callback function to set the sample count for the number of samples
    that have been displayed on the chart.

    Args:
        _figure (object): A figure object for a dash-core-components Graph for
            the strip chart - triggers the callback.
        chart_data_json_str (str): A string representation of a JSON object
            containing the current chart data - triggers the callback.

    Returns:
        str: A string representation of a JSON object containing the chart
        info with the updated sample count.
    """
    chart_data = json.loads(chart_data_json_str)
    chart_info = {'sample_count': chart_data['sample_count']}
    return json.dumps(chart_info)
fd28e28b7b48131bb56d6d9f29e4fe438b33bb7a
700,895
def record_copy_all(node, **kwargs):
    """
    A default rcd implementation that copies all kwargs to the tape.

    This impl is used for the vjp and jvp primitives. It can also be used for
    the apl primitives if no rcd is given, but we use 'record_copy_autodiff'
    there to save a much smaller subset of variables.
    """
    return kwargs
e7e957c1d9fb0cc36bf54c1ab431ba4c7892838c
700,896
def read_file(filename):
    """
    Return the content of a file as a list of strings, each corresponding to a line.

    :param filename: string: location and name of the file
    :return: content of filename
    """
    with open(filename, 'r') as ofile:
        content = ofile.read().splitlines()
    return content
58a2718265fef848e484178e407aee6f7017a52a
700,897
def score_1(game, player):  # 82.14%
    """
    Heuristic computing score using #player moves - k * #opponent moves

    :param game: game
    :param player: player
    :return: score
    """
    if game.is_winner(player) or game.is_loser(player):
        return game.utility(player)

    opponent = game.get_opponent(player)
    player_moves = game.get_legal_moves(player)
    opponent_moves = game.get_legal_moves(opponent)

    # return float(len(player_moves) - len(opponent_moves))      # 72.86%
    # return float(len(player_moves) - 2 * len(opponent_moves))  # 79.29%
    # return float(len(player_moves) - 3 * len(opponent_moves))  # 79.29%
    # return float(len(player_moves) - 4 * len(opponent_moves))  # 79.29%
    # return float(len(player_moves) - 5 * len(opponent_moves))  # 80.71%
    # return float(len(player_moves) - 6 * len(opponent_moves))  # 80.71%
    return float(len(player_moves) - 7 * len(opponent_moves))
3995237f5d5474660c752c308e1077aad1743d06
700,898
def parse_line(line):
    """Returns data located in the line."""
    tokens = line.split()
    return int(tokens[1]), tokens[2], (
        int(tokens[4]),  # n_i
        int(tokens[6]),  # s_i
    )
487421bcc50e4d542227e54a1362008a8a678b5b
700,899
def strip_ddp_state_dict(state_dict):
    """ Workaround the fact that DistributedDataParallel prepends 'module.' to
    every key, but the sampler models will not be wrapped in
    DistributedDataParallel. (Solution from PyTorch forums.)"""
    clean_state_dict = type(state_dict)()
    for k, v in state_dict.items():
        key = k[7:] if k[:7] == "module." else k
        clean_state_dict[key] = v
    return clean_state_dict
f4cd6917db3df384e70c6b54dc4142dd760dd1d2
700,901
def _import(module_name, class_name):
    """
    Return class of the module_name, where module_name is of the form
    package.module (testoob's Asserter, which does a simple __import__,
    returns package, not package.module, in this situation).
    """
    mod = __import__(module_name)
    components = module_name.split('.')
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return getattr(mod, class_name)
27a20935b305e3c387c861392429794976d7d866
700,902
def get_reftypes(exp_type, cal_ver=None, context=None):
    """Based on the exposure type, CAL s/w version, and CRDS context,
    determine the list of applicable reference file types.
    """
    return []  # stub: no reference types determined yet
ccb08ecd63b85d4602d419fb8fd90e77984c4048
700,903
def get_CV_current(CV):
    """
    Helper function to compute CV current.

    Args:
        CV (pd.DataFrame): CV segment of charge

    Returns:
        (float): current reached at the end of the CV segment
    """
    if not CV.empty:
        return CV.current.iat[-1]
69e32ecadc57d0855eedfe3404db1e5ec9839862
700,904
import unicodedata


def filter_nick(name):
    """
    filter_nick(name) -> String

    Process the name, get rid of all whitespace and invisible characters,
    and make it all lower case.

    This function is intended to mimic euphoria's name pinging system;
    however, it is slightly less pedantic, allowing for punctuation
    within names.
    """
    ret = "".join(c for c in name
                  if unicodedata.category(c)[0] not in ["C", "Z"])
    ret = "".join(ret.split())
    ret = ret.lower()
    return ret
bba506f4b2e84b4f82df8a4b31feb82203db8356
700,906
def label_name(event_data):
    """Get the label name from a label-related webhook event."""
    return event_data["label"]["name"]
903173b4fd9ddeb0a74a3e10be94626e0685a037
700,907
def bit_length_power_of_2(value):
    """Return the smallest power of 2 greater than or equal to a numeric value.

    :param value: Number to find the smallest power of 2 for
    :type value: ``int``
    :returns: ``int``
    """
    return 2 ** (int(value) - 1).bit_length()
bb49afee83ac255549ce5b5aaab80bb76ad4e337
700,908
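Two illustrative calls to bit_length_power_of_2 above; the second shows that an exact power of 2 is returned unchanged:

>>> bit_length_power_of_2(5)
8
>>> bit_length_power_of_2(8)
8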
import json


def json_top_atom_count(json_str):
    """Count the number of atoms in a JSON topology used by wepy HDF5."""
    top_d = json.loads(json_str)
    atom_count = 0
    for chain in top_d['chains']:
        for residue in chain['residues']:
            atom_count += len(residue['atoms'])
    return atom_count
0e1e23cd4b9e5cedf3e6b5d815ee817798188752
700,910
def length_squared(point):
    """
    Square of the length from the origin of a point.

    Args:
        point (QPointF): the point

    Returns:
        square of the length
    """
    return point.x() * point.x() + point.y() * point.y()
0289ca736f087dd75f9b9e0f1347fdc223d03f84
700,911
def topSort(G):
    """
    Topological sort implemented with BFS.

    Every node with in-degree 0 is pushed onto the queue. We then visit the
    neighbours of each dequeued node, decrementing their in-degree by 1 and
    enqueueing any node whose in-degree drops to 0, until the queue is empty.
    """
    Q = []  # queue holding the nodes
    counter = 0
    sort = {}
    for i in G:
        if i.degree == 0:
            Q.append(i)
    while len(Q) != 0:
        vertex = Q.pop()
        sort[vertex] = counter
        counter += 1
        if vertex.c is None:
            continue
        for j in vertex.c:
            j.degree -= 1
            if j.degree == 0:
                Q.append(j)
    if len(sort) != len(G):
        print("Graph has a cycle!")
        return None
    return sort
3b8662a4adbc32d9a2174b5faf82d0c763d703fe
700,912
import getopt
import sys


def get(params_config, params=None, is_show_help=True):
    """Normalize command-line parameter handling.

    Based on the provided params_config, extracts the configured fields from
    params and returns them as a dictionary.

    Example params_config:
        {
            'username': {'must': False, 'data': True, 'short': 'U', 'long': 'username', 'default': 'root'},
            'password': {'must': True, 'data': True, 'short': 'P', 'long': 'password'},
            'remember': {'must': False, 'data': False, 'short': 'R', 'long': 'remember'},
        }
    where:
        username: the key under which the value is returned
        must: whether the parameter is required; if required but missing, an error is returned
        data: whether the option carries a value, e.g. in `-i 3306`, 3306 is the value of -i
        short: the short name, i.e. -i
        long: the long name, i.e. --install
        default: the default used when must is False and the option was not provided

    On success returns: {'data': {'username': 'root', 'password': 'password', 'remember': True}, 'args': []}
    On failure returns: {'errcode': int, 'error': 'error msg'}

    :param params_config: dict mapping each key to a dict with the four fields
        must (required?), data (carries a value?), short (short name), long (long name)
    :param params: list of system arguments, defaults to sys.argv
    :param is_show_help: bool, whether to print help information, defaults to True
    :return: dict, e.g. {'data': {'username': 'root', 'password': 'password', 'remember': True}, 'args': []}
    """
    if not params:
        params = sys.argv
    ret_dict = {}
    options = ''
    long_options = []
    readme = params[0] + " "
    for key1, dict1 in params_config.items():
        has_add = False
        short = dict1.get('short')
        long_tmp = dict1.get('long')
        has_data = dict1.get('data')
        must = dict1.get('must')
        if short:
            options += (short + ':' if has_data else short)
            readme_tmp = '-%s' % short
            if has_data:
                readme_tmp = '%s <%s>' % (readme_tmp, key1)
            if not must:
                readme_tmp = '[%s]' % readme_tmp
            readme += readme_tmp + " "
            has_add = True
        if long_tmp:
            long_options.append(long_tmp + '=' if has_data else long_tmp)
            if not has_add:
                readme_tmp = '--%s' % long_tmp
                if has_data:
                    readme_tmp = '%s <%s>' % (readme_tmp, key1)
                if not must:
                    readme_tmp = '[%s]' % readme_tmp
                readme += readme_tmp + " "
    try:
        opts, args = getopt.getopt(params[1:], options, long_options)
    except getopt.GetoptError as e:
        if is_show_help:
            print('\033[1;31;43m Unrecognized parameter: \033[0m')
            print(e)
            print("\nRules:\n", readme)
        return {'errcode': -1, 'error': 'unrecognized parameter: ' + e.__str__()}
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print(readme)
            return {'errcode': 0, 'error': readme}
        for key, dict1 in params_config.items():
            if opt in ('-' + dict1.get('short'), '--' + dict1.get('long')):
                ret_dict[key] = arg if arg else True
    error = ""
    for key, dict1 in params_config.items():
        if key not in ret_dict:
            if dict1.get('must'):
                error += ' -%s <%s>' % (dict1.get('short'), key)
            elif dict1.get('default'):
                ret_dict[key] = dict1.get('default')
    if error:
        if is_show_help:
            print("\033[1;31;43m Missing parameter: \033[0m")
            print(error)
            print('\nUsage:\n', readme)
        return {'errcode': -1, 'error': 'missing parameter:' + error}
    return {'data': ret_dict, 'args': args}
b149a0aeeb246d0b0d5c36c3e9032d0d64563654
700,913
def get_ancestor(taxid, tree, stop_nodes):
    """Walk up the tree until reaching a stop node, or the root."""
    t = taxid
    while True:
        if t in stop_nodes:
            return t
        elif not t or t == tree[t]:
            return t  # root
        else:
            t = tree[t]
f7841bc5104f96cd66122165a0646b70fc3fd33e
700,914
import os


def file_exists(file):
    """Check if a file exists."""
    if not os.path.exists(file):
        return False
    try:
        open(file).close()
    except IOError:
        return False
    return True
34a3f66e8597cd0cf7b77c9c270407268ac52a70
700,915
def check_patch_in_bounds(x, y, X_dim, Y_dim):
    """
    Usage: TrueFalse = check_patch_in_bounds(x, y, X_dim, Y_dim)

    Determine if the box is within the image.

    Args:
        x: a tuple, list or array (x_start, x_end)
        y: a tuple, list or array (y_start, y_end)
        X_dim: a tuple, list or array (Image_X_start, Image_X_end)
        Y_dim: a tuple, list or array (Image_Y_start, Image_Y_end)
    """
    if x[0] > x[1] or y[0] > y[1] or X_dim[0] > X_dim[1] or Y_dim[0] > Y_dim[1]:
        return False
    if x[0] >= X_dim[0] and y[0] >= Y_dim[0] and x[1] < X_dim[1] and y[1] < Y_dim[1]:
        return True
    else:
        return False
80221a95fda698f31aeed6c91987a5227a21e751
700,916
def domain(request) -> str:
    """Return AWS domain"""
    return request.config.getoption("--domain") or "amazonaws.com"
fbf812dd28eb6aa3ff6a647a4cd1d17b739cb320
700,917
def read_table(data, coerce_type, transpose=False):
    """
    Reads in data from a simple table and forces it to be a particular type

    This is a helper function that allows data to be easily contained in a
    simple script

    ::return: a dictionary with the keys being a tuple of the strings in the
              first row and column of the table
    ::param data: the multiline string containing the table data
    ::param coerce_type: the type that the table data is converted to
    ::param transpose: reverses the data if needed

    Example:
    >>> table_data = '''
    ...        L1      L2      L3      L4      L5      L6
    ... C1     6736   42658   70414   45170  184679  111569
    ... C2   217266  227190  249640  203029  153531  117487
    ... C3    35936   28768  126316    2498  130317   74034
    ... C4    73446   52077  108368   75011   49827   62850
    ... C5   174664  177461  151589  153300   59916  135162
    ... C6   186302  189099  147026  164938  149836  286307
    ... '''
    >>> table = read_table(table_data, int)
    >>> table[("C1","L1")]
    6736
    >>> table[("C6","L5")]
    149836
    """
    lines = data.splitlines()
    headings = lines[1].split()
    result = {}
    for row in lines[2:]:
        items = row.split()
        for i, item in enumerate(items[1:]):
            if transpose:
                key = (headings[i], items[0])
            else:
                key = (items[0], headings[i])
            result[key] = coerce_type(item)
    return result
6701736354b30d41b4adf7c6a11406f26c21c71b
700,919
import argparse


def parameter_parser():
    """
    A method to parse up command line parameters.
    By default it trains on the PubMed dataset.
    The default hyperparameters give a good quality representation without grid search.
    """
    parser = argparse.ArgumentParser(description="Run .")

    parser.add_argument("--edge-path", nargs="?", default="./input/edges.csv",
                        help="Edge list csv.")
    parser.add_argument("--features-path", nargs="?", default="./input/features.csv",
                        help="Features json.")
    parser.add_argument("--target-path", nargs="?", default="./input/target.csv",
                        help="Target classes csv.")
    parser.add_argument("--clustering-method", nargs="?", default="metis",
                        help="Clustering method for graph decomposition. Default is the metis procedure.")
    parser.add_argument("--epochs", type=int, default=200,
                        help="Number of training epochs. Default is 200.")
    parser.add_argument("--seed", type=int, default=42,
                        help="Random seed for train-test split. Default is 42.")
    parser.add_argument("--dropout", type=float, default=0.5,
                        help="Dropout parameter. Default is 0.5.")
    parser.add_argument("--learning-rate", type=float, default=0.01,
                        help="Learning rate. Default is 0.01.")
    parser.add_argument("--test-ratio", type=float, default=0.9,
                        help="Test data ratio. Default is 0.9.")
    parser.add_argument("--cluster-number", type=int, default=10,
                        help="Number of clusters extracted. Default is 10.")

    parser.set_defaults(layers=[16, 16, 16])

    return parser.parse_args()
7a4b82373d1d1f7028eb6c228d35d3f35107ce19
700,920
def cat_arg_and_value(arg_name, value):
    """Concatenate a command line argument and its value

    This function returns ``arg_name`` and ``value`` concatenated in the best
    possible way for a command line execution, namely:

    - if arg_name starts with `--` (e.g. `--arg`): `arg_name=value` is returned (i.e. `--arg=val`)
    - if arg_name starts with `-` (e.g. `-a`): `arg_name value` is returned (i.e. `-a val`)
    - if arg_name does not start with `-` and it is a long option (e.g. `arg`):
      `--arg_name=value` (i.e., `--arg=val`)
    - if arg_name does not start with `-` and it is a short option (e.g. `a`):
      `-arg_name value` (i.e., `-a val`)

    :param arg_name: the command line argument name
    :type arg_name: str
    :param value: the command line argument value
    :type value: str
    """
    if arg_name.startswith("--"):
        return "=".join((arg_name, str(value)))
    elif arg_name.startswith("-"):
        return " ".join((arg_name, str(value)))
    elif len(arg_name) == 1:
        return " ".join(("-" + arg_name, str(value)))
    else:
        return "=".join(("--" + arg_name, str(value)))
bcd99ab465707e594646d2152ad7b10b32956f5e
700,921
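Two illustrative calls to cat_arg_and_value above, one per naming style (the argument names are made up):

>>> cat_arg_and_value("walltime", "00:10:00")
'--walltime=00:10:00'
>>> cat_arg_and_value("n", 4)
'-n 4'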
import inspect


def unwrap_obj(obj):
    """
    Gets the actual object from a decorated or wrapped function

    @obj: (#object) the object to unwrap
    """
    try:
        obj = obj.fget
    except (AttributeError, TypeError):
        pass
    try:
        # Cached properties
        if obj.func.__doc__ == obj.__doc__:
            obj = obj.func
    except AttributeError:
        pass
    try:
        # Setter/Getters
        obj = obj.getter
    except AttributeError:
        pass
    try:
        # Wrapped Funcs
        obj = inspect.unwrap(obj)
    except Exception:
        pass
    return obj
d158d7ed832823b66e46bea00a96c5799684bd33
700,923
def get_workspace(repo_context):
    """
    Construct the workspace url from the given repo context

    :param repo_context: Repo context from context.py
    :return: Workspace url for known VCS or None
    """
    if not repo_context["repositoryUri"]:
        return None
    revision = repo_context.get("revisionId", repo_context.get("branch"))
    if "github.com" in repo_context["repositoryUri"]:
        return "{}/blob/{}".format(repo_context["repositoryUri"], revision)
    if "gitlab" in repo_context["repositoryUri"]:
        return "{}/-/blob/{}".format(repo_context["repositoryUri"], revision)
    if "bitbucket" in repo_context["repositoryUri"] and repo_context.get("revisionId"):
        return "{}/src/{}".format(repo_context["repositoryUri"], revision)
    if "azure.com" in repo_context["repositoryUri"] and repo_context.get("branch"):
        return "{}?_a=contents&version=GB{}&path=".format(
            repo_context["repositoryUri"], repo_context.get("branch")
        )
    return None
ebab3b089c8267932a7a660207b8c7d4b38148f9
700,924
def plotlify_scatter_js(xy=None, x=None, y=None, xtag=None, ytag=None, description=""):
    """
    Update the plot data in plotly format.

    :param xy: x and y in a single structure: a dict whose keys become the y
        values and whose values become the x values.
    :param x: x values, used together with `y` when `xy` is not given.
    :param y: y values, used together with `x` when `xy` is not given.
    :param xtag: optional x-axis tag (currently unused).
    :param ytag: optional y-axis tag (currently unused).
    :param description: The description of the plotly plot.
    :return: A dictionary with the data in plotly format.
    """
    ret = {
        'data': [],
        'layout': {
            'paper_bgcolor': 'rgba(243, 243, 243, 1)',
            'plot_bgcolor': 'rgba(0,0,0,0)',
            'title': {
                'text': description,
            }
        }
    }
    if xy:
        ret['data'].append(
            {
                'x': [v for k, v in xy.items()],
                'y': [k for k, v in xy.items()],
                'type': 'scatter',
            }
        )
    elif x and y:
        ret['data'].append(
            {
                'x': x,
                'y': y,
                'type': 'scatter',
            }
        )
    return ret
2ad2e33dde23ec18f162c28425380b99ae3596a6
700,925
import os

import requests


def retrieve_article(article, output_filename, clobber=False):
    """Download the journal article (preferred) or pre-print version
    of the article provided, and save the PDF to disk.

    Inputs
    ------
    article : `Article` object
        The article to retrieve.

    output_filename : str
        The filename to save the article to.

    clobber : bool, optional
        Overwrite the filename if it already exists.
    """
    if os.path.exists(output_filename) and not clobber:
        raise IOError("output filename ({filename}) exists and we've been "
                      "asked not to clobber it.".format(filename=output_filename))

    # Get the ADS url
    ads_redirect_url = "http://adsabs.harvard.edu/cgi-bin/nph-data_query"
    arxiv_payload = {
        "bibcode": article.bibcode,
        "link_type": "PREPRINT",
        "db_key": "PRE"
    }
    article_payload = {
        "bibcode": article.bibcode,
        "link_type": "ARTICLE",
        "db_key": "AST"
    }

    # Let's try and download the article from the journal first
    article_r = requests.get(ads_redirect_url, params=article_payload)
    if not article_r.ok:
        arxiv_r = requests.get(ads_redirect_url, params=arxiv_payload)
        if not arxiv_r.ok:
            arxiv_r.raise_for_status()
        article_pdf_url = arxiv_r.url.replace("abs", "pdf")
    else:
        # Parse the PDF url
        article_pdf_url = article_r.url.rstrip("+html")

    article_pdf_r = requests.get(article_pdf_url)
    if not article_pdf_r.ok:
        article_pdf_r.raise_for_status()

    with open(output_filename, "wb") as fp:
        fp.write(article_pdf_r.content)

    return True
d742068dd76090fb03f222194f66d66b1dc95ffe
700,926
def envelops(reg, target_reg):
    """Given a region and another target region, returns whether the
    region is enveloped within the target region."""
    return (target_reg.start <= reg.start < target_reg.end) \
        and (target_reg.start <= reg.end < target_reg.end)
716f2e905bdb852d9e0b85ff0482a70a64d325f1
700,927
from typing import Any, MutableMapping


def include_filter(
        include: MutableMapping[Any, Any],
        target: MutableMapping[Any, Any]) -> MutableMapping[Any, Any]:
    """Filters target by tree structure in include.

    Args:
        include: Dict of keys from target to include. An empty dict matches
            all values.
        target: Target dict to apply filter to.

    Returns:
        A new dict with values from target filtered out. If a filter key is
        passed that did not match any values, then an empty dict will be
        returned for that key.
    """
    if not include:
        return target
    result = {}
    for key, subkeys in include.items():
        if key in target:
            if subkeys and target[key] is not None:
                result[key] = include_filter(subkeys, target[key])
            else:
                result[key] = target[key]
    return result
f1c10ec383a430700d8e7f58e8c9f6bc5180c844
700,928
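An illustrative call to include_filter above, showing a nested filter (the keys are made up):

>>> include_filter({"a": {}, "b": {"x": {}}}, {"a": 1, "b": {"x": 2, "y": 3}, "c": 4})
{'a': 1, 'b': {'x': 2}}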
import struct


def readRem(byteStream):
    """The rem - remark/comment type tokens"""
    commentLength = struct.unpack('bb', byteStream.read(2))[1]
    bytesRead = 2
    comment = struct.unpack(
        '%ds' % commentLength,
        byteStream.read(commentLength))[0].rstrip(b"\x00")
    bytesRead += commentLength
    return bytesRead, comment
31782ce3a8aa10b306066bd4dc388c62392cc11b
700,930
def get_route_name(route):
    """Returns route name."""
    # split once, take last piece
    name = route.split("/", 1)[-1]
    return name
3d5c916711a7631d4eb90c5eff6f1f745c97a664
700,932
import requests


def get_remote_version(url: str) -> str:
    """Gets the remote file and returns it as a long string."""
    response = requests.get(url)
    if response:
        return response.text
    else:
        return "Url Not Found."
5d5ef45c5b74b326f9386214229529d9b71aca3d
700,934
def single_number_hashtable(nums: list[int]) -> int:
    """Returns the only element in `nums` that appears exactly once

    Complexity:
        n = len(nums)
        Time: O(n)
        Space: O(n)

    Args:
        nums: array of integers s.t. every element appears twice, except for one

    Returns: the only element in `nums` that appears exactly once

    Examples:
        >>> single_number_hashtable([2,2,1])
        1
        >>> single_number_hashtable([4,1,2,1,2])
        4
        >>> single_number_hashtable([1])
        1
        >>> single_number_hashtable([1,1])
        Traceback (most recent call last):
        ...
        ValueError: No element in `nums` appears exactly once.
    """
    num_counts = {}
    for num in nums:
        num_counts[num] = num_counts.get(num, 0) + 1
    for num, count in num_counts.items():
        if count == 1:
            return num
    raise ValueError("No element in `nums` appears exactly once.")
f15b06d4f4683b9604e8b5ab6f2fe9be588551a6
700,935
def metadata(datasets):
    """
    Extract datasette metadata from a CLDF dataset.
    """
    def iter_table_config(cldf):
        for table in cldf.tables:
            try:
                name = cldf.get_tabletype(table)
            except (KeyError, ValueError):
                name = None
            name = name or str(table.url)
            cfg = {}
            try:
                _ = cldf[table, 'name']
                cfg['label_column'] = 'cldf_name'
            except KeyError:
                pass
            if name == 'EntryTable':
                cfg['label_column'] = 'cldf_headword'
            if name == 'SenseTable':
                cfg['label_column'] = 'cldf_description'
            if name == 'ExampleTable':
                cfg['label_column'] = 'cldf_primaryText'
            yield name, cfg

    return {
        "title": "",
        "description_html": "<dl>{0}</dl>".format(''.join([
            '<dt><strong>{0}</strong></dt><dd><em>{1}</em></dd><dd>{2}</dd>'.format(
                dbname, cldf_ds.module, cldf_ds.properties.get('dc:title'))
            for dbname, cldf_ds in datasets.items()
        ])),
        "plugins": {
            "datasette-cluster-map": {
                "latitude_column": "cldf_latitude",
                "longitude_column": "cldf_longitude"
            }
        },
        "databases": {
            dbname: {
                "description": cldf_ds.properties.get('dc:title'),
                "source": cldf_ds.properties.get('dc:bibliographicCitation'),
                "source_url": cldf_ds.properties.get('dc:identifier'),
                "license": cldf_ds.properties.get('dc:license'),
                "tables": dict(iter_table_config(cldf_ds)),
            } for dbname, cldf_ds in datasets.items()
        },
    }
f4927cea1a5587a417e98453cd7df7bf2f75d507
700,936
def parse_card(card: str) -> tuple:
    """Separates the card into value and suit.

    Args:
        card (str): String representing a poker card, in the format ValueSuit,
            like '9D' (9 of Diamonds).

    Returns:
        tuple: Returns a tuple of the card, like (Value, Suit). Ex: '9D' -> ('9', 'D').
    """
    if len(card) == 3:
        # A card of length 3 is 10 + S(uit), so we replace 10 with T to make
        # things easier.
        return 'T', card[2]
    else:
        return card[0], card[1]
de9051906327dfcf01a3b2076acbed216ce43ced
700,937
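Two illustrative calls to parse_card above, including the three-character '10' case:

>>> parse_card("10H")
('T', 'H')
>>> parse_card("AS")
('A', 'S')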
def _reformat_policy(policy):
    """
    Policies returned from boto3 are massive, ugly, and difficult to read.
    This method flattens and reformats the policy.

    :param policy: Result from invoking describe_load_balancer_policies(...)
    :return: Returns a tuple containing policy_name and the reformatted policy dict.
    """
    policy_name = policy['PolicyName']
    ret = {}
    ret['type'] = policy['PolicyTypeName']
    attrs = policy['PolicyAttributeDescriptions']

    if ret['type'] != 'SSLNegotiationPolicyType':
        return policy_name, ret

    attributes = dict()
    for attr in attrs:
        attributes[attr['AttributeName']] = attr['AttributeValue']

    ret['protocols'] = dict()
    ret['protocols']['sslv2'] = bool(attributes.get('Protocol-SSLv2'))
    ret['protocols']['sslv3'] = bool(attributes.get('Protocol-SSLv3'))
    ret['protocols']['tlsv1'] = bool(attributes.get('Protocol-TLSv1'))
    ret['protocols']['tlsv1_1'] = bool(attributes.get('Protocol-TLSv1.1'))
    ret['protocols']['tlsv1_2'] = bool(attributes.get('Protocol-TLSv1.2'))
    ret['server_defined_cipher_order'] = bool(attributes.get('Server-Defined-Cipher-Order'))
    ret['reference_security_policy'] = attributes.get('Reference-Security-Policy', None)

    non_ciphers = [
        'Server-Defined-Cipher-Order',
        'Protocol-SSLv2',
        'Protocol-SSLv3',
        'Protocol-TLSv1',
        'Protocol-TLSv1.1',
        'Protocol-TLSv1.2',
        'Reference-Security-Policy'
    ]

    ciphers = []
    for cipher in attributes:
        if attributes[cipher] == 'true' and cipher not in non_ciphers:
            ciphers.append(cipher)
    ciphers.sort()
    ret['supported_ciphers'] = ciphers

    return policy_name, ret
4eb5372e233d53e6949a90833350831542c5bd01
700,939
def checksum1(data, stringlength):
    """
    Calculate Checksum 1

    Calculate the checksum 1 required for the herkulex data packet

    Args:
        data (list): the data of which the checksum is to be calculated
        stringlength (int): the length of the data

    Returns:
        int: The calculated checksum 1
    """
    value_buffer = 0
    for count in range(0, stringlength):
        value_buffer = value_buffer ^ data[count]
    return value_buffer & 0xFE
504b848b651ae5e8c52c987a6a8e270259e1de44
700,942
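A small sanity check for checksum1 above: the bytes are XORed together and the result masked with 0xFE (the packet bytes are made up):

>>> checksum1([0x07, 0xFD, 0x0A, 0x00, 0x01], 5)
240
>>> hex(240)
'0xf0'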
def allZero(buffer):
    """
    Tries to determine if a buffer is empty.

    @type buffer: str
    @param buffer: Buffer to test if it is empty.

    @rtype: bool
    @return: C{True} if the given buffer is empty, i.e. full of zeros,
        C{False} if it isn't.
    """
    for byte in buffer:
        if byte != 0:
            return False
    return True
8520e57dcd09a914566622098b196965f547ac6f
700,943
import re


def acad_to_academy(text):
    """
    A function to change `Acad` to `Academy`. Smartly.
    Tries to ignore instances of `Acad` followed by `emy` or `emies`.

    Usage:
        from core.utils import acad_to_academy
        replace_str = 'Harmony Science Acad (Waco)'
        print(acad_to_academy(replace_str))
        # 'Harmony Science Academy (Waco)'
    """
    return re.sub("Acad(?!(emy|emies|emia))", "Academy", text)
50f515d2b0c67a5a50799c92ea0a51d6828107b9
700,944
def _earlygetopt(aliases, args):
    """Return list of values for an option (or aliases).

    The values are listed in the order they appear in args.
    The options and values are removed from args.

    >>> args = ['x', '--cwd', 'foo', 'y']
    >>> _earlygetopt(['--cwd'], args), args
    (['foo'], ['x', 'y'])

    >>> args = ['x', '--cwd=bar', 'y']
    >>> _earlygetopt(['--cwd'], args), args
    (['bar'], ['x', 'y'])

    >>> args = ['x', '-R', 'foo', 'y']
    >>> _earlygetopt(['-R'], args), args
    (['foo'], ['x', 'y'])

    >>> args = ['x', '-Rbar', 'y']
    >>> _earlygetopt(['-R'], args), args
    (['bar'], ['x', 'y'])
    """
    try:
        argcount = args.index("--")
    except ValueError:
        argcount = len(args)
    shortopts = [opt for opt in aliases if len(opt) == 2]
    values = []
    pos = 0
    while pos < argcount:
        fullarg = arg = args[pos]
        equals = arg.find('=')
        if equals > -1:
            arg = arg[:equals]
        if arg in aliases:
            del args[pos]
            if equals > -1:
                values.append(fullarg[equals + 1:])
                argcount -= 1
            else:
                if pos + 1 >= argcount:
                    # ignore and let getopt report an error if there is no value
                    break
                values.append(args.pop(pos))
                argcount -= 2
        elif arg[:2] in shortopts:
            # short option can have no following space, e.g. hg log -Rfoo
            values.append(args.pop(pos)[2:])
            argcount -= 1
        else:
            pos += 1
    return values
034bf7e4cde24cc45ed888b742361ce0a4ed1a8b
700,945
def chunks(l, n):
    """
    Yield successive n-sized chunks from l.
    https://stackoverflow.com/a/1751478
    """
    n = max(1, n)
    return (l[i:i + n] for i in range(0, len(l), n))
01bcae9d06bf430874717b70039c79de532b9fd4
700,946
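An illustrative call to chunks above; the final chunk may be shorter than n:

>>> list(chunks([1, 2, 3, 4, 5], 2))
[[1, 2], [3, 4], [5]]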
def plugin_zip(p):
    """Maps columns to values for each row in a plugin's sql_response and
    returns a list of dicts

    Parameters
    ----------
    p : :class:`taniumpy.object_types.plugin.Plugin`
        * plugin object

    Returns
    -------
    dict
        * the columns and result_rows of the sql_response in the Plugin
          object zipped up into a dictionary
    """
    return [
        dict(list(zip(p.sql_response.columns, x)))
        for x in p.sql_response.result_row
    ]
b91e5403fd710875c4588afc0eba3da1b1a82c4a
700,947
import platform
import subprocess


def ping(host):
    """
    Returns True if host responds to a ping request
    """
    # Ping count parameter differs by OS
    ping_num_param = "-n" if platform.system().lower() == "windows" else "-c"

    # Ping
    return subprocess.run(['ping', ping_num_param, '1', host],
                          stdout=subprocess.DEVNULL,
                          stderr=subprocess.DEVNULL).returncode == 0
7665ab5c43380e9b847037c84e03296f00f348a1
700,948
def get_string_from_ascii(input):
    """
    This function is reverse engineered and translated to Python based on
    the CoderUtils class in the ELRO Android app.

    :param input: A hex string
    :return: A string
    """
    try:
        if len(input) != 32:
            return ''
        byt = bytearray.fromhex(input)
        name = "".join(map(chr, byt))
        name = name.replace("@", "").replace("$", "")
    except ValueError:
        return ''
    return name
fdf878ac689c614720e9ad5ebeecbd32c9f893b1
700,949
def split_multiline(value):
    """Special behaviour when we have a multi line option"""
    value = [element for element in
             (line.strip() for line in value.split('\n'))
             if element]
    return value
a5eecefb94a79639afe3582e4c3cfb8e7a0adf6f
700,950
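An illustrative call to split_multiline above; blank lines and surrounding whitespace are stripped (the option value is made up):

>>> split_multiline("keywords\n    python\n\n    packaging\n")
['keywords', 'python', 'packaging']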
def compute_mean_median(all_methods_df):
    """
    Computes the mean values and the median values for each column of the
    Pandas.DataFrame grouped by the methods.

    Parameters
    ----------
    all_methods_df : Pandas.DataFrame

    Returns
    -------
    list
        - means: Pandas.DataFrame containing the mean values for each column
          of the Pandas.DataFrame grouped by the methods
        - medians: Pandas.DataFrame containing the median values for each
          column of the Pandas.DataFrame grouped by the methods
    """
    grouped = all_methods_df.groupby("method")
    means = round(grouped.mean(), 4)
    medians = round(grouped.median(), 4)
    return [means, medians]
b4397bfcef8e0215e3478f1f50708aa4608a14e2
700,953
import torch


def get_cosinebased_yaw_pitch(input_: torch.Tensor) -> torch.Tensor:
    """
    Returns a tensor with two columns, being yaw and pitch respectively.
    For yaw, it uses cos(yaw)'s value along with sin(yaw)'s sign.

    Args:
        input_: 1st column is sin(yaw), 2nd column is cos(yaw), 3rd column is sin(pitch)
    """
    yaw_pitch_cosine = torch.zeros((input_.shape[0], 2))
    yaw_pitch_cosine[:, 1] = torch.asin(input_[:, 2])
    yaw = torch.acos(input_[:, 1])
    right = (input_[:, 0] < 0.)
    yaw[right] = -1 * yaw[right]
    yaw_pitch_cosine[:, 0] = yaw
    return yaw_pitch_cosine
ca5c10908de8dfa8b86446a1874b1ef780ae5313
700,954
import re


def CommitPositionFromBuildProperty(value):
    """Extracts the chromium commit position from a builder's
    got_revision_cp property."""
    # Match a commit position from a build properties commit string like
    # "refs/heads/master@{#819458}"
    test_arg_commit_position_re = r'\{#(?P<position>\d+)\}'
    match = re.search(test_arg_commit_position_re, value)
    if match:
        return int(match.group('position'))
    raise RuntimeError('Could not get chromium commit position from test arg.')
c0fdb07ce3be907db3ec8628eaefc3ad1453ef34
700,955