content
stringlengths
22
815k
id
int64
0
4.91M
def SMLB(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """Shifted Minerbo-Levy distribution, normalised so that SMLB(0) = 0.

    Taken from Minerbo, G. N. and Levy, M. E., "Inversion of Abel's
    integral equation by means of orthogonal polynomials.", SIAM J.
    Numer. Anal. 6, 598-616 and swapped to satisfy SMLB(0) = 0.
    """
    # u = 2x - x^2; density is 1.241 * u^-1.5 * exp(1.21 * (1 - 1/u)),
    # kept only where x is meaningfully positive, then rescaled by a
    # fixed normalisation constant.
    u = 2 * x - x ** 2
    density = 1.241 * np.multiply(np.power(u, -1.5),
                                  np.exp(1.21 * (1 - np.power(u, -1))))
    masked = np.where(x > 0.00000000001, density, 0)
    return masked / 0.9998251040790366
27,000
def add_colon(in_str):
    """Insert a colon between every group of 4 characters."""
    chunks = (in_str[pos:pos + 4] for pos in range(0, len(in_str), 4))
    return ':'.join(chunks)
27,001
def fetch_partial_annotations():
    """Return the partial annotations as an array.

    Returns:
        partial_annotations: array of annotation data - [n_annotations, 5]
            row format is [T, L, X, Y, Z]
    """
    mat_contents = loadmat(PARTIAL_ANNOTATIONS_PATH)
    # Drop the extra (unexplained) trailing column of the raw matrix.
    return mat_contents['divisionAnnotations'][:, :-1]
27,002
def get_historical_tweets(query: str, start_date: str, end_date: str,
                          top_only=True, max_tweets=1000):
    """Fetch historical tweets matching `query` between two dates.

    Args:
        query: Search string passed to GetOldTweets3.
        start_date: Inclusive start date, e.g. "2020-06-01".
        end_date: End date (exclusive in GOT3), e.g. "2020-06-15".
        top_only: Restrict results to "top" tweets.
        max_tweets: Maximum number of tweets to retrieve.

    Returns:
        The list of tweet objects returned by GetOldTweets3.
    """
    # Bug fix: start_date/end_date were previously ignored in favour of
    # hard-coded "2020-06-01"/"2020-06-15"; the result was also only
    # printed (type only), never returned.
    tweetCriteria = got.manager.TweetCriteria()\
        .setQuerySearch(query)\
        .setTopTweets(top_only)\
        .setLang("en")\
        .setSince(start_date)\
        .setUntil(end_date)\
        .setMaxTweets(max_tweets)
    tweets = got.manager.TweetManager.getTweets(tweetCriteria)
    return tweets
27,003
async def test_download_diagnostics(hass, hass_client):
    """Test record service."""
    # Fetching diagnostics for the fake config entry should return the
    # stub payload; presumably {"hello": "info"} is what the test
    # integration registers — confirm against the fixture setup.
    assert await get_diagnostics_for_config_entry(
        hass, hass_client, "fake_integration"
    ) == {"hello": "info"}
27,004
def initialize_program():
    """ Get REST configuration """
    # Populate the module-level CONFIG dict from the configuration
    # service's 'config/rest_services' endpoint.
    global CONFIG
    data = call_responder('config', 'config/rest_services')
    CONFIG = data['config']
27,005
def get_alt_pos_info(rec):
    """Return info about the second-most-common nucleotide at a position.

    This nucleotide will usually differ from the reference nucleotide,
    but it may be the reference (i.e. at positions where the reference
    disagrees with the alignment's "consensus"). Ties are broken
    arbitrarily.

    Parameters
    ==========
    rec: dict
        pysamstats record for a given position in an alignment produced
        by stat_variation().

    Returns
    =======
    (cov, alt nt freq, alt nt): tuple of (int, int, str)
        cov is the (mis)match coverage at this position: the sum of the
        A, C, G, T counts (degenerate nucleotides such as N are
        excluded). It can be zero if no reads align here. The second
        entry is the raw count of the alternate nucleotide (a.k.a.
        alt(pos)), and the third entry is that nucleotide itself.
    """
    nts = ("A", "C", "G", "T")
    coverage = sum(rec[nt] for nt in nts)
    # Rank the four nucleotides by observed count (ascending, stable);
    # the runner-up is the "alternate" nucleotide.
    runner_up = sorted(nts, key=lambda nt: rec[nt])[-2]
    return (coverage, rec[runner_up], runner_up)
27,006
def accuracy(output, target, topk=(1,)):
    """Compute the top-k accuracy of `output` against `target` for each k.

    Args:
        output: (batch, n_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of one-element tensors, one per k, each holding accuracy
        as a percentage of the batch.
    """
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)

        # Indices of the k_max highest scores, transposed to
        # (k_max, batch) so each row aligns with the targets.
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

        percentages = []
        for k in topk:
            n_hits = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            percentages.append(n_hits.mul_(100.0 / n_samples))
        return percentages
27,007
def post(out):
    """Post `out` to the Slack webhook as a formatted attachment.

    The payload controls how the message renders in Slack; `out` is the
    text displayed under the fixed title.
    """
    attachment = {
        "title": "THANKS FOR THE RIDE!",
        "text": out,
        "color": "#7CD197",
        "image_url": "https://s3.amazonaws.com/gozde-patron/countdown/fireworks.jpg"
    }
    payload = {"attachments": [attachment]}
    requests.post(SLACK_URL, data=json.dumps(payload))
27,008
def main():
    """Check the last-modified header for a list of URLs.

    Reads a comma-delimited input file whose first column is a URL,
    fetches each valid URL, and writes a CSV with the URL, its validity,
    the HTTP status and the last-modified header (empty if absent).
    """
    # Parse arguments from command line
    parser = argparse.ArgumentParser(
        description='Check value of last-modified date header for list of URLs')
    args = parseCommandLine(parser)
    fileIn = args.fileIn
    fileOut = args.fileOut
    separator = ","

    # Read input file
    try:
        fIn = open(fileIn, "r", encoding="utf-8")
    except IOError:
        msg = 'could not read file ' + fileIn
        errorExit(msg)

    # Parse input file as comma-delimited data (no header row)
    try:
        inCSV = csv.reader(fIn, delimiter=',')
        inRows = [row for row in inCSV]
        fIn.close()
    except csv.Error:
        fIn.close()
        msg = 'could not parse ' + fileIn
        errorExit(msg)

    # Output records; the first row is the column header
    outRows = [['url', 'isValidUrl', 'http-status', 'last-modified']]

    for inRow in inRows:
        url = inRow[0]
        # Validate url
        urlIsValid = validators.url(url)
        if urlIsValid:
            isValidUrl = True
            res = requests.get(url)
            httpStatus = res.status_code
            # Header may be absent; fall back to an empty string
            lastModified = res.headers.get('last-modified', "")
        else:
            isValidUrl = False
            httpStatus = ""
            lastModified = ""
        outRows.append([url, isValidUrl, httpStatus, lastModified])

    # Open output file
    try:
        fOut = open(fileOut, "w", encoding="utf-8")
    except IOError:
        # Bug fix: previously reported 'could not read file ' + fileIn here
        msg = 'could not write file ' + fileOut
        errorExit(msg)

    # Write CSV
    try:
        outCSV = csv.writer(fOut, delimiter=separator, lineterminator='\n')
        for row in outRows:
            outCSV.writerow(row)
        fOut.close()
    except IOError:
        msg = 'could not write file ' + fileOut
        errorExit(msg)
27,009
def grim(n, mu, prec=2, n_items=1):
    """Check whether a reported mean is numerically possible (GRIM test).

    Test that a mean mu reported with a decimal precision prec is
    possible, given a number of observations n and a number of items
    n_items.

    :param n: The number of observations
    :param mu: The mean
    :param prec: The precision (i.e., number of decimal places) of the mean
    :param n_items: The number of scale items that were averaged. Default is 1.
    :return: True if the mean is possible, False otherwise.
    """
    if n * n_items >= 10 ** prec:
        warn("The effective number of data points is such that GRIM will always find a solution.")

    # Nearest integer sum of raw scores implied by the reported mean,
    # then reconstruct the achievable mean from it and compare.
    candidate_sum = np.round(mu * n * n_items, 0)
    reconstructed = np.round(candidate_sum / n / n_items, prec)
    return reconstructed == np.round(mu, prec)
27,010
def split_dataframe(df: pd.DataFrame, split_index: np.ndarray):
    """Select the continuous-variable columns of a dataframe.

    Params:
        df : Pandas dataframe
        split_index : Indices of continuous variables (a label or
            boolean indexer, applied via ``df.loc``)

    Returns:
        numpy array of the selected columns' values.
    """
    selected = df.loc[:, split_index]
    return selected.values
27,011
def user_select_columns():
    """
    Useful columns from the users table, omitting authentication-related
    columns like password.
    """
    u = orm.User.__table__
    # Explicit whitelist: credential-related columns (password hashes,
    # tokens, ...) are deliberately excluded from this selection.
    return [
        u.c.id,
        u.c.user_name,
        u.c.email,
        u.c.first_name,
        u.c.last_name,
        u.c.org,
        u.c.created_at,
        u.c.updated_at,
        u.c.sign_in_count,
        u.c.last_sign_in_at
    ]
27,012
def remove_query_param(url, key):
    """Return `url` with the query parameter `key` removed, if present.

    The remaining parameters are re-encoded in sorted order.
    """
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    params = urlparse.parse_qs(query)
    params.pop(key, None)
    new_query = urlparse.urlencode(sorted(params.items()), doseq=True)
    return urlparse.urlunsplit((scheme, netloc, path, new_query, fragment))
27,013
def test_ap_hs20_sim(dev, apdev):
    """Hotspot 2.0 with simulated SIM and EAP-SIM"""
    # Requires the HLR/AuC gateway test helper to be running.
    if not hlr_auc_gw_available():
        return "skip"
    hs20_simulated_sim(dev[0], apdev[0], "SIM")
    # A second auto interworking select should detect the existing
    # connection instead of reconnecting.
    dev[0].request("INTERWORKING_SELECT auto freq=2412")
    ev = dev[0].wait_event(["INTERWORKING-ALREADY-CONNECTED"], timeout=15)
    if ev is None:
        raise Exception("Timeout on already-connected event")
27,014
def FreshReal(prefix='b', ctx=None):
    """Return a fresh real constant in the given context using the given prefix.

    >>> x = FreshReal()
    >>> y = FreshReal()
    >>> eq(x, y)
    False
    >>> x.sort()
    Real
    """
    ctx = _get_ctx(ctx)
    fresh_ast = Z3_mk_fresh_const(ctx.ref(), prefix, RealSort(ctx).ast)
    return ArithRef(fresh_ast, ctx)
27,015
def ndim_rectangles_integral(
        # main args
        func, up_limits, low_limits, ndim, nsamples=10000, args_func=None,
        # demo plot args
        verbose=False,
        args_subplots=None,
        args_suptitle=None,
        args_scatter_mesh=None,
        args_scatter_func=None,
        args_legend=None,
        dim_labels=None):
    """
    Integrate `func` over an n-dimensional box using the textbook
    rectangle (midpoint) method.  Heavy usage of numpy functions to
    benefit from parallelization.

    Tip: to save RAM, divide the integration space into sub-spaces and
    integrate one at a time.

    v0.1

    Parameters
    ----------
    func : function
        Callable to integrate.  Receives an array of coordinates of
        shape (npoints, ndim) — or a single coordinate of shape (ndim,)
        if it cannot handle batches.  Extra arguments are passed through
        `args_func`.
    up_limits : array_like
        Upward bounds of integration.  Expected shape = (ndim,).
    low_limits : array_like
        Downward bounds of integration.  Expected shape = (ndim,).
    ndim : int
        Number of dimensions.
    nsamples : int or array_like, optional
        Number of samples in each dimension, shape (ndim,).  If an
        integer is given, samples are split between dimensions as
        nsamples**(1/ndim).
    args_func : dict, optional
        Supplementary keyword arguments passed to `func`.
    verbose : bool, optional
        Plot the integration-space meshing and samples with matplotlib.
        This is computationally intensive; use for verification only,
        with a low number of samples.
    args_subplots, args_suptitle, args_scatter_mesh, args_scatter_func,
    args_legend : dict, optional
        Keyword arguments forwarded to the corresponding plt calls
        (verbose=True only).
    dim_labels : array_like, optional
        Label of each dimension (verbose=True only), shape (ndim,).

    Returns
    -------
    result : float
        The result of the integration.

    Example
    -------
    >>> from scipy import stats
    >>> import numpy as np
    >>> ndim = 3
    >>> pdf = lambda x: stats.multivariate_normal.pdf(
    ...     x, mean=np.zeros(ndim), cov=np.eye(ndim))
    >>> ndim_rectangles_integral(func=pdf,
    ...                          up_limits=np.full(ndim, 4),
    ...                          low_limits=np.full(ndim, -4),
    ...                          ndim=ndim,
    ...                          nsamples=np.full(ndim, 11))  # doctest: +SKIP
    """
    # ---------------------------------------------------------------
    # Fix: the plot-option defaults used to be mutable dict literals in
    # the signature (a classic Python pitfall); build them per call.
    if args_func is None:
        args_func = {}
    if args_subplots is None:
        args_subplots = {'sharex': True, 'sharey': True, 'figsize': (10, 10)}
    if args_suptitle is None:
        args_suptitle = {'fontsize': 16}
    if args_scatter_mesh is None:
        args_scatter_mesh = {'marker': "+", 'color': "black",
                             'label': "rectangular mesh"}
    if args_scatter_func is None:
        args_scatter_func = {'marker': "o", 'label': "computed points"}
    if args_legend is None:
        args_legend = {}

    # supporting int as nsamples argument
    if isinstance(nsamples, int):
        nsamples = np.full(ndim, int(nsamples ** (1 / ndim)))

    # checking arguments
    if not (len(up_limits) == len(low_limits) == ndim == len(nsamples)):
        raise ValueError("Shapes should be len(up_limits)=len(low_limits)=ndim")

    # ---------------------------------------------------------------
    # todo: max_memory argument / automated space division
    # ---------------------------------------------------------------
    # hyperrectangle edge size in each dimension
    ndx = np.array([(up_limits[dim] - low_limits[dim]) / (nsamples[dim] - 1)
                    for dim in range(ndim)])
    # hyperrectangle volume
    vol = np.prod(ndx)
    # hyperrectangle centers: midpoints along each axis
    ncenters = np.array([np.linspace(start=low_limits[dim] + ndx[dim] / 2,
                                     stop=up_limits[dim] - ndx[dim] / 2,
                                     num=nsamples[dim] - 1)
                         for dim in range(ndim)])
    del ndx
    # full grid of center coordinates, reshaped to (npoints, ndim)
    ncoords_centers = np.array(np.meshgrid(*ncenters))
    del ncenters
    ncoords_centers = ncoords_centers.reshape(
        ncoords_centers.shape[0], np.prod(ncoords_centers.shape[1:]))
    ncoords_centers = ncoords_centers.transpose()

    # ---------------------------------------------------------------
    # integral computation
    try:
        # fast path: func supports an array of coords
        mapped_func = func(ncoords_centers, **args_func)
    except Exception:
        # fallback: func only supports one coord at a time
        # (fix: was a bare `except:`, which also swallowed e.g. Ctrl-C)
        mapped_func = np.array([func(ncoords_centers[i], **args_func)
                                for i in range(ncoords_centers.shape[0])])
    # sum of samples times the (constant) cell volume
    integral = np.sum(mapped_func) * vol

    # ---------------------------------------------------------------
    # todo: error computation
    # ---------------------------------------------------------------
    # mesh plot for visualisation purposes
    if verbose:
        # meshing edges for display
        nedges = np.array([np.linspace(start=low_limits[dim],
                                       stop=up_limits[dim],
                                       num=nsamples[dim])
                           for dim in range(ndim)], dtype=object)
        ncoords_edges = np.array(np.meshgrid(*nedges))
        ncoords_edges = ncoords_edges.reshape(
            ncoords_edges.shape[0], np.prod(ncoords_edges.shape[1:]))

        # plot
        fig, ax = plt.subplots(ndim, ndim, **args_subplots)

        # title (user kwargs override the default text)
        args_suptitle_default = {'t': "Mesh and func samples used. Integral = %f" % (integral)}
        args_suptitle_default.update(args_suptitle)
        fig.suptitle(**args_suptitle_default)

        for i in range(ndim):
            for j in range(ndim):
                # mesh: plot
                ax[i, j].scatter(ncoords_edges[i, :], ncoords_edges[j, :],
                                 **args_scatter_mesh)

                # df sample points: merge superposed (i, j) projections,
                # summing the sampled values along the other dimensions
                temp_centers_ij = np.append(
                    ncoords_centers[:, [i, j]],
                    mapped_func.reshape(mapped_func.shape[0], 1), axis=1)
                temp_centers_ij = temp_centers_ij[
                    np.lexsort((temp_centers_ij[:, 0], temp_centers_ij[:, 1]))]
                unique_centers = []
                unique_prob = []
                counter = -1
                for k in range(temp_centers_ij.shape[0]):
                    if np.sum(temp_centers_ij[k, 0:2] != temp_centers_ij[k - 1, 0:2]):
                        unique_prob.append(temp_centers_ij[k, 2])
                        unique_centers.append(temp_centers_ij[k, 0:2])
                        counter += 1
                    else:
                        unique_prob[counter] += temp_centers_ij[k, 2]
                unique_centers = np.array(unique_centers)
                unique_prob = np.array(unique_prob)

                # todo: use an image instead of points for the sampled pdf
                # df sample points: plot
                df_plot = ax[i, j].scatter(unique_centers[:, 0],
                                           unique_centers[:, 1],
                                           c=unique_prob, **args_scatter_func)
                plt.colorbar(df_plot, ax=ax[i, j])

                # labels
                # Fix: `dim_labels != None` is unidiomatic and breaks for
                # array-like labels; use an identity test.
                if dim_labels is not None:
                    ax[i, j].set_xlabel(dim_labels[i])
                    ax[i, j].set_ylabel(dim_labels[j])

        # legend: de-duplicate labels across the grid of axes
        handles, labels = plt.gca().get_legend_handles_labels()
        by_label = collections_OrderedDict(zip(labels, handles))
        fig.legend(by_label.values(), by_label.keys(), **args_legend)

    # ---------------------------------------------------------------
    return integral
27,016
def expand_inhomog_tuple_assignments(block, language_has_vectors = False):
    """
    Simplify expressions in a CodeBlock by unravelling tuple assignments
    into multiple lines.

    Parameters
    ==========
    block : CodeBlock
        The expression to be modified.
    language_has_vectors : bool
        If False, homogeneous-tuple assignments are first rewritten as
        explicit allocations (the target language cannot assign vectors
        directly).

    Examples
    --------
    >>> from pyccel.ast.builtins import PythonTuple
    >>> from pyccel.ast.core import Assign, CodeBlock
    >>> from pyccel.ast.literals import LiteralInteger
    >>> from pyccel.ast.variable import Variable
    >>> a = Variable('int', 'a', shape=(), rank=0)
    >>> b = Variable('int', 'b', shape=(), rank=0)
    >>> c = Variable('int', 'c', shape=(), rank=0)
    >>> expr = [Assign(PythonTuple(a,b,c),
    ...         PythonTuple(LiteralInteger(0),LiteralInteger(1),LiteralInteger(2)))]
    >>> expand_inhomog_tuple_assignments(CodeBlock(expr))
    [Assign(a, LiteralInteger(0)), Assign(b, LiteralInteger(1)), Assign(c, LiteralInteger(2))]
    """
    if not language_has_vectors:
        # Homogeneous tuple targets fed from tuple-like right-hand sides
        # need real storage before they can be unravelled.
        allocs_to_unravel = [a for a in block.get_attribute_nodes(Assign) \
                    if isinstance(a.lhs, HomogeneousTupleVariable) \
                    and isinstance(a.rhs, (HomogeneousTupleVariable, Duplicate, Concatenate))]
        # Stack arrays get a NumpyEmpty initialisation; heap (allocatable)
        # targets are left as-is; everything else gets an explicit Allocate.
        # NOTE(review): the middle branch yields a bare `a` rather than a
        # tuple like the other two branches — confirm substitute() accepts
        # this mixed shape.
        new_allocs = [(Assign(a.lhs, NumpyEmpty(a.lhs.shape,
                                         dtype=a.lhs.dtype,
                                         order=a.lhs.order)
                         ), a)
                      if a.lhs.is_stack_array else
                      (a)
                      if a.lhs.allocatable else
                      (Allocate(a.lhs, shape=a.lhs.shape, order = a.lhs.order, status="unknown"), a)
                      for a in allocs_to_unravel]
        block.substitute(allocs_to_unravel, new_allocs)

    # Unravel inhomogeneous tuple assignments element-by-element.
    assigns = [a for a in block.get_attribute_nodes(Assign) \
                if isinstance(a.lhs, InhomogeneousTupleVariable) \
                and isinstance(a.rhs, (PythonTuple, InhomogeneousTupleVariable))]
    if len(assigns) != 0:
        new_assigns = [[Assign(l,r) for l,r in zip(a.lhs, a.rhs)] for a in assigns]
        block.substitute(assigns, new_assigns)
        # Recurse: unravelling may expose nested tuple assignments;
        # terminates once no inhomogeneous tuple assignments remain.
        expand_inhomog_tuple_assignments(block)
27,017
def handle_readable(client):
    """Echo one message back to `client` with a 'SERVER: ' prefix.

    Return True: The client is re-registered to the selector object.
    Return False: The server disconnects the client (empty read = EOF).
    """
    message = client.recv(1028)
    if message != b'':
        client.sendall(b'SERVER: ' + message)
        print(threading.active_count())
        return True
    return False
27,018
def test_referencelibrary_construction_list():
    """Check that we construct reference library where sub-props are lists."""
    book = ReferenceBook("a")
    library = ReferenceLibrary(books=[book])
    # The list passed to the constructor should be stored unchanged.
    assert library.books == [book]
27,019
def fund_wallet():
    """
    ---
    post:
      summary: fund a particular wallet
      description: sends funds to a particular user given the user id
        the amount will be removed from the wallet with the respective
        currency, if not it falls to the default wallet. if the sender is
        admin no money will be deducted from any wallet else an amount
        will be deducted from the sender wallet with the respective
        currency. so that means that admin can geenrate cash while other
        users can perform transactions between wallets.
      requestBody:
        required: true
        content:
          application/json:
            schema: Fund
      responses:
        '200':
          description: success
          content:
            application/json:
              schema: TransactionResponse
      tags:
          - user
          - admin
    """
    # NOTE(review): `error`, `ok` and `unauthorized` below are presumably
    # module-level status constants — confirm where they are defined.
    try:
        required = ["currency", "amount", "receiver"]
        data = request.get_json()
        if not all([rq in data.keys() for rq in required]):
            return jsonify(status=error, message="Missing Required JSON Field!")
        amount = data["amount"]
        currency = data["currency"]
        receiver_id = data["receiver"]
        if not CurrencyUtils.iscurrency_valid(currency):
            return jsonify(status=error, message="Please Enter a valid Currency code"), 400
        if g.user.role.name != "Admin":
            # Non-admin senders pay from their own wallet; prefer one in
            # the requested currency, else fall back to the main-currency
            # wallet and convert.
            sender_wallet = g.user.wallet.filter_by(currency=currency).first()
            if sender_wallet is None:
                # NOTE(review): no `.first()` here, so sender_wallet is a
                # query object, not a Wallet — the `.currency`/`.balance`
                # accesses below look broken; confirm and fix separately.
                sender_wallet = g.user.wallet.filter_by(
                    currency=g.user.main_currency)
                if CurrencyUtils.convert_currency(sender_wallet.currency.upper(), currency.upper(), sender_wallet.balance) < amount:
                    return jsonify(status=error, message="Insufficient fund!"), 403
                amount = CurrencyUtils.convert_currency(
                    sender_wallet.currency.upper(), currency.upper(), amount)
            else:
                if sender_wallet.balance < amount:
                    return jsonify(status=error, message="Insufficient fund!"), 403
        receiver = User.query.filter_by(id=receiver_id).first()
        if not receiver:
            return jsonify(status=error, message=f"Sorry User with id {receiver_id} does not exsits!"), 400
        if receiver.role.name == "Admin":
            return jsonify(status=unauthorized, message="Sorry Admin account can't be funded!"), 403
        receiver_wallet = receiver.wallet.filter_by(currency=currency).first()
        if receiver_wallet is None:
            # Elite users get a wallet created on demand in the requested
            # currency; Noob users fall back to their main-currency wallet.
            if receiver.role.name == "Elite":
                new_wallet = Wallet(currency=currency, user_id=receiver.id)
                db.session.add(new_wallet)
                db.session.commit()
                receiver_wallet = new_wallet
            elif receiver.role.name == "Noob":
                receiver_wallet = receiver.wallet.filter_by(
                    currency=receiver.main_currency.lower()).first()
        # Admin transfers have no sender wallet (money is created).
        if g.user.role.name == "Admin":
            tx = Transaction(receiver=receiver_wallet.id, sender=None,
                             amount=amount, currency=currency,
                             at=datetime.datetime.utcnow())
        else:
            tx = Transaction(receiver=receiver_wallet.id, sender=sender_wallet.id,
                             amount=amount, currency=currency,
                             at=datetime.datetime.utcnow())
        # Transfers to Noob users require approval before taking effect.
        if receiver.role.name == "Noob":
            tx.isapproved = False
        db.session.add(tx)
        db.session.commit()
        return jsonify(status=ok, data=tx.serialize), 200
    except SyntaxError as e:
        # NOTE(review): catching SyntaxError here is suspicious — JSON /
        # validation failures raise other exception types; confirm intent.
        logging.error(e)
        return jsonify(status=error, message=str(e)), 400
27,020
def create_objective(dist, abscissas):
    """Build the local objective used for abscissa optimisation.

    The returned callable scores a candidate point by the negated square
    root of the distribution's density there, weighted by the product of
    its distances to every interior abscissa.
    """
    interior = numpy.array(abscissas[1:-1])

    def objective(candidate):
        """Local objective function."""
        score = -numpy.sqrt(dist.pdf(candidate))
        return score * numpy.prod(numpy.abs(interior - candidate))

    return objective
27,021
def get_party_to_seats(year, group_id, party_to_votes):
    """Give votes by party, compute seats for party.

    Seats are assigned by a largest-remainder style scheme: every group
    except the national total appears to reserve one "bonus" seat for
    the overall vote winner, then the remaining seats are divided among
    eligible parties by whole quotas, with leftovers going to the
    largest fractional remainders.
    """
    eligible_party_list = get_eligible_party_list(
        group_id,
        party_to_votes,
    )
    if not eligible_party_list:
        return {}

    # Total seats for this group/year; bonus seat everywhere except the
    # country-wide aggregate.
    n_seats = YEAR_TO_REGION_TO_SEATS[year][group_id]
    n_seats_bonus = 0 if (group_id == COUNTRY_ID) else 1
    n_seats_non_bonus = n_seats - n_seats_bonus

    # Party with the most votes receives the bonus seat (0 seats when
    # there is no bonus).
    winning_party = sorted(party_to_votes.items(), key=lambda x: -x[1],)[
        0
    ][0]
    party_to_seats = {winning_party: n_seats_bonus}

    # Total votes of eligible parties only; used to derive the quota.
    relevant_num = sum(
        list(
            map(
                lambda party: party_to_votes[party],
                eligible_party_list,
            )
        )
    )
    party_r = []
    n_seats_r = n_seats_non_bonus
    # Integer quota of votes per seat (floor division via int()).
    resulting_num = (int)(relevant_num / n_seats_non_bonus)
    for party in eligible_party_list:
        seats_r = party_to_votes[party] / resulting_num
        seats_non_bonus_whole = (int)(seats_r)
        party_to_seats[party] = (
            party_to_seats.get(party, 0) + seats_non_bonus_whole
        )
        # Remember the fractional remainder for the leftover round.
        party_r.append((party, seats_r % 1))
        n_seats_r -= seats_non_bonus_whole

    # Hand out remaining seats to the largest fractional remainders.
    party_r = sorted(party_r, key=lambda x: -x[1])
    for i in range(0, n_seats_r):
        party = party_r[i][0]
        party_to_seats[party] = party_to_seats.get(party, 0) + 1
    return party_to_seats
27,022
def get_product(name, version):
    """Get info about a specific version of a product"""
    # Delegates the lookup to the registry; to_dict() shapes the JSON body.
    product = registry.get_product(name, version)
    return jsonify(product.to_dict())
27,023
def test_extract_fields() -> None:
    """ Test extract_fields """
    # Empty input yields empty output regardless of requested indices.
    assert extract_fields([], [0]) == []
    assert extract_fields(['abc', 'def'], [0]) == ['abc']
    # Indices select and reorder; the out-of-range index 2 is dropped.
    assert extract_fields(['abc', 'def'], [1, 0, 2]) == ['def', 'abc']
27,024
def _is_segment_in_block_range(segment, blocks): """Return whether the segment is in the range of one of the blocks.""" for block in blocks: if block.start <= segment.start and segment.end <= block.end: return True return False
27,025
def acrobatic(m):
    """More power and accuracy at the cost of increased complexity; can stun"""
    # Not applicable if the move already deals agility- or strength-based
    # damage, or if it is a takedown.
    if 'do_agility_based_dam' in m['functions'] or 'do_strength_based_dam' in m['functions']:
        return None
    if 'takedown' in m['features']:
        return None
    # Work on a copy so the base move definition is left untouched.
    m = m.copy()
    mult(m, 'stam_cost', 1.25)
    add(m, 'complexity', 2)
    up_tier(m, 3)
    add_fun(m, 'do_agility_based_dam')
    add_fun(m, 'do_strength_based_dam')
    # Rarer, but never below the minimum frequency of 1.
    add(m, 'freq', -2, mn=1)
    prefix(m, 'Acrobatic')
    return m
27,026
def systemd(config, args):
    """ Build and install systemd scripts for the server """
    try:
        config = load_config(args.cfg_file, explicit=True)
    except (OSError, configparser.ParsingError) as exc:
        print(exc, file=sys.stderr)
        return 1

    print("Building systemd services")
    # Inside a virtualenv the service files must point at the venv's
    # interpreter/scripts rather than the system ones.
    using_venv = sys.prefix != sys.base_prefix
    if using_venv:
        print(f" - Detected virtualenv: {sys.prefix}")
        print("   Service files will be built for this virutalenv")

    # System-wide installs get generic service names; site installs get
    # hostname-qualified names so multiple sites can coexist.
    site_path = None
    global_site = config.get("taky", "cfg_path") == "/etc/taky/taky.conf"
    if global_site:
        print(" - Detected system-wide site install")
        svcs = {
            "taky": "taky.service",
            "cot": "taky-cot.service",
            "dps": "taky-dps.service",
        }
    else:
        site_path = os.path.dirname(config.get("taky", "cfg_path"))
        hostname = config.get("taky", "hostname")
        print(f" - Detected site install: {site_path}")
        svcs = {
            "taky": f"taky-{hostname}.service",
            "cot": f"taky-{hostname}-cot.service",
            "dps": f"taky-{hostname}-dps.service",
        }

    if not args.user:
        print(
            " - WARNING: taky will run as root! It's strongly recommended",
            file=sys.stderr,
        )
        print("   to create a system user for taky!", file=sys.stderr)

    # Do not overwrite files if they exist
    for svc in svcs:
        path = os.path.join(args.path, svcs[svc])
        if os.path.exists(path):
            print(f"ERROR: Refusing to overwite service file: {path}", file=sys.stderr)
            return 1

    print(f" - Writing services to {args.path}")
    try:
        print(f"   - Writing {svcs['cot']}")
        write_cot_svc(svcs, config, args, using_venv, site_path)
        if args.dps:
            print(f"   - Writing {svcs['dps']}")
            write_dps_svc(svcs, config, args, using_venv, site_path)
        print(f"   - Writing {svcs['taky']}")
        write_uni_svc(svcs, config, args, using_venv, site_path)
    except PermissionError as exc:
        # NOTE(review): `exc` is unused here — consider including it in
        # the error message like the OSError branch below.
        print(f"ERROR: Unable to write service files to {args.path}", file=sys.stderr)
        return 1
    except OSError as exc:
        print(f"ERROR: {exc}", file=sys.stderr)
        return 1

    if args.install:
        try:
            print(" - Reloading systemctl services")
            subprocess.check_output(["systemctl", "daemon-reload"])
            print(" - Enabling service")
            subprocess.check_output(["systemctl", "enable", svcs["taky"]])
            print(" - Starting service")
            subprocess.check_output(["systemctl", "start", svcs["taky"]])
        except subprocess.CalledProcessError as exc:
            print(f"ERROR: systemctl calls failed: {exc}")
            return 1

    return 0
27,027
def load_graph(filename):
    """Unpersists graph from file as default graph."""
    # Reads a serialized GraphDef protobuf and imports it into the
    # current default graph (TF1-style API; tf.gfile/tf.GraphDef are
    # removed in TF2 — presumably this codebase pins TF1.x).
    with tf.gfile.FastGFile(filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
27,028
def compute_snes_color_score(img):
    """
    Returns the ratio of SNES colors to the total number of colors in the image

    Parameters:
    img (image) -- Pillow image

    Returns:
    count (float) -- ratio of SNES colors
    """
    # Fix: everything after this return used to be unreachable dead code
    # (an older bit-mask based implementation counting colors whose low
    # RGB bits were all set); it has been removed. The score now comes
    # solely from the palette-distance helper.
    score = _get_distance_between_palettes(img, util.get_snes_color_palette())
    return score
27,029
def get_assignments_for_team(user, team): """ Get openassessment XBlocks configured for the current teamset """ # Confirm access if not has_specific_team_access(user, team): raise Exception("User {user} is not permitted to access team info for {team}".format( user=user.username, team=team.team_id )) # Limit to team-enabled ORAs for the matching teamset in the course return modulestore().get_items( team.course_id, qualifiers={'category': 'openassessment'}, settings={'teams_enabled': True, 'selected_teamset_id': team.topic_id} )
27,030
def is_flexible_uri(uri: Uri_t) -> bool:
    """Judge if specified `uri` has one or more flexible location.

    Args:
        uri: URI pattern to be judged.

    Returns:
        True if specified `uri` has one or more flexible location,
        False otherwise.
    """
    return any(isinstance(loc, FlexibleLocation) for loc in uri)
27,031
def load_frame_gray(img_path, gray_flag=False):
    """Load image at img_path, and convert the original image to grayscale
    if gray_flag=True. Return image and grayscale image if gray_flag=True;
    otherwise only return original image.

    img_path = a string containing the path to an image file readable by
    cv.imread
    """
    # NOTE(review): cv.imread normally returns None on failure instead of
    # raising, so this except path may be unreachable — confirm whether a
    # None check on `img` is the intended failure handling.
    try:
        img = cv.imread(img_path)
    except Exception as err:
        print(f"The following error occurred when reading the image file at {img_path}: \n{err}")
        img = None
    # Only convert when a real image array was loaded.
    if gray_flag and isinstance(img, np.ndarray):
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    else:
        gray = None
    return (img, gray) if gray_flag else img
27,032
def read_vocab_file(path):
    """Read a .voc vocabulary file.

    Strips out empty lines and '#' comments; every remaining line is
    lower-cased and expanded (parentheses alternatives) into a list of
    all its forms.

    Args:
        path (str): path to vocab file.

    Returns:
        List of Lists of strings.
    """
    LOG.warning("read_vocab_file is deprecated! "
                "use SkillResources class instead")
    entries = []
    with open(path, 'r', encoding='utf8') as voc_file:
        for raw_line in voc_file.readlines():
            if raw_line.startswith('#') or raw_line.strip() == '':
                continue
            entries.append(expand_options(raw_line.lower()))
    return entries
27,033
def release_cherry_pick(obj, version, dry_run, recreate):
    """
    Cherry pick commits.
    """
    from .release import Release, MinorRelease, PatchRelease

    release = Release.from_jira(version, jira=obj['jira'], repo=obj['repo'])
    # Cherry-picking only makes sense onto a maintenance branch.
    if not isinstance(release, (MinorRelease, PatchRelease)):
        raise click.UsageError('Cherry-pick command only supported for minor '
                               'and patch releases')

    # Only mutate the repository outside dry-run mode; the echoes below
    # describe what was (or would have been) executed either way.
    if not dry_run:
        release.cherry_pick_commits(recreate_branch=recreate)

    click.echo('Executed the following commands:\n')
    click.echo(
        'git checkout {} -b {}'.format(release.previous.tag, release.branch)
    )
    for commit in release.commits_to_pick():
        click.echo('git cherry-pick {}'.format(commit.hexsha))
27,034
def natural_key(string_):
    """Sort key for "natural" ordering, so that e.g. file2 < file10.

    See http://www.codinghorror.com/blog/archives/001018.html
    """
    parts = re.split(r'(\d+)', string_)
    return [int(part) if part.isdigit() else part for part in parts]
27,035
def _set_wpa_supplicant_config(interface, config, opt):
    """Starts or restarts wpa_supplicant unless doing so would be a no-op.

    The no-op case (i.e. wpa_supplicant is already running with an
    equivalent config) can be overridden with --force-restart.

    Args:
      interface: The interface on which to start wpa_supplicant.
      config: A wpa_supplicant configuration, as a string.
      opt: The OptDict parsed from command line options.

    Returns:
      Whether wpa_supplicant was started successfully.

    Raises:
      BinWifiException: On various errors.
    """
    tmp_config_filename = utils.get_filename(
        'wpa_supplicant', utils.FILENAME_KIND.config, interface, tmp=True)
    forced = False
    current_config = None
    band = opt.band
    # Read the previously-written config (if any) to detect changes;
    # a missing file simply means no previous config.
    try:
        with open(tmp_config_filename) as tmp_config_file:
            current_config = tmp_config_file.read()
    except IOError:
        pass

    # Decide whether a (re)start is needed at all.
    already_running = _is_wpa_supplicant_running(interface)
    if not already_running:
        utils.log('wpa_supplicant not running yet, starting.')
    elif current_config != config:
        # TODO(rofrankel): Consider using wpa_cli reconfigure here.
        utils.log('wpa_supplicant config changed, reconfiguring.')
    elif opt.force_restart:
        utils.log('Forced restart requested.')
        forced = True
    else:
        utils.log('wpa_supplicant-%s already configured and running', interface)
        return True

    # A forced restart keeps the existing config file untouched.
    if not forced:
        utils.atomic_write(tmp_config_filename, config)

    # TODO(rofrankel): Consider removing all the restart hostapd stuff
    # when b/30140131 is resolved. hostapd seems to keep working without
    # being restarted, at least on Camaro.
    restart_hostapd = False
    ap_interface = iw.find_interface_from_band(band, iw.INTERFACE_TYPE.ap,
                                               opt.interface_suffix)
    if _is_hostapd_running(ap_interface):
        restart_hostapd = True
        opt_without_persist = options.OptDict({})
        opt_without_persist.persist = False
        opt_without_persist.band = opt.band
        opt_without_persist.interface_suffix = opt.interface_suffix
        if not stop_ap_wifi(opt_without_persist):
            raise utils.BinWifiException(
                "Couldn't stop hostapd to start wpa_supplicant.")

    if already_running:
        # Bounce the interface so the running wpa_supplicant picks up
        # the new configuration.
        subprocess.check_call(['ifdown', interface])
        subprocess.check_call(['/etc/ifplugd/ifplugd.action', interface, 'down'])
        if not _reconfigure_wpa_supplicant(interface):
            raise utils.BinWifiException('Failed to reconfigure wpa_supplicant.')
        subprocess.check_call(['ifup', interface])
        subprocess.check_call(['/etc/ifplugd/ifplugd.action', interface, 'up'])
    elif not _start_wpa_supplicant(interface, tmp_config_filename):
        raise utils.BinWifiException(
            'wpa_supplicant failed to start. Look at wpa_supplicant logs for '
            'details.')

    if restart_hostapd:
        _restart_hostapd(ap_interface)

    return True
27,036
def test_resample(): """Tests :func:`~pomdp_belief_tracking.pf.importance_sampling.resample`""" p = ParticleFilter([0]) pf = resample(p, 5) assert len(pf) == 5 assert pf() == 0 p = ParticleFilter([True, False, True, False]) n = 1000 pf = resample(p, n) assert len(pf) == n assert 0.4 < pf.probability_of(False) < 0.6 assert 0.4 < pf.probability_of(True) < 0.6
27,037
def test_workspace_permissions(app, session, default_user, sample_yadage_workflow_in_db, tmp_shared_volume_path): """Test workspace dir permissions.""" create_workflow_workspace(sample_yadage_workflow_in_db.get_workspace()) expeted_worspace_permissions = 'drwxrwxr-x' absolute_workflow_workspace = os.path.join( tmp_shared_volume_path, sample_yadage_workflow_in_db.get_workspace()) workspace_permissions = \ stat.filemode(os.stat(absolute_workflow_workspace).st_mode) assert os.path.exists(absolute_workflow_workspace) assert workspace_permissions == expeted_worspace_permissions _delete_workflow(sample_yadage_workflow_in_db, hard_delete=True, workspace=True)
27,038
def generate_episode(sim, policy, horizon=200): """ Generate an episode from a policy acting on an simulation. Returns: sequence of state, action, reward. """ obs = sim.reset() policy.reset() # Reset the policy too so that it knows its the beginning of the episode. states, actions, rewards = [], [], [] states.append(obs) for _ in range(horizon): action = policy.act(obs) obs, reward, done, _ = sim.step(action) states.append(obs) actions.append(action) rewards.append(reward) if done: break states.pop() # Pop off the terminating state return states, actions, rewards
27,039
async def _ensure_meadowrun_vault(location: str) -> str:
    """
    Gets the meadowrun key vault URI if it exists. If it doesn't exist, also
    creates the meadowrun key vault (recovering a soft-deleted one if found),
    and tries to assign the Key Vault Administrator role to the current user.

    Args:
        location: Azure location used for the resource group, the vault, and
            the role assignment.

    Returns:
        The vault URI (``properties.vaultUri`` of the Key Vault resource).
    """
    subscription_id = await get_subscription_id()
    vault_name = get_meadowrun_vault_name(subscription_id)
    resource_group_path = await ensure_meadowrun_resource_group(location)
    vault_path = (
        f"{resource_group_path}/providers/Microsoft.KeyVault/vaults/{vault_name}"
    )

    try:
        # Fast path: the vault already exists.
        vault = await azure_rest_api("GET", vault_path, "2019-09-01")
        return vault["properties"]["vaultUri"]
    except ResourceNotFoundError:
        # theoretically key_vault_client.vaults.get_deleted() should be faster,
        # but that requires specifying a location and there's no way to know what
        # location the key vault may have been originally created in.
        deleted_vault_found = False
        async for page in azure_rest_api_paged(
            "GET",
            f"/subscriptions/{subscription_id}/providers/Microsoft.KeyVault/"
            f"deletedVaults",
            "2019-09-01",
        ):
            for vault in page["value"]:
                if vault["name"] == vault_name:
                    deleted_vault_found = True
                    break
            if deleted_vault_found:
                break

        if deleted_vault_found:
            # if we have a deleted vault, then we should try to recover it
            create_mode = "recover"
            print(f"The meadowrun Key Vault {vault_name} was deleted, recovering")
        else:
            create_mode = "default"
            print(
                f"The meadowrun Key Vault {vault_name} does not exist, creating it "
                "now"
            )

        # if we're creating or recreating the Key Vault, assume that we need to
        # add the current user to the Key Vault Administrator role so that the
        # current user can access secrets. Launched concurrently with the vault
        # creation below and awaited afterwards.
        assign_role_task = asyncio.create_task(
            assign_role_to_principal(
                "Key Vault Administrator", await get_current_user_id(), location
            )
        )

        # Now we can create/recover the Key Vault.
        # https://docs.microsoft.com/en-us/rest/api/keyvault/keyvault/vaults/create-or-update#vaultproperties
        vault, _ = await wait_for_poll(
            await azure_rest_api_poll(
                "PUT",
                vault_path,
                "2019-09-01",
                "AsyncOperationJsonStatus",
                json_content={
                    "location": location,
                    "properties": {
                        "tenantId": await get_tenant_id(),
                        "sku": {"family": "A", "name": "Standard"},
                        "enableRbacAuthorization": True,
                        "createMode": create_mode,
                    },
                },
            )
        )

        try:
            await assign_role_task
        except Exception as e:
            # Role assignment failure is non-fatal: the vault exists, the user
            # just may lack permissions on it.
            print(
                "Warning: we were not able to assign the Key Vault Administrator "
                f"role to the current user. You may not be able to create/read "
                f"secrets: {e}"
            )

        return vault["properties"]["vaultUri"]
27,040
def export_postgresql_to_tmp_csv(**kwargs): """ export table data from mysql to csv file """ print(f"Entering export_postgresql_to_csv {kwargs['copy_sql']}") #gcs_hook = GoogleCloudStorageHook(GOOGLE_CONN_ID) pg_hook = PostgresHook.get_hook(kwargs['conn_id']) current_dir = AIRFLOW_HOME + "/dags/data_exp/" with tempfile.NamedTemporaryFile(suffix=".csv", dir= current_dir) as temp_file: temp_name = temp_file.name print(f"Exporting query to file {temp_name}") pg_hook.copy_expert(kwargs['copy_sql'], filename=temp_name) #logging.info("Uploading to %s/%s", kwargs['bucket_name'], kwargs['file_name']) #gcs_hook.upload(kwargs['bucket_name'], kwargs['file_name'], temp_name)
27,041
def test_upload_file(mocker): """ Ensure a file gets properly uploaded.""" resp.status_code = SUCCESS http_obj = HTTPTransput(PATH_UP, URL, FTYPE) mocker.patch('requests.put', return_value=resp) assert 0 == http_obj.upload_file()
27,042
def execute_scanner(dataset_location: str, result_location_str, j, use_ml=False): """Execute CredSweeper as a separate process to make sure no global states is shared with training script""" dir_path = os.path.dirname(os.path.realpath(__file__)) + "/.." command = f"{sys.executable} -m credsweeper --path {dataset_location}/data --save-json {result_location_str} -j {j}" if use_ml: command += " --ml_validation" subprocess.call(command, shell=True, cwd=dir_path, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
27,043
def get_dataset_splits(
    datasets: Iterable[HarmonicDataset],
    data_dfs: Dict[str, pd.DataFrame] = None,
    xml_and_csv_paths: Dict[str, List[Union[str, Path]]] = None,
    splits: Iterable[float] = (0.8, 0.1, 0.1),
    seed: int = None,
) -> Tuple[List[List[HarmonicDataset]], List[List[int]], List[List[Piece]]]:
    """
    Get datasets representing splits of the data in the given DataFrames.

    Parameters
    ----------
    datasets : Iterable[HarmonicDataset]
        An Iterable of HarmonicDataset class objects, each representing a
        different type of HarmonicDataset subclass to make a Dataset from.
        These are all passed so that they will have identical splits.
    data_dfs : Dict[str, pd.DataFrame]
        If using dataframes, a mapping of 'files', 'measures', 'chords', and
        'notes' dfs.
    xml_and_csv_paths : Dict[str, List[Union[str, Path]]]
        If using the MusicXML ('xmls') and label csvs ('csvs'), a list of
        paths of the matching xml and csv files.
    splits : Iterable[float]
        An Iterable of floats representing the proportion of pieces which will
        go into each split. This will be normalized to sum to 1.
    seed : int
        A numpy random seed, if given.

    Returns
    -------
    dataset_splits : List[List[HarmonicDataset]]
        An iterable, the length of `dataset` representing the splits for each
        given dataset type. Each element is itself an iterable the length of
        `splits`.
    split_ids : List[List[int]]
        A list the length of `splits` containing the file_ids for each data
        point in each split.
    split_pieces : List[List[Piece]]
        A list of the pieces in each split.
    """
    split_ids, split_pieces = get_split_file_ids_and_pieces(
        data_dfs=data_dfs,
        xml_and_csv_paths=xml_and_csv_paths,
        splits=splits,
        seed=seed,
    )

    # Object array indexed [dataset_type][split]; entries left as None when a
    # split is empty.
    dataset_splits = np.full((len(datasets), len(splits)), None)
    for split_index, (split_prop, pieces) in enumerate(zip(splits, split_pieces)):
        if len(pieces) == 0:
            logging.warning(
                "Split %s with prop %s contains no pieces. Returning None for those.",
                split_index,
                split_prop,
            )
            continue

        # Each dataset type is built from the same pieces so splits line up.
        for dataset_index, dataset_class in enumerate(datasets):
            dataset_splits[dataset_index][split_index] = dataset_class(pieces)

    return dataset_splits, split_ids, split_pieces
27,044
def put_data_es(job, jobSite, stageoutTries, files, workDir=None, activity=None):
    """
    Do jobmover.stageout_outfiles or jobmover.stageout_logfiles (if log_transfer=True)
    or jobmover.stageout_logfiles_os (if special_log_transfer=True)
    :backward compatible return: (rc, pilotErrorDiag, rf, "", filesNormalStageOut, filesAltStageOut)

    NOTE(review): despite the docstring above, this function returns a
    3-tuple: (error_code, error_message, storageId) -- confirm against callers.
    """
    tolog("Mover put data started [new implementation]")

    from PilotErrors import PilotException
    from movers import JobMover
    from movers.trace_report import TraceReport

    si = getSiteInformation(job.experiment)
    si.setQueueName(jobSite.computingElement) # WARNING: SiteInformation is singleton: may be used in other functions! FIX me later

    workDir = workDir or os.path.dirname(job.workdir)
    mover = JobMover(job, si, workDir=workDir, stageoutretry=stageoutTries)

    eventType = "put_es"
    mover.trace_report = TraceReport(pq=jobSite.sitename, localSite=jobSite.sitename, remoteSite=jobSite.sitename, dataset="", eventType=eventType)
    mover.trace_report.init(job)

    error = None
    storageId = None
    try:
        if not activity:
            activity = "es_events"
        # Use the objectstore copytool only when the first file carries an
        # explicit storageId (-1 means "unset").
        file = files[0]
        if file.storageId and file.storageId != -1:
            storageId = file.storageId
            copytools = [('objectstore', {'setup': ''})]
        else:
            copytools = None
        transferred_files, failed_transfers = mover.stageout(activity=activity, files=files, copytools=copytools)
    except PilotException, e:
        error = e
    except Exception, e:
        tolog("ERROR: Mover put data failed [stageout]: exception caught: %s" % e)
        import traceback
        tolog(traceback.format_exc())
        error = PilotException('STAGEOUT FAILED, exception=%s' % e, code=PilotErrors.ERR_STAGEOUTFAILED, state='STAGEOUT_FAILED')

    if error:
        ## send trace
        mover.trace_report.update(clientState=error.state or 'STAGEOUT_FAILED', stateReason=error.message, timeEnd=time())
        mover.sendTrace(mover.trace_report)
        return error.code, error.message, None

    tolog("Mover put data finished")

    # prepare compatible output
    # keep track of which files have been copied
    not_transferred = [e.lfn for e in files if e.status not in ['transferred']]
    if not_transferred:
        err_msg = 'STAGEOUT FAILED: not all output files have been copied: remain files=%s, errors=%s' % ('\n'.join(not_transferred), ';'.join([str(ee) for ee in failed_transfers]))
        tolog("Mover put data finished: error_msg=%s" % err_msg)
        return PilotErrors.ERR_STAGEOUTFAILED, err_msg, None

    return 0, "", storageId
27,045
def floor(data): """ Returns element-wise largest integer not greater than x. Args: data (tvm.tensor.Tensor): Tensor of type float16, and float32 Returns: tvm.tensor.Tensor, has the same shape as data and type of int32. """ vc_util.ops_dtype_check(data.dtype, vc_util.DtypeForDavinci.ALL_FLOAT) shape = [x.value for x in data.shape] vc_util.check_shape(shape) if utils.product_is_mini() and data.dtype == "float32": # solve the problem of 87==floor(86.9996) when high_precision is needed. # problem is caused by such as fp16(86.9996)==87. # detect problem by fp32(86.9996) - fp32(floor(fp16(86.9996))) < 0 # floor could only apply on float16 data_fp16 = akg.lang.cce.cast_to(data, "float16") floor_data = akg.lang.cce.floor(data_fp16) floor_fp16 = akg.lang.cce.cast_to(floor_data, "float16") floor_fp32 = akg.lang.cce.cast(floor_fp16, "float32") # if diff=1e-7, we cannot get right sign of fp16(diff) # but we can get right sign of 10000*diff = 1e-3, which has the same # sign as diff diff = (data - floor_fp32) * 10000 diff_fp16 = akg.lang.cce.cast_to(diff, "float16") # if diff < 0 and floor == ceil, then it's 87 = floor(86.99999) res = akg.tvm.compute(shape, lambda *i: akg.tvm.expr.Select( diff_fp16(*i) < tvm.const(0, "float16"), floor_fp16(*i) - tvm.const(1, "float16"), floor_fp16(*i)), name="res") res = akg.lang.cce.cast_to(res, "int32") else: res = akg.lang.cce.floor(data) return res
27,046
def pixel_link_model(inputs, config): """ PixelLink architecture. """ if config['model_type'] == 'mobilenet_v2_ext': backbone = mobilenet_v2(inputs, original_stride=False, weights_decay=config['weights_decay']) elif config['model_type'] == 'ka_resnet50': backbone = keras_applications_resnet50(inputs) elif config['model_type'] == 'ka_vgg16': backbone = keras_applications_vgg16(inputs) elif config['model_type'] == 'ka_mobilenet_v2_1_0': backbone = keras_applications_mobilenetv2(inputs, alpha=1.0) elif config['model_type'] == 'ka_mobilenet_v2_1_4': backbone = keras_applications_mobilenetv2(inputs, alpha=1.4) elif config['model_type'] == 'ka_xception': backbone = keras_applications_xception(inputs) segm_logits = fcn_head(backbone, num_classes=2, name='segm_logits', weights_decay=config['weights_decay']) link_logits = fcn_head(backbone, num_classes=16, name='link_logits_', weights_decay=config['weights_decay']) new_shape = tf.shape(link_logits)[1], tf.shape(link_logits)[2], 8, 2 link_logits = tf.keras.layers.Reshape(new_shape, name='link_logits')(link_logits) return tf.keras.Model(inputs, [segm_logits, link_logits])
27,047
def ingresar_datos(): """Pide al usuario los datos para calcular el precio de la compra de boletos. :return: tipo, cantidad :rtype: tuple """ text_align("Datos de la compra", width=35) tipo: str = choice_input(tuple(TIPO.keys())) cantidad: int = int_input("Ingrese el número de boletos: ", min=1, max=12) return tipo, cantidad
27,048
def trim_spectrum(freqs, power_spectra, f_range): """Extract a frequency range from power spectra. Parameters ---------- freqs : 1d array Frequency values for the power spectrum. power_spectra : 1d or 2d array Power spectral density values. f_range: list of [float, float] Frequency range to restrict to, as [lowest_freq, highest_freq]. Returns ------- freqs_ext : 1d array Extracted frequency values for the power spectrum. power_spectra_ext : 1d or 2d array Extracted power spectral density values. Notes ----- This function extracts frequency ranges >= f_low and <= f_high. It does not round to below or above f_low and f_high, respectively. Examples -------- Using a simulated spectrum, extract a frequency range: >>> from fooof.sim import gen_power_spectrum >>> freqs, powers = gen_power_spectrum([1, 50], [1, 1], [10, 0.5, 1.0]) >>> freqs, powers = trim_spectrum(freqs, powers, [3, 30]) """ # Create mask to index only requested frequencies f_mask = np.logical_and(freqs >= f_range[0], freqs <= f_range[1]) # Restrict freqs & spectra to requested range # The if/else is to cover both 1d or 2d arrays freqs_ext = freqs[f_mask] power_spectra_ext = power_spectra[f_mask] if power_spectra.ndim == 1 \ else power_spectra[:, f_mask] return freqs_ext, power_spectra_ext
27,049
def plot_neighbor_rids_of_edge(eid, df_edges_unvisted, dis_buffer=25): """Plot the neighbor rids of the eid. Args: eid ([type]): [description] df_edges_unvisted ([type]): [description] """ road_mask = gpd.GeoDataFrame({'eid': eid, 'geometry': df_edges_unvisted.query(f'eid=={eid}').buffer(dis_buffer*DIS_FACTOR)}) tmp = gpd.sjoin(gdf_roads, road_mask, op='intersects') tmp.reset_index(inplace=True) fig, ax = map_visualize(tmp) tmp.plot(column='ID', ax=ax, legend=True) df_edges_unvisted.query(f'eid=={eid}').plot(ax=ax, color='blue', linestyle='--') return
27,050
def _get_variable_for(v): """Returns the ResourceVariable responsible for v, or v if not necessary.""" if v.op.type == "VarHandleOp": for var in ops.get_collection(ops.GraphKeys.RESOURCES): if (isinstance(var, resource_variable_ops.ResourceVariable) and var.handle.op is v.op): return var raise ValueError("Got %s but could not locate source variable." % (str(v))) return v
27,051
def append_jsonlines(dest_filename, items, encoding=__ENCODING_UTF8): """ append item as some lines of json string to file :param dest_filename: destination file :param items: items to be saved :param encoding: file encoding :return: None """ with open(dest_filename, 'a', encoding=encoding) as f: for item in items: f.write(json.dumps(item, ensure_ascii=False) + '\n')
27,052
def load_prefixes(filepath): """Dado um arquivo txt contendo os prefixos utilizados na SPARQL, é devolvida uma string contendo os prefixos e uma lista de tuplas contendo os prefixos. Parameters ---------- filepath : str Caminho do arquivo txt contendo o conjunto de prefixos. Returns ------- tuple of str Uma tupla contendo os prefixos carregados na forma de string e uma lista de tuplas, onde a primeira posição é o nome dado ao URI e a segunda contém a URI correspondente. Examples -------- .. code-block:: python >>> from QApedia.io import load_prefixes >>> filename = "prefixes.txt" >>> prefixes = load_prefixes(filename) >>> for uri_name, uri in prefixes[1]: ... print(uri_name, uri) ... owl: http://www.w3.org/2002/07/owl# xsd: http://www.w3.org/2001/XMLSchema# rdfs: http://www.w3.org/2000/01/rdf-schema# rdf: http://www.w3.org/1999/02/22-rdf-syntax-ns# foaf: http://xmlns.com/foaf/0.1/ dc: http://purl.org/dc/elements/1.1/ dbpedia2: http://dbpedia.org/property/ dbpedia: http://dbpedia.org/ skos: http://www.w3.org/2004/02/skos/core# """ f = open(filepath, "r") lines = f.readlines() f.close() prefixes = "\n".join(line.rstrip() for line in lines) list_of_prefixes = convert_prefixes_to_list(prefixes) return prefixes, list_of_prefixes
27,053
def messenger_database_setup(force=False):
    """Setup the database from SQL database dump files.

    Repopulates the database using a SQL backup rather than the original
    IDL save files. See :doc:`database_fields` for a description of the
    tables and fields used by MESSENGERuvvs.

    **Parameters**

    force
        If True, deletes old database tables and remakes them. Default is
        False, which only creates the tables if necessary.

    **Returns**

    No output.
    """
    # Get database name and port
    database, port = database_connect(return_con=False)

    # Only run against a real (non-test) database with psycopg2 importable.
    if ((isinstance(sys.modules['psycopg2'], types.ModuleType)) and
            ('test' not in database)):
        # Read in current config file if it exists
        configfile = os.path.join(os.environ['HOME'], '.nexoclom')
        datapath = None
        if os.path.isfile(configfile):
            for line in open(configfile, 'r').readlines():
                key, value = line.split('=')
                if key.strip() == 'datapath':
                    datapath = value.strip()
                else:
                    pass
        else:
            pass

        # Prompt for the data path and persist it for next time.
        if datapath is None:
            datapath = input('What is the path to the MESSENGER data? ')
            with open(configfile, 'a') as f:
                f.write(f'datapath = {datapath}\n')
        else:
            pass

        # Verify database is running
        status = os.popen('pg_ctl status').read()
        if 'no server running' in status:
            os.system(f'pg_ctl start -D $HOME/.postgres/main '
                      f'-l $HOME/.postgres/logfile -o "-p {port}"')
        else:
            pass

        # Create MESSENGER database if necessary
        with database_connect(database='postgres') as con:
            cur = con.cursor()
            cur.execute('select datname from pg_database')
            dbs = [r[0] for r in cur.fetchall()]

            if database not in dbs:
                print(f'Creating database {database}')
                cur.execute(f'create database {database}')
            else:
                pass

        # Create the MESSENGER tables if necessary
        with database_connect() as con:
            cur = con.cursor()
            cur.execute('select table_name from information_schema.tables')
            tables = [r[0] for r in cur.fetchall()]

            mestables = ['capointing', 'cauvvsdata', 'caspectra',
                         'mgpointing', 'mguvvsdata', 'mgspectra',
                         'napointing', 'nauvvsdata', 'naspectra',
                         'mesmercyear']
            there = [m in tables for m in mestables]
            # Rebuild if any expected table is missing, or if forced.
            if (False in there) or force:
                # Delete any tables that may exist
                for mestab in mestables:
                    if mestab in tables:
                        cur.execute(f'drop table {mestab}')
                    else:
                        pass

                # Import the dumped tables
                datafiles = glob.glob(datapath+'/UVVS*sql')
                for dfile in datafiles:
                    print(f'Loading {os.path.basename(dfile)}')
                    os.system(f'psql -d {database} -p {port} -f {dfile}')
            else:
                pass
27,054
def build_convolutional_box_predictor(is_training,
                                      num_classes,
                                      conv_hyperparams_fn,
                                      min_depth,
                                      max_depth,
                                      num_layers_before_predictor,
                                      use_dropout,
                                      dropout_keep_prob,
                                      kernel_size,
                                      box_code_size,
                                      apply_sigmoid_to_scores=False,
                                      add_background_class=True,
                                      class_prediction_bias_init=0.0,
                                      use_depthwise=False,
                                      box_encodings_clip_range=None):
  """Builds a ConvolutionalBoxPredictor from the given arguments.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: Number of classes, excluding the background category (so
      groundtruth labels in {0, .., K-1} mean num_classes=K).
    conv_hyperparams_fn: Function generating a tf-slim arg_scope with
      hyperparameters for convolution ops.
    min_depth: Minimum feature depth prior to predicting box encodings and
      class predictions.
    max_depth: Maximum feature depth prior to predicting box encodings and
      class predictions; 0 means no additional feature map is inserted.
    num_layers_before_predictor: Number of additional conv layers before the
      predictor.
    use_dropout: Whether to apply a single dropout op prior to both box and
      class predictions.
    dropout_keep_prob: Keep probability for dropout (used only when
      use_dropout is True).
    kernel_size: Size of the final convolution kernel; automatically clamped
      to min(feature_width, feature_height) when the feature map is smaller.
    box_code_size: Size of the encoding for each box.
    apply_sigmoid_to_scores: If True, apply sigmoid to the output class
      predictions.
    add_background_class: Whether to add an implicit background class.
    class_prediction_bias_init: Constant used to initialize the bias of the
      last conv2d layer before class prediction.
    use_depthwise: Whether to use depthwise convolutions for prediction.
    box_encodings_clip_range: Min and max values for clipping box encodings.

  Returns:
    A ConvolutionalBoxPredictor class.
  """
  # Reserve one extra class slot for the implicit background class.
  if add_background_class:
    num_class_slots = num_classes + 1
  else:
    num_class_slots = num_classes

  box_prediction_head = box_head.ConvolutionalBoxHead(
      is_training=is_training,
      box_code_size=box_code_size,
      kernel_size=kernel_size,
      use_depthwise=use_depthwise,
      box_encodings_clip_range=box_encodings_clip_range)
  class_prediction_head = class_head.ConvolutionalClassHead(
      is_training=is_training,
      num_class_slots=num_class_slots,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob,
      kernel_size=kernel_size,
      apply_sigmoid_to_scores=apply_sigmoid_to_scores,
      class_prediction_bias_init=class_prediction_bias_init,
      use_depthwise=use_depthwise)
  return convolutional_box_predictor.ConvolutionalBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      box_prediction_head=box_prediction_head,
      class_prediction_head=class_prediction_head,
      other_heads={},
      conv_hyperparams_fn=conv_hyperparams_fn,
      num_layers_before_predictor=num_layers_before_predictor,
      min_depth=min_depth,
      max_depth=max_depth)
27,055
def _get_default_directory(): """Returns the default directory for the Store. This is intentionally underscored to indicate that `Store.get_default_directory` is the intended way to get this information. This is also done so `Store.get_default_directory` can be mocked in tests and `_get_default_directory` can be tested. """ return os.environ.get('PRE_COMMIT_HOME') or os.path.join( os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'), 'pre-commit', )
27,056
def parse_command_line(): """ :return: """ parser = argp.ArgumentParser(prog='TEPIC/findBackground.py', add_help=True) ag = parser.add_argument_group('Input/output parameters') ag.add_argument('--input', '-i', type=str, dest='inputfile', required=True, help='Path to input file. First three columns in file' ' are expected to be chrom - start - end.') ag.add_argument('--genome', '-g', type=str, dest='genome', required=True, help='Path to genome reference file in 2bit format.') ag.add_argument('--output', '-o', type=str, dest='outputfile', default='stdout', help='Path to output file or stdout. Default: stdout') ag = parser.add_argument_group('Runtime parameters') ag.add_argument('--workers', '-w', type=int, dest='workers', default=1, help='Number of CPU cores to use. 1 CPU core' ' processes 1 chromosome at a time. Default: 1') ag.add_argument('--time-out', '-to', type=int, dest='timeout', default=3, help='Maximal number of minutes to spend searching for' ' background regions per chromosome. Default: 3 minutes') ag.add_argument('--threshold', '-th', type=int, dest='threshold', default=90, help='Stop searching after having found more than <THRESHOLD>%%' ' matches per chromosome. Default: 90%%') ag.add_argument('--eps-init', '-ei', type=float, dest='epsinit', default=1., help='Init value for epsilon. Error tolerance in percentage points' ' for similarity matching. Default: 1.0 ppt') ag.add_argument('--eps-step', '-es', type=float, dest='epsstep', default=0.5, help='Increment epsilon at each iteration by this value. Default: 0.5') ag.add_argument('--eps-max', '-em', type=float, dest='epsmax', default=2., help='Maximal value for epsilon. After reaching this value, restart' ' search with different starting positions. Default: 2.0') return parser.parse_args()
27,057
def test_bit_integer_index_list(c_or_python): """Make sure sequential integers are returned for a full bitstring """ fqe.settings.use_accelerated_code = c_or_python test_list = list(range(8)) start = (1 << 8) - 1 biti_list = bitstring.integer_index(start) assert (biti_list == test_list).all()
27,058
def get_rental_data(neighborhoods): """This function loops through all the items in neighborhoods, scrapes craiglist for date for that neighborhood, appends it to a list, and uploads a json to s3. Args: neighborhoods: neighborhoods is a dictionary containing the names of the neighborhoods as keys and the craigslist URLs as values. """ # Create list to hold all scraped data rental_data = [] # Loop through neighborhoods dict for neighborhood, url in neighborhoods.items(): # Retrieve page with the requests module response = requests.get(url) # Create BeautifulSoup object; parse with 'lxml' soup = BeautifulSoup(response.text, 'lxml') # results are returned as an iterable list results = soup.find_all('li', class_="result-row") # Loop through returned results for result in results: # Error handling try: # Identify and return bedrooms and footage raw_br = result.find( 'span', class_="housing").text.split("-")[0].strip() if regex.search(raw_br): bedrooms = float(regex.search(raw_br).group(1)) else: continue raw_sqft = result.find( 'span', class_="housing").text.split("-")[1].strip() if regex.search(raw_sqft): sqft = float(regex.search(raw_sqft).group(1)) else: continue # Get datetime of post datetime = result.find("time")["datetime"] # Identify and return title of listing title = result.find('a', class_="result-title").text # Identify and return price of listing price = float(result.a.span.text.strip("$")) # Identify and return link to listing link = result.a['href'] # Create dictionary for result data = { "neighborhood": neighborhood, "datetime": datetime, "title": title, "price": price, "bedrooms": bedrooms, "sqft": sqft, "link": link } # Append data to list rental_data.append(data) except: continue # Load rental data to s3 obj = s3.Object(output_bucket, ouput_obj_path) # we need this json object in one line for athena (hive json serde) obj.put(Body=json.dumps(rental_data, separators=(',', ':')))
27,059
def enable_bootstrap_logging(service_name=None): """ Turn on the bootstrap logger, which provides basic stdout logs until the main twisted logger is online and ready. :param name_prefix: :return: """ global bootstrap_logger_enabled, bootstrap_logger, log_level if log_level: level = filter(lambda x: x[1] == log_level, log_level_map.items()) # now select the right element of the tuple if level: level = level[0][0] if level == 'SPEW': level = 'DEBUG' else: level = 'INFO' prefix = 'service:{}'.format(service_name if service_name else ' ') logging.basicConfig(level=level, stream=sys.stdout, format="[{}] %(asctime)s [-] [%(name)s] [%(levelname)s] %(message)s".format(prefix), datefmt='%Y-%m-%d %H:%M:%S+0000') bootstrap_logger = logging.getLogger('bootstrap') bootstrap_logger_enabled = True
27,060
def get_text(event=None): """ :param event: None :return: None Gets the recommendations and shows it in a text widget. """ pygame.mixer.music.load('music/button-3.wav') pygame.mixer.music.play() text_widget = Text(frame, font='Courier 13 italic', cursor='arrow', bg='yellow', height=11, width=60) hyperlink = HyperlinkManager(text_widget) text_widget.tag_configure('tag-center', justify='center') text_widget.tag_configure('tag-left', justify='left') query = combo1.get() # get input from combo widget query = ' '.join([word for word in re.split(r'\s+', query) if word != '']) # handling white space text = ims.get_recommendations(query) if text is None: # if the movie/tv show not found print some tips text = "Item not found!\n" text_widget.insert(1.0, text, 'tag-center') text_widget.insert(END, '\nYou can try the following:\n\n 1. Enter keywords and choose from dropdown menu.\n ' '2. Check for typos.', 'tag-left') else: # if found iterate over the DataFrame to create hyperlinks in the text widget text_widget.delete(1.0, END) # clear previous entries for idx, title, imdb_url in text.itertuples(): # iterating over the DataFrame as tuples text_widget.insert(END, title, hyperlink.add(partial(open_link, imdb_url))) # insert hyperlinks in the # widget if idx != 9: # if not the last index, insert a new line after the previous entry text_widget.insert(END, '\n') text_widget.insert(END, '\n') text_widget.config(highlightcolor='black', highlightbackground="black", highlightthickness=2) text_widget.place(x=185, y=310) # adding scrollbar to the text widget scroll_y = Scrollbar(text_widget, orient='vertical', command=text_widget.yview) scroll_y.place(x=185*3 + 30, relheight=1) text_widget.configure(state='disabled', yscrollcommand=scroll_y.set)
27,061
def disparity_to_idepth(K, T_right_in_left, left_disparity):
    """Transforms general (non-rectified) disparities to inverse depths.

    Args:
        K: (batch, 4, 4) camera intrinsics.
        T_right_in_left: (batch, 4, 4) pose of the right camera in the left
            camera frame.
        left_disparity: disparity map whose last two dims are (rows, cols).

    Returns:
        (batch, 1, rows, cols) inverse depth map for the left image, with
        degenerate pixels (no epipolar motion) set to 0.
    """
    assert(len(T_right_in_left.shape) == 3)
    # assert(T_right_in_left.shape[0] == self.batch_size)
    assert(T_right_in_left.shape[1] == 4)
    assert(T_right_in_left.shape[2] == 4)
    assert(len(K.shape) == 3)
    # assert(K.shape[0] == self.batch_size)
    assert(K.shape[1] == 4)
    assert(K.shape[2] == 4)
    batch_size = K.shape[0]
    rows = left_disparity.shape[-2]
    cols = left_disparity.shape[-1]

    # Create tensor of homogeneous pixel coordinates of size (batch, 3, rows*cols).
    y_grid, x_grid = torch.meshgrid(torch.arange(0, rows, device=left_disparity.device),
                                    torch.arange(0, cols, device=left_disparity.device))
    xys = torch.cat([x_grid.reshape(-1, rows * cols).float(),
                     y_grid.reshape(-1, rows * cols).float()], dim=0)
    xys = xys.unsqueeze(0).repeat(batch_size, 1, 1)
    ones = torch.ones(batch_size, 1, rows * cols, dtype=torch.float32, device=xys.device)
    xyz_pix = torch.cat([xys, ones], 1)

    Kinv = torch.inverse(K)
    T_left_in_right = torch.inverse(T_right_in_left)
    R_left_in_right = T_left_in_right[:, :3, :3]
    # Infinite-homography-style mapping K * R * K^-1 from left to right pixels.
    KRKinv = torch.matmul(K[:, :3, :3], torch.matmul(R_left_in_right, Kinv[:, :3, :3]))
    KRKinv3 = KRKinv[:, 2, :]  # (batch, 3)
    KRKinv3_rep = torch.unsqueeze(KRKinv3, dim=2).repeat(1, 1, rows*cols)  # (batch, 3, rows*cols)

    KT_left_in_right = torch.matmul(K, T_left_in_right)
    Kt = KT_left_in_right[:, :3, 3]  # (batch, 3)
    Kt_rep = torch.unsqueeze(Kt, dim=2).repeat(1, 1, rows*cols)  # (batch, 3, rows*cols)

    # (batch, rows*cols)
    left_disparity_flat = left_disparity.reshape(batch_size, -1)

    # Compute pixels at infinite depth.
    pix_inf = torch.matmul(KRKinv, xyz_pix)  # (batch, 3, rows*cols)
    pix_inf[:, 0, :] /= pix_inf[:, 2, :]
    pix_inf[:, 1, :] /= pix_inf[:, 2, :]
    pix_inf[:, 2, :] /= pix_inf[:, 2, :]

    # Compute epipolar lines (must point from far to near depth).
    pix_far = torch.matmul(KRKinv, xyz_pix * 1e2)
    pix_far += Kt_rep
    pix_far[:, 0, :] /= pix_far[:, 2, :]
    pix_far[:, 1, :] /= pix_far[:, 2, :]
    pix_far[:, 2, :] /= pix_far[:, 2, :]

    # Unit direction of the epipolar line per pixel; 1e-6 guards division by
    # zero for degenerate pixels.
    epi_diff = pix_far[:, :2, :] - pix_inf[:, :2, :]
    epi_norm = torch.sqrt(torch.sum(epi_diff**2, dim=1))
    epiline = epi_diff[:, :2, :]  # (batch, 2, rows*cols)
    epiline[:, 0, :] /= (epi_norm + 1e-6)
    epiline[:, 1, :] /= (epi_norm + 1e-6)

    # Pixels with (near-)zero epipolar motion carry no depth information.
    mask = epi_norm < 1e-6
    mask = mask.reshape(batch_size, 1, rows, cols)

    # Convert disparity to idepth.
    # (batch, rows*cols)
    w = KRKinv3_rep[:, 0, :] * xyz_pix[:, 0, :] + \
        KRKinv3_rep[:, 1, :] * xyz_pix[:, 1, :] + \
        KRKinv3_rep[:, 2, :]

    # Per-pixel 2x1 least-squares system A * idepth = b.
    # (batch, rows*cols)
    A0 = Kt_rep[:, 0, :] - Kt_rep[:, 2, :]*(pix_inf[:, 0, :] + left_disparity_flat * epiline[:, 0, :])
    A1 = Kt_rep[:, 1, :] - Kt_rep[:, 2, :]*(pix_inf[:, 1, :] + left_disparity_flat * epiline[:, 1, :])
    b0 = w * left_disparity_flat * epiline[:, 0, :]
    b1 = w * left_disparity_flat * epiline[:, 1, :]

    ATA = A0 * A0 + A1 * A1
    ATb = A0 * b0 + A1 * b1

    left_idepthmap = ATb / ATA
    left_idepthmap = left_idepthmap.reshape(batch_size, 1, rows, cols)

    # Set bad points to 0 idepth.
    left_idepthmap = (~mask).float() * left_idepthmap

    return left_idepthmap
27,062
def resol_dijkstra():
    """Solve the maze by running Dijkstra's algorithm on its graph.

    Vertices are numbered 0 .. width*height-1 of the maze; the search
    starts from vertex "0" and the resulting shortest path is followed.
    """
    maze_graph = gen_graph()
    dijkstra_result = maze_graph.dijkstra("0")
    shortest_path = Trouve_chemin(dijkstra_result)
    suis_chemin(shortest_path)
27,063
def test_tls_client_auth(  # noqa: C901  # FIXME
        # FIXME: remove twisted logic, separate tests
        mocker,
        tls_http_server, adapter_type,
        ca,
        tls_certificate,
        tls_certificate_chain_pem_path,
        tls_certificate_private_key_pem_path,
        tls_ca_certificate_pem_path,
        is_trusted_cert, tls_client_identity,
        tls_verify_mode,
):
    """Verify that client TLS certificate auth works correctly.

    Spins up a TLS server configured with the given adapter and verify
    mode, then makes an HTTPS request with a client certificate that is
    either trusted or untrusted; trusted certs must succeed, untrusted
    ones must be rejected with an SSL error (with platform-specific
    xfails for known-flaky combinations).
    """
    # Rejection is only expected when verification is on AND the client
    # cert comes from an untrusted CA.
    test_cert_rejection = (
        tls_verify_mode != ssl.CERT_NONE
        and not is_trusted_cert
    )
    interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)

    # Issue the client certificate from either the server-trusted CA or a
    # fresh, untrusted one.
    client_cert_root_ca = ca if is_trusted_cert else trustme.CA()
    with mocker.mock_module.patch(
        'idna.core.ulabel',
        return_value=ntob(tls_client_identity),
    ):
        client_cert = client_cert_root_ca.issue_server_cert(
            # FIXME: change to issue_cert once new trustme is out
            ntou(tls_client_identity),
        )
        del client_cert_root_ca

    with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:
        tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)
        tls_adapter = tls_adapter_cls(
            tls_certificate_chain_pem_path,
            tls_certificate_private_key_pem_path,
        )
        if adapter_type == 'pyopenssl':
            tls_adapter.context = tls_adapter.get_context()
            tls_adapter.context.set_verify(
                _stdlib_to_openssl_verify[tls_verify_mode],
                lambda conn, cert, errno, depth, preverify_ok: preverify_ok,
            )
        else:
            tls_adapter.context.verify_mode = tls_verify_mode

        ca.configure_trust(tls_adapter.context)
        tls_certificate.configure_cert(tls_adapter.context)

        tlshttpserver = tls_http_server((interface, port), tls_adapter)

        interface, _host, port = _get_conn_data(tlshttpserver.bind_addr)

        make_https_request = functools.partial(
            requests.get,
            'https://{host!s}:{port!s}/'.format(host=interface, port=port),

            # Server TLS certificate verification:
            verify=tls_ca_certificate_pem_path,

            # Client TLS certificate verification:
            cert=cl_pem,
        )

        # Happy path: the request must succeed and return the greeting.
        if not test_cert_rejection:
            resp = make_https_request()
            is_req_successful = resp.status_code == 200
            if (
                    not is_req_successful
                    and IS_PYOPENSSL_SSL_VERSION_1_0
                    and adapter_type == 'builtin'
                    and tls_verify_mode == ssl.CERT_REQUIRED
                    and tls_client_identity == 'localhost'
                    and is_trusted_cert
            ) or PY34:
                pytest.xfail(
                    'OpenSSL 1.0 has problems with verifying client certs',
                )
            assert is_req_successful
            assert resp.text == 'Hello world!'
            return

        # xfail some flaky tests
        # https://github.com/cherrypy/cheroot/issues/237
        issue_237 = (
            IS_MACOS
            and adapter_type == 'builtin'
            and tls_verify_mode != ssl.CERT_NONE
        )
        if issue_237:
            pytest.xfail('Test sometimes fails')

        # Rejection path: the handshake must fail with one of these errors.
        expected_ssl_errors = (
            requests.exceptions.SSLError,
            OpenSSL.SSL.Error,
        ) if PY34 else (
            requests.exceptions.SSLError,
        )
        if IS_WINDOWS or IS_GITHUB_ACTIONS_WORKFLOW:
            expected_ssl_errors += requests.exceptions.ConnectionError,
        with pytest.raises(expected_ssl_errors) as ssl_err:
            make_https_request()

        if PY34 and isinstance(ssl_err, OpenSSL.SSL.Error):
            pytest.xfail(
                'OpenSSL behaves wierdly under Python 3.4 '
                'because of an outdated urllib3',
            )

        # Dig the human-readable reason out of the (deeply nested) error.
        try:
            err_text = ssl_err.value.args[0].reason.args[0].args[0]
        except AttributeError:
            if PY34:
                pytest.xfail('OpenSSL behaves wierdly under Python 3.4')
            elif IS_WINDOWS or IS_GITHUB_ACTIONS_WORKFLOW:
                err_text = str(ssl_err.value)
            else:
                raise

        if isinstance(err_text, int):
            err_text = str(ssl_err.value)

        # Acceptable error-message fragments vary per TLS backend and OS.
        expected_substrings = (
            'sslv3 alert bad certificate' if IS_LIBRESSL_BACKEND
            else 'tlsv1 alert unknown ca',
        )
        if not six.PY2:
            if IS_MACOS and IS_PYPY and adapter_type == 'pyopenssl':
                expected_substrings = ('tlsv1 alert unknown ca',)
        if (
                tls_verify_mode in (
                    ssl.CERT_REQUIRED,
                    ssl.CERT_OPTIONAL,
                )
                and not is_trusted_cert
                and tls_client_identity == 'localhost'
        ):
            expected_substrings += (
                'bad handshake: '
                "SysCallError(10054, 'WSAECONNRESET')",
                "('Connection aborted.', "
                'OSError("(10054, \'WSAECONNRESET\')"))',
                "('Connection aborted.', "
                'OSError("(10054, \'WSAECONNRESET\')",))',
                "('Connection aborted.', "
                'error("(10054, \'WSAECONNRESET\')",))',
                "('Connection aborted.', "
                'ConnectionResetError(10054, '
                "'An existing connection was forcibly closed "
                "by the remote host', None, 10054, None))",
            ) if IS_WINDOWS else (
                "('Connection aborted.', "
                'OSError("(104, \'ECONNRESET\')"))',
                "('Connection aborted.', "
                'OSError("(104, \'ECONNRESET\')",))',
                "('Connection aborted.', "
                'error("(104, \'ECONNRESET\')",))',
                "('Connection aborted.', "
                "ConnectionResetError(104, 'Connection reset by peer'))",
                "('Connection aborted.', "
                "error(104, 'Connection reset by peer'))",
            ) if (
                IS_GITHUB_ACTIONS_WORKFLOW
                and IS_LINUX
            ) else (
                "('Connection aborted.', "
                "BrokenPipeError(32, 'Broken pipe'))",
            )
        assert any(e in err_text for e in expected_substrings)
27,064
def display_board(state: object, player_num: int,
                  winners: Union[List[int], None] = None):
    """Render the board with the graphical interface.

    Parameters
    ----------
    state : object
        The state to render, a (board, round_count, players) triple.
    player_num : int
        The current player whose turn it is.
    winners : List[int] (optional)
        The winners of the game.

    Notes
    -----
    :py:func:`blokus.blokus_env.start_gui` must first be called once
    before calling this function.

    See Also
    --------
    blokus.blokus_env.start_gui
    blokus.blokus_env.terminate_gui
    blokus.blokus_env.print_board
    """
    board, round_count, players = state
    gui.display_board(
        board_contents=board.board_contents,
        current_player=players[player_num],
        players=players,
        round_count=round_count,
        winners=winners,
    )
27,065
def processPhoto(photoInfo, panoramioreview=False, reviewer='',
                 override=u'', addCategory=u'', autonomous=False,
                 site=None):
    """Process a single Panoramio photo.

    Downloads the photo, skips it if a duplicate already exists on the
    wiki, builds a description, optionally asks the user to confirm via a
    Tk dialog, and uploads the file.

    @param photoInfo: dict of Panoramio photo metadata (must contain
        'photo_file_url').
    @param panoramioreview: whether to mark the file as panoramio-reviewed.
    @param reviewer: reviewer name used in the description.
    @param override: when non-empty, upload regardless of the license.
    @param addCategory: extra category to add to the description.
    @param autonomous: when True, skip the interactive confirmation dialog.
    @param site: target pywikibot site; defaults to Wikimedia Commons.
    @return: 1 if an upload was performed, 0 otherwise.
    """
    if not site:
        site = pywikibot.Site('commons', 'commons')

    if isAllowedLicense(photoInfo) or override:
        # Should download the photo only once
        photo = downloadPhoto(photoInfo.get(u'photo_file_url'))

        # Don't upload duplicate images, should add override option
        duplicates = findDuplicateImages(photo, site=site)
        if duplicates:
            pywikibot.output(u'Found duplicate image at %s' % duplicates.pop())
        else:
            filename = getFilename(photoInfo, site=site)
            pywikibot.output(filename)
            description = getDescription(photoInfo, panoramioreview,
                                         reviewer, override, addCategory)

            pywikibot.output(description)
            if not autonomous:
                # Let the user edit description/filename or skip entirely.
                (newDescription, newFilename, skip) = Tkdialog(
                    description, photo, filename).show_dialog()
            else:
                newDescription = description
                newFilename = filename
                skip = False
        # pywikibot.output(newPhotoDescription)

        # if (pywikibot.Page(title=u'File:'+ filename,
        #                    site=pywikibot.Site()).exists()):
        #     # I should probably check if the hash is the same and if not upload
        #     # it under a different name
        #     pywikibot.output(u'File:' + filename + u' already exists!')
        # else:
            # Do the actual upload
            # Would be nice to check before I upload if the file is already at
            # Commons
            # Not that important for this program, but maybe for derived
            # programs
            if not skip:
                bot = UploadRobot(photoInfo.get(u'photo_file_url'),
                                  description=newDescription,
                                  useFilename=newFilename,
                                  keepFilename=True,
                                  verifyDescription=False, site=site)
                bot.upload_image(debug=False)
                return 1
    return 0
27,066
def auto_apilado(datos, target, agrupacion, porcentaje=False):
    """Build a stacked bar chart of `target` values grouped by `agrupacion`.

    Args:
        datos (pandas.DataFrame): input data.
        target (str): column whose distinct values become the stacked series.
        agrupacion (str): column used to group along the x-axis.
        porcentaje (bool): if True, plot percentages instead of raw counts.

    Returns:
        tuple: (plotly Figure with one Bar trace per target value,
        DataFrame `tabla` with one column per target value).
    """
    total = datos[[target, agrupacion]].groupby(agrupacion).count()
    tabla = pd.DataFrame([])
    fig = go.Figure()

    # Choose the y-axis label once, up front.  Previously the count label
    # was re-assigned unconditionally after the percentage branch, so the
    # 'Porcentaje (Individuos)' title could never take effect.
    y_title = 'Porcentaje (Individuos)' if porcentaje else 'Conteo (Individuos)'

    # One trace (stacked segment) per distinct target value.
    for value in datos[target].unique():
        trace = datos[[target, agrupacion]].loc[datos[target] == value].groupby(agrupacion).count()
        if porcentaje:
            # Columns must keep the same name so the division aligns.
            trace = 100 * trace / total
        trace.rename(columns={target: str(value)}, inplace=True)
        tabla = pd.concat([tabla, trace], axis=1)

        fig.add_trace(go.Bar(
            x=tabla.index,
            y=tabla[str(value)],
            name=str(value),
            # marker_color='rgb(26, 118, 255)'
        ))

    fig.update_layout(
        title='Conteo de ' + str(target) + ' agrupado por ' + str(agrupacion),
        xaxis_tickfont_size=14,
        yaxis=dict(
            title=y_title,
            titlefont_size=16,
            tickfont_size=14,
        ),
        xaxis=dict(
            title=str(agrupacion)
        ))
    fig.update_layout(barmode='stack')
    return fig, tabla
27,067
def month_from_string(month_str: str) -> datetime.date:
    """Parse a hyphenated year-month string such as "2021-07" into a date.

    The day component of the returned date is always the 1st.
    """
    parsed = datetime.datetime.strptime(month_str, "%Y-%m")
    return parsed.date()
27,068
def get_isomorphic_signature(graph: DiGraph) -> str:
    """Generate a unique id shared by all isomorphic digraphs, via pynauty."""
    adjacency = nx.to_dict_of_lists(graph)
    nauty_graph = pynauty.Graph(
        len(graph.nodes), directed=True, adjacency_dict=adjacency)
    # The nauty certificate is canonical across isomorphic graphs.
    return hashlib.md5(pynauty.certificate(nauty_graph)).hexdigest()
27,069
def rebuild_cluster():
    """Signal other etcd units to rejoin new cluster."""
    log('Requesting peer members to rejoin cluster')
    # A fresh random token guarantees peers observe a leader-data change.
    hookenv.leader_set(force_rejoin=uuid4().hex)
27,070
def watchdog(timeout: int | float, function: Callable, *args, **kwargs) -> Any:
    """Time-limited execution for python function. TimeoutError raised if not finished during defined time.

    Args:
        timeout (int | float): Max time execution in seconds.
        function (Callable): Function that will be evaluated.
        *args: Args for the function.
        *kwargs: Kwargs for the function.

    Raises:
        TimeoutError: If defined time runs out.
        RuntimeError: If function call with defined params fails.

    Returns:
        Any: Depends on used function.

    Examples:
        >>> import time
        >>> def sleep(sec):
        ...     for _ in range(sec):
        ...         time.sleep(1)
        >>> watchdog(1, sleep, 0)
        >>> watchdog(1, sleep, 10)
        Traceback (most recent call last):
        TimeoutError: ...
    """
    old_tracer = sys.gettrace()

    def tracer(frame, event, arg, start=time.time()):
        """Raise once wall-clock time since `start` exceeds the timeout."""
        now = time.time()
        if now > start + timeout:
            raise TimeoutError("Time exceeded")
        # Install the local trace function only on new calls.
        return tracer if event == "call" else None

    try:
        sys.settrace(tracer)
        result = function(*args, **kwargs)

    except TimeoutError as err:
        # Chain the original error so the traceback keeps the real origin.
        # (The redundant settrace() reset here was removed; `finally`
        # already restores the old tracer on every path.)
        raise TimeoutError(
            mylogging.return_str(
                "Timeout defined in watchdog exceeded.",
                caption="TimeoutError",
                level="ERROR",
            )
        ) from err

    except Exception as err:
        raise RuntimeError(
            mylogging.return_str(
                f"Watchdog with function {function.__name__}, args {args} and kwargs {kwargs} failed."
            )
        ) from err

    finally:
        sys.settrace(old_tracer)

    return result
27,071
def _encode_raw_string(str):
    """Encodes a string using the above encoding format.

    Args:
        str (string): The string to be encoded.  (Note: the parameter name
            shadows the builtin `str` inside this function; kept for
            backward compatibility with keyword callers.)

    Returns:
        An encoded version of the input string.
    """
    encoded = _replace_all(str, _substitutions)
    return encoded
27,072
def char_pred(pred: Callable[[int], bool]) -> Parser:
    """Parses a single character passing a given predicate.

    Args:
        pred: predicate applied to the character's integer code.

    Returns:
        A parser that yields the character if `pred` accepts it.

    Raises:
        Failure: (inside the parser) if the character fails the predicate.
    """
    def f(x):
        if pred(x):
            return value(x)
        else:
            # Both fragments must be f-strings; previously the second
            # literal lacked the `f` prefix, so `{pred.__name__}` appeared
            # verbatim in the error message instead of the predicate name.
            raise Failure(f"Character '{chr(x)}' fails predicate"
                          f" `{pred.__name__}`")

    return item >> f
27,073
def test_missing_proteins():
    """Program must fail with a usage error when --cdhit is omitted."""
    exit_code, output = getstatusoutput(f'{prg} -p {proteins}')
    assert exit_code > 0
    assert re.search('the following arguments are required: -c/--cdhit', output)
27,074
def convert_bound(bound, coord_max, coord_var):
    """
    This function will return a converted bound which which matches the
    range of the given input file.

    Parameters
    ----------
    bound : np.array
        1-dimensional 2-element numpy array which represent the lower
        and upper bounding box on this coordinate, respectively.
    coord_max : integer
        The max value which is possible given this coordinate. For
        example, the max for longitude is 360.
    coord_var : xarray.DataArray
        The xarray variable for some coordinate.

    Returns
    -------
    np.array
        1-dimensional 2-element number array which represents the lower
        and upper bounding box on this coordinate and has been converted
        based on the valid bounds coordinate range of the dataset.

    Notes
    -----
    Assumption that 0 is always on the prime meridian/equator.
    """
    scale = coord_var.attrs.get('scale_factor', 1.0)
    offset = coord_var.attrs.get('add_offset', 0.0)
    valid_min = coord_var.attrs.get('valid_min', None)

    if valid_min is None or valid_min > 0:
        # If coord var doesn't contain valid min, attempt to find
        # manually. Note: Given the perfect storm, this could still fail
        # to find the actual bounds.

        # Filter out _FillValue from data before calculating min and max
        fill_value = coord_var.attrs.get('_FillValue', None)
        var_values = coord_var.values
        if fill_value:
            var_values = np.where(var_values != fill_value, var_values, np.nan)
        var_min = np.nanmin(var_values)
        var_max = np.nanmax(var_values)

        # Data entirely within [0, coord_max] (in raw/packed units)
        # implies the 0-based convention.
        if 0 <= var_min <= var_max <= (coord_max / scale):
            valid_min = 0

    # If the file coords are 0 --> max
    if valid_min == 0:
        # Wrap negative bounds into the [0, coord_max) range.
        bound = (bound + coord_max) % coord_max

        # If the right/top bound is 0, set to max.
        if bound[1] == 0:
            bound[1] = coord_max

        # If edges are the same, assume it wraps and return all
        if bound[0] == bound[1]:
            bound = np.array([0, coord_max])

    # If the file longitude is -coord_max/2 --> coord_max/2
    if valid_min != 0:
        # If edges are the same, assume it wraps and return all
        if bound[0] == bound[1]:
            bound = np.array([-(coord_max / 2), coord_max / 2])

    # Calculate scale and offset so the bounds match the coord data
    return apply_scale_offset(scale, offset, bound)
27,075
def tk_window_focus():
    """Return true if focus maintenance under TkAgg on win32 is on.

    This currently works only for python.exe and IPython.exe.
    Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on.
    """
    # Any backend other than TkAgg short-circuits to False.
    return rcParams['backend'] == 'TkAgg' and rcParams['tk.window_focus']
27,076
def multi_qubit_decay_bellstate():
    """Test multiqubit decay.

    Prepares a Bell-like state with RY(pi/3) + CNOT on a noisy
    density-matrix QVM, and cross-checks the simulator's output density
    matrix against a manual application of the same relaxation (T1) and
    dephasing (T2) Kraus channels to each qubit after each gate.
    """
    program = Program().inst([RYgate(np.pi/3)(0), CNOTgate(0, 1)])
    noise = NoiseModel(ro_fidelity=1.0)
    qvm = QVMConnection(type_trans='density', noise_model=noise)

    # Start in |00><00|.
    initial_density = np.zeros((4, 4), dtype=complex)
    initial_density[0, 0] = 1.0
    # CNOT with qubit 0 as control: I (x) P0 + X (x) P1.
    cnot_01 = np.kron(I, P0) + np.kron(X, P1)

    # Decay probabilities over a single-qubit gate duration.
    p1 = 1 - np.exp(-noise.gate_time_1q/noise.T1)
    p2 = 1 - np.exp(-noise.gate_time_1q/noise.T2)
    kraus_ops_1 = noise_gates['relaxation'](p1)
    kraus_ops_2 = noise_gates['dephasing'](p2)

    # Apply RY(pi/3) to qubit 0 (identity on qubit 1).
    gate_1 = np.kron(np.eye(2), RY(np.pi/3))
    state = gate_1.dot(initial_density).dot(np.conj(gate_1).T)
    # Relaxation channel on each qubit.
    for ii in range(2):
        new_density = np.zeros_like(state)
        for kop in kraus_ops_1:
            operator = lifted_gate(ii, kop, 2).todense()
            new_density += operator.dot(state).dot(np.conj(operator).T)
        state = new_density

    # Dephasing channel on each qubit.
    for ii in range(2):
        new_density = np.zeros_like(state)
        for kop in kraus_ops_2:
            operator = lifted_gate(ii, kop, 2).todense()
            new_density += operator.dot(state).dot(np.conj(operator).T)
        state = new_density

    state = cnot_01.dot(state).dot(cnot_01.T)

    # Decay probabilities over a two-qubit gate duration (after the CNOT).
    p1 = 1 - np.exp(-noise.gate_time_2q/noise.T1)
    p2 = 1 - np.exp(-noise.gate_time_2q/noise.T2)
    kraus_ops_1 = noise_gates['relaxation'](p1)
    kraus_ops_2 = noise_gates['dephasing'](p2)
    for ii in range(2):
        new_density = np.zeros_like(state)
        for kop in kraus_ops_1:
            operator = lifted_gate(ii, kop, 2).todense()
            new_density += operator.dot(state).dot(np.conj(operator).T)
        state = new_density

    for ii in range(2):
        new_density = np.zeros_like(state)
        for kop in kraus_ops_2:
            operator = lifted_gate(ii, kop, 2).todense()
            new_density += operator.dot(state).dot(np.conj(operator).T)
        state = new_density

    # The simulated density matrix must match the manual computation.
    density = qvm.density(program)
    assert np.allclose(density.todense(), state)
27,077
def execute_by_key_event(code):
    """Send a key event to the device via `adb shell input keyevent`.

    :param code: int, Android KeyEvent code
    :return: None
    """
    command = [PATH + 'adb', 'shell', 'input', 'keyevent', str(code)]
    # NOTE(review): the Popen handle is discarded without wait(); the
    # command runs asynchronously and its exit status is never checked.
    con = Popen(command, env=ENV, stdout=PIPE, stderr=PIPE)
27,078
def cc_filter_set_variables(operator, bk_biz_id, bk_obj_id, bk_obj_value):
    """Filter CMDB sets by a property id and one or more property values.

    :param operator: the user performing the request
    :param bk_biz_id: business ID
    :param bk_obj_id: ID of the property to filter on
    :param bk_obj_value: comma-separated property value(s) to filter with
    :return: tuple of (list of matching set dicts, set of attribute keys
        common to every match); ([], set()) when nothing matches
    :raises ApiRequestError: if the cc.search_set API call fails
    """
    client = get_client_by_user(operator)
    obj_value_list = bk_obj_value.split(",")
    results = []
    # Query the API once per property value and accumulate the matches.
    for obj_value in obj_value_list:
        kwargs = {
            "bk_biz_id": int(bk_biz_id),
            "condition": {bk_obj_id: obj_value},
        }
        result = client.cc.search_set(kwargs)
        if not result["result"]:
            err_msg = _(
                "[cc_filter_set_variables] 调用 cc.search_set 接口获取集群失败, "
                "kwargs={kwargs}, result={result}"
            ).format(kwargs=kwargs, result=result)
            logger.error(err_msg)
            raise ApiRequestError(err_msg)

        results += result["data"]["info"]

    if not results:
        return [], set()
    # Keep only the attribute keys present in every matched set.
    bk_attributes = reduce(set.intersection, [set(result.keys()) for result in results])
    return results, bk_attributes
27,079
def distance(mags, spt, spt_unc):
    """Estimate photometric distances with Monte Carlo error propagation.

    Args:
        mags: dict mapping filter name (e.g. 'F110W') to a
            (apparent magnitude, magnitude uncertainty) pair.
        spt: spectral type (converted via make_spt_number).
        spt_unc: 1-sigma uncertainty on the spectral type.

    Returns:
        dict with, for every filter k in `mags`, 'dist'+k (median distance)
        and 'dist_er'+k (standard deviation of the sampled distances).
    """
    res = {}
    relations = POLYNOMIAL_RELATIONS['abs_mags']
    nsample = 1000
    # Convert the spectral type once, outside the loop.
    spt = make_spt_number(spt)
    for k in mags.keys():
        # Intrinsic scatter of the absolute-magnitude relation for filter k.
        absmag_scatter = relations[k][1]
        spts = np.random.normal(spt, spt_unc, nsample)
        # Trim out spectral types outside the range of validity (15-40).
        # NOTE: this previously used `&`, which can never be True, so no
        # samples were ever trimmed; `|` implements the stated intent.
        mask = (spts < 15) | (spts > 40)
        absmags = (relations[k][0])(spts)[~mask]
        # Total uncertainty: relation scatter + measurement error in quadrature.
        mag_unc = (absmag_scatter**2 + mags[k][1]**2)**0.5
        relmags = np.random.normal(mags[k][0], mag_unc, nsample)[~mask]
        dists = get_distance(absmags, relmags)
        res[str('dist') + k] = np.nanmedian(dists)
        res[str('dist_er') + k] = np.nanstd(dists)
    return res
27,080
def loss_function(recon_x, x, mu, logvar, cl, target, natural):
    """
    Universally calculates the loss, be it for training or testing. Hardcoded to use mse_loss.
    Change below to binary_cross_entropy if desired.

    @param recon_x: images reconstructed by the decoder(s)
    @param x: original images for comparison
    @param mu: latent mean
    @param logvar: latent log variance
    @param cl: cell count predictions for given images
    @param target: cell count label for given labeled images
    @param natural: bool, true if x is of type natural
    @return: float, float, float, the summed loss as well as the Kullback-Leibler divergence and
        the loss of the regressor in separate
    """
    # Per-domain (natural vs. synthetic) running totals live in module globals.
    global decoder_nat_loss, decoder_syn_loss, KLD_syn_loss, KLD_nat_loss, regressor_nat, regressor_syn

    # decoder_loss = F.binary_cross_entropy(recon_x, x.view(-1, 1, img_size, img_size), reduction='sum') * decoder_l_factor
    decoder_loss = F.mse_loss(recon_x, x) * decoder_l_factor

    # see Appendix B from VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    beta = 1 / (batch_size * boundary_dim)  # disentanglement factor#extremely small
    KLD = -0.5 * beta * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) * KLD_l_factor

    regressor_loss = F.mse_loss(cl, target.view(-1, 1)) * regressor_l_factor
    # The regressor only contributes after `regressor_start` epochs.
    if epoch < regressor_start:
        regressor_loss *= 0

    if natural:
        decoder_nat_loss += decoder_loss
        KLD_nat_loss += KLD
        regressor_nat += regressor_loss
    else:
        decoder_syn_loss += decoder_loss
        KLD_syn_loss += KLD
        regressor_syn += regressor_loss

    # Abort the whole run if the KL term blows up.
    if KLD > 1e10:
        playSound()
        print('KLD diverged')
        # print(KLD)
        # print(torch.max(logvar))
        # print(logvar)
        # print(mu)
        # print(beta)
        sys.exit('KLD diverged')

    return decoder_loss + KLD + regressor_loss, KLD, regressor_loss
27,081
def foo(X):
    """Evaluate sin(r)/r for each point, with r twice the Euclidean norm.

    Args:
        X: iterable of numeric vectors.

    Returns:
        list of float: sin(r)/r per vector, where r = 2 * ||x||; by the
        original convention the zero vector maps to 0 (not the limit 1).
    """
    values = []
    for vec in X:
        radius = 2 * math.sqrt(sum(component * component for component in vec))
        values.append(math.sin(radius) / radius if radius != 0 else 0)
    return values
27,082
def measure_curv(left_fit, right_fit, plot_points, ym_per_pix, xm_per_pix):
    """Compute lane curvature radii in meters from pixel-space polynomials.

    Args:
        left_fit: coefficients [A, B, ...] of the left-lane polynomial
            x = A*y^2 + B*y + C fitted in pixel space.
        right_fit: coefficients of the right-lane polynomial.
        plot_points: y pixel positions; the maximum (bottom of the image,
            start of the lane) is where curvature is evaluated.
        ym_per_pix: meters per pixel along y.
        xm_per_pix: meters per pixel along x.

    Returns:
        tuple: (mean curvature radius, left radius, right radius) in meters.
    """
    # Evaluate curvature at the start of the lane (largest y value).
    y_curve = np.max(plot_points)

    # Rescale the polynomial coefficients from pixels to meters.
    cofA_left = xm_per_pix / (ym_per_pix**2) * left_fit[0]
    cofB_left = (xm_per_pix / ym_per_pix) * left_fit[1]
    cofA_right = xm_per_pix / (ym_per_pix**2) * right_fit[0]
    cofB_right = (xm_per_pix / ym_per_pix) * right_fit[1]

    # Radius of curvature: R = (1 + (2Ay + B)^2)^(3/2) / |2A|.
    # The exponent was previously written as (2/2), which collapses to 1
    # and systematically misreports the radius.
    left_curv_m = ((1 + (2*cofA_left*y_curve*ym_per_pix + cofB_left)**2)**(3/2)) / np.absolute(2*cofA_left)
    right_curv_m = ((1 + (2*cofA_right*y_curve*ym_per_pix + cofB_right)**2)**(3/2)) / np.absolute(2*cofA_right)

    # Mean curvature (curvature at the middle of the lane).
    curv_mean = (left_curv_m + right_curv_m) / 2

    return curv_mean, left_curv_m, right_curv_m
27,083
def get_external_dns(result):
    """
    Function to validate the ip address. Used to extract EXTERNAL_DNS
    server information.

    Prompts the user (y/n) for public external DNS servers; on 'y',
    repeatedly asks for comma-separated IPs (or 's' to skip) and
    validates them via ip_validation().

    Args:
        result(dict): Input result dictionary with all network parameters
            and boolean flags

    Returns:
        result(dict): The updated result dictionary with network
            parameters ('external_dns_flag' set to 'y', 'n' or 's', plus
            whatever ip_validation() records).  Returns None if an
            unexpected error occurred (it is only logged).

    Raises:
        Exception on Invalid IP addresses
    """
    logging.info('[%s] - Collect the external dns.', datetime.datetime.today())
    try:
        is_answer = False
        # Loop until the user answers the yes/no question with y or n.
        while not is_answer:
            external_dns = case_check(input("Do you have public EXTERNAL DNS IP servers? y or n \n"))
            if external_dns == 'n' or external_dns == 'y':
                result['external_dns_flag'] = external_dns
                is_answer = True
                if external_dns == 'y':
                    is_internal = False
                    # Loop until valid IPs are entered or the user skips.
                    while not is_internal:
                        external = case_check(
                            input("Enter the EXTERNAL DNS public IP address(s) comma separated or 's' to skip \n"))
                        if external == 's':
                            result['external_dns_flag'] = 's'
                            logging.info("EXTERNAL_DNS option skipped by user ")
                            break
                        if len(external) > 0:
                            result, is_internal = ip_validation('EXTERNAL_DNS', external, result, is_internal)
            else:
                print(f'{Style.RED}Wrong value! Please input y or n{Style.RESET}')
        return result
    except Exception as error:
        # NOTE(review): errors are swallowed here and only logged, which
        # makes the function return None — callers should handle that.
        logging.error(error)
27,084
def save_json(data_coco, json_file):
    """Save COCO data to a JSON file.

    Args:
        data_coco: JSON-serializable COCO data (typically a dict).
        json_file: destination file path.
    """
    # Use a context manager so the handle is always closed; the previous
    # bare open() inside json.dump() leaked the file descriptor.
    with open(json_file, 'w') as f:
        json.dump(data_coco, f, indent=4)  # indent=4 for readable output
    print("save file:{}".format(json_file))
27,085
def __process_agent(agent_param):
    """Get the agent id and namespace from an input param.

    A trailing 'TEXT' marks a raw-text grounding; otherwise the param is
    '<id>@<namespace>' or a bare name treated as an HGNC symbol (which is
    then resolved to an HGNC id).
    """
    if agent_param.endswith('TEXT'):
        # Strip the 'TEXT' suffix; no further resolution needed.
        return agent_param[:-5], 'TEXT'

    pieces = agent_param.split('@')
    if len(pieces) == 2:
        ag, ns = pieces
    elif len(pieces) == 1:
        ag, ns = agent_param, 'HGNC-SYMBOL'
    else:
        raise DbAPIError('Unrecognized agent spec: \"%s\"' % agent_param)

    if ns == 'HGNC-SYMBOL':
        original_ag = ag
        ag = hgnc_client.get_hgnc_id(original_ag)
        if ag is None and 'None' not in agent_param:
            raise DbAPIError('Invalid agent name: \"%s\"' % original_ag)
        ns = 'HGNC'

    return ag, ns
27,086
def inference(predictions_op, true_labels_op, display, sess):
    """
    Perform inference per batch on pre-trained model.
    This function performs inference and computes the CER per utterance.

    Args:
        predictions_op: Prediction op
        true_labels_op: True Labels op
        display: print sample predictions if True
        sess: default session to evaluate the ops.

    Returns:
        char_err_rate: list of CER per utterance.
    """
    char_err_rate = []
    # Perform inference of batch worth of data at a time.
    [predictions, true_labels] = sess.run([predictions_op,
                                           true_labels_op])
    pred_label = sparse_to_labels(predictions[0][0])
    actual_label = sparse_to_labels(true_labels)
    # CER = edit distance normalized by reference-label length.
    for (label, pred) in zip(actual_label, pred_label):
        char_err_rate.append(distance(label, pred)/len(label))

    if display:
        # Print sample responses
        for i in range(ARGS.batch_size):
            print(actual_label[i] + ' vs ' + pred_label[i])
    return char_err_rate
27,087
def run_temp_dir(args):
    """Create a scratch directory for this session and launch the server.

    The directory is removed automatically when the server returns.
    """
    with tempfile.TemporaryDirectory(prefix="scelvis.tmp") as tmpdir:
        logger.info("Creating temporary directory %s", tmpdir)
        # Publish the scratch path via global settings before starting.
        settings.TEMP_DIR = tmpdir
        run_server(args)
27,088
def callback_with_answer_and_close_window(
    on_change: Optional[OnQuestionChangeCallback], window: Window
) -> OnQuestionChangeCallback:
    """Create a callback that calls both the on_change and window.close methods."""

    def wrapped(answer: Any) -> None:
        if on_change is not None:
            on_change(answer)
        # Kill rather than close: close() would trigger the window's
        # on_cancel handler (as the close button does), which we must avoid.
        window.kill()

    return wrapped
27,089
def nlu_tuling(question, loc="上海"):
    """Query the Tuling chatbot API and format its answer as plain text.

    Args:
        question: user utterance to send to the API.
        loc: location hint passed to the API (defaults to Shanghai).

    Returns:
        str or None: a formatted answer, or None when the request fails
        or the API returns an unsupported response code.
    """
    url = 'http://www.tuling123.com/openapi/api'
    data = {
        'key': "fd2a2710a7e01001f97dc3a663603fa1",
        'info': question,
        "loc": loc,
        'userid': mac_address
    }
    try:
        r = json.loads(requests.post(url=url, data=data).text)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; request/decode failures still yield None.
        return

    if not r['code'] in (100000, 200000, 302000, 308000, 313000, 314000):
        return

    if r['code'] == 100000:  # plain text answer
        return '\n'.join([r['text'].replace('<br>', '\n')])
    elif r['code'] == 200000:  # link answer
        return '\n'.join([r['text'].replace('<br>', '\n'), r['url']])
    elif r['code'] == 302000:  # news list
        l = [r['text'].replace('<br>', '\n')]
        for n in r['list']:
            l.append('%s - %s' % (n['article'], n['detailurl']))
        return '\n'.join(l)
    elif r['code'] == 308000:  # recipe list
        l = [r['text'].replace('<br>', '\n')]
        for n in r['list']:
            l.append('%s - %s' % (n['name'], n['detailurl']))
        return '\n'.join(l)
    elif r['code'] == 313000:  # children's song
        return '\n'.join([r['text'].replace('<br>', '\n')])
    elif r['code'] == 314000:  # poetry
        return '\n'.join([r['text'].replace('<br>', '\n')])
27,090
def update_hpo():
    """Update human phenotype ontology."""
    hpo_url = 'http://purl.obolibrary.org/obo/hp.obo'
    OboClient.update_resource(path, hpo_url, 'hp', remove_prefix=False)
27,091
def test_auth_middleware(setup_user, client):
    """Log in through the accounts endpoint and verify protected pages load."""
    c = Client()
    res = c.post(
        "/accounts/login",
        {"email": "admin@domain.com", "password": "admin"},
        follow=True,
    )
    # This comparison was previously a bare expression and never asserted.
    assert res.status_code == 200
    res = c.get("/accounts/login", follow=True)
    assert res.status_code == 200
    path = reverse("connections")
    res = c.get(path)
    assert res.status_code == 200
27,092
def Lines(
    apply_clip: bool = True,
    close_path: bool = False,
    color: ndarray = None,
    colors: list = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd",
                    "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"],
    curves_subset: list = [],
    display_legend: bool = False,
    enable_hover: bool = True,
    fill: str = "none",
    fill_colors: list = [],
    fill_opacities: list = [],
    interactions: dict = {"hover": "tooltip"},
    interpolation: str = "linear",
    labels: list = [],
    labels_visibility: str = "none",
    line_style: str = "solid",
    marker: str = None,
    marker_size: int = 64,
    opacities: list = [],
    preserve_domain: dict = {},
    scales: dict = {},
    scales_metadata: dict = {
        "x": {"orientation": "horizontal", "dimension": "x"},
        "y": {"orientation": "vertical", "dimension": "y"},
        "color": {"dimension": "color"},
    },
    selected: ndarray = None,
    selected_style: dict = {},
    stroke_width: float = 2.0,
    tooltip: Element[ipywidgets.widgets.domwidget.DOMWidget] = None,
    tooltip_location: str = "mouse",
    tooltip_style: dict = {"opacity": 0.9},
    unselected_style: dict = {},
    visible: bool = True,
    x: ndarray = np.array([]),
    y: ndarray = np.array([]),
    on_apply_clip: typing.Callable[[bool], Any] = None,
    on_close_path: typing.Callable[[bool], Any] = None,
    on_color: typing.Callable[[ndarray], Any] = None,
    on_colors: typing.Callable[[list], Any] = None,
    on_curves_subset: typing.Callable[[list], Any] = None,
    on_display_legend: typing.Callable[[bool], Any] = None,
    on_enable_hover: typing.Callable[[bool], Any] = None,
    on_fill: typing.Callable[[str], Any] = None,
    on_fill_colors: typing.Callable[[list], Any] = None,
    on_fill_opacities: typing.Callable[[list], Any] = None,
    on_interactions: typing.Callable[[dict], Any] = None,
    on_interpolation: typing.Callable[[str], Any] = None,
    on_labels: typing.Callable[[list], Any] = None,
    on_labels_visibility: typing.Callable[[str], Any] = None,
    on_line_style: typing.Callable[[str], Any] = None,
    on_marker: typing.Callable[[str], Any] = None,
    on_marker_size: typing.Callable[[int], Any] = None,
    on_opacities: typing.Callable[[list], Any] = None,
    on_preserve_domain: typing.Callable[[dict], Any] = None,
    on_scales: typing.Callable[[dict], Any] = None,
    on_scales_metadata: typing.Callable[[dict], Any] = None,
    on_selected: typing.Callable[[ndarray], Any] = None,
    on_selected_style: typing.Callable[[dict], Any] = None,
    on_stroke_width: typing.Callable[[float], Any] = None,
    on_tooltip: typing.Callable[[Element[ipywidgets.widgets.domwidget.DOMWidget]], Any] = None,
    on_tooltip_location: typing.Callable[[str], Any] = None,
    on_tooltip_style: typing.Callable[[dict], Any] = None,
    on_unselected_style: typing.Callable[[dict], Any] = None,
    on_visible: typing.Callable[[bool], Any] = None,
    on_x: typing.Callable[[ndarray], Any] = None,
    on_y: typing.Callable[[ndarray], Any] = None,
) -> Element[bqplot.marks.Lines]:
    """Lines mark.

    In the case of the Lines mark, scales for 'x' and 'y' MUST be provided.

    Attributes
    ----------
    icon: string (class-level attribute)
        Font-awesome icon for the respective mark
    name: string (class-level attribute)
        User-friendly name of the mark
    colors: list of colors (default: CATEGORY10)
        List of colors of the Lines. If the list is shorter than the number
        of lines, the colors are reused.
    close_path: bool (default: False)
        Whether to close the paths or not.
    fill: {'none', 'bottom', 'top', 'inside', 'between'}
        Fill in the area defined by the curves
    fill_colors: list of colors (default: [])
        Fill colors for the areas. Defaults to stroke-colors when no
        color provided
    opacities: list of floats (default: [])
        Opacity for the lines and patches. Defaults to 1 when the list is too
        short, or the element of the list is set to None.
    fill_opacities: list of floats (default: [])
        Opacity for the areas. Defaults to 1 when the list is too short, or
        the element of the list is set to None.
    stroke_width: float (default: 2)
        Stroke width of the Lines
    labels_visibility: {'none', 'label'}
        Visibility of the curve labels
    curves_subset: list of integers or None (default: [])
        If set to None, all the lines are displayed. Otherwise, only the
        items in the list will have full opacity, while others will be faded.
    line_style: {'solid', 'dashed', 'dotted', 'dash_dotted'}
        Line style.
    interpolation: {'linear', 'basis', 'cardinal', 'monotone'}
        Interpolation scheme used for interpolation between the data points
        provided. Please refer to the svg interpolate documentation for
        details about the different interpolation schemes.
    marker: {'circle', 'cross', 'diamond', 'square', 'triangle-down', 'triangle-up', 'arrow', 'rectangle', 'ellipse'}
        Marker shape
    marker_size: nonnegative int (default: 64)
        Default marker size in pixels

    Data Attributes

    x: numpy.ndarray (default: [])
        abscissas of the data points (1d or 2d array)
    y: numpy.ndarray (default: [])
        ordinates of the data points (1d or 2d array)
    color: numpy.ndarray (default: None)
        colors of the different lines based on data. If it is [], then the
        colors from the colors attribute are used. Each line has a single
        color and if the size of colors is less than the number of lines,
        the remaining lines are given the default colors.

    Notes
    -----
    The fields which can be passed to the default tooltip are:
        name: label of the line
        index: index of the line being hovered on
        color: data attribute for the color of the line
    The following are the events which can trigger interactions:
        click: left click of the mouse
        hover: mouse-over an element
    The following are the interactions which can be linked to the above events:
        tooltip: display tooltip
    """
    # Collect only the arguments the caller actually overrode.
    kwargs: Dict[Any, Any] = without_default(Lines, locals())
    # Wrap the bqplot widget in a react-style element for composition.
    widget_cls = bqplot.marks.Lines
    comp = react.core.ComponentWidget(widget=widget_cls)
    return Element(comp, **kwargs)
27,093
def parse_currencies(row):
    """Clean up the date columns, converting them to arrow objects.

    (NOTE(review): the original docstring claimed currency-to-float
    conversion, but the body only parses the three 'DD.MM.YYYY HH:mm'
    date columns below.)

    Unparseable values are warned about and replaced with None, except
    the literal '0000-00-00 00:00:00' placeholder which is left as-is.
    """
    date_columns = (
        'Datum van laatste bijwerking',
        'Einddatum',
        'Begindatum'
    )
    for key in date_columns:
        try:
            row[key] = arrow.get(row[key], 'DD.MM.YYYY HH:mm')
        except ParserError:
            if row[key] != '0000-00-00 00:00:00':
                message = 'Could not parse %s to a date, returning None'
                logging.warning(message, row[key])
                row[key] = None
    return row
27,094
def as_tuple(item, type=None, length=None):
    """
    Force item to a tuple.

    Partly extracted from: https://github.com/OP2/PyOP2/.
    """
    if item is None:
        # None maps to the empty tuple.
        t = ()
    elif isinstance(item, (str, sympy.Function)):
        # Strings and sympy Functions are atomic: wrap, don't iterate.
        t = (item,)
    else:
        try:
            # Convert iterable to tuple...
            t = tuple(item)
        except (TypeError, NotImplementedError):
            # ... or replicate the single item.
            t = (item,) * (length or 1)
    if length and len(t) != length:
        raise ValueError("Tuple needs to be of length %d" % length)
    if type and any(not isinstance(i, type) for i in t):
        raise TypeError("Items need to be of type %s" % type)
    return t
27,095
def messages(request):
    """
    Return a lazy 'messages' context variable as well as
    'DEFAULT_MESSAGE_LEVELS'.
    """
    context = {"messages": get_messages(request=request)}
    context["DEFAULT_MESSAGE_LEVELS"] = DEFAULT_LEVELS
    return context
27,096
def hash_graph(graph):
    """
    A hash value of the tupelized version of graph.

    Args:
        graph (NetworkX graph): A graph

    Returns:
        int: A hash value of a graph.

    Example:
        >>> g = dlib.sample(5)
        >>> g.nodes
        NodeView((0, 1, 2, 3, 4))
        >>> g.edges
        EdgeView([(0, 1), (0, 3), (1, 2), (1, 3), (2, 3)])
        >>> glib.hash_graph(g)
        249771633555694270
    """
    # Hash the string form of the canonical tuple representation.
    tuple_repr = str(graph_to_tuple(graph))
    return hash(tuple_repr)
27,097
def test_get_api_url(test_dict: FullTestDict):
    """
    - GIVEN a list of words
    - WHEN API urls are generated
    - THEN check the urls are encoded and correct
    """
    words = convert_list_of_str_to_kaki(test_dict['input'])
    expected_sections = test_dict['jisho']['expected_sections']

    for word in words:
        expected_url = expected_sections[word]["url"]
        assert jisho.get_api_url(word) == expected_url
27,098
def deserialize(rank: str, suit: str) -> Card.Name:
    """
    Convert a serialized card string to a `Card.Name`.

    Parameters
    ----------
    rank : str
        A, 2, 3, ..., 10, J, Q, K
    suit : str
        C, D, H, S
    """
    suits = dict(C=Suit.CLUBS, D=Suit.DIAMONDS, H=Suit.HEARTS, S=Suit.SPADES)
    return Card.Name(_map_rank(rank), suits[suit])
27,099