Dataset columns:
  content: string (lengths 35 to 762k)
  sha1: string (length 40)
  id: int64 (0 to 3.66M)
from typing import Callable, Iterable, TypeVar

T = TypeVar("T")  # fixed: the original `from re import T` imported the re.TEMPLATE flag, not a type variable


def space(fn: Callable[[State], T], verbose: bool = False) -> Iterable[T]:
    """
    Return an iterable that generates values from ``fn``, fully exhausting
    the state space.

    During iteration, the function ``fn`` is called repeatedly with a
    :class:`~exhaust.State` instance as its only argument.

    :param fn: The function to generate values from.
    :param verbose: If True, print the state of the generator.
    """
    return SpaceIterable(fn, verbose=verbose)
b901a3936b6e1020db123bce9f72b600117f5825
3,638,300
import json


def get_menu_as_json(menu):
    """Build a tree-like JSON structure from the top menu:
    the top menu items, their children and their grandchildren.
    """
    top_items = menu.items.filter(parent=None)
    menu_data = []
    for item in top_items:
        top_item_data = get_menu_item_as_dict(item)
        top_item_data["child_items"] = []
        children = item.children.all()
        for child in children:
            child_data = get_menu_item_as_dict(child)
            grand_children = child.children.all()
            grand_children_data = [
                get_menu_item_as_dict(grand_child)
                for grand_child in grand_children
            ]
            child_data["child_items"] = grand_children_data
            top_item_data["child_items"].append(child_data)
        menu_data.append(top_item_data)
    return json.dumps(menu_data)
f191d883f44b5cbed729ebcee7670ba99e28d941
3,638,301
import numpy


def vortex_contribution_normal(panels):
    """
    Builds the vortex contribution matrix for the normal velocity.

    Parameters
    ----------
    panels: 1D array of Panel objects
        List of panels.

    Returns
    -------
    A: 2D Numpy array of floats
        Vortex contribution matrix.
    """
    A = numpy.empty((panels.size, panels.size), dtype=float)
    # vortex contribution on a panel from itself
    numpy.fill_diagonal(A, 0.0)
    # vortex contribution on a panel from others
    for i, panel_i in enumerate(panels):
        for j, panel_j in enumerate(panels):
            if i != j:
                A[i, j] = -0.5 / numpy.pi * integral(panel_i.xc, panel_i.yc,
                                                     panel_j,
                                                     numpy.sin(panel_i.beta),
                                                     -numpy.cos(panel_i.beta))
    print(A)
    return A
e5089509646be80307210cad528357d3f85774e9
3,638,302
def find_template(raw, name):
    """Return Template node with given name, or None if there is no such
    template."""
    e = Expander('', wikidb=DictDB())
    todo = [parse(raw, replace_tags=e.replace_tags)]
    while todo:
        n = todo.pop()
        if isinstance(n, basestring):
            continue
        if isinstance(n, Template) and isinstance(n[0], basestring):
            if n[0] == name:
                return n
        todo.extend(n)
ec74c099a810126b798c83fdac50bbb3d79c37cd
3,638,303
def app():
    """Required by pytest-tornado's http_server fixture."""
    return tornado.web.Application()
556ac2b69eaca3d8c4f934fba0deea820ab4e1ff
3,638,304
import inspect


def is_bound_builtin_method(meth):
    """Helper returning True if meth is a bound built-in method."""
    return (inspect.isbuiltin(meth)
            and getattr(meth, '__self__', None) is not None
            and getattr(meth.__self__, '__class__', None))
a7a45f0f519119d795e91723657a1333eb6714e4
3,638,305
import numpy as np  # added: the snippet uses np but did not import it


def normalize(adj):
    """Row-normalize sparse matrix."""
    rowsum = np.array(adj.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = np.diag(r_inv)
    mx = r_mat_inv.dot(adj)
    return mx
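    # Illustrative usage sketch (not part of the original dataset entry),
    # shown with a dense array for clarity:
    # adj = np.array([[1., 1.], [0., 2.]])   # row sums: 2 and 2
    # normalize(adj) -> [[0.5, 0.5],
    #                    [0. , 1. ]]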
c342890befeddd3db01403914e80b9e89dc4f20d
3,638,306
def get_recommendation_and_prediction_from_text(input_text, num_feats=10):
    """
    Gets a score and recommendations that can be displayed in the Flask app.

    :param input_text: input string
    :param num_feats: number of features to suggest recommendations for
    :return: current score along with recommendations
    """
    global MODEL
    feats = get_features_from_input_text(input_text)
    pos_score = MODEL.predict_proba([feats])[0][1]
    print("explaining")
    exp = EXPLAINER.explain_instance(
        feats, MODEL.predict_proba, num_features=num_feats, labels=(1,)
    )
    print("explaining done")
    parsed_exps = parse_explanations(exp.as_list())
    recs = get_recommendation_string_from_parsed_exps(parsed_exps)
    output_str = """
    Current score (0 is worst, 1 is best):
    <br/>
    %s
    <br/>
    <br/>
    Recommendations (ordered by importance):
    <br/>
    <br/>
    %s
    """ % (
        pos_score,
        recs,
    )
    return output_str
6f5737c5ac293a3e33fed7a95119c30c72fafa1e
3,638,307
def set_title(title, uid='master'):
    """
    Sets a new title of the window.
    """
    try:
        _webview_ready.wait(5)
        return gui.set_title(title, uid)
    except NameError:
        raise Exception('Create a web view window first, before invoking this function')
    except KeyError:
        raise Exception('Cannot call function: No webview exists with uid: {}'.format(uid))
e2ad0fd3673ab2ad0966527b394e9afdf8e2a531
3,638,308
def FK42FK5MatrixOLDATTEMPT():
    """
    ----------------------------------------------------------------------
    Experimental. Create matrix to precess from an epoch in FK4 to an
    epoch in FK5, so epoch1 is Besselian and epoch2 is Julian.
    1) Do an epoch transformation in FK4 from the input epoch to
       1984 January 1d 0h.
    2) Apply a zero point correction for the right ascension w.r.t. B1950.
       The formula is: E = E0 + E1*(jd-jd1950)/Cb
       E0 = 0.525; E1 = 1.275 and Cb = the length of the tropical century
       (ES 3.59 p 182) = 36524.21987817305
       For the correction at 1984,1,1 the ES lists 0.06390s, which is
       0.06390*15 = 0.9585". This function calculated E = 0.958494476885",
       which agrees with the literature.
    3) Transform in FK5 from 1984 January 1d 0h to epoch2.
    Note that we do not use the adopted values for the precession angles,
    but use the Woolard and Clemence expressions to calculate the angles.
    These are one digit more accurate than the adopted values.
    ----------------------------------------------------------------------
    """
    # Epoch transformation from B1950 to 1984,1,1 in FK4
    jd = JD(1984, 1, 1)
    epoch1984 = JD2epochBessel(jd)
    M1 = BMatrixEpoch12Epoch2(1950.0, epoch1984)
    # Equinox correction to the right ascension
    jd1950 = epochBessel2JD(1950.0)
    E0 = 0.525
    E1 = 1.275
    Cb = 36524.21987817305  # In days = length of the tropical century
    E = E0 + E1 * (jd - jd1950) / Cb
    E /= 3600.0  # From seconds of arc to degree
    M2 = rotZ(-E)  # The correction is positive, so we have to rotate
    # around the z-axis in the negative direction.
    # Epoch transformation from 1984,1,1 to J2000
    epoch1984 = JD2epochJulian(jd)
    M3 = JMatrixEpoch12Epoch2(epoch1984, 2000.0)
    return M3 * M2 * M1
bbf98f3073fda4a248190e417332d72645faf5c1
3,638,309
def _lg_undirected(G, selfloops=False, create_using=None):
    """Return the line graph L of the (multi)graph G.

    Edges in G appear as nodes in L, represented as sorted tuples of the
    form (u,v), or (u,v,key) if G is a multigraph. A node in L corresponding
    to the edge {u,v} is connected to every node corresponding to an edge
    that involves u or v.

    Parameters
    ----------
    G : graph
        An undirected graph or multigraph.
    selfloops : bool
        If `True`, then self-loops are included in the line graph. If
        `False`, they are excluded.
    create_using : None
        A graph instance used to populate the line graph.

    Notes
    -----
    The standard algorithm for line graphs of undirected graphs does not
    produce self-loops.
    """
    if create_using is None:
        L = G.__class__()
    else:
        L = create_using

    # Graph specific functions for edges and sorted nodes.
    get_edges = _edge_func(G)
    sorted_node = _node_func(G)

    # Determine if we include self-loops or not.
    shift = 0 if selfloops else 1

    edges = set([])
    for u in G:
        # Label nodes as a sorted tuple of nodes in original graph.
        nodes = [sorted_node(*x) for x in get_edges(u)]

        if len(nodes) == 1:
            # Then the edge will be an isolated node in L.
            L.add_node(nodes[0])

        # Add a clique of `nodes` to graph. To prevent double adding edges,
        # especially important for multigraphs, we store the edges in
        # canonical form in a set.
        for i, a in enumerate(nodes):
            edges.update([_sorted_edge(a, b) for b in nodes[i + shift:]])

    L.add_edges_from(edges)
    return L
172fbe2e1d2ec425c3b37c97429df67d789f2c9c
3,638,310
def get_utxo_provider_client(utxo_provider, config_file):
    """
    Get or instantiate our blockchain UTXO provider's client.
    Return None if we were unable to connect.
    """
    utxo_opts = default_utxo_provider_opts(utxo_provider, config_file)
    try:
        utxo_provider = connect_utxo_provider(utxo_opts)
        return utxo_provider
    except Exception, e:
        # Python 2 exception syntax, kept as in the original source
        log.exception(e)
        return None
79d72221f707f36bdb07a57b634a57bb42942b2e
3,638,311
from typing import Any, Dict


def metadata(
    sceneid: str,
    pmin: float = 2.0,
    pmax: float = 98.0,
    hist_options: Dict = {},
    **kwargs: Any,
) -> Dict:
    """
    Return band bounds and statistics.

    Attributes
    ----------
    sceneid : str
        CBERS sceneid.
    pmin : int, optional, (default: 2)
        Histogram minimum cut.
    pmax : int, optional, (default: 98)
        Histogram maximum cut.
    hist_options : dict, optional
        Options to forward to the numpy.histogram function.
        e.g: {bins=20, range=(0, 1000)}
    kwargs : optional
        These are passed to 'rio_tiler.reader.preview'

    Returns
    -------
    out : dict
        Dictionary with bounds and bands statistics.
    """
    scene_params = cbers_parser(sceneid)
    cbers_prefix = "{scheme}://{bucket}/{prefix}/{scene}".format(**scene_params)
    bands = scene_params["bands"]
    addresses = [f"{cbers_prefix}_BAND{band}.tif" for band in bands]

    responses = reader.multi_metadata(
        addresses,
        indexes=[1],
        nodata=0,
        percentiles=(pmin, pmax),
        hist_options=hist_options,
        **kwargs,
    )

    info: Dict[str, Any] = dict(sceneid=sceneid)
    info["instrument"] = scene_params["instrument"]
    info["band_descriptions"] = [(ix + 1, b) for ix, b in enumerate(bands)]
    info["bounds"] = [
        r["bounds"]
        for b, r in zip(bands, responses)
        if b == scene_params["reference_band"]
    ][0]
    info["statistics"] = {b: d["statistics"][1] for b, d in zip(bands, responses)}
    return info
c3b5203ddbec575f791bef1fb6689088dfa666a2
3,638,312
def size_to_string(volume_size):
    # type: (int) -> str
    """
    Convert a volume size to string format to pass into Kubernetes.

    Args:
        volume_size: The size of the volume in bytes.

    Returns:
        The size of the volume in gigabytes as a passable string to Kubernetes.
    """
    if volume_size >= Gi:
        return str(volume_size >> 30) + 'Gi'
    elif volume_size >= Mi:
        return str(volume_size >> 20) + 'Mi'
    else:
        return str(volume_size >> 10) + 'Ki'
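    # Illustrative usage sketch (not part of the original dataset entry),
    # assuming Gi = 2**30 and Mi = 2**20 as is conventional:
    # size_to_string(2 * 2**30)   -> '2Gi'
    # size_to_string(512 * 2**20) -> '512Mi'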
b1b30f4a383d29951d12189180271a9752e5ba61
3,638,313
def argToDic(arg):
    """
    Converts a parameter sequence into a dict.

    Args:
        arg (string): specified simulation parameters.
    """
    params = dict()
    options = arg.split("_")
    if "=" in options[0]:
        params["mode"] = ""
    else:
        params["mode"] = options.pop(0)
    # parse arguments such as "M=2"
    for op in options:
        pair = op.split("=")
        pv = parseValue(pair[1])
        # exception
        if "IT" in pair[0]:
            pv = int(pv)
        params[pair[0]] = pv
    return params
173284e8ee45d9e61d786be33d6d6df60e0f9389
3,638,314
import glob
import os


def _installed_snpeff_genome(config_file, base_name):
    """Find the most recent installed genome for snpEff with the given name.
    """
    data_dir = _find_snpeff_datadir(config_file)
    dbs = [d for d in sorted(glob.glob(os.path.join(data_dir, "%s*" % base_name)), reverse=True)
           if os.path.isdir(d)]
    if len(dbs) == 0:
        raise ValueError("No database found in %s for %s" % (data_dir, base_name))
    else:
        return os.path.split(dbs[0])[-1]
07f6de2665bb61cc195c515e9750a143f5ec4358
3,638,315
from operator import itemgetter  # added: the snippet uses itemgetter but did not import it


def geth2hforplayer(matches, name):
    """Get all head-to-heads of the player."""
    matches = matches[(matches['winner_name'] == name) | (matches['loser_name'] == name)]
    h2hs = {}
    for index, match in matches.iterrows():
        if match['winner_name'] == name:
            if match['loser_name'] not in h2hs:
                h2hs[match['loser_name']] = {}
                h2hs[match['loser_name']]['l'] = 0
                h2hs[match['loser_name']]['w'] = 1
            else:
                h2hs[match['loser_name']]['w'] = h2hs[match['loser_name']]['w'] + 1
        elif match['loser_name'] == name:
            if match['winner_name'] not in h2hs:
                h2hs[match['winner_name']] = {}
                h2hs[match['winner_name']]['w'] = 0
                h2hs[match['winner_name']]['l'] = 1
            else:
                h2hs[match['winner_name']]['l'] = h2hs[match['winner_name']]['l'] + 1
    # create list
    h2hlist = []
    for k, v in h2hs.items():
        h2hlist.append([k, v['w'], v['l']])
    # sort by wins and then by losses + print
    # filter by h2hs with more than 6 wins:
    # h2hlist = [i for i in h2hlist if i[1] > 6]
    if len(h2hlist) == 0:
        return ''
    else:
        return sorted(h2hlist, key=itemgetter(1, 2))
    # for h2h in h2hlist:
    #     print(name+';'+h2h[0]+';'+str(h2h[1])+';'+str(h2h[2]))
5bcf3e520085acd00e607cad386708b490937e9f
3,638,316
import random


def backtracking_solver(starting_event: Event, **kwargs) -> FiniteSequence:
    """Compose a melodic sequence based upon the domain and constraints given.

    starting_event: Event that dictates the starting pitch. All subsequent
        events will be of similar duration.
    constraints - list of constraint functions
        (see composerstoolkit.composers.constraints)
    heuristics - list of heuristics (weight maps) that can be used to provide
        a rough shape to the line (see composerstoolkit.composers.heuristics)
    n_events - the number of notes of the desired target sequence.
        (Default 1)
    """
    opts = {
        "constraints": [],
        "heuristics": [],
        "n_events": 1
    }
    opts.update(kwargs)
    constraints = opts["constraints"]
    heuristics = opts["heuristics"]
    n_events = opts["n_events"]

    tick = 0
    seq = FiniteSequence([starting_event])
    use_weights = len(heuristics) > 0
    if n_events == 1:
        return FiniteSequence(seq)

    results = set()
    for constraint in constraints:
        results.update([constraint(seq)])
    if results != {True}:
        raise InputViolatesConstraints("Unable to solve!")

    choices = list(range(NOTE_MIN, NOTE_MAX))
    dead_paths = []
    while tick < n_events - 1:
        if use_weights:
            weights = [1.0 for i in range(len(choices))]
            for heuristic in heuristics:
                weights = heuristic(tick, choices, weights)
        try:
            if use_weights:
                note = Event([random.choices(choices, weights)[0]],
                             starting_event.duration)
            else:
                note = Event([random.choice(choices)],
                             starting_event.duration)
        except IndexError:
            # this was thrown because we ran out of choices
            # (we have reached a dead-end)
            dead_paths.append(seq[:])
            seq = seq[:-1]
            tick = tick - 1
            choices = list(range(NOTE_MIN, NOTE_MAX))
            if tick == 0:
                raise AllRoutesExhausted("Unable to solve!")
            continue
        context = FiniteSequence(seq.events[:])
        context.events.append(note)
        results = set()
        for constraint in constraints:
            results.update([constraint(context)])
        candidate = seq[:]
        candidate.events.append(note)
        if results == {True} and candidate not in dead_paths:
            seq.events.append(note)
            tick = tick + 1
            choices = list(range(NOTE_MIN, NOTE_MAX))
        else:
            # this choice was bad, so we must exclude it
            choices.remove(note.pitches[-1])
    return seq
86f33615a2bb72e0f656ba7e021ab3f49dcc79e2
3,638,317
import numpy as np  # added: the snippet uses np but did not import it


def jdos(bs, f, i, occs, energies, kweights, gaussian_width, spin=Spin.up):
    """
    Args:
        bs: bandstructure object
        f: final band
        i: initial band
        occs: occupancies over all bands.
        energies: energy mesh (eV)
        kweights: k-point weights
        gaussian_width: width of gaussian plot.
        spin: Which spin channel to include.

    Returns:
        Cumulative JDOS value for a specific i->f transition, with
        consideration of partial occupancy and spin polarisation.
    """
    jdos = np.zeros(len(energies))
    for k in range(len(bs.bands[spin][i])):
        final_occ = occs[f][k]
        init_energy = bs.bands[spin][i][k]
        final_energy = bs.bands[spin][f][k]
        init_occ = occs[i][k]
        k_weight = kweights[k]
        factor = k_weight * (
            (init_occ * (1 - final_occ)) - (final_occ * (1 - init_occ))
        )
        jdos += factor * gaussian(
            energies, gaussian_width, center=final_energy - init_energy
        )
    return jdos
adc2a9c6c91da91b02c0ed9823016b3f256625fb
3,638,318
from csv import DictReader  # added: the snippet uses DictReader but did not import it


def findConstantMetrics(inpath):
    """
    Simple function that checks which metrics in a dictionary (read from a
    CSV) are constant and which change over time. The first record read from
    the file is used as the reference.

    :param inpath: The path to the CSV file that must be analyzed
    :return: The list of metrics (keys) that are constant in the file
    """
    infile = open(inpath, 'r')
    reader = DictReader(infile)
    try:
        metricSet = next(reader)
    except (StopIteration, IOError):
        infile.close()
        return []
    line = metricSet
    while line is not None:
        metricsToRemove = []
        for k in metricSet.keys():
            if line[k] != metricSet[k]:
                metricsToRemove.append(k)
        for m in metricsToRemove:
            metricSet.pop(m)
        try:
            line = next(reader)
        except (StopIteration, IOError):
            line = None
    infile.close()
    return list(metricSet.keys())
0faefe77cfea5e1d74d2bb0dda33ed622ce87f02
3,638,319
def scoreGold(playerList, iconCount, highScore):
    """Update each player's score based on the amount of gold that they have
    collected.

    Args:
        playerList: A list of all PlayerSprite objects in the game.
        iconCount: A list of integers representing how many times each player
            has gained points from the scoreGold function this level.
        highScore: An integer showing the current high score.

    Returns:
        looping: A boolean indicating if scoreLevel should call this function
            again.
        iconCount: A list of integers representing how many times each player
            has gained points from the scoreGold function this level.
        scoreText: A list of the current scores for each of the players.
        iconCountText: A list of text objects representing each player's
            iconCount value.
        highScore: An integer showing the current high score.
    """
    scoreText = []
    iconCountText = []
    checkQuitGame()
    checkPauseGameWithInput(playerList)
    if any(player.goldCollectedCount > 0 for player in playerList):
        playSound("count_points.wav")
    # All living players increase their score by 100 points each time this
    # function is called, until it has been called as many times as they've
    # collected gold this level.
    for num, player in enumerate(playerList):
        if player.goldCollectedCount > iconCount[num]:
            player.score += 100
            iconCount[num] += 1
        scoreText.append(c.FONT.render("{:06d}PTS.".format(player.score % 1000000), False, c.WHITE))
        iconCountText.append(c.FONT.render("+{:02d}".format(iconCount[num] % 100), False, c.WHITE))
    highScore = compareHighScore(playerList, highScore)
    # Once iconCount has reached the correct number of collected gold for
    # each player, looping is set to False so scoreGold will not be called
    # again.
    if all(iconCount[num] == player.goldCollectedCount for num, player in enumerate(playerList)):
        return False, iconCount, scoreText, iconCountText, highScore
    else:
        return True, iconCount, scoreText, iconCountText, highScore
255b4ee987a6ac4a5274ad5ae7b5bf6698840407
3,638,320
def image_2d_transformer(pretrained=False, **kwargs):
    """Modified copy from timm. DeiT base model @ 384x384 from paper
    (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from
    https://github.com/facebookresearch/deit.
    """
    model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    model = _create_transformer_2d('vit_deit_base_patch16_384', pretrained=pretrained, **model_kwargs)
    return model
c5891105446ffc4fac5f19f73b5f401bfc769827
3,638,321
import torch


def create_fourier_heatmap_from_error_matrix(
    error_matrix: torch.Tensor,
) -> torch.Tensor:
    """Create Fourier Heat Map from error matrix (about quadrants 1 and 4).

    Note:
        The Fourier Heat Map is symmetric about the origin, so it can be
        created from the error matrix by performing an inversion operation
        about the origin.

    Args:
        error_matrix (torch.Tensor): The size of the error matrix should be
            (H, H/2+1), where H is the height of the image. This error matrix
            should be about quadrants 1 and 4.

    Returns:
        torch.Tensor: Fourier Heat Map created from the error matrix.
    """
    assert len(error_matrix.size()) == 2
    assert error_matrix.size(0) == 2 * (error_matrix.size(1) - 1)

    fhmap_rightside = error_matrix[1:, :-1]
    fhmap_leftside = torch.flip(fhmap_rightside, (0, 1))
    return torch.cat([fhmap_leftside[:, :-1], fhmap_rightside], dim=1)
25a4a4e2aa2ffda317f28d85c3798682fd72c466
3,638,322
from ostap.logger.logger import colored_string


def _sc_print_(sc):
    """Print the Status Code
    >>> st = ...
    >>> print st
    """
    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
    ##
    if sc.isSuccess():
        return colored_string('SUCCESS', WHITE, GREEN, True)
    elif sc.isRecoverable():
        return colored_string('RECOVERABLE', RED, YELLOW, True)
    elif _FAILURE != sc.getCode():
        return colored_string('FAILURE[%d]' % sc.getCode(), YELLOW, RED, True)
    return colored_string('FAILURE', YELLOW, RED, True)
504f7c662e93fd1e1c1c1759143fa8d77e9b89cc
3,638,323
def get_state_name(state):
    """Maps a mongod node state id to a human readable string."""
    if state in REPLSET_MEMBER_STATES:
        return REPLSET_MEMBER_STATES[state][0]
    else:
        return 'UNKNOWN'
ddfbfa53c05941747ebedc242baa8e29bddf6771
3,638,324
import sys
import sysconfig
from pathlib import Path


def _get_platform_information():
    """Return a dictionary containing platform-specific information."""
    system_information = {"platform": sysconfig.get_platform()}
    system_information.update({"python version": sys.version_info})
    if sys.platform == "win32":
        system_information.update({"binary path": sys.exec_prefix})
        system_information.update(
            {"main binary": str(Path(sys.exec_prefix).joinpath("python.exe"))})
        system_information.update(
            {"hidden console binary": str(Path(sys.exec_prefix).joinpath("pythonw.exe"))})
    else:
        # note: the original joined sys.exec_prefix onto itself before "bin";
        # joining an absolute path onto itself is a no-op, so it is dropped here
        system_information.update(
            {"binary path": str(Path(sys.exec_prefix).joinpath("bin"))})
        system_information.update(
            {"main binary": str(Path(sys.exec_prefix).joinpath("bin", "python"))})
        system_information.update(
            {"hidden console binary": str(Path(sys.exec_prefix).joinpath("bin", "python"))})
    return system_information
0fe475f8aecb72be45d613cf80d08353cebf34af
3,638,325
def compute_resilience(ugraph, attack_order):
    """
    Alias to bfs or union find.

    :param ugraph:
    :param attack_order:
    :return:
    """
    if USE_UF:
        return uf.compute_resilience_uf(ugraph, attack_order)
    else:
        return bfs_visited.compute_resilience(ugraph, attack_order)
db623ae30b20a076ff8e0f45fb84a9bb24fa414a
3,638,326
import numpy as np  # added: the snippet uses np but did not import it
from scipy.integrate import dblquad  # added: the snippet uses dblquad but did not import it


def NumericalFlux(b, r, c):
    """Compute the flux by numerical integration of the surface integral."""
    # I'm only coding up a specific case here
    assert r <= 1, "Invalid range."

    if b < 0:
        b = np.abs(b)

    # No occultation
    if b >= 1 + r:
        return 1

    # Get points of intersection
    if b > 1 - r:
        yi = (1. + b ** 2 - r ** 2) / (2. * b)
        xi = (1. / (2. * b)) * np.sqrt(4 * b ** 2 - (1 + b ** 2 - r ** 2) ** 2)
    else:
        yi = np.inf
        xi = r

    # Specific intensity map
    def I(y, x):
        mu = np.sqrt(1 - x ** 2 - y ** 2)
        return (1 - c[0] * (1 - mu ** 0.5) - c[1] * (1 - mu)
                - c[2] * (1 - mu ** 1.5) - c[3] * (1 - mu ** 2))

    # Total flux
    total, _ = dblquad(I, -1, 1, lambda x: 0, lambda x: np.sqrt(1 - x ** 2),
                       epsabs=1e-12, epsrel=1e-12)
    total *= 2

    # Lower integration limit
    def y1(x):
        if yi <= b:
            # Lower occultor boundary
            return b - np.sqrt(r ** 2 - x ** 2)
        elif b <= 1 - r:
            # Lower occultor boundary
            return b - np.sqrt(r ** 2 - x ** 2)
        else:
            # Tricky: we need to do this in two parts
            return b - np.sqrt(r ** 2 - x ** 2)

    # Upper integration limit
    def y2(x):
        if yi <= b:
            # Upper occulted boundary
            return np.sqrt(1 - x ** 2)
        elif b <= 1 - r:
            # Upper occultor boundary
            return b + np.sqrt(r ** 2 - x ** 2)
        else:
            # Tricky: we need to do this in two parts
            return np.sqrt(1 - x ** 2)

    # Compute the total flux
    flux, _ = dblquad(I, -xi, xi, y1, y2, epsabs=1e-12, epsrel=1e-12)

    # Do we need to solve an additional integral?
    if not (yi <= b) and not (b <= 1 - r):

        def y1(x):
            return b - np.sqrt(r ** 2 - x ** 2)

        def y2(x):
            return b + np.sqrt(r ** 2 - x ** 2)

        additional_flux, _ = dblquad(I, -r, -xi, y1, y2,
                                     epsabs=1e-12, epsrel=1e-12)
        flux += 2 * additional_flux

    return (total - flux) / total
c2e5918702dfd99f7710adf29eb2e8d668cb1cc0
3,638,327
from typing import Container


def build_volume_from(volume_from_spec):
    """
    volume_from can be either a service or a container. We want to return the
    container.id and format it into a string complete with the mode.
    """
    if isinstance(volume_from_spec.source, Service):
        containers = volume_from_spec.source.containers(stopped=True)
        if not containers:
            return "{}:{}".format(
                volume_from_spec.source.create_container().id,
                volume_from_spec.mode)
        container = containers[0]
        return "{}:{}".format(container.id, volume_from_spec.mode)
    elif isinstance(volume_from_spec.source, Container):
        return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)
ee5b997ea1832aa490501da3556faa52c611ada9
3,638,328
def generate_peripheral(csr, name, **kwargs):
    """ Generates definition of a peripheral.

    Args:
        csr (dict): LiteX configuration
        name (string): name of the peripheral
        kwargs (dict): additional parameters, including 'model'
                       and 'properties'

    Returns:
        string: repl definition of the peripheral
    """
    peripheral = get_descriptor(csr, name)

    model = kwargs['model']
    if csr['constants']['config_csr_data_width'] == 32 and 'model_CSR32' in kwargs:
        model = kwargs['model_CSR32']

    result = '\n{}: {} @ {}\n'.format(
        kwargs['name'] if 'name' in kwargs else name,
        model,
        generate_sysbus_registration(peripheral))

    for constant, val in peripheral['constants'].items():
        if 'ignored_constants' not in kwargs or constant not in kwargs['ignored_constants']:
            if constant == 'interrupt':
                result += ' -> cpu@{}\n'.format(val)
            else:
                result += ' {}: {}\n'.format(constant, val)

    if 'properties' in kwargs:
        for prop, val in kwargs['properties'].items():
            result += ' {}: {}\n'.format(prop, val(csr))

    if 'interrupts' in kwargs:
        for prop, val in kwargs['interrupts'].items():
            result += ' {} -> {}\n'.format(prop, val())

    return result
154428b153b804c23eb9b2a99380e987402c9fb4
3,638,329
import os


def export_file(isamAppliance, instance_id, component_id, file_id, filepath,
                check_mode=False, force=False):
    """
    Exporting the transaction logging data file or rollover transaction
    logging data file for a component
    """
    if os.path.exists(filepath) is True:
        logger.info("File '{0}' already exists. Skipping export.".format(filepath))
        warnings = ["File '{0}' already exists. Skipping export.".format(filepath)]
        return isamAppliance.create_return_object(warnings=warnings)

    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    else:
        return isamAppliance.invoke_get_file(
            "Exporting the transaction logging data file or rollover transaction logging data file for a component",
            "{0}/{1}/transaction_logging/{2}/translog_files/{3}?export".format(uri, instance_id, component_id, file_id),
            filepath
        )

    return isamAppliance.create_return_object()
1e040a5d9b827fbcf95c2443755201710ce2c79b
3,638,330
import numpy as np  # added: the snippet uses np but did not import it


def map_vocabulary(docs, vocabulary):
    """
    Maps sentences and labels to vectors based on a vocabulary.
    """
    mapped = np.array([[vocabulary[word] for word in doc] for doc in docs])
    return mapped
b5b39aeac6306709a4b4ac10a29d40a2006d57ff
3,638,331
def mobilenetv3_large_minimal_100(pretrained=False, **kwargs):
    """ MobileNet V3 Large (Minimalistic) 1.0 """
    # NOTE for train set drop_rate=0.2
    model = _gen_mobilenet_v3('mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
    return model
717a67b1ab7cb0ad7a6c8d40ea4b0b29108eff94
3,638,332
def get_identity(user, identity_uuid):
    """
    Given the (request) user and an identity uuid,
    return None or an Active Identity
    """
    try:
        identity_list = get_identity_list(user)
        if not identity_list:
            raise CoreIdentity.DoesNotExist(
                "No identities found for user %s" % user.username)
        identity = identity_list.get(uuid=identity_uuid)
        return identity
    except CoreIdentity.DoesNotExist:
        logger.warn("Identity %s DoesNotExist" % identity_uuid)
        return None
800e47d8782fc5e71e97192f76713032eade9441
3,638,333
def same_strange_looking_function(param1, callback_fn):
    """
    This function is documented, but the function is identical to
    some_strange_looking_function and should result in the same hash
    """
    tail = param1[-1]
    # return the callback value from the tail of param, whatever that is
    return callback_fn(tail)
438becf6803e6b25a200a34e18eb648aaa4b6fbb
3,638,334
import os
import shutil


def put_output(dir_in, opts, Flowcell, Lane):
    """Uses shutil to move the output into the galaxy directory."""
    seq1_name = '%(code)s_%(Flowcell)s_s_%(lane)s_fastq.txt' % \
        ({'code': 'R1samplecode123', 'Flowcell': Flowcell, 'lane': Lane})
    seq2_name = '%(code)s_%(Flowcell)s_s_%(lane)s_fastq.txt' % \
        ({'code': 'R2samplecode123', 'Flowcell': Flowcell, 'lane': Lane})
    # fixed: the original called os.exists, which does not exist
    if not os.path.exists(os.path.join(dir_in, seq1_name)):
        seq1_name += ".gz"
        seq2_name += ".gz"
    shutil.move(os.path.join(dir_in, seq1_name), opts.match1)
    shutil.move(os.path.join(dir_in, seq2_name), opts.match2)
    return 0
bc91a83cbf14d91dc3a82fbb21d1b3ceb15351fb
3,638,335
def __extractFunction(text, jsDoc, classConstructor):
    """
    Extracts a function depending on its pattern:
        'function declaration':
            function <name>(<parameters>) { <realization> }[;]
        'named function expression':
            <variable> = function <name>(<parameters>) { <realization> }[;]
        'unnamed function expression':
            <variable> = function(<parameters>) { <realization> }[;]
        'alias function':
            <variable> = <name>(<parameters>)[;]

    @param {string} text.
    @param {jsCodeParser.jsDoc.JsDoc} jsDoc.
    @param {(jsCodeParser.elements.Class|jsCodeParser.elements.Method)} classConstructor.
    @return {(jsCodeParser.elements.Class|jsCodeParser.elements.Method)} Element.
    """
    parameters = extractTextBetweenTokens(text, '(')
    if not parameters:
        return None
    end = text.find(parameters) + len(parameters)
    realization = text[end:].strip()
    if realization[0] == '{':
        realization = extractTextBetweenTokens(realization, '{')
        end = text.find(realization) + len(realization)
    if end < len(text) and text[end] == ';':
        end += 1
    code = text[:end].strip()
    return classConstructor(code, jsDoc)
992604ccd1e56da6706cf2e4ec2955c2c9ecfa7e
3,638,336
def vocabulary_size(tokens):
    """Returns the vocabulary size, defined as the number of distinct
    alphabetic tokens (as determined by the Python str.isalpha method).
    This is a case-sensitive count. `tokens` is a list of token strings."""
    vocab_list = set(token for token in tokens if token.isalpha())
    return len(vocab_list)
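    # Illustrative usage sketch (not part of the original dataset entry):
    # vocabulary_size(['The', 'the', 'cat', '42']) -> 3
    # ('The' and 'the' count separately; '42' is dropped by isalpha)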
5e26e1be98a3e82737277458758f0fd65a64fe8f
3,638,337
from typing import Any, Dict, Optional, Tuple


def max_iteration_for_analysis(query: Dict[str, Any],
                               db: cosem_db.MongoCosemDB,
                               check_evals_complete: bool = False,
                               conv_it: Optional[Tuple[int, int]] = None) -> Tuple[int, bool]:
    """
    Find the first iteration that meets the convergence criterion, like
    `convergence_iteration`, but return a minimum iteration of 700k if the
    convergence criterion is met at an earlier iteration.

    To avoid re-computation if `convergence_iteration` has explicitly been
    called before, its previous output can be passed in explicitly.

    Args:
        query: Dictionary specifying which set of configurations to consider
            for the maximum iteration. This will typically contain keys for
            setups, label and crop.
        db: Database containing the evaluation results.
        check_evals_complete: Whether to first check whether the considered
            evaluations are consistent across the queries (i.e. same for all
            crops/labels/raw_datasets within one setup, at least to 500k, if
            above threshold by 500k at least to 700k). Should generally be
            set to True unless this has already been checked.
        conv_it: Output of `convergence_iteration` if already known.
            Otherwise, None and `convergence_iteration` will be called.

    Returns:
        The max iteration, and False if none of the results produce
        above-threshold segmentations, if the convergence condition isn't met
        anywhere, or if not evaluated to at least 700k iterations.

    Raises:
        ValueError if no evaluations are found for the given query.
    """
    if conv_it is None:
        it, valid = convergence_iteration(query, db,
                                          check_evals_complete=check_evals_complete)
    else:
        it, valid = conv_it
    if valid != 2:
        it = max(it, 700000)
    return it, bool(valid)
b5d0bebd2af634ac72f8bc318276d0f7c03114f2
3,638,338
def getMatirces(Dynamics, Cost):
    """
    This function takes the dynamics class as input and outputs the required
    matrices and cvxpy variables to turn the covariance steering problem into
    a finite dimensional optimization problem.
    """
    Alist = Dynamics.Alist
    Blist = Dynamics.Blist
    Dlist = Dynamics.Dlist
    zlist = Dynamics.zlist
    sigmaWlist = Dynamics.sigmaWlist
    Rulist = Cost.Rulist
    Rvlist = Cost.Rvlist
    N = len(Alist)  # Problem horizon
    nx, nu, nv = Alist[0].shape[1], Blist[0].shape[1], Dlist[0].shape[1]

    # Set constant matrices:
    Gamma = []
    for i in range(N + 1):
        Gamma.append(Phi_func(Alist, i, 0))
    Gamma = np.vstack(Gamma)

    block_Hu, block_Hv, block_Hw = [], [], []
    for i in range(N + 1):
        row_Hu, row_Hv, row_Hw = [], [], []
        for j in range(N):
            if j < i:
                row_Hu.append(Phi_func(Alist, i, j) @ Blist[j])
                row_Hv.append(Phi_func(Alist, i, j) @ Dlist[j])
                row_Hw.append(Phi_func(Alist, i, j))
            else:
                row_Hu.append(np.zeros((nx, nu)))
                row_Hv.append(np.zeros((nx, nv)))
                row_Hw.append(np.zeros((nx, nx)))
        block_Hu.append(np.hstack(row_Hu))
        block_Hv.append(np.hstack(row_Hv))
        block_Hw.append(np.hstack(row_Hw))
    Hu, Hv, Hw = np.vstack(block_Hu), np.vstack(block_Hv), np.vstack(block_Hw)

    Z = np.vstack(zlist)
    Wbig = np.zeros((nx * N, nx * N))
    for k in range(N):
        Wbig[k * nx:(k + 1) * nx, k * nx:(k + 1) * nx] = sigmaWlist[k]

    Rubig = np.zeros((nu * N, nu * N))
    Rvbig = np.zeros((nv * N, nv * N))
    # set_trace()
    for k in range(N):
        Rubig[k * nu:(k + 1) * nu, k * nu:(k + 1) * nu] = Rulist[k]
        Rvbig[k * nv:(k + 1) * nv, k * nv:(k + 1) * nv] = Rvlist[k]

    return Gamma, Hu, Hv, Hw, Z, Wbig, Rubig, Rvbig
50de11ba3f3d1528f7ff577861613b96f8e35254
3,638,339
def get_transit_boundary_indices(time, transit_size):
    """
    Determines transit boundaries from the sorted times of a transit cut-out.

    :param time: (1D np.array) sorted times of the transit cut-out
    :param transit_size: (float) size of the transit crop window in days
    :returns tuple:
        [0] list of transit start indices (int)
        [1] list of sequence lengths (int) of each transit
    """
    sequence_lengths = []
    transit_start_indices = [0]
    for i, t in enumerate(time):
        if t - time[transit_start_indices[-1]] > transit_size:
            sequence_lengths.append(i - transit_start_indices[-1])
            transit_start_indices.append(i)
    # last length is from last transit start til the end of the array
    sequence_lengths.append(len(time) - transit_start_indices[-1])
    return transit_start_indices, sequence_lengths
cd3775d72690eb4539e0434b0ac7f715d14374a6
3,638,340
import gc
import os
import tempfile
from time import time
from urllib.error import HTTPError


def example_3():
    """Loads into temporary storage."""

    def cleanup(path):
        # Clean up the temp folder to remove the BerkeleyDB database files...
        for f in os.listdir(path):
            os.unlink(path + "/" + f)
        os.rmdir(path)

    print(f"\n{80 * '*'}\nExample 3, loading 45K triples from GitHub into memory and then adding them to a\nSQLiteLSM-backed ConjunctiveGraph...\n")
    doacc_abox = "https://raw.githubusercontent.com/DOACC/individuals/master/cryptocurrency.nt"
    path = os.path.join(tempfile.gettempdir(), "doacc")
    store = plugin.get("SQLiteLSM", Store)(
        identifier=URIRef("rdflib_sqlitelsm_test")
    )

    # Create an in-memory Graph into which to load the data
    memgraph = Graph("Memory", URIRef("http://rdflib.net"))

    # Factor out any gc-related lags
    gcold = gc.isenabled()
    gc.collect()
    gc.disable()

    # Load memgraph with remote data
    # print("Downloading and parsing data\n")
    try:
        t0 = time()
        memgraph.parse(location=doacc_abox, format="nt")
        t1 = time()
    except HTTPError as e:
        cleanup(path)
        return e.code, str(e), None

    print(f"Time taken to download and parse {len(memgraph)} triples to in-memory graph: {t1 - t0:.4f}s\n")

    if os.path.exists(path):
        cleanup(path)

    # Create ConjunctiveGraph with a SQLiteLSM-backed store
    sqlitelsmgraph = ConjunctiveGraph(store)
    sqlitelsmgraph.open(path, create=True)

    # Step through the memgraph triples, adding to the LSM-backed ConjunctiveGraph
    t0 = time()
    for triple in memgraph.triples((None, None, None)):
        sqlitelsmgraph.add(triple)
    t1 = time()

    # Check total and report time
    assert len(sqlitelsmgraph) == 44947, len(sqlitelsmgraph)
    print(f"Time to add {len(sqlitelsmgraph)} triples to LSM-backed graph: {t1 - t0:.4f}s\n")

    # Close the graphs
    memgraph.close()
    sqlitelsmgraph.close()

    # Re-open (with "create=False") sqlitelsmgraph with the saved store:
    t0 = time()
    sqlitelsmgraph.open(path, create=False)
    t1 = time()
    print(f"Time to load {len(sqlitelsmgraph)} triples from LSM-backed store: {t1 - t0:.4f}s\n")

    if gcold:
        gc.enable()
    print(f"Example 3 completed\n{80 * '*'}\n\n")
643d5b658c02398d7159b66c98b9d14ad2b87517
3,638,341
import pandas as pd  # added: the snippet uses pd but did not import it


def decode_gbe_string(s):
    """This helper function turns gbe output strings into dataframes."""
    columns, df = s.replace('","', ';').replace('"', '').split('\n')
    df = pd.DataFrame([column.split(',') for column in df.split(';')][:-1]).transpose().ffill().iloc[:-1]
    df.columns = [c.replace('tr_', '') for c in columns.split(',')[:-1]]
    return df
0a2d262b2653f736ef8ae7c7ed4b969faf80e9bf
3,638,342
import re


def get_scihub_namespaces(xml):
    """Take an xml string and return a dict of namespace prefixes to
    namespaces mapping."""
    nss = {}
    matches = re.findall(r'\s+xmlns:?(\w*?)\s*=\s*[\'"](.*?)[\'"]',
                         xml.decode('utf-8'))
    for match in matches:
        prefix = match[0]
        ns = match[1]
        if prefix == '':
            prefix = '_default'
        nss[prefix] = ns
    return nss
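    # Illustrative usage sketch (not part of the original dataset entry):
    # xml = b'<feed xmlns="http://www.w3.org/2005/Atom" xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">'
    # get_scihub_namespaces(xml)
    # -> {'_default': 'http://www.w3.org/2005/Atom',
    #     'opensearch': 'http://a9.com/-/spec/opensearch/1.1/'}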
b1d5a32d7583a655c59fa5175bdd133899bf6223
3,638,343
def valid_verify_email(form, email):
    """
    Returns True if "email" is equal to the first email.
    """
    try:
        if form.email.data != form.email_verify.data:
            raise ValidationError('Email address is not the same')
        if models.Account.pull_by_email(form.email.data) is not None:
            print('Account already exists')
            raise ValidationError('An account already exists for that email address')
    except Exception as e:
        raise ValidationError('Email is wrong, check it again: ' + str(e))
    return True
16073bb559e06759632323289f49e127bb9f8cb1
3,638,344
def _computePolyVal(poly, value):
    """
    Evaluates a polynomial at a specific value.

    :param poly: a list of polynomial coefficients (first item = highest
        degree, last item = constant term).
    :param value: number used to evaluate poly
    :return: a number, the evaluation of poly at value
    """
    # return numpy.polyval(poly, value)
    acc = 0
    for c in poly:
        acc = acc * value + c
    return acc
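    # Illustrative usage sketch (not part of the original dataset entry),
    # showing the Horner evaluation for 2*x**2 + 0*x + 1 at x = 3:
    # _computePolyVal([2, 0, 1], 3) -> 19
    # (acc steps: 0 -> 2 -> 6 -> 19)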
0377ba0757439409824b89b207485a99f804cb41
3,638,345
from io import StringIO


def fix_e26(source):
    """Format block comments."""
    if '#' not in source:
        # Optimization.
        return source

    string_line_numbers = multiline_string_lines(source,
                                                 include_docstrings=True)
    fixed_lines = []
    sio = StringIO(source)
    for (line_number, line) in enumerate(sio.readlines(), start=1):
        if (line.lstrip().startswith('#') and
                line_number not in string_line_numbers):
            indentation = _get_indentation(line)
            line = line.lstrip()

            # Normalize beginning if not a shebang.
            if len(line) > 1:
                # Leave multiple spaces like '#  ' alone.
                if line.count('#') > 1 or line[1].isalnum():
                    line = '# ' + line.lstrip('# \t')

            fixed_lines.append(indentation + line)
        else:
            fixed_lines.append(line)

    return ''.join(fixed_lines)
ec569e442c2244421afa94cc8316478c55377220
3,638,346
def graph_distance(tree, node1, node2=None):
    """
    Return shortest distance from node1 to node2, or just update every
    node.distance with the shortest distance to node1.
    """
    for node in tree.nodes():
        node.distance = inf
        node.back = None  # node backwards towards node1
    fringe = Queue([node1])
    while fringe:
        node = fringe.pop()
        # print(f"looking at '{node}'")
        previous_distance = node.back.distance if node.back else -1
        node.distance = previous_distance + 1
        if node == node2:
            break
        for neighbor in node.neighbors():
            if neighbor.distance > node.distance:
                fringe.push(neighbor)
                neighbor.back = node
    if node2:
        return node2.distance
0764d2a687933631d592e1b6d40ceec8d629036c
3,638,347
def trunicos(b):
    """Return a unit-distance embedding of the truncated icosahedron graph."""
    p0 = star_radius(5) * root(1, 20, 1)
    p1 = p0 + root(1, 20, 1)
    p2 = mpc(b, 0.5)
    p3 = cu(p2, p1)
    p4 = cu(p3, p1 * root(1, 5, -1))
    p5 = cu(p4, p2 * root(1, 5, -1))
    return (symmetrise((p0, p1, p2, p3, p4, p5), "D5"),
            [abs(p5 - root(1, 5, -1) * conj(p5)) - 1])
018112497882a6f0a572cf2c1c222cdf36ca95e9
3,638,348
import torch


def histogram2d(
    x1: torch.Tensor,
    x2: torch.Tensor,
    bins: torch.Tensor,
    bandwidth: torch.Tensor,
    epsilon: float = 1e-10
) -> torch.Tensor:
    """Function that estimates the 2d histogram of the input tensor.

    The calculation uses kernel density estimation, which requires a
    bandwidth (smoothing) parameter.

    Args:
        x1 (torch.Tensor): Input tensor to compute the histogram with shape
          :math:`(B, D1)`.
        x2 (torch.Tensor): Input tensor to compute the histogram with shape
          :math:`(B, D2)`.
        bins (torch.Tensor): The number of bins to use the histogram
          :math:`(N_{bins})`.
        bandwidth (torch.Tensor): Gaussian smoothing factor with shape [1].
        epsilon (float): A scalar, for numerical stability. Default: 1e-10.

    Returns:
        torch.Tensor: Computed histogram of shape :math:`(B, N_{bins}, N_{bins})`.

    Examples:
        >>> x1 = torch.rand(2, 32)
        >>> x2 = torch.rand(2, 32)
        >>> bins = torch.linspace(0, 255, 128)
        >>> hist = histogram2d(x1, x2, bins, bandwidth=torch.tensor(0.9))
        >>> hist.shape
        torch.Size([2, 128, 128])
    """
    pdf1, kernel_values1 = marginal_pdf(x1.unsqueeze(2), bins, bandwidth, epsilon)
    pdf2, kernel_values2 = marginal_pdf(x2.unsqueeze(2), bins, bandwidth, epsilon)

    pdf = joint_pdf(kernel_values1, kernel_values2)

    return pdf
5e360f1e9350a29664e3beb1d0cc6ba3024647b9
3,638,349
import json


def webhooks_v2(request):
    """
    Handles all known webhooks from stripe, and calls signals.
    Plug in as you need.
    """
    if request.method != "POST":
        return HttpResponse("Invalid Request.", status=400)

    event_json = json.loads(request.body)
    event_key = event_json['type'].replace('.', '_')
    if event_key in WEBHOOK_MAP:
        WEBHOOK_MAP[event_key].send(sender=None, full_json=event_json)
    return HttpResponse(status=200)
afa86e189c417a147ae05fa46e89d985207c403b
3,638,350
from itertools import islice  # added: the snippet uses islice but did not import it


def nth(iterable, n, default=None):
    """
    Returns the nth item or a default value.

    :param iterable: The iterable to retrieve the item from
    :param n: index of the item to retrieve. Must be >= 0
    :param default: the value to return if the index isn't valid
    :return: the nth item, or the default value if n isn't a valid index
    """
    return next(islice(iterable, n, None), default)
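    # Illustrative usage sketch (not part of the original dataset entry):
    # nth(range(10), 3)        -> 3
    # nth(range(3), 5, 'none') -> 'none'  (index past the end falls back to default)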
9f0eb8a31d8b4499d8538f6aefc9dba8231b27e0
3,638,351
from numba.core import types  # assumed fix: numba's types module (the stdlib `types` has no DictItemsIterableType)


def _dict_items(typingctx, d):
    """Get dictionary iterator for .items()"""
    resty = types.DictItemsIterableType(d)
    sig = resty(d)
    codegen = _iterator_codegen(resty)
    return sig, codegen
6435320c6ba490b85c3ef4c065f55cef0d7d2c8e
3,638,352
def odd_desc(count):
    """
    Replace ___ with a single call to range to return a list of descending
    odd numbers ending with 1.
    For e.g. if count = 2, return a list of 2 odds: [3, 1]. See the test
    below if it is not clear.
    """
    return list(reversed(range(1, count * 2, 2)))
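    # Illustrative usage sketch (not part of the original dataset entry):
    # odd_desc(3) -> [5, 3, 1]   (range(1, 6, 2) is [1, 3, 5], reversed)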
2f90095c5b25f8ac33f3bb86d3f46e67932bc78a
3,638,353
import numpy as np  # added: the snippet uses np but did not import it
import pandas as pd  # added: the snippet uses pd in type hints but did not import it


def retrieval_score(test_ratings: pd.DataFrame, recommender,
                    remove_known_pos: bool = False,
                    metric: str = 'mrr') -> float:
    """
    Mean Average Precision / Mean Reciprocal Rank of first relevant item @ N
    """
    N = recommender.N
    user_scores = []
    relevant_items = get_relevant_items(test_ratings)
    for user in recommender.users:
        if user in relevant_items.keys():
            predicted_items = recommender.get_recommendations(user, remove_known_pos)
            predicted_items = [item for item, _ in predicted_items]
            if metric == 'map':
                true_positives = np.intersect1d(relevant_items[user], predicted_items)
                score = len(true_positives) / N
            elif metric == 'mrr':
                score = np.mean([reciprocal_rank(item, predicted_items)
                                 for item in relevant_items[user]])
            else:
                raise ValueError(f"Unknown value {metric} for argument `metric`")
            user_scores.append(score)
    return np.mean(user_scores)
c7167eef0195496ea460dcbe63926028c430433e
3,638,354
import tensorflow as tf  # added: the snippet uses tf but did not import it


def test_dump_load_keras_model_with_dict(tmpdir, save_and_load):
    """Test whether tensorflow ser/de-ser works for models returning dictionaries."""

    class DummyModel(tf.keras.Model):
        def __init__(self):
            super().__init__()

        def _random_method(self):
            pass

        def call(self, in_):
            out = {}
            out["b1"], out["b2"] = in_, in_
            return out

    in_ = tf.ones((1, 3))
    model = DummyModel()
    # this line is very important or tensorflow cannot trace the graph of the module
    model(in_)

    loaded = save_and_load(model, str(tmpdir))
    out = loaded(in_)
    assert set(out) == {"b1", "b2"}
    assert tf.is_tensor(out["b1"])
    assert tf.is_tensor(out["b2"])
    assert not hasattr(loaded, "_random_method")
5fcaf73e5a0b138a04091573782a2c03f4459f15
3,638,355
def stemmer_middle_high_german(text_l, rem_umlauts=True, exceptions=exc_dict):
    """
    text_l: text in string format
    rem_umlauts: choose whether to remove umlauts from string
    exceptions: hard-coded dictionary for the cases the algorithm fails
    """
    # Normalize text
    text_l = normalize_middle_high_german(
        text_l, to_lower_all=False, to_lower_beginning=True
    )

    # Tokenize text
    word_tokenizer = WordTokenizer("middle_high_german")
    text_l = word_tokenizer.tokenize(text_l)

    text = []
    for word in text_l:
        try:
            text.append(exceptions[word])  # test if word is in the exception dictionary
        except KeyError:  # narrowed from a bare except: only a missing key is expected here
            if word[0].isupper():
                # MHG only uses upper case for locations, people, etc. So any
                # word that starts with a capital letter while not being at
                # the start of a sentence will automatically be excluded.
                text.append(word)
            elif word in MHG_STOPS:
                text.append(word)  # Filter stop words
            else:
                text.append(stem_helper(word, rem_umlaut=rem_umlauts))
    return text
608ec49ad36ee5ae7ad41fe4eab5d9f7c65eb609
3,638,356
def test_queue_trials(start_connected_emptyhead_cluster):
    """Tests explicit oversubscription for autoscaling.

    Tune oversubscribes a trial when `queue_trials=True`, but does not block
    other trials from running.
    """
    cluster = start_connected_emptyhead_cluster
    runner = TrialRunner()

    def create_trial(cpu, gpu=0):
        kwargs = {
            "resources": Resources(cpu=cpu, gpu=gpu),
            "stopping_criterion": {
                "training_iteration": 3
            }
        }
        return Trial("__fake", **kwargs)

    runner.add_trial(create_trial(cpu=1))
    with pytest.raises(TuneError):
        runner.step()  # run 1
    del runner

    executor = RayTrialExecutor(queue_trials=True)
    runner = TrialRunner(trial_executor=executor)
    cluster.add_node(num_cpus=2)
    cluster.wait_for_nodes()

    cpu_only = create_trial(cpu=1)
    runner.add_trial(cpu_only)
    runner.step()  # add cpu_only trial

    gpu_trial = create_trial(cpu=1, gpu=1)
    runner.add_trial(gpu_trial)
    runner.step()  # queue gpu_trial

    # This tests that the cpu_only trial should bypass the queued trial.
    for i in range(3):
        runner.step()
    assert cpu_only.status == Trial.TERMINATED
    assert gpu_trial.status == Trial.RUNNING

    # Scale up
    cluster.add_node(num_cpus=1, num_gpus=1)
    cluster.wait_for_nodes()

    for i in range(3):
        runner.step()
    assert gpu_trial.status == Trial.TERMINATED
fed9fe1458db15f871ccd4afff942c0d022a9b8a
3,638,357
import numpy as np  # added: the snippet uses np but did not import it


def get_bboxes(outputs, proposals, num_proposals, num_classes,
               im_shape, im_scale, max_per_image=100, thresh=0.001,
               nms_thresh=0.4):
    """
    Returns bounding boxes for detected objects, organized by class.

    Transforms the proposals from the region proposal network to bounding box
    predictions using the bounding box regressions from the classification
    network:
    (1) Applying bounding box regressions to the region proposals.
    (2) For each class, take proposed boxes where the corresponding
        objectness score is greater than THRESH.
    (3) Apply non-maximum suppression across classes using NMS_THRESH.
    (4) Limit the maximum number of detections over all classes to
        MAX_PER_IMAGE.

    Arguments:
        outputs (list of tensors): Faster-RCNN model outputs
        proposals (Tensor): Proposed boxes from the model's proposalLayer
        num_proposals (int): Number of proposals
        num_classes (int): Number of classes
        im_shape (tuple): Shape of image
        im_scale (float): Scaling factor of image
        max_per_image (int): Maximum number of allowed detections per image.
            Default is 100. None indicates no enforced maximum.
        thresh (float): Threshold for objectness score. Default is 0.001.
        nms_thresh (float): Threshold for non-maximum suppression.
            Default is 0.4.

    Returns:
        detections (list): List of bounding box detections, organized by
            class. Each element contains a numpy array of bounding boxes for
            detected objects of that class.
    """
    detections = [[] for _ in range(num_classes)]

    proposals = proposals.get()[:num_proposals, :]  # remove padded proposals
    boxes = proposals[:, 1:5] / im_scale  # scale back to real image space

    # obtain bounding box corrections from the frcn layers
    scores = outputs[2][0].get()[:, :num_proposals].T
    bbox_deltas = outputs[2][1].get()[:, :num_proposals].T

    # apply bounding box corrections to the region proposals
    pred_boxes = bbox_transform_inv(boxes, bbox_deltas)
    pred_boxes = clip_boxes(pred_boxes, im_shape)

    # Skip the background class, start processing from class 1
    for j in range(1, num_classes):
        inds = np.where(scores[:, j] > thresh)[0]

        # obtain class-specific boxes and scores
        cls_scores = scores[inds, j]
        cls_boxes = pred_boxes[inds, j * 4:(j + 1) * 4]
        cls_dets = np.hstack((cls_boxes,
                              cls_scores[:, np.newaxis])).astype(np.float32,
                                                                 copy=False)

        # apply non-max suppression
        keep = nms(cls_dets, nms_thresh)
        cls_dets = cls_dets[keep, :]

        # store results
        detections[j] = cls_dets

    # Limit to max_per_image detections *over all classes*
    if max_per_image is not None:
        # obtain flattened list of all image scores
        image_scores = np.hstack([detections[j][:, -1]
                                  for j in range(1, num_classes)])
        if len(image_scores) > max_per_image:
            # compute threshold needed to keep the top max_per_image
            image_thresh = np.sort(image_scores)[-max_per_image]

            # apply threshold
            for j in range(1, num_classes):
                keep = np.where(detections[j][:, -1] >= image_thresh)[0]
                detections[j] = detections[j][keep, :]

    return detections
09e5eb94f35672e77980c89e71fcb9ed6b460ab4
3,638,358
def air_transport_per_year_by_country(country):
    """Returns the number of passengers carried per year for the given country."""
    cur = get_db().execute(
        'SELECT Year, Value FROM Indicators '
        'WHERE CountryCode="{}" AND IndicatorCode="IS.AIR.PSGR"'.format(country))
    air_transport = cur.fetchall()
    cur.close()
    return jsonify(air_transport)
4ca85c537c5bc7ccda332af977f1252b14672235
3,638,359
def outside_range(number, min_range, max_range):
    """
    Returns True if `number` lies outside the closed interval
    [`min_range`, `max_range`].
    """
    return number < min_range or number > max_range
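    # Illustrative usage sketch (not part of the original dataset entry):
    # outside_range(0, 1, 10) -> True
    # outside_range(5, 1, 10) -> False
    # outside_range(1, 1, 10) -> False  (endpoints count as inside)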
dc3889fbabb74db38b8558537413ebc5bc613d05
3,638,360
import re


def is_string_constant(node):
    """Checks whether the :code:`node` is a string constant."""
    return is_leaf(node) and re.match('^\"[^\"]*\"$', node) is not None
5a62c513bc856571e62c40b9d14bdefb67be4c79
3,638,361
from typing import List


def is_list_type(t) -> bool:
    """
    Return True if ``t`` is the ``List`` python type.
    """
    # print(t, getattr(t, '__origin__', None) is list)
    return t == list or is_pa_type(t, pa.types.is_list) or (
        hasattr(t, '__origin__') and t.__origin__ in (list, List)
    ) or (
        isinstance(t, dict) and is_list_type(t.get('type'))
    )
7da1ea98dccc4341a6db7a3e13e9f9bd278bd984
3,638,362
import datetime  # fixed: the original `from datetime import datetime` broke the datetime.date.today() call below


def get_measure_of_money_supply():
    """
    Fetch China's money supply data from Sina.

    Returns:
        The retrieved data table. The data starts from 1978-01.

    Examples:
        .. code-block:: python

            >>> from finance_datareader_py.sina import get_measure_of_money_supply
            >>> df = get_measure_of_money_supply()
            >>> print(df.iloc[0][df.columns[0]])
            >>> print(df.index[-1])
            >>> print(df.columns)

            1776196.11
            1978.8
            Index(['货币和准货币(广义货币M2)(亿元)', '货币和准货币(广义货币M2)同比增长(%)',
                   '货币(狭义货币M1)(亿元)', '货币(狭义货币M1)同比增长(%)', '流通中现金(M0)(亿元)',
                   '流通中现金(M0)同比增长(%)', '活期存款(亿元)', '活期存款同比增长(%)',
                   '准货币(亿元)', '准货币同比增长(%)', '定期存款(亿元)', '定期存款同比增长(%)',
                   '储蓄存款(亿元)', '储蓄存款同比增长(%)', '其他存款(亿元)', '其他存款同比增长(%)'],
                  dtype='object')
    """
    num = (datetime.date.today().year + 1 - 1978) * 12
    return _get_mac_price(num=num, event=1, cate='fininfo', index='统计时间')
304cf05be6a226e7da46ec16e36a6632f02848c5
3,638,363
def _SparseMatrixAddGrad(op, grad):
    """Gradient for sparse_matrix_add op."""
    # input to sparse_matrix_add is (a, b, alpha, beta)
    # with a, b CSR and alpha, beta scalars.
    # output is: alpha * a + beta * b
    # d(a*A + b*B)/dA . grad = a * grad
    # May have gotten the transposes wrong below.
    # d(a*A + b*B)/da . grad = tr(A' . grad)
    # For now, only implement gradients w.r.t. A and B.
    # TODO(ebrevdo): Implement reduce_sum for SparseMatrix so that we
    # can implement gradients w.r.t. a and b.
    (a_csr, b_csr, alpha, beta) = op.inputs
    return (sparse_csr_matrix_ops.sparse_matrix_mul(
        _PruneCSRMatrix(grad, a_csr), alpha),
            sparse_csr_matrix_ops.sparse_matrix_mul(
                _PruneCSRMatrix(grad, b_csr), beta), None, None)
43485431ca2e7028e005dc6a49adf96bb990770f
3,638,364
def make_inverter_path(wire, inverted):
    """ Create site pip path through an inverter. """
    if inverted:
        return [('site_pip', '{}INV'.format(wire), '{}_B'.format(wire)),
                ('inverter', '{}INV'.format(wire))]
    else:
        return [('site_pip', '{}INV'.format(wire), wire)]
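    # Illustrative usage sketch (not part of the original dataset entry):
    # make_inverter_path('CLK', True)
    # -> [('site_pip', 'CLKINV', 'CLK_B'), ('inverter', 'CLKINV')]
    # make_inverter_path('CLK', False)
    # -> [('site_pip', 'CLKINV', 'CLK')]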
066c4bbad0f65fec587b12fc7a2947246401b877
3,638,365
import sys


def get_install_path():
    """Use the registry, and ask the user, to better determine the install
    directory."""
    reg_likely_path = get_registry_path()
    if reg_likely_path:
        user_path = get_user_path(initial_dir=reg_likely_path.as_posix())
    else:
        user_path = get_user_path(initial_dir=r'C:/Program Files (x86)/')

    if reg_likely_path != user_path:
        msg = (
            'Registry path and user defined path do not match. '
            'reg_likely_path: {} != user_path: {}'.format(reg_likely_path, user_path)
        )
        log.error(msg)
        sys.exit('Exiting...')

    check_selected_base_path(user_path)
    return user_path
e6960014d926d0fac14190288d06eac13289cdc7
3,638,366
import numpy as np  # added: the snippet uses np but did not import it


def constant(t, length):
    """
    ezgal.sfhs.constant( ages, length )

    Burst of constant star formation from t=0 to t=length.
    """
    if type(t) == type(np.array([])):
        sfr = np.zeros(t.size)
        m = t <= length
        if m.sum():
            sfr[m] = 1.0
        return sfr
    else:
        return 0.0 if t > length else 1.0
bfbc32042512465c7fecc50d976b369ac8e2c9fe
3,638,367
def model_setup_fn(attrs):
    """Generate the setup function for models."""
    model = load_model(attrs['type'], attrs['data'])

    def func(self):
        self.model = model
        self.type = attrs['type']
        self.data = attrs['data']
        self.network_type = attrs['network_type']
        self.dto = attrs.get('dto')
        self.catbuffer = attrs.get('catbuffer')
        self.extras = attrs.get('extras', {})

    return func
4f0ffa9e1de3f60edef847faf319f3c5a4bef28d
3,638,368
def _mkdir(space, dirname, mode=0777, recursive=False, w_ctx=None):
    """ mkdir - Makes directory """
    # Python 2 / RPython source: octal literal and except syntax kept as-is.
    mode = 0x7FFFFFFF & mode
    if not _valid_fname(dirname):
        space.ec.warn("mkdir() expects parameter 1 to "
                      "be a valid path, string given")
        return space.w_False
    if not is_in_basedir(space, 'mkdir', rpath.realpath(dirname)):
        return space.w_False
    try:
        if not os.path.isdir(dirname):
            if recursive:
                _recursive_mkdir(dirname, mode)
            else:
                os.mkdir(dirname, mode)
            return space.w_True
        else:
            space.ec.warn("mkdir(): No such file or directory")
            return space.w_False
    except OSError, e:
        space.ec.warn("mkdir(): %s" % os.strerror(e.errno))
        return space.w_False
    except TypeError:
        return space.w_False
c16b5e0100c50e300fcf9268383f20b1cb5c11b5
3,638,369
import decimal


def prepare_fixed_decimal(data, schema):
    """Converts decimal.Decimal to fixed length bytes array."""
    if not isinstance(data, decimal.Decimal):
        return data
    scale = schema.get('scale', 0)
    size = schema['size']

    # based on https://github.com/apache/avro/pull/82/
    sign, digits, exp = data.as_tuple()

    if -exp > scale:
        raise ValueError(
            'Scale provided in schema does not match the decimal')

    delta = exp + scale
    if delta > 0:
        digits = digits + (0,) * delta

    unscaled_datum = 0
    for digit in digits:
        unscaled_datum = (unscaled_datum * 10) + digit

    bits_req = unscaled_datum.bit_length() + 1

    size_in_bits = size * 8
    offset_bits = size_in_bits - bits_req

    mask = 2 ** size_in_bits - 1
    bit = 1
    for i in range(bits_req):
        mask ^= bit
        bit <<= 1

    if bits_req < 8:
        bytes_req = 1
    else:
        bytes_req = bits_req // 8
        if bits_req % 8 != 0:
            bytes_req += 1

    tmp = MemoryIO()

    if sign:
        unscaled_datum = (1 << bits_req) - unscaled_datum
        unscaled_datum = mask | unscaled_datum
        for index in range(size - 1, -1, -1):
            bits_to_write = unscaled_datum >> (8 * index)
            tmp.write(mk_bits(bits_to_write & 0xff))
    else:
        for i in range(offset_bits // 8):
            tmp.write(mk_bits(0))
        for index in range(bytes_req - 1, -1, -1):
            bits_to_write = unscaled_datum >> (8 * index)
            tmp.write(mk_bits(bits_to_write & 0xff))

    return tmp.getvalue()
5dc5ae8355842e175e1fa83394a63b37c04bdade
3,638,370
from typing import Any


def device_traits() -> dict[str, Any]:
    """Fixture that sets default traits used for devices."""
    return {"sdm.devices.traits.Info": {"customName": "My Sensor"}}
1ccaeac4a716706915654d24270c24dac0210977
3,638,371
import os


def writeOutput(ipData, outfilename):
    """ Writes the text output """
    # Get the current working directory so we can write the results file there
    outfilename = os.path.join(os.getcwd(), outfilename + '.txt')
    file1 = open(outfilename, 'w')
    numPoints = ipData.size
    for i in xrange(numPoints):
        ip = ipData[i]
        instName = ip['iname']
        label = ip['label']
        ipnum = ip['ipnum']
        huval = ip['HUval']
        file1.write('%s %7d %2d %8.1f\n' % (instName, label, ipnum, huval))
    file1.close()
    print('HU results written to file: %s' % (outfilename))
    return 0
ea2d3814f3e39ef01015cb6b33d5f883f1dd9bd7
3,638,372
import numpy as np  # added: the snippet uses np but did not import it


def calculate_equivalent_diameter(areas):
    """Calculate the equivalent diameters of a list or numpy array of areas.

    :param areas: List or numpy array of areas.
    :return: List of equivalent diameters.
    """
    areas = np.asarray(areas)
    diameters = np.sqrt(4 * areas / np.pi)
    return diameters.tolist()
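    # Illustrative usage sketch (not part of the original dataset entry):
    # a circle of area pi has equivalent diameter sqrt(4*pi/pi) = 2
    # calculate_equivalent_diameter([np.pi]) -> [2.0]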
a353883cf148819d9f298167e73acd60b89720e5
3,638,373
import sympy as sp  # added: the snippet uses sp but did not import it


def truncation_error(stencil: list, deriv: int, interval: str = DEFAULT_INTERVAL):
    """
    derive the leading-order of error term in the finite difference equation
    based on the given stencil.

    Args:
        stencil (list of int): relative point numbers used for
            discretization.
        deriv (int): order of derivative.
        interval (str, optional): an interval symbol like `dx`.
            Defaults to DEFAULT_INTERVAL.

    Returns:
        sympy Expr: the leading-order of error term

    Examples:
        >>> from dictos import finite_difference as fd
        >>> fd.truncation_error([-1, 0, 1], deriv=1)
        -f^(3)*h**2/6
        >>> fd.truncation_error([-1, 0, 1], deriv=2)
        -f^(4)*h**2/12
        >>> fd.truncation_error([-2, -1, 0, 1, 2], deriv=1)
        f^(5)*h**4/30
        >>> fd.truncation_error([-2, -1, 0, 1, 2], deriv=2)
        f^(6)*h**4/90
    """
    coef = coefficients(stencil, deriv)
    # derive finite difference coefficients based on given stencil

    x_set = create_coordinate_symbols(stencil, interval=interval)
    # create set of coordinate symbols from stencil.
    # [-2, -1, 0, 1, 2] -> [-2*h, -h, 0, h, 2*h]

    num_term = len(x_set) + deriv
    f_ts = [taylor_series(x, num_term) for x in x_set]
    # calculate Taylor series around points in x_set.

    fd_eq = dot_product(coef, f_ts)
    # calculate weighted sum of Taylor series.
    # for instance, 2nd-order 3-point central finite difference
    # for 1st derivative is
    # fd_eq [= f(h)/2 - f(-h)/2] = f^(1)*h + f^(3)*h**3/6 + ...

    h = sp.symbols(interval)
    return sp.expand(
        sp.simplify(
            derivative_symbol(DEFAULT_DIFFERENTIAND, deriv)
            - sp.nsimplify(fd_eq / h ** deriv, rational=True, tolerance=1e-10)
        )
    ).as_leading_term(h)
    # extract the leading-order of error term.
    # A finite difference formulation with error term is, for instance,
    # f^(1) = (f(h) - f(-h))/(2*h) - f^(3)*h**3/6 - ...
    # to extract error terms, reformulate fd_eq as
    # f^(1) - fd_eq/h**1 = - f^(3)*h**3/6 - ...
e3b8d312d551ed88ead3690b285659d56865e6e0
3,638,374
def _get_parameter_defaults(fpm, metadata, readout_mode, subarray, frame_time,
                            temperature, cosmic_ray_mode, verbose=2, logger=LOGGER):
    """

    Helper function to obtain appropriate defaults for parameters that have
    not been explicitly set.
    (Saves duplication of code between simulate_sca and simulate_sca_fromdata.)

    """
    # If the readout mode is not specified, obtain a default from
    # the FITS metadata of the input file. Failing that, obtain a
    # default value from the detector properties.
    if readout_mode is None:
        if metadata is not None and 'READPATT' in metadata:
            readout_mode = metadata['READPATT']
            if verbose > 2:
                logger.info( "Readout mode %s obtained from FITS metadata." % \
                    readout_mode )
        else:
            readout_mode = detector_properties['DEFAULT_READOUT_MODE']
            if verbose > 2:
                logger.info( "Readout mode defaulted to " + \
                    "%s from detector properties." % readout_mode )
    else:
        if verbose > 2:
            logger.info( "Readout mode explicitly set to %s." % readout_mode )

    # If the output subarray mode is not specified, obtain a default
    # from the FITS metadata of the input file, as long as this is a
    # known subarray mode. Failing that, obtain a default value from
    # the detector properties.
    if subarray is None:
        if metadata is not None and 'SUBARRAY' in metadata:
            subarray = metadata['SUBARRAY']
            if subarray in detector_properties['SUBARRAY']:
                if verbose > 2:
                    logger.info( "Subarray mode %s obtained from FITS metadata." % \
                        subarray )
            else:
                nonstandard = subarray
                subarray = detector_properties['DEFAULT_SUBARRAY']
                if verbose > 2:
                    strg = "Subarray mode %s obtained from FITS metadata " % \
                        nonstandard
                    strg += "is non-standard, so output subarray mode "
                    strg += "defaulted to %s from detector properties." % \
                        subarray
                    logger.info( strg )
        else:
            subarray = detector_properties['DEFAULT_SUBARRAY']
            if verbose > 2:
                logger.info( "Subarray mode defaulted to " + \
                    "%s from detector properties." % subarray )
    else:
        if verbose > 2:
            logger.info( "Subarray mode explicitly set to %s." % subarray )

    # If the frame time is not specified, obtain it from the FITS metadata
    # when available (an explicit or metadata frame time overrides the one
    # implied by the readout mode and subarray).
    if frame_time is None:
        if metadata is not None and 'TFRAME' in metadata:
            frame_time = metadata['TFRAME']
            if verbose > 2:
                strg = "Frame time of %f seconds obtained " % \
                    frame_time
                strg += "from FITS metadata "
                strg += "(overriding the readout mode and subarray)."
                logger.info( strg )
    else:
        if verbose > 2:
            strg = "Frame time of %f seconds specified explicitly " % frame_time
            strg += "(overriding the readout mode and subarray)."
            logger.info( strg )

    # If the detector temperature is not specified, use the target
    # temperature from the detector properties.
    if temperature is None:
        temperature = fpm['TARGET_TEMPERATURE']
        if verbose > 3:
            logger.debug( "Temperature defaulted to %fK from detector properties." % \
                temperature )
    else:
        if verbose > 2:
            logger.info( "Temperature explicitly set to %fK." % temperature )

    # If the cosmic ray mode is not specified, obtain a default from
    # the FITS metadata of the input file. Failing that, obtain a default
    # value from the cosmic ray properties.
    if cosmic_ray_mode is None:
        if metadata is not None and 'CRMODE' in metadata:
            cosmic_ray_mode = metadata['CRMODE']
            if verbose > 3:
                logger.debug( "Cosmic ray mode %s obtained from FITS metadata." % \
                    cosmic_ray_mode )
        else:
            cosmic_ray_mode = cosmic_ray_properties['DEFAULT_CR_MODE']
            if verbose > 3:
                logger.debug( "Cosmic ray mode defaulted to " + \
                    "%s from cosmic ray properties." % cosmic_ray_mode )
    else:
        if verbose > 2:
            logger.info( "Cosmic ray mode explicitly set to %s." % cosmic_ray_mode )

    return (readout_mode, subarray, frame_time, temperature, cosmic_ray_mode)
59431a086b15748cec45fd879bef8bdcd9af00c3
3,638,375
def cmd_renderurl(cfg, command, argv): """Renders a single url of your blog to stdout.""" parser = build_parser('%prog renderurl [options] <url> [<url>...]') parser.add_option('--headers', action='store_true', dest='headers', default=False, help='Option that causes headers to be displayed ' 'when rendering a single url.') (options, args) = parser.parse_args(argv) if not args: parser.print_help() return 0 for url in args: p = build_douglas(cfg) base_url = cfg['base_url'] if url.startswith(base_url): url = url[len(base_url):] p.run_render_one(url, options.headers) return 0
2073c71c459357c0b6a9661596cad34196fd6c24
3,638,376
def combine_expressions(expressions, relation='AND', licensing=Licensing()): """ Return a combined license expression string with relation, given a list of license expressions strings. For example: >>> a = 'mit' >>> b = 'gpl' >>> combine_expressions([a, b]) 'mit AND gpl' >>> assert 'mit' == combine_expressions([a]) >>> combine_expressions([]) >>> combine_expressions(None) >>> combine_expressions(('gpl', 'mit', 'apache',)) 'gpl AND mit AND apache' """ if not expressions: return if not isinstance(expressions, (list, tuple)): raise TypeError( 'expressions should be a list or tuple and not: {}'.format( type(expressions))) # Remove duplicate element in the expressions list expressions = list(dict((x, True) for x in expressions).keys()) if len(expressions) == 1: return expressions[0] expressions = [licensing.parse(le, simple=True) for le in expressions] if relation == 'OR': return str(licensing.OR(*expressions)) else: return str(licensing.AND(*expressions))
8955522546a8b803caf0b1c6a3c6e8752cb35a19
3,638,377
import torch
from torch import nn


def parrallelize(model: nn.Module) -> nn.Module:
    """
    Make use of all available GPUs using nn.DataParallel.

    NOTE: ensure you are using different random seeds for each process if you
    use techniques like data augmentation or anything else that needs random
    numbers to differ between steps.
    TODO: make sure this isn't already done by PyTorch?
    """
    if torch.cuda.device_count() > 1:
        print(f'> Using "nn.DataParallel(model)" on {torch.cuda.device_count()} GPUs.')
        model = nn.DataParallel(model)
    return model
8579086103c30664d91c37dee90353fe9d4b4c6b
3,638,378
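A usage sketch for parrallelize (hypothetical toy model; on CPU-only or single-GPU machines the call is a no-op):

import torch
from torch import nn

model = parrallelize(nn.Linear(16, 4))  # wrapped in DataParallel only if >1 GPU
out = model(torch.randn(8, 16))
assert out.shape == (8, 4)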
import sqlite3


def get_prof_details(prof_id):
    """ Returns the details of the professor in the same order as the DB. """
    cursor = sqlite3.connect('./db.sqlite3').cursor()
    # Parameters must be passed as a sequence; note the one-element tuple.
    cursor.execute("SELECT * FROM professor WHERE prof_id = ?;", (prof_id,))
    return cursor.fetchone()
668652474009abdda36d3e97fb5d30074f0a2755
3,638,379
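A usage sketch for get_prof_details (hypothetical professor id; assumes ./db.sqlite3 exists with a professor table). The parameterized query keeps the id out of the SQL string, avoiding injection:

row = get_prof_details(42)  # hypothetical id
if row is not None:
    print(row)  # tuple of columns in the table's declared order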
import sys import os def get_process_path(tshark_path=None, process_name='tshark'): """ Finds the path of the tshark executable. If the user has provided a path or specified a location in config.ini it will be used. Otherwise default locations will be searched. :param tshark_path: Path of the tshark binary :raises TSharkNotFoundException in case TShark is not found in any location. """ config = get_config() possible_paths = [config.get('tshark', 'tshark_path')] # Add the user provided path to the search list if tshark_path is not None: possible_paths.insert(0, tshark_path) # Windows search order: configuration file's path, common paths. if sys.platform.startswith('win'): for env in ('ProgramFiles(x86)', 'ProgramFiles'): program_files = os.getenv(env) if program_files is not None: possible_paths.append( os.path.join(program_files, 'Wireshark', '%s.exe' % process_name) ) # Linux, etc. search order: configuration file's path, the system's path else: os_path = os.getenv( 'PATH', '/usr/bin:/usr/sbin:/usr/lib/tshark:/usr/local/bin' ) for path in os_path.split(':'): possible_paths.append(os.path.join(path, process_name)) for path in possible_paths: if os.path.exists(path): return path raise TSharkNotFoundException( 'TShark not found. Try adding its location to the configuration file. ' 'Search these paths: {}'.format(possible_paths) )
71bc7179379387da15cc38fee0ca19a01c1798e2
3,638,380
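A usage sketch for get_process_path; an explicit path takes precedence over config.ini and the default search locations:

try:
    tshark = get_process_path()
except TSharkNotFoundException:
    # hypothetical install location passed explicitly
    tshark = get_process_path(tshark_path="/opt/wireshark/bin/tshark")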
import logging

import pandas as pd
import scipy.stats


def stats_per_gop(processed_video_sequence, needed=[]):
    """ general helper to extract statistics on a per-GOP basis """
    logging.debug(f"calculate {needed} gop based for {processed_video_sequence}")
    results = []
    for gop in by_gop(processed_video_sequence, columns=needed + ["FrameType"]):
        df = pd.DataFrame(gop)
        gop_res = {}
        for x in needed:
            gop_res["mean_" + x] = df[x].mean()
            gop_res["median_" + x] = df[x].median()
            gop_res["std_" + x] = df[x].std()
            gop_res["skew_" + x] = float(scipy.stats.skew(df[x]))
            gop_res["kurtosis_" + x] = float(scipy.stats.kurtosis(df[x]))
            gop_res["iqr_" + x] = float(scipy.stats.iqr(df[x]))
            for i in range(11):
                quantile = round(0.1 * i, 1)
                gop_res["{}_quantil_{}".format(quantile, x)] = float(df[x].quantile(quantile))

        # select non-I-frames
        df = df[df["FrameType"] != 1]
        for x in needed:
            gop_res["mean_" + x + "_non-i"] = df[x].mean()
            gop_res["median_" + x + "_non-i"] = df[x].median()
            gop_res["std_" + x + "_non-i"] = df[x].std()
            gop_res["skew_" + x + "_non-i"] = float(scipy.stats.skew(df[x]))
            gop_res["kurtosis_" + x + "_non-i"] = float(scipy.stats.kurtosis(df[x]))
            gop_res["iqr_" + x + "_non-i"] = float(scipy.stats.iqr(df[x]))
            for i in range(11):
                quantile = round(0.1 * i, 1)
                gop_res["{}_quantil_{}_non-i".format(quantile, x)] = float(df[x].quantile(quantile))
        results.append(gop_res)

    df = pd.DataFrame(results)
    result = df.mean().to_dict()
    logging.debug(f"estimated {needed} feature values: {result}")
    return result
296593578cbf131dcee6a9746b1d1a5f696c4989
3,638,381
def available_help(mod, ending="_command"):
    """Return the docstring help text for every function in the module whose
    name ends with ``ending`` (``_command`` by default)."""
    help_text = []
    for key in mod.__dict__:
        if key.endswith(ending):
            name = key.split(ending)[0]
            help_text.append(name + ":\n" + mod.__dict__[key].__doc__)
    return help_text
9afa1525c016aa74dd4b3eb91851890da3590524
3,638,382
from functools import reduce import operator def __s_polynomial(g, h): """ Computes the S-polynomial of g, h. The S-polynomial is a polynomial built explicitly so that the leading terms cancel when combining g and h linearly. """ deg_g = __multidegree(g) deg_h = __multidegree(h) max_deg = map(max, zip(deg_g, deg_h)) R = g.parent() # Builds a polynomial with the variables raised to max_deg, in order vars = map(R, R.variable_names()) x_pow_max_deg = reduce(operator.mul, [x ** d for (d, x) in zip(max_deg, vars)], R(1)) quo_g, _ = x_pow_max_deg.quo_rem(g.lt()) quo_h, _ = x_pow_max_deg.quo_rem(h.lt()) return quo_g * g - quo_h * h
49aa5b5b1dbebde1309aaa9fd2cb5947a010709f
3,638,383
from random import randint

from opensimplex import OpenSimplex  # opensimplex <= 0.3 API, which provides noise2d

# OCTAVES, OCTAVE_AMPLITUDE, OCTAVE_WAVELENGTH, LEVELS and int_median_cutter
# are expected as module-level definitions.


def generate_map_chunk(size_x: int, size_y: int, biome_type: str, x_offset: int = 0, y_offset: int = 0):
    """
    Function responsible for generating a map chunk in a specified or random biome type.
    A map chunk is basically a rectangular part of a map; the generated array is a
    nested list representing a 2d-array, where fields are integers indicating the
    elevation of a certain point.
    For generating the map chunk I use the OpenSimplex noise generator, which is a
    deterministic coherent (gradient) noise generator. The chunk is randomised by
    choosing a random seed when initialising the generator object.

    Args:
        size_x (int): horizontal size of chunk in map pixels
        size_y (int): vertical size of chunk in map pixels
        biome_type (str): string indicating which biome type to use
        x_offset (int): integer indicating horizontal offset used in generating Simplex Noise
        y_offset (int): integer indicating vertical offset used in generating Simplex Noise

    Returns:
        map_array (:obj:`list` of :obj:`list` of :obj:`int`): list of lists containing
            elevation number for specified coordinates
    """
    map_array = []
    for _ in range(size_x):
        map_array_part = []
        for _ in range(size_y):
            map_array_part.append(127)
        map_array.append(map_array_part)

    noise_maker = OpenSimplex(randint(-10000, 10000))
    for x in range(size_x):
        for y in range(size_y):
            for octave in range(OCTAVES):
                if map_array[x][y] > LEVELS.water or octave < 1:
                    map_array[x][y] = int_median_cutter(0, 255, map_array[x][y]+OCTAVE_AMPLITUDE[octave]*\
                        noise_maker.noise2d((x+x_offset)/OCTAVE_WAVELENGTH[octave], (y+y_offset)/OCTAVE_WAVELENGTH[octave]))

    if biome_type == 'random':
        biome_type = ['ocean_islands', 'ocean', 'high_mountains', 'default'][randint(0, 3)]

    if biome_type == 'ocean_islands':
        for x in range(size_x):
            for y in range(size_y):
                map_array[x][y] = max(map_array[x][y] - 100, 20)
    elif biome_type == 'ocean':
        for x in range(size_x):
            for y in range(size_y):
                map_array[x][y] = max(int(map_array[x][y]*0.3125), 20)
    elif biome_type == 'high_mountains':
        for x in range(size_x):
            for y in range(size_y):
                map_array[x][y] = min(map_array[x][y] + 100 + 10 * noise_maker.noise2d(x/OCTAVE_WAVELENGTH[1], y/OCTAVE_WAVELENGTH[1]), 250)
    return map_array
42863b7058bfce23b1123c14db562483254bdc21
3,638,384
import math import base64 import os def newid(length=16): """ Generate a new random string ID. The generated ID is uniformly distributed and cryptographically strong. It is hence usable for things like secret keys and access tokens. :param length: The length (in chars) of the ID to generate. :type length: int :returns: A random string ID. :rtype: str """ l = int(math.ceil(float(length) * 6. / 8.)) return base64.b64encode(os.urandom(l))[:length].decode('ascii')
b287a929f0dde6244b66bb8d9d9289b97f2d090b
3,638,385
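A usage sketch for newid; note the output is plain (not URL-safe) base64, so treat the IDs as opaque tokens:

token = newid()
assert len(token) == 16  # default length
assert len(newid(32)) == 32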
def test_process_cycle(zs2_file_name, verbose=True): """This is a test to check if util output changed in an incompatible manner. A zs2 file is read, converted to XML, and back-converted to a raw datastream.""" if verbose: print('Decoding %s...' % zs2_file_name) data_stream = _parser.load(zs2_file_name) input_fingerprint = fingerprint(data_stream) if verbose: print(' Data fingerprint %s' % input_fingerprint) xml_data = data_stream_to_xml(data_stream) if verbose: print(' Length of XML: %.0f kB' % (len(xml_data)/1024.)) if verbose: print('Encoding XML to zs2...') enc_data_stream = xml_to_data_stream(xml_data) output_fingerprint = fingerprint(enc_data_stream) if verbose: print(' Data fingerprint: %s' % output_fingerprint) if input_fingerprint != output_fingerprint: raise ValueError('Decode/Encode cycle of %s is unsuccessful.' % zs2_file_name) return input_fingerprint == output_fingerprint
6417362a9bdaa4086865f0b8fc510dda186534f7
3,638,386
import numpy as np


def get_dev_risk(weight, error):
    """
    :param weight: shape [N, 1], the importance weight for N source samples in the validation set
    :param error: shape [N, 1], the error value for each source sample in the validation set
        (typically 0 for correct classification and 1 for wrong classification)
    :return: scalar estimate of the target risk: the importance-weighted mean
        error with a control-variate correction based on the weights
    """
    N, d = weight.shape
    _N, _d = error.shape
    assert N == _N and d == _d, "dimension mismatch!"
    weighted_error = weight * error
    cov = np.cov(np.concatenate((weighted_error, weight), axis=1), rowvar=False)[0][1]
    var_w = np.var(weight, ddof=1)
    eta = -cov / var_w
    return np.mean(weighted_error) + eta * np.mean(weight) - eta
7278a8827dd48c341d9f294a3fed3a8b2e3c71ae
3,638,387
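A worked example for get_dev_risk on synthetic data; the eta term is a control variate that uses the weights themselves to reduce the variance of the estimate:

import numpy as np

rng = np.random.default_rng(0)
weight = rng.uniform(0.5, 1.5, size=(100, 1))       # importance weights
error = (rng.random((100, 1)) < 0.2).astype(float)  # ~20% misclassified
print(get_dev_risk(weight, error))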
import torch def skewness_fn(x, dim=1): """Calculates skewness of data "x" along dimension "dim".""" std, mean = torch.std_mean(x, dim) n = torch.Tensor([x.shape[dim]]).to(x.device) eps = 1e-6 # for stability sample_bias_adjustment = torch.sqrt(n * (n - 1)) / (n - 2) skewness = sample_bias_adjustment * ( (torch.sum((x.T - mean.unsqueeze(dim).T).T.pow(3), dim) / n) / std.pow(3).clamp(min=eps) ) return skewness
ae0bdea16c1461a2e407ed57279557bc8c7f56de
3,638,388
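A usage sketch for skewness_fn (assumes torch is installed); Gaussian samples should give values near zero:

import torch

x = torch.randn(4, 10000)     # batch of 4 samples along dim=1
print(skewness_fn(x, dim=1))  # close to 0 for symmetric data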
import random


def encrypt(message):
    """ Self-developed encryption method that uses base conversion """
    base = random.randint(3, 9)

    number_list = []
    for i in message:
        number_list.append(keys.index(i)+1)

    converted_number_list = []
    for i in number_list:
        converted_number_list.append(convert(i, base))

    encryption_list = []
    for number in converted_number_list:
        cur = []
        for digit in str(number):
            cur.append(chars[int(digit)])
        encryption_list.append(cur)

    string_encryption_list = []
    for i in encryption_list:
        string_encryption_list.append(''.join([str(x) for x in i]))

    converted_base_number = convert(123, base)

    # symbol substitution for the base marker digits; the original scheme
    # defines no mapping for the digits 5, 8 and 9, so they are skipped
    base_cipher = {'0': '?', '1': '{', '2': '[', '3': '/',
                   '4': '$', '6': '@', '7': '>'}
    encrypted_base_list = []
    for i in str(converted_base_number):
        if i in base_cipher:
            encrypted_base_list.append(base_cipher[i])

    return insert(list('|'.join(string_encryption_list)), encrypted_base_list)
967d45341fb8a5ec87f946ba6fc0a603f491485e
3,638,389
def get_signature_algorithm(algorithm_type_string):
    """convert a string into a key_type (TFTF_SIGNATURE_TYPE_xxx)

    returns a numeric key_type, or raises an exception if invalid
    """
    try:
        return TFTF_SIGNATURE_ALGORITHMS[algorithm_type_string]
    except KeyError:
        raise ValueError("Unknown algorithm type: '{0:s}'".
                         format(algorithm_type_string))
41ca226dc7e6c1c0f8d5b8592803d6555630902c
3,638,390
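A usage sketch for get_signature_algorithm (hypothetical algorithm name; the valid keys live in TFTF_SIGNATURE_ALGORITHMS):

try:
    key_type = get_signature_algorithm("rsa2048-sha256")  # hypothetical name
except ValueError as err:
    print(err)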
def bending_without_n_iteration(model, values, concrete_type, exp):
    """Calculate the necessary longitudinal reinforcement of a beam that is
    loaded by a bending moment without normal forces.

    Parameters
    ----------
    model : class
        class method that contains the Finite Element Analysis

    Returns
    -------
    erf_As : float
        necessary longitudinal reinforcement
    """
    erf_As = []
    fcd = values.concrete(concrete_type)['fcd']
    for i, ele in enumerate(model.elements):
        if not isinstance(ele, BeamColumnElement):
            continue
        # design bending moment: the larger absolute end moment of the element
        m_start = ele.local_internal_forces[2]
        m_end = ele.local_internal_forces[5]*-1
        m_ed = max(abs(m_start), abs(m_end))

        # initial values for the strains (eps) and iteration step sizes
        eps_c_2 = -3.5
        eps_s_1 = 25
        sigma_c = 3.4
        sigma_s = 24

        # first iteration step
        alpha_r = (3*abs(eps_c_2)-2)/(3*abs(eps_c_2))
        k_a = (abs(eps_c_2)*(3*abs(eps_c_2)-4)+2)/(2*abs(eps_c_2)*(3*abs(eps_c_2)-2))
        x_c = abs(eps_c_2)/(abs(eps_c_2)+abs(eps_s_1))*values.static_usable_height(ele.h, exp)
        F_cd = -1*alpha_r*ele.b*x_c*fcd
        z = values.static_usable_height(ele.h, exp)-k_a*x_c
        m_rds = -1*F_cd*z*1000
        diff = abs(m_rds-m_ed)

        if m_rds > m_ed:
            eps_s_1 = 25
            while diff > 0.001:
                x_c = abs(eps_c_2)/(abs(eps_c_2)+abs(eps_s_1))*values.static_usable_height(ele.h, exp)
                if abs(eps_c_2) <= 2:
                    alpha_r = 1/12*abs(eps_c_2)*(6-abs(eps_c_2))
                    k_a = (8-abs(eps_c_2))/(4*(6-abs(eps_c_2)))
                elif abs(eps_c_2) > 2 and abs(eps_c_2) <= 3.5:
                    alpha_r = (3*abs(eps_c_2)-2)/(3*abs(eps_c_2))
                    k_a = (abs(eps_c_2)*(3*abs(eps_c_2)-4)+2)/(2*abs(eps_c_2)*(3*abs(eps_c_2)-2))
                F_cd = -1*alpha_r*ele.b*x_c*fcd
                z = values.static_usable_height(ele.h, exp)-k_a*x_c
                m_rds = -1*F_cd*z*1000
                diff = abs(m_rds-m_ed)
                if m_rds > m_ed:
                    eps_c_2 = eps_c_2 + sigma_c
                else:
                    eps_c_2 = eps_c_2 - sigma_c
                if eps_c_2 < -3.5:
                    eps_c_2 = -3.5
                    sigma_c = sigma_c/2
        elif m_rds < m_ed:
            print('Concrete compression zone too small')
            eps_c_2 = -3.5
            while diff > 0.001:
                x_c = abs(eps_c_2)/(abs(eps_c_2)+abs(eps_s_1))*values.static_usable_height(ele.h, exp)
                alpha_r = (3*abs(eps_c_2)-2)/(3*abs(eps_c_2))
                k_a = (abs(eps_c_2)*(3*abs(eps_c_2)-4)+2)/(2*abs(eps_c_2)*(3*abs(eps_c_2)-2))
                F_cd = -1*alpha_r*ele.b*x_c*fcd
                z = values.static_usable_height(ele.h, exp)-k_a*x_c
                m_rds = -1*F_cd*z*1000
                diff = abs(m_rds-m_ed)
                if m_rds < m_ed:
                    eps_s_1 = eps_s_1 - sigma_s
                else:
                    eps_s_1 = eps_s_1 + sigma_s
                if eps_s_1 > 25:
                    eps_s_1 = 25
                    sigma_s = sigma_s/2

        # steel stress
        sigma_s1d = 435+(525/1.15-500/1.15)/(25-2.175)*(eps_s_1-2.175)
        # required reinforcement
        s = eps_c_2/(eps_c_2-eps_s_1)
        mue2 = m_ed*0.001*fcd/(ele.b*values.static_usable_height(ele.h, exp)**2*fcd*sigma_s1d*(1-k_a*s))
        As = mue2*ele.b*values.static_usable_height(ele.h, exp)*10000
        # the value is correct, see Leonhardt, page 171
        erf_As.append(As)
        # TODO: add an if-check for hyperjet
        # diff_erf_As.append(erf_As[i].g)
        # debug('As')
    return erf_As
6beca7f38f993f25bd468e0ff5eb0342b1bf85b0
3,638,391
import numpy as np
import pandas as pd


def corrgroups60(display=False):
    """ A simulated dataset with tight correlations among distinct groups of features. """

    # save the global random state and use a fixed seed for reproducibility
    old_state = np.random.get_state()
    np.random.seed(0)

    # generate dataset with known correlation
    N = 1000
    M = 60

    # set one coefficient from each group of 3 to 1
    beta = np.zeros(M)
    beta[0:30:3] = 1

    # build a correlation matrix with groups of 3 tightly correlated features
    x = np.ones(M)
    mu = np.zeros(M)
    C = np.eye(M)
    for i in range(0, 30, 3):
        C[i, i+1] = C[i+1, i] = 0.99
        C[i, i+2] = C[i+2, i] = 0.99
        C[i+1, i+2] = C[i+2, i+1] = 0.99

    f = lambda X: np.matmul(X, beta)

    # Make sure the sample correlation is a perfect match
    X_start = np.random.randn(N, M)
    X_centered = X_start - X_start.mean(0)
    Sigma = np.matmul(X_centered.T, X_centered) / X_centered.shape[0]
    W = np.linalg.cholesky(np.linalg.inv(Sigma)).T
    X_white = np.matmul(X_centered, W.T)
    assert np.linalg.norm(np.corrcoef(np.matmul(X_centered, W.T).T) - np.eye(M)) < 1e-6  # ensure this decorrelates the data

    # create the final data
    X_final = np.matmul(X_white, np.linalg.cholesky(C).T)
    X = X_final
    y = f(X) + np.random.randn(N) * 1e-2

    # restore the previous global numpy random state
    np.random.set_state(old_state)

    return pd.DataFrame(X), y
5a80116890ff262a164f48421871107c4cdaf8a6
3,638,392
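A usage sketch for corrgroups60; the columns come in tightly correlated triples, which is what makes the dataset useful for stress-testing feature-attribution methods:

X, y = corrgroups60()
assert X.shape == (1000, 60) and y.shape == (1000,)
print(X.iloc[:, :3].corr().round(2))  # ~0.99 off-diagonal within a group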
import numpy as np


def alpha_nu_gao08(profile, **kwargs):
    """log-normal distribution of alpha about the alpha--peak height relation from Gao+2008"""
    z = kwargs["z"]
    alpha = kwargs["alpha"]

    # scatter in dex
    if "sigma_alpha" in kwargs:
        sigma_alpha = kwargs["sigma_alpha"]
    else:
        # take scatter from Dutton & Maccio 2014
        sigma_alpha = 0.16 + 0.03 * z

    try:
        M = profile.MDelta(z, "vir")
        nu = peakHeight(M, z)
    except Exception:
        # can't find peak height, reject model
        return -np.inf

    alpha_model = 0.155 + 0.0095 * nu**2

    return lnlike_gauss(np.log10(alpha_model), np.log10(alpha), sigma_alpha)
393fdc6c87d4bf61fc367e7f9033bac24b9d6cea
3,638,393
import base64


def get_feed_entries(helper, name, stats):
    """Pulls the indicators from the minemeld feed."""
    feed_url = helper.get_arg('feed_url')
    feed_creds = helper.get_arg('credentials')
    feed_headers = {}

    # If auth is specified, add it as a header.
    if feed_creds is not None:
        auth = '{0}:{1}'.format(feed_creds['username'], feed_creds['password'])
        # base64.encodestring was removed in Python 3.9; b64encode adds no
        # trailing newline, so no replace() is needed.
        auth = base64.b64encode(auth.encode('ascii')).decode('ascii')
        feed_headers['Authorization'] = 'Basic {0}'.format(auth)

    # Pull events as json.
    resp = helper.send_http_request(
        url=feed_url,
        method='GET',
        parameters={'v': 'json', 'tr': 1},
        headers=feed_headers)

    # Raise exceptions on problems.
    resp.raise_for_status()
    feed_entries = resp.json()

    # Return the normalized events to be saved to the kv store.
    return normalized(name, feed_entries)
e881eebaaa9c31bc8d0abdd8b8f4aaeb9efcffe6
3,638,394
import maya.cmds


def get_skeleton_definition(character):
    """
    Returns skeleton definition of the given character
    :param character: str, HIK character name
    :return: dict
    """
    hik_bones = dict()
    hik_count = maya.cmds.hikGetNodeCount()
    for i in range(hik_count):
        bone = get_skeleton_node(character, i)
        if not bone:
            continue
        hik_name = maya.cmds.GetHIKNodeName(i)
        hik_bones[hik_name] = {'bone': bone, 'hik_id': i}

    return hik_bones
f76d4613f3a8adec649ea689d049ccff2966783c
3,638,395
import cantera as ct


def get_f_a_st(
        fuel="C3H8",
        oxidizer="O2:1 N2:3.76",
        mech="gri30.cti"
):
    """
    Calculate the stoichiometric fuel/air ratio of an undiluted mixture using
    Cantera. Calculates using only x_fuel to allow for compound oxidizers
    (e.g. air).

    Parameters
    ----------
    fuel : str
    oxidizer : str
    mech : str
        mechanism file to use

    Returns
    -------
    float
        stoichiometric fuel/air ratio (molar)
    """
    if oxidizer.lower() == "air":
        oxidizer = "O2:1 N2:3.76"

    gas = ct.Solution(mech)
    gas.set_equivalence_ratio(
        1,
        fuel,
        oxidizer
    )
    x_fuel = gas.mole_fraction_dict()[fuel]

    return x_fuel / (1 - x_fuel)
ecd711d8a1d5499e47ccbedebfb5641aec7c7a8b
3,638,396
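A usage sketch for get_f_a_st (assumes Cantera with the gri30 mechanism is installed). For propane, C3H8 + 5 (O2 + 3.76 N2) gives a molar fuel/air ratio of 1/23.8:

f_a = get_f_a_st(fuel="C3H8", oxidizer="air")
print(round(f_a, 4))  # ~0.042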
def get_parser_args(args=None): """ Transform args (``None``, ``str``, ``list``, ``dict``) to parser-compatible (list of strings) args. Parameters ---------- args : string, list, dict, default=None Arguments. If dict, '--' are added in front and there should not be positional arguments. Returns ------- args : None, list of strings. Parser arguments. Notes ----- All non-strings are converted to strings with :func:`str`. """ if isinstance(args,str): return args.split() if isinstance(args,list): return list(map(str,args)) if isinstance(args,dict): toret = [] for key in args: toret += ['--%s' % key] if isinstance(args[key],list): toret += [str(arg) for arg in args[key]] else: val = str(args[key]) if val: toret += [val] return toret return args
41b607a6ebf12526efcd38469192b398419327bf
3,638,397
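A few checks for get_parser_args showing how each input type is normalized:

assert get_parser_args("--n 4") == ["--n", "4"]
assert get_parser_args({"n": 4, "seeds": [1, 2]}) == ["--n", "4", "--seeds", "1", "2"]
assert get_parser_args(None) is None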
def parse_time_to_min(time): """Convert a duration to an integer in minutes. Example ------- >>> parse_time_to_min("2m 30s") 2.5 """ if " " in time: return sum([parse_time_to_min(t) for t in time.split(" ")]) time = time.strip() for unit, value in time_units.items(): if time.endswith(unit): number = float(time.replace(unit, "")) return number * value / time_units["m"]
6bf9656694ba4787bf9fd3e7c269d9c84e3ed143
3,638,398
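A usage sketch for parse_time_to_min, assuming a module-level time_units mapping in seconds such as {"s": 1, "m": 60, "h": 3600}:

assert parse_time_to_min("90s") == 1.5
assert parse_time_to_min("1h 30m") == 90.0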
from qgis.core import QgsGeometry


def relate_stream_island(stream_layer, island_layer):
    """
    Return the streams inside or delimiting islands.
    The topology is defined by DE-9IM matrices.

    :param stream_layer: the layer of the river network
    :type stream_layer: QgsVectorLayer object (lines)
    :param island_layer: the layer of the islands
    :type island_layer: QgsVectorLayer object (polygons)
    :return: list of lists of all the streams that make up the islands
    :rtype: list of lists of QgsFeature objects
    """
    # Get the features of the stream and island layers
    streams_list = list(stream_layer.dataProvider().getFeatures())
    islands_list = list(island_layer.dataProvider().getFeatures())

    # DE-9IM patterns describing streams that are inside an island or
    # that delimit its boundary
    patterns = ('F1FF0F212', '1FF00F212', '1FF0FF212', '1FFF0F212')

    # Initialise output list
    streams_in_island_list = []

    for island in islands_list:
        # Initialise the per-island output list
        island_list = []
        # Get the AbstractGeometry object for the current island
        current_island_abstract_geom = island.geometry().constGet()
        for stream in streams_list:
            # Get the AbstractGeometry object for the current stream
            current_stream_abstract_geom = stream.geometry().constGet()
            # Create a QgsGeometryEngine object
            engine = QgsGeometry.createGeometryEngine(current_stream_abstract_geom)
            # Prepare the geometry so that subsequent calls to spatial
            # relation methods are much faster
            engine.prepareGeometry()
            # Test whether the current stream matches any of the DE-9IM patterns
            if any(engine.relatePattern(current_island_abstract_geom, p) for p in patterns):
                # If so, the current stream is appended to the output list
                island_list.append(stream)
        streams_in_island_list.append(island_list)

    return streams_in_island_list
1d6c90349808f6364cc8b1461b09a0c31df6d9d3
3,638,399