content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def make_connector(name=None):
    """A connector between constraints.

    Returns a dict-based "object" exposing the connector protocol:
    ``set_val``, ``forget``, ``has_val`` and ``connect``. The current value
    lives in ``connector["val"]``; ``informant`` records which constraint
    set it (closure state shared by the inner functions).
    """
    informant = None      # the source that most recently set the value
    constraints = []      # constraints attached via connector["connect"]

    def set_value(source, value):
        # Accept a value only if none is set; otherwise flag a contradiction.
        nonlocal informant
        val = connector["val"]
        if val is None:
            informant, connector["val"] = source, value
            if name is not None:
                print(name, "=", value)
            # notify every attached constraint except the source
            # (inform_all_except is defined elsewhere in this module)
            inform_all_except(source, "new_val", constraints)
        elif val != value:
            print("Contradiction detected", val, "vs", value)

    def forget_value(source):
        # Only the constraint that set the value may retract it.
        nonlocal informant
        if informant == source:
            informant, connector["val"] = None, None
            if name is not None:
                print(name, "is forgotten")
            inform_all_except(source, "forget", constraints)

    connector = {
        "val": None,
        "set_val": set_value,
        "forget": forget_value,
        "has_val": lambda: connector["val"] is not None,
        "connect": lambda source: constraints.append(source),
    }
    return connector
46b6574b6e8965d56f71efa89a1d7389502f2867
3,635,400
from osgeo import ogr


def read_shp(path):
    """Generates a networkx.DiGraph from shapefiles. Point geometries are
    translated into nodes, lines into edges. Coordinate tuples are used as
    keys. Attributes are preserved, line geometries are simplified into start
    and end coordinates. Accepts a single shapefile or directory of many
    shapefiles.

    "The Esri Shapefile or simply a shapefile is a popular geospatial vector
    data format for geographic information systems software [1]_."

    Parameters
    ----------
    path : file or string
       File, directory, or filename to read.

    Returns
    -------
    G : NetworkX graph

    Examples
    --------
    >>> G=nx.read_shp('test.shp') # doctest: +SKIP

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Shapefile
    """
    # BUG FIX: the original `try:` block was empty (a SyntaxError). The lazy
    # import belongs inside it so the informative ImportError is raised when
    # OGR is missing.
    try:
        from osgeo import ogr
    except ImportError:
        raise ImportError("read_shp requires OGR: http://www.gdal.org/")
    net = nx.DiGraph()

    def getfieldinfo(lyr, feature, flds):
        # Collect the feature's attribute values in the order of `flds`.
        f = feature
        return [f.GetField(f.GetFieldIndex(x)) for x in flds]

    def addlyr(lyr, fields):
        # Add one OGR layer's features to the graph: points become nodes,
        # linestrings become edges keyed by their start/end coordinates.
        for findex in range(lyr.GetFeatureCount()):
            f = lyr.GetFeature(findex)
            flddata = getfieldinfo(lyr, f, fields)
            g = f.geometry()
            attributes = dict(zip(fields, flddata))
            attributes["ShpName"] = lyr.GetName()
            if g.GetGeometryType() == 1:  # point
                net.add_node((g.GetPoint_2D(0)), attributes)
            if g.GetGeometryType() == 2:  # linestring
                attributes["Wkb"] = g.ExportToWkb()
                attributes["Wkt"] = g.ExportToWkt()
                attributes["Json"] = g.ExportToJson()
                last = g.GetPointCount() - 1
                net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)

    if isinstance(path, str):
        shp = ogr.Open(path)
        lyrcount = shp.GetLayerCount()  # multiple layers indicate a directory
        for lyrindex in range(lyrcount):
            lyr = shp.GetLayerByIndex(lyrindex)
            flds = [x.GetName() for x in lyr.schema]
            addlyr(lyr, flds)
    return net
f68b5e1716c10b9c58f7f661fddbda7a84acfc99
3,635,401
def network_alignment(network_a, network_b):
    """combines two networks into a new network\n
    | Arguments:
    | :-
    | network_a (networkx object): biosynthetic network from construct_network
    | network_b (networkx object): biosynthetic network from construct_network\n
    | Returns:
    | :-
    | Returns combined network as a networkx object

    Nodes get an 'origin' attribute: 'cornflowerblue' if only in network_a,
    'darkorange' if only in network_b, 'saddlebrown' if in both.
    """
    merged = nx.Graph()
    nodes_a = list(network_a.nodes(data=True))
    nodes_b = list(network_b.nodes(data=True))
    combined = nodes_a + nodes_b
    ids_a = {node for node, _ in nodes_a}
    ids_b = {node for node, _ in nodes_b}

    def _origin(node):
        # Color encodes provenance of each node.
        if node in ids_a and node not in ids_b:
            return 'cornflowerblue'
        if node in ids_b and node not in ids_a:
            return 'darkorange'
        return 'saddlebrown'

    merged.add_nodes_from(combined)
    merged.add_edges_from(list(network_a.edges(data=True)) + list(network_b.edges(data=True)))
    nx.set_node_attributes(merged, {node: _origin(node) for node, _ in combined}, name='origin')
    return merged
e1aa0fdbbd74d63fe85028339ef9402a21f566b4
3,635,402
def grab_cpu_scalar(v, nd):
    """
    Get a scalar variable value from the tree at `v`.

    This function will dig through transfers and dimshuffles to get
    the constant value. If no such constant is found, it returns None.

    Parameters
    ----------
    v
        Aesara variable to extract the constant value from.
    nd : int
        Expected number of dimensions for the variable (for
        broadcasted constants).
    """
    node = v.owner
    if node is None:
        # Leaf variable: only a constant broadcast over all `nd` dims counts.
        if isinstance(v, Constant) and v.broadcastable == (True,) * nd:
            return v.dimshuffle(())
        return None
    op = node.op
    # Peel off a pure-broadcast dimshuffle and keep digging.
    if isinstance(op, (GpuDimShuffle, DimShuffle)) and op.new_order == ("x",) * nd:
        inner = node.inputs[0]
        return grab_cpu_scalar(inner, inner.ndim)
    # Host<->GPU transfers are transparent for this search.
    if isinstance(op, (GpuFromHost, HostFromGpu)):
        return grab_cpu_scalar(node.inputs[0], nd)
    return None
b842b59f903b23146cbcec5d391e8ecd7db67604
3,635,403
def _get_interface_name_index(dbapi, host): """ Builds a dictionary of interfaces indexed by interface name. """ interfaces = {} for iface in dbapi.iinterface_get_by_ihost(host.id): interfaces[iface.ifname] = iface return interfaces
0217f6ef8d4e5e32d76a4fc0d66bf74aa45f8c36
3,635,404
def generate_data(shape, num_seed_layers=3, avg_bkg_tracks=3, noise_prob=0.01,
                  verbose=True, seed=1234):
    """Top level function to generate a dataset.

    Combines signal tracks, background tracks and noise into binary event
    arrays (entries clipped to at most 1).

    Returns arrays (events, sig_tracks, sig_params).
    """
    np.random.seed(seed)
    n_events, n_det_layers, layer_size, _ = shape

    # Signal tracks
    sig_tracks, sig_params = gen_straight_tracks(
        n_events, n_det_layers, layer_size)
    # Background tracks
    bkg_tracks = gen_bkg_tracks(
        n_events, n_det_layers, layer_size,
        avg_bkg_tracks=avg_bkg_tracks, seed_layers=num_seed_layers)
    # Noise
    noise = gen_noise(shape, prob=noise_prob, seed_layers=num_seed_layers)

    # Full events: overlay everything and clamp occupancy to binary.
    events = sig_tracks + bkg_tracks + noise
    events[events > 1] = 1

    # Print data sizes
    if verbose:
        def _mb(a):
            # array footprint in megabytes
            return a.dtype.itemsize * a.size / 1e6
        print('Sizes of arrays')
        print(' events: %g MB' % _mb(events))
        print(' sig_tracks: %g MB' % _mb(sig_tracks))
        print(' bkg_tracks: %g MB' % _mb(bkg_tracks))
        print(' noise: %g MB' % _mb(noise))
        print(' sig_params: %g MB' % _mb(sig_params))

    return events, sig_tracks, sig_params
74116397ced9085794416a7c2c191e3a4fac135a
3,635,405
import xml
from typing import List
from typing import Optional
from typing import Tuple
import pathlib
import collections


def create_project(
    root: xml.etree.ElementTree.Element,
    include: List[str],
    exclude: Optional[List[str]] = None
) -> Tuple[List[str], List[str], List[pathlib.Path], List[pathlib.Path],
           List[pathlib.Path], List[pathlib.Path]]:
    """Create a project from a list of specified components.

    Args:
        root: root of element tree.
        include: list of component ids included in the project.
        exclude: list of component ids excluded from the project.

    Returns:
        (component_ids, defines, include_paths, headers, sources, libs) for
        the project.
    """
    # Build the project list from the list of included components by expanding
    # dependencies.
    # BUG FIX: the original used `pending_list = include` and then popped from
    # and extended it, destructively mutating the caller's list. Copy into a
    # deque (O(1) popleft, same FIFO traversal order).
    project_list: List[str] = []
    pending = collections.deque(include)
    while pending:
        component_id = pending.popleft()
        if component_id in project_list:
            continue
        if exclude is not None and component_id in exclude:
            continue
        project_list.append(component_id)
        pending.extend(parse_dependencies(root, component_id))

    return (
        project_list,
        sum((parse_defines(root, component_id) for component_id in project_list), []),
        sum((parse_include_paths(root, component_id) for component_id in project_list), []),
        sum((parse_headers(root, component_id) for component_id in project_list), []),
        sum((parse_sources(root, component_id) for component_id in project_list), []),
        sum((parse_libs(root, component_id) for component_id in project_list), []),
    )
dda783efec500750dec65823701f186234bbc5dc
3,635,406
import torch


def to_data(x):
    """Converts variable to numpy, moving it to host memory first if a GPU
    is available."""
    host_tensor = x.cpu() if torch.cuda.is_available() else x
    return host_tensor.data.numpy()
b91f755d43fde06db1bd38158881eb2f84e43d10
3,635,407
def levmar_bc(func, p0, y, bc, args=(), jacf=None,
              mu=1.0e-03, eps1=1.5e-08, eps2=1.5e-08, eps3=1.5e-08,
              maxit=1000, cdiff=False):
    """Box-constrained Levenberg-Marquardt least-squares minimization.

    Parameters
    ----------
    func: callable
        Function or method computing the model function,
        `y = func(p, *args)`.
    p0: array_like, shape (m,)
        Initial estimate of the parameters.
    y: array_like, shape (n,)
        Dependent data, or the observation.
    bc: sequence of 2-tuples
        `(min, max)` pairs for each element of the parameters, specifying
        the (inclusive) upper and lower bounds. Use None for one of `min`
        or `max` for specifying no bound in that direction.
    args: tuple, optional
        Extra arguments passed to `func` (and `jacf`).
    jacf: callable, optional
        Function or method computing the Jacobian of `func`. If it is
        None, the Jacobian will be approximated.
    mu: float, optional
        Scale factor for initial mu.
    eps1: float, optional
        Stopping threshold for ||J^T e||_inf.
    eps2: float, optional
        Stopping threshold for ||Dp||_2.
    eps3: float, optional
        Stopping threshold for ||e||_2.
    maxit: int, optional
        The maximum number of iterations.
    cdiff: {True, False}, optional
        If this is True, the Jacobian is approximated with central
        differentiation.

    Returns
    -------
    p: ndarray, shape=(m,)
        Best-fit parameters.
    pcov: ndarray, shape=(m,m)
        Covariance of the best-fit parameters.
    info: tuple
        Information regarding minimization.
            0: ||e||_2 at `p0`
            1:
                0: 2-norm of e
                1: infinity-norm of J^T.e
                2: 2-norm of Dp
                3: mu / max{(J^T.J)_ii}
            2: The number of iterations
            3: The reason for termination
            4: The number of `func` evaluations
            5: The number of `jacf` evaluations
            6: The number of the linear system solved
    """
    # Thin wrapper: all work is delegated to the `_levmar` C extension.
    # The positional argument order must match the extension's signature.
    return _levmar.levmar_bc(func, p0, y, bc, args, jacf,
                             mu, eps1, eps2, eps3, maxit, cdiff)
6d861988290c00b846a8ff35bc36beca778985f7
3,635,408
import json


def set_name_filter(request):
    """
    Sets product filters given by passed request.

    Stores/clears the "product_name" entry of the session's product_filters
    dict based on POST "name", re-renders the product listing fragments and
    returns them as a JSON payload of (selector, html) pairs.
    """
    product_filters = request.session.get("product_filters", {})
    if request.POST.get("name", "") != "":
        product_filters["product_name"] = request.POST.get("name")
    else:
        # Empty name clears any previously stored name filter.
        if product_filters.get("product_name"):
            del product_filters["product_name"]
    request.session["product_filters"] = product_filters
    # _get_filtered_products_for_product_view is defined elsewhere in this app.
    products = _get_filtered_products_for_product_view(request)
    paginator = Paginator(products, 25)
    # NOTE(review): request.REQUEST (merged GET/POST) was removed in
    # Django 1.9 — presumably this targets an older Django; confirm before
    # upgrading.
    page = paginator.page(request.REQUEST.get("page", 1))
    product_id = request.REQUEST.get("product-id", 0)
    # (CSS selector, rendered fragment) pairs consumed by the client-side JS.
    html = (
        ("#products-inline", products_inline(request, page, paginator)),
        ("#selectable-products-inline", selectable_products_inline(request, page, paginator, product_id)),
        ("#pages-inline", pages_inline(request, page, paginator, product_id)),
    )
    result = json.dumps({
        "html": html,
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
a36e9e46a58b71926bafee248312abddaba6ce90
3,635,409
import argparse


def get_arguments():
    """
    Wrapper function to get the command line arguments.
    Inserting this piece of code into its own function for conda
    compatibility.
    """
    parser = argparse.ArgumentParser(
        prog='KrakMeOpen',
        usage='krakmeopen [--input FILE | --input_pickle FILE | --input_file_list FILE] [--output FILE | --output_pickle FILE] --names FILE --nodes FILE [--tax_id INT | --tax_id_file FILE] --kmer_tally_table FILE',
        description=''' A Kraken2 downstream analysis toolkit. More specifically, calculate a series of quality metrics for Kraken2 classifications.''',
        epilog=''' The metrics are calculated on the clade-level. All kmers from all reads that are classified to any of the nodes in the clades rooted at the supplied tax IDs are aggregated, and metrics are calculated on those aggregations. Input is Kraken2 read-by-read classification files (can be gzipped). Output is a tab separated file containing the metrics.''')

    # Input arguments: exactly one source of classifications/kmer tallies.
    input_group = parser.add_mutually_exclusive_group(required=True)
    input_group.add_argument(
        '--input',
        metavar='FILE',
        type=str,
        help='Kraken2 read-by-read classifications file.')
    input_group.add_argument(
        '--input_pickle',
        metavar='FILE',
        type=str,
        help='A pickle file containing kmer tallies, produced with --output_pickle')
    input_group.add_argument(
        '--input_file_list',
        metavar='FILE',
        type=str,
        help='''A file containing file paths to multiple pickles, one per line. Will calculate metrics on the sum of kmer counts from all pickles.''')

    # Output arguments: either metrics or a pickled tally, never both.
    output_group = parser.add_mutually_exclusive_group(required=True)
    output_group.add_argument(
        '--output',
        metavar='FILE',
        type=str,
        help='The file to write the quality metrics output to.')
    output_group.add_argument(
        '--output_pickle',
        metavar='FILE',
        type=str,
        help='''The pickle file to write kmer tallies to. Use this argument to supress calculation of quality metrics and only output kmer counts to a pickled file. 
Input the pickled file using --input_pickle.''')

    parser.add_argument(
        '--kmer_tally_table',
        metavar='FILE',
        required=False,
        help='File to output the complete kmer tally table for each tax ID to. Optional.')

    # The taxonomy
    parser.add_argument(
        '--names',
        metavar='FILE',
        required=True,
        help='NCBI style taxonomy names dump file (names.dmp). Required.')
    parser.add_argument(
        '--nodes',
        metavar='FILE',
        required=True,
        help='NCBI style taxonomy nodes dump file (nodes.dmp). Required.')

    # Supply relevant taxonomic ID on command line, or one or multiple
    # taxonomic IDs through a text file.
    tax_id_group = parser.add_mutually_exclusive_group(required=True)
    tax_id_group.add_argument(
        '--tax_id',
        metavar='INT',
        type=int,
        help='A taxonomic ID for a clade that you wish to calculate quality metrics for.')
    tax_id_group.add_argument(
        '--tax_id_file',
        metavar='FILE',
        type=str,
        help='''Supply multiple taxonomic IDs at once. A textfile with one taxonomic ID per line. Calculate quality metrics for the clades rooted at the taxonomic IDs in the file.''')

    return parser.parse_args()
ad326fdab79874f33e8df005d5d5e470d23f8e42
3,635,410
def select( da, longitude=None, latitude=None, T=None, Z=None, iT=None, iZ=None, extrap=False, extrap_val=None, locstream=False, ): """Extract output from da at location(s). Parameters ---------- da: DataArray Property to take gradients of. longitude, latitude: int, float, list, array (1D or 2D), DataArray, optional longitude(s), latitude(s) at which to return model output. Package `xESMF` will be used to interpolate with "bilinear" to these horizontal locations. T: datetime-like string, list of datetime-like strings, optional Datetime or datetimes at which to return model output. `xarray`'s built-in 1D interpolation will be used to calculate. To selection time in any way, use either this keyword argument or `iT`, but not both simultaneously. Z: int, float, list, optional Depth(s) at which to return model output. `xarray`'s built-in 1D interpolation will be used to calculate. To selection depth in any way, use either this keyword argument or `iZ`, but not both simultaneously. iT: int or list of ints, optional Index of time in time coordinate to select using `.isel`. To selection time in any way, use either this keyword argument or `T`, but not both simultaneously. iZ: int or list of ints, optional Index of depth in depth coordinate to select using `.isel`. To selection depth in any way, use either this keyword argument or `Z`, but not both simultaneously. extrap: bool, optional Whether or not to extrapolate outside the available domain. If False, will return 0 by default outside the domain, or optionally `extrap_value`. If True, will use `extrap_method = "nearest_s2d"` to extrapolate. extrap_val: int, float, optional If `extrap==False`, values outside domain will be returned as 0, or as `extra_value` if input. locstream: boolean, optional Which type of interpolation to do: * False: 2D array of points with 1 dimension the lons and the other dimension the lats. * True: lons/lats as unstructured coordinate pairs (in xESMF language, LocStream). 
Returns ------- DataArray of interpolated and/or selected values from da. Examples -------- Select a single grid point. >>> longitude = 100 >>> latitude = 10 >>> iZ = 0 >>> iT = 0 >>> varname = 'u' >>> kwargs = dict(da=da, longitude=longitude, latitude=latitude, iT=iT, iZ=iZ, varname=varname) >>> da_out = em.select(**kwargs) """ # can't run in both Z and iZ mode, same for T/iT assert not ((Z is not None) and (iZ is not None)) assert not ((T is not None) and (iT is not None)) if (longitude is not None) and (latitude is not None): if (isinstance(longitude, int)) or (isinstance(longitude, float)): longitude = [longitude] if (isinstance(latitude, int)) or (isinstance(latitude, float)): latitude = [latitude] latitude = np.asarray(latitude) longitude = np.asarray(longitude) if extrap: extrap_method = "nearest_s2d" else: extrap_method = None if (not extrap) and ((longitude is not None) and (latitude is not None)): assertion = "the input longitude range is outside the model domain" assert (longitude.min() >= da.cf["longitude"].min()) and ( longitude.max() <= da.cf["longitude"].max() ), assertion assertion = "the input latitude range is outside the model domain" assert (latitude.min() >= da.cf["latitude"].min()) and ( latitude.max() <= da.cf["latitude"].max() ), assertion # Horizontal interpolation # # grid of lon/lat to interpolate to, with desired ending attributes if (longitude is not None) and (latitude is not None): if latitude.ndim == 1: da_out = xr.Dataset( { "lat": ( ["lat"], latitude, dict(axis="Y", units="degrees_north", standard_name="latitude"), ), "lon": ( ["lon"], longitude, dict(axis="X", units="degrees_east", standard_name="longitude"), ), } ) elif latitude.ndim == 2: da_out = xr.Dataset( { "lat": ( ["Y", "X"], latitude, dict(units="degrees_north", standard_name="latitude"), ), "lon": ( ["Y", "X"], longitude, dict(units="degrees_east", standard_name="longitude"), ), } ) # set up regridder, which would work for multiple interpolations if desired regridder = 
xe.Regridder( da, da_out, "bilinear", extrap_method=extrap_method, locstream_out=locstream ) # do regridding da = regridder(da, keep_attrs=True) # Time and depth interpolation or iselection # if iZ is not None: with xr.set_options(keep_attrs=True): da = da.cf.isel(Z=iZ) elif Z is not None: with xr.set_options(keep_attrs=True): da = da.cf.interp(Z=Z) if iT is not None: with xr.set_options(keep_attrs=True): da = da.cf.isel(T=iT) elif T is not None: with xr.set_options(keep_attrs=True): da = da.cf.interp(T=T) if extrap_val is not None: # returns 0 outside the domain by default. Assumes that no other values are exactly 0 # and replaces all 0's with extrap_val if chosen. da = da.where(da != 0, extrap_val) return da
91fbf82ca99ddc3eb6328b1c435cdb70f706c9a3
3,635,411
def afsluitmiddel_regelbaarheid(damo_gdf=None, obj=None):
    """Map the AFSLUITREGELBAARHEID name onto its attribute value.

    (Translated from Dutch: "Zet naam van AFSLUITREGELBAARHEID om naar
    attribuutwaarde".) Applies `_afsluitmiddel_regelbaarheid` to each entry
    of the SOORTREGELBAARHEID column and returns the result as a Series
    indexed like `damo_gdf`.
    """
    mapped_values = [
        _afsluitmiddel_regelbaarheid(name)
        for name in damo_gdf['SOORTREGELBAARHEID']
    ]
    return pd.Series(data=mapped_values, index=damo_gdf.index)
66bb59b2cea9e84faafe54adf114a73571f6fd05
3,635,412
import copy


def generate(i):
    """Render an HTML table of people tagged per conference committee.

    Input:  {
              (output_txt_file) - if !='', generate text file for a given conference
              (conf_id)         - record names for this conf
            }

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }
    """
    s = ''
    tags_desc = i.get('tags_desc', '')
    otf = i.get('output_text_file', '')
    conf_id = i.get('conf_id', '')
    tt = ''

    # Table header: one column per tag, with alternating cell shading.
    s += '<table border="0" cellpadding="3" cellspacing="0" class="ck_margin_40px">\n\n'
    s += ' <tr><td><b>Name:</b></td> <td><b>Organization:</b></td>'
    x = ''
    y = 'style="background-color:#efefef"'
    highlight1 = False
    for q in tags_desc:
        # Alternate x/y shading across header columns.
        if highlight1:
            x1 = x
            highlight1 = False
        else:
            x1 = y
            highlight1 = True
        tg = q['id']
        name = q['name']
        s += '<td ' + x1 + ' align="center"><b>' + name + '</b></td>'
    s += '</tr>\n'

    # aec: sort-key -> {'tags': [...], 'org': ...}; aecx: sort-key -> display name.
    aec = {}
    aecx = {}
    selected_aec = {}
    selected_aecx = {}
    for q in tags_desc:
        tg = q['id']
        name = q['name']
        # List all people
        ii = {'action': 'search',
              'module_uoa': cfg['module_deps']['person'],
              'tags': tg,
              'add_meta': 'yes'}
        r = ck.access(ii)
        if r['return'] > 0:
            return r
        lst = r['lst']
        for q in lst:
            d = q.get('meta', {})
            n = d.get('name', '')
            o = d.get('organization', '')
            if n != '':
                # Sort by surname
                n1 = ''
                n2 = n
                j = n.rfind(' ')
                if j > 0:
                    n1 = n[:j].strip()
                    n2 = n[j + 1:].strip()
                ny = n2 + ' ' + n1
                aecx[ny] = n
                if ny not in aec:
                    aec[ny] = {'tags': []}
                aec[ny]['tags'].append(tg)
                aec[ny]['org'] = o
                # Remember people belonging to the requested conference.
                if conf_id == tg:
                    selected_aec[ny] = copy.deepcopy(aec[ny])
                    selected_aecx[ny] = copy.deepcopy(aecx[ny])

    # Body rows, sorted by surname, with alternating row shading.
    highlight = True
    for q in sorted(aec):
        n = aecx[q]
        t = aec[q]['tags']
        o = aec[q]['org']
        if highlight:
            x = 'style="background-color:#dfcfff"'
            y = 'style="background-color:#cf9fff"'
            highlight = False
        else:
            x = ''
            y = 'style="background-color:#efefef"'
            highlight = True
        s += ' <tr ' + x + '><td>' + n + '</td><td>' + o + '</td>'
        highlight1 = False
        for q in tags_desc:
            if highlight1:
                x1 = x
                highlight1 = False
            else:
                x1 = y
                highlight1 = True
            tg = q['id']
            name = q['name']
            sx = ''
            if tg in t:
                sx = '<b>*</b>'  # star marks membership in this tag's committee
            s += '<td ' + x1 + ' align="center">' + sx + '</td>'
        s += '</tr>\n'
    s += '\n'
    s += '</table>\n'

    ck.out(s)

    # Optional plain-text roster for the selected conference.
    if otf != '' and len(selected_aec) > 0:
        j = 0
        for q in sorted(selected_aec):
            j += 1
            org = selected_aec[q]['org']
            org = org.replace(' (', ', ').replace(')', '')
            tt += str(j) + ') ' + selected_aecx[q] + ' (' + org + ')\n'
        r = ck.save_text_file({'text_file': otf, 'string': tt})
        if r['return'] > 0:
            return r

    return {'return': 0}
ec2de19931fa527cba88baec8775b21c7bcbf88c
3,635,413
def calc_solidangle_particle(
    pts=None,
    part_traj=None,
    part_radius=None,
    config=None,
    approx=None,
    aniso=None,
    block=None,
):
    """ Compute the solid angle subtended by a particle along a trajectory

    The particle has radius r, and trajectory (array of points) traj
    It is observed from pts (array of points)

    traj and pts are (3, N) and (3, M) arrays of cartesian coordinates

    approx = True => use approximation
    aniso = True => return also unit vector of emission
    block = True consider LOS collisions (with Ves, Struct...)

    if block:
        config used for LOS collisions

    Parameters
    ----------
    pts: np.ndarray
        Array of (3, M) pts coordinates (X, Y, Z) representing the points
        from which the particle is observed
    part_traj: np.ndarray
        Array of (3, N) pts coordinates (X, Y, Z) representing the particle
        positions
    part_radius: float / np.ndarray
        Unique of multiple values for the radius of the spherical particle
            if multiple, rad is a np.ndarray of shape (N,)
    config: None / tf.geom.Config
        if block = True, solid angles are non-zero only if the field of view
        is not blocked bya structural element in teh chamber
    approx: None / bool
        Flag indicating whether to compute the solid angle using an
        1st-order series development (in whichcase the solid angle becomes
        proportional to the radius of the particle, see Notes_Upgrades/)
    aniso: None / bool
        Flag indicating whether to consider anisotropic emissivity, meaning
        the routine must also compute and return the unit vector directing
        the flux from each pts to each position on the trajectory of the
        particle
    block: None / bool
        Flag indicating whether to check for vignetting by structural
        elements provided by config

    Return:
    -------
    sang: np.ndarray
        (N, M) Array of floats, solid angles
    """
    ################
    # Prepare inputs
    (
        part_traj, pts, part_radius, config,
        approx, aniso, block
    ) = _check_calc_solidangle_particle(
        traj=part_traj,
        pts=pts,
        rad=part_radius,
        config=config,
        approx=approx,
        aniso=aniso,
        block=block,
    )

    ################
    # Main computation

    # traj2pts vector, with length (3d array (3, N, M))
    vect = - pts[:, :, None] + part_traj[:, None, :]
    len_v = np.ascontiguousarray(np.sqrt(np.sum(vect**2, axis=0)))

    # If aniso or block, normalize
    if aniso or block:
        vect = vect / len_v[None, :, :]

    # Solid angle: ratio of particle radius to observer-particle distance.
    r_d = part_radius[None, :] / len_v
    where_zero = len_v <= part_radius[None, :]
    r_d[where_zero] = 0.  # temporary value
    if approx:
        # 1st-order series development (see docstring).
        sang = np.pi * (r_d**2 + r_d**4 / 4. + r_d**6 / 8. + r_d**8 * 5 / 64)
    else:
        # Exact spherical-cap formula.
        sang = 2.*np.pi * (1 - np.sqrt(1. - r_d ** 2))

    # when particle in mesh point, distance len_v = 0 thus sang neglected
    sang[where_zero] = 0.

    # block: zero out lines of sight blocked by structural elements.
    if block:
        kwdargs = config.get_kwdargs_LOS_isVis()
        indvis = _GG.LOS_areVis_PtsFromPts_VesStruct(
            pts, part_traj, dist=len_v, **kwdargs
        )
        iout = indvis == 0
        sang[iout] = 0.
        vect[:, iout] = np.nan

    ################
    # Return
    if aniso:
        return sang, vect
    return sang
46a9700c12bc8d734678795a0cb989d025f02685
3,635,414
def fields_view(arr, fieldNameLst=None):
    """Return a view of a numpy record array restricted to the named fields.

    `fieldNameLst` should be a list of column names; when omitted (or
    falsy) every field of `arr` is included. The result shares memory with
    `arr` — writes through either are visible in both.
    """
    names = fieldNameLst or arr.dtype.names  # default to all fields
    sub_dtype = np.dtype({name: arr.dtype.fields[name] for name in names})
    # Re-wrap the same buffer with the narrowed dtype and original strides.
    return np.ndarray(arr.shape, sub_dtype, arr, 0, arr.strides)
5d4e6629c3acf26619033d9d7e103c9e817bea78
3,635,415
import os


def initialize_servers_and_threads():
    """
    Returns a dictionary mapping each email (one per entry in the .emails
    directory) to an initialized server and thread object:
    {email: {"Server": Server, "Thread": ServerThread}}.
    """
    servers_and_threads = {}
    for email in os.listdir(".emails"):
        srv = classes.server.Server(email)
        thread = classes.server.ServerThread(srv)
        servers_and_threads[email] = {"Server": srv, "Thread": thread}
    return servers_and_threads
895cddf530e8e77f5356f14ce19a266ba0f9ed0b
3,635,416
from typing import List
from typing import Tuple


def image_detach_with_id_color_list(
    color_img: np.ndarray,
    id_color_list: List[Tuple[int, Tuple[int, int, int]]],
    bin_num: int,
    mask_value: float = 1.0,
) -> np.ndarray:
    """Split the color image `color_img` into per-instance channels by color.

    Parameters
    ----------
    color_img : np.ndarray
        Color image.
    id_color_list : List[Tuple[int, Tuple[int, int, int]]]
        List of (ID, BGR color) tuples. If `(0, 0, 0)` is present, it is
        expected to come first.
    bin_num : int
        Maximum number of instance objects to separate into.
    mask_value : float
        Value written where a channel's color matches, by default 1.

    Returns
    -------
    np.ndarray
        Array with the width and height of `color_img` and `bin_num`
        channels; channel i is `mask_value` where the image matches the
        i-th color, 0 elsewhere.
    """
    height, width = color_img.shape[0], color_img.shape[1]
    detached: np.ndarray = np.zeros((height, width, bin_num))
    for channel, id_color in enumerate(id_color_list):
        layer = np.zeros((height, width, 1), dtype=np.float32)
        # Pixels whose full color vector equals this entry's color.
        layer[np.all(color_img == id_color[1], axis=-1)] = mask_value
        detached[:, :, channel : channel + 1] = layer
    return detached
941100eb6fed342b4a89a10115ee55350c58ae6e
3,635,417
from typing import Dict
from typing import Any
from typing import List
import copy


def defaultArgs(options: Dict = None, **kwargs: Any) -> List[str]:  # noqa: C901,E501
    """Get the default flags the chromium will be launched with.

    ``options`` or keyword arguments are set of configurable options to set on
    the browser. Can have the following fields:

    * ``headless`` (bool): Whether to run browser in headless mode. Defaults
      to ``True`` unless the ``devtools`` option is ``True``.
    * ``args`` (List[str]): Additional arguments to pass to the browser
      instance. The list of chromium flags can be found
      `here <http://peter.sh/experiments/chromium-command-line-switches/>`__.
    * ``userDataDir`` (str): Path to a User Data Directory.
    * ``devtools`` (bool): Whether to auto-open DevTools panel for each tab.
      If this option is ``True``, the ``headless`` option will be set
      ``False``.
    """
    options = merge_dict(options, kwargs)
    devtools = options.get('devtools', False)
    headless = options.get('headless', not devtools)
    args = options.get('args', list())
    userDataDir = options.get('userDataDir')
    # BUG FIX: this module imports `copy` (the module), so the original call
    # `copy(DEFAULT_ARGS)` would raise "TypeError: 'module' object is not
    # callable". Use copy.copy() to take a shallow copy of the defaults.
    chromeArguments = copy.copy(DEFAULT_ARGS)
    if userDataDir:
        chromeArguments.append(f'--user-data-dir={userDataDir}')
    if devtools:
        chromeArguments.append('--auto-open-devtools-for-tabs')
    if headless:
        chromeArguments.extend((
            '--headless',
            '--hide-scrollbars',
            '--mute-audio',
        ))
        if current_platform().startswith('win'):
            chromeArguments.append('--disable-gpu')
    # If the caller passed only flags (no URL), open about:blank by default.
    if all(map(lambda arg: arg.startswith('-'), args)):  # type: ignore
        chromeArguments.append('about:blank')
    chromeArguments.extend(args)
    return chromeArguments
3f43e3505b77f232e7f797caff318ffe5f466f7d
3,635,418
from typing import List


def process_v3_fields(fields: List[str], endpoint: str) -> str:
    """
    Filter v3 field list to only include valid fields for a given endpoint.
    Logs a warning when fields get filtered out.
    """
    # First pass: drop names that are not known v3 fields at all.
    known_fields = [field for field in fields if field in FIELDS_V3]
    dropped = set(fields) - set(known_fields)
    if dropped:
        _LOGGER.warning("Removed invalid fields: %s", list(dropped))
    # Second pass: drop fields the requested endpoint does not serve.
    supported_fields = [
        field for field in known_fields if endpoint in FIELDS_V3[field]
    ]
    unsupported = set(known_fields) - set(supported_fields)
    if unsupported:
        _LOGGER.warning(
            "Remove fields not available for `%s` endpoint: %s",
            endpoint,
            list(unsupported),
        )
    return ",".join(supported_fields)
87f21c58a4a5613dc0c319cf4adcc29a42cd2054
3,635,419
def getManifestFsLayers(manifest):
    """ returns hashes pointing to layers for manifest"""
    inner_manifest = manifest["manifest"]
    return inner_manifest["fsLayers"]
a3449c2828222c2b806df8621dd6a24375778ed2
3,635,420
def Jones_METIS(c, tau, w, q):
    """ Returns Jones polynomial evaluated at t(q) via METIS contraction of
    the tensor network of the knot encoded in edgelist c. w is the writhe of
    the knot.

    Returns (jpoly, runtime) where runtime covers only the partition and
    contraction steps.
    """
    nc = len(c)  # number of crossings
    if nc > 0:
        nv = cnf_nvar(c)
        ekpotts = -tpotts(q)
        # Build the Boltzmann tensor-network graph for the Potts model.
        g = boltz_tngraph(c, ekpotts, q)
        t1 = default_timer()
        m = recursive_bipartition(g)  # Uses METIS
        md, sg = contract_dendrogram(g, m, combine_attrs=dict(attr=attr_contract))
        t2 = default_timer()
        runtime = t2 - t1
        # Partition function: scalar left on the fully contracted graph.
        Z = sg.vs["attr"][0][1]
    else:
        # Unknot: trivial partition function, nothing to contract.
        nv = 1
        Z = q
        runtime = 0
    # Multiply Z with appropriate prefactors to get Jones polynomial:
    jpoly = (
        Z
        * (-tpotts(q) ** 0.5 - tpotts(q) ** -0.5) ** (-nv - 1)
        * (-tpotts(q) ** (3.0 / 4)) ** w
        * tpotts(q) ** (1.0 / 4 * tau)
    )
    return jpoly, runtime
77b16bd2737ea3b9628d52c9fa2eea014644af37
3,635,421
def screen_name_filter(tweet_list, stoplist):
    """
    Filter list of tweets by screen_names in stoplist. Pulls original
    tweets out of retweets.

    stoplist may be a list of usernames, or a string name of a configured
    named stoplist. Comparison is case-insensitive; duplicate tweets (by
    id_str, including retweeted originals) are dropped.
    """
    tweets = []
    id_set = set()
    # NOTE: `basestring` — this module targets Python 2.
    if isinstance(stoplist, basestring):
        stoplist = get_named_stoplist(stoplist)
    stoplist = {n.lower() for n in stoplist}  # set: O(1) membership
    for tweet in tweet_list:
        if tweet.id_str in id_set:
            continue
        id_set.add(tweet.id_str)
        if tweet.user.screen_name.lower() in stoplist:
            continue
        try:
            rs = tweet.retweeted_status
            if rs.id_str in id_set:
                continue
            id_set.add(rs.id_str)
            if rs.user.screen_name.lower() in stoplist:
                continue
            # BUG FIX: was `tweets.append(tweet.rs)` — `rs` is a local, so
            # `tweet.rs` raised AttributeError, which the except below
            # silently masked. The intended behavior is to keep the tweet.
            tweets.append(tweet)
        except AttributeError:
            # Not a retweet (no retweeted_status attribute): keep as-is.
            tweets.append(tweet)
    return tweets
da8f292e496503cb70c3cf7f33ba7f87ed874cbb
3,635,422
def insert_into(table_name, values, column_names, create_if_not_exists=False, inspect=True, engine=None):
    """
    Inserts a list of values into an existing table
    :param table_name: the name of the table into which to insert records
    :param values: a list of lists containing literal values to insert into the table
    :param column_names: one or more comma-seperated column names, or a list of column names
        * column names must correspond exactly to the order of the values provided
        example names: 'col1,col2,col3' OR ['col1', 'col2', 'col3']
        example values: [(0, 42, 'first'), (True, 86, 'next'), (1, -4, 'last')]
    :param create_if_not_exists: if True, create table_name if it doesn't exist, otherwise exit with warning
    :param inspect: if True, ensure all value rows correspond to the number of column names
    :param engine: an optional sqlalchemy.engine to use in the INSERT query
    """
    into_table = get_table(table_name, engine, ignore_missing=create_if_not_exists)
    if not table_exists(into_table):
        # Missing table: fall back to SELECT INTO semantics (creates it).
        return select_into(table_name, values, column_names, inspect=inspect, engine=engine)

    if isinstance(column_names, str):
        column_names = column_names.split(",")
    validate_columns_in(into_table, column_names, empty_table=table_name)

    val_length = len(values)
    if not val_length:
        # FIX: was an f-string with no placeholders.
        logger.warning("insert_into: no values to insert")
        return

    row_length = len(column_names)
    if inspect and not all(row_length == len(val) for val in values):
        raise ValueError(f"Values provided do not match columns: {column_names}")

    # Lazy %-style args so formatting only happens if the record is emitted.
    logger.info("insert_into: populating %s from %s value records", table_name, val_length)

    # Ensure column types are defined in the order column_names was given
    column_types = [str(into_table.columns[c].type) for c in column_names]

    insert_cols = [column(c) for c in column_names]
    insert_vals = Select(insert_cols).select_from(Values(column_names, column_types, *values))
    # FIX: local was named `insert_into`, shadowing this function's name.
    insert_stmt = Insert(into_table).from_select(names=column_names, select=insert_vals)

    with into_table.bind.connect() as conn:
        conn.execute(insert_stmt.execution_options(autocommit=True))
ac9f299da13cb12446e1ac14c1962deb93a1d233
3,635,423
def delchars(str, chars):
    """Returns a string for which all occurrences of characters in
    chars have been removed.

    Args:
        str: the input string (parameter name kept for backward
            compatibility even though it shadows the builtin).
        chars: a string of characters to delete.

    Returns:
        A copy of *str* with every character in *chars* removed.
    """
    # The previous implementation used the Python 2 two-argument form
    # str.translate(table, deletechars), which raises TypeError on Python 3.
    # On Python 3 a translation table mapping each character to None
    # (i.e. deletion) is built with str.maketrans('', '', chars).
    table = {ord(c): None for c in chars}
    return str.translate(table)
a220202a05e0ead7afa6226ef309c56940a1d153
3,635,424
from typing import Optional
from typing import cast
import collections
from typing import Set
from typing import Type
from typing import Union
from typing import List


def normalize_typed_substitution(
    value: SomeValueType,
    data_type: Optional[AllowedTypesType]
) -> NormalizedValueType:
    """
    Normalize a mix of substitution and values.

    This function becomes handy when you need a substitution whose result will be coerced
    to a specific type.

    Example:
    -------
    ```python3
    class MyAction(Action):
        def __init__(self, my_int: Union[int, SomeSubstitutionsType]):
            self.__my_int_normalized = normalize_typed_substitution(some_int, int)
            ...
        def execute(self, context):
            my_int = perform_typed_substitution(context, self.__my_int_normalized, int)
            ...
    ```

    List of substitutions coerced a list to strings can be confused with a single
    substitution that is coerced to a list of strings. e.g.:
        `['asd', TextSubstitution(text='bsd')]`
    To avoid confusions, the passed value will always be interpreted as a single
    substitution if possible, like in the above example
    (which will resolved to `'asdbsd'`).
    To make it a list of substitutions:
        `['asd', [TextSubstitution(text='bsd')]]`

    :param value: value to be normalized.
    :param data_type: `value` can be either an instance of `data_type` or a substitution.
        In the case of lists, `value` can be either a substitution or a list.
        In the later case, its items should match the type specified by `data_type`
        or be a substitution.
        If `None`, it should be possible to perform the resulting normalized `value`
        to a valid type. See :py:func:`is_substitution`.
    :return: the normalized `value`.
    :raise: `TypeError` if the normalized `value` cannot later be resolved to an instance
        of `data_type`, or to a valid type when `data_type is `None`.
    :raise: `ValueError` if `data_type` is not valid. See :py:obj:`AllowedTypesTuple`.
    """
    # Resolve scalar types immediately
    if isinstance(value, ScalarTypesTuple):
        if not is_instance_of(value, data_type):
            raise TypeError(f"value='{value}' is not an instance of {data_type}")
        return value
    # Resolve substitutions and list of substitutions immediately
    if is_substitution(value):
        return normalize_to_list_of_substitutions(cast(SomeSubstitutionsType, value))
    # For the other cases, the input must be an iterable
    if not isinstance(value, collections.abc.Iterable):
        raise TypeError(
            'value should be either a scalar, a substitutions,'
            ' or a mixed list of scalars and substitutions. '
            f'Got `value={value}` of type `{type(value)}`. '
        )
    # Collect the types of the items of the list
    types_in_list: Set[Optional[Type[Union[str, int, float, bool, Substitution]]]] = set()
    for x in value:
        if isinstance(x, ScalarTypesTuple):
            types_in_list.add(type(x))
        elif is_substitution(x):
            # All substitution flavors collapse to the one marker type here
            types_in_list.add(Substitution)
        else:
            raise TypeError(
                'value is a list, and one of the items is not a scalar, a Substitution '
                f"or a list of substitutions. Got value='{value}'"
            )
    # Extract expected type information
    is_list = True
    if data_type is not None:
        data_type, is_list = extract_type(data_type)
    # Must be expecting a list
    if not is_list:
        raise TypeError(
            'The provided value resolves to a list, though the required type is a scalar. '
            f"Got value='{value}', data_type='{data_type}'."
        )
    # Normalize each specific uniform list input
    err_msg = (
        "Got a list of '{}'"
        f", expected a list of '{data_type}'. value='{value}'"
    )
    if types_in_list == {Substitution}:
        # list of substitutions, can be coerced later to anything
        return cast(List[Union[List[Substitution], str]], [
            normalize_to_list_of_substitutions(cast(SomeSubstitutionsType, x)) for x in value
        ])
    if types_in_list.issubset({str, Substitution}):
        # list of mixed strings and substitutions
        if data_type not in (None, str):
            raise TypeError(err_msg.format(str))
        return cast(List[Union[List[Substitution], str]], [
            x if isinstance(x, str) else  # Don't convert strings to TextSubstitution
            normalize_to_list_of_substitutions(cast(SomeSubstitutionsType, x))
            for x in value
        ])
    if types_in_list.issubset({bool, Substitution}):
        # list of booleans and substitutions
        if data_type not in (None, bool):
            raise TypeError(err_msg.format(bool))
        return cast(List[Union[List[Substitution], bool]], [
            normalize_to_list_of_substitutions(cast(SomeSubstitutionsType, x))
            if is_substitution(x) else x
            for x in value
        ])
    if types_in_list.issubset({int, Substitution}):
        # list of ints and substitutions
        if data_type not in (None, int):
            raise TypeError(err_msg.format(int))
        return cast(List[Union[List[Substitution], int]], [
            normalize_to_list_of_substitutions(cast(SomeSubstitutionsType, x))
            if is_substitution(x) else x
            for x in value
        ])
    if types_in_list.issubset({int, float, Substitution}):
        # list of floats and substitutions; ints present get promoted to float
        if data_type not in (None, float):
            raise TypeError(err_msg.format(float))
        return cast(List[Union[List[Substitution], float]], [
            normalize_to_list_of_substitutions(cast(SomeSubstitutionsType, x))
            if is_substitution(x) else float(cast(Union[int, float], x))
            for x in value
        ])
    # Invalid input
    raise TypeError(
        "Input value is not an acceptable 'SomeValueType'."
        f"Got value='{value}'"
    )
40250403240d591734770b146dd8253fe24c5083
3,635,425
def index(request):
    """ View for the static index page """
    # _get_context presumably builds the shared template context keyed by
    # page title — defined elsewhere in this module; confirm against callers.
    return render(request, 'public/home.html', _get_context('Home'))
3598ef8776943c63f49787f63f69d8a22536805e
3,635,426
def bytes2hex(bytes_array):
    """
    Converts byte array (output of ``pickle.dumps()``) to spaced hexadecimal
    string representation.

    Parameters
    ----------
    bytes_array: bytes
        Array of bytes to be converted.

    Returns
    -------
    str
        Hexadecimal representation of the byte array, with a single space
        between each two-digit pair (this makes YAML formatting nicer).
    """
    # Iterating over a bytes object yields ints; render each one as two
    # lowercase hex digits and join with spaces.
    return " ".join(f"{byte:02x}" for byte in bytes_array)
19019ee1e3cd45d671f53e0ae4fd92b283c3b38d
3,635,427
def atan2(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.arctan2 <numpy.arctan2>`.

    See its docstring for more information.
    """
    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
        raise TypeError("Only floating-point dtypes are allowed in atan2")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    # Broadcast / promote the two operands per the array-API rules before
    # delegating to NumPy.
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.arctan2(x1._array, x2._array))
8a0621bfd0ad8ac4ce9e14cc939e5a8b9e3e511c
3,635,428
import os


def family_directory(fonts):
    """Get the path of font project directory.

    Returns the directory of the first font path, '.' when the font was
    given as a bare filename, or None when *fonts* is empty/falsy.
    """
    if not fonts:
        return None
    # A bare filename has an empty dirname — treat it as the current dir.
    return os.path.dirname(fonts[0]) or '.'
91d1f880a01ba2de11e6570272d5748d1dea6d47
3,635,429
def authorizer(*args, **kwargs):
    """
    decorator to register an authorizer.

    :param object args: authorizer class constructor arguments.
    :param object kwargs: authorizer class constructor keyword arguments.

    :keyword bool replace: specifies that if there is another registered
                           authorizer with the same name, replace it with
                           the new one, otherwise raise an error.
                           defaults to False.

    :raises InvalidAuthorizerTypeError: invalid authorizer type error.
    :raises DuplicatedAuthorizerError: duplicated authorizer error.

    :returns: authorizer class.
    :rtype: type
    """

    def decorator(cls):
        """
        decorates the given class and registers an instance
        of it into available authorizer.

        :param type cls: authorizer class.

        :returns: authorizer class.
        :rtype: type
        """
        # NOTE(review): kwargs are forwarded both to the class constructor and
        # to register_authorizer (which consumes e.g. 'replace') — confirm the
        # constructor tolerates registry-only keywords.
        instance = cls(*args, **kwargs)
        authorization_services.register_authorizer(instance, **kwargs)
        return cls

    return decorator
5af038c7e1bebee228bffc24c72aa54e03a8d28e
3,635,430
def option_getter(config_model):
    """Returns a get_option() function using the given config_model and data"""

    def get_option(option, x=None, default=None, ignore_inheritance=False):
        # Resolve an option by walking up the tech inheritance chain:
        # a tech option missing at 'techs.<tech>.<key>' is looked up on the
        # tech's parent ('techs.<parent>.<key>'), recursively, falling back
        # to `default` and raising OptionNotSetError at the top of the chain.

        def _get_option(opt, fail=False):
            try:
                result = config_model.get_key('techs.' + opt)
            except KeyError:
                if ignore_inheritance:
                    # Skip the parent walk entirely and go straight to default
                    return _get_option(default, fail)
                # 'ccgt.constraints.s_time' -> 'ccgt', 'constraints.s_time'
                tech, remainder = opt.split('.', 1)
                if ':' in tech:
                    # 'name:parent' style tech names encode their parent inline
                    parent = tech.split(':')[0]
                else:
                    # parent = e.g. 'defaults'
                    parent = config_model.get_key('techs.' + tech + '.parent')
                try:
                    result = _get_option(parent + '.' + remainder, fail)
                except KeyError:
                    e = exceptions.OptionNotSetError
                    if fail:
                        raise e('Failed to read option `{}` '
                                'with given default '
                                '`{}`'.format(option, default))
                    elif default:
                        # Try the default key, but this time error out if it
                        # is missing too (fail=True)
                        result = _get_option(default, fail=True)
                    elif tech == 'defaults':
                        raise e('Reached top of inheritance chain '
                                'and no default defined for: '
                                '`{}`'.format(option))
                    else:
                        raise e('Can not get parent for `{}` '
                                'and no default defined '
                                '({}).'.format(tech, option))
            return result

        def _get_location_option(key, location):
            # NB1: KeyErrors raised here are always caught in get_option
            # so need no further information or handling
            return config_model.get_key(
                'locations.' + location + '.override.' + key
            )

        if x:
            try:
                result = _get_location_option(option, x)
            # If can't find a location-specific option, fall back to model-wide
            except KeyError:
                result = _get_option(option)
        else:
            result = _get_option(option)
        # Deal with 'inf' settings
        if result == 'inf':
            result = float('inf')
        return result

    return get_option
ad5726bc957e1c5902960ebc0e51af43f0ab31eb
3,635,431
def is_core_dump(file_path):
    """
    Determine whether given file is a core file.  Works on CentOS and Ubuntu.

    Args:
        file_path: full path to a possible core file
    """
    # `file` reports e.g. "ELF 64-bit LSB core file ..." for core dumps;
    # require both markers in its output.
    description = exec_local_command("file %s" % file_path)
    return all(marker in description for marker in ("core file", "ELF"))
33c9974888857f913de1702117a0add2c686c252
3,635,432
from pathlib import Path


def to_posix(d):
    """Convert the Path objects to string.

    Works recursively: dicts are updated in place (and returned), lists are
    rebuilt, Path values become POSIX-style strings, and any other value is
    passed through unchanged.
    """
    if isinstance(d, Path):
        return d.as_posix()
    if isinstance(d, list):
        return [to_posix(item) for item in d]
    if isinstance(d, dict):
        for key in d:
            d[key] = to_posix(d[key])
    return d
91dbda7738308dd931b58d59dad8e04a277034ea
3,635,433
from datetime import datetime


def lc_concatenate_worker(task):
    """
    This is the parallel worker for the function below.

    task is a tuple:

    task[0] = base LC file
    task[1] = new LC file to be concatenated to base
    task[2] = zero-indexed column number to use to sort resulting LC
              (if None, then no sorting is done)

    Returns (baselcfile, baselcfile) on success, (task[0], None) on failure.

    FIXME: this currently uses a dumb method, make it faster by streaming
    newlc lines to baselc file directly
    FIXME: add sorting by any column (task[2] is currently ignored)
    """
    try:
        baselcfile = task[0]
        newlcfile = task[1]

        # Context managers guarantee the handles are closed even on error
        with open(baselcfile, 'rb') as baselcf:
            baselclines = baselcf.readlines()
        baselcndet = len(baselclines)

        with open(newlcfile, 'rb') as newlcf:
            newlclines = newlcf.readlines()

        baselclines.extend(newlclines)
        finalndet = len(baselclines)

        # BUGFIX: the files are read in binary mode, so the lines are already
        # bytes. The previous code called line.encode('utf-8'), which raises
        # AttributeError on bytes and made every task fall into the except
        # branch (returning (task[0], None)). Write the bytes directly.
        with open(baselcfile, 'wb') as outfd:
            outfd.writelines(baselclines)

        print('%sZ: concat LC OK: %s with ndet %s -> %s with ndet %s' %
              (datetime.utcnow().isoformat(),
               baselcfile, baselcndet,
               baselcfile, finalndet))

        return baselcfile, baselcfile

    except Exception as e:
        print('ERR! %sZ: concat LC task failed: %s, error: %s' %
              (datetime.utcnow().isoformat(),
               repr(task),
               e))
        return task[0], None
c5b72c4576748949b1968dfc9ed93bf34d347b48
3,635,434
import traceback


def list_entities(currency, ids=None, page=None, pagesize=None):  # noqa: E501
    """Get entities

    # noqa: E501

    :param currency: The cryptocurrency (e.g., btc)
    :type currency: str
    :param ids: Restrict result to given set of comma separated IDs
    :type ids: List[str]
    :param page: Resumption token for retrieving the next page
    :type page: str
    :param pagesize: Number of items returned in a single page
    :type pagesize: int

    :rtype: Entities
    """
    # Map service-layer exceptions onto HTTP-style responses:
    # RuntimeError -> 404, ValueError -> 400, anything else -> 500 (logged).
    try:
        return service.list_entities(
            currency=currency,
            ids=ids,
            page=page,
            pagesize=pagesize)
    except RuntimeError as e:
        return notfound(str(e))
    except ValueError as e:
        return badrequest(str(e))
    except Exception as e:
        traceback.print_exception(type(e), e, e.__traceback__)
        return internalerror(str(e))
7956ab50da50811785be9fd1c834984500460378
3,635,435
def sensor(request):
    """HTTP/GET /sensor コール時の処理 ADC以外のセンサー値を読んで返す

    Handles the HTTP GET /sensor call: reads non-ADC sensor values and
    returns them.

    Args:
        request (QueryDict): リクエストパラメータ (request parameters)

    Returns:
        dict: クライアントに返すjson形式の値 (JSON values returned to client)
    """
    params = request.GET.copy()
    api_response = ApiResponse()
    # getlist collapses repeated ?ids=... query params into one list
    params["ids"] = params.getlist("ids")
    parse = ParseApiParams(params, mode="sensor")
    if parse.has_bme():
        # BME280 environment sensor reading, keyed as "BME0"
        api_response.set_params({"BME0": Bme280Handler.main()})
    if parse.has_tick():
        # NOTICE: 配列の最初固定。複数対応は必要なときに実施する
        # (fixed to the first array entry; multi-sensor support deferred)
        tick = Tick(parse.tick(0)["no"])
        api_response.set_params({parse.tick(0)["name"]: tick.sensor_value})
    return JsonResponse(api_response.response)
d9df39aeeeb6077c6bf2b6c0019d682f3a05e78f
3,635,436
def stat(lst):
    """Calculate mean and std deviation from the input list.

    Args:
        lst: non-empty sequence of numbers.

    Returns:
        (mean, stdev) tuple, where stdev is the population standard
        deviation (divides by n, not n-1).
    """
    n = float(len(lst))
    mean = sum(lst) / n
    # Two-pass formula. The previous one-pass form E[x^2] - mean^2 can go
    # slightly negative through floating-point cancellation (e.g. a long
    # list of nearly-identical values), making sqrt() raise a domain error.
    stdev = sqrt(sum((x - mean) ** 2 for x in lst) / n)
    return mean, stdev
c1983fc9da96397a5f55e45d0eac9cbc921a91fc
3,635,437
import attr


def _recursive_generic_validator(typed):
    """Recursively assembles the validators for nested generic types

    Walks through the nested type structure and determines whether to recurse all the way to a
    base type. Once it hits the base type it bubbles up the correct validator that is nested
    within the upper validator

    Args:
        typed: input type

    Returns:
        return_type: recursively built deep_iterable validators
    """
    if hasattr(typed, "__args__"):
        # If there are more __args__ then we still need to recurse as it is still a GenericAlias
        # Iterate through since there might be multiple types?
        if len(typed.__args__) > 1:
            return_type = attr.validators.deep_iterable(
                member_validator=_recursive_generic_validator(typed.__args__),
                iterable_validator=attr.validators.instance_of(typed.__origin__),
            )
        else:
            # Single type argument, e.g. List[int] — recurse on that one type
            return_type = attr.validators.deep_iterable(
                member_validator=_recursive_generic_validator(typed.__args__[0]),
                iterable_validator=attr.validators.instance_of(typed.__origin__),
            )
        return return_type
    else:
        # If no more __args__ then we are to the base type and need to bubble up the type
        # But we need to check against base types and enums
        if isinstance(typed, EnumMeta):
            # Enums validate as their underlying base type restricted to the
            # enum's allowed values (via project helper _check_enum_props)
            base_type, allowed = _check_enum_props(typed)
            return_type = attr.validators.and_(
                attr.validators.instance_of(base_type), attr.validators.in_(allowed)
            )
        else:
            return_type = attr.validators.instance_of(typed)
        return return_type
dcc89a9c358da848d56d3c031e3967402ef70a28
3,635,438
import os


def getAllFilesOfExtension(rootDir, extension):
    """Traverse a directory tree and find all the files with a specified extension

    Args:
        rootDir: The directory to traverse
        extension: The extension (matched as a filename suffix)

    Returns:
        A list of (directory, filename) tuples
    """
    return [
        (dirPath, baseName)
        for dirPath, _dirNames, fileNames in os.walk(rootDir)
        for baseName in fileNames
        if baseName.endswith(extension)
    ]
13248ca9aef4ff2c52a8281d107b3adf8ce4e479
3,635,439
def get_posts(di, po, syn):
    """ Gets the postings list for each unique token in the query

    Args:
        di: dictionary mapping a token to its entry; entry[1] is used as a
            byte offset into the postings file (presumably entry[0] is the
            document frequency — confirm against the index writer).
        po: open postings file handle, seekable; each line holds a Python
            literal (read back with literal_eval).
        syn: list of synonym groups; syn[i][0] is the query word itself and
            the remaining items are its synonyms.

    Returns:
        dict mapping each query word to the merged postings of the word and
        all its synonyms.
    """
    words = {}
    #goes through each token in the query and returns its postings list
    for i in range(len(syn)):
        word = syn[i][0]
        #goes through each synonym for each word
        for k in range(len(syn[i])):
            j = syn[i][k]
            #only returns the first instance of a token
            # NOTE(review): this checks key j but results are stored under
            # key `word`, so the dedup only fires when j == word — verify
            # whether per-synonym dedup was intended.
            if words.get(j, False):
                continue
            #only retrieves postings with corresponding dictionary entries
            resp = di.get(j, [])
            if len(resp) > 0:
                # Seek to this token's offset and parse its postings line
                po.seek(int(resp[1]))
                line = literal_eval(po.readline())
                words[word] = words.get(word, []) + line
    return words
8a45df246bf6bb19baf570b133427f937d36f5d9
3,635,440
def need_food(board, bad_positions, snake):
    """ Determines if we need food and returns potential food that we can get

    Args:
        board: game board exposing .food, .enemies, .get_cell(), .inside()
        bad_positions: set/collection of positions the snake must avoid
        snake: our snake, exposing .head and .attributes['health']

    Returns:
        list of reachable food positions worth pursuing, or None.
    """
    potential_food = []

    # food that is not contested (we are the closest)
    safe_food = [fud for fud in board.food if board.get_cell(fud) != SPOILED]

    # always go for safe food even if we kind of need it
    for food in safe_food:
        # unreachable before starving — skip
        if dist(food, snake.head) >= snake.attributes['health']:
            continue

        # check if enemy is approaching food we are close to
        steal = False
        for enemy in board.enemies:
            if dist(enemy.head, food) <= FOOD_MEDIUM_DIST + FOOD_STEAL_DIST:
                steal = True and not DISABLE_STEALING
                break

        # prioritize safe food if it's close and we are a little hungry otherwise wait a little bit
        if dist(food, snake.head) <= FOOD_CLOSE_DIST and (snake.attributes['health'] <= FOOD_CLOSE_HEALTH or steal):
            potential_food.append(food)
        elif dist(food, snake.head) <= FOOD_MEDIUM_DIST and (snake.attributes['health'] <= FOOD_MEDIUM_HEALTH or steal):
            potential_food.append(food)
        elif snake.attributes['health'] < FOOD_HUNGRY_HEALTH:
            potential_food.append(food)

    # if there is no safe food and we are relatively hungry then go for contested food
    if len(potential_food) < 1 and snake.attributes['health'] < FOOD_HUNGRY_HEALTH:
        contested_food = [fud for fud in board.food if board.get_cell(fud) == SPOILED]

        # if we are in possible distance of getting it then add it
        for food in contested_food:
            if dist(food, snake.head) <= snake.attributes['health']:
                potential_food.append(food)

    if not potential_food:
        return None

    # get food that is not next to wall so we can potentially ignore food that is next to wall
    no_wall_food = []
    for fud in potential_food:
        if not next_to_wall(fud, board) or snake.attributes['health'] < FOOD_HUNGRY_WALL_HEALTH:
            no_wall_food.append(fud)

    # only wall-adjacent food left and we are not desperate yet — wait
    if len(no_wall_food) == 0 and snake.attributes['health'] > FOOD_HUNGRY_WALL_HEALTH:
        return None

    # remove food that puts us in a bad_position
    food_to_get = []
    for fud in (potential_food if len(no_wall_food) == 0 else no_wall_food):
        # if we are really low on health or the food is not super close then add it
        if snake.attributes['health'] < FOOD_DANGEROUS_HEALTH or dist(snake.head, fud) > FOOD_DANGEROUS_DIST:
            food_to_get.append(fud)
            continue

        # for each direction that would make sense for the shortest path verify the next move doesn't put us in a bad position
        for direction in get_directions(snake.head, fud):
            next_pos = get_next_from_direction(snake.head, direction)
            if next_pos not in bad_positions and board.inside(next_pos) and board.get_cell(next_pos) != SNAKE:
                food_to_get.append(fud)
                break

    return (food_to_get if len(food_to_get) > 0 else None)
73359fa082feda5a2e3efb8d9d7f091d9d23a8bb
3,635,441
def categoriesJSON():
    """Return JSON for all the categories"""
    # session is the module-level SQLAlchemy session; each Category exposes a
    # .serialize property used to build the JSON payload.
    categorys = session.query(Category).all()
    return jsonify(categories=[c.serialize for c in categorys])
2f300dca846d2c01dca1ea80798fbcf258e459e8
3,635,442
def rescale_column(img, gt_bboxes, gt_label, gt_num, img_shape):
    """rescale operation for image

    Rescales the image to the configured (width, height), scales the ground
    truth boxes by the same factor(s), and appends the scale factor to
    img_shape. Returns (img_data, gt_bboxes, gt_label, gt_num, img_shape).
    """
    img_data, scale_factor = rescale_with_tuple(img, (config.img_width, config.img_height))
    # If still too tall after the first pass, rescale again with the axes
    # swapped and combine the two scale factors.
    if img_data.shape[0] > config.img_height:
        img_data, scale_factor2 = rescale_with_tuple(img_data, (config.img_height, config.img_width))
        scale_factor = scale_factor * scale_factor2

    img_shape = np.append(img_shape, scale_factor)
    img_shape = np.asarray(img_shape, dtype=np.float32)

    gt_bboxes = gt_bboxes * scale_factor
    gt_bboxes = split_gtbox_label(gt_bboxes)

    # Clamp scaled boxes back inside the image bounds
    # (columns 0::2 are x-coords, 1::2 are y-coords)
    if gt_bboxes.shape[0] != 0:
        gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
        gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)

    return (img_data, gt_bboxes, gt_label, gt_num, img_shape)
1f765b69b65ab34b1d272223a2344e6068f3f4ab
3,635,443
def ausc_trapazoidal(mean_df, doses):
    """Performs numerical integration using the trapazoidal rule to
    determine the area under the survival curve (AUSC) for the drug respose.

    The only argument, mean_df, is a data frame made from make_mean_std();
    its ``normalized_mean`` column supplies the y values and *doses* the
    x values. Returns the area rounded to 2 decimal places.
    """
    ys = list(mean_df.normalized_mean)
    xs = list(doses)
    # Trapezoidal rule: sum of (dx * average of adjacent y values)
    area = sum(
        (xs[i + 1] - xs[i]) * (ys[i + 1] + ys[i]) / 2.0
        for i in range(len(xs) - 1)
    )
    return round(area, 2)
fad1c30d6996ea7ea3e4d6cf0d6546f76647e459
3,635,444
import os


def _get_file_handler(name):
    """Create a rotating file handler.

    Rotation: up to 5 backup files of 50 megabytes each.
    (Translated from Russian: "Создание файлового хандлера.
    Ротация 5 файлов по 50 мегабайт.")
    """
    # _path is a module-level log directory defined elsewhere in this file.
    file_obj = RotatingFileHandler(os.path.join(_path, name + ".log"), maxBytes = 50000000, backupCount = 5)
    # "TRACE" is a custom logging level — presumably registered elsewhere
    # in the project before handlers are created; confirm.
    file_obj.setLevel("TRACE")
    return file_obj
c1bf53ad63ea0aae49c7ff93c7ab736350bcc752
3,635,445
def psu_info_table(psu_name):
    """Build the PSU_INFO table key for one PSU.

    :param: psu_name: psu name
    :return: psu info entry for this psu ("PSU_INFO" + separator + name)
    """
    return TABLE_NAME_SEPARATOR_VBAR.join(("PSU_INFO", psu_name))
e3894e0ae5735d8f096cfa72ac50bc6fd3d966da
3,635,446
def delete_rds(rds_client, rds_instances) -> list:
    """Deletes all instances in the instances parameter.

    Args:
        rds_client: A RDS boto3 client.
        rds_instances: A list of instances you want deleted.

    Returns:
        A count of deleted instances
    """
    terminated_instances = []

    for instance in rds_instances:
        rds_indentifier = instance["DBInstanceIdentifier"]
        # Whitelisted instances are never deleted
        if helpers.check_in_whitelist(rds_indentifier, WHITELIST_NAME):
            continue
        try:
            # SkipFinalSnapshot=True: no final snapshot is taken before delete
            deletion_response = rds_client.delete_db_instance(
                DBInstanceIdentifier=rds_indentifier,
                SkipFinalSnapshot=True
            )
        except ClientError as error:
            # Record the failure in the result list instead of aborting the
            # whole batch
            error_string = "{0} on {1} - {2}".format(error, RESOURCE_NAME, rds_indentifier)
            print(error_string)
            terminated_instances.append(error_string)
            continue

        terminated_instances.append(rds_indentifier)

    return terminated_instances
f1e66cce8e2d98c53bc247c2e2b8fd57c195b86c
3,635,447
import itertools


def all_preferences(candidates, concentrate=False):
    """
    Generates all possible preferences given a list of candidates

    Each preference is one ordering (permutation) of the candidates,
    returned as a list. When concentrate is True the orderings are passed
    through convert_condensed() first.
    """
    orderings = [list(perm) for perm in itertools.permutations(candidates)]
    if concentrate:
        return convert_condensed(orderings)
    return orderings
53f078a9fd3f66696cc5bc64de9b41d4ccbe2c8c
3,635,448
import torch


def div_reg(net, data, ref):
    """
    Regulize the second term of the loss function

    Computes mean_f - L - L**2 where mean_f is the network's mean output on
    *data* and L = logsumexp(net(ref)) - log(len(ref)) is the log-mean-exp
    of the network on the reference batch.
    """
    first_term = net(data).mean()
    log_mean = torch.logsumexp(net(ref), 0) - np.log(ref.shape[0])
    return first_term - log_mean - log_mean ** 2
04d28e11df8f7b5e723d1930372afca317e1c349
3,635,449
def action_list():
    """ Prints all the available actions present in this file.

    Logs a formatted menu via rospy and returns the action names as a list
    (indices correspond to the menu numbers; -2 is the exit sentinel).
    """
    rospy.loginfo(color.BOLD + color.PURPLE + '|-------------------|' + color.END)
    rospy.loginfo(color.BOLD + color.PURPLE + '| AVAILABLE ACTIONS |' + color.END)
    rospy.loginfo(color.BOLD + color.PURPLE + '| 1: MOVE TO POINT  |' + color.END)
    rospy.loginfo(color.BOLD + color.PURPLE + '| 2: OPEN GRIPPER   |' + color.END)
    rospy.loginfo(color.BOLD + color.PURPLE + '| 3: CLOSE GRIPPER  |' + color.END)
    rospy.loginfo(color.BOLD + color.PURPLE + '|-2: EXIT           |' + color.END)
    rospy.loginfo(color.BOLD + color.PURPLE + '|-------------------|' + color.END)
    actions = ['MOVE TO POINT', 'OPEN GRIPPER', 'CLOSE GRIPPER']
    return actions
2c5e80c9e2ddcc127812491e4bfd8ed0a4765b53
3,635,450
def firstLetterCipher(ciphertext):
    """
    Returns the first letters of each word in the ciphertext

    Example:
    Cipher Text: Horses evertime look positive
    Decoded text: Help
    """
    decoded = []
    for word in ciphertext.split(" "):
        decoded.append(word[0])
    return "".join(decoded)
87f37d1a428bde43c07231ab2e5156c680c96f91
3,635,451
def polar_decode(N, K, P0):
    """
    Decode a (N, K) polar code. P0 must be 1-normalized probabilities

    Args:
        N: block length (power of two).
        K: number of information bits.
        P0: per-bit probabilities of 0 (length N, already normalized).

    Returns:
        The K decoded information bits (values at the information set A).
    """
    n = np.log2(N).astype(int)
    # Information set: the K most reliable positions per the polar
    # high-probability-weight ordering
    A = polar_hpw(N)[-K:]

    # We're not using all the elements in the P array, as each layer lamb
    # only uses 2**(n-lamb) elements. Given the current indexing it's still fine.
    P = np.empty((n + 1, N, 2), dtype=float)
    B = np.zeros((n + 1, N), dtype=np.uint8)

    # Layer 0 holds the channel probabilities: P[0, b, 0] = Pr(bit b == 0)
    for beta in range(N):
        P[0, beta, 0] = P0[beta]
        P[0, beta, 1] = 1 - P0[beta]

    # Successive-cancellation decoding, bit by bit
    for phi in range(N):
        recursivlyCalcP(n, phi, P, B, n)
        if not phi in A:
            # Frozen bit — always 0
            B[n, phi] = 0
        else:
            # Information bit — pick the more likely value
            if P[n, 0, 0] > P[n, 0, 1]:
                B[n, phi] = 0
            else:
                B[n, phi] = 1
        if phi % 2 == 1:
            recursivelyUpdateB(n, phi, B, n)

    u_hat_full = B[n]
    return u_hat_full[A]
c305aaaa124faec73145a87cdbafe35965e26ec6
3,635,452
from pathlib import Path
from typing import Tuple
import re


def parse_samtools_flagstat(p: Path) -> Tuple[int, int]:
    """Parse total and mapped number of reads from Samtools flagstat file"""
    total = 0
    mapped = 0
    leading_count = re.compile(r'(\d+)')
    with open(p) as fh:
        for line in fh:
            match = leading_count.match(line)
            if not match:
                continue
            count = int(match.group(1))
            # flagstat lines look like "123 + 0 in total ..." /
            # "120 + 0 mapped (97.5% ...)"
            if 'in total' in line:
                total = count
            if ' mapped (' in line:
                mapped = count
    return total, mapped
60c6f9b227cefdea9877b05bb2fe66e4c82b4dd1
3,635,453
import io
import tarfile
import time
import os
import posixpath


def create_tar_in_memory(files):
    """
    Construct a tar file in-memory.

    :param files: Sequence of (path, data) tuples. `data` may be raw bytes,
        a local filesystem path (str) to a file or directory, or a file-like
        object with a read() method.
    :type files: list
    :return: Tar file.
    :rtype: File-like object.
    """
    output = io.BytesIO()
    with tarfile.open(fileobj=output, mode='w') as tarf:
        def add_entry(name, content):
            # Build the header by hand so byte content (not just disk files)
            # can be added; mtime is stamped with the current time.
            tarinfo = tarfile.TarInfo(name)
            tarinfo.mtime = time.time()
            tarinfo.size = len(content)
            tarf.addfile(tarinfo, io.BytesIO(content))

        for path, data in files:
            if isinstance(data, bytes):
                add_entry(path, data)
            elif isinstance(data, str):
                # Assume data is a local path.
                if os.path.isdir(data):
                    # Recursively add the directory's files under `path`,
                    # converting OS-specific separators to tar's POSIX form.
                    for dirpath, _, filenames in os.walk(data):
                        for filename in filenames:
                            fullpath = os.path.join(dirpath, filename)
                            relpath = os.path.relpath(fullpath, start=data)
                            with open(fullpath, 'rb') as f:
                                add_entry(posixpath.join(path, *relpath.split(os.sep)), f.read())
                else:
                    # Assume data is a path to a regular file.
                    with open(data, 'rb') as f:
                        add_entry(path, f.read())
            else:
                # Assume data is file-like.
                add_entry(path, data.read())
    # Rewind so read() starts from the beginning
    output.seek(0)
    return output
feb49d92d0790e3d4cec08f7cfc8947f8520b74c
3,635,454
def get_country(country_id=None, incomelevel=None, lendingtype=None, cache=True):
    """
    Retrieve information on a country or regional aggregate.  Can specify
    either country_id, or the aggregates, but not both

    :country_id: a country id or sequence thereof.  None returns all
        countries and aggregates.
    :incomelevel: desired incomelevel id or ids.
    :lendingtype: desired lendingtype id or ids.
    :cache: use the cache
    :returns: WBSearchResult containing dictionary objects representing each
        country
    """
    if country_id:
        # country_id is mutually exclusive with the aggregate filters
        if incomelevel or lendingtype:
            raise ValueError("Can't specify country_id and aggregates")
        return id_only_query(COUNTRIES_URL, country_id, cache=cache)
    # Build the query-string filters; parse_value_or_iterable joins
    # sequences into the API's expected format
    args = {}
    if incomelevel:
        args["incomeLevel"] = parse_value_or_iterable(incomelevel)
    if lendingtype:
        args["lendingType"] = parse_value_or_iterable(lendingtype)
    return WBSearchResult(fetcher.fetch(COUNTRIES_URL, args, cache=cache))
b798667f1bd1c0c8649986b948201392eae1f165
3,635,455
def _PmapWalkARMLevel2(tte, vaddr, verbose_level = vSCRIPT):
    """ Pmap walk the level 2 tte.
        params:
          tte - value object
          vaddr - int
        returns: str - description of the tte + additional informaiton based on verbose_level
    """
    # NOTE(review): this is a Python 2 lldb debug macro (print statements,
    # kern/unsigned/addressof helpers come from the lldbmacros environment).
    # Level-2 page table base lives in bits [31:10] of the tte.
    pte_base = kern.PhysToKernelVirt(tte & 0xFFFFFC00)
    # Bits [19:12] of the vaddr index into the 256-entry second-level table.
    pte_index = (vaddr >> 12) & 0xFF
    pte_base_val = kern.GetValueFromAddress(pte_base, 'pt_entry_t *')
    pte = pte_base_val[pte_index]
    paddr = 0
    # Bit 1 set => small page descriptor is valid; physical frame in [31:12].
    if pte & 0x2:
        paddr = (unsigned(pte) & 0xFFFFF000) | (vaddr & 0xFFF)
    if verbose_level >= vSCRIPT:
        print "{0: <#020x}\n\t{1: <#020x}\n\t".format(addressof(tte), tte),
        PmapDecodeTTEARM(tte, 1, verbose_level)
    if verbose_level >= vSCRIPT:
        print "second-level table (index {:d}):".format(pte_index)
    if verbose_level >= vDETAIL:
        # Dump all 256 entries of the second-level table
        for i in range(256):
            tmp = pte_base_val[i]
            print "{0: <#020x}:\t{1: <#020x}".format(addressof(tmp), unsigned(tmp))
    if verbose_level >= vSCRIPT:
        print " {0: <#020x}\n\t{1: <#020x}\n\t".format(addressof(pte), unsigned(pte)),
        PmapDecodeTTEARM(pte, 2, verbose_level)

    return paddr
    #end of level 2 walking of arm
bb4a7dcd70abf5f3f451c5ce56d10673bdb803b4
3,635,456
def mel_spectrogram_feature(wav, hparams=None):
    """
    Derives a mel spectrogram ready to be used by the encoder from a
    preprocessed audio waveform.

    Note: this not a log-mel spectrogram.

    :param wav: preprocessed mono waveform (1-D float array).
    :param hparams: hyperparameter object providing sample_rate, n_fft,
        hop_size and num_mels; falls back to the module's default_hparams.
    :return: mel frames converted to dB via the module's _amp_to_db helper.
    """
    hparams = hparams or default_hparams
    frames = librosa.feature.melspectrogram(
        wav,
        hparams.sample_rate,
        n_fft=hparams.n_fft,
        hop_length=hparams.hop_size,
        n_mels=hparams.num_mels
    )
    # float32 keeps the feature small; _amp_to_db does the dB conversion
    return _amp_to_db(frames.astype(np.float32))
5f1541109b81b535ca9a263a954ce82b067fe3eb
3,635,457
def getRdkitAtomXYZbyId(rdkitMol, atomId, confId=0):
    """Returns an xyz atom position given atom's id.

    Looks up the requested conformer on the RDKit molecule and converts the
    atom's 3-D position into a numpy array.
    """
    position = rdkitMol.GetConformer(confId).GetAtomPosition(atomId)
    return np.array(list(position))
6a28646e7c7275c2c43da5fea512273e68e4a9e4
3,635,458
def _mn_minos_(self, *args):
    """Get MINOS errors for parameter:
    >>> m = ...  # TMinuit object
    >>> result = m.minos( 1 , 2 )
    """
    # Validate every parameter index before running MINOS
    for par in args:
        if par not in self:
            raise IndexError
    return _mn_exec_(self, 'MINOS', 200, *args)
cd99344cb7af2bca3db1abb576ec63ffff2df032
3,635,459
def app_files(proj_name):
    """Create a list with the project files

    Args:
        proj_name (str): the name of the project, where the code will be hosted

    Returns:
        files_list (list): list containing the file structure of the app
    """
    package_files = [f"{proj_name}/__init__.py", f"{proj_name}/{proj_name}.py"]
    return ["README.md", "setup.py", "setup.cfg"] + package_files + ["tests/tests.py"]
2c6cbf112c7939bea12672668c8a5db1656b6edd
3,635,460
def periodic_name(userword): """Generate a sequence of periodic elements from a word or sentence.""" # split up into individual words sentence = userword.split() output = [] # match each word with the periodic system for word in sentence: sequencer = ElementalWord(word) basescore = 0 periodicname = [] for word_sequence in sequencer.wordlist: sname,score = _score_wordlist(word_sequence) if score > basescore: periodicname = sname basescore = score output.append(periodicname) return(output)
bfd7b2aa26baa193ac055afc64334058860d9a16
3,635,461
import json


def presentationRequestApiCallback():
    """ This method is called by the VC Request API when the user scans
    a QR code and presents a Verifiable Credential to the service

    Validates the shared api-key header, then records the request state in
    the cache as the presentation progresses ("request_retrieved" when the
    QR code is scanned, "presentation_verified" once the credential checks
    out). Always returns an empty body on success paths.
    """
    presentationResponse = request.json
    print(presentationResponse)
    if request.headers['api-key'] != apiKey:
        print("api-key wrong or missing")
        # BUGFIX: the previous code passed jsonify(...) — itself a Response —
        # into Response(), which is not a valid Flask body. Serialize the
        # payload with json.dumps instead.
        return Response(
            json.dumps({'error': 'api-key wrong or missing'}),
            status=401,
            mimetype='application/json')

    if presentationResponse["code"] == "request_retrieved":
        cacheData = {
            "status": presentationResponse["code"],
            "message": "QR Code is scanned. Waiting for validation..."
        }
        cache.set(presentationResponse["state"], json.dumps(cacheData))
        return ""

    if presentationResponse["code"] == "presentation_verified":
        cacheData = {
            "status": presentationResponse["code"],
            "message": "Presentation received",
            "payload": presentationResponse["issuers"],
            "subject": presentationResponse["subject"],
            "firstName": presentationResponse["issuers"][0]["claims"]["firstName"],
            "lastName": presentationResponse["issuers"][0]["claims"]["lastName"],
            "presentationResponse": presentationResponse
        }
        cache.set(presentationResponse["state"], json.dumps(cacheData))
        return ""

    return ""
f45e4580d73ec9679679c801aaeaf664011c340a
3,635,462
def process_table(data: document.TableNode, caption: str) -> NoEscape:
    """
    Returns a Latex formatted Table Item, wrapped with a NoEscape Command

    The table node's rows are flattened (each cell's children joined with
    spaces via the module-level process() renderer); the first row becomes
    the header and the rest the body of a pandas DataFrame, which is then
    emitted as a LaTeX longtable with equal-width columns.
    """
    rows = [
        tuple(
            " ".join(process(c) for c in table_cell["children"])
            for table_cell in table_row["children"]
        )
        for table_row in data["children"]
    ]
    dataframe = pd.DataFrame(rows[1:], columns=rows[0])
    # Don't truncate cell contents when rendering
    pd.set_option("max_colwidth", None)
    latex_table = dataframe.to_latex(
        index=False,
        escape=False,
        na_rep=" ",
        # Split the line width evenly among the columns
        column_format="".join([f"p{{{1/len(rows[0])}\\linewidth}}" for _ in rows[0]]),
        caption=caption,
        position="H",
        longtable=True,
    )
    return NoEscape(latex_table)
bd9b8e450a5c872a83d0d146d7256f581fa4bc39
3,635,463
def getTracksForArtist(artistName, tracks = None):
    """
    Return a Track object for each track found with the specified artistName.

    When *tracks* is falsy the full library from getTracks() is searched.
    Returns a lazy filter object, matching the original interface.
    """
    searchSpace = tracks or getTracks()

    def matchesArtist(track):
        return track.artist == artistName

    return filter(matchesArtist, searchSpace)
5df8ead4d3c5f47810ccbb55a9f442e542d9c454
3,635,464
import torch


def log_safe(x):
    """The same as torch.log(x), but clamps the input to prevent NaNs.

    The input is capped elementwise at 33e37 (just under float32 max) so
    that log() never sees inf.
    """
    x = torch.as_tensor(x)
    # Cap tensor inherits x's dtype/device via .to(x)
    cap = torch.tensor(33e37).to(x)
    return torch.log(torch.min(x, cap))
98c73b316d22ebe9ef4b322b1ba984a734422e7a
3,635,465
def index(request):
    """Redirect to the index page."""
    # NOTE(review): despite the docstring this renders index.htm directly
    # (no redirect), with a fresh LoginForm in the context.
    context = {'form': LoginForm() }
    return render(request, 'index.htm', context)
f2181aa02dd8709350be8675e5da21e2a94bdf27
3,635,466
def summarize_chrom_classif_by_sample(psd_list, sample_list):
    """
    Build a per-sample summary of chromosome classifications.

    Inputs:
        psd_list: list of SamplePSD objects AFTER calc_chrom_props() has
            been run on each.
        sample_list: list of sample names, in the same order as psd_list.

    Returns:
        pandas.DataFrame where rows are samples, columns are chromosomes,
        and entries are the classification labels.
    """
    classifications = [psd.chrom_props.classif for psd in psd_list]
    # All PSDs are assumed to share the same chromosome index; use the first.
    chrom_names = psd_list[0].chrom_props.index
    return pd.DataFrame(classifications, columns=chrom_names, index=sample_list)
64b77680d8e5a3170d4bd8a0680e979d0e3f8e89
3,635,467
def fake_quant_with_min_max_vars_per_channel_gradient(input_gradients, input_data, input_min, input_max,
                                                      num_bits=8, narrow_range=False):
    """
    Computes gradients of per-channel fake-quantization on `input_data`:
    output_backprops = input_gradients * (1 if nudged_min <= input_data <= nudged_max else 0)

    Args:
        input_gradients (tvm.tensor.Tensor): incoming gradients from the previous operation
        input_data (tvm.tensor.Tensor): input of fake-quantize, only supports "float32"
        input_min (tvm.tensor.Tensor): per-channel minima; shape must equal input_max's
            shape, and its first dimension must match input_data's last dimension.
            Only supports fp32.
        input_max (tvm.tensor.Tensor): per-channel maxima, only supports fp32
        num_bits (int): Defaults to 8. Bit width of the quantization, between 2 and 16.
        narrow_range (bool):
            True: quantize into the range [1, 2^num_bits - 1]
            False: quantize into the range [0, 2^num_bits - 1]

    Returns:
        tvm.tensor.Tensor

    Raises:
        RuntimeError: on shape mismatch or num_bits outside [2, 16].
    """
    # Resolve concrete shapes up front; all validation below works on these.
    input_gradients_shape = get_shape(input_gradients)
    input_data_shape = get_shape(input_data)
    input_min_shape = get_shape(input_min)
    input_max_shape = get_shape(input_max)
    vc_util.check_shape(input_gradients_shape)
    vc_util.check_shape(input_data_shape)
    vc_util.check_shape(input_min_shape)
    vc_util.check_shape(input_max_shape)
    # Gradients must match the data element-wise; min and max must match each other.
    vc_util.elemwise_shape_check(input_gradients.shape, input_data.shape)
    vc_util.elemwise_shape_check(input_min_shape, input_max_shape)
    # Per-channel: one (min, max) pair per element of the last data dimension.
    if input_min_shape[0] != input_data_shape[-1]:
        raise RuntimeError(
            "The shapes of min,max and shape_inputs last one dimension shoud be same")

    # Davinci backend only supports float32 for this op.
    vc_util.ops_dtype_check(input_gradients.dtype, vc_util.DtypeForDavinci.FLOAT32)
    vc_util.ops_dtype_check(input_data.dtype, vc_util.DtypeForDavinci.FLOAT32)
    vc_util.ops_dtype_check(input_min.dtype, vc_util.DtypeForDavinci.FLOAT32)
    vc_util.ops_dtype_check(input_max.dtype, vc_util.DtypeForDavinci.FLOAT32)

    if num_bits > 16 or num_bits < 2:
        raise RuntimeError("numbits should be range[2,16]")

    # Broadcast the per-channel bounds up to the full data shape before the
    # element-wise gradient computation.
    input_min_broadcast = topi.broadcast_to(input_min, input_data_shape)
    input_max_broadcast = topi.broadcast_to(input_max, input_data_shape)

    res = fake_quant_with_min_max_vars_per_channel_gradient_compute(input_gradients,
                                                                    input_data,
                                                                    input_min_broadcast,
                                                                    input_max_broadcast,
                                                                    num_bits,
                                                                    narrow_range)
    return res
36452b2783b35280f87dcbd260fad4f11d95f73a
3,635,468
def test_split(data, test_size=0.3):
    """
    Split data into train and test subsets.

    :param data: Array-like data to split.
    :param test_size: Fraction of the data to place in the test subset.
    :return: Tuple of matrix-like subsets (train_subset, test_subset).
    """
    # test_size must be passed by keyword: train_test_split's signature is
    # train_test_split(*arrays, test_size=...), so passing it positionally
    # made it a second dataset to split rather than the split fraction.
    return sklearn.model_selection.train_test_split(data, test_size=test_size)
d53d5957960046e09c6e2fcdf88dc24251689af8
3,635,469
def unpack_remotedata(o, byte_keys=False, myset=None):
    """ Unpack WrappedKey objects from collection

    Returns original collection and set of all found keys

    Examples
    --------
    >>> rd = WrappedKey('mykey')
    >>> unpack_remotedata(1)
    (1, set())
    >>> unpack_remotedata(())
    ((), set())
    >>> unpack_remotedata(rd)
    ('mykey', {'mykey'})
    >>> unpack_remotedata([1, rd])
    ([1, 'mykey'], {'mykey'})
    >>> unpack_remotedata({1: rd})
    ({1: 'mykey'}, {'mykey'})
    >>> unpack_remotedata({1: [rd]})
    ({1: ['mykey']}, {'mykey'})

    Use the ``byte_keys=True`` keyword to force string keys

    >>> rd = WrappedKey(('x', 1))
    >>> unpack_remotedata(rd, byte_keys=True)
    ("('x', 1)", {"('x', 1)"})
    """
    # Top-level call: create the accumulator set, recurse with it, and return
    # the (unpacked, keys) pair.  All recursive calls below pass a non-None
    # myset and therefore skip this branch.
    if myset is None:
        myset = set()
        out = unpack_remotedata(o, byte_keys, myset)
        return out, myset

    typ = type(o)
    if typ in collection_types:
        # Sequence-like collection: rebuild it with each item unpacked,
        # preserving the concrete type (list, tuple, ...).
        if not o:
            return o
        outs = [unpack_remotedata(item, byte_keys, myset) for item in o]
        return type(o)(outs)
    elif typ is dict:
        # Only values are unpacked; keys are left as-is.
        if o:
            values = [unpack_remotedata(v, byte_keys, myset) for v in o.values()]
            return dict(zip(o.keys(), values))
        else:
            return o
    elif issubclass(typ, WrappedKey):  # TODO use type is Future
        # Leaf case: replace the wrapper with its key and record the key.
        k = o.key
        if byte_keys:
            k = tokey(k)
        myset.add(k)
        return k
    else:
        # Anything else passes through untouched.
        return o
d1beda543ff3045bcb603d14dba7a5d6c4689f4b
3,635,470
def xscontrol_Vars(*args):
    """
    Args:
        pilot(Handle_IFSelect_SessionPilot)

    Returns:
        static Handle_XSControl_Vars

    Returns the Vars of a SessionPilot; it is brought by the Session and
    provides access to external variables.
    """
    # SWIG-generated wrapper: forwards directly to the native _XSControl binding.
    return _XSControl.xscontrol_Vars(*args)
280e0d6823fa86adbff02ce633075b5b6c4a4e4e
3,635,471
def swing_twist_decomposition(q, twist_axis):
    """Decompose quaternion q into swing and twist about twist_axis.

    Based on code by Janis Sprenger, following Dobrowolski 2015,
    "Swing-twist decomposition in Clifford algebra"
    (https://arxiv.org/abs/1506.05481).

    Returns (swing_q, twist_q), quaternions in [w, x, y, z] order.
    """
    q = normalize(q)
    vector_part = np.array([q[1], q[2], q[3]])
    # Project the quaternion's vector part onto the twist axis.
    projection = np.dot(twist_axis, vector_part) * twist_axis
    twist_q = np.array([q[0], projection[0], projection[1], projection[2]])
    # Degenerate case: no rotational component about the axis at all.
    if np.linalg.norm(twist_q) == 0:
        twist_q = np.array([1, 0, 0, 0])
    twist_q = normalize(twist_q)
    # swing = q * twist^-1
    swing_q = quaternion_multiply(q, quaternion_inverse(twist_q))
    return swing_q, twist_q
555b7897aafc3085c875513274c44abc3f06fbb4
3,635,472
import os

def DatasetFileName(filename):
    """Return the location of `filename` within --dataset_dir.

    Joins `filename` onto the module-level ``dataset_dir`` (presumably
    populated from the --dataset_dir flag elsewhere -- TODO confirm).
    """
    return os.path.join(dataset_dir, filename)
b74c7e1c5e8386ef74172f9bd317badff6833d65
3,635,473
import requests

def resolve_s1_slc(identifier, download_url, project):
    """Resolve S1 SLC using ASF datapool (ASF or NGAP). Fallback to ESA.

    Returns (url, queue): the download URL to use and the matching worker queue.
    """
    vertex_url = "https://datapool.asf.alaska.edu/SLC/SA/{}.zip".format(
        identifier)
    head = requests.head(vertex_url, allow_redirects=True)
    status = head.status_code

    if status == 403:
        # Product exists at ASF (403 after redirect); use the redirected URL.
        return head.url, "{}-job_worker-small".format(project)
    if status == 404:
        # Not at ASF; fall back to the ESA SciHub download URL.
        return download_url, "factotum-job_worker-scihub_throttled"
    raise RuntimeError("Got status code {} from {}: {}".format(
        status, vertex_url, head.url))
cf489b0d65a83dee3f87887a080d67acd180b0b3
3,635,474
def equivalent_gaussian_Nsigma_from_logp(logp):
    """Number of Gaussian sigmas corresponding to tail log-probability.

    This function computes the value of the characteristic function of a
    standard Gaussian distribution for the tail probability equivalent to the
    provided p-value, and turns this value into units of standard deviations
    away from the Gaussian mean. This allows the user to make a statement
    about the signal such as "I detected this pulsation at 4.1 sigma".

    Accepts a scalar or an array of log-p-values.

    The example values below are obtained by brute-force integrating the
    Gaussian probability density function using the mpmath library
    between Nsigma and +inf.

    Examples
    --------
    >>> pvalues = [0.15865525393145707, 0.0013498980316301035,
    ...            9.865877e-10, 6.22096e-16,
    ...            3.0567e-138]
    >>> log_pvalues = np.log(np.array(pvalues))
    >>> sigmas = np.array([1, 3, 6, 8, 25])
    >>> # Single number
    >>> np.isclose(equivalent_gaussian_Nsigma_from_logp(log_pvalues[0]),
    ...            sigmas[0], atol=0.01)
    True
    >>> # Array
    >>> np.allclose(equivalent_gaussian_Nsigma_from_logp(log_pvalues),
    ...             sigmas, atol=0.01)
    True
    """
    logp = np.asarray(logp)
    if logp.ndim == 0:
        if logp < -300:
            # norm.isf underflows for extremely small p; use the asymptotic
            # expansion instead.
            return _extended_equiv_gaussian_Nsigma(logp)
        return stats.norm.isf(np.exp(logp))
    # Array input (as in the docstring examples): process element-wise so the
    # scalar `logp < -300` test never hits an ambiguous array truth value,
    # which the original code raised ValueError on.
    flat = [equivalent_gaussian_Nsigma_from_logp(v) for v in logp.ravel()]
    return np.array(flat).reshape(logp.shape)
8dde13e19fe15d2dbdfdc3c1902edcf220672796
3,635,475
import tempfile
from pathlib import Path
import logging
import zipfile

def fetch_ratings():
    """Download the MovieLens 25M archive and return its ratings as a DataFrame."""
    url = "http://files.grouplens.org/datasets/movielens/ml-25m.zip"

    with tempfile.TemporaryDirectory() as tmp_dir:
        zip_path = Path(tmp_dir, "download.zip")

        logging.info(f"Downloading zip file from {url}")
        urlretrieve(url, zip_path)

        with zipfile.ZipFile(zip_path) as zip_:
            logging.info(f"Downloaded zip file with contents: {zip_.namelist()}")
            logging.info("Reading ml-25m/ratings.csv from zip file")
            with zip_.open("ml-25m/ratings.csv") as ratings_file:
                return pd.read_csv(ratings_file)
439b9603a849d822d30e93e663a3e9195651cd06
3,635,476
def _parse_affected(text):
    """Extract the affected releases, services, and other software

    Args:
        text: the text version of the security note

    Returns:
        dict with 'releases', 'services' and 'other' lists
    """
    # Sets for O(1) membership tests on the known names.
    valid_releases = {'austin', 'bexar', 'cactus', 'diablo', 'essex',
                      'folsom', 'grizzly', 'havana', 'icehouse', 'juno',
                      'kilo', 'liberty'}
    valid_services = {'barbican', 'ceilometer', 'cinder', 'designate',
                      'glance', 'heat', 'horizon', 'ironic', 'keystone',
                      'manila', 'neutron', 'nova', 'sahara', 'swift',
                      'trove', 'zaqar'}

    # Extract the affected section
    section = _parse_section(text, "Affected Services / Software")

    # Sort each comma-separated entry into releases, services, or other.
    releases, services, other = [], [], []
    for raw in section[0].split(','):
        item = raw.strip()
        lowered = item.lower()
        if lowered in valid_releases:
            releases.append(item)
        elif lowered in valid_services:
            services.append(item)
        else:
            other.append(item)

    # NGK(TODO) What about other free form text? Can we assume the first line
    # is csv? Maybe detect CSV and make an affected text from the non-CSV
    # content?
    return {'releases': releases, 'services': services, 'other': other}
e9da321ee587455d60da09d26a818038ab6e7c0f
3,635,477
def _window_view(a, window, step = None, axis = None, readonly = True): """ Create a windowed view over `n`-dimensional input that uses an `m`-dimensional window, with `m <= n` Parameters ------------- a : Array-like The array to create the view on window : tuple or int If int, the size of the window in `axis`, or in all dimensions if `axis == None` If tuple, the shape of the desired window. `window.size` must be: equal to `len(axis)` if `axis != None`, else equal to `len(a.shape)`, or 1 step : tuple, int or None The offset between consecutive windows in desired dimension If None, offset is one in all dimensions If int, the offset for all windows over `axis` If tuple, the step along each `axis`. `len(step)` must me equal to `len(axis)` axis : tuple, int or None The axes over which to apply the window If None, apply over all dimensions if tuple or int, the dimensions over which to apply the window generator : boolean Creates a generator over the windows If False, it will be an array with `a.nidim + 1 <= a_view.ndim <= a.ndim *2`. 
If True, generates one window per .next() call readonly: return array as readonly Returns ------- a_view : ndarray A windowed view on the input array `a`, or a generator over the windows """ ashp = np.array(a.shape) if axis != None: axs = np.array(axis, ndmin = 1) assert np.all(np.in1d(axs, np.arange(ashp.size))), "Axes out of range" else: axs = np.arange(ashp.size) window = np.array(window, ndmin = 1) assert (window.size == axs.size) | (window.size == 1), "Window dims and axes don't match" wshp = ashp.copy() wshp[axs] = window assert np.all(wshp <= ashp), "Window is bigger than input array in axes" stp = np.ones_like(ashp) if step: step = np.array(step, ndmin = 1) assert np.all(step > 0), "Only positive step allowed" assert (step.size == axs.size) | (step.size == 1), "step and axes don't match" stp[axs] = step astr = np.array(a.strides) shape = tuple((ashp - wshp) // stp + 1) + tuple(wshp) strides = tuple(astr * stp) + tuple(astr) as_strided = np.lib.stride_tricks.as_strided a_view = np.squeeze(as_strided(a, shape = shape, strides = strides, writeable=not readonly)) return a_view
2e083a105be37e1fe9e784c0378c3a0bb667601e
3,635,478
def approveReport(id): """ Function to approve a report """ # Approve the doc source entity record sgtable = s3db.stats_group sgt_table = s3db.stats_group_type resource = s3db.resource("stats_group", id=id, unapproved=True) resource.approve() # find the type of report that we have query = (sgtable.id == id) & \ (sgtable.group_type_id == sgt_table.id) record = db(query).select(sgt_table.name, sgt_table.stats_group_instance, limitby=(0, 1)).first() rec_type = record.name if rec_type == "vulnerability_indicator" or rec_type == "stats_demographic": # Find the type of stats source record that we have if rec_type == "vulnerability_indicator": query = (s3db.vulnerability_data.group_id == id) resource = s3db.resource("vulnerability_data", filter=query, unapproved=True) resource.approve() if rec_type == "stats_demographic": query = (s3db.stats_demographic_data.group_id == id) resource = s3db.resource("stats_demographic_data", filter=query, unapproved=True) resource.approve() # Approve the stats_data records query = (s3db.stats_data.group_id == id) resource = s3db.resource("stats_data", filter=query, unapproved=True) resource.approve() return True rec_instance = record.stats_group_instance if rec_instance == "doc_image": query = (sgtable.id == id) &\ (s3db.doc_image.source_id == sgtable.source_id) resource = s3db.resource("doc_image", filter=query, unapproved=True) resource.approve() return True return False
25a87f20870b5ac90dea6c5013a337ffabfd9e04
3,635,479
def _maybe_promote_geometry(geom):
    """Promote the geometry to its Multi- counterpart if a promoter is
    registered for its type; otherwise return the geometry unchanged."""
    try:
        promoter = _promotion_dispatch[geom.type]
    except KeyError:
        # No registered promoter: pass the geometry through untouched.
        return geom
    return promoter([geom])
4b6f5805049025cb3692dff77af708350e3ccc8f
3,635,480
def preprocess_input(frames):
    """Resize video frames, subtract the mean, and centre-crop to 112x112.

    Args:
        frames (tf.Tensor): Video frames to preprocess. Expected shape
            (frames, rows, columns, channels).

    Returns:
        A TF Tensor.
    """
    # Resize to 128x171 before cropping.
    resized = tf.image.resize(frames, (128, 171))
    # `mean` is a module-level tensor -- presumably per-pixel dataset means;
    # TODO confirm where it is defined.
    centered = resized - mean
    # Centre crop to 112x112 (row offset 8, column offset 30).
    return tf.image.crop_to_bounding_box(centered, 8, 30, 112, 112)
8bbede3ef2d8f131ee09f46bddfd4b66ad868f27
3,635,481
from pathlib import Path

def get_cache_info(path: Path) -> CacheInfo:
    """Return the (mtime, size) pair used to check whether a file is
    already formatted."""
    file_stat = path.stat()
    return file_stat.st_mtime, file_stat.st_size
4559d5e0179c803c7a4b23c9bd15a7317c84a2a7
3,635,482
def gmm_component_contributions_to_message_length(responsibilities, log_likelihoods, covs,
                                                  weights):
    """
    Return the component-wise contributions to the MML message length of a
    Gaussian mixture.

    Parameters (shapes inferred from the indexing below):
        responsibilities: (K, N) array of per-component responsibilities.
        log_likelihoods: per-datum log-likelihoods, conformable with
            ``responsibilities @ -log_likelihoods``.
        covs: (K, D, D) array of component covariance matrices.
        weights: (K,) array of mixture weights.

    Returns:
        Array of length-K message-length contributions.
    """
    K, N = responsibilities.shape
    K, D, _ = covs.shape
    # TODO: Refactor with gaussian_mixture_message_length

    # Data fit term: responsibility-weighted negative log-likelihood.
    I_data = responsibilities @ -log_likelihoods

    Q = gmm_number_of_parameters(K, D)

    # Mixture- and parameter-encoding terms; presumably following the MML
    # message-length formulation for Gaussian mixtures -- TODO confirm against
    # the companion gaussian_mixture_message_length implementation.
    I_mixtures = K * np.log(2) * (1 - D/2.0) + gammaln(K) \
               + 0.25 * (2.0 * (K - 1) + K * D * (D + 3)) * np.log(N)
    I_parameters = 0.5 * np.log(Q * np.pi) - 0.5 * Q * np.log(2 * np.pi)

    # Per-component determinant term via slogdet (sign is discarded).
    I_slogdetcovs = -0.5 * (D + 2) * np.linalg.slogdet(covs)[1]
    I_weights = (0.25 * D * (D + 3) - 0.5) * np.log(weights)

    # Shared terms are split evenly across the K components.
    I_components = (I_mixtures + I_parameters)/K \
                 + I_data + I_slogdetcovs + I_weights
    return I_components
0c06c5b5881f58ae63fe8326c87f2fcbac2ddbe7
3,635,483
def validate(request):
    """Method for validating a common request.

    Runs versioning validation first, then player validation; returns the
    first failing result, or {'status': 'ok'} when both pass.
    """
    validation = versioning.validate(request)
    if validation['status'] != 'ok':
        return validation

    cursor = mysql.connection.cursor()
    validation = player.validate(request, cursor)
    if validation['status'] != 'ok':
        return validation

    return {'status': 'ok'}
5f4a0205dc002b994b3b49582f82594f84fcc64b
3,635,484
def text_to_list(text):
    """ Convert the paper into a list of preformatted sentences. """
    # Normalise symbols first, then lowercase everything.
    normalised = symbol.substitute_symbol(text).lower()
    # Split into paragraphs on Windows line endings.
    paragraphs = normalised.split("\r\n")
    filtered = ignoretopics.ignore_topics(paragraphs)
    filtered = accenter.deaccent(filtered)
    # Drop short entries -- presumably those under 5 characters; TODO confirm
    # ignore_small's second argument is a length threshold.
    filtered = ignoretopics.ignore_small(filtered, 5)
    return filtered
c971423eaa7dcecd3f36019a852cfb400ab014c6
3,635,485
def make_anagram_dict(filename):
    """Build an anagram lookup from a text file containing one word per line.

    Returns a dictionary:
        Key is an alphabetised tuple of the letters in each word,
        Value is a list of all words that can be formed by those letters.

    :param filename: path to a text file with one word per line
    :return: dict mapping sorted-letter tuples to lists of words
    """
    result = {}
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename) as fin:
        for line in fin:
            word = line.strip().lower()
            letters_in_word = tuple(sorted(word))
            # setdefault replaces the explicit membership test + two branches.
            result.setdefault(letters_in_word, []).append(word)
    return result
c6c0ad29fdf63c91c2103cefc506ae36b64a40ec
3,635,486
def test_hierarchical_seeding(RefSimulator):
    """Changes to subnetworks shouldn't affect seeds in top-level network"""

    def create(make_extra, seed):
        # Build a network with a fixed top-level structure; `make_extra`
        # only adds an ensemble INSIDE the subnetwork, which must not
        # perturb the seeds assigned to the tracked top-level objects.
        objs = []
        with nengo.Network(seed=seed, label='n1') as model:
            objs.append(nengo.Ensemble(10, 1, label='e1'))
            with nengo.Network(label='n2'):
                objs.append(nengo.Ensemble(10, 1, label='e2'))
                if make_extra:
                    # This shouldn't affect any seeds
                    objs.append(nengo.Ensemble(10, 1, label='e3'))
            objs.append(nengo.Ensemble(10, 1, label='e4'))
        return model, objs

    # Two networks with the same seed (one with the extra ensemble), and
    # one with a different seed.
    same1, same1objs = create(False, 9)
    same2, same2objs = create(True, 9)
    diff, diffobjs = create(True, 10)

    same1seeds = RefSimulator(same1).model.seeds
    same2seeds = RefSimulator(same2).model.seeds
    diffseeds = RefSimulator(diff).model.seeds

    for diffobj, same2obj in zip(diffobjs, same2objs):
        # These seeds should all be different
        assert diffseeds[diffobj] != same2seeds[same2obj]

    # Skip the extra ensemble
    same2objs = same2objs[:2] + same2objs[3:]
    for same1obj, same2obj in zip(same1objs, same2objs):
        # These seeds should all be the same
        assert same1seeds[same1obj] == same2seeds[same2obj]
108cc8b05f5ea30a2d8b2892e77bc0b42b880500
3,635,487
def group_property_types(row : str) -> str:
    """
    This function maps each detailed building-type label onto one of five
    coarse categories:

    - Residential
    - Storage
    - Retail
    - Office
    - Other

    this was done to reduce the dimensionality down to the top building
    types.

    :param: row (str) : The row of the pandas series
    :rvalue: str
    :return: One of 5 building types.
    """
    # Set membership replaces the original chains of `or`-ed equality
    # comparisons; behavior is identical, lookup is O(1).
    residential = {
        'Multifamily Housing',
        'Residence Hall/Dormitory',
        'Hotel',
        'Other - Lodging/Residential',
        'Residential Care Facility',
    }
    storage = {
        'Non-Refrigerated Warehouse',
        'Self-Storage Facility',
        'Refrigerated Warehouse',
    }
    office = {'Financial Office', 'Office'}
    retail = {
        'Restaurant',
        'Retail Store',
        'Enclosed Mall',
        'Other - Mall',
        'Strip Mall',
        'Personal Services (Health/Beauty, Dry Cleaning, etc.)',
        'Lifestyle Center',
        'Wholesale Club/Supercenter',
    }

    if row in residential:
        return 'Residential'
    if row in storage:
        return 'Storage'
    if row in office:
        return 'Office'
    if row in retail:
        return 'Retail'
    return 'Other'
44aa5d70baaa24b0c64b7464b093b59ff39d6d1c
3,635,488
def parseLbannLayer(l, tensorShapes, knownNodes=None):
    """
    Parses a given LBANN layer and returns the equivalent ONNX expressions
    needed to represent the layer.

    Args:
        l (lbann_pb2.Layer): A LBANN layer to be converted.
        tensorShapes (dict): Shapes of known named tensors.
        knownNodes (list): A list of known ONNX nodes in the same network.
            This information is needed when the layer refers to information
            of another layer, such as unpooling.

    Returns:
        dict: Generated ONNX expressions.
            "nodes"  (list of onnx.NodeProto): A list of ONNX operators.
            "inputs" (list of onnx.ValueInfoProto): A list of ONNX value information.
            "inits"  (list of onnx.TensorProto): A list of ONNX tensors.

    Raises:
        RuntimeError: when a referenced tensor's shape cannot be inferred.
        NotImplementedError: for LBANN operators with no registered parser.
    """
    # Default handled inside the body to avoid a shared mutable default list.
    if knownNodes is None:
        knownNodes = []

    # Layers with no ONNX counterpart (or no outputs) produce nothing.
    if any(map(lambda x: l.HasField(x), ["input",
                                         "identity",  # LBANN's "identity" does not have outputs
                                         "dummy"])):
        return {}

    if l.HasField("split"):
        if l.name not in tensorShapes.keys():
            raise RuntimeError("The shape of \"{}\" cannot be inferred.".format(l.name) \
                               + " This error may happen when you set incorret an input tensor name.")
        ipt = onnx.helper.make_tensor_value_info(name="{}_0".format(l.name),
                                                 elem_type=lbann.onnx.ELEM_TYPE,
                                                 shape=tensorShapes[l.name])
        return {"inputs": [ipt]}

    # Tensor names follow the "<layer>_0" convention.
    lbannInputs = list(map(lambda x: "{}_0".format(x),
                           l.parents.split(" ") if l.parents != "" else []))
    # NOTE(review): lbannOutputs is computed but never used below.
    lbannOutputs = l.children.split(" ") if len(l.children) > 0 else []

    for f in PARSERS.keys():
        if l.HasField(f):
            for i in lbannInputs:
                if not i in tensorShapes.keys():
                    raise RuntimeError("The shape of \"{}\" cannot be inferred.".format(i))

            p = PARSERS[f](l, f, list(map(lambda x: tensorShapes[x], lbannInputs)), knownNodes)
            p.parse()
            return {"nodes": p.nodes, "inputs": p.paramValueInfos, "inits": p.paramInits}

    # The original was missing `raise`, so unknown operators silently
    # returned None instead of failing loudly.
    raise NotImplementedError("Unimplemented LBANN operator: {}".format(l))
c915b175ecf7c99dbc675302d055393e90bf8b12
3,635,489
def write_simple_templates(n_rules, body_predicates=1, order=1):
    """Generate rule templates of form C :- A ^ B of varying size and order."""
    # Variable tuple "(X,Y,...)" shared by head and body predicates.
    variables = ",".join(chr(ord('X') + i) for i in range(order))
    const_term = "(" + variables + ")"

    rule = "{0} #1{1} :- #2{1}".format(n_rules, const_term)
    # Additional body predicates are numbered from #3 upwards.
    for predicate_index in range(3, body_predicates + 2):
        rule += ", #" + str(predicate_index) + const_term

    return [rule]
3a911702be9751b0e674171ec961029f5b10a9e7
3,635,490
def get_datatoken_minter(datatoken_address):
    """
    :return: Eth account address of the Datatoken minter
    """
    datatoken_contract = get_dt_contract(get_web3(), datatoken_address)
    return datatoken_contract.caller.minter()
da71e8e05569a6cdc2661fd3f8b937510d5e037f
3,635,491
from pixar import UsdviewPlug
import os, time

def GetAssetCreationTime(primStack, assetIdentifier):
    """Finds the weakest layer in which assetInfo.identifier is set to
    'assetIdentifier', and considers that an "asset-defining layer".
    We then effectively consult the asset resolver plugin to tell us the
    creation time for the asset, based on the layer.realPath and the
    identifier.  'effectively' because Ar does not yet have such a query, so
    we leverage usdview's plugin mechanism, consulting a function
    GetAssetCreationTime(filePath, layerIdentifier) if it exists, falling
    back to stat'ing the filePath if the plugin does not exist.

    Returns a triple of strings: (fileDisplayName, creationTime, owner)"""
    # Walk the prim stack from weakest to strongest, looking for the spec
    # whose assetInfo identifier matches.
    definingLayer = None
    for spec in reversed(primStack):
        if spec.HasInfo('assetInfo'):
            identifier = spec.GetInfo('assetInfo')['identifier']
            if identifier == assetIdentifier:
                definingLayer = spec.layer
                break
    if definingLayer:
        definingFile = definingLayer.realPath
    else:
        # Fall back to the strongest layer's file if no defining layer found.
        definingFile = primStack[-1].layer.realPath
        print "Warning: Could not find expected asset-defining layer for %s" %\
              assetIdentifier

    # NOTE(review): bare except silently swallows any plugin failure, not
    # just a missing UsdviewPlug hook -- consider narrowing.
    try:
        return UsdviewPlug.GetAssetCreationTime(definingFile, assetIdentifier)
    except:
        # No plugin (or plugin failed): stat the file directly.
        stat_info = os.stat(definingFile)
        return (definingFile.split('/')[-1],
                time.ctime(stat_info.st_ctime),
                GetFileOwner(definingFile))
24281a61c9a5dbf18a30ccc4856ebae56b11b96b
3,635,492
def symplectic_map_personal(x, px, step_values, n_iterations, epsilon, alpha, beta, x_star, delta, omega_0, omega_1, omega_2, action_radius, gamma=0.0):
    """computation for personal noise symplectic map

    Each particle gets its own independently generated correlated noise
    sequence; arrays are updated in place and also returned.

    Parameters
    ----------
    x : ndarray
        x initial condition
    px : ndarray
        px initial condition
    step_values : ndarray
        iterations already performed (per particle)
    n_iterations : unsigned int
        number of iterations to perform
    epsilon : float
        epsilon value
    alpha : float
        alpha exponential
    beta : float
        beta exponential
    x_star : float
        nek coefficient
    delta : float
        nek coefficient
    omega_0 : float
        ipse dixit
    omega_1 : float
        ipse dixit
    omega_2 : float
        ipse dixit
    action_radius : float
        barrier radius in action value!
    gamma : float, optional
        correlation coefficient of the per-particle noise, by default 0.0

    Returns
    -------
    (ndarray, ndarray, ndarray)
        x, px, step_values (the same arrays, mutated in place)
    """
    # prange: presumably compiled with numba's parallel njit elsewhere --
    # TODO confirm; iterations over particles are independent.
    for i in prange(len(x)):
        # Fresh noise sequence per particle ("personal" noise).
        personal_noise = make_correlated_noise(n_iterations, gamma)
        x[i], px[i], step_values[i] = iterate(x[i], px[i], personal_noise, epsilon, alpha, beta, x_star, delta, omega_0, omega_1, omega_2, action_radius, step_values[i])
    return x, px, step_values
a7f71eb1e160069adbe18a3d2792310fc3c58517
3,635,493
import os

def wl_offset(input_table, **kwargs):
    """
    Derives wavelength offsets for white dwarf exposures.

    Cross-correlates flux and net from white dwarf exposures against one
    another to derive offsets.

    NOTE(review): as written, this function only resolves configuration
    (defaults, output/spec directories) and returns the table unchanged --
    the cross-correlation step appears to be unimplemented scaffolding;
    confirm before relying on it.

    Parameters
    ----------
    input_table : abscal.common.exposure_data_table.AbscalDataTable
        Table of exposures to have offsets generated.
    kwargs : dict
        Dictionary of overrides to the default reduction parameters,
        and command-line option selections.

    Returns
    -------
    output_table : astropy.table.Table
        Updated table
    """
    task = "wfc3: grism: wloffset"
    default_values = get_defaults('abscal.common.args')
    # Dict union (Python 3.9+): module-specific defaults override the
    # common ones.
    base_defaults = default_values | get_defaults(kwargs.get('module_name', __name__))
    # NOTE(review): verbose and show_plots are currently unused below.
    verbose = kwargs.get('verbose', base_defaults['verbose'])
    show_plots = kwargs.get('plots', base_defaults['plots'])

    # Resolve the output directory: explicit out_file wins, then out_dir,
    # then the current working directory.
    if 'out_file' in kwargs:
        out_file = kwargs['out_file']
        out_dir, out_table = os.path.split(out_file)
        if out_dir == '':
            out_dir = os.getcwd()
    elif 'out_dir' in kwargs:
        out_dir = kwargs['out_dir']
    else:
        out_dir = os.getcwd()
    spec_name = kwargs.get('spec_dir', base_defaults['spec_dir'])
    # NOTE(review): spec_dir is computed but never used before returning.
    spec_dir = os.path.join(out_dir, spec_name)

    return input_table
54f4f1acd943d1fab33322c949cf88f3a8fe934f
3,635,494
def GetRevertedRevision(message):
    """Parse message to get the reverted revision if there is one."""
    lines = message.strip().splitlines()
    # Only messages that start with "revert" can name a reverted revision.
    if not lines or not lines[0].lower().startswith('revert'):
        return None

    # Scan from the bottom up: the reverted-revision line normally sits
    # near the end of the message.
    for line in reversed(lines):  # pragma: no cover
        # TODO: Handle cases where no reverted_revision in reverting message.
        match = REVERTED_REVISION_PATTERN.match(line)
        if match:
            return match.group(1)
    return None
eb6f43e3adb906f2f27916ac6071309a24cdf9f4
3,635,495
def line_center(p0, p1):
    """
    Given two points p0, p1 inside a Poincare disk, find the centre and
    radius of the circular arc that defines the hyperbolic line through them.
    https://en.wikipedia.org/wiki/Poincar%C3%A9_disk_model#Analytic_geometry_constructions_in_the_hyperbolic_plane

    Returns ([a, b, 0], r): the arc centre (z = 0) and radius.
    """
    u1, u2, _ = p0
    v1, v2, _ = p1
    cross = u1 * v2 - u2 * v1

    if abs(cross) < 1e-8:
        # Points lie (nearly) on a diameter: approximate the "arc" with a
        # huge circle centred far out along the diameter direction.
        angle = atan2(p0[1], p0[0])
        return [cos(angle), sin(angle), 0], 9999.9

    uu = u1 ** 2 + u2 ** 2
    vv = v1 ** 2 + v2 ** 2
    a = -0.5 * (u2 * vv - v2 * uu + u2 - v2) / cross
    b = -0.5 * (v1 * uu - u1 * vv + v1 - u1) / cross
    # Centre lies outside the unit disk, so a^2 + b^2 - 1 > 0.
    r = (a ** 2 + b ** 2 - 1) ** 0.5
    return [a, b, 0], r
f8af690c30423621041244ae0cb8da2eebd45063
3,635,496
import math
import decimal

def infer_decimals(value):
    """
    Return the number of decimal digits of ``value`` (as a negative
    Decimal exponent), after applying a corrective heuristic first.

    Values such as 1.0000000000000001 (common when serialising floating
    point numbers) are first truncated to 17 - N digits, where N is the
    number of integer digits of the value.  The goal is to trim the error
    margin introduced by float precision.  The example above thus becomes
    1.000000000000000 -> 1.0.
    """
    integer_part = math.modf(value)[1]
    integer_digits = len(str(int(integer_part)))
    cleaned = truncate(value, 17 - integer_digits)
    return decimal.Decimal(str(cleaned)).normalize().as_tuple().exponent
b813d5baa2c8fa6e5697a5502a1a5d299f2167a2
3,635,497
def desalt_smiles(row, smilesfield, desalter):
    """Return the desalted SMILES string for one dataframe row, or np.nan.

    row : pandas dataframe row (or mapping) holding the SMILES string
    smilesfield : str (name of the smiles field in the row)
    desalter : instance of Smiles_desalter class exposing desalt_smiles(smiles=...)
    """
    desalted = desalter.desalt_smiles(smiles=row[smilesfield])
    # A failed desalting (None) is recorded as NaN for pandas friendliness.
    return np.nan if desalted is None else desalted
cf722c51a6f5b01a946b2ecabe6e10f9abda6c79
3,635,498
def mesh_vertex_2_coloring(mesh):
    """Try to color the vertices of a mesh with two colors only without
    adjacent vertices with the same color.

    Parameters
    ----------
    mesh : Mesh
        A mesh.

    Returns
    -------
    dict, None
        A dictionary with vertex keys pointing to colors, if two-colorable.
        None if not two-colorable.
    """
    # Thin wrapper: delegates entirely to the adjacency-based two-coloring check.
    return is_adjacency_two_colorable(mesh.adjacency)
4ba963b94e9db024c2012d8b565e5af9fb838d80
3,635,499