Columns: content (string, lengths 22 to 815k characters), id (int64, values 0 to 4.91M)
def download(i): """ Input: { (repo_uoa) (module_uoa) (data_uoa) (new_repo_uoa) - new repo UOA; "local" by default (skip_module_check) - if 'yes', do not check if module for a given component exists } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ o=i.get('out','') ruoa=i.get('repo_uoa','') muoa=i.get('module_uoa','') duoa=i.get('data_uoa','') smc=(i.get('skip_module_check','')=='yes') # Check components to skip if muoa in ['repo', 'befd7892b0d469e9', 'env', '9b9b3208ac44b891', 'kernel', 'b1e99f6461424276', 'cfg', 'b34231a3467566f8']: return {'return':0} if muoa=='': return {'return':1, 'error':'module UOA is not defined'} if duoa=='': duoa='*' # return {'return':1, 'error':'data UOA is not defined'} nruoa=i.get('new_repo_uoa','') if nruoa=='': nruoa='local' # Check if writing to new repo is allowed r=find_path_to_repo({'repo_uoa':nruoa}) if r['return']>0: return r nruoa=r['repo_uoa'] nruid=r['repo_uid'] nrd=r['dict'] npath=r['path'] ii={'repo_uoa':nruoa, 'repo_uid':nruid, 'repo_dict':nrd} r=check_writing(ii) if r['return']>0: return r rz={'return':0} if o=='con': # out('') out(' WARNING: downloading missing CK component "'+muoa+':'+duoa+'" from the cKnowledge.io portal ...') ii={ 'action':'download', 'dict':{ 'module_uoa':muoa, 'data_uoa':duoa } } import ck.net r=ck.net.access_ck_api({'url':cfg['cknowledge_api'], 'dict':ii}) if r['return']>0: return r d=r['dict'] if d['return']>0: if d['return']!=16: return {'return':d['return'], 'error':d['error']} out(' Warning: component not found') return {'return':0} nlst=d.get('components',[]) # Check if module:module there (bootstrapping) lst1=[] lst=[] path_to_module='' for q in nlst: nmuoa=q['module_uoa'] nmuid=q['module_uid'] nduoa=q['data_uoa'] nduid=q['data_uid'] if nmuoa=='module' and nduoa=='module': out(' Bootstrapping '+nmuoa+':'+nduoa+' ...') # TBD: Check split dirs in local repo... 
iii={'path':npath, 'data_uoa':'module', 'data_uid':nduid} rz=find_path_to_entry(iii) if rz['return']>0 and rz['return']!=16: return rz elif rz['return']==16: rz=create_entry(iii) if rz['return']>0: return rz npath2=rz['path'] iii={'path':npath2, 'data_uoa':'module', 'data_uid':nduid} rz=find_path_to_entry(iii) if rz['return']>0 and rz['return']!=16: return rz elif rz['return']==16: rz=create_entry(iii) if rz['return']>0: return rz path_to_module=rz['path'] lst.append(q) else: lst1.append(q) lst+=lst1 # Recording downloaded components for q in lst: # Get UOA nmuoa=q['module_uoa'] nmuid=q['module_uid'] nduoa=q['data_uoa'] nduid=q['data_uid'] file_url=q['file_url'] file_md5=q['file_md5'] out(' Downloading and extracting '+nmuoa+':'+nduoa+' ...') # Check that module:module exists if nmuoa=='module' and nduoa=='module' and path_to_module!='': new_path=path_to_module else: if not smc: save_state=cfg['download_missing_components'] cfg['download_missing_components']='no' rz=access({'action':'find', 'module_uoa':'module', 'data_uoa':'module', 'common_func':'yes'}) if rz['return']>0 and rz['return']!=16: return rz if rz['return']==16: rz=download({'repo_uoa':nruoa, 'module_uoa':'module', 'data_uoa':'module', 'skip_module_check':'yes'}) if rz['return']>0: return rz cfg['download_missing_components']=save_state # Adding dummy module rz=access({'action':'add', 'module_uoa':nmuoa, 'module_uid':nmuoa, 'data_uoa':nduoa, 'data_uid':nduid, 'repo_uoa':'local', 'common_func':'yes'}) if rz['return']>0: out(' Skipping ...') continue new_path=rz['path'] # Prepare pack ppz=os.path.join(new_path, 'pack.zip') if os.path.isfile(ppz): os.remove(ppz) # Download file # Import modules compatible with Python 2.x and 3.x import urllib try: from urllib.request import urlretrieve except: from urllib import urlretrieve # Connect try: urlretrieve(file_url, ppz) except Exception as e: return {'return':1, 'error':'download failed ('+format(e)+')'} statinfo = os.stat(ppz) file_size=statinfo.st_size # MD5 of the pack rx=load_text_file({'text_file':ppz, 'keep_as_bin':'yes'}) if rx['return']>0: return rx bpack=rx['bin'] import hashlib md5=hashlib.md5(bpack).hexdigest() if md5!=file_md5: return {'return':1, 'error':'MD5 of the newly created pack ('+md5+') did not match the one from the portal ('+file_md5+')'} # Unzipping archive import zipfile new_f=open(ppz, 'rb') new_z=zipfile.ZipFile(new_f) for new_d in new_z.namelist(): if new_d!='.' and new_d!='..' and not new_d.startswith('\\'): new_pp=os.path.join(new_path,new_d) if new_d.endswith('/'): if not os.path.exists(new_pp): os.makedirs(new_pp) else: new_ppd=os.path.dirname(new_pp) if not os.path.exists(new_ppd): os.makedirs(new_ppd) # extract file new_fo=open(new_pp, 'wb') new_fo.write(new_z.read(new_d)) new_fo.close() new_f.close() # Remove pack file os.remove(ppz) return {'return':0}
21,400
def recursiveUpdate(target, source):
    """
    Recursively update the target dictionary with the source dictionary, leaving unfound keys in place.
    This is different from dict.update, which overwrites nested dictionaries wholesale instead of merging them.

    :param dict target: The dictionary to be updated
    :param dict source: The dictionary to be integrated
    :return: target dict is returned as a convenience. This function updates the target dict in place.
    :rtype: dict
    """
    for k, v in source.items():
        if isinstance(v, dict):
            target[k] = recursiveUpdate(target.get(k, {}), v)
        else:
            target[k] = v
    return target
21,401
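A minimal usage sketch of recursiveUpdate above; the dictionaries are invented for illustration and show that nested keys are merged rather than replaced.

# Hypothetical config dictionaries, purely illustrative.
target = {"db": {"host": "localhost", "port": 5432}, "debug": False}
source = {"db": {"port": 5433}}

recursiveUpdate(target, source)
# target is now {"db": {"host": "localhost", "port": 5433}, "debug": False};
# a plain target.update(source) would have replaced the whole "db" sub-dict.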
def macro_bank_usa_interest_rate(): """ 美联储利率决议报告, 数据区间从19820927-至今 https://datacenter.jin10.com/reportType/dc_usa_interest_rate_decision https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v=1578581921 :return: 美联储利率决议报告-今值(%) :rtype: pandas.Series """ t = time.time() res = requests.get( f"https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}" ) json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1]) date_list = [item["date"] for item in json_data["list"]] value_list = [item["datas"]["美国利率决议"] for item in json_data["list"]] value_df = pd.DataFrame(value_list) value_df.columns = json_data["kinds"] value_df.index = pd.to_datetime(date_list) temp_df = value_df["今值(%)"] url = "https://datacenter-api.jin10.com/reports/list_v2" params = { "max_date": "", "category": "ec", "attr_id": "24", "_": str(int(round(t * 1000))), } headers = { "accept": "*/*", "accept-encoding": "gzip, deflate, br", "accept-language": "zh-CN,zh;q=0.9,en;q=0.8", "cache-control": "no-cache", "origin": "https://datacenter.jin10.com", "pragma": "no-cache", "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment", "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-site", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36", "x-app-id": "rU6QIu7JHe2gOUeR", "x-csrf-token": "", "x-version": "1.0.0", } r = requests.get(url, params=params, headers=headers) temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2] temp_se.index = pd.to_datetime(temp_se.iloc[:, 0]) temp_se = temp_se.iloc[:, 1] temp_df = temp_df.append(temp_se) temp_df.dropna(inplace=True) temp_df.sort_index(inplace=True) temp_df = temp_df.reset_index() temp_df.drop_duplicates(subset="index", inplace=True) temp_df.set_index("index", inplace=True) temp_df = temp_df.squeeze() temp_df.index.name = None temp_df.name = "usa_interest_rate" temp_df = temp_df.astype("float") return temp_df
21,402
def print_usage():
    """ Prints usage instructions to user """
    print("python3 main.py -f [path/to/file] -s [simulator] -a [simulate/verify]")
    print(" -f Path to input file. REQUIRED.")
    print(" -a Simulate or Verify. Optional.")
    print(' [default] "simulate" or "s" ')
    print(' "verify" or "v" ')
    print(" -s Simulator choice. Optional.")
    print(' [default] "odeintfixed" or "of" for ODEINT: fixed step')
    print(' "odeintadp" or "oa" for ODEINT: adaptive step')
    print(" -r Refinement strategy. Optional.")
    print(' [default] "default" or "d" for Default Strategy')
    print(' "user" or "us" for User Strategy')
    return
21,403
def get_kwd_group(soup):
    """
    Find the kwd-group sections for further analysis to find
    subject_area, research_organism, and keywords
    """
    kwd_group = None
    kwd_group = extract_nodes(soup, 'kwd-group')
    return kwd_group
21,404
def drqa_train():
    """ Train the drqa model, requires preprocessed data """
    pass
21,405
def read_transport_file(input_file_name): """ Reads File "input_file_name".dat, and returns lists containing the atom indices of the device atoms, as well as the atom indices of the contact atoms. Also, a dictionary "interaction_distances" is generated, which spcifies the maximum interaction distance between each type of atom. """ transport_file_path = "./" + INPUT_FOLDER_NAME + "/" + \ str(input_file_name) + "_" + "transport.dat" file = open(transport_file_path, 'r') max_file_lines = 1000 iterations = 0 # IMPORTANT: In file, first atom has index is one, but in my program, # first atom has index is zero region_list = [] # List of regions, starting with device region line = file.readline() entries = line.split() #A single list of device atom indices. device_region = [] # A list of lists, one list of atom indices for each contact. contact_regions = [] iterations = 0 while iterations < max_file_lines: new_indices = list(range(int(entries[1]) - 1, int(entries[2]))) if "Device" in entries[0]: # Don't append, because we want a single list of indices for the # device region. device_region = device_region + new_indices if "Contact" in entries[0]: contact_regions.append(new_indices) line = file.readline() entries = line.split() iterations += 1 if not("Device" in entries[0] or "Contact" in entries[0]): break region_list.append(device_region) region_list += contact_regions interaction_distances = {} #line = file.readline() #stripped_line = line.replace(" ", "").replace("\n", "") #entries = line.split() # loop terminates at first empty line, or at end of file # (since readline() returns empty string at end of file) iterations = 0 while iterations < max_file_lines: key = entries[0] + entries[1] interaction_distances[key] = float(entries[2]) line = file.readline() entries = line.split() iterations += 1 stripped_line = line.replace(" ", "").replace("\n", "") if stripped_line == '': break # print("In read_transport_file: " + str(region_list)) return (region_list, interaction_distances)
21,406
def init_zooplankton_defaults():
    """Initialize default parameters for zooplankton"""
    global _zooplankton_defaults
21,407
def testNoResources():
    """Test making a match without resources."""
    claim = ResourceClaim.create((
        ResourceSpec.create('ref0', 'typeA', ()),
        ))
    check(claim, (), None)
21,408
def a_metric_of_that_run_exists(data_access, metric_id, run_id):
    """A metric of that run exists."""
    data_access.get_run_dao().get(run_id)["info"] = {
        "metrics": [metric_id]}
    data_access.get_metrics_dao().should_receive("get") \
        .with_args(run_id, metric_id) \
        .and_return({"metric_id": str(metric_id), "run_id": str(run_id)})
    assert data_access.get_metrics_dao().get(run_id, metric_id) is not None
21,409
def handle_srv6_path(operation, grpc_address, grpc_port, destination, segments=None, device='', encapmode="encap", table=-1, metric=-1, bsid_addr='', fwd_engine='linux', key=None, update_db=True, db_conn=None, channel=None): """ Handle a SRv6 Path. """ # Dispatch depending on the operation if operation == 'add': return add_srv6_path( grpc_address=grpc_address, grpc_port=grpc_port, destination=destination, segments=segments, device=device, encapmode=encapmode, table=table, metric=metric, bsid_addr=bsid_addr, fwd_engine=fwd_engine, key=key, update_db=update_db, db_conn=db_conn, channel=channel ) if operation == 'get': return get_srv6_path( grpc_address=grpc_address, grpc_port=grpc_port, destination=destination, segments=segments, device=device, encapmode=encapmode, table=table, metric=metric, bsid_addr=bsid_addr, fwd_engine=fwd_engine, key=key, update_db=update_db, db_conn=db_conn, channel=channel ) if operation == 'change': return change_srv6_path( grpc_address=grpc_address, grpc_port=grpc_port, destination=destination, segments=segments, device=device, encapmode=encapmode, table=table, metric=metric, bsid_addr=bsid_addr, fwd_engine=fwd_engine, key=key, update_db=update_db, db_conn=db_conn, channel=channel ) if operation == 'del': return del_srv6_path( grpc_address=grpc_address, grpc_port=grpc_port, destination=destination, segments=segments, device=device, encapmode=encapmode, table=table, metric=metric, bsid_addr=bsid_addr, fwd_engine=fwd_engine, key=key, update_db=update_db, db_conn=db_conn, channel=channel ) # Operation not supported, raise an exception logger.error('Operation not supported') raise utils.OperationNotSupportedException
21,410
def mask_outside_polygon(poly_verts, ax, facecolor=None, edgecolor=None, alpha=0.25):
    """
    Plots a mask on the specified axis ("ax", defaults to plt.gca()) such that
    all areas outside of the polygon specified by "poly_verts" are masked.

    "poly_verts" must be a list of tuples of the vertices in the polygon in
    counter-clockwise order.

    Returns the matplotlib.patches.PathPatch instance plotted on the figure.
    """
    # Get current plot limits
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()

    # Vertices of the plot boundaries in clockwise order
    bound_verts = [
        (xlim[0], ylim[0]),
        (xlim[0], ylim[1]),
        (xlim[1], ylim[1]),
        (xlim[1], ylim[0]),
        (xlim[0], ylim[0]),
    ]

    # A series of codes (1 and 2) to tell matplotlib whether to draw a line or
    # move the "pen" (So that there's no connecting line)
    bound_codes = [mpath.Path.MOVETO] + (len(bound_verts) - 1) * [mpath.Path.LINETO]
    poly_codes = [mpath.Path.MOVETO] + (len(poly_verts) - 1) * [mpath.Path.LINETO]

    # Plot the masking patch
    path = mpath.Path(bound_verts + poly_verts, bound_codes + poly_codes)
    patch = mpatches.PathPatch(
        path, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha
    )
    patch = ax.add_patch(patch)

    # Reset the plot limits to their original extents
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    return patch
21,411
def setup_data(cluster):
    """
    Get decision boundaries by means of np.meshgrid
    :return: Tuple (vectors, centroids, X component of meshgrid, Y component of meshgrid, predicted labels Z)
    """
    feature_vectors, _, centroids, _, kmeans = cluster

    # Step size of the mesh. Decrease to increase the quality of the VQ.
    h = .2  # point in the mesh [x_min, x_max]x[y_min, y_max].

    # Plot the decision boundary. For that, we will assign a color to each
    x_min, x_max = feature_vectors[:, 0].min() - 1, feature_vectors[:, 0].max() + 1
    y_min, y_max = feature_vectors[:, 1].min() - 1, feature_vectors[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

    Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    return feature_vectors, centroids, xx, yy, Z
21,412
def generate_equations(model, cleanup=True, verbose=False):
    """
    Generate math expressions for reaction rates and species in a model.

    This fills in the following pieces of the model:

    * odes
    * species
    * reactions
    * reactions_bidirectional
    * observables (just `coefficients` and `species` fields for each element)

    """
    # only need to do this once
    # TODO track "dirty" state, i.e. "has model been modified?"
    # or, use a separate "math model" object to contain ODEs
    if model.odes:
        return
    lines = iter(generate_network(model, cleanup, verbose=verbose).split('\n'))
    _parse_netfile(model, lines)
21,413
def Routing_Table(projdir, rootgrp, grid_obj, fdir, strm, Elev, Strahler, gages=False, Lakes=None): """If "Create reach-based routing files?" is selected, this function will create the Route_Link.nc table and Streams.shp shapefiles in the output directory.""" # Stackless topological sort algorithm, adapted from: http://stackoverflow.com/questions/15038876/topological-sort-python def sort_topologically_stackless(graph): '''This function will navigate through the list of segments until all are accounted for. The result is a sorted list of which stream segments should be listed first. Simply provide a topology dictionary {Fromnode:[ToNode,...]} and a sorted list is produced that will provide the order for navigating downstream. This version is "stackless", meaning it will not hit the recursion limit of 1000.''' levels_by_name = {} names_by_level = defaultdict(set) def add_level_to_name(name, level): levels_by_name[name] = level names_by_level[level].add(name) def walk_depth_first(name): stack = [name] while(stack): name = stack.pop() if name in levels_by_name: continue if name not in graph or not graph[name]: level = 0 add_level_to_name(name, level) continue children = graph[name] children_not_calculated = [child for child in children if child not in levels_by_name] if children_not_calculated: stack.append(name) stack.extend(children_not_calculated) continue level = 1 + max(levels_by_name[lname] for lname in children) add_level_to_name(name, level) for name in graph: walk_depth_first(name) list1 = list(takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in count()))) list2 = [item for sublist in list1 for item in sublist][::-1] # Added by KMS 9/2/2015 to reverse sort the list list3 = [x for x in list2 if x is not None] # Remove None values from list return list3 print(' Routing table will be created...') tic1 = time.time() # Setup whitebox tool object and options wbt = WhiteboxTools() wbt.verbose = False wbt.work_dir = projdir esri_pntr = True zero_background = False id_field = 'STRM_VAL' # Whitebox-assigned stream ID field # Setup temporary and other outputs stream_id_file = os.path.join(projdir, stream_id) streams_vector_file = os.path.join(projdir, streams_vector) outStreams = os.path.join(projdir, StreamSHP) RoutingNC = os.path.join(projdir, RT_nc) # Run Whitebox functions for creating link IDs and vectors ''' The stream_link_identifier appears to output an int16 raster, limiting the number of individual stream link IDs possible. Further, it will populate negative values in the output, providing both positive and negative IDs. Unfortunately, any IDs that are assigned negative values in the output will not be resolved as stream vectors in the raster_streams_to_vector routine. ''' wbt.stream_link_identifier(fdir, strm, stream_id, esri_pntr=esri_pntr, zero_background=zero_background) wbt.raster_streams_to_vector(stream_id, fdir, streams_vector, esri_pntr=esri_pntr) print(' Stream to features step complete.') # Read the link IDs as an array from the output file strm_link_arr, ndv = return_raster_array(stream_id_file) if numpy.unique(strm_link_arr).shape[0] > 32768 or strm_link_arr[strm_link_arr<0].shape[0] > 0: print(' Warning: Number of unique IDs exceeds limit of 16-bit unsigned integer type. ' + \ 'Not all reaches may be converted to stream vectors. 
Check output carefully.') strm_link_arr[strm_link_arr==ndv] = NoDataVal # Set nodata values to WRF-Hydro nodata value strm_link_arr[strm_link_arr<1] = NoDataVal # Remove zeros from background of grid # Find any LINKID reach ID values that did not get transferred to the stream vector file. # These are typically single-cell channel cells on the edge of the grid. ds = ogr.Open(streams_vector_file) lyr = ds.GetLayer(0) # Get the 'layer' object from the data source vector_reach_IDs = numpy.unique([feature.GetField('STRM_VAL') for feature in lyr]) print(' Found {0} unique IDs in stream vector layer.'.format(len(vector_reach_IDs))) ds = lyr = None # Resolve issue where LINKID values are present that do not correspond to a vector ID (10/25/2020) grid_reach_IDs = numpy.unique(strm_link_arr[strm_link_arr!=NoDataVal]) missing_reach_IDs = grid_reach_IDs[~numpy.in1d(grid_reach_IDs, vector_reach_IDs)] print(' Eliminating {0} IDs in LINKID grid that could not be resolved in stream vector layer.'.format(missing_reach_IDs.shape[0])) print(' {0}'.format(missing_reach_IDs.tolist())) channel_arr = rootgrp.variables['CHANNELGRID'][:] strorder_arr = rootgrp.variables['STREAMORDER'][:] for idVal in missing_reach_IDs: arr_mask = strm_link_arr==idVal # Build a boolean mask for masking all array elements to be changed strm_link_arr[arr_mask] = NoDataVal # Set all linkid values that didn't get resolved in the routelink file to nodata. channel_arr[arr_mask] = NoDataVal # Set all channel values that didn't get resolved in the routelink file to nodata. strorder_arr[arr_mask] = NoDataVal # Set all channel values that didn't get resolved in the routelink file to nodata. del arr_mask rootgrp.variables['LINKID'][:] = strm_link_arr rootgrp.variables['CHANNELGRID'][:] = channel_arr rootgrp.variables['STREAMORDER'][:] = strorder_arr del channel_arr, strorder_arr, grid_reach_IDs, missing_reach_IDs gage_linkID = {} if gages: print(' Adding forecast points:LINKID association.') gage_arr = rootgrp.variables['frxst_pts'][:] unique_gages = numpy.unique(gage_arr[gage_arr!=NoDataVal]) gage_linkID = {gage:strm_link_arr[gage_arr==gage][0] for gage in unique_gages} # Create blank dictionary so that it exists and can be deleted later print(' Found {0} forecast point:LINKID associations.'.format(len(gage_linkID))) del unique_gages, gage_arr linkID_gage = {val:key for key, val in gage_linkID.items()} # Reverse the dictionary del strm_link_arr, ndv, gage_linkID # Setup coordinate transform for calculating lat/lon from x/y wgs84_proj = osr.SpatialReference() wgs84_proj.ImportFromProj4(wgs84_proj4) # Added 11/19/2020 to allow for GDAL 3.0 changes to the order of coordinates in transform if int(osgeo.__version__[0]) >= 3: # GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546 wgs84_proj.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER) coordTrans = osr.CoordinateTransformation(grid_obj.proj, wgs84_proj) # Transformation from grid projection to WGS84 # Initiate dictionaries for storing topology and attribute information Lengths = {} # Gather the stream feature length StrOrder = {} # Store stream order for each node NodeElev = {} # Elevation of the start node slope_dic = {} # Slope (unitless drop/length) NodeLL = {} # Dictionary to store geocentric (longitude, latitude) coordinates for every start node NodeXY = {} # Dictionary to store projectedc (x, y) coordinates for every start node # Open shapefile driver = ogr.GetDriverByName("ESRI Shapefile") data_source = driver.Open(streams_vector_file, 1) lyr = 
data_source.GetLayer() topology_dic = {} coords_dic = {} for feature in lyr: # Open each feature and get geometry flowline_id = int(feature.GetField(id_field)) geom = feature.GetGeometryRef() flowline_length = geom.Length() # Get coordinates of first and last point, flow line ID, and flow line length first_point = geom.GetPoint(0) last_point = geom.GetPoint(geom.GetPointCount() - 1) first_point_coords = (first_point[0], first_point[1]) last_point_coords = (last_point[0], last_point[1]) # Create topology dictionary of 'bottom_point geometry: stream flowline ID' try: topology_dic[last_point_coords] += [flowline_id] except KeyError: topology_dic[last_point_coords] = [flowline_id] # Create coordinate dictionary of flowline ID: first point, last point, length coords_dic[flowline_id] = first_point_coords, last_point_coords, flowline_length feature = geom = first_point = last_point = None lyr.ResetReading() # Create to/from dictionary matching bottom point to top point, creating dic of 'from ID: to ID' to_from_dic = {} for flowline_id, (first_point_coords, last_point_coords, flowline_length) in coords_dic.items(): if first_point_coords in topology_dic: #for feature_id in topology_dic[first_point_coords]: for feature_id in topology_dic.pop(first_point_coords): to_from_dic[feature_id] = flowline_id # Add in flowlines with nothing downstream for feature_id in coords_dic: if feature_id not in to_from_dic: to_from_dic[feature_id] = 0 del topology_dic # Get the order of segments according to a simple topological sort order = sort_topologically_stackless({key:[val] for key,val in to_from_dic.items()}) # Open elevation raster dem_array = gdal.Open(Elev, 0) dem_rb = dem_array.GetRasterBand(1) # Open strahler stream order raster strahler_array = gdal.Open(Strahler, 0) strahler_rb = strahler_array.GetRasterBand(1) # Iterate over coordinate dictionary​ tic2 = time.time() for idval, (top_xy, bot_xy, length) in coords_dic.items(): # Get top/first coordinates values from DEM row, col = grid_obj.xy_to_grid_ij(top_xy[0], top_xy[1]) top_elevation = float(dem_rb.ReadAsArray(col, row, 1, 1)) strahler_value = int(strahler_rb.ReadAsArray(col, row, 1, 1)) # Get bottom/last coordinates values from DEM row, col = grid_obj.xy_to_grid_ij(bot_xy[0], bot_xy[1]) bottom_elevation = dem_rb.ReadAsArray(col, row, 1, 1) # Fix negative slopes drop = top_elevation - bottom_elevation slope = drop/length if slope < minSo: slope = minSo # Populate all dictionaries slope_dic[idval] = float(slope) StrOrder[idval] = strahler_value NodeElev[idval] = top_elevation Lengths[idval] = length NodeXY[idval] = (top_xy[0], top_xy[1]) point = ogr.Geometry(ogr.wkbPoint) point.AddPoint(top_xy[0], top_xy[1]) point.Transform(coordTrans) # Transform the geometry NodeLL[idval] = (point.GetX(), point.GetY()) point = None del coords_dic print(' All dictionaries have been created in {0: 3.2f} seconds.'.format(time.time()-tic2)) # Create new field in shapefile field_defn = ogr.FieldDefn("to", ogr.OFTInteger64) lyr.CreateField(field_defn) field_defn = ogr.FieldDefn("Order_", ogr.OFTInteger) lyr.CreateField(field_defn) field_defn = ogr.FieldDefn("GageID", ogr.OFTString) field_defn.SetWidth(15) lyr.CreateField(field_defn) field_defn = ogr.FieldDefn("LakeID", ogr.OFTInteger64) lyr.CreateField(field_defn) field_defn = ogr.FieldDefn("length", ogr.OFTReal) lyr.CreateField(field_defn) field_defn = ogr.FieldDefn("Slope", ogr.OFTReal) lyr.CreateField(field_defn) field_defn = ogr.FieldDefn("TopElev", ogr.OFTReal) lyr.CreateField(field_defn) # Iterate over shapefile to 
add new values to the newly created field for feature in lyr: link_id = int(feature.GetField(id_field)) feature.SetField("to", to_from_dic.get(link_id, 0)) feature.SetField("Order_", StrOrder[link_id]) feature.SetField("GageID", str(linkID_gage.get(link_id, None))) feature.SetField("LakeID", NoDataVal) feature.SetField("length", Lengths[link_id]) feature.SetField("Slope", slope_dic[link_id]) feature.SetField("TopElev", NodeElev[link_id]) lyr.SetFeature(feature) data_source = feature = lyr = None print(' Fields have been added to the shapefile.') # We need to define the projection for the streams file: this is not done automatically by Whitebox. define_projection(streams_vector_file, grid_obj.proj) # Added 8/16/2020 because a value of 0 exists in the order list order.remove(0) # Call function to build the netCDF parameter table build_RouteLink(RoutingNC, order, to_from_dic, NodeElev, NodeLL, NodeXY, Lengths, StrOrder, slope_dic, gageDict=linkID_gage) del linkID_gage, order, to_from_dic, NodeElev, Lengths, StrOrder, NodeLL, NodeXY, slope_dic print('Reach-based routing inputs generated in {0:3.2f} seconds.'.format(time.time()-tic1)) return rootgrp
21,414
def list_errors( conx: Connection, ) -> t.List[t.Tuple[int, datetime.datetime, str, str, str, str]]: """Return list of all errors. The list returned contains each error as an element in the list. Each element is a tuple with the following layout: (seq nr, date, err msg, err detail, level, state mask) The 'err detail' and 'level' elements are not always present and thus may be empty. NOTE: this method is expensive and slow, as it retrieves a file from the controller over FTP and parses it. :returns: A list of all errors and their details :rtype: list(tuple(int, datetime.datetime, str, str, str, str)) """ errs = get_file_as_bytes(conx, remote_name='/md:/errall.ls') res = [] for line in errs.decode('ascii').splitlines(): # check for really empty lines if ('Robot Name' in line) or (line == ''): continue fields = list(map(str.strip, line.split('"'))) # check for empty rows (seen on just installed controllers) if not fields[2]: continue # probably OK, try to continue parsing level_state = fields[4].split() if len(level_state) > 1: ( err_level, err_state, ) = level_state else: err_level, err_state, = ( '', level_state[0], ) stamp = datetime.datetime.strptime(fields[1], '%d-%b-%y %H:%M:%S') res.append((int(fields[0]), stamp, fields[2], fields[3], err_level, err_state)) return res
21,415
async def get_incident(incident_id):
    """
    Get incident
    ---
    get:
      summary: Get incident
      tags:
        - incidents
      parameters:
        - name: id
          in: path
          required: true
          description: Object ID
      responses:
        200:
          description: The requested object
          content:
            application/json:
              schema: Incident
    """
    incident = g.Incident.find_by_id(incident_id)
    if incident is None:
        raise exceptions.NotFound(description="Incident {} was not found".format(incident_id))
    return jsonify(incident), HTTPStatus.OK
21,416
def partition_vector(vector, sets, fdtype: str='float64') -> List[NDArrayNfloat]:  # pragma: no cover
    """partitions a vector"""
    vectors = []
    for unused_aname, aset in sets:
        if len(aset) == 0:
            vectors.append(np.array([], dtype=fdtype))
            continue
        vectori = vector[aset]
        vectors.append(vectori)
    return vectors
21,417
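A small usage sketch of partition_vector above; the set names and index lists are made up for illustration.

import numpy as np

vector = np.arange(6.0)
sets = [('a-set', [0, 2, 4]), ('empty-set', [])]   # hypothetical (name, indices) pairs

partition_vector(vector, sets)
# -> [array([0., 2., 4.]), array([], dtype=float64)]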
def colorStrip(strip, color):
    """Wipe color across display a pixel at a time."""
    for i in range(strip.numPixels()):
        strip.setPixelColor(i, color)
        strip.show()
21,418
def gen_BatchIterator(data, batch_size=100, shuffle=True):
    """
    Return the next 'batch_size' examples from the X_in dataset

    Reference
    =========
    [1] tensorflow.examples.tutorial.mnist.input_data.next_batch

    Input
    =====
    data: 4d np.ndarray
        The samples to be batched
    batch_size: int
        Size of a single batch.
    shuffle: bool
        Whether shuffle the indices.

    Output
    ======
    Yield a batch generator
    """
    if shuffle:
        indices = np.arange(len(data))
        np.random.shuffle(indices)
    for start_idx in range(0, len(data) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx: start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield data[excerpt]
21,419
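A brief usage sketch of gen_BatchIterator above, with an invented 4-D array shape.

import numpy as np

data = np.random.rand(250, 32, 32, 1)   # hypothetical sample array

for batch in gen_BatchIterator(data, batch_size=100, shuffle=True):
    print(batch.shape)
# prints (100, 32, 32, 1) twice; the trailing 50 samples do not fill a full batch and are skipped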
def calculate_G4( n_numbers, neighborsymbols, neighborpositions, G_elements, theta, zeta, eta, Rs, cutoff, cutofffxn, Ri, normalized=True, image_molecule=None, n_indices=None, weighted=False, ): """Calculate G4 symmetry function. These are 3 body or angular interactions. Parameters ---------- n_symbols : list of int List of neighbors' chemical numbers. neighborsymbols : list of str List of symbols of neighboring atoms. neighborpositions : list of list of floats List of Cartesian atomic positions of neighboring atoms. G_elements : list of str A list of two members, each member is the chemical species of one of the neighboring atoms forming the triangle with the center atom. theta : float Parameter of Gaussian symmetry functions. zeta : float Parameter of Gaussian symmetry functions. eta : float Parameter of Gaussian symmetry functions. Rs : float Parameter to shift the center of the peak. cutoff : float Cutoff radius. cutofffxn : object Cutoff function. Ri : list Position of the center atom. Should be fed as a list of three floats. normalized : bool Whether or not the symmetry function is normalized. image_molecule : ase object, list List of atoms in an image. n_indices : list List of indices of neighboring atoms from the image object. weighted : bool True if applying weighted feature of Gaussian function. See Ref. 2. Returns ------- feature : float G4 feature value. Notes ----- The difference between the calculate_G3 and the calculate_G4 function is that calculate_G4 accounts for bond angles of 180 degrees. """ feature = 0.0 counts = range(len(neighborpositions)) for j in counts: for k in counts[(j + 1) :]: els = sorted([neighborsymbols[j], neighborsymbols[k]]) if els != G_elements: continue Rij_vector = neighborpositions[j] - Ri Rij = np.linalg.norm(Rij_vector) Rik_vector = neighborpositions[k] - Ri Rik = np.linalg.norm(Rik_vector) cos_theta_ijk = np.dot(Rij_vector, Rik_vector) / Rij / Rik theta_ijk = np.arccos( np.clip(cos_theta_ijk, -1.0, 1.0) ) # Avoids rounding issues cos_theta = np.cos(theta_ijk - theta) term = (1.0 + cos_theta) ** zeta term *= np.exp(-eta * ((Rij + Rik) / 2.0 - Rs) ** 2.0) if weighted: term *= weighted_h(image_molecule, n_indices) term *= cutofffxn(Rij) term *= cutofffxn(Rik) feature += term feature *= 2.0 ** (1.0 - zeta) return feature
21,420
def parse_arguments(): """Parse command line arguments.""" parser = argparse.ArgumentParser() parser.add_argument( '--output', type=str, required=False, help='GCS URL where results will be saved as a CSV.') parser.add_argument( '--query', type=str, required=True, help='The SQL query to be run in BigQuery') parser.add_argument( '--dataset_id', type=str, required=True, help='Dataset of the destination table.') parser.add_argument( '--table_id', type=str, required=True, help='Name of the destination table.') parser.add_argument( '--project', type=str, required=True, help='The GCP project to run the query.') args = parser.parse_args() return args
21,421
def paths_from_root(graph, start):
    """
    Generates paths from `start` to every other node in `graph` and puts it in
    the returned dictionary `paths`.

    i.e.: `paths_from_root(graph, start)[node]` is a list of the edge names used
    to get to `node` from `start`.
    """
    paths = {start: []}
    q = [start]
    seen = set()
    while q:
        node = q.pop()
        seen.add(node)
        for relation, child in graph[node]:
            if isnode(child) and child not in seen:
                q.append(child)
                paths[child] = paths[node] + [relation]
    return paths
21,422
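A hedged usage sketch of paths_from_root above. The graph layout and the isnode helper are assumptions: the real isnode is defined elsewhere in that module, so a permissive stand-in is used here just to make the snippet runnable.

# Stand-in for the module's real isnode(); assumed to accept every child here.
def isnode(x):
    return True

# Hypothetical adjacency: node -> list of (edge_name, child_node) pairs.
graph = {
    'root': [('has_a', 'branch'), ('has_b', 'leaf1')],
    'branch': [('has_c', 'leaf2')],
    'leaf1': [],
    'leaf2': [],
}

paths_from_root(graph, 'root')
# -> {'root': [], 'branch': ['has_a'], 'leaf1': ['has_b'], 'leaf2': ['has_a', 'has_c']}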
def list_to_dict(l:Sequence, f:Optional[Callable]=None) -> Dict:
    """ Convert the list to a dictionary in which keys and values are adjacent
    in the list. Optionally, a function `f` can be passed to apply to each value
    before adding it to the dictionary.

    Parameters
    ----------
    l: typing.Sequence
        The list of items

    f: typing.Callable
        A function to apply to each value before inserting it into the list.
        For example, `float` could be passed to convert each value to a float.

    Returns
    -------
    d: typing.Dict
        The dictionary, defined as described above

    Examples
    --------

    .. code-block:: python

        l = ["key1", "value1", "key2", "value2"]
        list_to_dict(l, f) == {"key1": f("value1"), "key2": f("value2")}
    """
    if len(l) % 2 != 0:
        msg = ("[collection_utils.list_to_dict]: the list must contain an even "
               "number of elements")
        raise ValueError(msg)

    if f is None:
        f = lambda x: x

    keys = l[::2]
    values = l[1::2]

    d = {k: f(v) for k, v in zip(keys, values)}
    return d
21,423
def inner(a, b): """Computes an inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation). Parameters ---------- a, b : array_like If *a* and *b* are nonscalar, their shape must match. Returns ------- out : ndarray out.shape = a.shape[:-1] + b.shape[:-1] Restriction ----------- If *a* or *b* is not 1-D array : *NotImplementedError* occurs. Note ---- For vectors (1-D arrays) it computes the ordinary inner-product:: import nlcpy as vp vp.inner(a, b) # equivalent to sum(a[:]*b[:]) if *a* or *b* is scalar, in which case:: vp.inner(a, b) # equivalent to a*b See Also -------- dot : Computes a dot product of two arrays. Examples -------- Ordinary inner product for vectors: >>> import nlcpy as vp >>> a = vp.array([1,2,3]) >>> b = vp.array([0,1,0]) >>> vp.inner(a, b) array(2) An example where b is a scalar: >>> vp.inner(vp.eye(2), 7) array([[7., 0.], [0., 7.]]) """ a = nlcpy.asanyarray(a) b = nlcpy.asanyarray(b) if a.ndim == 0 or b.ndim == 0: return ufunc_op.multiply(a, b) elif a.ndim == 1 and b.ndim == 1: return cblas_wrapper.cblas_dot(a, b) else: raise NotImplementedError("Only 1-D array is supported.")
21,424
def remove_list_redundancies(lst: Sequence[T]) -> list[T]:
    """
    Used instead of list(set(l)) to maintain order
    Keeps the last occurrence of each element
    """
    return list(reversed(dict.fromkeys(reversed(lst))))
21,425
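A one-line illustration of remove_list_redundancies above, using an arbitrary input list.

remove_list_redundancies([3, 1, 3, 2, 1])
# -> [3, 2, 1]: duplicates are dropped while keeping each element's last occurrence, in order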
def concatenate(arrays, axis):
    """Concatenate along axis. Differs from numpy.concatenate in that it works if the axis doesn't exist."""
    logger.debug('Applying asarray to each element of arrays.')
    arrays = [np.asarray(array) for array in arrays]
    logger.debug('Adding axes to each element of arrays as necessary')
    if axis >= 0:
        arrays = [array[(Ellipsis,) + (None,) * max(axis - array.ndim + 1, 0)]
                  for array in arrays]
        # [array[[Ellipsis]+[None]*max(axis-array.ndim+1,0)] for array in arrays]
    else:
        arrays = [array[(None,) * max(-axis - array.ndim, 0) + (Ellipsis,)]
                  for array in arrays]
        # arrays=[array[[None]*max(-axis-array.ndim,0)+[Ellipsis]] for array in arrays]
    logger.debug('Calling numpy.concatenate')
    return np.concatenate(arrays, axis)
21,426
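A short sketch of how the concatenate helper above pads missing axes before delegating to numpy; the shapes are made up.

import numpy as np

a = np.zeros((2, 3))
b = np.ones((2, 3))

# np.concatenate([a, b], axis=2) would raise an AxisError because the inputs are 2-D.
out = concatenate([a, b], axis=2)
print(out.shape)   # (2, 3, 2): each input was expanded to shape (2, 3, 1) first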
def s7_blastn_xml(qry, base, threads):
    """ run blastn with xml output
        qry and base are absolute paths """
    print("Step7 ... :" + qry)
    os.makedirs(os.path.join(hivdrm_work_dir, s7_prefix), exist_ok = True)
    qry_file = os.path.basename(qry)
    base_file = os.path.basename(base)
    sample_name = os.path.splitext(qry_file)[0]
    result_file = f"{sample_name}.xml"
    result_path = os.path.realpath(os.path.join(hivdrm_work_dir, s7_prefix, result_file))
    if os.path.exists(result_path):
        return result_file
    cmd = (f"blastn -num_threads {threads} "
           f"-query {qry} "
           f"-db {base} "
           f"-out {result_path} "
           f"-dust no "
           f"-num_alignments 1 "
           f"-outfmt 5")
    subprocess.check_call(cmd, shell = True)
    return result_file
21,427
def bpformat(bp):
    """
    Format the value like a 'human-readable' file size (i.e. 13 Kbp, 4.1 Mbp,
    102 bp, etc.).
    """
    try:
        bp = int(bp)
    except (TypeError, ValueError, UnicodeDecodeError):
        return avoid_wrapping("0 bp")

    def bp_number_format(value):
        return formats.number_format(round(value, 1), 1)

    kbp = 1 << 10
    mbp = 1 << 20
    gbp = 1 << 30
    tbp = 1 << 40
    pbp = 1 << 50

    negative = bp < 0
    if negative:
        bp = -bp  # Allow formatting of negative numbers.

    if bp < kbp:
        value = "%(size)d bp" % {"size": bp}
    elif bp < mbp:
        value = "%s Kbp" % bp_number_format(bp / kbp)
    elif bp < gbp:
        value = "%s Mbp" % bp_number_format(bp / mbp)
    elif bp < tbp:
        value = "%s Gbp" % bp_number_format(bp / gbp)
    elif bp < pbp:
        value = "%s Tbp" % bp_number_format(bp / tbp)
    else:
        # Fixed: the original divided by bp instead of pbp here.
        value = "%s Pbp" % bp_number_format(bp / pbp)

    if negative:
        value = "-%s" % value
    return avoid_wrapping(value)
21,428
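A few illustrative calls to bpformat above. The exact decimal rendering depends on Django's locale-aware number_format, so the outputs shown are only the expected defaults.

bpformat(512)        # -> "512 bp"
bpformat(13312)      # -> "13.0 Kbp"  (13312 / 1024 == 13.0)
bpformat(3 << 30)    # -> "3.0 Gbp"
bpformat(-2048)      # -> "-2.0 Kbp"
bpformat("oops")     # -> "0 bp"      (unparseable input falls back to zero)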
def get_module_id_from_event(event):
    """
    Helper function to get the module_id from an EventHub message
    """
    # Annotation keys are bytes, so encode the key once and use it for both the
    # membership test and the lookup (the original tested a differently spelled,
    # un-encoded key, which could never match).
    key = "iothub-connection-module-id".encode()
    if key in event.message.annotations:
        return event.message.annotations[key].decode()
    else:
        return None
21,429
def pipe_literal_representer(dumper, data):
    """Create a representer for pipe literals, used internally for pyyaml."""
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
21,430
def verify_remote_versions(setup):
    """Examine the remote versions."""
    setup_ver = setup.VER
    gc_ver, _, _ = release.get_last_google_code_version(setup.NAME)
    pypi_ver, _, _ = pypi_list.get_latest_version(setup.NAME)
    if gc_ver and gc_ver == setup_ver:
        print(' code.google.com version is up-to-date')
    else:
        print('** Note: code.google.com version is at %r and needs to be uploaded' % gc_ver)
    if pypi_ver and pypi_ver == setup_ver:
        print(' pypi version is up-to-date')
    else:
        print('** Note: pypi.python.org version is at %r and needs to be uploaded' % pypi_ver)
21,431
def RecognitionNeuralNetworkModelSmall(ih, iw, ic, nl): """ A simple model used to test the machinery on TrainSmall2. ih, iw, ic - describe the dimensions of the input image mh, mw - describe the dimensions of the output mask """ dropout = 0.1 model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=(ih, iw, ic))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(dropout)) model.add(Conv2D(32, (3, 3), activation="relu")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(dropout)) model.add(Conv2D(32, (3, 3), activation="relu")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(dropout)) model.add(Flatten()) model.add(Dense(32, activation="relu")) #model.add(Dropout(0.5)) model.add(Dense((nl), activation="softmax")) model.compile(loss="categorical_crossentropy", optimizer="adadelta", metrics=["accuracy"]) print("\n ---> Model summary <--- \n") model.summary() return model
21,432
def _quick_rec_str(rec):
    """try to print an identifiable description of a record"""
    if rec['tickets']:
        return "[tickets: %s]" % ", ".join(rec["tickets"])
    else:
        return "%s..." % rec["raw_text"][0:25]
21,433
def A006577(start: int = 0, limit: int = 20) -> Collection[int]:
    """Number of halving and tripling steps to reach 1 in '3x+1' problem, or -1 if 1 is never reached."""

    def steps(n: int) -> int:
        if n == 1:
            return 0
        x = 0
        while True:
            if n % 2 == 0:
                n //= 2
            else:
                n = 3 * n + 1
            x += 1
            if n < 2:
                break
        return x

    return [steps(n) for n in range(start, start + limit)]
21,434
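A quick check of A006577 above against the first terms of the OEIS sequence (offset 1).

A006577(start=1, limit=10)
# -> [0, 1, 7, 2, 5, 8, 16, 3, 19, 6]
# e.g. 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 takes 7 steps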
def split_to_sentences_per_pages(text):
    """ splitting pdfminer outputted text into list of pages and cleanup paragraphs"""
    def split_into_sentences(line):
        """cleanup paragraphs"""
        return ifilter(None, (i.strip() for i in line.split('\n\n')))
    return ifilter(None, imap(split_into_sentences, text.split('\x0c')))
21,435
def lookup(_id=None, article_id=None, user_id=None, mult=False):
    """ Lookup a reaction in our g.db """
    query = {}
    if article_id:
        query["article_id"] = ObjectId(article_id)
    if user_id:
        query["user_id"] = ObjectId(user_id)
    if _id:
        query["_id"] = ObjectId(_id)
    if mult:
        return g.db.reactions.find(query)
    else:
        return g.db.reactions.find_one(query)
21,436
def connection(user='m001-student', password='m001-mongodb-basics'):
    """connection: This function connects mongoDB to get MongoClient

    Args:
        user (str, optional): It's user's value for URL ATLAS srv. Defaults to 'm001-student'.
        password (str, optional): It's password's value for URL ATLAS srv. Defaults to 'm001-mongodb-basics'.

    Returns:
        object: Returns a MongoClient object
    """
    try:
        MONGO_URL_ATLAS = f'mongodb+srv://{user}:{password}@sandbox.dec55.mongodb.net/?retryWrites=true&w=majority'
        mongo = pymongo.MongoClient(MONGO_URL_ATLAS, tlsAllowInvalidCertificates=False)
    except pymongo.errors.ConnectionFailure as conn_error:
        print("ERROR - Cannot connect to DataBase", conn_error)
    else:
        print('Correct Connection!!')
        return mongo
21,437
async def get_selection(ctx, choices, delete=True, pm=False, message=None, force_select=False): """Returns the selected choice, or None. Choices should be a list of two-tuples of (name, choice). If delete is True, will delete the selection message and the response. If length of choices is 1, will return the only choice unless force_select is True. :raises NoSelectionElements if len(choices) is 0. :raises SelectionCancelled if selection is cancelled.""" if len(choices) == 0: raise NoSelectionElements() elif len(choices) == 1 and not force_select: return choices[0][1] page = 0 pages = paginate(choices, 10) m = None selectMsg = None def chk(msg): valid = [str(v) for v in range(1, len(choices) + 1)] + ["c", "n", "p"] return msg.author == ctx.author and msg.channel == ctx.channel and msg.content.lower() in valid for n in range(200): _choices = pages[page] names = [o[0] for o in _choices if o] embed = discord.Embed() embed.title = "Multiple Matches Found" selectStr = "Which one were you looking for? (Type the number or \"c\" to cancel)\n" if len(pages) > 1: selectStr += "`n` to go to the next page, or `p` for previous\n" embed.set_footer(text=f"Page {page + 1}/{len(pages)}") for i, r in enumerate(names): selectStr += f"**[{i + 1 + page * 10}]** - {r}\n" embed.description = selectStr embed.colour = random.randint(0, 0xffffff) if message: embed.add_field(name="Note", value=message, inline=False) if selectMsg: try: await selectMsg.delete() except: pass if not pm: selectMsg = await ctx.channel.send(embed=embed) else: embed.add_field(name="Instructions", value="Type your response in the channel you called the command. This message was PMed to " "you to hide the monster name.", inline=False) selectMsg = await ctx.author.send(embed=embed) try: m = await ctx.bot.wait_for('message', timeout=30, check=chk) except asyncio.TimeoutError: m = None if m is None: break if m.content.lower() == 'n': if page + 1 < len(pages): page += 1 else: await ctx.channel.send("You are already on the last page.") elif m.content.lower() == 'p': if page - 1 >= 0: page -= 1 else: await ctx.channel.send("You are already on the first page.") else: break if delete and not pm: try: await selectMsg.delete() await m.delete() except: pass if m is None or m.content.lower() == "c": raise SelectionCancelled() return choices[int(m.content) - 1][1]
21,438
def data_dir():
    """
    Get SUNCG data path (must be symlinked to ~/.suncg)
    :return: Path to suncg dataset
    """
    if 'SUNCG_DATA_DIR' in os.environ:
        path = os.path.abspath(os.environ['SUNCG_DATA_DIR'])
    else:
        path = os.path.join(os.path.abspath(os.path.expanduser('~')), ".suncg")
    rooms_exist = os.path.isdir(os.path.join(path, "room"))
    houses_exist = os.path.isdir(os.path.join(path, "house"))
    if not os.path.isdir(path) or not rooms_exist or not houses_exist:
        raise Exception("Couldn't find the SUNCG dataset in '~/.suncg' or with environment variable SUNCG_DATA_DIR. "
                        "Please symlink the dataset there, so that the folders "
                        "'~/.suncg/room', '~/.suncg/house', etc. exist.")
    return path
21,439
def total_variation(images, name=None): """Calculate and return the total variation for one or more images. (A mirror to tf.image total_variation) The total variation is the sum of the absolute differences for neighboring pixel-values in the input images. This measures how much noise is in the images. This can be used as a loss-function during optimization so as to suppress noise in images. If you have a batch of images, then you should calculate the scalar loss-value as the sum: `loss = tf.reduce_sum(tf.image.total_variation(images))` This implements the anisotropic 2-D version of the formula described here: https://en.wikipedia.org/wiki/Total_variation_denoising Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for the operation (optional). Raises: ValueError: if images.shape is not a 3-D or 4-D vector. Returns: The total variation of `images`. If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the total variation for each image in the batch. If `images` was 3-D, return a scalar float with the total variation for that image. """ return tf.image.total_variation(images=images, name=name)
21,440
def get_vivareal_data(driver_path: str, address: str, driver_options: Options = None) -> list: """ Scrapes vivareal site and build a array of maps in the following format: [ { "preço": int, "valor_de_condominio": int, "banheiros": int, "quartos": int, "área": int, "vagas": int, "endereço": str "texto": str }, ... ] :param address: Address to search for :param driver_options: driver options :return: json like string """ # Initialize browser chrome = init_driver(driver_path, driver_options) chrome.get(SITE) # Collect data try: accept_cookies(chrome) select_rent_option(chrome) send_address(chrome, address) real_state_elements = collect_real_state_raw_data(chrome) real_state_parsed_data = collect_elements_data(real_state_elements, chrome) except Exception as e: print(e) real_state_parsed_data = None finally: chrome.close() return real_state_parsed_data
21,441
def extract_image(data):
    """Tries and extracts the image inside data (which is a zipfile)"""
    with ZipFile(BytesIO(data)) as zip_file:
        for name in zip_file.namelist()[::-1]:
            try:
                return Image.open(BytesIO(zip_file.read(name)))
            except UnidentifiedImageError:
                logging.warning("%s does not seem to be an image", name)
21,442
def construct_getatt(node):
    """
    Reconstruct !GetAtt into a list
    """
    if isinstance(node.value, (six.text_type, six.string_types)):
        return node.value.split(".")
    elif isinstance(node.value, list):
        return [s.value for s in node.value]
    else:
        raise ValueError("Unexpected node type: {}".format(type(node.value)))
21,443
def check_root():
    """
    Check whether the program is running as root or not.

    Args:
        None

    Raises:
        None

    Returns:
        bool: True if running as root, else False
    """
    user = os.getuid()
    return user == 0
21,444
def rss(x, y, w, b):
    """residual sum of squares for linear regression"""
    return sum((yi - (xi * wi + b)) ** 2 for xi, yi, wi in zip(x, y, w))
21,445
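A tiny worked example of rss above; note that it expects one weight per sample, matching the zip over x, y and w.

x = [1.0, 2.0, 3.0]
y = [3.0, 5.0, 8.0]
w = [2.0, 2.0, 2.0]
b = 1.0

rss(x, y, w, b)
# residuals: 3-(1*2+1)=0, 5-(2*2+1)=0, 8-(3*2+1)=1, so the sum of squares is 1.0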
def sortFiles(path, dst, dryRun = False, reverse = False):
    """ Rename the source files to a descriptive name and place them in the dst folder
        path    : The source folder containing the files
        dst     : The destination folder to place the files
        dryRun  : If True, do not make any changes on disk
        reverse : If True, revert the renaming Operation
    """
    fileMap = getTeachingMap(path, dst)
    tmpKey = fileMap.keys()[0]
    tmpValue = fileMap[tmpKey]
    print(reverse)
    if (reverse):
        tmp = tmpKey
        tmpKey = tmpValue
        tmpValue = tmp
    print('Example output: \n' + tmpKey + ' -> ' + tmpValue)
    confirm = raw_input('Apply? [y/N] ')
    if (confirm.lower() == 'y'):
        renameFiles(fileMap, dryRun, reverse)
    else:
        print('Operation aborted. No changes has been made.')
21,446
def test__read_json_from_gz(mock_pathlib_path_isfile): """Tests function ``_read_json_from_gz()``.""" from dirty_cat.datasets.fetching import _read_json_from_gz from json import JSONDecodeError dummy_file_path = Path("file/path.gz") # Passing an invalid file path (does not exist). mock_pathlib_path_isfile.return_value = False with pytest.raises(FileNotFoundError): assert _read_json_from_gz(dummy_file_path) # Passing a valid file path, # but reading it does not return JSON-encoded data. mock_pathlib_path_isfile.return_value = True with mock.patch("gzip.open", mock_open( read_data='This is not JSON-encoded data!')) as _: with pytest.raises(JSONDecodeError): assert _read_json_from_gz(dummy_file_path) # Passing a valid file path, and reading it # returns valid JSON-encoded data. expected_return_value = {"data": "This is JSON-encoded data!"} with mock.patch("gzip.open", mock_open( read_data='{"data": "This is JSON-encoded data!"}')) as _: assert _read_json_from_gz(dummy_file_path) == expected_return_value
21,447
def get_index(lang, index):
    """
    Given an integer index this function will return the proper string
    version of the index based on the language and other considerations

    Parameters
    ----------
    lang : str
        One of the supported languages
    index : int

    Returns
    -------
    str
        The string corresponding to the correct index to be formatted into the code
    """
    retval = None
    if lang in ['fortran', 'matlab']:
        return str(index + 1)
    if lang in ['c', 'cuda']:
        return str(index)
21,448
def sampleFunction(x: int, y: float) -> float:
    """
    Multiply int and float sample.

    :param x: x value
    :type x: int
    :param y: y value
    :type y: float
    :return: result
    :return type: float
    """
    return x * y
21,449
def n_elements_unique_intersection_np_axis_0(a: np.ndarray, b: np.ndarray) -> int:
    """
    A lot faster than calculating the real intersection:

    Example with small numbers:
        a = [1, 4, 2, 13]          # len = 4
        b = [1, 4, 9, 12, 25]      # (len = 5)
        # a, b need to be unique!!!
        unique(concat(a, b)) = [1, 4, 2, 13, 9, 12, 25]  # (len = 7)
        intersect(a, b) = [1, 4]   # (len = 2) too expensive to call

        # Formula (fast to calculate)
        len(intersect(a, b)) = len(b) - n_elements_in_b_and_not_in_a
        len(intersect(a, b)) = len(b) - (len(unique(concat(a, b))) - len(a))
    """
    a = np.unique(a, axis=0)
    b = np.unique(b, axis=0)
    return len(b) - (len(np.unique(np.concatenate((a, b), axis=0), axis=0)) - len(a))
21,450
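A quick numeric check of the identity used above, applied to 2-D arrays where axis=0 compares whole rows.

import numpy as np

a = np.array([[0, 0], [1, 1], [2, 2]])
b = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])

n_elements_unique_intersection_np_axis_0(a, b)
# -> 2, because rows [1, 1] and [2, 2] appear in both:
#    len(b)=4, unique rows of concat(a, b)=5, len(a)=3, so 4 - (5 - 3) = 2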
def test_power():
    """
    Test raising quantities to a power.
    """
    values = [2 * kilogram, np.array([2]) * kilogram, np.array([1, 2]) * kilogram]
    for value in values:
        assert_quantity(value ** 3, np.asarray(value) ** 3, kilogram ** 3)
        # Test raising to a dimensionless quantity
        assert_quantity(value ** (3 * volt / volt), np.asarray(value) ** 3, kilogram ** 3)
        with pytest.raises(DimensionMismatchError):
            value ** (2 * volt)
        with pytest.raises(TypeError):
            value ** np.array([2, 3])
21,451
def Setup(test_options): """Runs uiautomator tests on connected device(s). Args: test_options: A UIAutomatorOptions object. Returns: A tuple of (TestRunnerFactory, tests). """ test_pkg = test_package.TestPackage(test_options.uiautomator_jar, test_options.uiautomator_info_jar) tests = test_pkg.GetAllMatchingTests(test_options.annotations, test_options.exclude_annotations, test_options.test_filter) if not tests: logging.error('No uiautomator tests to run with current args.') def TestRunnerFactory(device, shard_index): return test_runner.TestRunner( test_options, device, shard_index, test_pkg) return (TestRunnerFactory, tests)
21,452
def barcode_junction_counts(inhandle):
    """Return count dict from vdjxml file with counts[barcode][junction]"""
    counts = dict()
    for chain in vdj.parse_VDJXML(inhandle):
        try:    # chain may not have barcode
            counts_barcode = counts.setdefault(chain.barcode, dict())
        except AttributeError:
            continue
        counts_barcode[chain.junction] = counts_barcode.get(chain.junction, 0) + 1
    return counts
21,453
def is_following(user, actor):
    """
    Returns True if the user is following the actor

    ::

        {% if request.user|is_following:another_user %}
            You are already following {{ another_user }}
        {% endif %}
    """
    return Follow.objects.is_following(user, actor)
21,454
def test_workspace_lifecycle(exopy_qtbot, workspace, tmpdir): """Test the workspace life cycle. """ workbench = workspace.plugin.workbench log = workbench.get_plugin('exopy.app.logging') # Check UI creation def assert_ui(): assert workspace._selection_tracker._thread assert workspace.last_selected_measurement assert workspace.content assert workspace.dock_area assert workbench.get_manifest('exopy.measurement.workspace.menus') exopy_qtbot.wait_until(assert_ui) # Check log handling assert 'exopy.measurement.workspace' in log.handler_ids # Check engine handling engine = workbench.get_manifest('test.measurement').find('dummy_engine') assert engine.workspace_contributing # Check measurement creation assert len(workspace.plugin.edited_measurements.measurements) == 1 assert workspace.plugin.edited_measurements.measurements[0].monitors # Create a new measurement and enqueue it workspace.new_measurement() def assert_measurement_created(): assert len(workspace.plugin.edited_measurements.measurements) == 2 exopy_qtbot.wait_until(assert_measurement_created) m = workspace.plugin.edited_measurements.measurements[1] m.root_task.default_path = str(tmpdir) assert workspace.enqueue_measurement(m) exopy_qtbot.wait(10) # Create a tool edition window for d in workspace.dock_area.dock_items(): if d.name == 'meas_0': edition_view = d ed = edition_view.dock_widget().widgets()[0] btn = ed.widgets()[4] btn.clicked = True exopy_qtbot.wait(10) # Check observance of engine selection. workspace.plugin.selected_engine = '' assert not engine.workspace_contributing workspace.plugin.selected_engine = 'dummy' def assert_contrib(): assert engine.workspace_contributing exopy_qtbot.wait_until(assert_contrib) # Test stopping the workspace core = workbench.get_plugin('enaml.workbench.core') cmd = 'enaml.workbench.ui.close_workspace' core.invoke_command(cmd, {'workspace': 'exopy.measurement.workspace'}) assert workspace.plugin.workspace is None assert not engine.workspace_contributing assert workbench.get_manifest('exopy.measurement.workspace.menus') is None assert 'exopy.measurement.workspace' not in log.handler_ids assert not workspace._selection_tracker._thread.is_alive() # Test restarting now that we have two edited measurement. cmd = 'enaml.workbench.ui.select_workspace' core.invoke_command(cmd, {'workspace': 'exopy.measurement.workspace'}) assert len(workspace.plugin.edited_measurements.measurements) == 2 # Check that all dock items have been restored. names = [d.name for d in workspace.dock_area.dock_items()] for n in ('meas_0', 'meas_1', 'meas_0_tools'): assert n in names # Create a false monitors_window workspace.plugin.processor.monitors_window = Window() workspace.plugin.processor.monitors_window.show() exopy_qtbot.wait(10) # Stop again core = workbench.get_plugin('enaml.workbench.core') cmd = 'enaml.workbench.ui.close_workspace' core.invoke_command(cmd, {'workspace': 'exopy.measurement.workspace'}) def assert_not_visible(): assert not workspace.plugin.processor.monitors_window.visible exopy_qtbot.wait_until(assert_not_visible)
21,455
def rf_make_ones_tile(num_cols: int, num_rows: int, cell_type: Union[str, CellType] = CellType.float64()) -> Column:
    """Create column of constant tiles of one"""
    jfcn = RFContext.active().lookup('rf_make_ones_tile')
    return Column(jfcn(num_cols, num_rows, _parse_cell_type(cell_type)))
21,456
def test_addressing_user_email(): """Tests making a blockchain address for an user email and that it: 1. is a 35-byte non-zero hexadecimal string 2. is unique - a different user email yields a different address 3. is deterministic - same user email yields same address, even if different case 4. the addresser recognizes the address as an user email 5. the addresser can parse the address into its components 6. the identifier is a hash of the user email""" user_id = helper.user.id() email = helper.user.email() address = addresser.user.email.address(user_id, email) assert assert_is_address(address) assert address != addresser.user.email.address(user_id, helper.user.email()) assert address == addresser.user.email.address(user_id, email) assert address == addresser.user.email.address(user_id, email.upper()) assert addresser.get_address_type(address) == addresser.AddressSpace.USER_EMAIL parsed = addresser.parse(address) assert parsed.object_type == addresser.ObjectType.USER assert parsed.related_type == addresser.ObjectType.EMAIL assert parsed.relationship_type == addresser.RelationshipType.OWNER assert assert_is_identifier(parsed.object_id) assert assert_is_identifier(parsed.related_id) assert parsed.object_id == addresser.user.hash(user_id) assert parsed.related_id == addresser.email.hash(email)
21,457
def get_block_hash_from_height(height):
    """
    Request a block hash by specifying the height
    :param str height: a bitcoin block height
    :return: a bitcoin block hash
    """
    resource = f'block-height/{height}'
    return call_api(resource)
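# Hedged usage sketch of get_block_hash_from_height() above: call_api is assumed to
# wrap a Blockstream/Esplora-style REST endpoint, so this performs a network request.
block_hash = get_block_hash_from_height('650000')
print(block_hash)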
21,458
def analyze_json( snippet_data_json: str, root_dir: str ) -> Tuple[Set[str], Set[str], Set[str], List[pdd.PolyglotDriftData]]: """Perform language-agnostic AST analysis on a directory This function processes a given directory's language-specific analysis (stored in a polyglot_snippet_data.json file) into a list of automatically detected snippets. It then augments the automatic detection results with useful manual data (specified in .drift-data.yml files). Finally, it repackages all this data into a tuple containing 4 useful lists of data as shown in the 'returns' section. Arguments: snippet_data_json: A path to a polyglot_snippet_data.json file generated for the specified root_dir root_dir: The root directory to perform AST analysis on Returns: A tuple containing the following: - A list of tags found (via grep/text search) within the given directory and its subdirectories - A list of tags detected (by the AST parser) within the given directory and its subdirectories - A list of tags that the AST parser detected, but chose to ignore (due to constants or user specification in .drift-data.yml files) - A list of snippet objects (as typed NamedTuples) detected by the AST parser in the given directory and its subdirectories """ tuple_methods, test_method_map = _get_data(snippet_data_json) source_filepaths = set(method.source_path for method in tuple_methods) grep_tags: Set[str] = set() ignored_tags: Set[str] = set() for source_file in source_filepaths: grep_tag_names, ignored_tag_names = ( _process_file_region_tags( source_file, snippet_data_json, tuple_methods)) grep_tags = grep_tags.union(grep_tag_names) ignored_tags = ignored_tags.union(ignored_tag_names) source_methods = [method for method in tuple_methods if method.region_tags or method.name in constants.SNIPPET_INVOCATION_METHODS] source_methods = _dedupe_source_methods(source_methods) _store_tests_on_methods(source_methods, test_method_map) polyglot_parser.add_children_drift_data(source_methods) yaml_utils.add_yaml_data_to_source_methods(source_methods, root_dir) source_tags: Set[str] = set() for method in source_methods: source_tags = source_tags.union(set(method.region_tags)) # Remove automatically ignored region tags from region tag lists grep_tags = set(tag for tag in grep_tags if tag not in ignored_tags) source_tags = set(tag for tag in source_tags if tag not in ignored_tags) # Add manually ignored (via yaml) tags to ignored tags list # These should *not* overlap w/ source_tags, but we # check that in validate_yaml_syntax - *not here!* ignored_tags = ignored_tags.union( yaml_utils.get_untested_region_tags(root_dir)) return grep_tags, source_tags, ignored_tags, source_methods
21,459
def hover_over_element(id_or_elem): """Hover over an element using Selenium ActionChains. :argument id_or_elem: The identifier of the element, or its element object. """ elem = _get_elem(id_or_elem) action = action_chains.ActionChains(_test.browser) action.move_to_element(elem) action.perform()
21,460
def delete_metrics(conn, metric_names_list): """ :type conn: pyKairosDB.connect object :param conn: the interface to the requests library :type metric_names_list: list of str :param metric_names_list: A list of metric names to be deleted """ for metric in metric_names_list: delete_metric(conn, metric)
21,461
def list_clusters(event, context): """List clusters""" clusters = [] cluster_items = storage.get_cluster_table().scan() for cluster in cluster_items.get('Items', []): clusters.append(cluster['id']) return { "statusCode": 200, "body": json.dumps(clusters) }
21,462
def run(): """Tally end-of-timestep quantities.""" print("\n" + "-" * 79) print("Tally step ({:4d})".format(time.step)) print("-" * 79) # Start-of-step radiation energy density radnrgdens = np.zeros(mesh.ncells) radnrgdens[:] = phys.a * mesh.temp[:] ** 4 # Temperature increase nrg_inc = np.zeros(mesh.ncells) nrg_inc[:] = ( mesh.nrgdep[:] / mesh.dx - mesh.fleck[:] * phys.c * time.dt * mesh.sigma_p[:] * radnrgdens[:] ) mesh.temp[:] = mesh.temp[:] + nrg_inc[:] / mat.bee print("\nMesh temperature:") print(mesh.temp)
21,463
def round(data): """Compute element-wise round of data. Parameters ---------- data : relay.Expr The input data Returns ------- result : relay.Expr The computed result. """ return _make.round(data)
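# Hedged usage sketch of the element-wise round op through the public Relay API;
# it assumes TVM is installed and that the wrapper above is what backs relay.round.
from tvm import relay

x = relay.var("x", shape=(3,), dtype="float32")
y = relay.round(x)   # builds a Relay expression, no values are computed yet
print(y)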
21,464
def deserialize(member, class_indexing):
    """Deserialize an XML annotation member into [class_num, x_min, y_min, width, height, ignore];
    return None if the class name is not present in class_indexing."""
    class_name = member[0].text
    if class_name in class_indexing:
        class_num = class_indexing[class_name]
    else:
        return None
    bnx = member.find('bndbox')
    box_x_min = float(bnx.find('xmin').text)
    box_y_min = float(bnx.find('ymin').text)
    box_x_max = float(bnx.find('xmax').text)
    box_y_max = float(bnx.find('ymax').text)
    width = float(box_x_max - box_x_min + 1)
    height = float(box_y_max - box_y_min + 1)
    # try:
    #     ignore = float(member.find('ignore').text)
    # except ValueError:
    ignore = 0.0
    return [class_num, box_x_min, box_y_min, width, height, ignore]
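# Minimal usage sketch of deserialize() with a hand-built VOC-style <object> element;
# the class name "dog" and the class_indexing mapping are made up for illustration.
import xml.etree.ElementTree as ET

member = ET.fromstring(
    "<object><name>dog</name>"
    "<bndbox><xmin>10</xmin><ymin>20</ymin><xmax>60</xmax><ymax>80</ymax></bndbox>"
    "</object>"
)
print(deserialize(member, {"dog": 1}))  # -> [1, 10.0, 20.0, 51.0, 61.0, 0.0]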
21,465
def interval_weighting(intervals, lower, upper): """ Compute a weighting function by finding the proportion within the dataframe df's lower and upper bounds. Note: intervals is of the form ((lower, upper, id), ...) """ if len(intervals) == 1: return np.asarray([1]) wts = np.ones(len(intervals)) lower_limit, upper_limit = intervals[0], intervals[-1] wts[0] = (lower_limit[1] - lower) / np.diff(lower_limit[:2]) wts[-1] = (upper - upper_limit[0]) / np.diff(upper_limit[:2]) return wts
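# Minimal usage sketch of interval_weighting() with made-up (lower, upper, id) intervals:
# half of the first and half of the last interval fall inside [5, 25], so each gets weight 0.5.
intervals = ((0, 10, 'a'), (10, 20, 'b'), (20, 30, 'c'))
print(interval_weighting(intervals, lower=5, upper=25))  # -> [0.5 1.  0.5]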
21,466
async def monitor_buttons(reverse_pin, slower_pin, faster_pin, controls): """Monitor buttons that reverse direction and change animation speed. Assume buttons are active low. """ with keypad.Keys( (reverse_pin, slower_pin, faster_pin), value_when_pressed=False, pull=True ) as keys: while True: key_event = keys.events.get() if key_event and key_event.pressed: key_number = key_event.key_number if key_number == 0: controls.reverse = not controls.reverse elif key_number == 1: # Lengthen the interval. controls.wait = controls.wait + 0.001 elif key_number == 2: # Shorten the interval. controls.wait = max(0.0, controls.wait - 0.001) # Let another task run. await asyncio.sleep(0)
21,467
def centroid_precursor_frame(mzml_data_struct):
    """
    Read and return a centroid spectrum for a precursor frame

    This function uses the SDK to get and return an MS1 centroid spectrum for the requested frame.

    Parameters
    ----------
    mzml_data_struct : dict
        structure of the mzml data

    Returns
    -------
    numpy.ndarray
        array of the m/z and intensity lists, [[mz], [i]]
    """
    precursor_frame_id = mzml_data_struct['current_precursor']['id']
    num_scans = mzml_data_struct['td'].conn.execute("SELECT NumScans FROM Frames WHERE Id={0}".format(precursor_frame_id)).fetchone()[0]
    data_list = mzml_data_struct['td'].extractCentroidedSpectrumForFrame(precursor_frame_id, 0, num_scans)
    return np.array(data_list)
21,468
def floor_ts( ts: Union[pd.Timestamp, pd.DatetimeIndex], freq=None, future: int = 0 ) -> Union[pd.Timestamp, pd.DatetimeIndex]: """Floor timestamp to period boundary. i.e., find (latest) period start that is on or before the timestamp. Parameters ---------- ts : Timestamp or DatetimeIndex. Timestamp(s) to floor. freq : {'15T' (quarter-hour), 'H' (hour), 'D' (day), 'MS' (month), 'QS' (quarter), 'AS' (year)}, optional What to floor it to, e.g. 'QS' to get start of quarter it's contained in. If none specified, use .freq attribute of timestamp. future : int, optional (default: 0) 0 to get latest period start that is ``ts`` or earlier. 1 (-1) to get start of period after (before) that. 2 (-2) .. etc. Returns ------- Timestamp or DatetimeIndex (same type as ``ts``). At begin of period. Notes ----- If ``ts`` is exactly at the start of the period, ceil_ts(ts, 0) == floor_ts(ts, 0) == ts. Examples -------- >>> floor_ts(pd.Timestamp('2020-04-21 15:42'), 'AS') Timestamp('2020-01-01 00:00:00') >>> floor_ts(pd.Timestamp('2020-04-21 15:42'), 'MS') Timestamp('2020-04-01 00:00:00') >>> floor_ts(pd.Timestamp('2020-04-21 15:42'), '15T') Timestamp('2020-04-21 15:30:00') >>> floor_ts(pd.Timestamp('2020-04-21 15:42', tz='Europe/Berlin'), 'MS') Timestamp('2020-04-01 00:00:00+0200', tz='Europe/Berlin') >>> floor_ts(pd.Timestamp('2020-04-21 15:42'), 'MS', 2) Timestamp('2020-06-01 00:00:00') """ if freq is None: freq = ts.freq # Rounding to short (< day) frequencies. try: # Can only infer if it's an index. kwargs = {"ambiguous": "infer"} if isinstance(ts, pd.DatetimeIndex) else {} if freq == "15T": return ts.floor("15T", **kwargs) + pd.Timedelta(minutes=future * 15) elif freq == "H": return ts.floor("H", **kwargs) + pd.Timedelta(hours=future) except AmbiguousTimeError: # converting to UTC and then flooring to nearest hour. # TODO: this is incorrect for timezones with fractional offset to UTC. return floor_ts(ts.tz_convert("UTC"), freq, future).tz_convert(ts.tz) # Rounding to longer (>= day) frequencies. ts = ts.floor("D") # make sure we return a midnight value if freq == "D": return ts + pd.Timedelta(days=future) elif freq == "MS": return ts + pd.offsets.MonthBegin(1) + pd.offsets.MonthBegin(future - 1) elif freq == "QS": return ( ts + pd.offsets.QuarterBegin(1, startingMonth=1) + pd.offsets.QuarterBegin(future - 1, startingMonth=1) ) elif freq == "AS": return ts + pd.offsets.YearBegin(1) + pd.offsets.YearBegin(future - 1) else: raise ValueError( f"Parameter ``freq`` must be one of {', '.join(FREQUENCIES)}; got {freq}." )
21,469
def start_xmlrpc_server(): """ Initialize the XMLRPC thread """ def register_module(module): for name, function in module.__dict__.items(): if hasattr(function, '__call__'): server.register_function(function, name) print("[+] Starting XMLRPC server: {}:{}".format(HOST, PORT)) server = SimpleXMLRPCServer((HOST, PORT), requestHandler=ReqHandler, logRequests=False, allow_none=True) # register ida python modules register_module(idc) register_module(idautils) register_module(idaapi) server.register_function(versions) server.register_introspection_functions() server.register_instance(PwnGef(server)) print("[+] Registered {} functions.".format(len(server.system_listMethods()))) while True: if hasattr(server, "shutdown") and server.shutdown is True: break server.handle_request() return
21,470
def check_results(jobname, app_config):
    """Return True/False depending on whether there is a non-empty results file."""
    fp = results_file_path(jobname, app_config)
    # if results file exists and it's non-zero size, then true
    return os.path.exists(fp) and os.path.getsize(fp) > 0
21,471
def create_geo_database():
    """
    Create a geo db.
    """
    log.info("Starting to create the geo db")
    log.info("Waiting for the database to be ready")
    log.info(f"Testing connection on host: {ctx.geo_db_hostname} and port {ctx.geo_db_port}")
    # We need to sleep and retry until the db wakes up
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    while True:
        try:
            s.connect((ctx.geo_db_hostname, int(ctx.geo_db_port)))
            s.close()
            break
        except socket.error as ex:
            log.debug("Database not ready..")
            time.sleep(5)  # 5 seconds between tests
    log.info("Geo database is now ready.")
    if create_db(DB_TYPE_GEO):
        if create_geo_db():
            log.info("Geo database creation is complete.")
            return True
        else:
            log.info("Failed to make the geo db, could not create the tables.")
    else:
        log.info("Failed to make the geo db, could not create the database.")
21,472
def createNewForest(): """Returns a dictionary for a new forest data structure.""" forest = {'width': WIDTH, 'height': HEIGHT} for x in range(WIDTH): for y in range(HEIGHT): if (random.randint(1, 10000) / 100) <= INITIAL_TREE_DENSITY: forest[(x, y)] = TREE # Start as a tree. else: forest[(x, y)] = EMPTY # Start as an empty space. return forest
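# Hedged usage sketch of createNewForest(): the module-level constants below are
# illustrative assumptions, since the snippet relies on WIDTH, HEIGHT,
# INITIAL_TREE_DENSITY, TREE and EMPTY being defined elsewhere.
import random

WIDTH, HEIGHT = 10, 6
INITIAL_TREE_DENSITY = 30   # percent chance that a cell starts as a tree
TREE, EMPTY = 'A', ' '

forest = createNewForest()
print(sum(1 for v in forest.values() if v == TREE), "cells start as trees")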
21,473
def vardamp(iter): """ execute trajs in vardamp folder """ #folderpath = '/home/fan/workspace/EOC/matlab/trajs/vardamp/command/' #filenames = glob.glob(folderpath+'*.csv') #savefolder = '/home/fan/workspace/EOC/matlab/trajs/vardamp/record/' folderpath = '/home/fan/catkin_ws/src/maccepavd/pymaccepavd/trajs/trajs'+ str(iter)+'/vardamp/command/' filenames = glob.glob(folderpath+'*.csv') savefolder = '/home/fan/catkin_ws/src/maccepavd/pymaccepavd/trajs/trajs/vardamp/record/' for k in range(len(filenames)): y = exec_traj_csv(folderpath+'traj'+str(k+1)+'.csv') save_dict_csv(y, savefolder+'record_traj'+str(k+1)+'_'+str(iter)+'.csv') rospy.sleep(0.1)
21,474
def test_show_external_log_redirect_link_with_local_log_handler(capture_templates, admin_client, endpoint): """Do not show external links if log handler is local.""" url = f'{endpoint}?dag_id=example_bash_operator' with capture_templates() as templates: admin_client.get(url, follow_redirects=True) ctx = templates[0].local_context assert not ctx['show_external_log_redirect'] assert ctx['external_log_name'] is None
21,475
def test_filter(): """ Base class filter function """ def test(): """ Test the filter function """ try: for i in _TEST_FRAME_.keys(): for j in range(10): test = _TEST_FRAME_.filter(i, "<", j) assert all(map(lambda x: x < j, test[i])) test = _TEST_FRAME_.filter(i, "<=", j) assert all(map(lambda x: x <= j, test[i])) test = _TEST_FRAME_.filter(i, "=", j) assert all(map(lambda x: x == j, test[i])) test = _TEST_FRAME_.filter(i, "==", j) assert all(map(lambda x: x == j, test[i])) test = _TEST_FRAME_.filter(i, '!=', j) assert all(map(lambda x: x != j, test[i])) test = _TEST_FRAME_.filter(i, ">=", j) assert all(map(lambda x: x >= j, test[i])) test = _TEST_FRAME_.filter(i, ">", j) assert all(map(lambda x: x > j, test[i])) except: return False return True return ["vice.core.dataframe.base.filter", test]
21,476
def exp_moving_average(values, window): """ Numpy implementation of EMA """ if window >= len(values): if len(values) == 0: sma = 0.0 else: sma = np.mean(np.asarray(values)) a = [sma] * len(values) else: weights = np.exp(np.linspace(-1., 0., window)) weights /= weights.sum() a = np.convolve(values, weights, mode='full')[:len(values)] a[:window] = a[window] return a
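# Minimal usage sketch of the EMA helper above (assumes numpy is imported as np).
prices = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
print(exp_moving_average(prices, window=3))
# When window >= len(values), every entry is just the simple mean:
print(exp_moving_average(prices[:2], window=5))  # -> [1.5, 1.5]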
21,477
def update_auth_data(auth_data: dict, ex: int = None):
    """
    Update the authentication data.
    :param auth_data: login authentication data
    :param ex: expiration time of the data, in seconds
    """
    RedisUtils().set('token:' + auth_data.get('token'), auth_data, ex)
21,478
def parabolic(f, x):
    """
    Quadratic interpolation in order to estimate the location of a maximum
    https://gist.github.com/endolith/255291

    Args:
        f (ndarray): a vector of samples
        x (int): an index on the vector

    Returns:
        (vx, vy): the vertex coordinates of a parabola passing through x and its neighbors
    """
    xv = 1/2. * (f[x-1] - f[x+1]) / (f[x-1] - 2 * f[x] + f[x+1]) + x
    yv = f[x] - 1/4. * (f[x-1] - f[x+1]) * (xv - x)
    return (xv, yv)
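# Minimal usage sketch: refine the integer argmax into a sub-sample peak estimate.
import numpy as np

f = np.array([1.0, 4.0, 5.0, 3.0, 1.0])
xv, yv = parabolic(f, int(np.argmax(f)))
print(xv, yv)  # roughly 1.83, 5.04 -- the interpolated peak sits left of index 2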
21,479
def texture( mh, tex_info, location, # Upper-right corner of the TexImage node label, # Label for the TexImg node color_socket, alpha_socket=None, is_data=False, ): """Creates nodes for a TextureInfo and hooks up the color/alpha outputs.""" x, y = location pytexture = mh.gltf.data.textures[tex_info.index] if pytexture.sampler is not None: pysampler = mh.gltf.data.samplers[pytexture.sampler] else: pysampler = Sampler.from_dict({}) needs_uv_map = False # whether to create UVMap node # Image Texture tex_img = mh.node_tree.nodes.new('ShaderNodeTexImage') tex_img.location = x - 240, y tex_img.label = label # Get image if pytexture.source is not None: BlenderImage.create(mh.gltf, pytexture.source) pyimg = mh.gltf.data.images[pytexture.source] blender_image_name = pyimg.blender_image_name if blender_image_name: tex_img.image = bpy.data.images[blender_image_name] # Set colorspace for data images if is_data: if tex_img.image: tex_img.image.colorspace_settings.is_data = True # Set filtering set_filtering(tex_img, pysampler) # Outputs mh.node_tree.links.new(color_socket, tex_img.outputs['Color']) if alpha_socket is not None: mh.node_tree.links.new(alpha_socket, tex_img.outputs['Alpha']) # Inputs uv_socket = tex_img.inputs[0] x -= 340 # Do wrapping wrap_s = pysampler.wrap_s wrap_t = pysampler.wrap_t if wrap_s is None: wrap_s = TextureWrap.Repeat if wrap_t is None: wrap_t = TextureWrap.Repeat # If wrapping is REPEATxREPEAT or CLAMPxCLAMP, just set tex_img.extension if (wrap_s, wrap_t) == (TextureWrap.Repeat, TextureWrap.Repeat): tex_img.extension = 'REPEAT' elif (wrap_s, wrap_t) == (TextureWrap.ClampToEdge, TextureWrap.ClampToEdge): tex_img.extension = 'EXTEND' else: # Otherwise separate the UV components and use math nodes to compute # the wrapped UV coordinates # => [Separate XYZ] => [Wrap for S] => [Combine XYZ] => # => [Wrap for T] => tex_img.extension = 'EXTEND' # slightly better errors near the edge than REPEAT # Combine XYZ com_uv = mh.node_tree.nodes.new('ShaderNodeCombineXYZ') com_uv.location = x - 140, y - 100 mh.node_tree.links.new(uv_socket, com_uv.outputs[0]) u_socket = com_uv.inputs[0] v_socket = com_uv.inputs[1] x -= 200 for i in [0, 1]: wrap = [wrap_s, wrap_t][i] socket = [u_socket, v_socket][i] if wrap == TextureWrap.Repeat: # WRAP node for REPEAT math = mh.node_tree.nodes.new('ShaderNodeMath') math.location = x - 140, y + 30 - i*200 math.operation = 'WRAP' math.inputs[1].default_value = 0 math.inputs[2].default_value = 1 mh.node_tree.links.new(socket, math.outputs[0]) socket = math.inputs[0] elif wrap == TextureWrap.MirroredRepeat: # PINGPONG node for MIRRORED_REPEAT math = mh.node_tree.nodes.new('ShaderNodeMath') math.location = x - 140, y + 30 - i*200 math.operation = 'PINGPONG' math.inputs[1].default_value = 1 mh.node_tree.links.new(socket, math.outputs[0]) socket = math.inputs[0] else: # Pass-through CLAMP since the tex_img node is set to EXTEND pass if i == 0: u_socket = socket else: v_socket = socket x -= 200 # Separate XYZ sep_uv = mh.node_tree.nodes.new('ShaderNodeSeparateXYZ') sep_uv.location = x - 140, y - 100 mh.node_tree.links.new(u_socket, sep_uv.outputs[0]) mh.node_tree.links.new(v_socket, sep_uv.outputs[1]) uv_socket = sep_uv.inputs[0] x -= 200 needs_uv_map = True # UV Transform (for KHR_texture_transform) needs_tex_transform = 'KHR_texture_transform' in (tex_info.extensions or {}) if needs_tex_transform: mapping = mh.node_tree.nodes.new('ShaderNodeMapping') mapping.location = x - 160, y + 30 mapping.vector_type = 'POINT' # Outputs mh.node_tree.links.new(uv_socket, 
mapping.outputs[0]) # Inputs uv_socket = mapping.inputs[0] transform = tex_info.extensions['KHR_texture_transform'] transform = texture_transform_gltf_to_blender(transform) mapping.inputs['Location'].default_value[0] = transform['offset'][0] mapping.inputs['Location'].default_value[1] = transform['offset'][1] mapping.inputs['Rotation'].default_value[2] = transform['rotation'] mapping.inputs['Scale'].default_value[0] = transform['scale'][0] mapping.inputs['Scale'].default_value[1] = transform['scale'][1] x -= 260 needs_uv_map = True # UV Map uv_idx = tex_info.tex_coord or 0 try: uv_idx = tex_info.extensions['KHR_texture_transform']['texCoord'] except Exception: pass if uv_idx != 0 or needs_uv_map: uv_map = mh.node_tree.nodes.new('ShaderNodeUVMap') uv_map.location = x - 160, y - 70 uv_map.uv_map = 'UVMap' if uv_idx == 0 else 'UVMap.%03d' % uv_idx # Outputs mh.node_tree.links.new(uv_socket, uv_map.outputs[0])
21,480
def arith_expr(draw): """ arith_expr: term (('+'|'-') term)* """ return _expr_builder(draw, term, '+-')
21,481
def test_clonotype_clusters_end_to_end( adata_define_clonotype_clusters, receptor_arms, dual_ir, same_v_gene, within_group, expected, expected_size, ): """Test define_clonotype_clusters with different parameters""" ir.pp.ir_dist( adata_define_clonotype_clusters, cutoff=0, sequence="aa", ) clonotypes, clonotype_size, _ = ir.tl.define_clonotype_clusters( adata_define_clonotype_clusters, inplace=False, within_group=within_group, receptor_arms=receptor_arms, dual_ir=dual_ir, same_v_gene=same_v_gene, ) # type: ignore print(clonotypes) npt.assert_equal( list(clonotypes.values), [str(x) if not np.isnan(x) else x for x in expected] ) npt.assert_almost_equal(clonotype_size.values, expected_size)
21,482
def extract_running_speed(module_params): """Writes the stimulus and pkl paths to the input json Parameters ---------- module_params: dict Session or probe unique information, used by each module Returns ------- module_params: dict Session or probe unique information, used by each module input_json_write_dict: dict A dictionary representing the values that will be written to the input json """ # trim_discontiguous_frame_times = module_params['trim'] output_path = module_params['output_path'] input_json_write_dict = \ { 'stimulus_pkl_path': glob(join(module_params['base_directory'], "*.stim.pkl"))[0], 'sync_h5_path': glob(join(module_params['base_directory'], "*.sync"))[0], 'output_path': join(output_path, "running_speed.h5"), "log_level": 'INFO' } return module_params, input_json_write_dict
21,483
def sliced_transposed_product( mat, block_size, axes=(-1,), precision=lax.Precision.DEFAULT, ): """Returns the blocked slices representing a symmetric contraction. Specifically, the output is a contraction of the input mat with itself, in the specified axes. Args: mat: The matrix for which we will compute a contraction with itself. block_size: The size of row blocks to compute. axes: Axes to use for the contraction. precision: The precision to use in each computation. Raises: ValueError: Raised when the specified block size does not evenly divide the number of rows of the input mat. """ rank = len(mat.shape) def _make_axis_positive(ax): assert -rank <= ax < rank return ax + rank if ax < 0 else ax positive_axes = [_make_axis_positive(ax) for ax in axes] assert len(positive_axes) == len(axes) remaining_axes = set(range(rank)) - set(positive_axes) assert len(remaining_axes) == 1 remaining_ax = remaining_axes.pop() num_rows = mat.shape[remaining_ax] if num_rows % block_size != 0: raise ValueError( "The row dimension must be divisible by block_size. " f"Instead got row dimension={num_rows} and block_size={block_size}." ) block_rows = [] for i in range(num_rows // block_size): start_indices = [0] * rank start_indices[remaining_ax] = i * block_size slice_sizes = list(mat.shape) slice_sizes[remaining_ax] = block_size slice_sizes_full = list(mat.shape) slice_sizes_full[remaining_ax] = (i + 1) * block_size block_rows.append( product_with_transpose( lax.dynamic_slice( mat, start_indices=start_indices, slice_sizes=slice_sizes ), lax.dynamic_slice( mat, start_indices=[0] * rank, slice_sizes=slice_sizes_full ), axes=(axes, axes), precision=precision, ) ) return SlicedSymmetricMatrix(block_rows=block_rows)
21,484
def _is_ipython_line_magic(line): """ Determines if the source line is an IPython magic. e.g., %%bash for i in 1 2 3; do echo $i done """ return re.match(_IS_IPYTHON_LINE_MAGIC, line) is not None
21,485
def osu_to_excel(
    osu_path: str,
    excel_path: str = '',
    n: int = None,
    compact_log: bool = False,
    display_progress=True,
    **kwargs
) -> str:
    """Export metadata and hitobjects to an xlsx file."""
    metadata = from_osu(
        osu_path,
        n=n,
        compact_log=compact_log,
        display_progress=display_progress
    )

    mode = 'w' if not excel_path.strip() else 'a'
    excel_path = './osu_data.xlsx' if not excel_path else excel_path

    with pd.ExcelWriter(excel_path, mode=mode) as writer:
        Logs.info("the 'metadata' sheet is being created...")
        metadata[:MAX_EXCEL_LINES].to_excel(writer, sheet_name='metadata', index=False, **kwargs)

        if metadata.shape[0] > MAX_EXCEL_LINES:
            Logs.warning(f'The sheet "metadata" is too large ({metadata.shape[0]} lines); only the first {MAX_EXCEL_LINES} rows were kept')
        else:
            Logs.success('There was no error during the data export')

    return excel_path
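# Hedged usage sketch of osu_to_excel(): 'songs/' is a hypothetical directory of .osu
# beatmap files; with no excel_path given, the data lands in './osu_data.xlsx'.
out_path = osu_to_excel('songs/', n=100, compact_log=True)
print(f'Exported to {out_path}')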
21,486
def get_cluster_env() -> ClusterEnv: """Get cardano cluster environment.""" socket_path = Path(os.environ["CARDANO_NODE_SOCKET_PATH"]).expanduser().resolve() state_dir = socket_path.parent work_dir = state_dir.parent repo_dir = Path(os.environ.get("CARDANO_NODE_REPO_PATH") or work_dir) instance_num = int(state_dir.name.replace("state-cluster", "") or 0) cluster_env = ClusterEnv( socket_path=socket_path, state_dir=state_dir, repo_dir=repo_dir, work_dir=work_dir, instance_num=instance_num, cluster_era=configuration.CLUSTER_ERA, tx_era=configuration.TX_ERA, ) return cluster_env
21,487
def convert_pybites_chars(text): """Swap case all characters in the word pybites for the given text. Return the resulting string.""" return "".join( char.swapcase() if char.lower() in PYBITES else char for char in text )
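# Hedged usage sketch: assumes PYBITES holds the letters of "pybites",
# e.g. PYBITES = set("pybites"), as the membership test above implies.
PYBITES = set("pybites")
print(convert_pybites_chars("Python Bites"))  # -> 'pYThon bITES'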
21,488
def get_meminfo(): """ Return the total memory (in MB). :return: memory (float). """ mem = 0.0 with open("/proc/meminfo", "r") as fd: mems = fd.readline() while mems: if mems.upper().find("MEMTOTAL") != -1: try: mem = float(mems.split()[1]) / 1024 # value listed by command as kB, convert to MB except ValueError as e: logger.warning('exception caught while trying to convert meminfo: %s' % e) break mems = fd.readline() return mem
21,489
def process_inline_semantic_match(placeholder_storage, match_object): """ Process a single inline-semantic match object. """ delimiter = match_object.group('delimiter') tag_name = TAG_NAME_FROM_INLINE_SEMANTIC_DELIMITER[delimiter] attribute_specification = match_object.group('attribute_specification') attribute_dictionary = parse_attribute_specification(attribute_specification) attributes = build_html_attributes(placeholder_storage, attribute_dictionary) content = match_object.group('content') content = strip_whitespace(content) # Process nested inline semantics content = process_inline_semantics(placeholder_storage, content) inline_semantic = f'<{tag_name}{attributes}>{content}</{tag_name}>' return inline_semantic
21,490
async def insert_cd_inurl_name(cluster_id: str, iso_name: str): """ Find SR by Name """ try: try: session = create_session( _id=cluster_id, get_xen_clusters=Settings.get_xen_clusters() ) except KeyError as key_error: raise HTTPException( status_code=400, detail=f"{key_error} is not a valid path" ) srs = SR.get_by_name(session=session, name=iso_name) if srs is not None: __srs_list = [] srs_list = __srs_list.append for sr in srs: srs_list(serialize(sr)) ret = dict(success=True, data=__srs_list) else: ret = dict(success=False) session.xenapi.session.logout() return ret except Fault as xml_rpc_error: raise HTTPException( status_code=int(xml_rpc_error.faultCode), detail=xml_rpc_error.faultString, ) except RemoteDisconnected as rd_error: raise HTTPException(status_code=500, detail=rd_error.strerror)
21,491
def main_add(args): """Start the add-environment command and return exit status code.""" return add_env_spec(args.directory, args.name, args.packages, args.channel)
21,492
def download(version): """Download the release archive for |version|.""" # This is the timeout used on each blocking operation, not the entire # life of the connection. So it's used for initial urlopen and for each # read attempt (which may be partial reads). 5 minutes should be fine. TIMEOUT = 5 * 60 if version == 'latest': uri = 'https://www.unicode.org/Public/UNIDATA/UCD.zip' output = Path.cwd() / f'UCD.zip' if output.exists(): req = urllib.request.Request(uri, method='HEAD') with urllib.request.urlopen(req, timeout=TIMEOUT) as f: length = int(f.getheader('Content-Length')) if length != output.stat().st_size: print(f'Refreshing {output}') output.unlink() else: uri = f'https://www.unicode.org/Public/zipped/{version}/UCD.zip' output = Path.cwd() / f'UCD-{version}.zip' # Fetch the archive if it doesn't exist. if not output.exists(): print(f'Downloading {uri}') tmpfile = output.with_suffix('.tmp') with open(tmpfile, 'wb') as outfp: with urllib.request.urlopen(uri, timeout=TIMEOUT) as infp: while True: data = infp.read(1024 * 1024) if not data: break outfp.write(data) tmpfile.rename(output) print('Extracting files') with zipfile.ZipFile(output) as archive: archive.extract('EastAsianWidth.txt') archive.extract('PropList.txt') archive.extract('UnicodeData.txt')
21,493
def clean_galaxy(name: str, data_directory: str, yes_flag: bool) -> None: """Clean galaxy directory for specified galaxy Args: name (str): Name of the galaxy data_directory (str): dr2 data directory yes_flag (bool): switch to skip asking before removing """ print(f"Cleaning files for galaxy: {name}") galaxy_dir = helper.get_magnetic_galaxy_dir(name, data_directory) sfr_dir = sfr_func.get_path_to_sfr_dir(name, data_directory) radio_sfr_dir = radio_sfr_func.get_path_to_radio_sfr_dir(name, data_directory) h1_dir = h1_func.get_path_to_h1_dir(name, data_directory) energy_dir = energy_func.get_path_to_energy_density_dir(name, data_directory) surf_dir = surf_func.get_path_to_surface_density_dir(name, data_directory) # Deleting files in all directories delete_files_in_dir(galaxy_dir, yes_flag) delete_files_in_dir(sfr_dir, yes_flag) delete_files_in_dir(radio_sfr_dir, yes_flag) delete_files_in_dir(h1_dir, yes_flag) delete_files_in_dir(energy_dir, yes_flag) delete_files_in_dir(surf_dir, yes_flag)
21,494
def write(objct, fileoutput, binary=True): """ Write 3D object to file. (same as `save()`). Possile extensions are: - vtk, vti, npy, npz, ply, obj, stl, byu, vtp, vti, mhd, xyz, tif, png, bmp. """ obj = objct if isinstance(obj, Points): # picks transformation obj = objct.polydata(True) elif isinstance(obj, (vtk.vtkActor, vtk.vtkVolume)): obj = objct.GetMapper().GetInput() elif isinstance(obj, (vtk.vtkPolyData, vtk.vtkImageData)): obj = objct if hasattr(obj, 'filename'): obj.filename = fileoutput fr = fileoutput.lower() if fr.endswith(".vtk"): writer = vtk.vtkDataSetWriter() elif fr.endswith(".ply"): writer = vtk.vtkPLYWriter() writer.AddComment("PLY file generated by vedo") lut = objct.GetMapper().GetLookupTable() if lut: pscal = obj.GetPointData().GetScalars() if not pscal: pscal = obj.GetCellData().GetScalars() if pscal and pscal.GetName(): writer.SetArrayName(pscal.GetName()) writer.SetLookupTable(lut) elif fr.endswith(".stl"): writer = vtk.vtkSTLWriter() elif fr.endswith(".vtp"): writer = vtk.vtkXMLPolyDataWriter() elif fr.endswith(".vtu"): writer = vtk.vtkXMLUnstructuredGridWriter() elif fr.endswith(".vtm"): g = vtk.vtkMultiBlockDataGroupFilter() for ob in objct: if isinstance(ob, (Points, Volume)): # picks transformation ob = ob.polydata(True) g.AddInputData(ob) # elif isinstance(ob, (vtk.vtkActor, vtk.vtkVolume)): # ob = ob.GetMapper().GetInput() # g.AddInputData(ob) g.Update() mb = g.GetOutputDataObject(0) wri = vtk.vtkXMLMultiBlockDataWriter() wri.SetInputData(mb) wri.SetFileName(fileoutput) wri.Write() return mb elif fr.endswith(".xyz"): writer = vtk.vtkSimplePointsWriter() elif fr.endswith(".facet"): writer = vtk.vtkFacetWriter() elif fr.endswith(".tif"): writer = vtk.vtkTIFFWriter() # print("GetCompression ", writer.GetCompression ()) writer.SetFileDimensionality(len(obj.GetDimensions())) elif fr.endswith(".vti"): writer = vtk.vtkXMLImageDataWriter() elif fr.endswith(".mhd"): writer = vtk.vtkMetaImageWriter() elif fr.endswith(".nii"): writer = vtk.vtkNIFTIImageWriter() elif fr.endswith(".png"): writer = vtk.vtkPNGWriter() elif fr.endswith(".jpg"): writer = vtk.vtkJPEGWriter() elif fr.endswith(".bmp"): writer = vtk.vtkBMPWriter() elif fr.endswith(".npy") or fr.endswith(".npz"): if utils.isSequence(objct): objslist = objct else: objslist = [objct] dicts2save = [] for obj in objslist: dicts2save.append( toNumpy(obj) ) np.save(fileoutput, dicts2save) return dicts2save elif fr.endswith(".obj"): outF = open(fileoutput, "w") outF.write('# OBJ file format with ext .obj\n') outF.write('# File generated by vedo\n') for p in objct.points(): outF.write("v {:.5g} {:.5g} {:.5g}\n".format(*p)) # pdata = objct.polydata().GetPointData().GetScalars() # if pdata: # ndata = vtk_to_numpy(pdata) # for vd in ndata: # outF.write('vp '+ str(vd) +'\n') #ptxt = objct.polydata().GetPointData().GetTCoords() # not working #if ptxt: # ntxt = vtk_to_numpy(ptxt) # print(len(objct.faces()), objct.points().shape, ntxt.shape) # for vt in ntxt: # outF.write('vt '+ str(vt[0]) +" "+ str(vt[1])+ ' 0\n') for i,f in enumerate(objct.faces()): fs = '' for fi in f: fs += " {:d}".format(fi+1) outF.write('f' + fs + '\n') for l in objct.lines(): ls = '' for li in l: ls += str(li+1)+" " outF.write('l '+ ls + '\n') outF.close() return objct elif fr.endswith(".xml"): # write tetrahedral dolfin xml vertices = objct.points().astype(str) faces = np.array(objct.faces()).astype(str) ncoords = vertices.shape[0] outF = open(fileoutput, "w") outF.write('<?xml version="1.0" encoding="UTF-8"?>\n') outF.write('<dolfin 
xmlns:dolfin="http://www.fenicsproject.org">\n') if len(faces[0]) == 4:# write tetrahedral mesh ntets = faces.shape[0] outF.write(' <mesh celltype="tetrahedron" dim="3">\n') outF.write(' <vertices size="' + str(ncoords) + '">\n') for i in range(ncoords): x, y, z = vertices[i] outF.write(' <vertex index="'+str(i)+'" x="'+x+'" y="'+y+'" z="'+z+'"/>\n') outF.write(' </vertices>\n') outF.write(' <cells size="' + str(ntets) + '">\n') for i in range(ntets): v0, v1, v2, v3 = faces[i] outF.write(' <tetrahedron index="'+str(i) + '" v0="'+v0+'" v1="'+v1+'" v2="'+v2+'" v3="'+v3+'"/>\n') elif len(faces[0]) == 3:# write triangle mesh ntri = faces.shape[0] outF.write(' <mesh celltype="triangle" dim="2">\n') outF.write(' <vertices size="' + str(ncoords) + '">\n') for i in range(ncoords): x, y, dummy_z = vertices[i] outF.write(' <vertex index="'+str(i)+'" x="'+x+'" y="'+y+'"/>\n') outF.write(' </vertices>\n') outF.write(' <cells size="' + str(ntri) + '">\n') for i in range(ntri): v0, v1, v2 = faces[i] outF.write(' <triangle index="'+str(i)+'" v0="'+v0+'" v1="'+v1+'" v2="'+v2+'"/>\n') outF.write(' </cells>\n') outF.write(" </mesh>\n") outF.write("</dolfin>\n") outF.close() return objct else: colors.printc("\noentry Unknown format", fileoutput, "file not saved.", c="r") return objct try: if hasattr(writer, 'SetFileTypeToBinary'): if binary: writer.SetFileTypeToBinary() else: writer.SetFileTypeToASCII() writer.SetInputData(obj) writer.SetFileName(fileoutput) writer.Write() except Exception as e: colors.printc("\noentry Error saving: " + fileoutput, "\n", e, c="r") return objct
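# Hedged usage sketch of write(): assumes the vedo package is available (it provides
# Sphere); the output filenames are arbitrary examples.
from vedo import Sphere

mesh = Sphere()
write(mesh, "sphere.ply")                # binary PLY through vtkPLYWriter
write(mesh, "sphere.obj", binary=False)  # Wavefront OBJ via the manual branch above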
21,495
def npy2point(folder='ct_train', to_save='v', number_points=300, dim=3, crop_size=224, tocrop=False): """ convert .npy to point cloud :param folder: the folder name of the data set :param to_save: choose which oen to save. 'v' represents vertices, 'p' represents plots, '' represents all. :param number_points: number of points for each point cloud :param dim: the dimension of the point clouds :param crop_size: the size of the cropped masks / gt :param tocrop: whether to crop the mask / gt :return: """ assert to_save=='' or to_save=='v' or to_save=='p' import mcubes crop_from = 128 - crop_size//2 crop_to = 128 + crop_size//2 vertices_fold = os.path.join('../../input/PnpAda_release_data/', folder, 'vertices/') plots_fold = os.path.join('../../input/PnpAda_release_data/', folder, 'plots/') if not os.path.exists(vertices_fold): os.mkdir(vertices_fold) if not os.path.exists(plots_fold): os.mkdir(plots_fold) folder_path = os.path.join('../../input/PnpAda_release_data/', folder, "mask/") for path in tqdm(glob.glob(folder_path + '*.npy')): filename = os.path.splitext(os.path.basename(path))[0] vertices_path = os.path.join(vertices_fold, filename + '.npy') plot_path = os.path.join(plots_fold, filename + '.npy') if not os.path.exists(vertices_path): mask = np.load(path) if args.toplot: from matplotlib import pyplot as plt temp = mask[...,0][crop_from:crop_to, crop_from:crop_to] if tocrop else mask[...,0] plt.imshow(temp) plt.show() mask = np.where(mask > 0, 1, 0) mask = np.moveaxis(mask, -1, 0) if tocrop: mask = crop_volume(mask, crop_size=crop_size) mask = np.concatenate([mask, mask, mask], axis=0) point_cloud = np.zeros((crop_size, crop_size)) if tocrop else np.zeros((256, 256)) vertices_array = np.zeros((number_points, dim)) if mask.sum() > 50: vol = mcubes.smooth(mask) vertices, triangles = mcubes.marching_cubes(vol, 0) try: vertices = graipher(vertices, number_points, dim=dim) except: print(filename) exit() vertices_array = np.array(vertices, dtype=np.int) if args.toplot: fig = plt.figure() from mpl_toolkits.mplot3d import Axes3D ax = fig.add_subplot(111, projection='3d') ax.scatter(vertices[:, 0], vertices[:, 1], vertices[:, 2], s=10) plt.show() point_cloud[vertices_array[:,1], vertices_array[:,2]] = 1 if args.toplot: plt.imshow(point_cloud, cmap='gray') plt.show() if to_save=='v' or to_save=='': np.save(vertices_path, vertices_array) if to_save=='p' or to_save=='': np.save(plot_path, point_cloud) # mcubes.export_mesh(vertices, triangles, "heart_single_slice.dae", "MyHeart_s") print("finish")
21,496
def instantiate_env_class(builder: IRBuilder) -> Value: """Assign an environment class to a register named after the given function definition.""" curr_env_reg = builder.add( Call(builder.fn_info.env_class.ctor, [], builder.fn_info.fitem.line) ) if builder.fn_info.is_nested: builder.fn_info.callable_class._curr_env_reg = curr_env_reg builder.add(SetAttr(curr_env_reg, ENV_ATTR_NAME, builder.fn_info.callable_class.prev_env_reg, builder.fn_info.fitem.line)) else: builder.fn_info._curr_env_reg = curr_env_reg return curr_env_reg
21,497
def initialize(): """ Initialise Halcon """ # path where dl_training_PK.hdl and dl_visulaization_PK.hdl files are located
21,498
def validate_recaptcha(token): """ Send recaptcha token to API to check if user response is valid """ url = 'https://www.google.com/recaptcha/api/siteverify' values = { 'secret': settings.RECAPTCHA_PRIVATE_KEY, 'response': token } data = urlencode(values).encode("utf-8") response = builtin_request.urlopen(url, data) result = json.load(response) if result['success']: return True, "" return False, "Invalid reCAPTCHA. Please try again."
21,499