Dataset columns: content (string, lengths 22 to 815k characters) · id (int64, 0 to 4.91M)
def _validate_config_file(config: Dict[str, str], prompts: Dict[str, Any]): """Checks that the configuration file contains all needed variables. Args: config: The config as a dictionary. prompts: Prompts from prompts.yml. Raises: KedroCliError: If the config file is empty or does not contain all the keys required in prompts, or if the output_dir specified does not exist. """ if config is None: raise KedroCliError("Config file is empty.") missing_keys = set(prompts) - set(config) if missing_keys: click.echo(yaml.dump(config, default_flow_style=False)) raise KedroCliError(f"{', '.join(missing_keys)} not found in config file.") if "output_dir" in config and not Path(config["output_dir"]).exists(): raise KedroCliError( f"`{config['output_dir']}` is not a valid output directory. " "It must be a relative or absolute path to an existing directory." )
20,500
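A hypothetical usage sketch for the validator above: the prompt and config values are invented for illustration, and it assumes `_validate_config_file` and `KedroCliError` are importable from the surrounding module (which also provides `click` and `yaml`).

prompts = {"project_name": {"title": "Project Name"}, "output_dir": {"title": "Output Dir"}}
config = {"project_name": "my-project"}  # 'output_dir' deliberately missing

try:
    _validate_config_file(config, prompts)
except KedroCliError as exc:
    # after echoing the config, the validator reports the missing key:
    print(exc)  # expected: "output_dir not found in config file."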
def f5_list_policy_hostnames_command(client: Client, policy_md5: str) -> CommandResults: """ Get a list of all policy hostnames. Args: client (Client): f5 client. policy_md5 (str): MD5 hash of the policy. """ result = client.list_policy_hostnames(policy_md5) table_name = 'f5 data for listing policy hostname:' readable_output, printable_result = build_command_result(result, table_name) command_results = CommandResults( outputs_prefix='f5.Hostname', outputs_key_field='id', readable_output=readable_output, outputs=printable_result, raw_response=result ) return command_results
20,501
def predict(m, count, s, A):
    """Predict the chain following s.

    Computes the most probable m-length chains and returns them.
    CAUTION: the number of chains returned may be less than count.

    args:
        m: the length of the predicted chain
        count: the number of predicted chains
        s: the last element of the current chain
        A: transition matrix
    return:
        a list of chains
    """
    process = []
    start = {}
    start[s] = [1, None]
    process.append(start)
    for i in range(m):
        line = process[-1]
        next_line = {}
        for key in line.keys():
            if A.get(key, None) is None:
                continue
            for k in A[key].keys():
                p = next_line.get(k, [0, None])[0]
                if p < A[key][k] * line[key][0]:
                    # keep the most probable (probability, predecessor) pair for state k
                    next_line[k] = [A[key][k] * line[key][0], key]
        process.append(next_line)
    ans = process[-1]
    # sort according to probability, from high to low
    ans = sorted(ans.items(), key=lambda item: item[1][0], reverse=True)
    if len(ans) == 0:
        return None  # can't predict: no answer could be found
    else:
        count = min(len(ans), count)  # the number of answers may be less than count
        chains = []
        length = len(process)
        for i in range(count):
            elem = ans[i][0]
            chain = get_chain(elem, length - 1, process)
            chains.append(chain[1:])
        return chains
20,502
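A minimal, self-contained sketch (toy transition probabilities, invented here) of the single greedy expansion step that `predict` above repeats m times; the real function additionally backtracks through its `get_chain` helper to recover full chains.

# Toy transition matrix: A[state][next_state] = transition probability (illustrative values).
A = {"a": {"b": 0.7, "c": 0.3}, "b": {"c": 0.9, "a": 0.1}, "c": {"a": 1.0}}

# One expansion step starting from state "a", keeping for each reachable state
# the best (probability, predecessor) pair -- the same update rule used in predict().
line = {"a": [1.0, None]}
next_line = {}
for key, (p_key, _) in line.items():
    for k, p_trans in A.get(key, {}).items():
        best = next_line.get(k, [0.0, None])[0]
        if best < p_trans * p_key:
            next_line[k] = [p_trans * p_key, key]
print(next_line)  # {'b': [0.7, 'a'], 'c': [0.3, 'a']}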
def read_lengths_from_fastx_file(fastx_file):
    """
    @param fastx_file: file path
    @type fastx_file: str
    @rtype: dict[str, int]
    """
    file_type = mimetypes.guess_type(fastx_file)[1]
    if file_type == 'gzip':
        f = gzip.open(fastx_file, "rt")
    elif not file_type:
        f = open(fastx_file, "rt")
    else:
        raise RuntimeError("Unknown type of file: '{}'".format(fastx_file))
    length = {}
    if os.path.getsize(fastx_file) == 0:
        return length

    # sniff the format from the first line, then rewind
    file_format = None
    line = f.readline()
    if line.startswith('@'):
        file_format = "fastq"
    elif line.startswith(">"):
        file_format = "fasta"
    f.seek(0)

    if not file_format:
        raise RuntimeError("Invalid sequence file: '{}'".format(fastx_file))
    for seq_record in SeqIO.parse(f, file_format):
        length[seq_record.id] = len(seq_record.seq)
    f.close()
    return length
20,503
def f_multidim(anchors, basis, distance_measurements, coeffs): """ :param anchors: anchors dim x N :param basis: basis vectors K x M :param distance_measurements: matrix of squared distances M x N :param coeffs: coefficient matrix dim x K :return: vector of differences between estimate distance and measured distance. """ assert basis.shape[0] == coeffs.shape[1] assert anchors.shape[0] == coeffs.shape[0] assert anchors.shape[1] == distance_measurements.shape[1] assert basis.shape[1] == distance_measurements.shape[0] X = coeffs.dot(basis) # is (dim x M) diff = anchors[:, :, np.newaxis] - X[:, np.newaxis, :] distance_estimates = np.linalg.norm(diff, axis=0)**2 diff = distance_measurements.T - distance_estimates nnz_diffs = diff[distance_measurements.T > 0].flatten() return nnz_diffs
20,504
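A quick shape check for `f_multidim` above, using random placeholder arrays that satisfy its assertions (dim=2, K=3, M=4, N=5); it assumes numpy and the function are importable.

import numpy as np

rng = np.random.default_rng(0)
anchors = rng.random((2, 5))                 # dim x N
basis = rng.random((3, 4))                   # K x M
coeffs = rng.random((2, 3))                  # dim x K
distance_measurements = rng.random((4, 5))   # M x N (squared distances)

residuals = f_multidim(anchors, basis, distance_measurements, coeffs)
print(residuals.shape)  # at most M*N entries (only positive measurements are kept)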
def getHomography(indict, outdict, outsize=None):
    """Returns a transformation to go from input pts to output pts using a homography.
    'indict' and 'outdict' should contain identical keys mapping to 2-tuples.
    We create A:
        x1 y1 1  0  0  0  -x1*x1' -y1*x1'
        0  0  0  x1 y1 1  -x1*y1' -y1*y1'
        x2 y2 1  0  0  0  -x2*x2' -y2*x2'
        0  0  0  x2 y2 1  -x2*y2' -y2*y2'
        ...
    And b:
        [x1' y1' x2' y2' x3' y3' ...].T
    Then solve for h in Ah = b using linear least squares, where h is:
        [h11 h12 h13 h21 h22 h23 h31 h32].T
    and h33 is 1.
    Returns (h, Ah), where the 2nd term is the transformed locations of the inputs.
    """
    # initialize both matrices
    A = np.zeros((2*len(outdict), 8), dtype=np.double)
    b = np.zeros((2*len(outdict), 1), dtype=np.double)
    inputs, outputs = getFidsFromDicts(indict, outdict, outsize=outsize)
    # copy over data
    for i, ((xi, yi, _), (xo, yo, _)) in enumerate(zip(inputs, outputs)):
        A[2*i, :] = [xi, yi, 1, 0, 0, 0, -xi*xo, -yi*xo]
        A[2*i+1, :] = [0, 0, 0, xi, yi, 1, -xi*yo, -yi*yo]
        b[2*i] = xo
        b[2*i+1] = yo
    #print(A, A.shape, b, b.shape, inputs, inputs.shape)
    # Linear least squares solve
    h, resids, rank, s = np.linalg.lstsq(A, b, rcond=None)
    h = h.flatten()
    ret = np.ones((3, 3), dtype=np.double)
    ret[:, :] = [h[:3], h[3:6], [h[6], h[7], 1.0]]
    ret = ret.transpose()  # we need the transposed version of h throughout
    ah = np.dot(inputs, ret)
    ah /= ah[:, -1:]
    if 0:  # debug output
        print(h, len(h))
        print('ret\n', ret, ret.shape)
        print('normed ah\n', ah, ah.shape)
        print('outputs\n', outputs)
        print('inputs\n', inputs)
        print('diff %\n', 100.0*(outputs-ah)/outputs)
    return ret, ah
20,505
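For reference, a self-contained sketch of the same A·h = b construction for four invented correspondences, solved directly with least squares; the original function additionally transposes H and applies it to row vectors after fetching the points via `getFidsFromDicts`.

import numpy as np

# Four made-up point correspondences (input -> output).
src = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
dst = [(10.0, 10.0), (20.0, 12.0), (22.0, 24.0), (9.0, 21.0)]

A = np.zeros((2 * len(src), 8))
b = np.zeros((2 * len(src), 1))
for i, ((xi, yi), (xo, yo)) in enumerate(zip(src, dst)):
    A[2 * i] = [xi, yi, 1, 0, 0, 0, -xi * xo, -yi * xo]
    A[2 * i + 1] = [0, 0, 0, xi, yi, 1, -xi * yo, -yi * yo]
    b[2 * i], b[2 * i + 1] = xo, yo

h, *_ = np.linalg.lstsq(A, b, rcond=None)
H = np.append(h.flatten(), 1.0).reshape(3, 3)   # h33 fixed to 1
print(H @ np.array([1.0, 1.0, 1.0]))            # maps (1, 1); divide by the last entry for pixel coords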
def variables_to_restore(scope=None, strip_scope=False):
    """Returns a dictionary of variables to restore for the specified scope.

    It is assumed that variable names start with the given scope (a prefix
    returned by the _method_scope function).

    Args:
        scope: a scope to filter variables by; if None, all restorable
            variables are returned.
        strip_scope: if True, variable names are returned without the scope
            prefix; if scope is None, names are returned unchanged.

    Returns:
        a dictionary mapping variable names to variables for restore.
    """
    if scope:
        variable_map = {}
        method_variables = slim.get_variables_to_restore(include=[scope])
        for var in method_variables:
            if strip_scope:
                var_name = var.op.name[len(scope) + 1:]
            else:
                var_name = var.op.name
            variable_map[var_name] = var
        return variable_map
    else:
        return {v.op.name: v for v in slim.get_variables_to_restore()}
20,506
def uninstall(): """Uninstall Blender configuration for Avalon.""" sys.excepthook = ORIGINAL_EXCEPTHOOK pyblish.api.deregister_host("blender") pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) deregister_loader_plugin_path(str(LOAD_PATH)) avalon.api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) if not IS_HEADLESS: ops.unregister()
20,507
def from_strings(data, gaps="-", length=None, dtype=np.int8): """Convert a series of strings to an array of integer encoded alleles. Parameters ---------- data : array_like, str Sequence of strings of alleles. gaps : str, optional String of symbols to be interpreted as gaps in the sequence. length : int, optional Truncate or extend sequence to a set length by padding with gap values. dtype : dtype, optional Specify dtype of returned array. Returns ------- array : ndarray, int Array of alleles encoded as integers. """ if isinstance(data, str): return vector_from_string(data, gaps=gaps, length=length, dtype=dtype) if isinstance(data, np.ndarray): pass else: data = np.array(data, copy=False) sequences = data.ravel() # default to length of longest element if length is None: length = max(len(i) for i in sequences) # number of sequences n_seq = len(sequences) # new array with gap as default array = np.empty((n_seq, length), dtype=dtype) for i in range(n_seq): array[i] = vector_from_string( sequences[i], gaps=gaps, length=length, dtype=dtype ) shape = data.shape + (length,) return array.reshape(shape)
20,508
def geocode(scene, dem, tmpdir, outdir, spacing, scaling='linear', func_geoback=1, nodata=(0, -99), osvdir=None, allow_RES_OSV=False, cleanup=True, export_extra=None, basename_extensions=None, removeS1BorderNoiseMethod='gamma', refine_lut=False): """ general function for radiometric terrain correction (RTC) and geocoding of SAR backscatter images with GAMMA. Applies the RTC method by :cite:t:`Small2011` to retrieve gamma nought RTC backscatter. Parameters ---------- scene: str or ~pyroSAR.drivers.ID or list the SAR scene(s) to be processed dem: str the reference DEM in GAMMA format tmpdir: str a temporary directory for writing intermediate files outdir: str the directory for the final GeoTIFF output files spacing: int the target pixel spacing in meters scaling: {'linear', 'db'} or list the value scaling of the backscatter values; either 'linear', 'db' or a list of both, i.e. ['linear', 'db'] func_geoback: {0, 1, 2, 3, 4, 5, 6, 7} backward geocoding interpolation mode (see GAMMA command `geocode_back`) - 0: nearest-neighbor - 1: bicubic spline (default) - 2: bicubic-spline, interpolate log(data) - 3: bicubic-spline, interpolate sqrt(data) - 4: B-spline interpolation (default B-spline degree: 5) - 5: B-spline interpolation sqrt(x) (default B-spline degree: 5) - 6: Lanczos interpolation (default Lanczos function order: 5) - 7: Lanczos interpolation sqrt(x) (default Lanczos function order: 5) .. note:: log and sqrt interpolation modes should only be used with non-negative data! .. note:: GAMMA recommendation for MLI data: "The interpolation should be performed on the square root of the data. A mid-order (3 to 5) B-spline interpolation is recommended." nodata: tuple the nodata values for the output files; defined as a tuple with two values, the first for linear, the second for logarithmic scaling osvdir: str a directory for Orbit State Vector files; this is currently only used by for Sentinel-1 where two subdirectories POEORB and RESORB are created; if set to None, a subdirectory OSV is created in the directory of the unpacked scene. allow_RES_OSV: bool also allow the less accurate RES orbit files to be used? Otherwise the function will raise an error if no POE file exists. cleanup: bool should all files written to the temporary directory during function execution be deleted after processing? export_extra: list of str or None a list of image file IDs to be exported to outdir - format is GeoTIFF if the file is geocoded and ENVI otherwise. Non-geocoded images can be converted via GAMMA command data2tiff yet the output was found impossible to read with GIS software - scaling of SAR image products is applied as defined by parameter `scaling` - see Notes for ID options basename_extensions: list of str or None names of additional parameters to append to the basename, e.g. ['orbitNumber_rel'] removeS1BorderNoiseMethod: str or None the S1 GRD border noise removal method to be applied, See :func:`pyroSAR.S1.removeGRDBorderNoise` for details; one of the following: - 'ESA': the pure implementation as described by ESA - 'pyroSAR': the ESA method plus the custom pyroSAR refinement - 'gamma': the GAMMA implementation of :cite:`Ali2018` - None: do not remove border noise refine_lut: bool should the LUT for geocoding be refined using pixel area normalization? Returns ------- Note ---- | intermediate output files | DEM products are named <scene identifier>_<ID>, e.g. `S1A__IW___A_20141012T162337_inc_geo` | SAR products will additionally contain the polarization, e.g. 
`S1A__IW___A_20141012T162337_VV_grd_mli` | IDs in brackets are only written if selected by `export_extra` - images in range-Doppler geometry * **grd**: the ground range detected SAR intensity image * **grd_mli**: the multi-looked grd image with approximated target resolution * (**pix_ellip_sigma0**): ellipsoid-based pixel area * (**pix_area_sigma0**): illuminated area as obtained from integrating DEM-facets in sigma projection (command pixel_area) * (**pix_area_gamma0**): illuminated area as obtained from integrating DEM-facets in gamma projection (command pixel_area) * **pix_ratio**: pixel area normalization factor (pix_ellip_sigma0 / pix_area_gamma0) * **grd_mli_gamma0-rtc**: the terrain-corrected gamma0 backscatter (grd_mli * pix_ratio) * (**gs_ratio**): gamma-sigma ratio (pix_gamma0 / pix_sigma0) - images in map geometry * **dem_seg_geo**: dem subsetted to the extent of the intersect between input DEM and SAR image * (**u_geo**): zenith angle of surface normal vector n (angle between z and n) * (**v_geo**): orientation angle of n (between x and projection of n in xy plane) * **inc_geo**: local incidence angle (between surface normal and look vector) * (**psi_geo**): projection angle (between surface normal and image plane normal) * **ls_map_geo**: layover and shadow map (in map projection) * (**sim_sar_geo**): simulated SAR backscatter image * (**pix_ellip_sigma0_geo**): ellipsoid-based pixel area * (**pix_area_sigma0_geo**): illuminated area as obtained from integrating DEM-facets in sigma projection (command pixel_area) * (**pix_area_gamma0_geo**): illuminated area as obtained from integrating DEM-facets in gamma projection (command pixel_area) * (**pix_ratio_geo**): pixel area normalization factor (pix_ellip_sigma0 / pix_area_gamma0) * (**gs_ratio_geo**): gamma-sigma ratio (pix_gamma0 / pix_sigma0) - additional files * **lut_init**: initial geocoding lookup table - files specific to lookup table refinement * **lut_fine**: refined geocoding lookup table * **diffpar**: ISP offset/interferogram parameter file * **offs**: offset estimates (fcomplex) * **coffs**: culled range and azimuth offset estimates (fcomplex) * **coffsets**: culled offset estimates and cross correlation values (text format) * **ccp**: cross-correlation of each patch (0.0->1.0) (float) Examples -------- geocode a Sentinel-1 scene and export the local incidence angle map with it >>> from pyroSAR.gamma import geocode >>> filename = 'S1A_IW_GRDH_1SDV_20180829T170656_20180829T170721_023464_028DE0_F7BD.zip' >>> geocode(scene=filename, dem='demfile', outdir='outdir', spacing=20, scaling='db', >>> export_extra=['dem_seg_geo', 'inc_geo', 'ls_map_geo']) .. figure:: figures/gamma_geocode.svg :align: center Workflow diagram for function geocode for processing a Sentinel-1 Ground Range Detected (GRD) scene to radiometrically terrain corrected (RTC) gamma nought backscatter. """ # experimental option to reuse intermediate products; currently affects: # - scene unpacking # - conversion to GAMMA format # - multilooking # - DEM product generation exist_ok = False scenes = scene if isinstance(scene, list) else [scene] if len(scenes) > 2: raise RuntimeError("currently only one or two scenes can be passed via argument 'scene'") scenes = identify_many(scenes) ref = scenes[0] if ref.sensor not in ['S1A', 'S1B', 'PALSAR-2']: raise RuntimeError( 'this function currently only supports Sentinel-1 and PALSAR-2 Path data. 
Please stay tuned...') if export_extra is not None and not isinstance(export_extra, list): raise TypeError("parameter 'export_extra' must either be None or a list") tmpdir = os.path.join(tmpdir, ref.outname_base(extensions=basename_extensions)) for dir in [tmpdir, outdir]: os.makedirs(dir, exist_ok=True) if ref.is_processed(outdir): log.info('scene {} already processed'.format(ref.outname_base(extensions=basename_extensions))) return shellscript = os.path.join(tmpdir, ref.outname_base(extensions=basename_extensions) + '_commands.sh') scaling = [scaling] if isinstance(scaling, str) else scaling if isinstance(scaling, list) else [] scaling = union(scaling, ['db', 'linear']) if len(scaling) == 0: raise IOError('wrong input type for parameter scaling') for scene in scenes: if scene.compression is not None: log.info('unpacking scene') try: scene.unpack(tmpdir, exist_ok=exist_ok) except RuntimeError: log.info('scene was attempted to be processed before, exiting') return else: scene.scene = os.path.join(tmpdir, os.path.basename(scene.file)) os.makedirs(scene.scene) path_log = os.path.join(tmpdir, 'logfiles') if not os.path.isdir(path_log): os.makedirs(path_log) for scene in scenes: if scene.sensor in ['S1A', 'S1B'] and removeS1BorderNoiseMethod in ['ESA', 'pyroSAR']: log.info('removing border noise') scene.removeGRDBorderNoise(method=removeS1BorderNoiseMethod) log.info('converting scene to GAMMA format') gamma_bnr = True if removeS1BorderNoiseMethod == 'gamma' else False images = [] for scene in scenes: files = convert2gamma(scene, directory=tmpdir, logpath=path_log, outdir=tmpdir, basename_extensions=basename_extensions, shellscript=shellscript, S1_bnr=gamma_bnr, exist_ok=exist_ok, return_fnames=True) images.extend(files) for scene in scenes: if scene.sensor in ['S1A', 'S1B']: log.info('updating orbit state vectors') if allow_RES_OSV: osvtype = ['POE', 'RES'] else: osvtype = 'POE' try: correctOSV(id=scene, directory=tmpdir, osvdir=osvdir, osvType=osvtype, logpath=path_log, outdir=tmpdir, shellscript=shellscript) except RuntimeError: log.warning('orbit state vector correction failed for scene {}'.format(scene.scene)) return log.info('calibrating') images_cal = [] for scene in scenes: files = calibrate(id=scene, directory=tmpdir, return_fnames=True, logpath=path_log, outdir=tmpdir, shellscript=shellscript) if files is not None: images_cal.extend(files) if len(images_cal) > 0: images = images_cal if len(scenes) > 1: images_new = [] groups = groupby(images, 'polarization') for group in groups: out = group[0] + '_cat' out_par = out + '.par' all_exist = all([os.path.isfile(x) for x in [out, out_par]]) if not all_exist: log.info('mosaicing scenes') isp.MLI_cat(MLI_1=group[0], MLI1_par=group[0] + '.par', MLI_2=group[1], MLI2_par=group[1] + '.par', MLI_3=out, MLI3_par=out_par, logpath=path_log, outdir=tmpdir, shellscript=shellscript) par2hdr(out_par, out + '.hdr') images_new.append(out) images = images_new if scene.sensor in ['S1A', 'S1B']: log.info('multilooking') groups = groupby(images, 'polarization') images = [] for group in groups: out = group[0].replace('IW1', 'IW_') + '_mli' infile = group[0] if len(group) == 1 else group multilook(infile=infile, outfile=out, spacing=spacing, exist_ok=exist_ok, logpath=path_log, outdir=tmpdir, shellscript=shellscript) images.append(out) products = list(images) reference = images[0] # create output names for files to be written # appreciated files will be written n = Namespace(tmpdir, scene.outname_base(extensions=basename_extensions)) n.appreciate(['dem_seg_geo', 
'lut_init', 'inc_geo', 'ls_map_geo']) pix_geo = [] if export_extra is not None: n.appreciate(export_extra) pix = ['pix_area_sigma0', 'pix_area_gamma0', 'pix_ratio', 'gs_ratio', 'pix_ellip_sigma0'] for item in pix: if item + '_geo' in export_extra: pix_geo.append(item + '_geo') n.appreciate([item]) if refine_lut: n.appreciate(['pix_area_sigma0']) reference_par = ISPPar(reference + '.par') ###################################################################### # DEM product generation ############################################# ###################################################################### log.info('creating DEM products') gc_map_wrap(image=reference, namespace=n, dem=dem, spacing=spacing, exist_ok=exist_ok, logpath=path_log, outdir=tmpdir, shellscript=shellscript) sim_width = ISPPar(n.dem_seg_geo + '.par').width ###################################################################### # RTC reference area computation ##################################### ###################################################################### log.info('computing pixel area') pixel_area_wrap(image=reference, namespace=n, lut=n.lut_init, logpath=path_log, outdir=tmpdir, shellscript=shellscript) ###################################################################### # lookup table Refinement ############################################ ###################################################################### lut_final = n.lut_init if refine_lut: log.info('refining lookup table') # Refinement of geocoding lookup table diff.create_diff_par(PAR_1=reference + '.par', PAR_2='-', DIFF_par=reference + '_diff.par', PAR_type=1, iflg=0, logpath=path_log, outdir=tmpdir, shellscript=shellscript) # Refinement Lookuptable # for "shift" data offset window size enlarged twice to 512 and 256, for data without shift 256 128 diff.offset_pwrm(MLI_1=n.pix_area_sigma0, MLI_2=reference, DIFF_par=reference + '_diff.par', offs=reference + '_offs', ccp=reference + '_ccp', rwin=512, azwin=256, offsets=reference + '_offsets.txt', n_ovr=2, nr=64, naz=32, thres=0.2, logpath=path_log, outdir=tmpdir, shellscript=shellscript) # par2hdr(master + '.par', master + '_offs' + '.hdr') diff.offset_fitm(offs=reference + '_offs', ccp=reference + '_ccp', DIFF_par=reference + '_diff.par', coffs=reference + '_coffs', coffsets=reference + '_coffsets', thres=0.2, npoly=4, logpath=path_log, outdir=tmpdir, shellscript=shellscript) # Updating of the look-up table diff.gc_map_fine(gc_in=lut_final, width=sim_width, DIFF_par=reference + '_diff.par', gc_out=lut_final + '.fine', ref_flg=1, logpath=path_log, outdir=tmpdir, shellscript=shellscript) # Reproduce pixel area estimate pixel_area_wrap(image=reference, namespace=n, lut=lut_final + '.fine', logpath=path_log, outdir=tmpdir, shellscript=shellscript) lut_final = lut_final + '.fine' ###################################################################### # radiometric terrain correction and backward geocoding ############## ###################################################################### log.info('radiometric terrain correction and backward geocoding') for image in images: lat.product(data_1=image, data_2=n.pix_ratio, product=image + '_gamma0-rtc', width=reference_par.range_samples, bx=1, by=1, logpath=path_log, outdir=tmpdir, shellscript=shellscript) par2hdr(reference + '.par', image + '_gamma0-rtc.hdr') diff.geocode_back(data_in=image + '_gamma0-rtc', width_in=reference_par.range_samples, lookup_table=lut_final, data_out=image + '_gamma0-rtc_geo', width_out=sim_width, interp_mode=func_geoback, 
logpath=path_log, outdir=tmpdir, shellscript=shellscript) par2hdr(n.dem_seg_geo + '.par', image + '_gamma0-rtc_geo.hdr') products.extend([image + '_gamma0-rtc', image + '_gamma0-rtc_geo']) ###################################################################### # log scaling and image export ####################################### ###################################################################### log.info('conversion to (dB and) GeoTIFF') def exporter(data_in, outdir, nodata, scale='linear', dtype=2): if scale == 'db': if re.search('_geo', os.path.basename(data_in)): width = sim_width refpar = n.dem_seg_geo + '.par' else: width = reference_par.range_samples refpar = reference + '.par' lat.linear_to_dB(data_in=data_in, data_out=data_in + '_db', width=width, inverse_flag=0, null_value=nodata, logpath=path_log, outdir=tmpdir, shellscript=shellscript) par2hdr(refpar, data_in + '_db.hdr') data_in += '_db' if re.search('_geo', os.path.basename(data_in)): outfile = os.path.join(outdir, os.path.basename(data_in) + '.tif') disp.data2geotiff(DEM_par=n.dem_seg_geo + '.par', data=data_in, type=dtype, GeoTIFF=outfile, no_data=nodata, logpath=path_log, outdir=tmpdir, shellscript=shellscript) else: outfile = os.path.join(outdir, os.path.basename(data_in)) shutil.copyfile(data_in, outfile) shutil.copyfile(data_in + '.hdr', outfile + '.hdr') for image in images: for scale in scaling: exporter(data_in=image + '_gamma0-rtc_geo', scale=scale, dtype=2, nodata=dict(zip(('linear', 'db'), nodata))[scale], outdir=outdir) if scene.sensor in ['S1A', 'S1B']: outname_base = scene.outname_base(extensions=basename_extensions) shutil.copyfile(os.path.join(scene.scene, 'manifest.safe'), os.path.join(outdir, outname_base + '_manifest.safe')) if export_extra is not None: log.info('exporting extra products') for key in export_extra: if key in pix_geo: fname = n.get(key) diff.geocode_back(data_in=fname.replace('_geo', ''), width_in=reference_par.range_samples, lookup_table=lut_final, data_out=fname, width_out=sim_width, interp_mode=func_geoback, logpath=path_log, outdir=tmpdir, shellscript=shellscript) par2hdr(n.dem_seg_geo + '.par', fname + '_.hdr') # SAR image products product_match = [x for x in products if x.endswith(key)] if len(product_match) > 0: for product in product_match: for scale in scaling: exporter(data_in=product, outdir=outdir, scale=scale, dtype=2, nodata=dict(zip(('linear', 'db'), nodata))[scale]) # ancillary (DEM) products elif n.isfile(key) and key not in ['lut_init']: filename = n[key] dtype = 5 if key == 'ls_map_geo' else 2 nodata = 0 exporter(filename, outdir, dtype=dtype, nodata=nodata) else: log.warning('cannot export file {}'.format(key)) shutil.copyfile(shellscript, os.path.join(outdir, os.path.basename(shellscript))) if cleanup: log.info('cleaning up temporary files') shutil.rmtree(tmpdir)
20,509
def fetch_new_stock_datasets(): """fetch_new_stock_datasets Collect datasets for a ticker from IEX Cloud or Tradier .. warning: IEX Cloud charges per request. Here are example commands to help you monitor your usage while handling first time users and automation (intraday, daily, and weekly options are supported). **Setup** :: export IEX_TOKEN=YOUR_IEX_CLOUD_TOKEN export TD_TOKEN=YOUR_TRADIER_TOKEN **Pull Data for a Ticker from IEX and Tradier** :: fetch -t TICKER **Pull from All Supported IEX Feeds** :: fetch -t TICKER -g iex-all **Pull from All Supported Tradier Feeds** :: fetch -t TICKER -g td **Intraday IEX and Tradier Feeds (only minute and news to reduce costs)** :: fetch -t TICKER -g intra # or manually: # fetch -t TICKER -g td,iex_min,iex_news **Daily IEX Feeds (daily and news)** :: fetch -t TICKER -g daily # or manually: # fetch -t TICKER -g iex_day,iex_news **Weekly IEX Feeds (company, financials, earnings, dividends, and peers)** :: fetch -t TICKER -g weekly # or manually: # fetch -t TICKER -g iex_fin,iex_earn,iex_div,iex_peers,iex_news, # iex_comp **IEX Minute** :: fetch -t TICKER -g iex_min **IEX News** :: fetch -t TICKER -g iex_news **IEX Daily** :: fetch -t TICKER -g iex_day **IEX Stats** :: fetch -t TICKER -g iex_stats **IEX Peers** :: fetch -t TICKER -g iex_peers **IEX Financials** :: fetch -t TICKER -g iex_fin **IEX Earnings** :: fetch -t TICKER -g iex_earn **IEX Dividends** :: fetch -t TICKER -g iex_div **IEX Quote** :: fetch -t TICKER -g iex_quote **IEX Company** :: fetch -t TICKER -g iex_comp .. note:: This requires the following services are listening on: - redis ``localhost:6379`` - minio ``localhost:9000`` """ log.info( 'start - fetch_new_stock_datasets') parser = argparse.ArgumentParser( description=( 'Download and store the latest stock pricing, ' 'news, and options chain data ' 'and store it in Minio (S3) and Redis. 
' 'Also includes support for getting FinViz ' 'screener tickers')) parser.add_argument( '-t', help=( 'ticker'), required=False, dest='ticker') parser.add_argument( '-g', help=( 'optional - fetch mode: ' 'initial = default fetch from initial data feeds ' '(IEX and Tradier), ' 'intra = fetch intraday from IEX and Tradier, ' 'daily or day = fetch daily from IEX, ' 'weekly = fetch weekly from IEX, ' 'all = fetch from all data feeds, ' 'td = fetch from Tradier feeds only, ' 'iex = fetch from IEX Cloud feeds only, ' 'min or minute or iex_min = fetch IEX Cloud intraday ' 'per-minute feed ' 'https://iexcloud.io/docs/api/#historical-prices, ' 'day or daily or iex_day = fetch IEX Cloud daily feed ' 'https://iexcloud.io/docs/api/#historical-prices, ' 'quote or iex_quote = fetch IEX Cloud quotes feed ' 'https://iexcloud.io/docs/api/#quote, ' 'stats or iex_stats = fetch IEX Cloud key stats feed ' 'https://iexcloud.io/docs/api/#key-stats, ' 'peers or iex_peers = fetch from just IEX Cloud peers feed ' 'https://iexcloud.io/docs/api/#peers, ' 'news or iex_news = fetch IEX Cloud news feed ' 'https://iexcloud.io/docs/api/#news, ' 'fin or iex_fin = fetch IEX Cloud financials feed' 'https://iexcloud.io/docs/api/#financials, ' 'earn or iex_earn = fetch from just IEX Cloud earnings feeed ' 'https://iexcloud.io/docs/api/#earnings, ' 'div or iex_div = fetch from just IEX Cloud dividends feed' 'https://iexcloud.io/docs/api/#dividends, ' 'iex_comp = fetch from just IEX Cloud company feed ' 'https://iexcloud.io/docs/api/#company'), required=False, dest='fetch_mode') parser.add_argument( '-i', help=( 'optional - ticker id ' 'not used without a database'), required=False, dest='ticker_id') parser.add_argument( '-e', help=( 'optional - options expiration date'), required=False, dest='exp_date_str') parser.add_argument( '-l', help=( 'optional - path to the log config file'), required=False, dest='log_config_path') parser.add_argument( '-b', help=( 'optional - broker url for Celery'), required=False, dest='broker_url') parser.add_argument( '-B', help=( 'optional - backend url for Celery'), required=False, dest='backend_url') parser.add_argument( '-k', help=( 'optional - s3 access key'), required=False, dest='s3_access_key') parser.add_argument( '-s', help=( 'optional - s3 secret key'), required=False, dest='s3_secret_key') parser.add_argument( '-a', help=( 'optional - s3 address format: <host:port>'), required=False, dest='s3_address') parser.add_argument( '-S', help=( 'optional - s3 ssl or not'), required=False, dest='s3_secure') parser.add_argument( '-u', help=( 'optional - s3 bucket name'), required=False, dest='s3_bucket_name') parser.add_argument( '-G', help=( 'optional - s3 region name'), required=False, dest='s3_region_name') parser.add_argument( '-p', help=( 'optional - redis_password'), required=False, dest='redis_password') parser.add_argument( '-r', help=( 'optional - redis_address format: <host:port>'), required=False, dest='redis_address') parser.add_argument( '-n', help=( 'optional - redis and s3 key name'), required=False, dest='keyname') parser.add_argument( '-m', help=( 'optional - redis database number (0 by default)'), required=False, dest='redis_db') parser.add_argument( '-x', help=( 'optional - redis expiration in seconds'), required=False, dest='redis_expire') parser.add_argument( '-z', help=( 'optional - strike price'), required=False, dest='strike') parser.add_argument( '-c', help=( 'optional - contract type "C" for calls "P" for puts'), required=False, dest='contract_type') parser.add_argument( 
'-P', help=( 'optional - get pricing data if "1" or "0" disabled'), required=False, dest='get_pricing') parser.add_argument( '-N', help=( 'optional - get news data if "1" or "0" disabled'), required=False, dest='get_news') parser.add_argument( '-O', help=( 'optional - get options data if "1" or "0" disabled'), required=False, dest='get_options') parser.add_argument( '-U', help=( 'optional - s3 enabled for publishing if "1" or ' '"0" is disabled'), required=False, dest='s3_enabled') parser.add_argument( '-R', help=( 'optional - redis enabled for publishing if "1" or ' '"0" is disabled'), required=False, dest='redis_enabled') parser.add_argument( '-A', help=( 'optional - run an analysis ' 'supported modes: scn'), required=False, dest='analysis_type') parser.add_argument( '-L', help=( 'optional - screener urls to pull ' 'tickers for analysis'), required=False, dest='urls') parser.add_argument( '-Z', help=( 'disable run without an engine for local testing and demos'), required=False, dest='celery_enabled', action='store_true') parser.add_argument( '-F', help=( 'optional - backfill date for filling in ' 'gaps for the IEX Cloud minute dataset ' 'format is YYYY-MM-DD'), required=False, dest='backfill_date') parser.add_argument( '-d', help=( 'debug'), required=False, dest='debug', action='store_true') args = parser.parse_args() run_offline = True ticker = ae_consts.TICKER ticker_id = ae_consts.TICKER_ID fetch_mode = 'initial' exp_date_str = ae_consts.NEXT_EXP_STR ssl_options = ae_consts.SSL_OPTIONS transport_options = ae_consts.TRANSPORT_OPTIONS broker_url = ae_consts.WORKER_BROKER_URL backend_url = ae_consts.WORKER_BACKEND_URL celery_config_module = ae_consts.WORKER_CELERY_CONFIG_MODULE include_tasks = ae_consts.INCLUDE_TASKS s3_access_key = ae_consts.S3_ACCESS_KEY s3_secret_key = ae_consts.S3_SECRET_KEY s3_region_name = ae_consts.S3_REGION_NAME s3_address = ae_consts.S3_ADDRESS s3_secure = ae_consts.S3_SECURE s3_bucket_name = ae_consts.S3_BUCKET s3_key = ae_consts.S3_KEY redis_address = ae_consts.REDIS_ADDRESS redis_key = ae_consts.REDIS_KEY redis_password = ae_consts.REDIS_PASSWORD redis_db = ae_consts.REDIS_DB redis_expire = ae_consts.REDIS_EXPIRE strike = None contract_type = None get_pricing = True get_news = True get_options = True s3_enabled = True redis_enabled = True analysis_type = None backfill_date = None debug = False if args.ticker: ticker = args.ticker.upper() if args.ticker_id: ticker_id = args.ticker_id if args.exp_date_str: exp_date_str = ae_consts.NEXT_EXP_STR if args.broker_url: broker_url = args.broker_url if args.backend_url: backend_url = args.backend_url if args.s3_access_key: s3_access_key = args.s3_access_key if args.s3_secret_key: s3_secret_key = args.s3_secret_key if args.s3_region_name: s3_region_name = args.s3_region_name if args.s3_address: s3_address = args.s3_address if args.s3_secure: s3_secure = args.s3_secure if args.s3_bucket_name: s3_bucket_name = args.s3_bucket_name if args.keyname: s3_key = args.keyname redis_key = args.keyname if args.redis_address: redis_address = args.redis_address if args.redis_password: redis_password = args.redis_password if args.redis_db: redis_db = args.redis_db if args.redis_expire: redis_expire = args.redis_expire if args.strike: strike = args.strike if args.contract_type: contract_type = args.contract_type if args.get_pricing: get_pricing = args.get_pricing == '1' if args.get_news: get_news = args.get_news == '1' if args.get_options: get_options = args.get_options == '1' if args.s3_enabled: s3_enabled = args.s3_enabled == '1' if 
args.redis_enabled: redis_enabled = args.redis_enabled == '1' if args.fetch_mode: fetch_mode = str(args.fetch_mode).lower() if args.analysis_type: analysis_type = str(args.analysis_type).lower() if args.celery_enabled: run_offline = False if args.backfill_date: backfill_date = args.backfill_date if args.debug: debug = True work = api_requests.build_get_new_pricing_request() work['ticker'] = ticker work['ticker_id'] = ticker_id work['s3_bucket'] = s3_bucket_name work['s3_key'] = s3_key work['redis_key'] = redis_key work['strike'] = strike work['contract'] = contract_type work['exp_date'] = exp_date_str work['s3_access_key'] = s3_access_key work['s3_secret_key'] = s3_secret_key work['s3_region_name'] = s3_region_name work['s3_address'] = s3_address work['s3_secure'] = s3_secure work['redis_address'] = redis_address work['redis_password'] = redis_password work['redis_db'] = redis_db work['redis_expire'] = redis_expire work['get_pricing'] = get_pricing work['get_news'] = get_news work['get_options'] = get_options work['s3_enabled'] = s3_enabled work['redis_enabled'] = redis_enabled work['fetch_mode'] = fetch_mode work['analysis_type'] = analysis_type work['iex_datasets'] = iex_consts.DEFAULT_FETCH_DATASETS work['backfill_date'] = backfill_date work['debug'] = debug work['label'] = f'ticker={ticker}' if analysis_type == 'scn': label = f'screener={work["ticker"]}' fv_urls = [] if args.urls: fv_urls = str(args.urls).split('|') if len(fv_urls) == 0: fv_urls = os.getenv('SCREENER_URLS', []).split('|') screener_req = api_requests.build_screener_analysis_request( ticker=ticker, fv_urls=fv_urls, label=label) work.update(screener_req) start_screener_analysis( req=work) # end of analysis_type else: last_close_date = ae_utils.last_close() last_close_str = last_close_date.strftime( ae_consts.COMMON_DATE_FORMAT) cache_base_key = f'{ticker}_{last_close_str}' if not args.keyname: work['s3_key'] = cache_base_key work['redis_key'] = cache_base_key path_to_tasks = 'analysis_engine.work_tasks' task_name = ( f'{path_to_tasks}' f'.get_new_pricing_data.get_new_pricing_data') task_res = None if ae_consts.is_celery_disabled() or run_offline: work['celery_disabled'] = True work['verbose'] = debug log.debug( f'starting without celery work={ae_consts.ppj(work)} ' f'offline={run_offline}') task_res = task_pricing.get_new_pricing_data( work) status_str = ae_consts.get_status(status=task_res['status']) cur_date = backfill_date if not backfill_date: cur_date = ae_utils.get_last_close_str() redis_arr = work["redis_address"].split(':') include_results = '' if debug: include_results = task_res['rec'] if task_res['status'] == ae_consts.SUCCESS: if task_res['rec']['num_success'] == 0: log.error( f'failed fetching ticker={work["ticker"]} ' f'from {fetch_mode} - please check the ' 'environment variables') else: log.info( f'done fetching ticker={work["ticker"]} ' f'mode={fetch_mode} ' f'status={status_str} ' f'err={task_res["err"]} {include_results}') print( 'View keys in redis with:\n' f'redis-cli -h {redis_arr[0]} ' 'keys ' f'"{work["ticker"]}_{cur_date}*"') elif task_res['status'] == ae_consts.MISSING_TOKEN: print( 'Set an IEX or Tradier token: ' '\n' ' export IEX_TOKEN=YOUR_IEX_TOKEN\n' ' export TD_TOKEN=YOUR_TD_TOKEN\n') else: log.error( f'done fetching ticker={work["ticker"]} ' f'mode={fetch_mode} ' f'status={status_str} ' f'err={task_res["err"]}') # if/else debug else: log.debug( f'connecting to broker={broker_url} ' f'backend={backend_url}') # Get the Celery app app = get_celery_app.get_celery_app( name=__name__, 
auth_url=broker_url, backend_url=backend_url, path_to_config_module=celery_config_module, ssl_options=ssl_options, transport_options=transport_options, include_tasks=include_tasks) log.debug(f'calling task={task_name} - work={ae_consts.ppj(work)}') job_id = app.send_task( task_name, (work,)) log.debug(f'task={task_name} - job_id={job_id}') # end of if/else # end of supported modes
20,510
def get_relationship_targets(item_ids, relationships, id2rec):
    """Get the set of relationship targets for the given item IDs."""
    # Requirements to use this function:
    #   1) item Terms must have been loaded with 'relationships'
    #   2) item IDs in the 'item_ids' argument must be present in id2rec
    #   3) arg 'relationships' must be True or an iterable
    reltgt_objs_all = set()
    for goid in item_ids:
        obj = id2rec[goid]
        for reltype, reltgt_objs_cur in obj.relationship.items():
            if relationships is True or reltype in relationships:
                reltgt_objs_all.update(reltgt_objs_cur)
    return reltgt_objs_all
20,511
def cli(ctx, newick, analysis_id, name="", xref_db="null", xref_accession="", match_on_name=False, prefix=""): """Load a phylogenetic tree (Newick format) into Chado db Output: Number of inserted trees """ return ctx.gi.phylogeny.load_tree(newick, analysis_id, name=name, xref_db=xref_db, xref_accession=xref_accession, match_on_name=match_on_name, prefix=prefix)
20,512
def punkt_feature(punkter: pd.DataFrame) -> Iterator[Dict[str, Any]]:
    """Convert point information to JSON-ready dicts (yields one GeoJSON feature per point)."""
    # NB: the annotation requires `from typing import Any, Dict, Iterator`
    for i in range(punkter.shape[0]):
        punkt = punkter.at[i, "Punkt"]
        # Held (fixed) points have no new height, so we show the old one
        if punkter.at[i, "Fasthold"] == "x":
            fastholdt = True
            delta = 0.0
            kote = float(punkter.at[i, "Kote"])
            sigma = float(punkter.at[i, "σ"])
        else:
            fastholdt = False
            delta = float(punkter.at[i, "Δ-kote [mm]"])
            kote = float(punkter.at[i, "Ny kote"])
            sigma = float(punkter.at[i, "Ny σ"])
        # Points not yet computed
        if kote is None:
            kote = 0.0
            delta = 0.0
            sigma = 0.0
        # Ignored changes (below 1 µm)
        if delta is None:
            delta = 0.0
        feature = {
            "type": "Feature",
            "properties": {
                "id": punkt,
                "H": kote,
                "sH": sigma,
                "Δ": delta,
                "fastholdt": fastholdt,
            },
            "geometry": {
                "type": "Point",
                "coordinates": [punkter.at[i, "Øst"], punkter.at[i, "Nord"]],
            },
        }
        yield feature
20,513
def roll_neighbors(sites, site, dims=None, radius=1):
    """N-dimensional pixel neighborhood for periodic images on regular grids."""
    # 'nbr_range' is assumed to be a module-level array of relative neighbor
    # offsets consistent with 'radius'.
    index = np.unravel_index(site, shape=dims)  # numpy's 'dims=' keyword was removed; use 'shape='
    neighs = sites.take(nbr_range + index, axis=0, mode='wrap')
    return neighs.flatten()
20,514
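A self-contained sketch (toy values) of the periodic lookup that `roll_neighbors` relies on: numpy's `take` with `mode='wrap'` wraps out-of-range indices around the lattice.

import numpy as np

lattice = np.arange(10, 60, 10)       # site values: [10 20 30 40 50]
site = 4                              # last site on the lattice
offsets = np.array([-1, 0, 1])        # radius-1 neighborhood
print(lattice.take(offsets + site, axis=0, mode='wrap'))  # [40 50 10] -- wraps around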
def test_list_date_length_nistxml_sv_iv_list_date_length_1_3(mode, save_output, output_format): """ Type list/date is restricted by facet length with value 5. """ assert_bindings( schema="nistData/list/date/Schema+Instance/NISTSchema-SV-IV-list-date-length-1.xsd", instance="nistData/list/date/Schema+Instance/NISTXML-SV-IV-list-date-length-1-3.xml", class_name="NistschemaSvIvListDateLength1", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
20,515
def download(datadir, conn): """Download UPDATED.csv and reinstatements to the DATADIR CONN is a model.LEIE instance we'll use to log to the database""" if dload_if_stale(os.path.join(datadir, "UPDATED.csv"), 'https://oig.hhs.gov/exclusions/downloadables/UPDATED.csv', conn): conn.log("updated", "Downloaded UPDATED.csv") for year in range(2016,date.today().year+1): for month in range(1,13): if (year == date.today().year and month >= date.today().month): continue for suffix in ("REIN.csv", "EXCL.csv"): fname = "%2d%02d%s" % (year-2000, month, suffix) url = "https://oig.hhs.gov/exclusions/downloadables/%4d/%s" % (year, fname) if dload_if_stale(os.path.join(datadir, fname), url, conn): conn.log("reinstatement", "Downloaded %s" % fname)
20,516
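The naming scheme for the monthly exclusion/reinstatement files used above can be illustrated with one invented (year, month) pair:

year, month, suffix = 2018, 6, "EXCL.csv"
fname = "%2d%02d%s" % (year - 2000, month, suffix)
url = "https://oig.hhs.gov/exclusions/downloadables/%4d/%s" % (year, fname)
print(fname)  # '1806EXCL.csv'
print(url)    # 'https://oig.hhs.gov/exclusions/downloadables/2018/1806EXCL.csv'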
def save_numpy_object(obj, output_path, if_file_exists, name='file'): """Utility to save a numpy object Parameters ---------- obj: numpy.ndarray Object to save output_path: str Where to save the file if_file_exists: str, optional One of 'overwrite', 'abort', 'skip'. If 'overwrite' it replaces the file if it exists, if 'abort' if raise a ValueError exception if the file exists, if 'skip' if skips the operation if the file exists name: str, optional Name (just used for logging messages) """ logger = logging.getLogger(__name__) output_path = Path(output_path) if output_path.exists() and if_file_exists == 'abort': raise ValueError('{} already exists'.format(output_path)) elif output_path.exists() and if_file_exists == 'skip': logger.info('{} already exists, skipping...'.format(output_path)) else: np.save(str(output_path), obj) logger.info('Saved {} in {}'.format(name, output_path))
20,517
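A hypothetical call of the helper above (path and name invented) that skips the write if the file already exists:

import numpy as np

arr = np.arange(10)
save_numpy_object(arr, '/tmp/example.npy', if_file_exists='skip', name='example array')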
def print_columns(left_text, right_text, column_width): """Print two columns of text given two strings of multi-line text. Either (but not both) text string may be None. Text will be separated by a vertical bar with one space of pad on either side (' | ') """ assert left_text is not None or right_text is not None if left_text: left_lines = left_text.split('\n') if right_text: right_lines = right_text.split('\n') if not left_text: left_lines = [' ' * column_width] * len(right_lines) if not right_text: right_lines = [''] * len(left_lines) if len(left_lines) < len(right_lines): left_lines += [' ' * column_width] * (len(right_lines) - len(left_lines)) elif len(left_lines) > len(right_lines): right_lines += [''] * (len(left_lines) - len(right_lines)) for left_line, right_line in zip(left_lines, right_lines): print(f'{left_line} | {right_line}')
20,518
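A small demonstration of the layout produced by `print_columns` above; once the left column runs out, it is padded to `column_width` so the right column keeps its alignment.

left = "alpha\nbeta"
right = "one\ntwo\nthree"
print_columns(left, right, column_width=10)
# alpha | one
# beta | two
# (ten spaces) | three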
def extract_node_name(path, ignore_missing_nodes=False):
    """extracts the token after the 'nodes' directory in a path"""
    tokens = path.split(os.sep)
    last_nodes_index = -1
    for i, token in enumerate(tokens):
        if token == "nodes":
            last_nodes_index = i
    if last_nodes_index == -1:
        if ignore_missing_nodes:
            return path
        raise ValueError(
            "path '%s' does not contain 'nodes' and is not a valid diag tarball, "
            "so cannot determine the node" % path
        )
    try:
        # we're interested in getting the token after 'nodes'
        return tokens[last_nodes_index + 1]
    except IndexError:
        raise ValueError("there is nothing after the 'nodes' entry of '%s'" % path)
20,519
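Example calls with made-up paths, assuming `extract_node_name` is importable; the token following 'nodes' is returned, or the path itself when `ignore_missing_nodes` is set.

import os

p = os.path.join("diag", "nodes", "10.0.0.1", "logs", "system.log")
print(extract_node_name(p))  # '10.0.0.1'
print(extract_node_name("no-nodes-here", ignore_missing_nodes=True))  # path returned unchanged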
def say_my_name(first_name, last_name=""): """Prints "My name is" followed by the first name and optional last name""" if type(first_name) is not str: raise TypeError("first_name must be a string") if type(last_name) is not str: raise TypeError("last_name must be a string") print("My name is", first_name, last_name)
20,520
def run_model(model, raw_cohort, delta_encoder): """ Run the given model using the given cohort and experimental settings contained in args. This function: (1) balanced the dataset (2) splits the cohort intro training:development:testing sets at the patient-level (3) trains PRONTO and saves checkpoint/summaries for TensorBoard (4) evaluates PRONTO on the development and testing set :param model: an instantiated PRONTO model :type model: modeling.PRONTOModel :param raw_cohort: the cohort to use for this experimental run :type raw_cohort: preprocess.Cohort :param delta_encoder: encoder used to represented elapsed time deltas :type delta_encoder: preprocess.DeltaEncoder :return: nothing """ import scipy snapshot_sizes = [] for chronology in raw_cohort.chronologies(): for snapshot in chronology.snapshots: snapshot_sizes.append(len(snapshot)) print('Statistics on snapshot sizes:', scipy.stats.describe(snapshot_sizes)) days_til_onset = [] for chronology in raw_cohort.chronologies(): seconds = 0 for delta in chronology.deltas: seconds += delta days_til_onset.append(seconds / 60 / 60 / 24) print('Statistics on days until disease onset:', scipy.stats.describe(days_til_onset)) elapsed_times = [] for chronology in raw_cohort.chronologies(): for delta in chronology.deltas: elapsed_times.append(delta / 60 / 60 / 24) print('Statistics on elapsed time:', scipy.stats.describe(elapsed_times)) lengths = [] for chronology in raw_cohort.chronologies(): lengths.append(len(chronology)) print('Statistics on chronology lengths:', scipy.stats.describe(lengths)) # Balance the cohort to have an even number of positive/negative chronologies for each patient cohort = raw_cohort.balance_chronologies() # Split into training:development:testing train, devel, test = make_train_devel_test_split(cohort.patients(), FLAGS.tdt_ratio) # Save summaries and checkpoints into the directories passed to the script model_file = 'ln=%d_delta=%s_d=%.2f_vd=%.2f_lr=%g_bs=%d' % ( 1 if FLAGS.rnn_layer_norm else 0, 'disc' if FLAGS.use_discrete_deltas else 'tanh', FLAGS.dropout, FLAGS.vocab_dropout, FLAGS.learning_rate, FLAGS.batch_size, ) model_summaries_dir = os.path.join(FLAGS.output_dir, FLAGS.optimizer, FLAGS.rnn_cell_type, FLAGS.snapshot_encoder, model_file) model_checkpoint_dir = os.path.join(FLAGS.output_dir, FLAGS.optimizer, FLAGS.rnn_cell_type, FLAGS.snapshot_encoder, model_file, 'pronto_model') # Clear any previous summaries/checkpoints if asked if FLAGS.clear_prev: nio.delete_dir_quiet(model_summaries_dir) nio.delete_dir_quiet(model_checkpoint_dir) print('Deleted previous model summaries/checkpoints') # Make output directories so we don't blow up when saving nio.make_dirs_quiet(model_checkpoint_dir) # Instantiate PRONTO optimizer and summarizer classes if FLAGS.optimizer == 'PRONTO': optimizer = optimization.PRONTOOptimizer(model, learning_rate=FLAGS.learning_rate, sparse=True) elif FLAGS.optimizer == 'BERT': epoch_steps = len(cohort[train].make_epoch_batches(batch_size=FLAGS.batch_size, max_snapshot_size=FLAGS.max_snapshot_size, max_chrono_length=FLAGS.max_chrono_length, delta_encoder=delta_encoder)) optimizer = optimization.BERTOptimizer(model, num_train_steps=epoch_steps * FLAGS.num_epochs, num_warmup_steps=epoch_steps * 3, init_lr=FLAGS.learning_rate) print('Created BERT-like optimizer with initial learning rate of %f' % FLAGS.learning_rate) else: raise NotImplementedError('No optimizer available for %s' % FLAGS.optimizer) # noinspection PyUnboundLocalVariable summarizer = summarization.PRONTOSummarizer(model, optimizer) # Now 
that everything has been defined in TensorFlow's computation graph, initialize our model saver saver = tf.train.Saver(tf.global_variables()) first_cohort = cohort # Tell TensorFlow to wake up and get ready to rumble with tf.Session() as sess: # If we specified a TensorBoard debug server, connect to it # (this is actually pretty sweet but you have to manually step through your model's flow so 99% of the time # you shouldn't need it) if FLAGS.debug is not None: sess = tf_debug.TensorBoardDebugWrapperSession(sess, FLAGS.debug) # Create our summary writer (used by TensorBoard) summary_writer = tf.summary.FileWriter(model_summaries_dir, sess.graph) # Restore model if it exists (and we didn't clear it), otherwise create a shiny new one checkpoint = tf.train.get_checkpoint_state(model_checkpoint_dir) if checkpoint and gfile.Exists(checkpoint.model_checkpoint_path + '.index'): print("Reading model parameters from '%s'...", checkpoint.model_checkpoint_path) saver.restore(sess, checkpoint.model_checkpoint_path) else: print("Creating model with fresh parameters...") sess.run(tf.global_variables_initializer()) # Initialize local variables (these are just used for computing average metrics) sess.run(tf.local_variables_initializer()) # Create a progress logger to monitor training (this is a wrapped version of range() with trange(FLAGS.num_epochs, desc='Training') as train_log: # Save the training, development, and testing metrics for our best model (as measured by devel F1) # I'm lazy so I initialize best_devel_metrics with a zero F1 so I can compare the first iteration to it best_train_metrics, best_devel_metrics, best_test_metrics = {}, {'F2': 0}, {} # Iterate over training epochs for i in train_log: # Get global step and reset training metrics global_step, _ = sess.run([optimizer.global_step, summarizer.train.reset_op]) # Log our progress on the current epoch using tqdm cohort.make_epoch_batches shuffles the order of # chronologies and prepares them into mini-batches with zero-padding if needed total_loss = 0. batches = cohort[train].make_epoch_batches(batch_size=FLAGS.batch_size, max_snapshot_size=FLAGS.max_snapshot_size, max_chrono_length=FLAGS.max_chrono_length, delta_encoder=delta_encoder) num_batches = len(batches) with tqdm(batches, desc='Epoch %d' % (i + 1)) as batch_log: # Iterate over each batch for j, batch in enumerate(batch_log): # We train the model by evaluating the optimizer's training op. 
At the same time we update the # training metrics and get metrics/summaries for the current batch and request the new global # step number (used by TensorBoard to coordinate metrics across different runs _, batch_summary, batch_metrics, global_step = sess.run( [[optimizer.train_op, summarizer.train.metric_ops], # All fetches we aren't going to read summarizer.batch_summary, summarizer.batch_metrics, optimizer.global_step], batch.feed(model, training=True)) # Update tqdm progress indicator with current training metrics on this batch batch_log.set_postfix(batch_metrics) # Save batch-level summaries summary_writer.add_summary(batch_summary, global_step=global_step) total_loss += batch_metrics['Loss'] # Save epoch-level training metrics and summaries train_metrics, train_summary = sess.run([summarizer.train.metrics, summarizer.train.summary]) train_metrics['Loss'] = total_loss / num_batches summary_writer.add_summary(train_summary, global_step=global_step) # Re-sample chronologies in cohort cohort = raw_cohort.balance_chronologies() # Evaluate development performance sess.run(summarizer.devel.reset_op) # Update local variables used to compute development metrics as we process each batch for devel_batch in first_cohort[devel].make_epoch_batches(batch_size=FLAGS.batch_size, max_snapshot_size=FLAGS.max_snapshot_size, max_chrono_length=FLAGS.max_chrono_length, delta_encoder=delta_encoder): sess.run([summarizer.devel.metric_ops], devel_batch.feed(model, training=False)) # Compute the development metrics devel_metrics, devel_summary = sess.run([summarizer.devel.metrics, summarizer.devel.summary]) # Update training progress bar to indicate current performance on development set train_log.set_postfix(devel_metrics) # Save TensorBoard summary summary_writer.add_summary(devel_summary, global_step=global_step) def format_metrics(metrics: dict): return dict((key, '%6.4f' % value) for key, value in metrics.items()) train_log.write('Epoch %d. 
Train: %s | Devel: %s' % (i + 1, format_metrics(train_metrics), format_metrics(devel_metrics))) # Evaluate testing performance exactly as described above for development sess.run(summarizer.test.reset_op) for batch in first_cohort[test].make_epoch_batches(batch_size=FLAGS.batch_size, max_snapshot_size=FLAGS.max_snapshot_size, max_chrono_length=FLAGS.max_chrono_length, delta_encoder=delta_encoder): sess.run([summarizer.test.metrics, summarizer.test.metric_ops], batch.feed(model, training=False)) test_metrics, test_summary = sess.run([summarizer.test.metrics, summarizer.test.summary]) summary_writer.add_summary(test_summary, global_step=global_step) # If this run did better on the dev set, save it as the new best model if devel_metrics['F2'] > best_devel_metrics['F2']: best_devel_metrics = devel_metrics best_train_metrics = train_metrics best_test_metrics = test_metrics # Save the model saver.save(sess, model_checkpoint_dir, global_step=global_step) elif FLAGS.early_term: tqdm.write('Early termination!') break print('Training complete!') if FLAGS.print_performance: print('Train: %s' % str(best_train_metrics)) print('Devel: %s' % str(best_devel_metrics)) print('Test: %s' % str(best_test_metrics)) if FLAGS.save_tabbed_results: with open(os.path.join(model_summaries_dir, 'results.tsv'), 'w') as outfile: print_table_results(best_train_metrics, best_devel_metrics, best_test_metrics, 'simple', file=outfile) if FLAGS.save_latex_results: with open(os.path.join(model_summaries_dir, 'results.tex'), 'w') as outfile: print_table_results(best_train_metrics, best_devel_metrics, best_test_metrics, 'latex_booktabs', file=outfile)
20,521
def folder0_content(folder0_id, host, token): """ Modules ------- request, json ---------- Parameters ---------- folder0_id : Onedata folder level 0 id containing the data to publish. host : OneData provider (e.g., ceta-ciemat-02.datahub.egi.eu). token : OneData personal access token. ------- Returns ------- all_level0: "name" and "id" of the folders contained in the folder defined by "folder0_id" """ OneData_urlchildren = "https://" + host + '/api/v3/oneprovider/data/' + folder0_id + "/children" request_param = {'X-Auth-Token': token} r_level0 = requests.get(OneData_urlchildren, headers=request_param) all_level0 = json.loads(r_level0.text) return (all_level0)
20,522
def _update_collection_info(file_name, doi, title, short_name, force):
    """
    Adds an entry to the global information file. If the entry exists, it will be overwritten.
    """
    info, info_file = _get_collection_info()
    if file_name in info:
        if not force and short_name != info[file_name]["short_name"]:
            raise ValueError("Error: short-name provided does not match the one in the database. "
                             "If making sure all entries exist, run without the --short-name option to use the name in the database.\n"
                             "To force the new name, use the option --force: acm-dl-searcher get <doi> --short-name <new-short-name> --force")
    else:
        if short_name is None:
            raise ValueError("Error: New entry: need a parameter value for `short-name`. Pass a value with the option --short-name.")
    if short_name is not None and len(short_name) > 10:
        raise ValueError("Error: value for `--short-name` is too long (max 10 characters).")
    info[file_name] = {"doi": doi, "title": title, "short_name": short_name}
    with open(info_file, "w") as f:
        json.dump(info, f, indent=4)
20,523
def fetch_object(object_id: int, url: str): """ Fetch a single object from a feature layer. We have to fetch objects one by one, because they can get pretty big. Big enough, that if you ask for more than one at a time, you're likely to encounter 500 errors. object_id: object id to fetch (e.g. 1) url: layer url to fetch (e.g. https://maps.gov.bc.ca/arcserver/rest/services/whse/bcgw_pub_whse_legal_admin_boundaries/MapServer/2) """ print(f'fetching object {object_id}') params = { 'where': f'objectid={object_id}', 'geometryType': 'esriGeometryEnvelope', 'spatialRel': 'esriSpatialRelIntersects', # 'outSR': '102100', 'outFields': '*', 'returnGeometry': 'true', 'returnIdsOnly': 'false', 'f': 'geojson' } encode_params = urllib.parse.urlencode(params).encode("utf-8") print(f'{url}/query?{encode_params.decode()}') with urllib.request.urlopen(f'{url}/query?', encode_params) as response: json_data = json.loads(response.read()) return json_data
20,524
def density_forecast_param(Yp, sigma, _, rankmatrix, errordist_normed, dof): """creates a density forecast for Yp with Schaake Schuffle Parameters ---------- Yp: numpy.array 24-dimensional array with point-predictions of day ahead prices sigma: numpy.array Variance prediction for each hour _ : rankmatrix: numpy.array Matrix with rank positions of forecast samples errordist_normed: numpy.array Realized normed prediction errors dof: int Degrees of Freedom of parametric margins 0: Normal distribution >0: t-distribution Returns ------- newdataarray: numpy.array Array containing the density predictions of day ahead price """ # Initialize errordist=errordist_normed.copy() nzero=np.size(rankmatrix,axis=0) n_sample=np.size(errordist, axis=0) sqrtsigma = np.sqrt(sigma) # for h in range(24): # Assume Normal distribution for dof==0 if dof[0]==0: errordist[:, h]=np.linspace(st.norm(Yp[0, h], sqrtsigma[h]).ppf(1 / (n_sample + 1)), st.norm(Yp[0, h], sqrtsigma[h]).ppf(n_sample / (n_sample + 1)), n_sample) # Assume t-distribution with given degrees of freedom else: errordist[:, h] = np.linspace(st.t(loc=Yp[0, h], scale=sqrtsigma[h],df=dof[h]).ppf(1 / (n_sample + 1)), st.t(loc=Yp[0, h], scale=sqrtsigma[h],df=dof[h]).ppf(n_sample / (n_sample + 1)), n_sample) Yt = np.zeros(shape=(nzero, 24)) u_new = np.arange(1, nzero + 1) / (nzero + 1) std_error = np.zeros(shape=(nzero, 24)) for h in range(24): helper = np.sort(errordist[:, h]) std_error_pos = np.array(np.floor(u_new * np.size(errordist, axis=0)), dtype='int') std_error[:, h] = helper[std_error_pos] for i in range(nzero): Yt[i, :] = std_error[i, :] # order newdata according to rank-matrix newdataarray = np.zeros(shape=(nzero, 24)) for col in range(24): for i in range(0, nzero): help = int(rankmatrix[i, col] - 1) newdataarray[i, col] = Yt[help, col] return newdataarray
20,525
def test_rundak_driver():
    """ run dakota on an aep scan, test case for full dakota/openmdao/twister pipeline"""
    print("rundak_driver")
    dak = AEP_CSM_DAKOTA_Scanner(10)
    set_as_top(dak)
    dak.run()
20,526
def vae_latent_space_offset(data_dir, model_dir, encoded_dir, latent_dim, gene_id, percent_low, percent_high): """ vae_latent_space_offset(data_dir: string, model_dir: string, encoded_dir: string, gene_id: string): input: data_dir: directory containing the raw gene expression data for all genes including the target gene (see gene_id definition). model_dir: directory containing the learned vae models encoded_dir: directory to use to output offset vector to gene_id: gene you are using as the "phenotype" to sort samples by This gene is referred to as "target_gene" in comments below. In "interpolate_in_vae_latent_space.py", after we sort samples based on the expression level of the target gene, we want to predict the expression profile of the OTHER genes at different levels of target gene expression. percent_low: integer between 0 and 1 percent_high: integer between 0 and 1 computation: offset_vector = average(encoded gene expression of samples that have the highest percent_high% of target gene expression) - average(encoded gene expression of samples that have the lowest percent_low% of target gene expression) output: encoded offset vector (1 x number of latent space features) Note: offset vector does not include the target gene """ # Load arguments target_gene_file = os.path.join(data_dir, gene_id + ".txt") non_target_gene_file = os.path.join(data_dir, "train_model_input.txt.xz") model_file = os.path.join(model_dir, "tybalt_2layer_{}latent_encoder_model.h5".format(latent_dim)) weights_file = os.path.join(model_dir, "tybalt_2layer_{}latent_encoder_weights.h5".format(latent_dim)) # Output files offset_file = os.path.join(encoded_dir, "offset_latent_space_vae.txt") lowest_file = os.path.join(encoded_dir, "lowest_encoded_vae.txt") highest_file = os.path.join(encoded_dir, "highest_encoded_vae.txt") # Read in data target_gene_data = pd.read_table(target_gene_file, header=0, index_col=0) non_target_gene_data = pd.read_table(non_target_gene_file, header=0, index_col=0) # read in saved models loaded_model = load_model(model_file) # load weights into new model loaded_model.load_weights(weights_file) # Sort target gene data by expression (lowest --> highest) target_gene_sorted = target_gene_data.sort_values(by=[gene_id]) # Collect the extreme gene expressions [low_ids, high_ids] = utils.get_gene_expression_above_percent(target_gene_sorted, gene_id, percent_low, percent_high) low_exp = non_target_gene_data.loc[low_ids] high_exp = non_target_gene_data.loc[high_ids] print('Number of genes in low expression group is {}'.format(low_exp.shape)) print('Number of gene in high expression group is {}'.format(high_exp.shape)) # Use trained model to encode expression data into SAME latent space low_exp_encoded = loaded_model.predict_on_batch(low_exp) low_exp_encoded_df = pd.DataFrame(low_exp_encoded, index=low_exp.index) high_exp_encoded = loaded_model.predict_on_batch(high_exp) high_exp_encoded_df = pd.DataFrame(high_exp_encoded, index=high_exp.index) # Average gene expression across samples in each extreme group lowest_mean = low_exp_encoded_df.mean(axis=0) highest_mean = high_exp_encoded_df.mean(axis=0) # Generate offset using average gene expression in original dataset offset_latent_space = highest_mean - lowest_mean offset_latent_space_df = pd.Series.to_frame(offset_latent_space).T # output lowest and highest expressing samples low_exp_encoded_df.to_csv(lowest_file, sep='\t', float_format="%.5g") high_exp_encoded_df.to_csv(highest_file, sep='\t', float_format="%.5g") # ouput gene space offset vector 
offset_latent_space_df.to_csv(offset_file, sep='\t', float_format="%.5g")
20,527
def gaussian_blur(img: np.ndarray, kernel_size: int) -> np.ndarray: """Applies a Gaussian Noise kernel""" if not is_valid_kernel_size(kernel_size): raise ValueError( "kernel_size must either be 0 or a positive, odd integer") return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
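
# Usage sketch (illustrative, not from the original source): blur a synthetic
# grayscale image with a 5x5 kernel; assumes the module's is_valid_kernel_size
# helper accepts positive odd sizes, as its error message suggests.
img = (np.random.rand(64, 64) * 255).astype(np.uint8)
blurred = gaussian_blur(img, 5)
print(img.shape == blurred.shape)   # True
# gaussian_blur(img, 4) would raise ValueError because 4 is even.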
20,528
def get_controls_snapshots_count(selenium, src_obj): """Return dictionary with controls snapshots actual count and count taken from tab title.""" controls_ui_service = webui_service.ControlsService(selenium) return { "controls_tab_count": controls_ui_service.get_count_objs_from_tab( src_obj=src_obj), "controls_count": len(controls_ui_service.get_list_objs_from_tree_view( src_obj=src_obj))}
20,529
def fast_gnp_random_graph(n, p, seed=None, directed=False): """Returns a `G_{n,p}` random graph, also known as an Erdős-Rényi graph or a binomial graph. Parameters ---------- n : int The number of nodes. p : float Probability for edge creation. seed : int, optional Seed for random number generator (default=None). directed : bool, optional (default=False) If ``True``, this function returns a directed graph. Notes ----- The `G_{n,p}` graph algorithm chooses each of the `[n (n - 1)] / 2` (undirected) or `n (n - 1)` (directed) possible edges with probability `p`. This algorithm runs in `O(n + m)` time, where `m` is the expected number of edges, which equals `p n (n - 1) / 2`. This should be faster than :func:`gnp_random_graph` when `p` is small and the expected number of edges is small (that is, the graph is sparse). See Also -------- gnp_random_graph References ---------- .. [1] Vladimir Batagelj and Ulrik Brandes, "Efficient generation of large random networks", Phys. Rev. E, 71, 036113, 2005. """ G = empty_graph(n) G.name="fast_gnp_random_graph(%s,%s)"%(n,p) if not seed is None: random.seed(seed) if p <= 0 or p >= 1: return nx.gnp_random_graph(n,p,directed=directed) w = -1 lp = math.log(1.0 - p) if directed: G = nx.DiGraph(G) # Nodes in graph are from 0,n-1 (start with v as the first node index). v = 0 while v < n: lr = math.log(1.0 - random.random()) w = w + 1 + int(lr/lp) if v == w: # avoid self loops w = w + 1 while w >= n and v < n: w = w - n v = v + 1 if v == w: # avoid self loops w = w + 1 if v < n: G.add_edge(v, w) else: # Nodes in graph are from 0,n-1 (start with v as the second node index). v = 1 while v < n: lr = math.log(1.0 - random.random()) w = w + 1 + int(lr/lp) while w >= v and v < n: w = w - v v = v + 1 if v < n: G.add_edge(v, w) return G
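
# Usage sketch (illustrative): generate a sparse random graph and compare the
# realised edge count with the expectation p*n*(n-1)/2; assumes the networkx
# helpers (empty_graph, nx) referenced in the function above are importable.
G = fast_gnp_random_graph(1000, 0.002, seed=42)
print(G.number_of_nodes(), G.number_of_edges())   # expectation: 0.002 * 1000 * 999 / 2 ≈ 999 edges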
20,530
def round_to(f: float, p: int = 0) -> float: """Round to the specified precision using "half up" rounding.""" # Do no rounding, just return a float with full precision if p == -1: return float(f) # Integer rounding elif p == 0: return round_half_up(f) # Round to the specified precision else: whole = int(f) digits = 0 if whole == 0 else int(math.log10(-whole if whole < 0 else whole)) + 1 return round_half_up(whole if digits > p else f, p - digits)
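
# Usage sketch (illustrative): assumes the module's round_half_up helper rounds
# halves upward as its name suggests. Note that p counts significant digits, not
# decimal places.
print(round_to(2.5))           # 3 -- half up, unlike the built-in round()
print(round_to(123.456, 5))    # 123.46 -- five significant digits
print(round_to(0.12345, 3))    # 0.123
print(round_to(2.675, -1))     # 2.675 -- p == -1 returns the value unrounded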
20,531
def get_interface_type(interface): """Gets the type of interface """ if interface.upper().startswith('GI'): return 'GigabitEthernet' elif interface.upper().startswith('TE'): return 'TenGigabitEthernet' elif interface.upper().startswith('FA'): return 'FastEthernet' elif interface.upper().startswith('FO'): return 'FortyGigabitEthernet' elif interface.upper().startswith('LON'): return 'LongReachEthernet' elif interface.upper().startswith('ET'): return 'Ethernet' elif interface.upper().startswith('VL'): return 'Vlan' elif interface.upper().startswith('LO'): return 'loopback' elif interface.upper().startswith('PO'): return 'Port-channel' elif interface.upper().startswith('NV'): return 'nve' elif interface.upper().startswith('TWE'): return 'TwentyFiveGigE' elif interface.upper().startswith('HU'): return 'HundredGigE' else: return 'unknown'
20,532
def __get_global_options(cmd_line_options, conf_file_options=None): """ Get all global options :type cmd_line_options: dict :param cmd_line_options: Dictionary with all command line options :type conf_file_options: dict :param conf_file_options: Dictionary with all config file options :returns: dict """ options = {} for option in DEFAULT_OPTIONS['global'].keys(): options[option] = DEFAULT_OPTIONS['global'][option] if conf_file_options and option in conf_file_options: options[option] = conf_file_options[option] if cmd_line_options and option in cmd_line_options: options[option] = cmd_line_options[option] return options
20,533
def add_to_cache(blob_cache, txs_by_key_index, scanned_blocks, password): """ password -> txs_by_gindex recent block hashes/heights minimum height of blocks in block_hashes """ cache_data = { 'txs': txs_by_key_index, 'scanned_blocks': scanned_blocks } blob_cache.clear_objs(password) blob_cache.add_obj(cache_data, password)
20,534
def test_visualization(): """Test that the visualization is created as expected """ nr_bottom = 4 capacity = 250 pour = 1000 bartender = Bartender(nr_bottom, capacity) bartender.place_order(pour) tower = bartender._fountain.tower graph_fountain = bartender._visualizer.visualize(tower) expected_graph = """ | 250 | ------- | 250 || 250 | ------- ------- | 62.5 || 125.0 || 62.5 | ------- ------- ------- | 0 || 0 || 0 || 0 | ------- ------- ------- ------- """ assert expected_graph == graph_fountain
20,535
def download_file(service, drive_file): """Download a file's content. Args: service: Drive API service instance. drive_file: Drive File instance. Returns: File's content if successful, None otherwise. """ download_url = drive_file.get('downloadUrl') if download_url: resp, content = service._http.request(download_url) if resp.status == 200: #print 'Status: %s' % resp return content else: #print 'An error occurred: %s' % resp return None else: # The file doesn't have any content stored on Drive. return None
20,536
def get_include_file_end_before(block: Block) -> str: """ >>> # test end-before set to 'end-marker' >>> block = lib_test.get_test_block_ok() >>> get_include_file_end_before(block) '# end-marker' >>> assert block.include_file_end_before == '# end-marker' >>> # test end-before not set >>> block = lib_test.get_test_block_end_before_not_set() >>> get_include_file_end_before(block) '' >>> # test end-before invalid >>> block = lib_test.get_test_block_end_before_invalid() >>> get_include_file_end_before(block) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Error in File ".../README.template.rst", Line 47106: option "end-before" has no value """ include_file_end_before = block.include_file_end_before if lib_block_options.is_option_in_block('end-before', block): include_file_end_before = lib_block_options.get_option_value_from_block_or_raise_if_empty_or_invalid('end-before', block) block.include_file_end_before = include_file_end_before return include_file_end_before
20,537
def add_random_circles(tensor: torch.Tensor, n_circles: int, equalize_overlaps: bool = True): """Adds n_circles random circles onto the image.""" height, width = tensor.shape circle_img = torch.zeros_like(tensor) for _ in range(n_circles): circle_img = add_circle(circle_img, {'x': random.randint(0, width), 'y': random.randint(0, height)}, random.randint(1, int(max(height, width) / 30))) tensor += (circle_img != 0) if equalize_overlaps: tensor = (tensor != 0) return tensor.type(torch.FloatTensor)
20,538
def sample_quadric_surface(quadric, center, samples): """Samples the algebraic distance to the input quadric at sparse locations. Args: quadric: Tensor with shape [..., 4, 4]. Contains the matrix of the quadric surface. center: Tensor with shape [..., 3]. Contains the [x,y,z] coordinates of the center of the coordinate frame of the quadric surface in NIC space with a top-left origin. samples: Tensor with shape [..., N, 3], where N is the number of samples to evaluate. These are the sample locations in the same space in which the quadric surface center is defined. Supports broadcasting the batching dimensions. Returns: distances: Tensor with shape [..., N, 1]. Contains the algebraic distance to the surface at each sample. """ with tf.name_scope('sample_quadric_surface'): batching_dimensions = quadric.get_shape().as_list()[:-2] batching_rank = len(batching_dimensions) tf_util.assert_shape(quadric, batching_dimensions + [4, 4], 'sample_quadric_surface:quadric') tf_util.assert_shape(center, batching_dimensions + [-1], 'sample_quadric_surface:center') tf_util.assert_shape(samples, batching_rank * [-1] + [-1, 3], 'sample_quadric_surface:samples') # We want to transform the coordinates so that they are in the coordinate # frame of the conic section matrix, so we subtract the center of the # conic. samples = samples - tf.expand_dims(center, axis=batching_rank) sample_count = samples.get_shape().as_list()[-2] homogeneous_sample_ones = tf.ones( samples.get_shape().as_list()[:-1] + [1], dtype=tf.float32) homogeneous_sample_coords = tf.concat([samples, homogeneous_sample_ones], axis=-1) # When we transform the coordinates per-image, we broadcast on both sides- # the batching dimensions broadcast up the coordinate grid, and the # coordinate center broadcasts up along the height and width. # Per-pixel, the algebraic distance is v^T * M * v, where M is the matrix # of the conic section, and v is the homogeneous column vector [x y z 1]^T. half_distance = tf.matmul( quadric, homogeneous_sample_coords, transpose_b=True) rank = batching_rank + 2 half_distance = tf.transpose( half_distance, perm=list(range(rank - 2)) + [rank - 1, rank - 2]) algebraic_distance = tf.reduce_sum( tf.multiply(homogeneous_sample_coords, half_distance), axis=-1) return tf.reshape(algebraic_distance, batching_dimensions + [sample_count, 1])
20,539
def mol2df(mols: Mols[pd.DataFrame], multiindex=False) -> pd.DataFrame:
    """
    flattens a mol into a dataframe with the columns containing the start, stop and price
    :param mols: mols to transform
    :param multiindex: if True, use ((start, stop), price) tuples as column keys
                       instead of flat "start -> stop: price" labels
    :return: the flattened dataframe
    """
    if multiindex:
        flat = {
            ((start, stop), price): series
            for (start, stop), mol in mols.items()
            for price, series in mol.items()
        }
    else:
        flat = {
            f"{start} -> {stop}: {price}": series
            for (start, stop), mol in mols.items()
            for price, series in mol.items()
        }
    return pd.concat(flat, axis="columns")
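
# Usage sketch (illustrative, synthetic data): `mols` is assumed to be a plain
# mapping {(start, stop): {price: pandas Series}}, as implied by the
# comprehensions above; the Mols alias itself is defined elsewhere in the module.
idx = pd.date_range("2021-01-01", periods=3, freq="D")
mols = {
    ("AMS", "LHR"): {99: pd.Series([1, 2, 3], index=idx),
                     149: pd.Series([4, 5, 6], index=idx)},
    ("AMS", "CDG"): {79: pd.Series([7, 8, 9], index=idx)},
}
flat = mol2df(mols)
print(flat.shape)              # (3, 3): one column per (route, price) pair
print(sorted(flat.columns))    # ['AMS -> CDG: 79', 'AMS -> LHR: 149', 'AMS -> LHR: 99']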
20,540
def load_sequence_classifier_configs(args) -> Tuple[WrapperConfig, pet.TrainConfig, pet.EvalConfig]: """ Load the model, training and evaluation configs for a regular sequence classifier from the given command line arguments. This classifier can either be used as a standalone model or as the final classifier for PET/iPET. """ model_cfg = WrapperConfig( model_type=args.model_type, model_name_or_path=args.model_name_or_path, wrapper_type=SEQUENCE_CLASSIFIER_WRAPPER, task_name=args.task_name, label_list=args.label_list, max_seq_length=args.sc_max_seq_length, verbalizer_file=args.verbalizer_file, cache_dir=args.cache_dir, ) train_cfg = pet.TrainConfig( device=args.device, per_gpu_train_batch_size=args.sc_per_gpu_train_batch_size, per_gpu_unlabeled_batch_size=args.sc_per_gpu_unlabeled_batch_size, n_gpu=args.n_gpu, num_train_epochs=args.sc_num_train_epochs, max_steps=args.sc_max_steps, min_steps=args.sc_min_steps, temperature=args.temperature, gradient_accumulation_steps=args.sc_gradient_accumulation_steps, weight_decay=args.weight_decay, learning_rate=args.learning_rate, adam_epsilon=args.adam_epsilon, warmup_steps=args.warmup_steps, logging_steps=args.logging_steps, logging_number=args.logging_number, max_grad_norm=args.max_grad_norm, use_logits=args.method != "sequence_classifier", local_rank=args.local_rank, ) eval_cfg = pet.EvalConfig( device=args.device, n_gpu=args.n_gpu, metrics=args.metrics, per_gpu_eval_batch_size=args.sc_per_gpu_eval_batch_size, local_rank=args.local_rank, ) return model_cfg, train_cfg, eval_cfg
20,541
def plot(graph): """Plots the graph. :param graph: graph to plot :type graph: networkx graph """ fig, axis = plt.subplots(1, 1, figsize=(10, 10)) pos = graphviz_layout(graph) values = networkx.get_node_attributes(graph, 'value') node_labels = {} for name, value in values.items(): node_labels[name] = f'{value}' colors = [] for name in pos: color = networkx.get_node_attributes(graph, 'color')[name] colors.append(color) networkx.draw( graph, pos, ax=axis, node_shape='o', node_size=200, node_color=colors, alpha=0.5, style='dotted' ) networkx.draw_networkx_labels( graph, pos, ax=axis, labels=node_labels, font_size=25, ) fig.tight_layout() fig.savefig('graph.png')
20,542
def gender(word): """ Returns the gender for the given word, either: MALE, FEMALE, (MALE, FEMALE), (MALE, PLURAL) or (FEMALE, PLURAL). """ w = word.lower() # Adjectives ending in -e: cruciale, difficile, ... if w.endswith(("ale", "ile", "ese", "nte")): return (MALE, FEMALE) # Most nouns ending in -a (-e) are feminine, -o (-i) masculine: if w.endswith(("ore", "ista", "mma")): return MALE if w.endswith(("a", u"tà", u"tù", "ione", "rice")): return FEMALE if w.endswith(("e", "oni")): return (FEMALE, PLURAL) if w.endswith("i"): return (MALE, PLURAL) if w.endswith("o"): return MALE return MALE
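
# Usage sketch (illustrative): a few Italian words run through the rules above;
# MALE, FEMALE and PLURAL are the module-level constants the function returns.
print(gender("bicicletta"))   # FEMALE          (ends in -a)
print(gender("libro"))        # MALE            (ends in -o)
print(gender("difficile"))    # (MALE, FEMALE)  (adjective ending in -ile)
print(gender("ragazzi"))      # (MALE, PLURAL)  (ends in -i)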
20,543
def fcmp(d,r):
    """
    Compares two files, d and r, cell by cell. Float comparisons
    are made to 4 decimal places. Extending this function could
    be a project in and of itself.
    """
    # we need to compare the files
    dh=open(d,'rb')
    rh=open(r,'rb')
    dlines = dh.readlines()
    rlines = rh.readlines()
    boolCounter = Counter()
    for dline, rline in zip(dlines,rlines):
        for dc,rc in zip(dline.split(','), rline.split(',')):
            if _isfloat(dc):
                # numeric cells: compare to 4 decimal places
                if round(float(dc),4)!=round(float(rc),4):
                    boolCounter[False] += 1
                else:
                    boolCounter[True] += 1
            else:
                # non-numeric cells: must match exactly
                if dc!=rc:
                    boolCounter[False]+= 1
                else:
                    boolCounter[True]+= 1
    dh.close()
    rh.close()
    # files are equal only if no cell comparison failed
    return boolCounter[False] == 0
20,544
def is_vector_equal(vec1, vec2, tolerance=1e-10):
    """Compare two vectors element-wise: equal if every absolute difference is within the tolerance (max-norm)"""
    return np.all(np.abs(vec1 - vec2) <= tolerance)
20,545
def partition_pair(bif_point): """Calculate the partition pairs at a bifurcation point. The number of nodes in each child tree is counted. The partition pairs is the number of bifurcations in the two child subtrees at each branch point. """ n = float(sum(1 for _ in bif_point.children[0].ipreorder())) m = float(sum(1 for _ in bif_point.children[1].ipreorder())) return (n, m)
20,546
def readPyCorrFit(file): """ Read header and data of .csv PyCorrFit output file ========== =============================================================== Input Meaning ---------- --------------------------------------------------------------- file String with path to .csv file ========== =============================================================== ========== =============================================================== Output Meaning ---------- --------------------------------------------------------------- outputdata Object with the tau, G, Gfit, Gres in data field, and separate fields for the fitted values n, SP, offset, and chi2 ========== =============================================================== """ # create object outputdata = PyCorrFitData() # read .csv header f = open(file, "r") if f.mode == "r": contents = f.read() start = contents.find("Parameters:") [n, start] = readPyCorrFitSingleParam(contents, "# n\t", "\n", start) [tauD, start] = readPyCorrFitSingleParam(contents, "_diff [ms]\t", "\n", start) [SP, start] = readPyCorrFitSingleParam(contents, "# SP\t", "\n", start) [offset, start] = readPyCorrFitSingleParam(contents, "# offset\t", "\n", start) start = contents.find("Fitting:", start) [chi2, start] = readPyCorrFitSingleParam(contents, "\t", "\n", start) [Gfitstart, start] = readPyCorrFitSingleParam(contents, "# Ival start [ms]\t", "\n", start) [Gfitstop, start] = readPyCorrFitSingleParam(contents, "# Ival end [ms]\t", "\n", start) outputdata.n = n outputdata.tauD = tauD outputdata.SP = SP outputdata.offset = offset outputdata.chi2 = chi2 outputdata.Gfitstart = Gfitstart outputdata.Gfitstop = Gfitstop # load .csv file data = csv2array(file) # extract data tau = data[:,0] G = data[:,1] Gfit = data[:,2] Gres = G - Gfit outputdata.data = np.stack((tau, G, Gfit, Gres), axis=1) return outputdata
20,547
def end_of_time(t): """ Return the next hour of the passed time. e.g, 18:25:36 --> 19:00:00 """ return t + timedelta(minutes=60) - timedelta(minutes=t.minute) - timedelta(seconds=t.second)
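
# Usage sketch (illustrative): round a timestamp up to the next full hour.
from datetime import datetime
print(end_of_time(datetime(2021, 3, 14, 18, 25, 36)))   # 2021-03-14 19:00:00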
20,548
def modf(x): """modf(x) Return the fractional and integer parts of x. Both results carry the sign of x. """ signx = sign(x) absx = Abs(x) return (signx * Mod(absx, 1), signx * floor(absx))
20,549
def get_user_from_request(request, available_query_params: list) -> Tuple[User, GeneralApiResponse]:
    """
    Takes the view's request and a list of user query params that may be queried.
    Returns the user if it is the requester themselves, or if the requester has permission to access other users.
    Returns an error response if the request cannot be completed.
    """
    get_keys = list(request.GET.keys())
    if len(get_keys) != 0 or (len(get_keys) > 1 and 'page' in get_keys):
        if request.user.is_superuser:
            if any(query_param not in available_query_params for query_param in get_keys):
                return None, GeneralApiResponse.bad_request()  # some query param in the user request is invalid
            ignore_page_query_params = {key: v for key, v in request.GET.dict().items() if key not in ['page']}
            users = User.objects.filter(**ignore_page_query_params)  # ignore the "page" query param
            if not users.exists():
                return None, GeneralApiResponse.not_found()  # no user matched the query
            elif len(users) > 1:
                return None, GeneralApiResponse.bad_request('the query returns more than one user')
            else:
                return users[0], None
        else:
            return None, GeneralApiResponse.unauthorized()
    else:
        return request.user, None
20,550
def longest_CD(values):
    """
    Return the residue indices of the longest continuous disorder (CDl)
    subsequence.
    """
    # Filter residues with score equal or greater than 0.5
    # and store its position index
    dis_res = [index for index, res in enumerate(values) if float(res) >= 0.5]
    # Initialize longest CD region
    CDl = []
    # Counter to store partial results of each continuous region
    c = []
    # Iterate over disordered residues list
    for i, j in zip(dis_res, dis_res[1:]):
        # Current residue always belongs to the open interval
        c.append(i)
        # Check if the next residue is not consecutive: close the interval
        if j - i != 1:
            # Update CDl
            if len(c) > len(CDl):
                CDl = c
            # Reset counter for the next interval
            c = []
    # Close the last interval: the final residue is never visited as `i`
    # by the zip above, so append it and compare once more
    if dis_res:
        c.append(dis_res[-1])
        if len(c) > len(CDl):
            CDl = c
    return CDl
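
# Usage sketch (illustrative): per-residue disorder scores; indices 5..8 form the
# longest consecutive run of scores >= 0.5.
scores = [0.2, 0.8, 0.9, 0.7, 0.1, 0.6, 0.9, 0.95, 0.99, 0.3]
print(longest_CD(scores))   # [5, 6, 7, 8]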
20,551
def get_read_length(filename): """ Return the first read length of fastq file. :param str filename: fastq file. """ with FastqReader(filename) as filin: read_len = len(next(iter(filin))) return read_len
20,552
def no_outliers_estimator(base_estimator, x, alpha=0.01): """ Calculate base_estimator function after removal of extreme quantiles from the sample """ x = np.array(x) if len(x.shape) < 3: x = np.expand_dims(x, -1) low_value = np.quantile(x, alpha, axis=(0, 1)) high_value = np.quantile(x, 1 - alpha, axis=(0, 1)) result = np.zeros(x.shape[2], x.dtype) for i in range(x.shape[2]): x_ch = x[:, :, i] x_ch = x_ch[(x_ch >= low_value[i]) & (x_ch <= high_value[i])] result[i] = base_estimator(x_ch) return result
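
# Usage sketch (illustrative): a robust mean that ignores the most extreme 1% of
# values; any reducing numpy function can serve as base_estimator.
rng = np.random.default_rng(1)
img = rng.normal(0.0, 1.0, size=(64, 64))
img[0, 0] = 1e6                                   # single extreme outlier
print(np.mean(img))                               # pulled far from 0 by the outlier
print(no_outliers_estimator(np.mean, img))        # per-channel result, close to 0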
20,553
def error_state_to_dict(err: ErrorState) -> ErrorDict: """Return an ErrorDict based on the exception, string or tuple in the ErrorState. Args: err: ErrorState from a api error state Returns: An ErrorDict containing the error message a status_code and a traceback if available """ # Import here to prevent cyclic imports from server.forms import FormNotCompleteError, FormValidationError if isinstance(err, FormValidationError): return { "class": type(err).__name__, "error": str(err), "traceback": err, "validation_errors": err.errors, # type:ignore "status_code": HTTPStatus.BAD_REQUEST, } elif isinstance(err, FormNotCompleteError): return { "class": type(err).__name__, "error": str(err), "traceback": err, "form": err.form, "status_code": HTTPStatus.NOT_EXTENDED, } elif isinstance(err, Exception): if is_api_exception(err): err = cast(ApiException, err) return { "class": type(err).__name__, "error": err.reason, "status_code": err.status, "body": err.body, "headers": "\n".join(f"{k}: {v}" for k, v in err.headers.items()), "traceback": err, } return { "class": type(err).__name__, "error": str(err), "traceback": show_ex(err), } elif isinstance(err, tuple): cast(Tuple, err) error, status_code = err return {"error": str(error), "status_code": int(status_code)} elif isinstance(err, str): return {"error": err} elif isinstance(err, dict) and "error" in err: # type: ignore return err else: raise TypeError("ErrorState should be a tuple, exception or string")
20,554
def test_user_extract_bogus_data(): """User doesn't exist, user is GUEST""" http = httplib2.Http() response, content = http.request('http://our_test_domain:8001/current_user', method='GET', headers={'Authorization': 'Basic %s' % b64encode(':')}) assert response['status'] == '200' assert 'GUEST' in content
20,555
def english_words() -> Set[str]: """Return a set of english words from the nltk corpus "words". Returns: Set of english words. """ nltk_resource("corpora/words") return set(nltk.corpus.words.words())
20,556
def bipartite_matching_wrapper(a, b, score_func, symmetric=False): """A wrapper to `bipartite_matching()` that returns `(matches, unmatched_in_a, unmatched_in_b)` The list of `matches` contains tuples of `(score, a_element, b_element)`. The two unmatched lists are elements from each of the respective input lists. """ found_a, found_b = set(), set() matches = [] for score, i, j in bipartite_matching(a, b, score_func, symmetric=symmetric): matches.append((score, i, j)) found_a.add(i) found_b.add(j) unmatched_in_a = set(a) - found_a unmatched_in_b = set(b) - found_b return matches, unmatched_in_a, unmatched_in_b
20,557
def get_entsoe(connection_string, user, pwd, category, directory): """ downloads dataset from ENTSO-E's transparency data sftp server. contact ENTSO-E to receive login credentials. :param connection_string: url of ENTSO-E transparency server, as of May 1, 2020: 'sftp-transparency.entsoe.eu' :param user: username required for connecting with sftp server :param pwd: password required for connecting with sftp server :param category: ENTSO-E data category to be downloaded :param directory: directory where downloaded data is saved to. A separate subdirectory is created for each category :return: downloaded dataset(s) in dir """ # check if local_dir exists and create if it doesn't local_dir = os.path.join(directory, category) if not os.path.exists(local_dir): os.makedirs(local_dir) # cnopts = pysftp.CnOpts() cnopts.hostkeys = None # connect to entso-e server via sFTP entsoe_dir = f'/TP_export/{category}' logging.info(f'connecting to {connection_string}') with pysftp.Connection(connection_string, username=user, password=pwd, cnopts=cnopts) as sftp: sftp.chdir(entsoe_dir) files_entsoe = sftp.listdir() os.chdir(local_dir) files_local = set(os.listdir(local_dir)) # compare to files on disk to_download = list(compress(files_entsoe, [item not in files_local for item in files_entsoe])) # download files not on disk for file in to_download: logging.info(f'starting download of {file}...') sftp.get(f'{entsoe_dir}/{file}', os.path.join(directory, category, file)) logging.info(f'download of {file} successful') sftp.close()
20,558
def gen_seldicts(
        da,
        dims=None,
        check_empty=True,
        unstack=True
):
    """
    Generates a list of dictionaries to be passed into dataarray selection functions.

    Parameters
    ----------
    da : xr.DataArray
        DataArray to generate selection dicts for
    dims
        dimensions to generate seldicts over, if None then use all dimensions
    check_empty : bool
        only generate seldicts that give values that are not all nan
    unstack : bool
        if True, unstack the DataArray before building the seldicts (handles a
        MultiIndex; the unstacked seldicts still work when selecting on the
        stacked array)

    Returns
    -------
    seldicts : List[Dict]
    """
    if unstack:
        #unstacks in case of multiindex. using unstacked seldict on stacked multindex da seems to work
        da = da.unstack()

    if dims is None:
        dims = list(da.dims)

    idxs = {dim: da.indexes[dim] for dim in dims}
    seldicts = [dict(zip(idxs, x)) for x in itertools.product(*idxs.values())]

    seldicts_red = []
    if check_empty:
        # checks to see if the seldict produces all nans and only appends the
        # seldict to the list if that is not true
        for i, seldict in enumerate(seldicts):
            sel = da.sel(seldict).values
            t = (sel != sel) # test for nan
            if type(t) == np.ndarray:
                t = t.all()
            if not t:
                seldicts_red.append(seldict)
        seldicts = seldicts_red

    return seldicts
20,559
def get_interface_type(interface): """Gets the type of interface Args: interface (str): full name of interface, i.e. Ethernet1/1, loopback10, port-channel20, vlan20 Returns: type of interface: ethernet, svi, loopback, management, portchannel, or unknown """ if interface.upper().startswith('ET'): return 'ethernet' elif interface.upper().startswith('VL'): return 'svi' elif interface.upper().startswith('LO'): return 'loopback' elif interface.upper().startswith('MG'): return 'management' elif interface.upper().startswith('MA'): return 'management' elif interface.upper().startswith('PO'): return 'portchannel' else: return 'unknown'
20,560
def GetExpandedPaths( args, heartbeat_cb = _NoOp): """Expands given path patterns. Args: args: A `FileFinderArgs` instance that dictates the behaviour of the path expansion. heartbeat_cb: A function to be called regularly to send heartbeats. Yields: Absolute paths (as string objects) derived from input patterns. Raises: ValueError: For unsupported path types. """ if args.pathtype == rdf_paths.PathSpec.PathType.OS: pathtype = rdf_paths.PathSpec.PathType.OS else: raise ValueError("Unsupported path type: ", args.pathtype) opts = globbing.PathOpts( follow_links=args.follow_links, xdev=args.xdev, pathtype=pathtype) for path in args.paths: for expanded_path in globbing.ExpandPath(str(path), opts, heartbeat_cb): yield expanded_path
20,561
def create_int_feature_list(name, key, prefix="", module_dict=None):
    """Creates accessor functions for int64 feature lists.

    The provided functions are has_${NAME}, get_${NAME}_size, get_${NAME}_at,
    clear_${NAME}, and add_${NAME}.

    example = tensorflow.train.SequenceExample()
    add_image_timestamp(1000000, example)
    add_image_timestamp(2000000, example)
    if has_image_timestamp(example):
      for i in range(get_image_timestamp_size(example)):
        timestamp = get_image_timestamp_at(i, example)
    clear_image_timestamp(example)

    Args:
      name: the name of the feature to use in function names.
      key: the key for this feature in the SequenceExample.
      prefix: a prefix to append to the key in the SequenceExample
      module_dict: adds the functions to the corresponding module dict.
    """
    def _has(sequence_example, prefix=prefix):
        return has_feature_list(key, sequence_example, prefix=prefix)

    def _get_size(sequence_example, prefix=prefix):
        return get_feature_list_size(key, sequence_example, prefix=prefix)

    def _get_at(index, sequence_example, prefix=prefix):
        return get_int_at(key, index, sequence_example, prefix=prefix)

    def _clear(sequence_example, prefix=prefix):
        clear_feature_list(key, sequence_example, prefix=prefix)

    def _add(value, sequence_example, prefix=prefix):
        add_int(key, value, sequence_example, prefix=prefix)

    def _get_key(prefix=prefix):
        return merge_prefix(prefix, key)

    def _get_default_parser():
        return tf.io.FixedLenSequenceFeature((), tf.int64)

    function_dict = {
        "has_" + name: _has,
        "get_" + name + "_size": _get_size,
        "get_" + name + "_at": _get_at,
        "clear_" + name: _clear,
        "add_" + name: _add,
        "get_" + name + "_key": _get_key,
        "get_" + name + "_default_parser": _get_default_parser,
    }
    add_functions_to_module(function_dict, module_dict)
20,562
def ROC(y_pred, y_true, positive_column = 0,draw = True): """ ROC """ y_pred = y_pred[:,0] y_true = y_true[:,0] # sort by y_pred sort_index = np.argsort(-y_pred) y_pred = y_pred[sort_index] y_true = y_true[sort_index] tprs = [] fprs = [] positive_num = (y_true == 1.0).sum() negivate_num = len(y_true) - positive_num for threshold in np.arange(0,1+0.1,0.1): t = ((y_true == 1.0)& (y_pred >= threshold)).sum() f = ((y_true == 0.0) & (y_pred >= threshold)).sum() tprs.append(t*1.0/positive_num) fprs.append(f*1.0/negivate_num) if draw: plt.plot(fprs,tprs,c='r') plt.show() return tprs, fprs
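
# Usage sketch (illustrative, synthetic labels and scores): inputs are column
# vectors of shape (n, 1), since the function reads column 0; draw=False skips
# the matplotlib plot.
y_true = np.array([[1.0], [0.0], [1.0], [1.0], [0.0], [0.0]])
y_pred = np.array([[0.9], [0.8], [0.7], [0.6], [0.4], [0.2]])
tprs, fprs = ROC(y_pred, y_true, draw=False)
print(list(zip(fprs, tprs)))   # one (FPR, TPR) point per threshold 0.0, 0.1, ..., 1.0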
20,563
def pip_install_with_cursor(*args, **kwargs): """Just slicer.util.pip_install but with a busy cursor while it executes.""" qt.QApplication.setOverrideCursor(qt.Qt.BusyCursor) try: slicer.util.pip_install(*args, **kwargs) finally: qt.QApplication.restoreOverrideCursor()
20,564
def ignore_check(self, channel: discord.TextChannel, ignore_dm: bool = False, from_main: bool = False): """ A function that checks whether or not that channel allows command. Args: self: instance of the class this command calls or this can be commands.Bot channel (discord.TextChannel): the channel the command call happened in ignore_dm (bool): whether or not the command is being ignored in direct messages from_main (bool): indicator for whether or not this call is from Main.py, which switches changes how self is read Returns: True: if channel needs to be ignored False: if channel is fine """ if ignore_dm: if channel.type is discord.ChannelType.private: return True try: if from_main: ignore = self.get_cog("Ignores").find(channel.guild.id, channel.id) else: ignore = self.bot.get_cog('Ignores').find(channel.guild.id, channel.id) except AttributeError: return False if ignore: return True return False
20,565
def network_count_allocated_ips(context, network_id): """Return the number of allocated non-reserved ips in the network.""" return IMPL.network_count_allocated_ips(context, network_id)
20,566
def _parse_transform_spec( transform_spec ): """ Parses a transform specification into its name and parameters dictionary. Raises ValueError if the specification is invalid, it represents an unknown transform, or if the encoded parameters do not match the transform's expected types. Takes 1 argument: transform_spec - Transform specification string. See lookup_transform() for details. Returns 2 values: transform_name - Name of the specified transform. transform_parameters - Dictionary of parameters for the specified transform. Dictionary values are cast to the types expected the transform. """ try: # break the "<name>:<parameters>" string. make sure we don't break # the <parameters> into multiple components so it can contain colons # in the (key, value) pairs. (transform_name, transform_parameters_spec) = transform_spec.split( ":", maxsplit=1 ) except ValueError: raise ValueError( "Failed to get a transform name and parameters " "specification from '{:s}'.".format( transform_spec ) ) # make sure this is a known transform. if transform_name not in _transform_map: raise ValueError( "Unknown transform '{:s}'!".format( transform_name ) ) # get the associated parameter parser for this transform. _, parameter_parser = _transform_map[transform_name] try: # split the remaining <parameters> into (key, value) pairs. each # (key, value) set is colon-delimited, and each set equal # sign-delimited. # # e.g. "parameter1=value1:parameter2=value2a,value2b,value2c" # transform_parameters = dict( map( lambda key_value: key_value.split( "=" ), transform_parameters_spec.split( ":" ) ) ) # map individual parameters to their expected data types. transform_parameters = parameter_parser( transform_parameters ) except ValueError as e: raise ValueError( "<parameters> -> (<key>, <value>) ({:s})".format( str( e ) ) ) return (transform_name, transform_parameters)
20,567
def mock_get_backend(backend):
    """Replace qiskit.IBMQ with a mock that returns a single backend.

    Note this will set the value of qiskit.IBMQ to a MagicMock object. It is
    intended to be run as part of docstrings with jupyter-example in a hidden
    cell so that later examples which rely on ibmq devices can run and the docs
    can be built without requiring configured credentials. If used outside of
    this context be aware that you will have to manually restore the value of
    qiskit.IBMQ to qiskit.providers.ibmq.IBMQ after you finish using your mock.

    Args:
        backend (str): The class name as a string for the fake device to
            return from the mock IBMQ object. For example, FakeVigo.
    Raises:
        NameError: If the specified value of backend is not a fake device
            class available in qiskit.test.mock.
    """
    mock_ibmq = MagicMock()
    mock_provider = MagicMock()
    if not hasattr(backend_mocks, backend):
        raise NameError(
            'The specified backend name is not a valid mock from '
            'qiskit.test.mock')
    fake_backend = getattr(backend_mocks, backend)()
    mock_provider.get_backend.return_value = fake_backend
    mock_ibmq.get_provider.return_value = mock_provider
    qiskit.IBMQ = mock_ibmq
20,568
def describe_delivery_channels(DeliveryChannelNames=None): """ Returns details about the specified delivery channel. If a delivery channel is not specified, this action returns the details of all delivery channels associated with the account. See also: AWS API Documentation :example: response = client.describe_delivery_channels( DeliveryChannelNames=[ 'string', ] ) :type DeliveryChannelNames: list :param DeliveryChannelNames: A list of delivery channel names. (string) -- :rtype: dict :return: { 'DeliveryChannels': [ { 'name': 'string', 's3BucketName': 'string', 's3KeyPrefix': 'string', 'snsTopicARN': 'string', 'configSnapshotDeliveryProperties': { 'deliveryFrequency': 'One_Hour'|'Three_Hours'|'Six_Hours'|'Twelve_Hours'|'TwentyFour_Hours' } }, ] } """ pass
20,569
def test_atomic_integer_min_inclusive_nistxml_sv_iv_atomic_integer_min_inclusive_1_4(mode, save_output, output_format): """ Type atomic/integer is restricted by facet minInclusive with value -999999999999999999. """ assert_bindings( schema="nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-minInclusive-1.xsd", instance="nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-minInclusive-1-4.xml", class_name="NistschemaSvIvAtomicIntegerMinInclusive1", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
20,570
def while_Zero(): """ Pattern of Number : '0' using while loop""" i=0 while i<7: j=0 while j<5: if i in (0,6) and j not in(0,4) or j in(0,4) and i not in(0,6) or i+j==5: print('*',end=' ') else: print(' ',end=' ') j+=1 i+=1 print()
20,571
def parse_text_multiline(data: Union[str, List[str]]) -> str: """Parse the text in multiline mode.""" if isinstance(data, str): return data elif isinstance(data, list) and all(map(is_str, data)): return '\n'.join(data) else: raise ValueError(data)
20,572
def get_latest_episode_release(series, downloaded=True, season=None): """ :param series series: SQLAlchemy session :param downloaded: find only downloaded releases :param season: season to find newest release for :return: Instance of Episode or None if not found. """ session = Session.object_session(series) releases = ( session.query(Episode) .join(Episode.releases, Episode.series) .filter(Series.id == series.id) ) if downloaded: releases = releases.filter(EpisodeRelease.downloaded == True) if season is not None: releases = releases.filter(Episode.season == season) if series.identified_by and series.identified_by != 'auto': releases = releases.filter(Episode.identified_by == series.identified_by) if series.identified_by in ['ep', 'sequence']: latest_episode_release = releases.order_by( desc(Episode.season), desc(Episode.number) ).first() elif series.identified_by == 'date': latest_episode_release = releases.order_by(desc(Episode.identifier)).first() else: # We have to label the order_by clause to disambiguate from Release.first_seen #3055 latest_episode_release = releases.order_by( desc(Episode.first_seen.label('ep_first_seen')) ).first() if not latest_episode_release: log.debug( 'no episodes found for series `%s` with parameters season: %s, downloaded: %s', series.name, season, downloaded, ) return log.debug( 'latest episode for series %s, with downloaded set to %s and season set to %s', series, downloaded, season, ) return latest_episode_release
20,573
def _set_user_permissions_for_volumes(users, volumes): """ Returns the section of the user data script to create a Linux user group and grant the group permission to access the mounted volumes on the EC2 instance. """ group_name = 'volumes' user_data_script_section = f""" groupadd {group_name} """ for user in users: user_data_script_section += f""" usermod -a -G {group_name} {user.login} """ for volume in volumes: user_data_script_section += f""" chgrp -R {group_name} {volume.mount} chmod -R 2775 {volume.mount} """ return user_data_script_section
20,574
def cached_examples(): """This view should be cached for 60 sec""" examples = ExampleModel.query() return render_template('list_examples_cached.html', examples=examples)
20,575
def corona_surface_integral(solution, E, candidate_attach_pts, corona_elem, phys_param, debug_flag=False): """ Surface integral around the points that are marked as possible attachment candidates """ pcg_idx_vec = np.zeros((len(candidate_attach_pts.keys())), dtype=np.int64) Q_vec = np.zeros((len(candidate_attach_pts.keys()))) for i, pt in enumerate(candidate_attach_pts): pcg_idx_vec[i] = pt elem_above_thresh_in_radius = np.intersect1d(candidate_attach_pts[pt], corona_elem) # if debug_flag: # pts_list = [229096, 229099, 229129, 229132, 229155, 229167, 229168, 229171, 229176, 229189, 229190, 229191, 229195, 229196, 229201, 229213, 229214, 229217, 229218, 229220, 229233, 229234, 229238, 229241, 229244, 229245, 229261, 229270, 229286, 229419] # elem_indicator = np.zeros((solution['surf_mesh']['t'].shape[0], len(pts_list))) # if pt in pts_list: # logger.info('visualizing') # logger.info(pt) # elem_indicator = np.zeros((solution['surf_mesh']['t'].shape[0], 1)) # # elem_indicator[candidate_attach_pts[pt]] = 1 # elem_indicator[elem_above_thresh_in_radius] = 1 # viz.generate_vtu(solution['surf_mesh']['p'], solution['surf_mesh']['t'], None, None, {'cell_data': {0: 'E threshold'}}, 'test_radius{}'.format(pt), False, cell_data=elem_indicator) Q_vec[i] = phys_param['eps0']*quadrature.surface_integral(solution['surf_mesh'], solution['master'], E, elem_above_thresh_in_radius) return Q_vec, pcg_idx_vec
20,576
def librispeech_adversarial( split_type: str = "adversarial", epochs: int = 1, batch_size: int = 1, dataset_dir: str = None, preprocessing_fn: Callable = None, cache_dataset: bool = True, framework: str = "numpy", clean_key: str = "clean", adversarial_key: str = "adversarial", ) -> datasets.ArmoryDataGenerator: """ Adversarial dataset based on Librispeech-dev-clean using Universal Perturbation with PGD. split_type - one of ("adversarial") returns: Generator """ if clean_key != "clean": raise ValueError(f"{clean_key} != 'clean'") if adversarial_key != "adversarial": raise ValueError(f"{adversarial_key} != 'adversarial'") return datasets._generator_from_tfds( "librispeech_adversarial:1.0.0", split_type=split_type, batch_size=batch_size, epochs=epochs, dataset_dir=dataset_dir, preprocessing_fn=preprocessing_fn, as_supervised=False, supervised_xy_keys=("audio", "label"), variable_length=bool(batch_size > 1), cache_dataset=cache_dataset, framework=framework, lambda_map=lambda x, y: ((x[clean_key], x[adversarial_key]), y), )
20,577
def search4letters(phrase, letters='aeiou'):
    """
    Return the set of the 'letters' found in 'phrase'.
    :param phrase: phrase where the search will be made
    :param letters: set of letters that will be searched for in the phrase
    :return: a set with the letters from 'letters' that occur in 'phrase'
    """
    return set(letters).intersection(set(phrase))
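
# Usage sketch (illustrative): default vowel search and a custom letter set.
print(search4letters('hello world'))          # {'e', 'o'}
print(search4letters('hello world', 'lw'))    # {'l', 'w'}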
20,578
def handle_cmdline_args(): """ Return an object with attributes 'infile' and 'outfile', after handling the command line arguments """ parser = argparse.ArgumentParser( description='Generate synthetic data from a specification in a json ' 'file using the "synth-method" described in the json file. ') parser.add_argument( '-i', dest='infile', required=True, help='The input json file. Must contain a "synth-method" property') parser.add_argument( '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), ' 'relative to the QUIPP-pipeline root directory') args = parser.parse_args() return args
20,579
def merge_flights(prev_flights_filename, next_flights_filename, ids_df, log): """ Gets the next days flights that are the continuation of the previous days flights and merges them with the previous days flights. It writes the new next days and previous days flights to files prepended with new. it returns True if successful, False otherwise. """ new_items_df = get_next_day_items(next_flights_filename, ids_df, log) # free memory used by get_next_day_items gc.collect() prev_flights_df = pd.DataFrame() try: prev_flights_df = pd.read_csv(prev_flights_filename, index_col='FLIGHT_ID', converters={'FLIGHT_ID': lambda x: UUID(x)}, memory_map=True) log.info('%s read ok', prev_flights_filename) except EnvironmentError: log.error('could not read file: %s', prev_flights_filename) return False # merge next days flight data with the previous days flight data update_flight_data(prev_flights_df, new_items_df) # Output the new previous flights new_prev_flights_filename = 'new_' + prev_flights_filename try: is_bz2 = has_bz2_extension(prev_flights_filename) if is_bz2: new_prev_flights_filename = new_prev_flights_filename[:-BZ2_LENGTH] prev_flights_df.to_csv(new_prev_flights_filename, index=True, date_format=ISO8601_DATETIME_FORMAT) log.info('written file: %s', new_prev_flights_filename) except EnvironmentError: log.error('could not write file: %s', new_prev_flights_filename) return False return True
20,580
def findClusters( peaks, thresh ): """Since the peaks are in sequence, this method follows a very simplistic approach. For each peak it checks its distance from the previous peak. If it is less than threshold, it clusters that peak with the previous one. Note that in each of the clusters, input order is maintained.""" clusters, cluster = [], [] cluster.append(peaks[0]) for peak in peaks[1:]: if euclideanDistance( cluster[-1], peak ) < thresh: cluster.append( peak ) else: clusters.append(cluster) cluster = [peak] clusters.append( cluster ) print( clusters ) return clusters
20,581
def docker_compose_file(pytestconfig): """Get docker compose file""" return os.path.join(str(pytestconfig.rootdir), "docker-compose.yml")
20,582
def sample(): """ Returns the path to the sample of the given name. """ def inner(name): return os.path.join( os.path.join( os.path.dirname(os.path.abspath(__file__)), 'samples' ), name ) return inner
20,583
def connectAnimAndRigJoints(animJoints, rigJoints): """ Given a list of animation joints and a list of rig joints, connects each pair of joints with translate, rotate, and scale. Note: animJoints and rigJoints should follow the exact same hierarchy. """ for animJoint in animJoints: animJointName = animJoint.split(":")[1] for rigJoint in rigJoints: rigJointName = rigJoint.split(":")[1] if animJointName == rigJointName: # connectTranslateRotateScale(animJoint, rigJoint) applyParentConstraint(animJoint, rigJoint) break
20,584
def acq_max_single_seed(ac, gp, y_max, bounds): """ A function to find the maximum of the acquisition function using the 'L-BFGS-B' method. Input Parameters ---------- ac: The acquisition function object that return its point-wise value. gp: A gaussian process fitted to the relevant data. y_max: The current maximum known value of the target function. bounds: The variables bounds to limit the search of the acq max. Returns ------- :return: x_max, The arg max of the acquisition function. """ # Start with the lower bound as the argmax x_max = bounds[:, 0] #max_acq = None dim=bounds.shape[0] x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],size=(50*dim, dim)) # evaluate y_tries=ac(x_tries,gp=gp, y_max=y_max) #find x optimal for init idx_max=np.argmax(y_tries) x_init_max=x_tries[idx_max] #x_try=np.array(bounds[:, 0]) # Find the minimum of minus the acquisition function res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max), x_init_max.reshape(1, -1), bounds=bounds, method="L-BFGS-B") x_max = res.x #max_acq = -res.fun # Clip output to make sure it lies within the bounds. Due to floating # point technicalities this is not always the case. return np.clip(x_max, bounds[:, 0], bounds[:, 1])
20,585
def futures_pig_rank(symbol: str = "外三元") -> pd.DataFrame:
    """
    Price ranking list
    https://zhujia.zhuwang.cc/lists.shtml
    :param symbol: choice of {"外三元", "内三元", "土杂猪", "玉米", "豆粕"}
    :type symbol: str
    :return: price ranking table
    :rtype: pandas.DataFrame
    """
    # each symbol maps to its own listing page; the processing is identical
    url_map = {
        "外三元": "https://zhujia.zhuwang.cc/lists.shtml",
        "内三元": "https://zhujia.zhuwang.cc/lists-1.shtml",
        "土杂猪": "https://zhujia.zhuwang.cc/lists-2.shtml",
        "玉米": "https://zhujia.zhuwang.cc/lists-3.shtml",
        "豆粕": "https://zhujia.zhuwang.cc/lists-4.shtml",
    }
    if symbol not in url_map:
        return None
    temp_df = pd.read_html(url_map[symbol])[0]
    temp_df.columns = [
        '排名',
        '品种',
        '省份',
        '价格-公斤',
        '价格-斤',
    ]
    temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
    temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
    temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
    temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
    return temp_df
20,586
def psubl_T(T): """ EQ 6 / Sublimation Pressure """ T_star = 273.16 p_star = 611.657E-6 a = (-0.212144006E2, 0.273203819E2, -0.610598130E1) b = ( 0.333333333E-2, 0.120666667E1, 0.170333333E1) theta = T / T_star sum = 0 for i in range(0, 3): sum += a[i] * theta ** b[i] pi_subl = math.exp((theta ** -1) * sum) return pi_subl * p_star
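
# Usage sketch (illustrative): sublimation pressure of ice from the correlation
# above. Judging from p_star = 611.657E-6 (the triple-point pressure expressed in
# MPa), the return value is in MPa, so multiply by 1e6 to get Pa.
print(psubl_T(273.16) * 1e6)   # ~611.657 Pa at the triple point (theta = 1, exponent sums to 0)
print(psubl_T(230.0) * 1e6)    # sublimation pressure at 230 K, in Pa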
20,587
def map_key_values(f, dct):
    """
        Like map_with_obj but expects a key value pair returned from f and uses it to form a new dict
    :param f: Called with a key and value, returns a (new_key, new_value) pair
    :param dct: dict whose items are mapped
    :return: a new dict built from the returned pairs
    """
    return from_pairs(values(map_with_obj(f, dct)))
20,588
def plot_ppc( ax, length_plotters, rows, cols, figsize, animated, obs_plotters, pp_plotters, posterior_predictive, pp_sample_ix, kind, alpha, linewidth, mean, xt_labelsize, ax_labelsize, jitter, total_pp_samples, legend, markersize, animation_kwargs, num_pp_samples, ): """Matplotlib ppc plot.""" if ax is None: fig, axes = _create_axes_grid(length_plotters, rows, cols, figsize=figsize) else: axes = np.ravel(ax) if len(axes) != length_plotters: raise ValueError( "Found {} variables to plot but {} axes instances. They must be equal.".format( length_plotters, len(axes) ) ) if animated: fig = axes[0].get_figure() if not all([ax.get_figure() is fig for ax in axes]): raise ValueError("All axes must be on the same figure for animation to work") for i, ax_i in enumerate(axes): var_name, selection, obs_vals = obs_plotters[i] pp_var_name, _, pp_vals = pp_plotters[i] dtype = posterior_predictive[pp_var_name].dtype.kind # flatten non-specified dimensions obs_vals = obs_vals.flatten() pp_vals = pp_vals.reshape(total_pp_samples, -1) pp_sampled_vals = pp_vals[pp_sample_ix] if kind == "kde": plot_kwargs = {"color": "C5", "alpha": alpha, "linewidth": 0.5 * linewidth} if dtype == "i": plot_kwargs["drawstyle"] = "steps-pre" ax_i.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name)) if dtype == "f": plot_kde( obs_vals, label="Observed {}".format(var_name), plot_kwargs={"color": "k", "linewidth": linewidth, "zorder": 3}, fill_kwargs={"alpha": 0}, ax=ax_i, legend=legend, ) else: bins = get_bins(obs_vals) _, hist, bin_edges = histogram(obs_vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.plot( bin_edges, hist, label="Observed {}".format(var_name), color="k", linewidth=linewidth, zorder=3, drawstyle=plot_kwargs["drawstyle"], ) pp_densities = [] pp_xs = [] for vals in pp_sampled_vals: vals = np.array([vals]).flatten() if dtype == "f": pp_density, lower, upper = _fast_kde(vals) pp_x = np.linspace(lower, upper, len(pp_density)) pp_densities.append(pp_density) pp_xs.append(pp_x) else: bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) pp_densities.append(hist) pp_xs.append(bin_edges) if animated: animate, init = _set_animation( pp_sampled_vals, ax_i, dtype=dtype, kind=kind, plot_kwargs=plot_kwargs ) else: if dtype == "f": ax_i.plot(np.transpose(pp_xs), np.transpose(pp_densities), **plot_kwargs) else: for x_s, y_s in zip(pp_xs, pp_densities): ax_i.plot(x_s, y_s, **plot_kwargs) if mean: if dtype == "f": rep = len(pp_densities) len_density = len(pp_densities[0]) new_x = np.linspace(np.min(pp_xs), np.max(pp_xs), len_density) new_d = np.zeros((rep, len_density)) bins = np.digitize(pp_xs, new_x, right=True) new_x -= (new_x[1] - new_x[0]) / 2 for irep in range(rep): new_d[irep][bins[irep]] = pp_densities[irep] ax_i.plot( new_x, new_d.mean(0), color="C0", linestyle="--", linewidth=linewidth, zorder=2, label="Posterior predictive mean {}".format(pp_var_name), ) else: vals = pp_vals.flatten() bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.plot( bin_edges, hist, color="C0", linewidth=linewidth, label="Posterior predictive mean {}".format(pp_var_name), zorder=2, linestyle="--", drawstyle=plot_kwargs["drawstyle"], ) ax_i.tick_params(labelsize=xt_labelsize) ax_i.set_yticks([]) elif kind == "cumulative": drawstyle = "default" if dtype == "f" else "steps-pre" ax_i.plot( *_empirical_cdf(obs_vals), color="k", linewidth=linewidth, label="Observed {}".format(var_name), 
drawstyle=drawstyle, zorder=3 ) if animated: animate, init = _set_animation( pp_sampled_vals, ax_i, kind=kind, alpha=alpha, drawstyle=drawstyle, linewidth=linewidth, ) else: pp_densities = np.empty((2 * len(pp_sampled_vals), pp_sampled_vals[0].size)) for idx, vals in enumerate(pp_sampled_vals): vals = np.array([vals]).flatten() pp_x, pp_density = _empirical_cdf(vals) pp_densities[2 * idx] = pp_x pp_densities[2 * idx + 1] = pp_density ax_i.plot( *pp_densities, alpha=alpha, color="C5", drawstyle=drawstyle, linewidth=linewidth ) ax_i.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name)) if mean: ax_i.plot( *_empirical_cdf(pp_vals.flatten()), color="C0", linestyle="--", linewidth=linewidth, drawstyle=drawstyle, label="Posterior predictive mean {}".format(pp_var_name) ) ax_i.set_yticks([0, 0.5, 1]) elif kind == "scatter": if mean: if dtype == "f": plot_kde( pp_vals.flatten(), plot_kwargs={ "color": "C0", "linestyle": "--", "linewidth": linewidth, "zorder": 3, }, label="Posterior predictive mean {}".format(pp_var_name), ax=ax_i, legend=legend, ) else: vals = pp_vals.flatten() bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.plot( bin_edges, hist, color="C0", linewidth=linewidth, label="Posterior predictive mean {}".format(pp_var_name), zorder=3, linestyle="--", drawstyle="steps-pre", ) _, limit = ax_i.get_ylim() limit *= 1.05 y_rows = np.linspace(0, limit, num_pp_samples + 1) jitter_scale = y_rows[1] - y_rows[0] scale_low = 0 scale_high = jitter_scale * jitter obs_yvals = np.zeros_like(obs_vals, dtype=np.float64) if jitter: obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals)) ax_i.plot( obs_vals, obs_yvals, "o", color="C0", markersize=markersize, alpha=alpha, label="Observed {}".format(var_name), zorder=4, ) if animated: animate, init = _set_animation( pp_sampled_vals, ax_i, kind=kind, height=y_rows.mean() * 0.5, markersize=markersize, ) else: for vals, y in zip(pp_sampled_vals, y_rows[1:]): vals = np.ravel(vals) yvals = np.full_like(vals, y, dtype=np.float64) if jitter: yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals)) ax_i.plot( vals, yvals, "o", zorder=2, color="C5", markersize=markersize, alpha=alpha ) ax_i.plot([], "C5o", label="Posterior predictive {}".format(pp_var_name)) ax_i.set_yticks([]) if var_name != pp_var_name: xlabel = "{} / {}".format(var_name, pp_var_name) else: xlabel = var_name ax_i.set_xlabel(make_label(xlabel, selection), fontsize=ax_labelsize) if legend: if i == 0: ax_i.legend(fontsize=xt_labelsize * 0.75) else: ax_i.legend([]) if animated: ani = animation.FuncAnimation( fig, animate, np.arange(0, num_pp_samples), init_func=init, **animation_kwargs ) return axes, ani else: return axes
20,589
def startGroup():
    """
    StartGroup used at the start of a group operation or secondary function.
    """
    global stackFlag, errCode, networkSet, netSetStack, redoStack
    redoStack = TStack()
    netSetStack.push(networkSet)
    stackFlag = False
20,590
def wait_for_es(es_url: str, logger): """Wait for es to come up by polling.""" logger.info("Waiting for es...") es = elasticsearch.Elasticsearch(hosts=[es_url]) while not es.ping(): time.sleep(0.5)
20,591
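A small usage sketch for wait_for_es, assuming the module-level imports the snippet implies (elasticsearch, time) and a node at the hypothetical address http://localhost:9200.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("startup")

# Blocks until the node answers ping(), polling every 0.5 s.
wait_for_es("http://localhost:9200", logger)
logger.info("Elasticsearch is reachable.")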
def load_module(name): """Load the named module without registering it in ``sys.modules``. Parameters ---------- name : string Module name Returns ------- mod : module Loaded module """ spec = importlib.util.find_spec(name) mod = importlib.util.module_from_spec(spec) mod.__spec__ = spec mod.__loader__ = spec.loader spec.loader.exec_module(mod) return mod
20,592
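A quick usage sketch showing that load_module returns a fresh module object without touching the global registry; the standard-library textwrap module is used purely as an example.

import sys
import textwrap  # regular import, registered in sys.modules

fresh = load_module("textwrap")

assert fresh is not sys.modules["textwrap"]  # an independent module object
assert fresh.dedent("    hi") == "hi"        # but it behaves like the normal module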
def make_no_graph_input_fn(graph_data, args, treatments, outcomes, filter_test=False):
    """
    Build an input_fn producing a dataset with all the label processing, but no graph
    structure. Used at evaluation and prediction time.
    """

    def input_fn():
        vertex_dataset = tf.data.Dataset.from_tensor_slices(
            ({'vertex_index': np.expand_dims(np.array(range(graph_data.num_vertices)), 1),
              'is_positive': np.expand_dims(np.array(range(graph_data.num_vertices)), 1)},))

        data_processing = adapters.compose(
            adapters.append_vertex_labels(treatments, 'treatment'),
            adapters.append_vertex_labels(outcomes, 'outcome'),
            adapters.make_split_vertex_labels(
                graph_data.num_vertices, args.proportion_censored,
                np.random.RandomState(args.seed)),
            adapters.format_features_labels())

        dataset = vertex_dataset.map(data_processing, num_parallel_calls=8)

        if filter_test:
            def filter_test_fn(features, labels):
                return tf.equal(tf.squeeze(features['in_test']), 1)

            dataset = dataset.filter(filter_test_fn)

        batch_size = args.batch_size
        dataset = dataset.batch(batch_size=batch_size, drop_remainder=False)

        return dataset

    return input_fn
20,593
def compute_gradient_penalty(D, real_samples, fake_samples):
    """Calculate the gradient penalty loss for WGAN-GP."""
    # Random weight term for interpolation between real and fake samples
    alpha = torch.tensor(np.random.random((real_samples.size(0), 1, 1, 1, 1)),
                         dtype=real_samples.dtype, device=real_samples.device)
    # Get random interpolation between real and fake samples
    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
    d_interpolates = D(interpolates)
    # Gradient outputs of ones, matching the critic output shape
    fake = torch.ones_like(d_interpolates)
    # Get gradient w.r.t. interpolates
    gradients = autograd.grad(
        outputs=d_interpolates,
        inputs=interpolates,
        grad_outputs=fake,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
20,594
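A usage sketch for compute_gradient_penalty with a toy critic. The 5-D alpha shape above implies volumetric inputs of shape (N, C, D, H, W), so the example feeds small random volumes. The critic architecture and the penalty weight of 10 are assumptions for illustration, not part of the original script, which is expected to import numpy as np, torch, and torch.autograd as autograd at module level.

import torch
import torch.nn as nn

# Toy critic for (N, 1, 8, 8, 8) volumes -- a stand-in for the real D.
D = nn.Sequential(nn.Flatten(), nn.Linear(8 * 8 * 8, 1))

real = torch.randn(4, 1, 8, 8, 8)
fake = torch.randn(4, 1, 8, 8, 8)

gp = compute_gradient_penalty(D, real, fake)
d_loss = -D(real).mean() + D(fake).mean() + 10.0 * gp  # 10 is the usual WGAN-GP weight
d_loss.backward()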
def prune_min(t):
    """Prune the tree mutatively from the bottom up.

    >>> t1 = Tree(6)
    >>> prune_min(t1)
    >>> t1
    Tree(6)
    >>> t2 = Tree(6, [Tree(3), Tree(4)])
    >>> prune_min(t2)
    >>> t2
    Tree(6, [Tree(3)])
    >>> t3 = Tree(6, [Tree(3, [Tree(1), Tree(2)]), Tree(5, [Tree(3), Tree(4)])])
    >>> prune_min(t3)
    >>> t3
    Tree(6, [Tree(3, [Tree(1)])])
    """
    if t.is_leaf():
        return
    # Keep only the branch with the smaller label, then prune within it.
    if t.branches[0].label < t.branches[1].label:
        t.branches = [t.branches[0]]
    else:
        t.branches = [t.branches[1]]
    prune_min(t.branches[0])
20,595
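prune_min relies on a Tree class that is not part of the snippet. A minimal stand-in that satisfies the doctests (the actual course Tree class is richer) could look like this:

class Tree:
    def __init__(self, label, branches=None):
        self.label = label
        self.branches = list(branches) if branches else []

    def is_leaf(self):
        return not self.branches

    def __repr__(self):
        if self.branches:
            return 'Tree({0}, {1})'.format(self.label, self.branches)
        return 'Tree({0})'.format(self.label)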
def _perform_aggregation(resource, pipeline, options):
    """
    .. versionadded:: 0.7
    """
    # TODO move most of this down to the Mongo layer?
    # TODO experiment with cursor.batch_size as alternative pagination
    # implementation

    def parse_aggregation_stage(d, key, value):
        for st_key, st_value in d.items():
            if isinstance(st_value, dict):
                parse_aggregation_stage(st_value, key, value)
            if key == st_value:
                d[st_key] = value

    response = {}
    documents = []
    req = parse_request(resource)
    req_pipeline = copy.deepcopy(pipeline)

    if req.aggregation:
        try:
            query = json.loads(req.aggregation)
        except ValueError:
            abort(400, description='Aggregation query could not be parsed.')

        for key, value in query.items():
            if key[0] != '$':
                pass
            for stage in req_pipeline:
                parse_aggregation_stage(stage, key, value)

    if req.max_results > 1:
        limit = {"$limit": req.max_results}
        skip = {"$skip": (req.page - 1) * req.max_results}
        req_pipeline.append(skip)
        req_pipeline.append(limit)

    cursor = app.data.aggregate(resource, req_pipeline, options)

    for document in cursor:
        documents.append(document)

    response[config.ITEMS] = documents

    # PyMongo's CommandCursor does not return a count, so we cannot
    # provide pagination/total count info as we do with a normal (non-aggregate)
    # GET request.

    return response, None, None, 200, []
20,596
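The heart of _perform_aggregation is the nested parse_aggregation_stage helper, which walks each pipeline stage and replaces placeholder values with those supplied in the client's aggregation query. A standalone re-implementation of that walk, shown only to illustrate the substitution; the stage contents and placeholder name are made up:

def substitute(stage, key, value):
    # Recursively replace any value equal to `key` with `value`.
    for st_key, st_value in stage.items():
        if isinstance(st_value, dict):
            substitute(st_value, key, value)
        if key == st_value:
            stage[st_key] = value

stage = {"$match": {"score": {"$gte": "$min_score"}}}
substitute(stage, "$min_score", 80)
print(stage)  # {'$match': {'score': {'$gte': 80}}}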
def email_role(role, subject, html=None, plain=None):
    """Send an email to the given role's address."""
    if role.email is None:
        log.info("Role [%r]: does not have email.", role)
        return
    try:
        sender = "%s <%s>" % (settings.APP_TITLE, settings.MAIL_FROM)
        subject = "[%s] %s" % (settings.APP_TITLE, subject)
        msg = Message(subject=subject, sender=sender, recipients=[role.email])
        msg.body = plain
        msg.html = html
        mail.send(msg)
    except Exception as exc:
        log.error("Error sending email [%r]: %s", role, exc)
20,597
def tab(num): """ Get tab indentation. Parameters ---------- num : int indentation depth """ return num * 4 * " "
20,598
def merge_sort(lst):
    """Return a list of lst's elements in ascending order; lst itself is left unmodified."""
    if len(lst) < 2:
        return lst
    half = len(lst) // 2
    # Slicing copies the sublists, so this variant does O(N log N) total copying
    # (peak extra memory is still O(N)); an index-based variant avoids the copies.
    return merge(merge_sort(lst[:half]), merge_sort(lst[half:]))
20,599
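merge_sort calls a merge helper that is not included in the snippet. One straightforward way to write it, as an assumption about the missing function consistent with how it is called:

def merge(left, right):
    """Merge two sorted lists into a single sorted list."""
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])   # at most one of these two extends is non-empty
    result.extend(right[j:])
    return result

print(merge_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]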