Columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def validate_get_arguments(kwargs):
    # type: (Dict[Text, Any]) -> None
    """Verify that attribute filtering parameters are not found in the request.

    :raises InvalidArgumentError: if banned parameters are found
    """
    for arg in ("AttributesToGet", "ProjectionExpression"):
        if arg in kwargs:
            raise InvalidArgumentError('"{}" is not supported for this operation'.format(arg))
    if kwargs.get("Select", None) in ("SPECIFIC_ATTRIBUTES", "ALL_PROJECTED_ATTRIBUTES"):
        raise InvalidArgumentError('Scan "Select" value of "{}" is not supported'.format(kwargs["Select"]))
13,600
def _get_transmission(self, d, E='config'):
    """Calculate the transmission after thickness d (in m) of material at energy E (in eV)."""
    return np.exp(-d * 1e6 / self.absorption_length(E))
13,601
def calculate_density( input_layer, field=None, cell_size=None, cell_size_units="Meters", radius=None, radius_units=None, bounding_polygon_layer=None, area_units=None, classification_type="EqualInterval", num_classes=10, output_name=None, context=None, gis=None, estimate=False, future=False): """ .. image:: _static/images/cal_density_standard/calculate_density.png The calculate_density function creates a density map from point or line features by spreading known quantities of some phenomenon (represented as attributes of the points or lines) across the map. The result is a layer of areas classified from least dense to most dense. For point input, each point should represent the location of some event or incident, and the result layer represents a count of the incident per unit area. A higher density value in a new location means that there are more points near that location. In many cases, the result layer can be interpreted as a risk surface for future events. For example, if the input points represent locations of lightning strikes, the result layer can be interpreted as a risk surface for future lightning strikes. For line input, the line density surface represents the total amount of line that is near each location. The units of the calculated density values are the length of line per unit area. For example, if the lines represent rivers, the result layer will represent the total length of rivers that are within the search radius. This result can be used to identify areas that are hospitable to grazing animals. ========================= ========================================================= **Argument** **Description** ------------------------- --------------------------------------------------------- input_layer Required layer. The point or line features from which to calculate density. See :ref:`Feature Input<FeatureInput>`. ------------------------- --------------------------------------------------------- field Optional string. A numeric field name specifying the number of incidents at each location. For example, if you have points that represent cities, you can use a field representing the population of the city as the count field, and the resulting population density layer will calculate larger population densities near cities with larger populations. If not specified, each location will be assumed to represent a single count. ------------------------- --------------------------------------------------------- cell_size Optional float. This value is used to create a mesh of points where density values are calculated. The default is approximately 1/1000th of the smaller of the width and height of the analysis extent as defined in the context parameter. The smaller the value, the smoother the polygon boundaries will be. Conversely, with larger values, the polygon boundaries will be more coarse and jagged. ------------------------- --------------------------------------------------------- cell_size_units Optional string. The units of the cell_size value. Choice list: ['Miles', 'Feet', 'Kilometers', 'Meters'] ------------------------- --------------------------------------------------------- radius Optional float. A distance specifying how far to search to find point or line features when calculating density values. ------------------------- --------------------------------------------------------- radius_units Optional string. The units of the radius parameter. 
If no distance is provided, a default will be calculated that is based on the locations of the input features and the values in the count field (if a count field is provided). Choice list: ['Miles', 'Feet', 'Kilometers', 'Meters'] ------------------------- --------------------------------------------------------- bounding_polygon_layer Optional layer. A layer specifying the polygon(s) where you want densities to be calculated. For example, if you are interpolating densities of fish within a lake, you can use the boundary of the lake in this parameter and the output will only draw within the boundary of the lake. See :ref:`Feature Input<FeatureInput>`. ------------------------- --------------------------------------------------------- area_units Optional string. The units of the calculated density values. Choice list: ['areaUnits', 'SquareMiles'] ------------------------- --------------------------------------------------------- classification_type Optional string. Determines how density values will be classified into polygons. Choice list: ['EqualInterval', 'GeometricInterval', 'NaturalBreaks', 'EqualArea', 'StandardDeviation'] * EqualInterval - Polygons are created such that the range of density values is equal for each area. * GeometricInterval - Polygons are based on class intervals that have a geometric series. This method ensures that each class range has approximately the same number of values within each class and that the change between intervals is consistent. * NaturalBreaks - Class intervals for polygons are based on natural groupings of the data. Class break values are identified that best group similar values and that maximize the differences between classes. * EqualArea - Polygons are created such that the size of each area is equal. For example, if the result has more high density values than low density values, more polygons will be created for high densities. * StandardDeviation - Polygons are created based upon the standard deviation of the predicted density values. ------------------------- --------------------------------------------------------- num_classes Optional int. This value is used to divide the range of predicted values into distinct classes. The range of values in each class is determined by the classification_type parameter. ------------------------- --------------------------------------------------------- output_name Optional string. Additional properties such as output feature service name. ------------------------- --------------------------------------------------------- context Optional string. Additional settings such as processing extent and output spatial reference. For calculate_density, there are two settings. #. Extent (extent)-a bounding box that defines the analysis area. Only those points in the input_layer that intersect the bounding box will be analyzed. #. Output Spatial Reference (outSR) the output features will be projected into the output spatial reference. ------------------------- --------------------------------------------------------- gis Optional, the GIS on which this tool runs. If not specified, the active GIS is used. ------------------------- --------------------------------------------------------- estimate Optional Boolean. Is true, the number of credits needed to run the operation will be returned as a float. ------------------------- --------------------------------------------------------- future Optional boolean. If True, the result will be a GPJob object and results will be returned asynchronously. 
========================= ========================================================= :returns: result_layer : feature layer Item if output_name is specified, else Feature Collection. .. code-block:: python USAGE EXAMPLE: To create a layer that shows density of collisions within 2 miles. The density is classified based upon the standard deviation. The range of density values is divided into 5 classes. collision_density = calculate_density(input_layer=collisions, radius=2, radius_units='Miles', bounding_polygon_layer=zoning_lyr, area_units='SquareMiles', classification_type='StandardDeviation', num_classes=5, output_name='density_of_incidents') """ gis = _arcgis.env.active_gis if gis is None else gis return gis._tools.featureanalysis.calculate_density( input_layer, field, cell_size, cell_size_units, radius, radius_units, bounding_polygon_layer, area_units, classification_type, num_classes, output_name, context, estimate=estimate, future=future)
13,602
def reverse(operation):
    """Decorator that returns sa.not_ of the wrapped operation's result."""
    def decorated(*args, **kwargs):
        return sqlalchemy.not_(operation(*args, **kwargs))
    return decorated
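A minimal usage sketch (the `is_active` filter and `users` table are hypothetical; `sqlalchemy` is assumed to be imported by the snippet's module):

@reverse
def is_active(column):
    # builds a SQLAlchemy boolean expression for "column IS TRUE"
    return column.is_(True)

# is_active(users.c.active) now evaluates to sqlalchemy.not_(users.c.active.is_(True))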
13,603
def disable_static_generator(view_func):
    """Decorator which prevents caching the response from a view on disk

    Flags the view with a ``disable_static_generator`` attribute so
    staticgenerator won't ever save its response on the filesystem.

    Example::

        @disable_static_generator
        def myview(request):
            # ...
    """
    # We could just do view_func.disable_static_generator = True, but
    # decorators are nicer if they don't have side-effects, so we return a new
    # function.
    def wrapped_view(*args, **kwargs):
        return view_func(*args, **kwargs)
    wrapped_view.disable_static_generator = True
    return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
13,604
def create_lengths(text):
    """Create a data frame of the sentence lengths from a text"""
    lengths = []
    for sentence in tqdm(text):
        lengths.append(len(sentence))
    return pd.DataFrame(lengths, columns=['counts'])
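A small usage sketch (assumes `pandas` and `tqdm` are installed, as the snippet's `pd`/`tqdm` names imply):

# Distribution of token counts for a handful of tokenized sentences.
sentences = [[1, 5, 9], [2, 7], [3, 3, 3, 3]]
length_df = create_lengths(sentences)
print(length_df['counts'].tolist())   # [3, 2, 4]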
13,605
def basic_auth_string(key, value):
    """Returns basic auth string from key and value"""
    key_pass = b":".join((_to_bytes(key), _to_bytes(value)))
    token = b64encode(key_pass).decode()
    return f"Basic {token}"
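For illustration (assumes the module's `_to_bytes` helper and its `b64encode` import are in scope):

# Build the Authorization header value for HTTP Basic auth.
headers = {"Authorization": basic_auth_string("api_key", "s3cret")}
# -> {'Authorization': 'Basic YXBpX2tleTpzM2NyZXQ='}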
13,606
def main(argv = None): """ Main function for the ``amplimap`` executable. This function: - parses command line arguments - reads, merges and checks each of these config files, if they exist: + ``config_default.yaml`` in the amplimap package + ``/etc/amplimap/VERSION/config.yaml`` (where VERSION is the amplimap version) + ``$AMPLIMAP_CONFIG`` + ``config.yaml`` in the working directory - checks for an existing analysis directory (and compares the amplimap version used to create it) - adds its own parent directory to the config file (to be inserted back into the python path inside Snakemake) - creates an analysis directory - writes ``config_used.yaml`` to the new analysis directory - creates a ``cluster_log`` directory (if running in cluster mode) - launches Snakemake, using the amplimap Snakefile, ``config_used.yaml`` as the config file and cluster parameters as specified in the command line arguments and config. """ try: basedir = os.path.dirname(os.path.realpath(__file__)) # parse the arguments, which will be available as properties of args (e.g. args.probe) parser = argparse.ArgumentParser( description = "amplimap v{} - amplicon mapping and analysis pipeline".format(__version__), formatter_class = argparse.ArgumentDefaultsHelpFormatter) # specify parameters parser.add_argument("-v", "--version", help="print version and exit", action="store_true") parser.add_argument("--basedir", help="print basedir and exit", action="store_true") parser.add_argument("--print-config", help="print configuration (including global and local settings) and exit", action="store_true") parser.add_argument("-r", "--run", help="actually run (will perform a dry run otherwise)", action="store_true") parser.add_argument("--resume", help="resume analysis in existing analysis directory", action="store_true") parser.add_argument("--cluster", help="specify a cluster type defined in your configuration files to run jobs on cluster.") parser.add_argument("--skip-file-check", help="skip check for changes in input files when resuming (not recommended)", action="store_true") parser.add_argument("--unlock", help="unlock working directory (Snakemake parameter)", action="store_true") parser.add_argument("--working-directory", help="path to the working directory", default=".") parser.add_argument("--ncores", help="number of local cores to run in parallel (only applies if --cluster is NOT set)", default=1, type=int) parser.add_argument("--njobs", help="number of cluster jobs to run in parallel (only applies if --cluster is set)", default=10, type=int) parser.add_argument("--latency-wait", help="How long to wait for output files to appear after job completes. Increase this if you get errors about missing output files. (Snakemake parameter)", default=5, type=int) parser.add_argument("--snakemake-args", help="For debugging: Extra arguments to the snakemake function (comma-separated key=value pairs - eg. 'printreason=True')") parser.add_argument("--debug", help="debug mode", action="store_true") # parser.add_argument("--debug-dag", help="debug DAG", action="store_true") parser.add_argument("TARGET", help="targets to run (eg. 
pileups variants coverages)", nargs="*") if argv is None: args = parser.parse_args() else: args = parser.parse_args(argv) if args.debug: print('Incoming argv: {}'.format(str(argv))) print('Targets: {}'.format(str(args.TARGET))) if args.version: print('{} {}'.format(__title__, __version__)) return 0 if args.basedir: print(basedir) return 0 # read base config to know which parameters etc are allowed default_config = read_config_file(args.print_config, os.path.join(basedir, 'config_default.yaml')) if not default_config: raise Exception('config_default.yaml file missing') # add undocumented config keys to make sure these don't raise an error for key in ['include_gbrowse_links', 'include_exon_distance', 'include_score']: if not key in default_config['annotate']: default_config['annotate'][key] = False # override with data from /etc/amplimap, if exists etc_config = read_config_file(args.print_config, '/etc/amplimap/%s/config.yaml' % __version__) # override with data from $AMPLIMAP_CONFIG, if exists env_config = {} try: env_config = read_config_file(args.print_config, os.environ['AMPLIMAP_CONFIG']) except KeyError: pass # read local config local_config = read_config_file(args.print_config, os.path.join(args.working_directory, 'config.yaml')) if not local_config: if args.print_config: sys.stderr.write('No local config.yaml found, using default configuration.\n') # merge configs together config = default_config for my_config in [etc_config, env_config, local_config]: # check that all settings actually exist differences = check_config_keys(default_config, my_config) # allow custom tools allowed_tools = set(default_config['tools'].keys()) if 'tools' in my_config: allowed_tools.update(my_config['tools'].keys()) differences = [ d for d in differences if not (len(d) == 2 and d[0] in ['modules'] and d[1] in allowed_tools) and d[0] != 'tools' ] if len(differences) > 0: sys.stderr.write('Your configuration file(s) contain unknown or invalid settings:\n') for diff in differences: sys.stderr.write('\t- {}\n'.format(':'.join(diff))) sys.stderr.write('Please check their spelling and location and try again.\n') return 1 snakemake.utils.update_config(config, my_config) # check basic config aligners = ['naive', 'bwa', 'bowtie2', 'star'] # allowed values for the aligner # add custom tools for tool_name, tool_config in config['tools'].items(): if 'align_command' in tool_config: aligners.append(tool_name) if not config['align']['aligner'] in aligners: raise Exception('align: aligner must be one of {}'.format(','.join(aligners))) callers = ['gatk', 'platypus', 'wecall'] # allowed values for the variant caller # add custom tools for tool_name, tool_config in config['tools'].items(): if 'call_command' in tool_config: callers.append(tool_name) if not config['variants']['caller'] in callers: raise Exception('variants: caller must be one of {}'.format(','.join(callers))) if config['parse_reads']['quality_trim_threshold'] != False: if not isinstance(config['parse_reads']['quality_trim_threshold'], float): raise Exception('quality_trim_threshold must be a decimal number') if not config['parse_reads']['quality_trim_threshold'] > 0 and config['parse_reads']['quality_trim_threshold'] < 1: raise Exception('quality_trim_threshold must be either "false" or above 0 and below 1') if not (config['general']['umi_min_consensus_percentage'] >= 0 and config['general']['umi_min_consensus_percentage'] <= 100): raise Exception('umi_min_consensus_percentage must be between 0 and 100') if not (config['parse_reads']['min_percentage_good'] >= 
0 and config['parse_reads']['min_percentage_good'] <= 100): raise Exception('min_percentage_good must be between 0 and 100') if not (config['parse_reads']['umi_one'] >= 0 and config['parse_reads']['umi_two'] >= 0): raise Exception('umi_one and umi_two must be 0 or greater') if config['annotate']['annovar']['protocols'].count(',') != config['annotate']['annovar']['operations'].count(','): raise Exception('The number of comma-separated protocols and operations under `annotate: annovar:` must match') # if we don't have UMIs (either on reads or as bam tag) we definitely have to ignore them # this makes it possible to "auto-detect" whether we need to ignore_umis or not if (config['parse_reads']['umi_one'] + config['parse_reads']['umi_two'] == 0) and config['general']['umi_tag_name'] == "": config['general']['ignore_umis'] = True # check we have proper paths if not config['general']['genome_name'] in config['paths'] or not isinstance(config['paths'][config['general']['genome_name']], dict): raise Exception('Could not find list of paths for genome_name: "{}". Please add the paths to your default configuration or your local config.yaml file.'.format(config['general']['genome_name'])) for name, path in config['paths'][config['general']['genome_name']].items(): if path.startswith('/PATH/TO/'): raise Exception('Path for {} reference is set to {}, which is probably incorrect. Please set the correct path in your default configuration or your local config.yaml file, or leave it empty.'.format( name, path)) if args.print_config: yaml.dump(config, sys.stdout, default_flow_style=False) return 0 # do some basic checks assert os.path.isdir(args.working_directory), 'working directory does not exist' # check for one (and only one) input directory input_directory = None input_directory_count = 0 input_directories = ['reads_in', 'unmapped_bams_in', 'mapped_bams_in', 'bams_in'] for input_name in input_directories: if os.path.isdir(os.path.join(args.working_directory, input_name)): input_directory_count += 1 input_directory = input_name if input_directory_count < 1: raise Exception( 'An input directory (one of: %s) needs to exist. Please see the documentation for the appropriate directory to use and place your sequencing data there.' % (', '.join(input_directories)) ) elif input_directory_count > 1: raise Exception( 'More than one of the possible input directories (%s) exists. Please only provide a single input directory with all your data.' % (', '.join(input_directories)) ) if input_directory in ['unmapped_bams_in', 'mapped_bams_in']: if not config['general']['use_raw_reads']: raise Exception( 'general: use_raw_reads needs to be set to true when using %s for input.' 
% (input_directory) ) # check input files sys.stderr.write('Checking input files...\n') if os.path.isfile(os.path.join(args.working_directory, 'probes.csv')): read_new_probe_design(os.path.join(args.working_directory, 'probes.csv'), reference_type = 'genome') if os.path.isfile(os.path.join(args.working_directory, 'probes_mipgen.csv')): process_probe_design(read_and_convert_mipgen_probes(os.path.join(args.working_directory, 'probes_mipgen.csv'))) if os.path.isfile(os.path.join(args.working_directory, 'picked_mips.txt')): process_probe_design(read_and_convert_mipgen_probes(os.path.join(args.working_directory, 'picked_mips.txt'), sep='\t')) if os.path.isfile(os.path.join(args.working_directory, 'probes_heatseq.tsv')): process_probe_design(read_and_convert_heatseq_probes(os.path.join(args.working_directory, 'probes_heatseq.tsv'))) if os.path.isfile(os.path.join(args.working_directory, 'targets.csv')): # note: this will fail on overlapping targets read_targets(os.path.join(args.working_directory, 'targets.csv'), check_overlaps=True, reference_type = 'genome', file_type = 'csv') if os.path.isfile(os.path.join(args.working_directory, 'targets.bed')): # note: this will fail on overlapping targets read_targets(os.path.join(args.working_directory, 'targets.bed'), check_overlaps=True, reference_type = 'genome', file_type = 'bed') if os.path.isfile(os.path.join(args.working_directory, 'snps.txt')): read_snps_txt(os.path.join(args.working_directory, 'snps.txt'), reference_type = 'genome') # this will be used to (very hackily) make sure amplimap can be imported as amplimap.xxx # by adding the parent dir to the top of sys.path in the Snakefile config['general']['amplimap_parent_dir'] = os.path.dirname(basedir) # check if analysis dir exists already analysis_dir = os.path.join(args.working_directory, 'analysis') configfile = os.path.join(analysis_dir, 'config_used.yaml') used_versions_path = os.path.join(analysis_dir, 'versions.yaml') # the analysis dir may exist just because we did a dry run, but once the versions exist we actually executed snakemake! if os.path.exists(analysis_dir) and os.path.exists(used_versions_path): if not args.resume: raise Exception('An analysis directory already exists. Please rename it or set --resume to reuse it and possibly overwrite existing files.') else: # check version if os.path.isfile(used_versions_path): with open(used_versions_path, 'r') as used_versions_file: used_versions = yaml.safe_load(used_versions_file.read()) if used_versions['_amplimap'] != str(__version__): sys.stderr.write('This analysis was performed with {} {} but this is {} {}!\n\n'.format(__title__, used_versions['_amplimap'], __title__, __version__)) sys.stderr.write('Please use the correct version of {} or start a new analysis.\n'.format(__title__)) return 1 else: sys.stderr.write('{} version checked.\n'.format(__title__)) # check used config file if os.path.isfile(configfile): with open(configfile, 'r') as used_config_file: used_config = yaml.safe_load(used_config_file.read()) differences = compare_config_dicts(config, used_config) if len(differences) > 0: sys.stderr.write('config_used.yaml in analysis directory differs from current config.yaml in working directory! 
Please rename or delete the old analysis directory to restart analysis with the new configuration.\n') sys.stderr.write('Different settings:\n') for diff in differences: sys.stderr.write('\t- {}\n'.format(':'.join(diff))) return 1 else: sys.stderr.write('Config files checked.\n') # check hashes of input files if not args.skip_file_check: used_file_hashes_path = os.path.join(analysis_dir, 'file_hashes.yaml') if os.path.isfile(used_file_hashes_path): with open(used_file_hashes_path, 'r') as used_file_hashes_file: used_file_hashes = yaml.safe_load(used_file_hashes_file.read()) from .reader import get_file_hashes for fn, current_hash in get_file_hashes(args.working_directory).items(): if used_file_hashes[fn] != current_hash: sys.stderr.write('File {} seems to have changed since the last run!\n\n'.format(fn)) sys.stderr.write('To ensure consistent results, you should rename or delete the old analysis directory and start a new analysis.\n') sys.stderr.write('To ignore this error, add the --skip-file-check parameter.\n') return 1 sys.stderr.write('Input files checked.\n') else: sys.stderr.write('Warning: Skipping input file check.\n') # ensure analysis dir exists now os.makedirs(analysis_dir, exist_ok=True) # write config to analysis directory, and then use that for snakemake with open(configfile, 'w') as f: yaml.dump(config, f, default_flow_style=False) # set up cluster commands cluster_command_nosync = None cluster_command_sync = None if args.cluster: if args.cluster in config['clusters'] and isinstance(config['clusters'][args.cluster], dict): if 'command_sync' in config['clusters'][args.cluster]: cluster_command_sync = config['clusters'][args.cluster]['command_sync'] elif 'command_nosync' in config['clusters'][args.cluster]: cluster_command_nosync = config['clusters'][args.cluster]['command_nosync'] else: raise Exception('Invalid cluster configuration -- need either command_sync or command_nosync for: {}'.format(args.cluster)) else: raise Exception('Cluster type not found in config: {}'.format(args.cluster)) sys.stderr.write('Running in cluster mode {} with {} parallel jobs\n'.format(args.cluster, args.njobs)) sys.stderr.write('cluster_command_nosync={}\n'.format(cluster_command_nosync)) sys.stderr.write('cluster_command_sync={}\n'.format(cluster_command_sync)) # make sure cluster log directory exists (this assumed the cluster command is using this as a parameter) cluster_logs = os.path.join(args.working_directory, 'cluster_log') os.makedirs(cluster_logs, exist_ok=True) sys.stderr.write('Will write cluster logs to: {}\n'.format(cluster_logs)) else: sys.stderr.write('Running locally with {} cores\n'.format(args.ncores)) extra_snakemake_args = {} if args.snakemake_args: extra_snakemake_args = { kv[0]: (True if kv[1].lower() == 'true' else False if kv[1].lower() == 'false' else kv[1]) for kv in [ x.split('=') for x in args.snakemake_args.split(',') ] } sys.stderr.write('Using extra Snakemake arguments: {}\n'.format(str(extra_snakemake_args))) success = snakemake.snakemake( snakefile = os.path.join(basedir, "Snakefile"), configfile = configfile, cores = args.ncores, # ignored if cluster nodes = args.njobs, # ignored if not cluster workdir = args.working_directory, targets = args.TARGET, dryrun = not args.run, cluster = cluster_command_nosync, cluster_sync = cluster_command_sync, jobname = "{}.{{rulename}}.{{jobid}}.sh".format(__title__), unlock = args.unlock, latency_wait = args.latency_wait, **extra_snakemake_args ) sys.stderr.write('\n===============================================\n\n') if 
success: if args.unlock: sys.stderr.write('Unlocked working directory. Run without --unlock to start.\n') elif not args.run: sys.stderr.write('{} {} dry run successful. Set --run to run!\n'.format(__title__, __version__)) else: sys.stderr.write('{} {} finished!\n'.format(__title__, __version__)) return 0 else: if args.cluster: sys.stderr.write('{} {} failed! Please see output above or the cluster log files for details.\n'.format(__title__, __version__)) sys.stderr.write('\nFor details on how to find the correct cluster log file for a failed job, see: https://amplimap.readthedocs.io/en/latest/usage.html#cluster-log-files\n') sys.stderr.write('You can also try to run amplimap without the cluster parameter to see the error message.\n') else: sys.stderr.write('{} {} failed! Please see output above for details.\n'.format(__title__, __version__)) return 1 except AmplimapReaderException as e: if args.debug: import traceback traceback.print_exc() sys.stderr.write(str(e)) sys.stderr.write('{} {} failed!\n'.format(__title__, __version__)) return 2 except Exception as e: if args.debug: import traceback traceback.print_exc() sys.stderr.write('\nERROR: {}\n\n'.format(e)) sys.stderr.write('{} {} failed!\n'.format(__title__, __version__)) return 1
13,607
def measureit(_func=None, *, output: Output = None, number: int = 1):
    """
    Measure the energy consumption of monitored devices during the execution of the
    decorated function (if run multiple times, the mean energy is reported)

    :param output: output instance that will receive the power consumption data
    :param number: number of iterations in the loop, in case you need multiple runs or
                   the code is too fast to be measured
    """
    def decorator_measure_energy(func):

        @functools.wraps(func)
        def wrapper_measure(*args, **kwargs):
            sensor = Measurement(func.__name__, output)
            sensor.begin()
            for i in range(number):
                val = func(*args, **kwargs)
            sensor.end()
            sensor._results = sensor._results / number
            sensor.export()
            return val
        return wrapper_measure

    if _func is None:
        # support calling the decorator both with and without parameters
        return decorator_measure_energy
    else:
        return decorator_measure_energy(_func)
13,608
def plot_distribution(dof_names, mean, upper_bound, lower_bound):
    """Plots a given probability distribution."""
    figures_per_plot = np.min([4, mean.shape[0]])
    for index in range(mean.shape[0]):
        if (index % figures_per_plot == 0):
            fig = plt.figure()
        new_plot = plt.subplot(figures_per_plot, 1, (index % figures_per_plot) + 1)
        domain = np.linspace(0, 1, mean.shape[1])
        new_plot.fill_between(domain, upper_bound[index], lower_bound[index], color='#ccf5ff')
        new_plot.plot(domain, mean[index], color='#000000')
        new_plot.set_title('Trajectory distribution for degree ' + dof_names[index])
        fig.tight_layout()
    plt.show(block=False)
13,609
def csv_template(n_types, n_type_covariates, initialize_coeffs=True):
    """Creates a template for the parameter specification.

    Parameters
    ----------
    n_types : int, optional
        Number of types in the model. Default is one.
    n_type_covariates : int, optional
        Number of covariates to predict type probabilities. Can be two or three.
    initialize_coeffs : bool, optional
        Whether coefficients are initialized with values or not. Default is ``True``.

    """
    template = _base_template()
    if n_types > 1:
        to_concat = [
            template,
            _type_prob_template(n_types, n_type_covariates),
            _type_shift_template(n_types),
        ]
        template = pd.concat(to_concat, axis=0, sort=False)
    if initialize_coeffs is False:
        template["value"] = np.nan
    return template
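A short usage sketch (relies on the module's private `_base_template`/`_type_prob_template`/`_type_shift_template` helpers being importable alongside the function):

# Template for a model with two unobserved types and two type covariates,
# leaving the coefficient values empty (NaN) for the user to fill in.
template = csv_template(n_types=2, n_type_covariates=2, initialize_coeffs=False)
print(template.head())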
13,610
def test_wpas_mesh_secure_no_auto(dev, apdev):
    """wpa_supplicant secure MESH network connectivity"""
    check_mesh_support(dev[0], secure=True)
    dev[0].request("SET sae_groups 19")
    id = add_mesh_secure_net(dev[0])
    dev[0].mesh_group_add(id)

    dev[1].request("SET sae_groups 19")
    id = add_mesh_secure_net(dev[1])
    dev[1].set_network(id, "no_auto_peer", "1")
    dev[1].mesh_group_add(id)

    check_mesh_joined_connected(dev, connectivity=True)

    dev[0].request("SET sae_groups ")
    dev[1].request("SET sae_groups ")
13,611
def get_surround(ppath, recordings, istate, win, signal_type, recalc_highres=False, tstart=0, tend=-1, ma_thr=20, ma_state=3, flatten_tnrem=False, nsr_seg=2, perc_overlap=0.95, null=False, null_win=[0.5,0.5], p_iso=0, pcluster=0, clus_event='waves', psave=False): """ Collect raw signal surrounding events @Params ppath - base folder recordings - list of recordings istate - brain state(s) to analyze win: time window (s) to collect data relative to the event signal_type: specifies the type of data to collect 'EEG', 'EEG2' --> raw hippocampal or prefrontal EEG 'SP', 'SP2' --> hippocampal or prefrontal SP 'SP_NORM', 'SP2_NORM' --> norm. hippocampal or prefrontal SP 'SP_CALC', 'SP2_CALC' --> calculate each SP using surrounding EEG 'SP_CALC_NORM', 'SP2_CALC_NORM' --> normalize calculated SP by whole SP mean 'LFP' --> processed LFP signal recalc_highres - if True, recalculate high-resolution spectrogram from EEG, using $nsr_seg and $perc_overlap params tstart, tend - time (s) into recording to start and stop collecting data ma_thr, ma_state - max duration and brain state for microarousals flatten_tnrem - brain state for transition sleep nsr_seg, perc_overlap - set FFT bin size (s) and overlap (%) for spectrogram calculation null - if True, also collect data surrounding randomized control points in $istate null_win - if > 0, qualifying "null" points must be free of P-waves and laser pulses in surrounding $null_win interval (s) if = 0, "null" points are randomly selected from all state indices p_iso, pcluster, clus_event - see SOMETHING ELSE psave - optional string specifying a filename to save the data (if False, data is not saved) @Returns p_signal - dictionary with brain states as keys, and sub-dictionaries as values Sub-dictionaries have mouse recordings as keys, with lists of 2D or 3D signals as values Signals (SPs, EEGs, or LFPs) represent the time window ($win s) surrounding each P-wave null_signal - dictionary structured as above, but containing signals surrounding each randomly selected control point data.shape - tuple with shape of the data from one trial """ import time START = time.perf_counter() # clean data inputs if not isinstance(recordings, list): recordings = [recordings] if not isinstance(istate, list): istate = [istate] if len(istate) == 0: istate = ['total'] brstate = 'total' p_signal = {s:{rec:[] for rec in recordings} for s in istate} # signal surrounding P-waves null_signal = {s:{rec:[] for rec in recordings} for s in istate} # signal surrounding randomized time points for rec in recordings: print('Getting P-waves for ' + rec + ' ...') p_signal = {s:[] for s in istate} # signal surrounding P-waves null_signal = {s:[] for s in istate} # signal surrounding randomized time points # load sampling rate sr = sleepy.get_snr(ppath, rec) nbin = int(np.round(sr) * 2.5) dt = (1.0 / sr) * nbin iwin1, iwin2 = get_iwins(win, sr) # load EEG and EEG2 EEG = so.loadmat(os.path.join(ppath, rec, 'EEG.mat'), squeeze_me=True)['EEG'] if os.path.exists(os.path.join(ppath, rec, 'EEG2.mat')): EEG2 = so.loadmat(os.path.join(ppath, rec, 'EEG2.mat'), squeeze_me=True)['EEG2'] # adjust Intan idx to properly translate to SP idx spi_adjust = np.linspace(-sr, sr, len(EEG)) # load or calculate entire high-res spectrogram # SP calculated using EEG2 if ('SP2' in signal_type) and (signal_type != 'SP2_CALC'): SP, f, t, sp_dt, sp_nbin, _ = AS.highres_spectrogram(ppath, rec, nsr_seg=nsr_seg, perc_overlap=perc_overlap, recalc_highres=recalc_highres, mode='EEG2') #sp_nbin = len(EEG) / SP.shape[1] sp_win1 = 
int(round(iwin1/sp_nbin)) sp_win2 = int(round(iwin2/sp_nbin)) # SP calculated using EEG elif ('SP' in signal_type) and (signal_type != 'SP_CALC'): SP, f, t, sp_dt, sp_nbin, _ = AS.highres_spectrogram(ppath, rec, nsr_seg=nsr_seg, perc_overlap=perc_overlap, recalc_highres=recalc_highres, mode='EEG') sp_win1 = int(round(iwin1/sp_nbin)) sp_win2 = int(round(iwin2/sp_nbin)) # calculate SP mean if '_NORM' in signal_type: SP_mean = SP.mean(axis=1) SP_norm = np.divide(SP, np.repeat([SP_mean], SP.shape[1], axis=0).T) # normalize entire spectrogram # load and adjust brain state annotation M, _ = sleepy.load_stateidx(ppath, rec) M = AS.adjust_brainstate(M, dt, ma_thr=ma_thr, ma_state=ma_state, flatten_tnrem=flatten_tnrem) # load LFP and P-wave indices LFP, p_idx = load_pwaves(ppath, rec) # isolate single or clustered P-waves if p_iso and pcluster: print('ERROR: cannot accept both p_iso and pcluster arguments') return elif p_iso: p_idx = get_p_iso(p_idx, sr, win=p_iso) elif pcluster: p_idx = get_pclusters(p_idx, sr, win=pcluster, return_event=clus_event) # define start and end points of analysis istart = int(np.round(tstart*sr)) if tend == -1: iend = len(EEG) - 1 else: iend = int(np.round(tend*sr)) for pi in p_idx: if pi >= iwin1 and pi + iwin2 < len(EEG) and istart <= pi <= iend: if istate[0] != 'total': brstate = int(M[int(pi/nbin)]) # get data of desired signal type if signal_type == 'EEG': data = EEG[pi-iwin1 : pi+iwin2] elif signal_type == 'EEG2': data = EEG2[pi-iwin1 : pi+iwin2] # calculate SP from EEG or EEG2 elif 'CALC' in signal_type: if 'SP2' in signal_type: tmp = EEG2[pi-iwin1 : pi+iwin2] elif 'SP' in signal_type: tmp = EEG[pi-iwin1 : pi+iwin2] f, t, data = scipy.signal.spectrogram(tmp, fs=sr, window='hanning', nperseg=int(nsr_seg*sr), noverlap=int(nsr_seg*sr*perc_overlap)) # normalize calculated SP based on entire recording if 'NORM' in signal_type: data = np.divide(data, np.repeat([SP_mean], data.shape[1], axis=0).T) # if not calculating, get SP or SP_NORM from whole recording calculation elif 'SP' in signal_type: spi = int(round((pi + spi_adjust[pi])/sp_nbin)) if 'NORM' in signal_type: data = SP_norm[:, spi-sp_win1 : spi+sp_win2] else: data = SP[:, spi-sp_win1 : spi+sp_win2] elif signal_type == 'LFP': data = LFP[pi-iwin1 : pi+iwin2] else: print(signal_type + ' IS AN INVALID SIGNAL TYPE') return # collect data in relevant dictionary if brstate in istate: p_signal[brstate].append(data) # collect signals surrounding random control time points if null: null_iwin1, null_iwin2 = get_iwins(null_win, sr) # sample "null" REM epochs with no P-waves/laser pulses if null_win != 0: # find all points that don't qualify as "null" not_null_idx = np.zeros((10000000)) for i, pi in enumerate(p_idx): p_win = np.arange(pi-null_iwin1, pi+null_iwin2) not_null_idx[i*len(p_win) : i*len(p_win)+len(p_win)] = p_win # get rid of trailing zeros (computational efficiency) not_null_idx = np.trim_zeros(not_null_idx, 'b') for s in istate: if istate[0] != 'total': # get array of all possible indices in state s sseq = sleepy.get_sequences(np.where(M==s)[0]) sseq_idx = [np.arange(seq[0]*nbin, seq[-1]*nbin+nbin) for seq in sseq] sseq_idx = np.array((list(chain.from_iterable(sseq_idx)))) sseq_idx = [sidx for sidx in sseq_idx if sidx > iwin1 and sidx < len(EEG)-iwin2 and istart < sidx < iend] else: sseq_idx = np.arange(iwin1, len(EEG)-iwin2) # keep only state indices that are not next to a P-wave/laser pulse if null_win != 0: sseq_idx = np.setdiff1d(sseq_idx, not_null_idx) # randomly select from all state indices else: sseq_idx 
= np.array((sseq_idx)) np.random.seed(0) # select number of random indices matching the number of P-waves r_idx = np.random.randint(low=0, high=len(sseq_idx), size=len(p_signal[s])) null_idx = sseq_idx[r_idx] for ni in null_idx: # get data of desired signal type if signal_type == 'EEG': data = EEG[ni-iwin1 : ni+iwin2] elif signal_type == 'EEG2': data = EEG2[ni-iwin1 : ni+iwin2] # calculate SP from EEG or EEG2 elif 'CALC' in signal_type: if 'SP2' in signal_type: tmp = EEG2[ni-iwin1 : ni+iwin2] elif 'SP' in signal_type: tmp = EEG[ni-iwin1 : ni+iwin2] f, t, data = scipy.signal.spectrogram(tmp, fs=sr, window='hanning', nperseg=int(nsr_seg * sr), noverlap=int(nsr_seg * sr * perc_overlap)) # normalize calculated SP based on entire recording if 'NORM' in signal_type: data = np.divide(data, np.repeat([SP_mean], data.shape[1], axis=0).T) # if not calculating, get SP or SP_NORM from whole recording calculation elif 'SP' in signal_type: spi = int(round((ni + spi_adjust[ni])/sp_nbin)) if 'NORM' in signal_type: data = SP_norm[:, spi-sp_win1 : spi+sp_win2] else: data = SP[:, spi-sp_win1 : spi+sp_win2] elif signal_type == 'LFP': data = LFP[ni-iwin1 : ni+iwin2] else: print(signal_type + ' IS AN INVALID SIGNAL TYPE') return # collect data in null dictionary null_signal[s].append(data) # save tmp files to free up more room for computation for s in istate: so.savemat(f'TMP_{rec}_pwaves_{s}.mat', {'data':p_signal[s]}) so.savemat(f'TMP_{rec}_null_{s}.mat', {'data':null_signal[s]}) if psave: print('\n Assembling data dictionaries and saving .mat files ...\n') else: print('\n Assembling data dictionaries ...\n') # collect data from all recordings for each state from tmp files p_signal = {s:{rec:0 for rec in recordings} for s in istate} null_signal = {s:{rec:0 for rec in recordings} for s in istate} # assemble final data dictionaries for s in istate: for rec in recordings: p_signal[s][rec] = so.loadmat(f'TMP_{rec}_pwaves_{s}.mat')['data'] null_signal[s][rec] = so.loadmat(f'TMP_{rec}_null_{s}.mat')['data'] # remove temporary files for rec in recordings: os.remove(f'TMP_{rec}_pwaves_{s}.mat') os.remove(f'TMP_{rec}_null_{s}.mat') # save files if psave: for s in istate: filename = psave if isinstance(psave, str) else f'Surround_{signal_type}' so.savemat(os.path.join(ppath, f'{filename}_pwaves_{s}.mat'), p_signal[s]) so.savemat(os.path.join(ppath, f'{filename}_null_{s}.mat'), null_signal[s]) so.savemat(os.path.join(ppath, f'{filename}_data_shape.mat'), {'data_shape':data.shape}) END = time.perf_counter() print(f'COMPUTING TIME --> {END-START:0.2f} seconds ({len(recordings)} recordings, {len(istate)} brainstates, signal type = {signal_type})') return p_signal, null_signal, data.shape
13,612
def load_quantized_bert_base(batch_size=1, seq_len=384):
    """
    Load the quantized bert-base model from TLCBench, possibly downloading it from github
    and caching the converted int8 QNN module to disk.

    In addition to returning the relay module and its parameters, it also returns input
    name and shape information, which can be used at the deployment time as follows:

    ```
    mod, params, input_info = load_quantized_bert_base()
    ...
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))

    for name, shape in input_info:
        arr = np.random.uniform(1, 10, size=shape).astype("int64")
        runtime.set_input(name, arr)

    runtime.run()
    ```
    """
    url = "https://github.com/tlc-pack/TLCBench/raw/main/models/bert-base-qat.onnx"
    log.info("Downloading quantized bert-base model.")
    onnx_path = download_testdata(url, "bert-base-qat.onnx", module="tlcbench")
    data_dir = os.path.dirname(onnx_path)
    json_path = os.path.join(data_dir, "bert_base_int8_b%d_s%d.json" % (batch_size, seq_len))
    params_path = os.path.join(data_dir, "bert_base_int8_b%d_s%d.params" % (batch_size, seq_len))

    # Input names and order encoded in the ONNX model
    input_info = [
        ("input_ids", (batch_size, seq_len)),
        ("segment_ids", (batch_size, seq_len)),
        ("input_mask", (batch_size, seq_len)),
    ]

    if not os.path.exists(json_path) or not os.path.exists(params_path):
        convert_to_qnn(onnx_path, json_path, params_path, input_info)

    def deserialize():
        try:
            return deserialize_relay(json_path, params_path)
        except TVMError:
            # A serialized Relay json file may become invalid after TVM bump
            # Update the serialized model and try loading again
            convert_to_qnn(onnx_path, json_path, params_path, input_info)
            return deserialize_relay(json_path, params_path)

    mod, params = deserialize()
    return mod, params, input_info
13,613
def projects(ctx):
    """Display and download projects from Kimai"""
    if config.get('ApiKey') is None:
        print_error(
            '''kimai-cli has not yet been configured. Use \'kimai configure\' first before using any other command'''
        )
        ctx.abort()
13,614
def test_check_transform_not_matrix_2d() -> None:
    """verify raised exception for 2d non-matrix arguments"""
    xform = AutomataTransforms(base_universe_instance_2d())
    not_matrix = not_matrix_typeerror_2d()
    for case in not_matrix:
        # minimal check: details verified in tests for validate_matrix, valid_address
        with pytest.raises(TypeError):
            xform._check_transform_matrix(case)
    assert len(NOT_MATRICES_2D) > 0
    for case in NOT_MATRICES_2D:
        # minimal check: details verified in tests for validate_matrix, valid_address
        with pytest.raises(ValueError):
            xform._check_transform_matrix(case)
13,615
def setup(app):
    """A temporary setup function so that we can use it for backwards
    compatibility.

    This should be removed after a deprecation cycle.
    """
    # To avoid circular imports we'll lazily import
    from . import setup as jssetup

    js.logger.warning(
        (
            "`jupyter-sphinx` was initialized with the "
            "`jupyter_sphinx.execute` sub-module. Replace this with "
            "`jupyter_sphinx`. Initializing with "
            "`jupyter_sphinx.execute` will be removed in "
            "version 0.3"
        )
    )
    out = jssetup(app)
    return out
13,616
def test_all_rank_logging_ddp_spawn(tmpdir):
    """Check that all ranks can be logged from."""
    model = TestModel()
    all_rank_logger = AllRankLogger()
    model.training_epoch_end = None
    trainer = Trainer(
        strategy="ddp_spawn",
        gpus=2,
        default_root_dir=tmpdir,
        limit_train_batches=1,
        limit_val_batches=1,
        max_epochs=1,
        logger=all_rank_logger,
        enable_model_summary=False,
    )
    trainer.fit(model)
13,617
def hinge_loss(logit, target, margin, reduce='sum'):
    """
    Args:
        logit (torch.Tensor): (N, C, d_1, d_2, ..., d_K)
        target (torch.Tensor): (N, d_1, d_2, ..., d_K)
        margin (float):
    """
    target = target.unsqueeze(1)
    tgt_logit = torch.gather(logit, dim=1, index=target)
    loss = logit - tgt_logit + margin
    loss = torch.masked_fill(loss, loss < 0, 0)
    loss = torch.scatter(loss, dim=1, index=target, value=0)
    reduce_fn = REDUCE_FN_MAPPINGS[reduce]
    return reduce_fn(loss)
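A toy usage sketch (assumes `torch` is imported and that `REDUCE_FN_MAPPINGS` maps 'sum' to `torch.sum`, as the snippet implies):

import torch

logit = torch.tensor([[2.0, 0.5, -1.0],
                      [0.1, 1.2, 0.3]])   # shape (N=2, C=3)
target = torch.tensor([0, 1])             # shape (N=2)
loss = hinge_loss(logit, target, margin=1.0, reduce='sum')
# per-class margins below zero are clipped and the target class column is zeroed out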
13,618
def setTimeSync(state):
    """ Set the state of host/guest time synchronization using vmware-toolbox-cmd.
    Returns None on success and an error message on failure.
    """
    # Translate the boolean to a string for vmware-toolbox-cmd
    if state:
        setStr = 'enable'
    else:
        setStr = 'disable'

    try:
        # Run the command to set the status of host/guest time sync
        retval = subprocess.Popen(['vmware-toolbox-cmd', 'timesync', setStr]).wait()
    except OSError as e:
        msg = "Unable to execute the 'vmware-toolbox-cmd' command: %s" % str(e)
        log.exception(msg)
        return msg

    if retval != 0:
        msg = 'Setting the state of host/guest time synchronization failed, '\
              'exit code: %d' % retval
        log.info(msg)
        return msg
    else:
        log.info("Successfully set status of host/guest time synchronization "
                 "to: '%s'", setStr)
        return None
13,619
def do_cleanup(ips, args): """ :param ips: :param args: :return: None """ def _cleanup_single_node(ip): def _rpc(cmd): return rpc(ip, cmd, args.user, args.password, args.key, suppress_output=not args.verbose) # TODO: (Make this more targeted) # Stop services. _rpc('sudo service cassandra stop') _rpc('sudo service dsc stop') _rpc('sudo service dse stop') _rpc('sudo service datastax-agent stop') _rpc('sudo /etc/init.d/cassandra stop') _rpc('sudo /etc/init.d/dsc stop') _rpc('sudo /etc/init.d/dse stop') _rpc('sudo /etc/init.d/datastax-agent stop') # Uninstall packages. _rpc('sudo yum remove -y \'*cassandra*\' \'*dsc*\' \'*dse*\' \'*datastax*\'') # Cleanup install folders. _rpc('sudo rm -rf /var/lib/cassandra/*') _rpc('sudo rm -rf /var/log/{cassandra,hadoop,hive,pig}/*') _rpc('sudo rm -rf /etc/{cassandra,dsc,dse}/*') _rpc('sudo rm -rf /usr/share/{dse,dse-demos}') _rpc('sudo rm -rf /etc/default/{cassandra,dsc,dse}') # Start a progress bar if not in verbose mode. if not args.verbose: e = threading.Event() bar_thread = threading.Thread(target=progress_bar, args=('Performing pre-install cleanup.', e)) bar_thread.setDaemon(True) bar_thread.start() # Spawn threads to run instructions on all nodes at once threads = [] for pub, priv in ips: t = threading.Thread(target=_cleanup_single_node, args=(pub,)) t.setDaemon(True) t.start() threads.append(t) # Wait for all threads to complete for t in threads: t.join() # Terminate the progress bar if not in verbose mode. if not args.verbose: e.set() bar_thread.join()
13,620
def getargopts():
    """Parse command line arguments."""
    opts = argparse.ArgumentParser()
    opts.add_argument('--port', type=int,
                      help="Port to listen to (default 8888)", default=8888)
    opts.add_argument('srcbase', help="Base source directory.")
    opts.add_argument('targetbase', help="Base target directory.")
    args = opts.parse_args()
    return args.srcbase, args.targetbase, args.port
13,621
def test_fetch_local_no_local_registry():
    """Test that local fetch fails when no local registry."""
    with TemporaryDirectory() as tmp_dir:
        with cd(tmp_dir):
            runner = CliRunner()
            result = runner.invoke(
                cli,
                ["fetch", "--local", "fetchai/my_first_aea"],
                catch_exceptions=False,
            )
            assert result.exit_code == 1, result.stdout
            assert (
                "Registry path not provided and local registry `packages` not found in current (.) and parent directory."
                in result.stdout
            )
13,622
def roc(model, image, mask, ignore=None, sky=None, n_mask=1, seed=1,
        thresholds=np.linspace(0.001, 0.999, 500), dilate=False, rad=1):
    """ evaluate model on test set with the ROC curve

    :param model: deepCR object
    :param image: np.ndarray((N, W, H)) image array
    :param mask: np.ndarray((N, W, H)) CR mask array
    :param ignore: np.ndarray((N, W, H)) bad pixel array incl. saturation, etc.
    :param thresholds: np.ndarray(N) FPR grid on which to evaluate ROC curves
    :return: np.ndarray(N), np.ndarray(N): TPR and FPR
    """
    kernel = None
    if dilate:
        kernel = disk(rad)
    if type(image) == np.ndarray and len(image.shape) == 3:
        data = dataset(image, mask, ignore)
    elif type(image[0]) == str:
        data = DatasetSim(image, mask, sky=sky, n_mask=n_mask, seed=seed)
    else:
        raise TypeError('Input must be numpy data arrays or list of file paths!')
    (tpr, fpr), (tpr_dilate, fpr_dilate) = _roc(model, data, thresholds=thresholds, dilate=kernel)
    if dilate:
        return (tpr, fpr), (tpr_dilate, fpr_dilate)
    else:
        return tpr, fpr
13,623
def parse_testconfig(conffile): """Parses the config file for the whole testsuite.""" repo_path, drop_caches, tests_dir, testlog_dir = '', '', '', '' basebranch, baserev, repo_prof_path, repo_gprof_path = '', '', None, None fileopen = open(conffile, 'r') for line in fileopen: line = line.split('#')[0] # Discard comments if line == '' or line == '\n': continue # Discard lines with comments only and empty lines opt, args = line.split(' ', 1) # Get arguments if opt == 'MOSES_REPO_PATH:': repo_path = args.replace('\n', '') elif opt == 'DROP_CACHES_COMM:': drop_caches = args.replace('\n', '') elif opt == 'TEST_DIR:': tests_dir = args.replace('\n', '') elif opt == 'TEST_LOG_DIR:': testlog_dir = args.replace('\n', '') elif opt == 'BASEBRANCH:': basebranch = args.replace('\n', '') elif opt == 'BASEREV:': baserev = args.replace('\n', '') elif opt == 'MOSES_PROFILER_REPO:': # Optional repo_prof_path = args.replace('\n', '') elif opt == 'MOSES_GOOGLE_PROFILER_REPO:': # Optional repo_gprof_path = args.replace('\n', '') else: raise ValueError('Unrecognized option ' + opt) config = Configuration(repo_path, drop_caches, tests_dir, testlog_dir,\ basebranch, baserev, repo_prof_path, repo_gprof_path) fileopen.close() return config
13,624
def get_filtered_df(df, vocab_file):
    """ Return a data frame with only the words present in the vocab file. """
    if vocab_file:
        vocab = open(vocab_file).readlines()
        vocab = [v.strip() for v in vocab]

        # Get the set of words.
        words = pd.Series(df.word.values.ravel()).unique()
        set_words = set(words)

        # Find the words common to data frame and vocab
        common_set_words = set_words & set(vocab)

        # Filter the dataframe
        df_filtered = df[df.word.isin(common_set_words)]
        return df_filtered
    else:
        return df
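A brief usage sketch (the `df` data frame with a 'word' column and the vocab.txt file, one word per line, are hypothetical):

# Keep only rows whose 'word' value appears in vocab.txt.
filtered = get_filtered_df(df, "vocab.txt")
# Passing None (or any falsy path) returns the frame unchanged.
unfiltered = get_filtered_df(df, None)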
13,625
def test_update(session):
    """ Test the role repository update function. """
    # Arrange
    test_model = RoleRepository.create(Role(name='Super User'))
    test_model_id = test_model.id
    update_fields = ('name',)

    # Act
    result = RoleRepository.update(
        test_model_id, update_fields, name='Admin User')

    # Assert
    assert isinstance(result, Role)
    assert result.normalized_name == 'admin-user'
    assert result.name == 'Admin User'
13,626
def pkcs5_pad(data):
    """
    Pad data using PKCS5
    """
    pad = KEYCZAR_AES_BLOCK_SIZE - len(data) % KEYCZAR_AES_BLOCK_SIZE
    data = data + pad * chr(pad).encode("utf-8")
    return data
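For illustration (assumes `KEYCZAR_AES_BLOCK_SIZE` is 16, the AES block size the constant name suggests):

padded = pkcs5_pad(b"hello12345")            # 10 bytes of input
# -> b'hello12345\x06\x06\x06\x06\x06\x06'   (6 padding bytes, each of value 6)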
13,627
def fmul_rd(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fmul_rd.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: float32
    :rtype: float32
    """
13,628
def preview(handler, width, height):
    """
    Preview camera stream
    """
    handler.stitch_feeds(False, None, width, height, None)
13,629
def add_to_shelf(book, shelf):
    """Add entry to database so that ``book`` appears in ``shelf``."""
    shelf_content = ShelfContent(
        ShelfName=shelf,
        ContentId=f"file:///mnt/onboard/contents/{book.name}",
        DateModified=datetime.today(),
        _IsDeleted=False,
        _IsSynced=False,
    )
    session.add(shelf_content)
    session.commit()
    click.echo(f"Added {book} to shelf {shelf}.")
13,630
def chdir(path) -> Any:
    """
    Change current directory.
    """
    ...
13,631
def gatherData(data, neat, gen, hyp, fileName, savePop=False):
    """Collects run data, saves it to disk, and exports pickled population

    Args:
      data     - (DataGatherer)  - collected run data
      neat     - (Neat)          - neat algorithm container
        .pop     - [Ind]         - list of individuals in population
        .species - (Species)     - current species
      gen      - (int)           - current generation
      hyp      - (dict)          - algorithm hyperparameters
      savePop  - (bool)          - save current population to disk?

    Return:
      data - (DataGatherer) - updated run data
    """
    data.gatherData(neat.pop, neat.species)
    if (gen % hyp['save_mod']) == 0:
        data = checkBest(data, hyp)
        data.save(gen)

    if savePop is True:  # Get a sample pop to play with in notebooks
        pref = 'log/' + fileName
        import pickle
        with open(pref + '_pop.obj', 'wb') as fp:
            pickle.dump(neat.pop, fp)

    return data
13,632
def impute_bad_concentration_fits(c_lgtc, c_lgtc_min=0.1):
    """Overwrite bad concentration parameter fit values."""
    c_lgtc = np.where(c_lgtc < c_lgtc_min, c_lgtc_min, c_lgtc)
    return c_lgtc
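A quick usage sketch (assumes the module's `numpy as np` import):

import numpy as np

c_lgtc = np.array([0.05, 0.2, -1.0, 0.5])
print(impute_bad_concentration_fits(c_lgtc))   # [0.1 0.2 0.1 0.5]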
13,633
def _build_sub_nics(all_nics):
    """
    Aggregate all sub nics into their sub groups. I.E. All nic\.X.\.*\.Y nics
    go into a list where all Y's are the same.

    :param all_nics: All nics to consider.
    :type all_nics: list
    """
    sub_nics = {}
    for nic in all_nics['nics']:
        possible_sub_nic = get_nic_sub_number.match(nic.key)
        if not possible_sub_nic:
            log("System {0} and NIC {1} not in valid format. "
                "Skipping.".format(nic.obj, nic.key), DEBUG)
            continue
        sub_nic_number = possible_sub_nic.group(1)
        if sub_nic_number in sub_nics:
            sub_nics[sub_nic_number].append(nic)
        else:
            sub_nics[sub_nic_number] = [nic]
    return sub_nics
13,634
def pearsonr(a0, a1):
    """Pearson r, product-moment correlation coefficient, of two samples.

    Covariance divided by product of standard deviations.

    https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient#For_a_sample
    """
    n = len(a0)
    assert n == len(a1)
    if n == 0:
        # No data, so no notion of correlation.
        return float('NaN')
    a0 = numpy.array(a0)
    a1 = numpy.array(a1)
    m0 = numpy.mean(a0)
    m1 = numpy.mean(a1)
    num = numpy.sum((a0 - m0)*(a1 - m1))
    den0_sq = numpy.sum((a0 - m0)**2)
    den1_sq = numpy.sum((a1 - m1)**2)
    den = math.sqrt(den0_sq*den1_sq)
    if den == 0.0:
        # No variation in at least one column, so no notion of
        # correlation.
        return float('NaN')
    r = num / den
    # Clamp r in [-1, +1] in case of floating-point error.
    r = min(r, +1.0)
    r = max(r, -1.0)
    return r
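A few illustrative calls (assumes the module imports `numpy` and `math` as used above):

print(pearsonr([1, 2, 3, 4], [8, 6, 4, 2]))   # -1.0 (perfect anti-correlation)
print(pearsonr([], []))                        # nan  (no data)
print(pearsonr([5, 5, 5], [1, 2, 3]))          # nan  (no variation in a0)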
13,635
def __column(matrix, i):
    """Returns columns from a bidimensional Python list (a list of lists)"""
    return [row[i] for row in matrix]
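For illustration (assuming the function is defined at module level as shown, so the double-underscore name is directly callable):

matrix = [[1, 2, 3],
          [4, 5, 6],
          [7, 8, 9]]
print(__column(matrix, 1))   # [2, 5, 8]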
13,636
def datamask(fmri_data, mask_data):
    """
    filter the data by a ROI mask

    Parameters:
        fmri_data : array
            The fMRI data. The shape of fmri_data is [nx, ny, nz].
            nx, ny, nz represent the size of the fMRI data.
        mask_data : array
            The mask data. The shape of mask_data is [nx, ny, nz].
            nx, ny, nz represent the size of the fMRI data.

    Returns
    -------
    newfmri_data : array
        The new fMRI data. The shape of newfmri_data is [nx, ny, nz].
        nx, ny, nz represent the size of the fMRI data.
    """
    nx, ny, nz = fmri_data.shape
    newfmri_data = np.full([nx, ny, nz], np.nan)
    for i in range(nx):
        for j in range(ny):
            for k in range(nz):
                if (mask_data[i, j, k] != 0) and (math.isnan(mask_data[i, j, k]) is False):
                    newfmri_data[i, j, k] = fmri_data[i, j, k]
    return newfmri_data
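As a side note, the same masking can be expressed without the triple loop; a vectorized sketch with the same semantics (voxels where the mask is 0 or NaN become NaN):

keep = (mask_data != 0) & ~np.isnan(mask_data)
newfmri_data = np.where(keep, fmri_data, np.nan)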
13,637
def make_global_batch_norm_tests(options): """Make a set of tests to do batch_norm_with_global_normalization.""" test_parameters = [{ "dtype": [tf.float32], "input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]], "epsilon": [0.1, 0.0001], "scale_after": [True, False], }] def build_graph(parameters): """Build the global batch norm testing graph.""" input_shape = parameters["input_shape"] scale_shape = input_shape[3] scale = create_tensor_data(parameters["dtype"], scale_shape) offset = create_tensor_data(parameters["dtype"], scale_shape) mean = create_tensor_data(parameters["dtype"], scale_shape) variance = create_tensor_data(parameters["dtype"], scale_shape) x = create_tensor_data(parameters["dtype"], parameters["input_shape"]) x_norm = tf.nn.batch_norm_with_global_normalization( x, mean, variance, scale, offset, parameters["epsilon"], parameters["scale_after"]) input_tensor = tf.placeholder( dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) out = tf.add(input_tensor, x_norm) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_value = create_tensor_data(parameters["dtype"], parameters["input_shape"]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
13,638
def write_stats_file(run_output_dict): """Writes a dummy PolyChord format .stats file for tests functions for processing stats files. This is written to: base_dir/file_root.stats Also returns the data in the file as a dict for comparison. Parameters ---------- run_output_dict: dict Output information to write to .stats file. Must contain file_root and base_dir. If other settings are not specified, default values are used. Returns ------- output: dict The expected output of nestcheck.process_polychord_stats(file_root, base_dir) """ mandatory_keys = ['file_root', 'base_dir'] for key in mandatory_keys: assert key in run_output_dict, key + ' not in run_output_dict' default_output = {'logZ': 0.0, 'logZerr': 0.0, 'logZs': [0.0], 'logZerrs': [0.0], 'ncluster': 1, 'nposterior': 0, 'nequals': 0, 'ndead': 0, 'nlike': 0, 'nlive': 0, 'avnlike': 0.0, 'avnlikeslice': 0.0, 'param_means': [0.0, 0.0, 0.0], 'param_mean_errs': [0.0, 0.0, 0.0]} allowed_keys = set(mandatory_keys) | set(default_output.keys()) assert set(run_output_dict.keys()).issubset(allowed_keys), ( 'Input dict contains unexpected keys: {}'.format( set(run_output_dict.keys()) - allowed_keys)) output = copy.deepcopy(run_output_dict) for key, value in default_output.items(): if key not in output: output[key] = value # Make a PolyChord format .stats file corresponding to output file_lines = [ 'Evidence estimates:', '===================', (' - The evidence Z is a log-normally distributed, with location and ' 'scale parameters mu and sigma.'), ' - We denote this as log(Z) = mu +/- sigma.', '', 'Global evidence:', '----------------', '', 'log(Z) = {0} +/- {1}'.format( output['logZ'], output['logZerr']), '', '', 'Local evidences:', '----------------', ''] for i, (lz, lzerr) in enumerate(zip(output['logZs'], output['logZerrs'])): file_lines.append('log(Z_ {0}) = {1} +/- {2}'.format( str(i + 1).rjust(2), lz, lzerr)) file_lines += [ '', '', 'Run-time information:', '---------------------', '', ' ncluster: 0 / 1', ' nposterior: {0}'.format(output['nposterior']), ' nequals: {0}'.format(output['nequals']), ' ndead: {0}'.format(output['ndead']), ' nlive: {0}'.format(output['nlive']), ' nlike: {0}'.format(output['nlike']), ' <nlike>: {0} ( {1} per slice )'.format( output['avnlike'], output['avnlikeslice']), '', '', 'Dim No. Mean Sigma'] for i, (mean, meanerr) in enumerate(zip(output['param_means'], output['param_mean_errs'])): file_lines.append('{0} {1} +/- {2}'.format( str(i + 1).ljust(3), mean, meanerr)) file_path = os.path.join(output['base_dir'], output['file_root'] + '.stats') with open(file_path, 'w') as stats_file: stats_file.writelines('{}\n'.format(line) for line in file_lines) return output
13,639
def dict_serialize(seqlen_dist_dict):
    """
    dict -> str
    Turns {1:'a',2:'b'} -> '[[1,"a"],[2,"b"]]'
    Why? Because this format plays nice with the shell script that runs xlmr_bench.
    It avoids the curly braces and spaces that make shell-script string input unhappy.
    """
    seqlen_dist_lst = list(seqlen_dist_dict.items())
    seqlen_dist_str = json.dumps(seqlen_dist_lst)
    seqlen_dist_str = seqlen_dist_str.replace(" ", "")  # remove spaces
    return seqlen_dist_str
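A quick round-trip illustration for the helper above; the particular dict is made up, and `json` is assumed to be importable just as the function body already requires.

import json

seqlen_dist = {16: 0.25, 32: 0.5, 64: 0.25}

serialized = dict_serialize(seqlen_dist)
print(serialized)            # [[16,0.25],[32,0.5],[64,0.25]]

# The consumer can recover the pairs with json.loads:
assert dict(json.loads(serialized)) == seqlen_dist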
13,640
def pi_cdecimal(): """cdecimal""" D = C.Decimal lasts, t, s, n, na, d, da = D(0), D(3), D(3), D(1), D(0), D(0), D(24) while s != lasts: lasts = s n, na = n+na, na+8 d, da = d+da, da+32 t = (t * n) / d s += t return s
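A minimal driving sketch for the benchmark function above. It assumes the sketch sits in the same module as the function and that `C` is the alias for a decimal implementation (the original presumably imports cdecimal as `C`; on Python 3 the stdlib `decimal` is already the C-accelerated version).

import decimal as C  # assumed alias; mirrors the import the original module presumably makes

# The series converges to pi at the active context precision.
C.getcontext().prec = 50
print(pi_cdecimal())  # 3.14159265358979323846... (pi at the current precision)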
13,641
def image2file(image, path): """ Writes an image in list of lists format to a file. Will work with either color or grayscale. """ if isgray(image): img = gray2color(image) else: img = image with open(path, 'wb') as f: png.Writer(width=len(image[0]), height=len(image)).write(f, [boxed2flat(r) for r in img])
13,642
def e_list(a_list: AList) -> set[E]: """Unique elements in adjacency list.""" return set(e for n in a_list for nb in a_list[n] for e in a_list[n][nb])
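The comprehension implies an adjacency-list layout of node -> neighbour -> collection of edge labels; a small made-up example:

# node -> neighbour -> set of edge labels on that (node, neighbour) pair
a_list = {
    "a": {"b": {"e1"}, "c": {"e2", "e3"}},
    "b": {"a": {"e1"}},
    "c": {"a": {"e2", "e3"}},
}

assert e_list(a_list) == {"e1", "e2", "e3"}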
13,643
def load_complete_state(options, cwd, subdir, skip_update): """Loads a CompleteState. This includes data from .isolate and .isolated.state files. Never reads the .isolated file. Arguments: options: Options instance generated with process_isolate_options. For either options.isolate and options.isolated, if the value is set, it is an absolute path. cwd: base directory to be used when loading the .isolate file. subdir: optional argument to only process file in the subdirectory, relative to CompleteState.root_dir. skip_update: Skip trying to load the .isolate file and processing the dependencies. It is useful when not needed, like when tracing. """ assert not options.isolate or os.path.isabs(options.isolate) assert not options.isolated or os.path.isabs(options.isolated) cwd = file_path.get_native_path_case(unicode(cwd)) if options.isolated: # Load the previous state if it was present. Namely, "foo.isolated.state". # Note: this call doesn't load the .isolate file. complete_state = CompleteState.load_files(options.isolated) else: # Constructs a dummy object that cannot be saved. Useful for temporary # commands like 'run'. There is no directory containing a .isolated file so # specify the current working directory as a valid directory. complete_state = CompleteState(None, SavedState(os.getcwd())) if not options.isolate: if not complete_state.saved_state.isolate_file: if not skip_update: raise ExecutionError('A .isolate file is required.') isolate = None else: isolate = complete_state.saved_state.isolate_filepath else: isolate = options.isolate if complete_state.saved_state.isolate_file: rel_isolate = file_path.safe_relpath( options.isolate, complete_state.saved_state.isolated_basedir) if rel_isolate != complete_state.saved_state.isolate_file: # This happens if the .isolate file was moved for example. In this case, # discard the saved state. logging.warning( '--isolated %s != %s as saved in %s. Discarding saved state', rel_isolate, complete_state.saved_state.isolate_file, isolatedfile_to_state(options.isolated)) complete_state = CompleteState( options.isolated, SavedState(complete_state.saved_state.isolated_basedir)) if not skip_update: # Then load the .isolate and expands directories. complete_state.load_isolate( cwd, isolate, options.path_variables, options.config_variables, options.extra_variables, options.blacklist, options.ignore_broken_items, options.collapse_symlinks) # Regenerate complete_state.saved_state.files. if subdir: subdir = unicode(subdir) # This is tricky here. If it is a path, take it from the root_dir. If # it is a variable, it must be keyed from the directory containing the # .isolate file. So translate all variables first. translated_path_variables = dict( (k, os.path.normpath(os.path.join(complete_state.saved_state.relative_cwd, v))) for k, v in complete_state.saved_state.path_variables.iteritems()) subdir = isolate_format.eval_variables(subdir, translated_path_variables) subdir = subdir.replace('/', os.path.sep) if not skip_update: complete_state.files_to_metadata(subdir, options.collapse_symlinks) return complete_state
13,644
def GDAL_QUERY(filename, sql, data={}): """ GDAL_QUERY """ res = [] sql = sformat(sql, data) ds = ogr.OpenShared(filename) if ds: try: layer = ds.ExecuteSQL(sql) definition = layer.GetLayerDefn() n = definition.GetFieldCount() for feature in layer: row = {} for i in range(n): fieldname = definition.GetFieldDefn(i).GetName() row[fieldname] = feature.GetField(fieldname) res += [row] except Exception, ex: print "GDAL_QUERY Exception:", ex return res
13,645
def auto_fav(q, count=100, result_type="recent"): """ Favorites tweets that match a certain phrase (hashtag, word, etc.) """ result = search_tweets(q, count, result_type) for tweet in result["statuses"]: try: # don't favorite your own tweets if tweet["user"]["screen_name"] == TWITTER_HANDLE: continue result = t.favorites.create(_id=tweet["id"]) print("favorited: %s" % (result["text"].encode("utf-8"))) # when you have already favorited a tweet, this error is thrown except TwitterHTTPError as e: print("error: %s" % (str(e)))
13,646
def get_event_bpt_hea(): """ Get hardware address for BREAKPOINT event @return: hardware address """ ev = ida_dbg.get_debug_event() assert ev, "Could not retrieve debug event" return ida_idd.get_event_bpt_hea(ev)
13,647
def solve_maxent_ce(payoffs, steps=1000000, lams=None, lr=None):
    """Calculates the maximum-entropy correlated equilibrium as defined in
    Ortiz et al. (2007).

    payoffs (torch.Tensor): Joint payoff tensor.
    steps (int, optional): Number of SGD steps to use in calculations
        (default: 1000000).
    lams (list of torch.Tensor): Initialization logits (default: auto-initialized).
    lr (float): SGD learning rate (default: auto-computed).

    Ortiz et al., "Maximum entropy correlated equilibria", 2007,
    http://proceedings.mlr.press/v2/ortiz07a/ortiz07a.pdf
    """
    n = payoffs.size(0)
    action_counts = tuple(payoffs.shape[1:])
    if lr is None:
        tot = 0.0
        for i in range(n):
            ac = action_counts[i]
            payoff_permuted = payoffs[i].transpose(0, i)
            gain_mat = payoff_permuted.view(ac, 1, -1) - payoff_permuted.view(1, ac, -1)
            tot += torch.abs(gain_mat).sum(dim=0).max().item()
        lr = 0.9 / tot
    if lams is None:
        lams = [(lr * payoffs.new_ones((i, i))) for i in action_counts]
        for i in range(n):
            rac = torch.arange(action_counts[i])
            lams[i][rac, rac] = 0.0
    for _ in range(steps):
        log_policy = _lams_to_log_policy(lams, payoffs)
        policy = torch.exp(log_policy)
        pos_regrets = _get_regret(policy, payoffs, positive=True)
        neg_regrets = _get_regret(policy, payoffs, positive=False)
        eps = 0.5 ** 125
        for i in range(n):
            ac = action_counts[i]
            rac = torch.arange(ac)
            chg = ((pos_regrets[i] + eps) / (pos_regrets[i] + neg_regrets[i] + 2 * eps)) - 0.5
            chg[rac, rac] = 0.0
            lams[i].add_(lr, chg)
            lams[i].clamp_(min=0.0)
    return policy
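A hedged usage sketch for a two-player 2x2 game. The payoff layout (player index first, then one action axis per player) is inferred from how the function indexes `payoffs`, and the small `steps` value is purely for illustration.

import torch

# Payoffs for a simple coordination game, shape (n_players, actions_0, actions_1).
payoffs = torch.tensor([
    [[2.0, 0.0],
     [0.0, 1.0]],   # player 0
    [[2.0, 0.0],
     [0.0, 1.0]],   # player 1
])

policy = solve_maxent_ce(payoffs, steps=10000)
print(policy)  # joint distribution over the four action profiles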
13,648
def d_latlon(p1, p2):
    """
    Compute the distance between two points. The original file used a more
    complex (and more expensive) algorithm; a relatively simple approximation
    is used here instead, with little loss of precision.
    """
    lon_diff, lat_diff = p1 - p2
    # 0.00872664625997165 ~= pi / 360, so this is the cosine of the mean latitude in radians
    lon_diff *= cos((p1[1] + p2[1]) * 0.00872664625997165)
    return sqrt(lat_diff * lat_diff + lon_diff * lon_diff) * earth_radians
13,649
def _path_list_creator(path, file_prefix_name, number_of_digits_zfill, file_suffix_name): """Creates a list of paths where the files have a predefined prefix, an incremental number and a predefined suffix on their name, respectively. Eg.: img01.zdf Args: path: a path that leads to the files directory file_prefix_name: a string that comes before the number number_of_digits_zfill: a number of digits in the number file_suffix_name: a string that comes after the number Returns: list_of_paths: list of appended paths """ num = 1 list_of_paths = [] while True: file_path = path / f"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}" list_of_paths.append(file_path) next_file_path = path / f"{file_prefix_name}{str(num+1).zfill(number_of_digits_zfill)}{file_suffix_name}" if not next_file_path.exists(): return list_of_paths num = num + 1
13,650
def determine_paths(env): """ Fill the 'CUDA_TOOLKIT_PATH' into environment if it is not there. @return: the paths. @rtype: tuple """ import sys import os from warnings import warn home = os.environ.get('HOME', '') programfiles = os.environ.get('PROGRAMFILES', '') homedrive = os.environ.get('HOMEDRIVE', '') # find CUDA Toolkit path and set CUDA_TOOLKIT_PATH. cudaToolkitPath = os.environ.get('CUDA_TOOLKIT_PATH', '') if not cudaToolkitPath: paths = [ '/'.join([home, 'NVIDIA_CUDA_TOOLKIT']), '/'.join([home, 'Apps', 'NVIDIA_CUDA_TOOLKIT']), '/'.join([home, 'Apps', 'CudaToolkit']), '/'.join([home, 'Apps', 'CudaTK']), '/'.join(['/usr', 'local', 'NVIDIA_CUDA_TOOLKIT']), '/'.join(['/usr', 'local', 'CUDA_TOOLKIT']), '/'.join(['/usr', 'local', 'cuda_toolkit']), '/'.join(['/usr', 'local', 'CUDA']), '/'.join(['/usr', 'local', 'cuda']), '/'.join(['/Developer', 'NVIDIA CUDA TOOLKIT']), '/'.join(['/Developer', 'CUDA TOOLKIT']), '/'.join(['/Developer', 'CUDA']), '/'.join([programfiles, 'NVIDIA Corporation', 'NVIDIA CUDA TOOLKIT']), '/'.join([programfiles, 'NVIDIA Corporation', 'NVIDIA CUDA']), '/'.join([programfiles, 'NVIDIA Corporation', 'CUDA TOOLKIT']), '/'.join([programfiles, 'NVIDIA Corporation', 'CUDA']), '/'.join([programfiles, 'NVIDIA', 'NVIDIA CUDA TOOLKIT']), '/'.join([programfiles, 'NVIDIA', 'NVIDIA CUDA']), '/'.join([programfiles, 'NVIDIA', 'CUDA TOOLKIT']), '/'.join([programfiles, 'NVIDIA', 'CUDA']), '/'.join([programfiles, 'CUDA TOOLKIT']), '/'.join([programfiles, 'CUDA']), '/'.join([homedrive, 'CUDA TOOLKIT']), '/'.join([homedrive, 'CUDA']), ] cudaToolkitPath = find_paths(paths) if cudaToolkitPath: sys.stdout.write( 'scons: CUDA Toolkit found in %s\n' % cudaToolkitPath) else: warn('Cannot find the CUDA Toolkit path. ' 'Please set it to CUDA_TOOLKIT_PATH environment variable.') env['CUDA_TOOLKIT_PATH'] = cudaToolkitPath return cudaToolkitPath
13,651
def crosscorrelation(array1, array2, std1, std2, **kwargs): """ Compute crosscorrelation. """ _ = std1, std2, kwargs xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np window = array1.shape[-1] pad_width = [(0, 0)] * (array2.ndim - 1) + [(window//2, window - window//2)] padded = xp.pad(array2, pad_width=tuple(pad_width)) accumulator = Accumulator('argmax') for i in range(window): corrs = (array1 * padded[..., i:i+window]).sum(axis=-1) accumulator.update(corrs) return accumulator.get(final=True).astype(float) - window//2
13,652
def score_max_depths(graph, max_depths): """ In order to assess the quality of the approximate partitioning method we've developed, we will run it with different values for max_depth and see how it affects the norm_cut score of the resulting partitions. Recall that smaller norm_cut scores correspond to better partitions. Params: graph........a networkx Graph max_depths...a list of ints for the max_depth values to be passed to calls to partition_girvan_newman Returns: A list of (int, float) tuples representing the max_depth and the norm_cut value obtained by the partitions returned by partition_girvan_newman. See Log.txt for an example. """ ###TODO result =[] for n in max_depths: components = partition_girvan_newman(graph, n) result.append((n,norm_cut(components[0], components[1], graph))) return result
13,653
def build_multi(mapping, inserts, key_residues, pdbfnames, chains): """Superimpose multiple structures onto a reference, showing equivalent selected residues in each. To reduce clutter, only show residues deviating from the reference side chain by at least `threshold` Angstroms RMS. """ # TODO - ensure Pymol's automatic struct colors aren't clobbered pml_names = [get_pml_name(pfn, cid) for pfn, cid in zip(pdbfnames, chains)] ref_pml_name, ref_chn = (pml_names[0], chains[0]) outs = [mk_intro_multi(pdbfnames, pml_names, chains), HR, mk_struct(ref_pml_name, 'RefStruct_'+ref_pml_name, chain=ref_chn, color='smudge', transparency=0.7)] if inserts: outs.extend(make_inserts(inserts, 'RefStruct_'+ref_pml_name, ref_chn, 'gray70')) if key_residues: # Side chains for the reference PDB outs.extend(make_residues(mapping, key_residues, ref_pml_name, ref_chn)) for eqv_pml_name, eqv_chn in zip(pml_names[1:], chains[1:]): outs.append(mk_struct(eqv_pml_name, 'EqvStruct_'+eqv_pml_name, chain=eqv_chn, color='slate', transparency=0.7)) # Side chains for the other PDBs if inserts: outs.extend(make_inserts(inserts, 'EqvStruct_'+eqv_pml_name, eqv_chn, 'marine')) if key_residues: # Generate PyMOL script lines outs.extend(make_residues(mapping, key_residues, eqv_pml_name, eqv_chn)) outs.extend([HR, mk_outro()]) return '\n'.join(outs) # just the script
13,654
def tamper(payload, **kwargs): """ Replaces instances of UNION with -.1UNION Requirement: * MySQL Notes: * Reference: https://raw.githubusercontent.com/y0unge/Notes/master/SQL%20Injection%20WAF%20Bypassing%20shortcut.pdf >>> tamper('1 UNION ALL SELECT') '1-.1UNION ALL SELECT' >>> tamper('1" UNION ALL SELECT') '1"-.1UNION ALL SELECT' """ return re.sub(r"(?i)\s+(UNION )", r"-.1\g<1>", payload) if payload else payload
13,655
def svn_wc_transmit_prop_deltas(*args): """ svn_wc_transmit_prop_deltas(char path, svn_wc_adm_access_t adm_access, svn_wc_entry_t entry, svn_delta_editor_t editor, void baton, apr_pool_t pool) -> svn_error_t """ return _wc.svn_wc_transmit_prop_deltas(*args)
13,656
def error_exit() -> NoReturn: """Exit with return code 1.""" if logger.isEnabledFor(logging.DEBUG): traceback.print_exc() logger.debug("Exited with error code 1.") sys.exit(1)
13,657
def patch_model_checkpoint(trainer): """ Save last checkpoint when key interrupt occurs. """ for callback in trainer.callbacks: if isinstance(callback, pl.callbacks.ModelCheckpoint): old_on_keyboard_interrupt = callback.on_keyboard_interrupt def on_keyboard_interrupt(self, trainer, pl_module): old_on_keyboard_interrupt(trainer, pl_module) KeyboardInterruptModelCheckpoint.on_keyboard_interrupt(self, trainer, pl_module) callback.on_keyboard_interrupt = types.MethodType(on_keyboard_interrupt, callback)
13,658
def save_model_audits(sender, **kwargs): """ The signal handler, `save_model_audits`, saves the audit logs when the original model is saved. """ instance = kwargs['instance'] if hasattr(instance, '_django_fsm_audits'): for audit in instance._django_fsm_audits: audit.save()
13,659
def _make_immutable(obj): """Recursively convert a container and objects inside of it into immutable data types.""" if isinstance(obj, (text_type, binary_type)): return obj elif isinstance(obj, Mapping): temp_dict = {} for key, value in obj.items(): if isinstance(value, Container): temp_dict[key] = _make_immutable(value) else: temp_dict[key] = value return ImmutableDict(temp_dict) elif isinstance(obj, Set): temp_set = set() for value in obj: if isinstance(value, Container): temp_set.add(_make_immutable(value)) else: temp_set.add(value) return frozenset(temp_set) elif isinstance(obj, Sequence): temp_sequence = [] for value in obj: if isinstance(value, Container): temp_sequence.append(_make_immutable(value)) else: temp_sequence.append(value) return tuple(temp_sequence) return obj
13,660
def get_source_tokens_tensor(src_tokens): """ To enable integration with PyText, src_tokens should be able to support more features than just token embeddings. Hence when dictionary features are passed from PyText it will be passed as a tuple (token_embeddings, dict_feat, ..). Thus, in this case where we need the source tokens tensor (eg to calculate batch size = source_tokens_tensor.size(0)), we get the first element on the tuple which is always guaranteed to be source tokens and do the necessary operation. eg : bsz, _ = get_source_tokens_tensor(source_tokens)[0].size(0) """ if type(src_tokens) is tuple: return src_tokens[0] else: return src_tokens
13,661
def init(db): """ Register the dumper """ db.add_format('nestedini', parse_nested_ini_file)
13,662
def merge_config_and_args(config, args):
    """
    Creates a configuration dictionary based upon command line arguments.

    Parameters
    ----------
    config : dict
        configurations loaded from the config file
    args : object
        arguments and their values as passed on the command line

    Returns
    -------
    dict
        updated configuration dictionary with arguments
        passed on the command line
    """
    arg_dict = vars(args)
    stripped_dict = {
        k: v for k, v in arg_dict.items() if (v is not None)
    }
    return {**config, **stripped_dict}
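A short usage sketch with argparse; the option names and config values are illustrative only.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--batch-size", type=int, default=None)
parser.add_argument("--lr", type=float, default=None)
args = parser.parse_args(["--lr", "0.01"])

config = {"batch_size": 32, "lr": 0.1, "epochs": 10}

# Only options actually supplied (non-None) override the config file values.
merged = merge_config_and_args(config, args)
# {'batch_size': 32, 'lr': 0.01, 'epochs': 10}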
13,663
def test_python_script(database: Path, migrations: Path) -> None: """It should run without errors.""" (migrations/"1_test.up.py").write_text(""" USER_VERSION = 1 def main(db): db.execute("CREATE TABLE Test (key)") """) (migrations/"1_test.down.py").write_text(""" USER_VERSION = 0 def main(db): db.execute("DROP TABLE Test") """) assert migrate(str(database), migrations, version=1) == 1 assert migrate(str(database), migrations, version=0) == 0
13,664
def save_augmented_data( original_raw_data: np.ndarray, new_path: str, new_filename: str, mult_factor: int, write_period: int = 200, max_samples: int = 20000, data_types: List[str] = ["signal", "frequencies"], ) -> None: """ """ # TODO: Is this method finished? total_counter = 0 write_curr = 0 shape = (50, 50) new_path = os.path.join(new_path, new_filename) index_sig = nt.config["core"]["data_types"]["signal"] index_freq = nt.config["core"]["data_types"]["frequencies"] index_grad = nt.config["core"]["data_types"]["gradient"] n_indx = len(nt.config["core"]["data_types"]) condensed_data_all = np.empty((n_indx, 0, np.prod(shape) + 1)) original_images = np.squeeze(original_raw_data[index_sig, :, :-1]) print(original_images.shape) original_labels = original_raw_data[:, :, -1][0] print(original_labels.shape) if not os.path.exists(new_path): np.save(new_path, condensed_data_all) stop = False for it in range(mult_factor): for orig_image, orig_label in zip(original_images, original_labels): # print(orig_image.shape) orig_image = orig_image.reshape(50, 50) condensed_data = np.empty((n_indx, 1, np.prod(shape) + 1)) new_img = random_transformation(orig_image, single=False) condensed_data[index_sig, 0, :] = np.append(new_img.flatten(), orig_label) dtrnd = sg.detrend(new_img, axis=0) dtrnd = sg.detrend(dtrnd, axis=1) frequencies_res = fp.frequencies2(dtrnd) frequencies_res = np.abs(fp.frequenciesshift(frequencies_res)) data_frq = resize( frequencies_res, (50, 50), anti_aliasing=True, mode="constant" ).flatten() condensed_data[index_freq, 0, :] = np.append(data_frq, orig_label) # labels_all.append(orig_label) grad = generic_gradient_magnitude(new_img, sobel) gradient_resized = resize( grad, shape, anti_aliasing=True, mode="constant" ).flatten() condensed_data[index_grad, 0, :] = np.append(gradient_resized, orig_label) condensed_data_all = np.append(condensed_data_all, condensed_data, axis=1) write_curr += 1 total_counter += 1 if write_curr >= write_period: # save to file n = list(condensed_data_all.shape) n[-1] += 1 previous_data = np.load(new_path) all_data = np.append(previous_data, condensed_data_all, axis=1) np.save(new_path, all_data) condensed_data_all = np.empty((n_indx, 0, np.prod(shape) + 1)) write_curr = 0 if total_counter >= max_samples: stop = True break if stop: break previous_data = np.load(new_path) all_data = np.append(previous_data, condensed_data_all, axis=1) np.save(new_path, all_data) # condensed_data_all = [] # labels_all = []
13,665
def contemp2pottemp(salt, tcon, tpot0=None, **rootkwargs): """Calculate conservative temp -> potential temp. Calculate the potential temperature from the absolute salinity and conservative temperature. Applies either Newton's method or Halley's method. See `aux.rootfinder` for details on implementation and control arguments. Arguments: salt (float or array): Absolute salinity in g kg-1. tcon (float or array): Conservative temperature in degrees Celsius. tpot0 (float or array, optional): Initial estimate of potential temperature in degrees Celsius. If None (default) then the conservative temperature is used. rootkwargs (dict, optional): Additional arguments for the root finder; see `aux.rootfinder` for available arguments and defaults. Returns: tpot (float or array): Potential temperature in degrees Celsius. """ # Set initial guess if tpot0 is None: tpot0 = tcon # Set up a function for the rootfinder update = rootkwargs.get('update', 'newton') if update == 'newton': dtpmax = 2 elif update == 'halley': dtpmax = 3 else: raise ValueError( 'The update method must be either "newton" or "halley"') y0 = CSEA*tcon args = (salt,) def derfun(tpot, salt): # Calculate Gibbs function *with adjusted coefficients* (g0s, *__) = gibbs0(salt, tpot, dtpmax, orig=False) tabs = TCELS + tpot hs = [g0s[0]-tabs*g0s[1], -tabs*g0s[2]] if dtpmax > 2: hs.append(-g0s[2] - tabs*g0s[3]) return hs # Apply the root-finding method tpot = aux.rootfinder( derfun, y0, tpot0, TMIN, CSEA*TMIN, args, **rootkwargs) return tpot
13,666
def get_random_string(length: int) -> str:
    """
    Return a random string of the given length, built from a combination
    of lower- and upper-case ASCII letters.
    """
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))
13,667
def _write_callback(connection_id, data_buffer, data_length_pointer): """ Callback called by Secure Transport to actually write to the socket :param connection_id: An integer identifing the connection :param data_buffer: A char pointer FFI type containing the data to write :param data_length_pointer: A size_t pointer FFI type of the amount of data to write. Will be overwritten with the amount of data actually written on return. :return: An integer status code of the result - 0 for success """ try: self = _connection_refs.get(connection_id) if not self: socket = _socket_refs.get(connection_id) else: socket = self._socket if not self and not socket: return 0 data_length = deref(data_length_pointer) data = bytes_from_buffer(data_buffer, data_length) if self and not self._done_handshake: self._client_hello += data error = None try: sent = socket.send(data) except (socket_.error) as e: error = e.errno if error is not None and error != errno.EAGAIN: if error == errno.ECONNRESET or error == errno.EPIPE: return SecurityConst.errSSLClosedNoNotify return SecurityConst.errSSLClosedAbort if sent != data_length: pointer_set(data_length_pointer, sent) return SecurityConst.errSSLWouldBlock return 0 except (KeyboardInterrupt) as e: self._exception = e return SecurityConst.errSSLPeerUserCancelled
13,668
def cache(**kwargs): """ Cache decorator. Should be called with `@cache(ttl_sec=123, transform=transform_response)` Arguments: ttl_sec: optional,number The time in seconds to cache the response if status code < 400 transform: optional,func The transform function of the wrapped function to convert the function response to request response Usage Notes: If the wrapped function returns a tuple, the transform function will not be run on the response. The first item of the tuple must be serializable. If the wrapped function returns a single response, the transform function must be passed to the decorator. The wrapper function response must be serializable. Decorators in Python are just higher-order-functions that accept a function as a single parameter, and return a function that wraps the input function. In this case, because we need to pass kwargs into our decorator function, we need an additional layer of wrapping; the outermost function accepts the kwargs, and when called, returns the decorating function `outer_wrap`, which in turn returns the wrapped input function, `inner_wrap`. @functools.wraps simply ensures that if Python introspects `inner_wrap`, it refers to `func` rather than `inner_wrap`. """ ttl_sec = kwargs["ttl_sec"] if "ttl_sec" in kwargs else default_ttl_sec transform = kwargs["transform"] if "transform" in kwargs else None redis = redis_connection.get_redis() def outer_wrap(func): @functools.wraps(func) def inner_wrap(*args, **kwargs): has_user_id = 'user_id' in request.args and request.args['user_id'] is not None key = extract_key(request.path, request.args.items()) if not has_user_id: cached_resp = redis.get(key) if cached_resp: logger.info(f"Redis Cache - hit {key}") try: deserialized = pickle.loads(cached_resp) if transform is not None: return transform(deserialized) return deserialized, 200 except Exception as e: logger.warning(f"Unable to deserialize cached response: {e}") logger.info(f"Redis Cache - miss {key}") response = func(*args, **kwargs) if len(response) == 2: resp, status_code = response if status_code < 400: serialized = pickle.dumps(resp) redis.set(key, serialized, ttl_sec) return resp, status_code serialized = pickle.dumps(response) redis.set(key, serialized, ttl_sec) return transform(response) return inner_wrap return outer_wrap
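The closing paragraph of that docstring describes the general parameterised-decorator shape; here is a minimal standalone sketch of the same three-layer pattern with Redis stripped out and made-up names.

import functools

def tagged(**kwargs):
    label = kwargs.get("label", "default")

    def outer_wrap(func):
        @functools.wraps(func)          # introspection sees `func`, not `inner_wrap`
        def inner_wrap(*args, **kw):
            print(f"[{label}] calling {func.__name__}")
            return func(*args, **kw)
        return inner_wrap
    return outer_wrap

@tagged(label="api")
def add(a, b):
    return a + b

add(1, 2)  # prints "[api] calling add", returns 3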
13,669
def RegisterTensorTransformer(name):
    """Registers a tensor transformer under the given name."""

    def decorator(obj):
        TENSOR_TRANSFORMER_REGISTRY[name] = obj
        obj.name = name
        return obj

    return decorator
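A hedged usage sketch of the registry pattern; the registry dict normally lives in the defining module, and the transformer class below is made up.

TENSOR_TRANSFORMER_REGISTRY = {}

@RegisterTensorTransformer("normalize")
class Normalize:
    def __call__(self, tensor):
        return (tensor - tensor.mean()) / tensor.std()

# Later, look the transformer up by name:
transformer_cls = TENSOR_TRANSFORMER_REGISTRY["normalize"]
assert transformer_cls.name == "normalize"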
13,670
def read_token(): """Reads and returns the authentication token. It tries to read the token from a already existing config file first. If there is no token it will get one from the putio api and store it in a config file. Location of the config file:: ~/.putiodown :returns: putio authentication token :rtype: str """ home = os.path.expanduser('~') config_file = os.path.join(home, '.putiodown') config = configparser.ConfigParser() if os.path.exists(config_file): config.read(config_file) token = config.get('putiodown', 'token') else: token = get_token() config['putiodown'] = {} config['putiodown']['token'] = token with open(config_file, 'w') as f: config.write(f) return token
13,671
def create_help(tool, model):
    """
    Create the help section of the Galaxy tool.
    @param tool the Galaxy tool
    @param model the ctd model
    """
    help_node = add_child_node(tool, "help")
    help_node.text = CDATA(utils.extract_tool_help_text(model))
13,672
def handler_factory( jinja_template_rendered: BytesIO, base_dir: Path, events: list = None, username: str = "thqm", password: str = None, oneshot: bool = False, allow_custom_events: bool = False, ): """Create a HTTPHandler class with the desired properties. Events should appear following the url paremeter 'event', controlling the server is done through the 'command' url parameter. Args: jinja_template_rendered: BytesIO object of the rendered template. base_dir: directory containing the static/ and templates/ folders. events: allowed events. username: basic auth username. password: basic auth password. oneshot: stop server after first click. allow_custom_events: the server will echo the event regardless of it being in the events list. Returns: HTTPHandler class. """ class HTTPHandler(BaseHTTPRequestHandler): extensions_map = { ".html": "text/html", "": "application/octet-stream", # Default ".css": "text/css", ".js": "text/javascript", ".png": "image/png", ".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".svg": "image/svg+xml", } def __init__(self, *args, **kwargs): if events is None: self.events = [] else: self.events = events self.require_login = password is not None self._auth = b64encode(f"{username}:{password}".encode()).decode() super().__init__(*args, **kwargs) def _do_GET(self): f_obj = self.send_head() if f_obj: self.copyfile(f_obj, self.wfile) f_obj.close() def do_GET(self): """Serve a GET request.""" if self.require_login: if self.headers.get("Authorization") == "Basic " + self._auth: self._do_GET() else: self.do_HEADAUTH() else: self._do_GET() def do_HEAD(self): """Serve a HEAD request.""" f_obj = self.send_head() if f_obj: f_obj.close() def do_HEADAUTH(self): """Handle the authentication in the header.""" self.send_response(401) self.send_header("WWW-Authenticate", 'Basic realm="thqm"') self.send_header("Content-type", "text/html") self.end_headers() def reset(self): """Redirect to /.""" self.send_response(302) self.send_header("Location", "/") self.end_headers() def send_head(self): """Common code for GET and HEAD commands. This sends the response code and MIME headers. Return value is either a file object (which has to be copied to the outputfile by the caller unless the command was HEAD, and must be closed by the caller under all circumstances), or None, in which case the caller has nothing further to do. 
""" parsed_path = urlparse(self.path) if parsed_path.query: query = parse_qs(parsed_path.query) if "event" in query: event = query["event"][0] if allow_custom_events or event in self.events: echo(event) if oneshot: self.shutdown() else: self.reset() if "command" in query: command = query["command"][0] if command == "shutdown": self.shutdown() path = unquote(parsed_path.path) f_obj = None ctype = None if path == "/": # if main page f_obj = copy(jinja_template_rendered) ctype = "text/html" else: try: f_obj = open(base_dir / path[1:], "rb") except IOError: pass if f_obj is not None: if not ctype: ctype = self.guess_type(path) self.send_response(200) self.send_header("Content-type", ctype) self.end_headers() return f_obj @staticmethod def translate_path(path: str) -> str: """Cleanup path.""" # abandon query parameters path = path.split("?", 1)[0] path = path.split("#", 1)[0] # remove first / return unquote(path)[1:] @staticmethod def get_query(path: str) -> str: """Get the first query parameter.""" paths = path.split("?", 1) if len(paths) > 1: return paths[1] return "" def shutdown(self): """Shutdown the server.""" killer = threading.Thread(target=self.server.shutdown) killer.start() @staticmethod def copyfile(source, outputfile): """Copy all data between two file objects. The SOURCE argument is a file object open for reading (or anything with a read() method) and the DESTINATION argument is a file object open for writing (or anything with a write() method). The only reason for overriding this would be to change the block size or perhaps to replace newlines by CRLF -- note however that this the default server uses this to copy binary data as well. """ shutil.copyfileobj(source, outputfile) def guess_type(self, path: str) -> str: """Guess the type of a file. Argument is a PATH (a filename). Return value is a string of the form type/subtype, usable for a MIME Content-type header. The default implementation looks the file's extension up in the table self.extensions_map, using application/octet-stream as a default; however it would be permissible (if slow) to look inside the data to make a better guess. """ ext = Path(path).suffix.lower() return self.extensions_map.get(ext, self.extensions_map[""]) def log_message(self, *args, **kwargs): """Disable all prints.""" return HTTPHandler
13,673
def add(): """This is a temporary function to allow users to easily add tracks, mainly for testing.""" form = SQLFORM(db.memo) if form.process().accepted: redirect(URL('default', 'index')) return dict(form=form)
13,674
async def test_update_failed( project: Project, server: Server, client: Client, environment: str, agent_factory: Callable[ [UUID, Optional[str], Optional[Dict[str, str]], bool, List[str]], Agent ], function_temp_dir: str, cache_agent_dir: str, ): """ This test creates a file, then update it by moving it in a forbidden location. The update should fail but the param containing the state should be updated anyway, showing the current file state, which is null. """ from inmanta_plugins.terraform.helpers.const import ( TERRAFORM_RESOURCE_STATE_PARAMETER, ) file_path_object = Path(function_temp_dir) / Path("test-file.txt") provider = LocalProvider() local_file = LocalFile( "my file", str(file_path_object), "my original content", provider ) await agent_factory( environment=environment, hostname="node1", agent_map={provider.agent: "localhost"}, code_loader=False, agent_names=[provider.agent], ) def model(purged: bool = False) -> str: m = ( "\nimport terraform\n\n" + provider.model_instance("provider") + "\n" + local_file.model_instance("file", purged) ) LOGGER.info(m) return m assert not file_path_object.exists() # Create create_model = model() project.compile(create_model) resource: Resource = project.get_resource( local_file.resource_type, resource_name="my file" ) assert resource is not None resource_id = Id.resource_str(resource.id) async def get_param_short() -> Optional[str]: return await get_param( environment=environment, client=client, param_id=TERRAFORM_RESOURCE_STATE_PARAMETER, resource_id=resource_id, ) assert ( await get_param_short() is None ), "There shouldn't be any state set at this point for this resource" assert ( await deploy_model(project, create_model, client, environment) == VersionState.success ) assert await get_param_short() is not None, "A state should have been set by now" # Update forbidden_path_object = Path("/dev/test-file.txt") local_file.path = str(forbidden_path_object) update_model = model() assert ( await deploy_model(project, update_model, client, environment) == VersionState.failed ) param = await get_param_short() assert param is not None, "The state should still be there" assert param == "null", "The state should be empty as the new file couldn't deploy" # Delete delete_model = model(True) assert ( await deploy_model(project, delete_model, client, environment) == VersionState.success ) assert ( await get_param_short() is None ), "The state should have been removed, but wasn't"
13,675
def is_valid_mac(mac): """ Validate mac address :param mac: :return: boolean """ res = False if isinstance(mac, str): if mac: res = mac_address.match(mac.lower()) is not None return res
13,676
def run_test_shape(): """ Tests the shape function. """ print() print('--------------------------------------------------') print('Testing the SHAPE function:') print('--------------------------------------------------') print() print('Test 1 of shape: r=7') shape(7) print() print('Test 2 of shape: r=4') shape(4) print() print('Test 3 of shape: r=2') shape(2)
13,677
def test_skip_all_stages(tmp_path, caplog): """ Skips all stages but the last one, which prints "All stages run". """ jobname = "test_job" stagefile = get_stagefile_path(jobname, tmp_path) stagefile.parent.mkdir(parents=True) stagefile.write_text("\n".join(STAGE_NAMES[:-1])) with caplog.at_level(logging.INFO): setup_and_run(jobname, tmp_path) assert all(ALREADY_RUN_LOG.format(s) in caplog.text for s in STAGE_NAMES[:-1]) assert "All stages run." in caplog.text
13,678
def get_model(tokenizer, lstm_units):
    """
    Constructs the model:
    Embedding vectors => LSTM => 2 output Fully-Connected neurons with softmax activation
    """
    # get the GloVe embedding vectors
    embedding_matrix = get_embedding_vectors(tokenizer)
    model = Sequential()
    model.add(Embedding(len(tokenizer.word_index)+1,
                        EMBEDDING_SIZE,
                        weights=[embedding_matrix],
                        trainable=False,
                        input_length=SEQUENCE_LENGTH))
    model.add(LSTM(lstm_units, recurrent_dropout=0.2))
    model.add(Dropout(0.3))
    model.add(Dense(2, activation="softmax"))
    # compile with the rmsprop optimizer,
    # as well as precision and recall metrics
    model.compile(optimizer="rmsprop", loss="categorical_crossentropy",
                  metrics=["accuracy", keras_metrics.precision(), keras_metrics.recall()])
    model.summary()
    return model
13,679
def annotation_multi_vertical_height(_img, _x, _y_list, _line_color, _text_color, _text_list,
                                     _thickness=1, _with_arrow=True):
    """
    Annotate multiple heights vertically.
    :param _img: the image to annotate
    :param _x: the x position (width) at which the line is drawn
    :param _y_list: list of all y coordinates
    :param _line_color: line color (BGR)
    :param _text_color: text color (BGR)
    :param _text_list: all text labels to display
    :param _thickness: line thickness
    :param _with_arrow: whether the line has arrows at both ends
    :return: the annotated image
    """
    assert len(_y_list) - 1 == len(_text_list), 'number of segments does not match number of labels'
    to_return_img = _img.copy()
    # Things to draw:
    # 1. double-headed arrow lines
    # 2. straight tick lines at each arrow end
    # 3. the text label for each segment
    for m_start_y, m_end_y, m_text in zip(_y_list[:-1], _y_list[1:], _text_list):
        if _with_arrow:
            cv2.arrowedLine(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness)
            cv2.arrowedLine(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color, thickness=_thickness)
        else:
            cv2.line(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness)
            cv2.line(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color, thickness=_thickness)
        text_start_x = _x + 10
        text_start_y = m_start_y + (m_end_y - m_start_y) // 2
        to_return_img = __annotation_text_on_image(to_return_img, (text_start_x, text_start_y),
                                                   _text_color, m_text)
    for m_y in _y_list:
        cv2.line(to_return_img, (_x - 12, m_y), (_x + 12, m_y), _line_color, thickness=_thickness)
    return to_return_img
13,680
def test_passing_url(): """Test passing URL directly""" httpbin = AnyAPI("http://httpbin.org") assert ( httpbin.GET(url="http://httpbin.org/anything").json()["url"] == "https://httpbin.org/anything" )
13,681
def _parse_indentation(lines: Iterable[str]) -> Iterable[Tuple[bool, int, str]]: """For each line, yield the tuple (indented, lineno, text).""" indentation = 0 for lineno, line in enumerate(lines): line = line.replace("\t", " ") line_indentation_match = LEADING_WHITESPACE.match(line) if line_indentation_match is None: yield (False, lineno, line) indentation = 0 else: line_indentation = len(line_indentation_match.group(0)) if indentation == 0: indentation = line_indentation if line_indentation < indentation: raise ParseError("Improper dedent", lineno) line_indentation = min(indentation, line_indentation) yield (True, lineno, line[line_indentation:])
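A small illustration of the tuples this generator yields; `LEADING_WHITESPACE` and `ParseError` are module-level helpers implied by the body and are not redefined here.

lines = [
    "step one",
    "    sub a",
    "    sub b",
    "step two",
]

for indented, lineno, text in _parse_indentation(lines):
    print(indented, lineno, repr(text))

# False 0 'step one'
# True 1 'sub a'
# True 2 'sub b'
# False 3 'step two'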
13,682
async def list_(hub, ctx, registry_name, resource_group, **kwargs): """ .. versionadded:: 3.0.0 Lists all the replications for the specified container registry. :param registry_name: The name of the container registry. :param resource_group: The name of the resource group to which the container registry belongs. CLI Example: .. code-block:: bash azurerm.containerregistry.replication.list testrepo testgroup """ result = {} regconn = await hub.exec.azurerm.utils.get_client( ctx, "containerregistry", **kwargs ) try: repls = await hub.exec.azurerm.utils.paged_object_to_list( regconn.replications.list( registry_name=registry_name, resource_group_name=resource_group ) ) for repl in repls: result[repl["name"]] = repl except CloudError as exc: await hub.exec.azurerm.utils.log_cloud_error( "containerregistry", str(exc), **kwargs ) result = {"error": str(exc)} return result
13,683
def dbrg(images, T, r): """ Segmentation by density-based region growing (DBRG). Parameters ---------- n : int Number of blurred images. M : np.ndarray The mask image. r : int Density connectivity search radius. """ n = len(images) M = _generate_init_mask(images, T) D = _density_distribution(n, M, r) S = _generate_seeds(D) # make sure there is at least one seed assert S.any() # unlabeled R = np.full(M.shape, 0, dtype=np.uint32) V = np.full(M.shape, np.NINF, dtype=np.float32) # label by density map for i, d in enumerate(D): logger.debug("density {}".format(i)) R[(d > V) & S] = i + 1 V[(d > V) & S] = d[(d > V) & S] # label by density connectivity v = np.empty(len(D) + 1, dtype=np.uint32) # buffer @timeit @jit(nopython=True) def ps_func(M, R, v): n, m = M.shape ps = [] # reset of the pixel coordinates for y in range(0, n): for x in range(0, m): if R[y, x] > 0: continue pu = min(y + r, n - 1) pd = max(y - r, 0) pr = min(x + r, m - 1) pl = max(x - r, 0) v.fill(0) for yy in range(pd, pu + 1): for xx in range(pl, pr + 1): if (xx - x) * (xx - x) + (yy - y) * (yy - y) <= r * r: v[R[yy, xx]] += 1 R[y, x] = v.argmax() if R[y, x] == 0: ps.append((y, x)) return ps ps = ps_func(M, R, v) # label by nearest neighbor @timeit @jit(nopython=True) def psv_func(ps, M, R): n, m = M.shape # psv = [] # filled result for y, x in ps: r = 1 while True: pu = min(y + r, n - 1) pd = max(y - r, 0) pr = min(x + r, m - 1) pl = max(x - r, 0) v = [] for yy in range(pd, pu + 1): for xx in range(pl, pr + 1): if R[yy, xx] > 0: v.append( (R[yy, xx], (xx - x) * (xx - x) + (yy - y) * (yy - y)) ) if len(v) == 0: r += 1 else: # v.sort(key=lambda p: p[1]) # psv.append(v[0][0]) R_min, _d_min = v[0] for _R, _d in v[1:]: if _d < _d_min: R_min, _d_min = _R, _d # psv.append(R_min) R[y, x] = R_min break # return psv return R # psv = psv_func(ps, M, R) if ps: R = psv_func(ps, M, R) # move into psv # for (y, x), v in zip(ps, psv): # R[y, x] = v # make sure each position is assigned a mask value assert np.all(R != 0) return R
13,684
def latest_window_partition_selector( context: ScheduleEvaluationContext, partition_set_def: PartitionSetDefinition[TimeWindow] ) -> Union[SkipReason, Partition[TimeWindow]]: """Creates a selector for partitions that are time windows. Selects latest time window that ends before the schedule tick time. """ partitions = partition_set_def.get_partitions(context.scheduled_execution_time) if len(partitions) == 0: return SkipReason() else: return partitions[-1]
13,685
def env_or_val(env, val, *args, __type=str, **kwargs): """Return value of environment variable (if it's defined) or a given fallback value :param env: Environment variable to look for :type env: ``str`` :param val: Either the fallback value or function to call to compute it :type val: ``str`` or a function :param args: If ``val`` is a function, these are the ``*args`` to pass to that function :type args: ``list`` :param __type: type of value to return when extracting from env variable, can be one of ``str``, ``int``, ``float``, ``bool``, ``list`` :type __type: ``type`` :param kwargs: If ``val`` is a function, these are the ``**kwargs`` to pass to that function :type kwargs: ``dict`` :return: Either the env value (if defined) or the fallback value :rtype: ``str`` """ if env not in os.environ: if isinstance(val, type(env_or_val)): val = val(*args, **kwargs) return val retval = os.environ.get(env) if __type in [str, int, float]: return __type(retval) elif __type is bool: if retval.lower() in ["true", "1", "yes"]: return True else: return False elif __type is list: return retval.split(":") else: raise ValueError("__type must be one of: str, int, float, bool, list")
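Two illustrative calls; the environment variable names are made up.

import os

os.environ["WORKER_COUNT"] = "8"

# Env var present: parsed with the requested type.
env_or_val("WORKER_COUNT", 4, __type=int)       # -> 8

# Env var absent: the fallback (or fallback function) is used instead.
env_or_val("MISSING_VAR", lambda x: x * 2, 21)  # -> 42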
13,686
def test_bakery_caching_for_AuthorizedSession(engine, oso, fixture_data): """Test that baked relationship queries don't lead to authorization bypasses for AuthorizedSession.""" from sqlalchemy.orm import Session basic_session = Session(bind=engine) all_posts = basic_session.query(Post) assert all_posts.count() == 9 first_post = all_posts[0] # Add related model query to the bakery cache. assert first_post.created_by.id == 0 oso.load_str('allow("user", "read", post: Post) if post.id = 0;') # Baked queries disabled for sqlalchemy_oso.session.AuthorizedSession. authorized_session = AuthorizedSession( oso, user="user", checked_permissions={Post: "read"}, bind=engine ) assert authorized_session.query(User).count() == 0 authorized_posts = authorized_session.query(Post) assert authorized_posts.count() == 1 first_authorized_post = authorized_posts[0] assert first_post.id == first_authorized_post.id # Should not be able to view the post's creator because there's no rule # permitting access to "read" users. assert first_authorized_post.created_by is None
13,687
def html(i): """ Input: { (skip_cid_predix) - if 'yes', skip "?cid=" prefix when creating URLs } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ d=i.get('dict',{}) scp=i.get('skip_cid_prefix','') bscp=(scp=="yes") short=i.get('short','') llm=d.get('meta',{}) llmisc=llm.get('misc',{}) lldict=llm.get('dict',{}) repo_url1=llmisc.get('repo_url1','') repo_url2=llmisc.get('repo_url2','') repo_url3=llmisc.get('repo_url3','') duoa=llmisc.get('data_uoa','') duid=llmisc.get('data_uid','') ruoa=llmisc.get('repo_uoa','') ruid=llmisc.get('repo_uid','') muid=llmisc.get('module_uid','') muoa=llmisc.get('module_uoa','') #Main title=llmisc.get('title','') authors=llmisc.get('authors','') where=llmisc.get('where','') paper_pdf_url=llmisc.get('paper_pdf_url','') paper_doi_url=llmisc.get('paper_doi_url','') artifact_doi_url=llmisc.get('artifact_doi_url','') workflow=llmisc.get('workflow','') workflow_url=llmisc.get('workflow_url','') h='' article='' if title!='': article='<b>'+title+'</b>' if authors!='': h+='<div id="ck_entries_space4"></div>\n' h+='<i>'+authors+'</i>\n' baaa=llmisc.get('badge_acm_artifact_available','') baaf=llmisc.get('badge_acm_artifact_functional','') baar=llmisc.get('badge_acm_artifact_reusable','') barr=llmisc.get('badge_acm_results_reproduced','') barp=llmisc.get('badge_acm_results_replicated','') badges='' if baaa!='': badges+=' <a href="http://cTuning.org/ae/reviewing.html#artifacts_available"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/artifacts_available_dl.jpg" width="64"></a>' if baaf!='': badges+=' <a href="http://cTuning.org/ae/reviewing.html#artifacts_functional"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/artifacts_evaluated_functional_dl.jpg" width="64"></a>' if baar!='': badges+=' <a href="http://cTuning.org/ae/reviewing.html#artifacts_reusable"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/artifacts_evaluated_reusable_dl.jpg" width="64"></a>' if barr!='': badges+=' <a href="http://cTuning.org/ae/reviewing.html#results_validated"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/results_reproduced_dl.jpg" width="64"></a>' if barp!='': badges+=' <a href="http://cTuning.org/ae/reviewing.html#results_validated"><img src="https://www.acm.org/binaries/content/gallery/acm/publications/replication-badges/results_replicated_dl.jpg" width="64"></a>' if workflow.lower()=='ck': x1='' x2='' if workflow_url!='': x1='<a href="'+workflow_url+'">' x2='</a>' badges+=' '+x1+'<img src="https://ctuning.org/ae/stamps/ck-workflow.png" width="100">'+x2 if badges!='': h+='<div id="ck_entries_space4"></div>\n' h+='<center>'+badges+'</center>\n' h1='' if short!='yes': h+='<div style="background-color:#efefef;margin:5px;padding:5px;">\n' url0=i.get('url','') urlc=url0.replace('index.php','c.php') # Needed for components # x1='' # x2='' # if url0!='' and ruid!='': # prfx='' # if not bscp: prfx='cid=' # x1='<a href="'+url0+prfx+cfg['module_deps']['component.repo']+':'+ruid+'" target="_blank">' # x2='</a>' # h+='<b>Repo name:</b> '+x1+ruoa+x2+'<br>\n' where_url=llmisc.get('where_url','') if where!='': x1='' x2='' if where_url!='': x1='<a href="'+where_url+'">' x2='</a>' h+='<b>Where published:</b> '+x1+where+x2+'<br>\n' if paper_doi_url!='': x=paper_doi_url j=paper_doi_url.find('doi.org/') if j>0: x=paper_doi_url[j+8:] h+='<b>Article DOI:</b> <a 
href="'+paper_doi_url+'">'+x+'</a><br>\n' if paper_pdf_url!='': h+='<b>Article:</b> <a href="'+paper_pdf_url+'">PDF</a><br>\n' if artifact_doi_url!='': x=artifact_doi_url j=artifact_doi_url.find('doi.org/') if j>0: x=artifact_doi_url[j+8:] h+='<b>Artifact DOI:</b> <a href="'+artifact_doi_url+'">'+x+'</a><br>\n' uaa=llmisc.get('unified_artifact_appendix','') if uaa!='': h+='<b>Unified artifact appendix:</b> <a href="'+uaa+'">Link</a><br>\n' arts=llmisc.get('artifact_sources','') arts_url=llmisc.get('artifact_sources_url','') if arts_url!='': x=arts_url if arts!='': x=arts h+='<b>Artifact before standardization:</b> <a href="'+arts_url+'">'+x+'</a><br>\n' if workflow_url!='': x=workflow_url y='Automated workflow' if workflow!='': x=workflow if x=='CK': x='Link' y='Standardized CK workflow' h+='<b>'+y+':</b> <a href="'+workflow_url+'">'+x+'</a>\n' ck_repo_uid=llmisc.get('ck_repo_uid','') if ck_repo_uid!='': prfx='' if not bscp: prfx='cid=' x=urlc+prfx+cfg['module_deps']['component.repo']+':'+ck_repo_uid h+=' (<a href="'+x+'">ReproIndex</a>)\n' h+='<br>\n' tasks=llmisc.get('tasks',{}) if len(tasks)>0: h+='<b>Standardized CK pipelines (programs):</b><br>\n' h+='<div style="margin-left:20px;">\n' h+=' <ul>\n' for tuid in tasks: tt=tasks[tuid] tuoa=tt.get('data_uoa','') if tuoa!='': prfx='' if not bscp: prfx='cid=' x='<a href="'+urlc+prfx+cfg['module_deps']['component.program']+':'+tuid+'" target="_blank">'+tuoa+'</a>' h+=' <li><span style="color:#2f0000;">'+x+'</li>\n' h+=' </ul>\n' h+='</div>\n' results=llmisc.get('results','') results_url=llmisc.get('results_url','') if results_url!='': x=results_url if results!='': x=results h+='<b>Reproducible results:</b> <a href="'+results_url+'">'+x+'</a><br>\n' some_results_replicated=llmisc.get('some_results_replicated','') if some_results_replicated=='yes': h+='<b>Some results replicated:</b> &#10004;<br>\n' rurl=llmisc.get('reproducibility_url','') if rurl!='': x='Link' if 'acm' in rurl.lower() or 'ctuning' in rurl.lower(): x='ACM and cTuning' h+='<b>Reproducible methodology:</b> <a href="'+rurl+'">'+x+'</a><br>\n' results_dashboard_url=llmisc.get('results_dashboard_url','') if results_dashboard_url!='': x=results_dashboard_url j=x.find('://') if j>=0: x=x[j+3:] h+='<b>Dashboard with results:</b> <a href="'+results_dashboard_url+'">'+x+'</a><br>\n' h+='</div>\n' # Extras h1='' if paper_doi_url!='': h1+='[&nbsp;<a href="'+paper_doi_url+'" target="_blank">paper</a>&nbsp;] \n' # ck_repo_uid=llmisc.get('ck_repo_uid','') # if ck_repo_uid!='': # prfx='' # if not bscp: prfx='cid=' # x=urlc+prfx+cfg['module_deps']['component.repo']+':'+ck_repo_uid # h1+='[&nbsp;<a href="'+x+'" target="_blank">CK repository</a>&nbsp;] \n' return {'return':0, 'html':h, 'html1':h1, 'article':article}
13,688
def multi_particle_first_np_metafit(n):
    """Fit to plots of two-body matrix elements from various normal-ordering
    schemes, where only the first n points are taken from each scheme
    """
    name = 'multi_particle_first_{}p_metafit'.format(n)

    def mpfnp(fitfn, exp_list, **kwargs):
        return multi_particle_metafit_int(
            fitfn, exp_list,
            sourcedir=DPATH_FILES_INT, savedir=DPATH_PLOTS,
            transform=first_np(n),
            super_transform_post=s_combine_like(['interaction']),
            code='mpf{}p'.format(n), mf_name=name,
            xlabel='A', ylabel='Energy (MeV)',
            **kwargs
        )
    mpfnp.__name__ = name
    return mpfnp
13,689
def readJSONLFile(file_name: Union[str, Path]) -> List[Dict]: """ Read a '.jsonl' file and create a list of dicts Args: file_name: `Union[str,Path]` The file to open Returns: The list of dictionaries read from the 'file_name' """ lines = ( open(file_name, 'r', encoding='utf-8').readlines() if isinstance(file_name, str) else file_name.read_text('utf-8').splitlines(False) ) return [json.loads(line) for line in lines]
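A quick sketch of reading a small JSON-lines file; the file name and contents are made up.

from pathlib import Path

sample = Path("records.jsonl")
sample.write_text('{"id": 1, "text": "foo"}\n{"id": 2, "text": "bar"}\n', encoding="utf-8")

records = readJSONLFile(sample)   # a plain str path is accepted as well
assert records == [{"id": 1, "text": "foo"}, {"id": 2, "text": "bar"}]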
13,690
def set_hash(node, nuc_data): """ This function sets the hash of a dataset or group of datasets in an hdf5 file as an attribute of that node. Parameters ---------- node : str String with the hdf5 node name nuc_data : str path to the nuc_data.h5 file """ the_hash = calc_hash(node, nuc_data) with tables.open_file(nuc_data, mode="a") as f: f.set_node_attr(node, "hash", the_hash)
13,691
def test_md033_bad_configuration_allowed_elements(): """ Test to verify that a configuration error is thrown when supplying the allowed_elements value with an integer that is not a string. """ # Arrange scanner = MarkdownScanner() supplied_arguments = [ "--set", "plugins.md033.allowed_elements=$#1", "--strict-config", "scan", "test/resources/rules/md004/good_list_asterisk_single_level.md", ] expected_return_code = 1 expected_output = "" expected_error = ( "BadPluginError encountered while configuring plugins:\n" + "The value for property 'plugins.md033.allowed_elements' must be of type 'str'." ) # Act execute_results = scanner.invoke_main(arguments=supplied_arguments) # Assert execute_results.assert_results( expected_output, expected_error, expected_return_code )
13,692
def _dtype_from_cogaudioformat(format: CogAudioFormat) -> np.dtype: """This method returns the numpy "data type" for a particular audio format.""" if COG_AUDIO_IS_INT(format): if COG_AUDIO_FORMAT_DEPTH(format) == COG_AUDIO_FORMAT_DEPTH_S24: return np.dtype(np.uint8) elif COG_AUDIO_FORMAT_SAMPLEBYTES(format) == 2: return np.dtype(np.int16) elif COG_AUDIO_FORMAT_SAMPLEBYTES(format) == 4: return np.dtype(np.int32) elif COG_AUDIO_FORMAT_SAMPLEBYTES(format) == 8: return np.dtype(np.int64) elif COG_AUDIO_IS_FLOAT(format): return np.dtype(np.float32) elif COG_AUDIO_IS_DOUBLE(format): return np.dtype(np.float64) raise NotImplementedError("Cog Audio Format not amongst those supported for numpy array interpretation")
13,693
def makeSiteWhitelist(jsonName, siteList):
    """
    Given a template json file name and the site whitelist from the command
    line options, return the correct site whitelist based on some silly rules
    """
    if 'LHE_PFN' in jsonName:
        siteList = ["T1_US_FNAL"]
        print("Overwriting SiteWhitelist to: %s" % siteList)
    elif 'LHE' in jsonName or 'DQMHarvest' in jsonName:
        siteList = ["T2_CH_CERN"]
        print("Overwriting SiteWhitelist to: %s" % siteList)

    return siteList
13,694
def loadConfig(configFilePath: str) -> dict:
    """Loads the configuration and validates it against the config schema."""
    config = {}
    with open(configFilePath) as configFile:
        config = json.load(configFile)

    configSchema = {}
    with open(CONFIG_SCHEMA_FILE_PATH, "r") as configSchemaFile:
        configSchema = json.load(configSchemaFile)

    jsonschema.validate(instance=config, schema=configSchema)

    return config
13,695
def version_info(): """ Get version of vakt package as tuple """ return tuple(map(int, __version__.split('.')))
13,696
def dacl(obj_name=None, obj_type="file"):
    """
    Helper function for instantiating a Dacl class.

    Args:

        obj_name (str):
            The full path to the object. If None, a blank DACL will be
            created. Default is None.

        obj_type (str):
            The type of object. Default is 'File'

    Returns:
        object: An instantiated Dacl object
    """
    if not HAS_WIN32:
        return

    class Dacl(flags(False)):
        """
        DACL Object
        """

        def __init__(self, obj_name=None, obj_type="file"):
            """
            Either load the DACL from the passed object or create an empty
            DACL. If `obj_name` is not passed, an empty DACL is created.

            Args:

                obj_name (str):
                    The full path to the object. If None, a blank DACL will be
                    created

                obj_type (Optional[str]):
                    The type of object.

            Returns:
                obj: A DACL object

            Usage:

            .. code-block:: python

                # Create an Empty DACL
                dacl = Dacl(obj_type=obj_type)

                # Load the DACL of the named object
                dacl = Dacl(obj_name, obj_type)
            """
            # Validate obj_type
            if obj_type.lower() not in self.obj_type:
                raise SaltInvocationError(
                    'Invalid "obj_type" passed: {0}'.format(obj_type)
                )

            self.dacl_type = obj_type.lower()
            if obj_name is None:
                self.dacl = win32security.ACL()
            else:
                if "registry" in self.dacl_type:
                    obj_name = self.get_reg_name(obj_name)

                try:
                    sd = win32security.GetNamedSecurityInfo(
                        obj_name, self.obj_type[self.dacl_type], self.element["dacl"]
                    )
                except pywintypes.error as exc:
                    if "The system cannot find" in exc.strerror:
                        msg = "System cannot find {0}".format(obj_name)
                        log.exception(msg)
                        raise CommandExecutionError(msg)
                    raise

                self.dacl = sd.GetSecurityDescriptorDacl()
                if self.dacl is None:
                    self.dacl = win32security.ACL()

        def get_reg_name(self, obj_name):
            """
            Take the obj_name and convert the hive to a valid registry hive.

            Args:

                obj_name (str):
                    The full path to the registry key including the hive, eg:
                    ``HKLM\\SOFTWARE\\salt``. Valid options for the hive are:

                    - HKEY_LOCAL_MACHINE
                    - MACHINE
                    - HKLM
                    - HKEY_USERS
                    - USERS
                    - HKU
                    - HKEY_CURRENT_USER
                    - CURRENT_USER
                    - HKCU
                    - HKEY_CLASSES_ROOT
                    - CLASSES_ROOT
                    - HKCR

            Returns:
                str:
                    The full path to the registry key in the format expected by
                    the Windows API

            Usage:

            .. code-block:: python

                import salt.utils.win_dacl
                dacl = salt.utils.win_dacl.Dacl()
                valid_key = dacl.get_reg_name('HKLM\\SOFTWARE\\salt')

                # Returns: MACHINE\\SOFTWARE\\salt
            """
            # Make sure the hive is correct
            # Should be MACHINE, USERS, CURRENT_USER, or CLASSES_ROOT
            hives = {
                # MACHINE
                "HKEY_LOCAL_MACHINE": "MACHINE",
                "MACHINE": "MACHINE",
                "HKLM": "MACHINE",
                # USERS
                "HKEY_USERS": "USERS",
                "USERS": "USERS",
                "HKU": "USERS",
                # CURRENT_USER
                "HKEY_CURRENT_USER": "CURRENT_USER",
                "CURRENT_USER": "CURRENT_USER",
                "HKCU": "CURRENT_USER",
                # CLASSES ROOT
                "HKEY_CLASSES_ROOT": "CLASSES_ROOT",
                "CLASSES_ROOT": "CLASSES_ROOT",
                "HKCR": "CLASSES_ROOT",
            }

            reg = obj_name.split("\\")
            passed_hive = reg.pop(0)

            try:
                valid_hive = hives[passed_hive.upper()]
            except KeyError:
                log.exception("Invalid Registry Hive: %s", passed_hive)
                raise CommandExecutionError(
                    "Invalid Registry Hive: {0}".format(passed_hive)
                )

            reg.insert(0, valid_hive)

            return r"\\".join(reg)

        def add_ace(self, principal, access_mode, permissions, applies_to):
            """
            Add an ACE to the DACL

            Args:

                principal (str):
                    The sid of the user/group for the ACE

                access_mode (str):
                    Determines the type of ACE to add. Must be either ``grant``
                    or ``deny``.

                permissions (str, list):
                    The type of permissions to grant/deny the user. Can be one
                    of the basic permissions, or a list of advanced permissions.

                applies_to (str):
                    The objects to which these permissions will apply. Not all
                    these options apply to all object types.

            Returns:
                bool: True if successful, otherwise False

            Usage:

            .. code-block:: python

                dacl = Dacl(obj_type=obj_type)
                dacl.add_ace(sid, access_mode, permission, applies_to)
                dacl.save(obj_name, protected)
            """
            sid = get_sid(principal)

            if self.dacl is None:
                raise SaltInvocationError(
                    "You must load the DACL before adding an ACE"
                )

            # Get the permission flag
            perm_flag = 0
            if isinstance(permissions, six.string_types):
                try:
                    perm_flag = self.ace_perms[self.dacl_type]["basic"][permissions]
                except KeyError as exc:
                    msg = "Invalid permission specified: {0}".format(permissions)
                    log.exception(msg)
                    raise CommandExecutionError(msg, exc)
            else:
                try:
                    for perm in permissions:
                        perm_flag |= self.ace_perms[self.dacl_type]["advanced"][perm]
                except KeyError as exc:
                    msg = "Invalid permission specified: {0}".format(perm)
                    log.exception(msg)
                    raise CommandExecutionError(msg, exc)

            if access_mode.lower() not in ["grant", "deny"]:
                raise SaltInvocationError(
                    "Invalid Access Mode: {0}".format(access_mode)
                )

            # Add ACE to the DACL
            # Grant or Deny
            try:
                if access_mode.lower() == "grant":
                    self.dacl.AddAccessAllowedAceEx(
                        win32security.ACL_REVISION_DS,
                        # Some types don't support propagation
                        # May need to use 0x0000 instead of None
                        self.ace_prop.get(self.dacl_type, {}).get(applies_to),
                        perm_flag,
                        sid,
                    )
                elif access_mode.lower() == "deny":
                    self.dacl.AddAccessDeniedAceEx(
                        win32security.ACL_REVISION_DS,
                        self.ace_prop.get(self.dacl_type, {}).get(applies_to),
                        perm_flag,
                        sid,
                    )
                else:
                    log.exception("Invalid access mode: %s", access_mode)
                    raise SaltInvocationError(
                        "Invalid access mode: {0}".format(access_mode)
                    )
            except Exception as exc:  # pylint: disable=broad-except
                return False, "Error: {0}".format(exc)

            return True

        def order_acl(self):
            """
            Put the ACEs in the ACL in the proper order. This is necessary
            because the add_ace function puts ACEs at the end of the list
            without regard for order. This will cause the following warning to
            appear in the Windows Security dialog when viewing the security for
            the object:

            ``The permissions on Directory are incorrectly ordered, which may
            cause some entries to be ineffective.``

            .. note:: Run this function after adding all your ACEs.

            The proper order is as follows:

                1. Implicit Deny
                2. Inherited Deny
                3. Implicit Deny Object
                4. Inherited Deny Object
                5. Implicit Allow
                6. Inherited Allow
                7. Implicit Allow Object
                8. Inherited Allow Object

            Usage:

            .. code-block:: python

                dacl = Dacl(obj_type=obj_type)
                dacl.add_ace(sid, access_mode, permission, applies_to)
                dacl.order_acl()
                dacl.save(obj_name, protected)
            """
            new_dacl = Dacl()
            deny_dacl = Dacl()
            deny_obj_dacl = Dacl()
            allow_dacl = Dacl()
            allow_obj_dacl = Dacl()

            # Load Non-Inherited ACEs first
            for i in range(0, self.dacl.GetAceCount()):
                ace = self.dacl.GetAce(i)
                if ace[0][1] & win32security.INHERITED_ACE == 0:
                    if ace[0][0] == win32security.ACCESS_DENIED_ACE_TYPE:
                        deny_dacl.dacl.AddAccessDeniedAceEx(
                            win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
                        )
                    elif ace[0][0] == win32security.ACCESS_DENIED_OBJECT_ACE_TYPE:
                        deny_obj_dacl.dacl.AddAccessDeniedAceEx(
                            win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
                        )
                    elif ace[0][0] == win32security.ACCESS_ALLOWED_ACE_TYPE:
                        allow_dacl.dacl.AddAccessAllowedAceEx(
                            win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
                        )
                    elif ace[0][0] == win32security.ACCESS_ALLOWED_OBJECT_ACE_TYPE:
                        allow_obj_dacl.dacl.AddAccessAllowedAceEx(
                            win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
                        )

            # Load Inherited ACEs last
            for i in range(0, self.dacl.GetAceCount()):
                ace = self.dacl.GetAce(i)
                if (
                    ace[0][1] & win32security.INHERITED_ACE
                    == win32security.INHERITED_ACE
                ):
                    ace_prop = ace[0][1] ^ win32security.INHERITED_ACE
                    if ace[0][0] == win32security.ACCESS_DENIED_ACE_TYPE:
                        deny_dacl.dacl.AddAccessDeniedAceEx(
                            win32security.ACL_REVISION_DS, ace_prop, ace[1], ace[2]
                        )
                    elif ace[0][0] == win32security.ACCESS_DENIED_OBJECT_ACE_TYPE:
                        deny_obj_dacl.dacl.AddAccessDeniedAceEx(
                            win32security.ACL_REVISION_DS, ace_prop, ace[1], ace[2]
                        )
                    elif ace[0][0] == win32security.ACCESS_ALLOWED_ACE_TYPE:
                        allow_dacl.dacl.AddAccessAllowedAceEx(
                            win32security.ACL_REVISION_DS, ace_prop, ace[1], ace[2]
                        )
                    elif ace[0][0] == win32security.ACCESS_ALLOWED_OBJECT_ACE_TYPE:
                        allow_obj_dacl.dacl.AddAccessAllowedAceEx(
                            win32security.ACL_REVISION_DS, ace_prop, ace[1], ace[2]
                        )

            # Combine ACEs in the proper order
            # Deny, Deny Object, Allow, Allow Object

            # Deny
            for i in range(0, deny_dacl.dacl.GetAceCount()):
                ace = deny_dacl.dacl.GetAce(i)
                new_dacl.dacl.AddAccessDeniedAceEx(
                    win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
                )

            # Deny Object
            for i in range(0, deny_obj_dacl.dacl.GetAceCount()):
                ace = deny_obj_dacl.dacl.GetAce(i)
                new_dacl.dacl.AddAccessDeniedAceEx(
                    win32security.ACL_REVISION_DS,
                    ace[0][1] ^ win32security.INHERITED_ACE,
                    ace[1],
                    ace[2],
                )

            # Allow
            for i in range(0, allow_dacl.dacl.GetAceCount()):
                ace = allow_dacl.dacl.GetAce(i)
                new_dacl.dacl.AddAccessAllowedAceEx(
                    win32security.ACL_REVISION_DS, ace[0][1], ace[1], ace[2]
                )

            # Allow Object
            for i in range(0, allow_obj_dacl.dacl.GetAceCount()):
                ace = allow_obj_dacl.dacl.GetAce(i)
                new_dacl.dacl.AddAccessAllowedAceEx(
                    win32security.ACL_REVISION_DS,
                    ace[0][1] ^ win32security.INHERITED_ACE,
                    ace[1],
                    ace[2],
                )

            # Set the new dacl
            self.dacl = new_dacl.dacl

        def get_ace(self, principal):
            """
            Get the ACE for a specific principal.

            Args:

                principal (str):
                    The name of the user or group for which to get the ace. Can
                    also be a SID.

            Returns:
                dict: A dictionary containing the ACEs found for the principal

            Usage:

            .. code-block:: python

                dacl = Dacl(obj_type=obj_type)
                dacl.get_ace(principal)
            """
            principal = get_name(principal)
            aces = self.list_aces()

            # Filter for the principal
            ret = {}
            for inheritance in aces:
                if principal in aces[inheritance]:
                    ret[inheritance] = {principal: aces[inheritance][principal]}

            return ret

        def list_aces(self):
            """
            List all Entries in the dacl.

            Returns:
                dict: A dictionary containing the ACEs for the object

            Usage:

            .. code-block:: python

                dacl = Dacl('C:\\Temp')
                dacl.list_aces()
            """
            ret = {"Inherited": {}, "Not Inherited": {}}

            # loop through each ACE in the DACL
            for i in range(0, self.dacl.GetAceCount()):
                ace = self.dacl.GetAce(i)

                # Get ACE Elements
                user, a_type, a_prop, a_perms, inheritance = self._ace_to_dict(ace)

                if user in ret[inheritance]:
                    ret[inheritance][user][a_type] = {
                        "applies to": a_prop,
                        "permissions": a_perms,
                    }
                else:
                    ret[inheritance][user] = {
                        a_type: {"applies to": a_prop, "permissions": a_perms}
                    }

            return ret

        def _ace_to_dict(self, ace):
            """
            Helper function for creating the ACE return dictionary
            """
            # Get the principal from the sid (object sid)
            sid = win32security.ConvertSidToStringSid(ace[2])
            try:
                principal = get_name(sid)
            except CommandExecutionError:
                principal = sid

            # Get the ace type
            ace_type = self.ace_type[ace[0][0]]

            # Is the inherited ace flag present
            inherited = ace[0][1] & win32security.INHERITED_ACE == 16

            # Ace Propagation
            ace_prop = "NA"

            # Get the ace propagation properties
            if self.dacl_type in ["file", "registry", "registry32"]:

                ace_prop = ace[0][1]

                # Remove the inherited ace flag and get propagation
                if inherited:
                    ace_prop = ace[0][1] ^ win32security.INHERITED_ACE

                # Lookup the propagation
                try:
                    ace_prop = self.ace_prop[self.dacl_type][ace_prop]
                except KeyError:
                    ace_prop = "Unknown propagation"

            # Get the object type
            obj_type = "registry" if self.dacl_type == "registry32" else self.dacl_type

            # Get the ace permissions
            # Check basic permissions first
            ace_perms = self.ace_perms[obj_type]["basic"].get(ace[1], [])

            # If it didn't find basic perms, check advanced permissions
            if not ace_perms:
                ace_perms = []
                for perm in self.ace_perms[obj_type]["advanced"]:
                    # Don't match against the string perms
                    if isinstance(perm, six.string_types):
                        continue
                    if ace[1] & perm == perm:
                        ace_perms.append(self.ace_perms[obj_type]["advanced"][perm])
                ace_perms.sort()

            # If still nothing, it must be undefined
            if not ace_perms:
                ace_perms = ["Undefined Permission: {0}".format(ace[1])]

            return (
                principal,
                ace_type,
                ace_prop,
                ace_perms,
                "Inherited" if inherited else "Not Inherited",
            )

        def rm_ace(self, principal, ace_type="all"):
            """
            Remove a specific ACE from the DACL.

            Args:

                principal (str):
                    The user whose ACE to remove. Can be the user name or a SID.

                ace_type (str):
                    The type of ACE to remove. If not specified, all ACEs will
                    be removed. Default is 'all'. Valid options are:

                    - 'grant'
                    - 'deny'
                    - 'all'

            Returns:
                list: List of removed aces

            Usage:

            .. code-block:: python

                dacl = Dacl(obj_name='C:\\temp', obj_type='file')
                dacl.rm_ace('Users')
                dacl.save(obj_name='C:\\temp')
            """
            sid = get_sid(principal)
            offset = 0
            ret = []
            for i in range(0, self.dacl.GetAceCount()):
                ace = self.dacl.GetAce(i - offset)

                # Is the inherited ace flag present
                inherited = ace[0][1] & win32security.INHERITED_ACE == 16

                if ace[2] == sid and not inherited:
                    if (
                        self.ace_type[ace[0][0]] == ace_type.lower()
                        or ace_type == "all"
                    ):
                        self.dacl.DeleteAce(i - offset)
                        ret.append(self._ace_to_dict(ace))
                        offset += 1

            if not ret:
                ret = ["ACE not found for {0}".format(principal)]

            return ret

        def save(self, obj_name, protected=None):
            """
            Save the DACL

            Args:

                obj_name (str):
                    The object for which to set permissions. This can be the
                    path to a file or folder, a registry key, printer, etc. For
                    more information about how to format the name see:

                    https://msdn.microsoft.com/en-us/library/windows/desktop/aa379593(v=vs.85).aspx

                protected (Optional[bool]):
                    True will disable inheritance for the object. False will
                    enable inheritance. None will make no change. Default is
                    ``None``.

            Returns:
                bool: True if successful, Otherwise raises an exception

            Usage:

            .. code-block:: python

                dacl = Dacl(obj_type='file')
                dacl.save('C:\\Temp', True)
            """
            sec_info = self.element["dacl"]

            if protected is not None:
                if protected:
                    sec_info = sec_info | self.inheritance["protected"]
                else:
                    sec_info = sec_info | self.inheritance["unprotected"]

            if self.dacl_type in ["registry", "registry32"]:
                obj_name = self.get_reg_name(obj_name)

            try:
                win32security.SetNamedSecurityInfo(
                    obj_name,
                    self.obj_type[self.dacl_type],
                    sec_info,
                    None,
                    None,
                    self.dacl,
                    None,
                )
            except pywintypes.error as exc:
                raise CommandExecutionError(
                    "Failed to set permissions: {0}".format(obj_name), exc.strerror
                )

            return True

    return Dacl(obj_name, obj_type)
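A minimal usage sketch for the snippet above, assuming a Windows host with pywin32 and the rest of salt.utils.win_dacl (get_sid, get_name, the flags() tables) available. The path, group name, permission key, and applies_to key are illustrative assumptions, not values confirmed by this snippet.

# Hypothetical usage of the dacl() helper above.
# 'read_execute' and 'this_folder_subfolders_files' are assumed keys from the
# flags() permission/propagation tables; 'Users' is assumed resolvable by get_sid().
file_dacl = dacl(obj_name="C:\\Temp", obj_type="file")   # load the existing DACL
file_dacl.add_ace("Users", "grant", "read_execute", "this_folder_subfolders_files")
file_dacl.order_acl()                                    # re-order explicit vs. inherited ACEs
file_dacl.save("C:\\Temp", protected=False)              # write back, keep inheritance enabled
print(file_dacl.list_aces())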
13,697
from typing import List


def max_crossing_sum(lst: List[int], mid: int, n: int) -> int:
    """
    Parameter <mid> is the floor middle index of <lst>.
    Parameter <n> is the length of the input list <lst>.

    Pre: <lst> is a list of integers and len(lst) >= 2.
    Post: returns the maximum contiguous crossing sum starting from the middle
    of <lst>.

    >>> max_crossing_sum([2, -5, 8, -6, 10, -2], 3, 6)
    12
    """
    left_sum, right_sum, total = 0, 0, 0      # initialize values

    # max sum of the left half
    k = mid - 1
    i = 0
    while i < mid:
        total += lst[k - i]
        i += 1
        if total > left_sum:
            left_sum = total

    # # max sum of the left half (equivalent for-loop version)
    # for i in range(mid - 1, -1, -1):   # iterate from index mid - 1...0 backward
    #     total += lst[i]
    #     if total > left_sum:
    #         left_sum = total

    total = 0

    # max sum of the right half
    for i in range(mid, n):   # iterate from index mid...n - 1
        total += lst[i]
        if total > right_sum:
            right_sum = total

    # note: left_sum and right_sum are each at least zero
    return left_sum + right_sum
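A short sketch of the divide-and-conquer maximum-subarray algorithm this helper is typically part of. The wrapper below is illustrative and not part of the original snippet; it assumes a non-empty list and reuses max_crossing_sum defined above.

def max_subarray_sum_sketch(lst: List[int]) -> int:
    # Hypothetical wrapper: best of left half, right half, and crossing sum.
    # Like max_crossing_sum, it treats the empty subarray as summing to 0.
    n = len(lst)
    if n == 1:
        return max(lst[0], 0)
    mid = n // 2
    left = max_subarray_sum_sketch(lst[:mid])
    right = max_subarray_sum_sketch(lst[mid:])
    crossing = max_crossing_sum(lst, mid, n)
    return max(left, right, crossing)

# max_subarray_sum_sketch([2, -5, 8, -6, 10, -2]) -> 12  (8 + -6 + 10)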
13,698
from flask import session


def reset_session_vars():
    """Reset all image-related Flask session variables and restore defaults."""
    session.pop("id", None)
    session.pop("filename", None)
    session.pop("image_path", None)
    session.pop("colors", None)
    session.pop("palette", None)
    session["max_colors"] = 8
    session["sensitivity"] = 75
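A small sketch of how a reset helper like this might be wired into a Flask view. The app setup, secret key, and route name below are illustrative assumptions, not part of the original snippet.

from flask import Flask

app = Flask(__name__)
app.secret_key = "replace-me"  # placeholder: Flask sessions require a secret key


@app.route("/reset")
def reset_view():
    # Hypothetical route that clears the stored image state and defaults.
    reset_session_vars()
    return "image session state cleared"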
13,699