code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def require_template_debug(f):
    """Make the decorated function a no-op (returning '') unless
    ``settings.TEMPLATE_DEBUG`` is truthy.

    The setting is re-read on every call, so toggling it at runtime
    takes effect immediately.
    """
    def wrapper(*args, **kwargs):
        if getattr(settings, 'TEMPLATE_DEBUG', False):
            return f(*args, **kwargs)
        return ''
    return wrapper
Decorated function is a no-op if TEMPLATE_DEBUG is False
def CopyToDateTimeString(cls, time_elements_tuple, fraction_of_second):
    """Copies the time elements and fraction of second to a string.

    Args:
      time_elements_tuple (tuple[int, int, int, int, int, int]): time
          elements, containing year, month, day of month, hours, minutes
          and seconds.
      fraction_of_second (decimal.Decimal): fraction of second, which must
          be a value between 0.0 and 1.0.

    Returns:
      str: date and time value formatted as: YYYY-MM-DD hh:mm:ss.###

    Raises:
      ValueError: if the fraction of second is out of bounds.
    """
    if not 0.0 <= fraction_of_second < 1.0:
        raise ValueError('Fraction of second value: {0:f} out of bounds.'.format(
            fraction_of_second))

    # Truncate (not round) to millisecond precision.
    milliseconds = int(fraction_of_second * definitions.MILLISECONDS_PER_SECOND)

    year, month, day, hours, minutes, seconds = time_elements_tuple[:6]
    return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:03d}'.format(
        year, month, day, hours, minutes, seconds, milliseconds)
Copies the time elements and fraction of second to a string. Args: time_elements_tuple (tuple[int, int, int, int, int, int]): time elements, contains year, month, day of month, hours, minutes and seconds. fraction_of_second (decimal.Decimal): fraction of second, which must be a value between 0.0 and 1.0. Returns: str: date and time value formatted as: YYYY-MM-DD hh:mm:ss.### Raises: ValueError: if the fraction of second is out of bounds.
def do_db(self, arg):
    """
    Usage:
      db get_config
      db use_local_file [<filename>]
      db use_aws_instance [<instance_id>]
      db aws_list_regions
      db aws_get_region
      db aws_set_region [<region_name>]
      db aws_list_instances
      db aws_create_instance [<instance_id> <size> <username> <password> <dbname>]
      db aws_delete_instance [<instance_id>]
      db help
    """
    # NOTE: the docstring above is a docopt usage spec and is parsed at
    # runtime -- its content must stay in sync with the handlers below.
    # Ordered dispatch table; the first subcommand flagged True in the
    # parsed arguments wins (mirrors the original if/elif chain).
    handlers = [
        ('get_config', lambda: self.db_get_config()),
        ('use_local_file',
         lambda: self.db_use_local_file(arg, filename=arg['<filename>'])),
        ('use_aws_instance',
         lambda: self.db_use_aws_instance(arg['<instance_id>'], arg)),
        ('aws_list_regions', lambda: self.db_aws_list_regions()),
        ('aws_get_region', lambda: self.db_aws_get_region()),
        ('aws_set_region',
         lambda: self.db_aws_set_region(arg['<region_name>'])),
        ('aws_list_instances', lambda: self.db_aws_list_instances()),
        ('aws_create_instance',
         lambda: self.db_create_aws_db_instance(
             arg['<instance_id>'], arg['<size>'], arg['<username>'],
             arg['<password>'], arg['<dbname>'])),
        ('aws_delete_instance',
         lambda: self.db_aws_delete_instance(arg['<instance_id>'])),
    ]
    for subcommand, handler in handlers:
        if arg[subcommand]:
            handler()
            return
    self.help_db()
Usage: db get_config db use_local_file [<filename>] db use_aws_instance [<instance_id>] db aws_list_regions db aws_get_region db aws_set_region [<region_name>] db aws_list_instances db aws_create_instance [<instance_id> <size> <username> <password> <dbname>] db aws_delete_instance [<instance_id>] db help
def get_POST_data(self):
    """
    Returns:
        dict: POST data, which can be sent to webform using
        :py:mod:`urllib` or similar library.
    """
    self._postprocess()

    # Some fields need to be remapped depending on the type of media;
    # fall back to the "else" mapping when the media type is unknown.
    media_type = self._POST["P0502010__b"]
    field_mapping = self.mapping.get(media_type, self.mapping["else"])
    self._apply_mapping(field_mapping)

    self._check_required_fields()

    return self._POST
Returns: dict: POST data, which can be sent to webform using \ :py:mod:`urllib` or similar library
def until(self, regex):
    """Block until *regex* is encountered on the telnet connection."""
    logger.debug('waiting for %s', regex)
    # MULTILINE so ^/$ anchors match per line of the stream output.
    pattern = re.compile(regex, re.M)
    self.tn.expect([pattern])
Wait until the regex encountered
def create_app(app_name, config=None, db=None, celery=None):
    """App factory helper.

    Strategy:
      - initialize the app
      - import the module named by ``app_name``
      - try to invoke ``<module>.run_app``
      - if ``FANTASY_ACTIVE_DB`` is ``'yes'``, run the auto-migrate steps
        (the original docstring referred to ``FANTASY_PRIMARY_NODE`` --
        the code actually checks ``FANTASY_ACTIVE_DB``)
      - install error handlers

    :param app_name: dotted module path of the application to load.
    :param config: optional mapping merged into ``app.config``.
        (Changed from a mutable ``{}`` default; ``None`` behaves the same.)
    :param db: optional database handle; defaults to the module-level ``_db``.
    :param celery: optional Celery instance attached as ``app.celery``.
    :return: the configured application instance.
    """
    track_mode = os.environ['FANTASY_TRACK_MODE'] == 'yes'
    if track_mode:
        print('(00/14)fantasy track mode active...')
    active_db = os.environ['FANTASY_ACTIVE_DB'] == 'yes'

    if track_mode:
        print('(01/14)hacking webargs...')
    from webargs.flaskparser import parser
    from . import error_handler, hacker, cli
    hacker.hack_webargs()

    migrations_root = os.path.join(
        os.environ.get('FANTASY_MIGRATION_PATH', os.getcwd()), 'migrations')

    if track_mode:
        print('(02/14)initial app...')
    mod = importlib.import_module(app_name)
    app = FantasyFlask(__name__, root_path=os.path.dirname(mod.__file__))

    if track_mode:
        print('(03/14)update app.config...')
    if config:
        app.config.update(config)
    # The settings module must be declared explicitly by the environment;
    # otherwise it is not loaded at all.
    config_module = os.environ.get('FANTASY_SETTINGS_MODULE', None)
    if track_mode:
        print(" found config module %s,try load it..." % config_module)
    if config_module:
        app.config.from_object(config_module)

    if track_mode:
        print('(04/14)confirm celery...')
    if celery:
        app.celery = celery

    if track_mode:
        print('(05/14)bind app context...')
    with app.app_context():
        if track_mode:
            print('(06/14)confirm db handle...')
        if db is None:
            global _db
            app.db = _db
        else:
            app.db = db

        if track_mode:
            print('(07/14)confirm cache...')
        if os.environ['FANTASY_ACTIVE_CACHE'] != 'no':
            from flask_caching import Cache
            app.cache = Cache(app, config=app.config)

        if track_mode:
            print('(08/14)confirm sentry...')
        if os.environ.get('FANTASY_ACTIVE_SENTRY') != 'no':
            from raven.contrib.flask import Sentry
            Sentry(app)

        if track_mode:
            print('(09/14)active app...')
        if hasattr(mod, 'run_app'):
            run_app = getattr(mod, 'run_app')
            try:
                run_app(app)
            except Exception as e:
                # Report to sentry when available, then fail hard: a broken
                # run_app means the app must not come up half-configured.
                if hasattr(app, 'sentry'):
                    app.sentry.handle_exception(e)
                import sys
                import traceback
                traceback.print_exc()
                sys.exit(-1)

        if active_db and app.db:
            if track_mode:
                print('(10/14)trigger auto migrate...')
            smart_database(app)
            smart_migrate(app, migrations_root)
            smart_account(app)
            app.db.init_app(app)

            @app.teardown_request
            def session_clear(exception=None):
                # Roll back a dirty session on errors, always release it.
                if exception and app.db.session.is_active:
                    app.db.session.rollback()
                app.db.session.remove()

        if track_mode:
            print('(11/14)bind error handle...')

        @parser.error_handler
        def h_webargs(error):
            return error_handler.webargs_error(error)

        @app.errorhandler(422)
        def h_422(error):
            return error_handler.http422(error)

        @app.errorhandler(500)
        def h_500(error):
            return error_handler.http500(error)

        if hasattr(mod, 'error_handler'):
            # Bug fix: previously fetched 'error_handle' (missing the final
            # 'r') after checking for 'error_handler', raising AttributeError
            # whenever the module actually defined error_handler.
            error_handle = getattr(mod, 'error_handler')
            error_handle(app)

        if track_mode:
            print('(12/14)bind admin handle...')
        if hasattr(mod, 'run_admin'):
            import flask_admin
            admin = flask_admin.Admin(
                name=os.environ.get('FANTASY_ADMIN_NAME', 'Admin'),
                template_mode=os.environ.get('FANTASY_ADMIN_TEMPLATE_MODE',
                                             'bootstrap3'))
            run_admin = getattr(mod, 'run_admin')
            run_admin(admin)
            admin.init_app(app)

        if track_mode:
            print('(13/14)bind ff command...')
        app.cli.add_command(cli.ff)

        if track_mode:
            print('(14/14)bind cli command...')
        if hasattr(mod, 'run_cli'):
            run_cli = getattr(mod, 'run_cli')
            run_cli(app)

    return app
App Factory 工具 策略是: - 初始化app - 根据app_name,装载指定的模块 - 尝试装载app.run_app - 如果指定了`FANTASY_PRIMARY_NODE`,则尝试进行migrate操作 - 装载error handler :return:
def start_with(self, x):
    """Return all arguments beginning with the given string, or with any
    string in the given collection of prefixes.

    :param x: a single prefix string, or a collection of prefix strings.
    :return: an ArgsList of the matching arguments.
    """
    matched = []
    for arg in self.all:
        if _is_collection(x):
            # Bug fix: the inner check previously called arg.startswith(x)
            # (the whole collection) instead of the current prefix, which
            # raised TypeError for list prefixes.
            for prefix in x:
                if arg.startswith(prefix):
                    matched.append(arg)
                    break
        else:
            if arg.startswith(x):
                matched.append(arg)
    return ArgsList(matched, no_argv=True)
Returns all arguments beginning with given string (or list thereof).
def read_uint16(self):
    """Read a big-endian 16-bit unsigned integer from the payload.

    Returns ``NC.ERR_PROTOCOL`` alone when fewer than 2 bytes remain,
    otherwise the tuple ``(NC.ERR_SUCCESS, value)``.
    """
    if self.pos + 2 > self.remaining_length:
        return NC.ERR_PROTOCOL
    high = self.payload[self.pos]
    low = self.payload[self.pos + 1]
    self.pos += 2
    return NC.ERR_SUCCESS, (high << 8) + low
Read 2 bytes.
def find_matching_endpoints(self, discovery_ns):
    """Compute the currently matching endpoints.

    The match predicate consults ``self.matching_operations`` at call
    time, deferring evaluation until the endpoints are iterated.
    """
    def is_match(operation, ns, rule):
        return operation in self.matching_operations

    return list(iter_endpoints(self.graph, is_match))
Compute current matching endpoints. Evaluated as a property to defer evaluation.
def run_analysis(self, argv):
    """Run this analysis: set up the ROI, fit a baseline model,
    localize sources, search for new sources, and write the result.

    :param argv: command-line argument list passed to the parser.
    :raises RuntimeError: if the Fermi ScienceTools are not available.
    """
    args = self._parser.parse_args(argv)
    if not HAVE_ST:
        raise RuntimeError(
            "Trying to run fermipy analysis, but don't have ST")
    # Build the analysis object; workdir_regex limits which files are
    # staged into/out of the working directory.
    gta = GTAnalysis(args.config, logging={'verbosity': 3},
                     fileio={'workdir_regex': '\.xml$|\.npy$'})
    gta.setup(overwrite=False)
    # Baseline fit; only fit sources predicted to contribute >= 1e3 counts.
    baseline_roi_fit(gta, make_plots=args.make_plots,
                     minmax_npred=[1e3, np.inf])
    localize_sources(gta, nstep=5, dtheta_max=0.5, update=True,
                     prefix='base', make_plots=args.make_plots)
    # Search for additional sources around the ROI center
    # (NOTE(review): np.nan as the outer radius presumably means
    # "no outer limit" -- confirm against find_sources docs).
    gta.find_sources(sqrt_ts_threshold=5.0, search_skydir=gta.roi.skydir,
                     search_minmax_radius=[1.0, np.nan])
    gta.optimize()
    gta.print_roi()
    gta.print_params()
    # Free normalizations of sources within 1 deg and re-fit with covariance.
    gta.free_sources(skydir=gta.roi.skydir, distance=1.0, pars='norm')
    gta.fit(covar=True)
    gta.print_roi()
    gta.print_params()
    gta.write_roi(args.roi_baseline, make_plots=args.make_plots)
Run this analysis
def train_cv(self, num_folds, fold, random=None):
    """
    Generates a training fold for cross-validation.

    :param num_folds: the number of folds of cross-validation, eg 10
    :type num_folds: int
    :param fold: the current fold (0-based)
    :type fold: int
    :param random: the random number generator
    :type random: Random
    :return: the training fold
    :rtype: Instances
    """
    # Choose the JNI signature depending on whether a Random was supplied.
    if random is not None:
        jinstances = javabridge.call(
            self.jobject, "trainCV",
            "(IILjava/util/Random;)Lweka/core/Instances;",
            num_folds, fold, random.jobject)
    else:
        jinstances = javabridge.call(
            self.jobject, "trainCV", "(II)Lweka/core/Instances;",
            num_folds, fold)
    return Instances(jinstances)
Generates a training fold for cross-validation. :param num_folds: the number of folds of cross-validation, eg 10 :type num_folds: int :param fold: the current fold (0-based) :type fold: int :param random: the random number generator :type random: Random :return: the training fold :rtype: Instances
def _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna):
    """numba implementation of antenna_uvw

    Validates the input shapes, then decomposes baseline UVW coordinates
    into per-antenna UVW coordinates, one chunk (unique time) at a time.
    Unfilled entries remain NaN.
    """
    # --- shape validation ---
    if antenna1.ndim != 1:
        raise ValueError("antenna1 shape should be (row,)")
    if antenna2.ndim != 1:
        raise ValueError("antenna2 shape should be (row,)")
    if uvw.ndim != 2 or uvw.shape[1] != 3:
        raise ValueError("uvw shape should be (row, 3)")
    if not (uvw.shape[0] == antenna1.shape[0] == antenna2.shape[0]):
        raise ValueError("First dimension of uvw, antenna1 "
                         "and antenna2 do not match")
    if chunks.ndim != 1:
        raise ValueError("chunks shape should be (utime,)")
    if nr_of_antenna < 1:
        raise ValueError("nr_of_antenna < 1")

    # NaN-filled so missing antennas are detectable downstream.
    result = np.full((chunks.shape[0], nr_of_antenna, 3),
                     np.nan, dtype=uvw.dtype)

    offset = 0
    for chunk_index in range(chunks.shape[0]):
        next_offset = offset + chunks[chunk_index]
        # one pass should be enough!
        _antenna_uvw_loop(uvw, antenna1, antenna2, result,
                          chunk_index, offset, next_offset)
        offset = next_offset

    return result
numba implementation of antenna_uvw
def _fileobj_to_fd(fileobj): """ Return a file descriptor from a file object. If given an integer will simply return that integer back. """ if isinstance(fileobj, int): fd = fileobj else: try: fd = int(fileobj.fileno()) except (AttributeError, TypeError, ValueError): raise ValueError("Invalid file object: {0!r}".format(fileobj)) if fd < 0: raise ValueError("Invalid file descriptor: {0}".format(fd)) return fd
Return a file descriptor from a file object. If given an integer will simply return that integer back.
def mget(self, ids, index=None, doc_type=None, **query_params):
    """
    Get multiple JSON documents in a single request.

    Each entry of *ids* may be either:
      - a tuple ``(index, type, id)`` or ``(index, type, id, fields)``, or
      - a bare id, in which case *index* and *doc_type* are required.
    """
    if not ids:
        return []

    docs = []
    for entry in ids:
        if isinstance(entry, tuple):
            if len(entry) == 3:
                idx, typ, doc_id = entry
                docs.append({"_index": idx, "_type": typ, "_id": doc_id})
            elif len(entry) == 4:
                idx, typ, doc_id, fields = entry
                docs.append({"_index": idx, "_type": typ, "_id": doc_id,
                             "fields": fields})
        else:
            if index is None:
                raise InvalidQuery("index value is required for id")
            if doc_type is None:
                raise InvalidQuery("doc_type value is required for id")
            docs.append({"_index": index, "_type": doc_type, "_id": entry})

    results = self._send_request('GET', "/_mget",
                                 body={'docs': docs},
                                 params=query_params)
    if 'docs' not in results:
        return []
    model = self.model
    return [model(self, item) for item in results['docs']]
Get multi JSON documents. ids can be: list of tuple: (index, type, id) list of ids: index and doc_type are required
def mpl_get_cb_bound_below_plot(ax):
    """Return ``[left, bottom, width, height]`` for a colorbar axes
    placed below the provided axes object.

    Takes into account changes of the axes due to aspect-ratio settings.
    Parts of this code are adapted from matplotlib's transforms.py.

    Important: use only AFTER ``fig.subplots_adjust(...)``.
    """
    fig_w, fig_h = ax.get_figure().get_size_inches()
    fig_aspect = fig_h / fig_w

    # Shrink a frozen copy of the axes box to its data aspect ratio to get
    # the width the rendered axes actually occupies.
    frozen_box = ax.get_position().frozen()
    shrunk_bounds = frozen_box.shrunk_to_aspect(
        ax.get_data_ratio(), frozen_box, fig_aspect).bounds

    ax_bounds = ax.get_position().bounds
    # Colorbar height fixed at 0.03 figure-fraction, placed 0.14 below the
    # axes origin; width follows the aspect-corrected axes width.
    return [ax_bounds[0], ax_bounds[1] - 0.14, shrunk_bounds[2], 0.03]
Return the coordinates for a colorbar axes below the provided axes object. Take into account the changes of the axes due to aspect ratio settings. Parts of this code are taken from the transforms.py file from matplotlib Important: Use only AFTER fig.subplots_adjust(...) Use as: =======
def check_labels_file_header(filename):
    """Validate that *filename* corresponds to labels for the MNIST
    dataset by checking its magic number (2049).
    """
    with tf.gfile.Open(filename, 'rb') as f:
        magic = read32(f)
        read32(f)  # num_items field, unused here
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST file %s' %
                             (magic, f.name))
Validate that filename corresponds to labels for the MNIST dataset.
def _check_embedded_object(embedded_object, type, value, element_kind,
                           element_name):
    # pylint: disable=redefined-builtin
    """
    Check whether embedded-object-related parameters are ok.

    Validates that:
      * ``embedded_object`` is 'instance' or 'object',
      * the CIM ``type`` is 'string' (embedded objects are carried as
        strings on the wire),
      * ``value`` (scalar, or the first element of an array) is a
        CIMInstance or CIMClass when it is not None.

    Raises:
        ValueError: if any of the above checks fails.
    """
    if embedded_object not in ('instance', 'object'):
        raise ValueError(
            _format("{0} {1!A} specifies an invalid value for "
                    "embedded_object: {2!A} (must be 'instance' or 'object')",
                    element_kind, element_name, embedded_object))

    if type != 'string':
        raise ValueError(
            _format("{0} {1!A} specifies embedded_object {2!A} but its CIM "
                    "type is invalid: {3!A} (must be 'string')",
                    element_kind, element_name, embedded_object, type))

    if value is not None:
        if isinstance(value, list):
            if value:
                # Only the first array element is checked; remaining
                # elements are assumed to have a consistent type.
                v0 = value[0]
                if v0 is not None and \
                        not isinstance(v0, (CIMInstance, CIMClass)):
                    raise ValueError(
                        _format("Array {0} {1!A} specifies embedded_object "
                                "{2!A} but the Python type of its first array "
                                "value is invalid: {3} (must be CIMInstance "
                                "or CIMClass)",
                                element_kind, element_name, embedded_object,
                                builtin_type(v0)))
        else:
            if not isinstance(value, (CIMInstance, CIMClass)):
                raise ValueError(
                    _format("{0} {1!A} specifies embedded_object {2!A} but "
                            "the Python type of its value is invalid: {3} "
                            "(must be CIMInstance or CIMClass)",
                            element_kind, element_name, embedded_object,
                            builtin_type(value)))
Check whether embedded-object-related parameters are ok.
def imap(requests, stream=False, size=2, exception_handler=None):
    """Concurrently converts a generator object of Requests to a generator
    of Responses.

    :param requests: a generator of Request objects.
    :param stream: If True, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time.
        default is 2
    :param exception_handler: Callback function, called when exception
        occurred. Params: Request, Exception
    """
    pool = Pool(size)

    def send(req):
        return req.send(stream=stream)

    # imap_unordered yields each request back as soon as it finishes.
    for finished in pool.imap_unordered(send, requests):
        if finished.response is not None:
            yield finished.response
        elif exception_handler:
            handled = exception_handler(finished, finished.exception)
            if handled is not None:
                yield handled

    pool.join()
Concurrently converts a generator object of Requests to a generator of Responses. :param requests: a generator of Request objects. :param stream: If True, the content will not be downloaded immediately. :param size: Specifies the number of requests to make at a time. default is 2 :param exception_handler: Callback function, called when exception occured. Params: Request, Exception
def update_lazyevals(self):
    """Update all LazyEvals owned by this object.

    ``self.lazy_evals`` must be set to a LazyEval object (or an iterable
    of them) sufficient to update all owned LazyEval objects.
    """
    lazy = self.lazy_evals
    if lazy is None:
        return
    if isinstance(lazy, LazyEval):
        lazy.get_updated()
    else:
        for lazy_eval in lazy:
            lazy_eval.get_updated()
Update all LazyEvals in self self.lzy_evals must be set to LazyEval object(s) enough to update all owned LazyEval objects.
def _prune_maps_to_sequences(self): ''' When we merge the SIFTS maps, we can extend the sequence maps such that they have elements in their domain that we removed from the sequence e.g. 1A2P, residue 'B 3 ' is removed because Rosetta barfs on it. Here, we prune the maps so that their domains do not have elements that were removed from sequences.''' for c, seq in self.atom_sequences.iteritems(): res_ids = [r[0] for r in seq] for_removal = [] for k, _, _ in self.atom_to_seqres_sequence_maps[c]: if k not in res_ids: for_removal.append(k) for res_id in for_removal: self.atom_to_seqres_sequence_maps[c].remove(res_id)
When we merge the SIFTS maps, we can extend the sequence maps such that they have elements in their domain that we removed from the sequence e.g. 1A2P, residue 'B 3 ' is removed because Rosetta barfs on it. Here, we prune the maps so that their domains do not have elements that were removed from sequences.
def add(self, child):
    """
    Adds a typed child object to the behavioral object.

    @param child: Child object to be added.
    @raise ModelError: if the child's type is not supported.
    """
    # Ordered (type, adder) pairs; first isinstance match wins, preserving
    # the precedence of the original if/elif chain.
    dispatch = (
        (StateVariable, self.add_state_variable),
        (DerivedVariable, self.add_derived_variable),
        (ConditionalDerivedVariable, self.add_conditional_derived_variable),
        (TimeDerivative, self.add_time_derivative),
        (EventHandler, self.add_event_handler),
        (KineticScheme, self.add_kinetic_scheme),
    )
    for child_type, adder in dispatch:
        if isinstance(child, child_type):
            adder(child)
            return
    raise ModelError('Unsupported child element')
Adds a typed child object to the behavioral object. @param child: Child object to be added.
def copy(self, empty=False):
    """Return an independent copy of the current object.

    Parameters
    ----------
    empty : bool
        If True, return a fresh, unfilled instance of the same class
        instead of copying any properties over.

    Returns
    -------
    A new instance of ``type(self)``.
    """
    # Create an empty object without running __init__.
    newobject = self.__new__(self.__class__)
    if empty:
        # Bug fix: previously this returned None (bare `return`), which
        # contradicted the documented contract of returning a copy.
        return newobject

    # And fill it!
    for prop in ["_properties", "_side_properties",
                 "_derived_properties", "_build_properties"]:
        if prop not in dir(self):
            continue
        try:
            # Try to deep copy, but some members (e.g. wcs objects)
            # cannot be deep-copied -- fall back to a shallow copy.
            newobject.__dict__[prop] = copy.deepcopy(self.__dict__[prop])
        except Exception:
            newobject.__dict__[prop] = copy.copy(self.__dict__[prop])

    # Make sure internal state is consistent.
    newobject._update_()
    return newobject
returns an independent copy of the current object.
def scp(args):
    """
    Transfer files to or from EC2 instance.

    Use "--" to separate scp args from aegea args:

        aegea scp -- -r local_dir instance_name:~/remote_dir
    """
    if args.scp_args[0] == "--":
        del args.scp_args[0]
    user_or_hostname_chars = string.ascii_letters + string.digits
    for i, arg in enumerate(args.scp_args):
        # Robustness fix: guard `arg` being empty (previously arg[0]
        # raised IndexError on an empty argument).
        if arg and arg[0] in user_or_hostname_chars and ":" in arg:
            # Looks like [user@]host:path -- resolve the instance name to
            # its public DNS and re-assemble the argument.
            hostname, colon, path = arg.partition(":")
            username, at, hostname = hostname.rpartition("@")
            hostname = resolve_instance_public_dns(hostname)
            if not (username or at):
                try:
                    username, at = get_linux_username(), "@"
                except Exception:
                    logger.info("Unable to determine IAM username, using local username")
            args.scp_args[i] = username + at + hostname + colon + path
    # Replace the current process with scp; does not return.
    os.execvp("scp", ["scp"] + args.scp_args)
Transfer files to or from EC2 instance. Use "--" to separate scp args from aegea args: aegea scp -- -r local_dir instance_name:~/remote_dir
def setCommonInput(configObj, createOutwcs=True):
    """
    The common interface interpreter for MultiDrizzle tasks which not only runs
    'process_input()' but 'createImageObject()' and 'defineOutput()' as well to
    fully setup all inputs for use with the rest of the MultiDrizzle steps either
    as stand-alone tasks or internally to MultiDrizzle itself.

    Parameters
    ----------
    configObj : object
        configObj instance or simple dictionary of input parameters
    createOutwcs : bool
        If True, also build the output WCS from the input images.

    Returns
    -------
    imageObjectList : list of imageObject objects
        list of imageObject instances, 1 for each input exposure
        (None on failure)
    outwcs : object
        imageObject instance defining the final output frame
        (None when createOutwcs is False or on failure)

    Notes
    -----
    At a minimum, the configObj instance (dictionary) should contain:
        configObj = {'input':None,'output':None }

    If provided, the configObj should contain the values of all the
    multidrizzle parameters as set by the user with TEAL. If no configObj
    is given, it will retrieve the default values automatically.  In either
    case, the values from the input_dict will be merged in with the
    configObj before being used by the rest of the code.

    Examples
    --------
    You can set *createOutwcs=False* for the cases where you only want the
    images processed and no output wcs information in necessary; as in:

    >>> imageObjectList,outwcs = processInput.setCommonInput(configObj)

    """
    # make sure 'updatewcs' is set to False when running from GUI or if missing
    # from configObj:
    if 'updatewcs' not in configObj:
        configObj['updatewcs'] = False

    if not createOutwcs or not configObj['coeffs']:
        # we're probably just working on single images here
        configObj['updatewcs'] = False

    # maybe we can chunk this part up some more so that we can call just the
    # parts we want

    # Interpret input, read and convert and update input files, then return
    # list of input filenames and derived output filename
    asndict, ivmlist, output = process_input(
        configObj['input'], configObj['output'],
        updatewcs=configObj['updatewcs'], wcskey=configObj['wcskey'],
        **configObj['STATE OF INPUT FILES'])

    if not asndict:
        return None, None

    # convert the filenames from asndict into a list of full filenames
    files = [fileutil.buildRootname(f) for f in asndict['order']]
    original_files = asndict['original_file_names']

    # interpret MDRIZTAB, if specified, and update configObj accordingly
    # This can be done here because MDRIZTAB does not include values for
    # input, output, or updatewcs.
    if 'mdriztab' in configObj and configObj['mdriztab']:
        print("Reading in MDRIZTAB parameters for {} files".format(len(files)))
        mdriztab_dict = mdzhandler.getMdriztabParameters(files)

        # Update configObj with values from mpars
        cfgpars.mergeConfigObj(configObj, mdriztab_dict)

    # Convert interpreted list of input files from process_input into a list
    # of imageObject instances for use by the MultiDrizzle tasks.
    instrpars = configObj['INSTRUMENT PARAMETERS']
    # pass in 'proc_unit' to initialize unit conversions as necessary
    instrpars['proc_unit'] = configObj['proc_unit']

    # Distortion correction only applies when coefficients are requested.
    undistort = True
    if not configObj['coeffs']:
        undistort = False

    # determine whether parallel processing will be performed
    use_parallel = False
    if util.can_parallel:
        # look to see whether steps which can be run using multiprocessing
        # have been turned on
        for stepnum in parallel_steps:
            sname = util.getSectionName(configObj, stepnum[0])
            if configObj[sname][stepnum[1]]:
                use_parallel = True
                break

    # interpret all 'bits' related parameters and convert them to integers
    configObj['resetbits'] = interpret_bit_flags(configObj['resetbits'])
    step3name = util.getSectionName(configObj, 3)
    configObj[step3name]['driz_sep_bits'] = interpret_bit_flags(
        configObj[step3name]['driz_sep_bits'])
    step7name = util.getSectionName(configObj, 7)
    configObj[step7name]['final_bits'] = interpret_bit_flags(
        configObj[step7name]['final_bits'])

    # Verify any refimage parameters to be used
    step3aname = util.getSectionName(configObj, '3a')
    if not util.verifyRefimage(configObj[step3aname]['driz_sep_refimage']):
        msg = 'No refimage with WCS found!\n ' + \
            ' This could be caused by one of 2 problems:\n' + \
            ' * filename does not specify an extension with a valid WCS.\n' + \
            ' * can not find the file.\n' + \
            'Please check the filename specified in the "refimage" parameter.'
        print(textutil.textbox(msg))
        return None, None
    step7aname = util.getSectionName(configObj, '7a')
    if not util.verifyRefimage(configObj[step7aname]['final_refimage']):
        msg = 'No refimage with WCS found!\n ' + \
            ' This could be caused by one of 2 problems:\n' + \
            ' * filename does not specify an extension with a valid WCS.\n' + \
            ' * can not find the file.\n' + \
            'Please check the filename specified in the "refimage" parameter.'
        print(textutil.textbox(msg))
        return None, None

    # Build imageObject list for all the valid, shift-updated input files
    log.info('-Creating imageObject List as input for processing steps.')
    if 'in_memory' in configObj:
        virtual = configObj['in_memory']
    else:
        virtual = False

    imageObjectList = createImageObjectList(files, instrpars,
                                            group=configObj['group'],
                                            undistort=undistort,
                                            inmemory=virtual)

    # Add original file names as "hidden" attributes of imageObject
    assert(len(original_files) == len(imageObjectList))  # TODO: remove after extensive testing
    for i in range(len(imageObjectList)):
        imageObjectList[i]._original_file_name = original_files[i]

    # apply context parameter
    applyContextPar(imageObjectList, configObj['context'])

    # reset DQ bits if requested by user
    resetDQBits(imageObjectList, cr_bits_value=configObj['resetbits'])

    # Add info about input IVM files at this point to the imageObjectList
    addIVMInputs(imageObjectList, ivmlist)

    if createOutwcs:
        log.info('-Creating output WCS.')
        # Build output WCS and update imageObjectList with output WCS info
        outwcs = wcs_functions.make_outputwcs(imageObjectList, output,
                                              configObj=configObj,
                                              perfect=True)
        outwcs.final_wcs.printwcs()
    else:
        outwcs = None

    try:
        # Provide user with some information on resource usage for this run
        # raises ValueError Exception in interactive mode and user quits
        num_cores = configObj.get('num_cores') if use_parallel else 1
        reportResourceUsage(imageObjectList, outwcs, num_cores)
    except ValueError:
        imageObjectList = None

    return imageObjectList, outwcs
The common interface interpreter for MultiDrizzle tasks which not only runs 'process_input()' but 'createImageObject()' and 'defineOutput()' as well to fully setup all inputs for use with the rest of the MultiDrizzle steps either as stand-alone tasks or internally to MultiDrizzle itself. Parameters ---------- configObj : object configObj instance or simple dictionary of input parameters imageObjectList : list of imageObject objects list of imageObject instances, 1 for each input exposure outwcs : object imageObject instance defining the final output frame Notes ----- At a minimum, the configObj instance (dictionary) should contain: configObj = {'input':None,'output':None } If provided, the configObj should contain the values of all the multidrizzle parameters as set by the user with TEAL. If no configObj is given, it will retrieve the default values automatically. In either case, the values from the input_dict will be merged in with the configObj before being used by the rest of the code. Examples -------- You can set *createOutwcs=False* for the cases where you only want the images processed and no output wcs information in necessary; as in: >>> imageObjectList,outwcs = processInput.processCommonInput(configObj)
def next_frame_savp_gan():
    """SAVP hparams with only the GAN objective enabled (no VAE)."""
    hparams = next_frame_savp()
    # Objective selection: GAN on, VAE off.
    hparams.use_vae = False
    hparams.use_gan = True
    hparams.gan_loss = "cross_entropy"
    hparams.gan_loss_multiplier = 0.001
    # Optimizer / schedule tuned for the GAN-only objective.
    hparams.optimizer_adam_beta1 = 0.5
    hparams.learning_rate_constant = 2e-4
    hparams.learning_rate_schedule = "constant*linear_decay"
    hparams.learning_rate_decay_steps = 100000
    return hparams
SAVP - GAN only model.
def set_iscsi_info(self, target_name, lun, ip_address, port='3260',
                   auth_method=None, username=None, password=None):
    """Set iSCSI details of the system in UEFI boot mode.

    The initiator system is configured with the target details
    (IQN, LUN, IP, port, and optional CHAP credentials).

    :param target_name: Target Name for iscsi.
    :param lun: logical unit number.
    :param ip_address: IP address of the target.
    :param port: port of the target.
    :param auth_method: either None or CHAP.
    :param username: CHAP Username for authentication.
    :param password: CHAP secret.
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedInBiosError, if the system is
        in the bios boot mode.
    """
    iscsi_args = (target_name, lun, ip_address, port,
                  auth_method, username, password)
    return self._call_method('set_iscsi_info', *iscsi_args)
Set iscsi details of the system in uefi boot mode. The initiator system is set with the target details like IQN, LUN, IP, Port etc. :param target_name: Target Name for iscsi. :param lun: logical unit number. :param ip_address: IP address of the target. :param port: port of the target. :param auth_method : either None or CHAP. :param username: CHAP Username for authentication. :param password: CHAP secret. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedInBiosError, if the system is in the bios boot mode.
def import_parameters_from_file(parameters_file):
    """Parse a ``name: value`` parameters file into a dict.

    Each line of *parameters_file* is expected to look like::

        param1: value1
        param2: [value2, value3]

    Values are parsed with :func:`ast.literal_eval`, so they must be valid
    Python literals.
    """
    with open(parameters_file, 'r') as handle:
        # `.` does not match newlines, so each match covers one line.
        pairs = re.findall('(.*): (.*)', handle.read())
    return {name: ast.literal_eval(raw_value) for name, raw_value in pairs}
Try importing a parameter dictionary from file. We expect values in parameters_file to be defined as follows: param1: value1 param2: [value2, value3]
def shapeRef_to_iriref(self, ref: ShExDocParser.ShapeRefContext) -> ShExJ.IRIREF:
    """ shapeRef: ATPNAME_NS | ATPNAME_LN | '@' shapeExprLabel """
    ns_token = ref.ATPNAME_NS()
    if ns_token:
        # '@prefix:' form -- strip the leading '@' and resolve the prefix.
        return ShExJ.IRIREF(self._lookup_prefix(ns_token.getText()[1:]))
    ln_token = ref.ATPNAME_LN()
    if ln_token:
        # '@prefix:local' form -- split once on ':' after stripping '@'.
        prefix, local = ln_token.getText()[1:].split(':', 1)
        return ShExJ.IRIREF(self._lookup_prefix(prefix + ':') +
                            (local if local else ""))
    # '@' shapeExprLabel form.
    return self.shapeexprlabel_to_IRI(ref.shapeExprLabel())
shapeRef: ATPNAME_NS | ATPNAME_LN | '@' shapeExprLabel
def list_sensors(parent_class, sensor_items, filter, strategy, status,
                 use_python_identifiers, tuple, refresh):
    """Helper for implementing :meth:`katcp.resource.KATCPResource.list_sensors`

    Parameters
    ----------
    sensor_items : tuple of sensor-item tuples
        As would be returned the items() method of a dict containing
        KATCPSensor objects keyed by Python-identifiers.
    parent_class: KATCPClientResource or KATCPClientResourceContainer
        Is used for prefix calculation

    Rest of parameters as for
    :meth:`katcp.resource.KATCPResource.list_sensors`.

    Note: `filter` and `tuple` shadow Python builtins but are part of the
    public signature and cannot be renamed.  This is a tornado coroutine-
    style generator: the result list is delivered via
    ``raise tornado.gen.Return(...)``.
    """
    filter_re = re.compile(filter)
    found_sensors = []
    # Strategy representing "no sampling"; used to filter unset sensors.
    none_strat = resource.normalize_strategy_parameters('none')
    sensor_dict = dict(sensor_items)
    for sensor_identifier in sorted(sensor_dict.keys()):
        sensor_obj = sensor_dict[sensor_identifier]
        search_name = (sensor_identifier if use_python_identifiers
                       else sensor_obj.name)
        name_match = filter_re.search(search_name)
        # Only include sensors with strategies
        strat_match = not strategy or sensor_obj.sampling_strategy != none_strat
        if name_match and strat_match:
            if refresh:
                # First refresh the sensor reading (yields a future that the
                # enclosing tornado coroutine machinery resolves).
                yield sensor_obj.get_value()
            # Determine the sensorname prefix:
            # parent_name. except for aggs when in KATCPClientResourceContainer
            prefix = ""
            if isinstance(parent_class, KATCPClientResourceContainer):
                if sensor_obj.name.startswith("agg_"):
                    prefix = ""
                else:
                    prefix = sensor_obj.parent_name + "."
            if not status or (sensor_obj.reading.status in status):
                # Only include sensors of the given status
                if tuple:
                    # (sensor.name, sensor.value, sensor.value_seconds,
                    #  sensor.type, sensor.units, sensor.update_seconds,
                    #  sensor.status, strategy_and_params)
                    found_sensors.append((
                        prefix+sensor_obj.name,
                        sensor_obj.reading.value,
                        sensor_obj.reading.timestamp,
                        sensor_obj.type,
                        sensor_obj.units,
                        sensor_obj.reading.received_timestamp,
                        sensor_obj.reading.status,
                        sensor_obj.sampling_strategy))
                else:
                    found_sensors.append(resource.SensorResultTuple(
                        object=sensor_obj,
                        name=prefix+sensor_obj.name,
                        python_identifier=sensor_identifier,
                        description=sensor_obj.description,
                        units=sensor_obj.units,
                        type=sensor_obj.type,
                        reading=sensor_obj.reading))
    raise tornado.gen.Return(found_sensors)
Helper for implementing :meth:`katcp.resource.KATCPResource.list_sensors` Parameters ---------- sensor_items : tuple of sensor-item tuples As would be returned the items() method of a dict containing KATCPSensor objects keyed by Python-identifiers. parent_class: KATCPClientResource or KATCPClientResourceContainer Is used for prefix calculation Rest of parameters as for :meth:`katcp.resource.KATCPResource.list_sensors`
def create_process(cmd, root_helper=None, addl_env=None, log_output=True):
    """Create a process object for the given command.

    :param cmd: Command and arguments as a list.
    :param root_helper: Optional privilege-escalation command (e.g. ``sudo``);
        split with shlex and prepended to ``cmd``.
    :param addl_env: Optional dict of extra environment variables merged over
        a copy of ``os.environ``.
    :param log_output: When True, log the command about to be run.
    :return: Tuple of (process object, list of command arguments used).
    """
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    # Materialize as a list: map() yields a one-shot iterator on Python 3,
    # which would log as "<map object ...>" and be exhausted before being
    # returned to the caller.
    cmd = list(map(str, cmd))
    if log_output:
        LOG.info("Running command: %s", cmd)
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    obj = subprocess_popen(cmd, shell=False,
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           env=env)
    return obj, cmd
Create a process object for the given command. The return value will be a tuple of the process object and the list of command arguments used to create it.
def _setup_no_fallback(parser):
    """Register the ``--tox-pyenv-no-fallback`` option.

    When given, tox may not fall back to its built-in strategy for locating
    python executables if this plugin's call to ``pyenv which`` fails; the
    error propagates instead of being masked by the default behavior.
    """
    dest_name = 'tox_pyenv_fallback'
    cli_help = ('If `pyenv which {basepython}` exits non-zero when looking '
                'up the python executable, do not allow fallback to tox\'s '
                'built-in default logic.')

    # Register the command-line flag in its own option group.
    option_group = parser.argparser.add_argument_group(
        title='{0} plugin options'.format(__title__),
    )
    option_group.add_argument(
        '--tox-pyenv-no-fallback', '-F',
        dest=dest_name,
        default=True,
        action='store_false',
        help=cli_help
    )

    def _resolve_fallback(testenv_config, value):
        # The command-line flag takes precedence over the ini setting.
        cli_says = getattr(testenv_config.config.option, dest_name)
        return cli_says or value

    # Register the matching tox.ini [testenv] section option.
    parser.add_testenv_attribute(
        name=dest_name,
        type="bool",
        postprocess=_resolve_fallback,
        default=False,
        help=('If `pyenv which {basepython}` exits non-zero when looking '
              'up the python executable, allow fallback to tox\'s '
              'built-in default logic.'),
    )
Add the option, --tox-pyenv-no-fallback. If this option is set, do not allow fallback to tox's built-in strategy for looking up python executables if the call to `pyenv which` by this plugin fails. This will allow the error to raise instead of falling back to tox's default behavior.
def _mark_lines(lines, sender):
    """Mark message lines with markers to distinguish signature lines.

    Markers:

    * e - empty line
    * s - line identified as signature
    * t - other i.e. ordinary text line

    >>> mark_message_lines(['Some text', '', 'Bob'], 'Bob')
    'tes'
    """
    global EXTRACTOR

    candidate = get_signature_candidate(lines)

    # Start by assuming every line is ordinary text.
    markers = ['t'] * len(lines)
    offset = len(lines) - len(candidate)

    # Walk the candidate bottom-up; only candidate lines may be re-marked,
    # so there is no need to visit the rest of the message.
    for idx, line in reversed(list(enumerate(candidate))):
        # Markers index into `lines`, so translate the candidate index.
        pos = offset + idx
        if not line.strip():
            markers[pos] = 'e'
        elif is_signature_line(line, sender, EXTRACTOR):
            markers[pos] = 's'

    return ''.join(markers)
Mark message lines with markers to distinguish signature lines. Markers: * e - empty line * s - line identified as signature * t - other i.e. ordinary text line >>> mark_message_lines(['Some text', '', 'Bob'], 'Bob') 'tes'
def bam2fastq(self, input_bam, output_fastq,
              output_fastq2=None, unpaired_fastq=None):
    """
    Build the command that converts BAM(s) to FASTQ(s) with Picard SamToFastq.

    :param str input_bam: Path to sequencing reads file to convert
    :param output_fastq: Path to FASTQ to write
    :param output_fastq2: Path to (R2) FASTQ to write
    :param unpaired_fastq: Path to unpaired FASTQ to write
    :return str: Command to convert BAM(s) to FASTQ(s)
    """
    self._ensure_folders(output_fastq, output_fastq2, unpaired_fastq)
    pieces = [
        "{0} -Xmx{1}".format(self.tools.java, self.pm.javamem),
        "-jar {0} SamToFastq".format(self.tools.picard),
        "INPUT={0}".format(input_bam),
        "FASTQ={0}".format(output_fastq),
    ]
    # Paired-end extras are only emitted when both targets are given.
    if output_fastq2 is not None and unpaired_fastq is not None:
        pieces.append("SECOND_END_FASTQ={0}".format(output_fastq2))
        pieces.append("UNPAIRED_FASTQ={0}".format(unpaired_fastq))
    return " ".join(pieces)
Create command to convert BAM(s) to FASTQ(s). :param str input_bam: Path to sequencing reads file to convert :param output_fastq: Path to FASTQ to write :param output_fastq2: Path to (R2) FASTQ to write :param unpaired_fastq: Path to unpaired FASTQ to write :return str: Command to convert BAM(s) to FASTQ(s)
def slip_reader(port, trace_function):
    """Generator to read SLIP packets from a serial port.
    Yields one full SLIP packet at a time, raises exception on timeout or
    invalid data.

    Designed to avoid too many calls to serial.read(1), which can bog
    down on slow systems.

    :param port: serial port object exposing inWaiting()/read().
    :param trace_function: printf-style callable used for debug tracing.
    """
    # partial_packet is None while waiting for a header byte (0xc0);
    # once a header is seen it accumulates the decoded packet body.
    partial_packet = None
    in_escape = False
    while True:
        # Read everything currently buffered, or block for one byte.
        waiting = port.inWaiting()
        read_bytes = port.read(1 if waiting == 0 else waiting)
        if read_bytes == b'':
            waiting_for = "header" if partial_packet is None else "content"
            trace_function("Timed out waiting for packet %s", waiting_for)
            raise FatalError("Timed out waiting for packet %s" % waiting_for)
        trace_function("Read %d bytes: %s", len(read_bytes), HexFormatter(read_bytes))
        for b in read_bytes:
            if type(b) is int:
                b = bytes([b])  # python 2/3 compat
            if partial_packet is None:  # waiting for packet header
                if b == b'\xc0':
                    partial_packet = b""
                else:
                    trace_function("Read invalid data: %s", HexFormatter(read_bytes))
                    trace_function("Remaining data in serial buffer: %s",
                                   HexFormatter(port.read(port.inWaiting())))
                    raise FatalError('Invalid head of packet (0x%s)' % hexify(b))
            elif in_escape:  # part-way through escape sequence
                in_escape = False
                # SLIP escapes: 0xdb 0xdc -> 0xc0, 0xdb 0xdd -> 0xdb
                if b == b'\xdc':
                    partial_packet += b'\xc0'
                elif b == b'\xdd':
                    partial_packet += b'\xdb'
                else:
                    trace_function("Read invalid data: %s", HexFormatter(read_bytes))
                    trace_function("Remaining data in serial buffer: %s",
                                   HexFormatter(port.read(port.inWaiting())))
                    raise FatalError('Invalid SLIP escape (0xdb, 0x%s)' % (hexify(b)))
            elif b == b'\xdb':  # start of escape sequence
                in_escape = True
            elif b == b'\xc0':  # end of packet
                trace_function("Received full packet: %s", HexFormatter(partial_packet))
                yield partial_packet
                partial_packet = None
            else:  # normal byte in packet
                partial_packet += b
Generator to read SLIP packets from a serial port. Yields one full SLIP packet at a time, raises exception on timeout or invalid data. Designed to avoid too many calls to serial.read(1), which can bog down on slow systems.
def generate(self):
    """Generate a new, unused string and return it."""
    while True:
        candidate = self._propose_new_key()
        if not self.key_exists(candidate):
            return candidate
        # Collision with an existing key: log and try again.
        _logger.warning('Previous candidate was used.'
                        ' Regenerating another...')
Generate a new string and return it.
def set_auto_reply(self, message, status=AutoReplyStatus.ALWAYS_ENABLED, start=None, end=None, external_message=None,
                   audience=AutoReplyAudience.ALL):
    # type: (str, OutlookAccount.AutoReplyStatus, datetime, datetime, str, OutlookAccount.AutoReplyAudience) -> None
    """ Set an automatic reply for the account.
    Args:
        message (str): The message to be sent in replies. If external_message is provided this is the message sent
            to internal recipients
        status (OutlookAccount.AutoReplyStatus): Whether the auto-reply should be always enabled, scheduled, or
            disabled. You can use :class:`AutoReplyStatus <pyOutlook.core.main.OutlookAccount.AutoReplyStatus>` to
            provide the value. Defaults to ALWAYS_ENABLED.
        start (datetime): If status is set to SCHEDULED, this is when the replies will start being sent.
        end (datetime): If status is set to SCHEDULED, this is when the replies will stop being sent.
        external_message (str): If provided, this message will be sent to external recipients.
        audience (OutlookAccount.AutoReplyAudience): Whether replies should be sent to everyone, contacts only,
            or internal recipients only. You can use
            :class:`AutoReplyAudience <pyOutlook.core.main.OutlookAccount.AutoReplyAudience>` to provide the value.
    """
    start_is_none = start is None
    end_is_none = end is None

    # Reject a half-specified schedule (exactly one of start/end given).
    # NOTE(review): this error message's wording ("not must both either")
    # is garbled; it is runtime-visible text so it is left unchanged here.
    if (not start_is_none and end_is_none) or (start_is_none and not end_is_none):
        raise ValueError('Start and End not must both either be None or datetimes')

    start_is_datetime = isinstance(start, datetime)
    end_is_datetime = isinstance(end, datetime)

    # When provided, both bounds must actually be datetime instances.
    if not start_is_datetime and not start_is_none or not end_is_datetime and not end_is_none:
        raise ValueError('Start and End must both either be None or datetimes')

    request_data = dict(Status=status, ExternalAudience=audience)

    # Outlook requires both an internal and external message. For convenience, pyOutlook allows only one message
    # and uses that as the external message if none is provided
    if external_message is None:
        external_message = message

    request_data.update(InternalReplyMessage=message, ExternalReplyMessage=external_message)

    # A schedule is only sent when both bounds were supplied.
    if not start_is_none and not end_is_none:
        request_data.update(ScheduledStartDateTime=dict(DateTime=str(start)))
        request_data.update(ScheduledEndDateTime=dict(DateTime=str(end)))

    data = {
        "@odata.context": "https://outlook.office.com/api/v2.0/$metadata#Me/MailboxSettings",
        "AutomaticRepliesSetting": request_data
    }

    # PATCH the mailbox settings on the Outlook REST API.
    # NOTE(review): the HTTP response is not checked — failures are
    # silently ignored; confirm whether they should be surfaced.
    requests.patch('https://outlook.office.com/api/v2.0/me/MailboxSettings',
                   headers=self._headers, data=json.dumps(data))

    self._auto_reply = message
Set an automatic reply for the account. Args: message (str): The message to be sent in replies. If external_message is provided this is the message sent to internal recipients status (OutlookAccount.AutoReplyStatus): Whether the auto-reply should be always enabled, scheduled, or disabled. You can use :class:`AutoReplyStatus <pyOutlook.core.main.OutlookAccount.AutoReplyStatus>` to provide the value. Defaults to ALWAYS_ENABLED. start (datetime): If status is set to SCHEDULED, this is when the replies will start being sent. end (datetime): If status is set to SCHEDULED, this is when the replies will stop being sent. external_message (str): If provided, this message will be sent to external recipients. audience (OutlookAccount.AutoReplyAudience): Whether replies should be sent to everyone, contacts only, or internal recipients only. You can use :class:`AutoReplyAudience <pyOutlook.core.main.OutlookAccount.AutoReplyAudience>` to provide the value.
def get_player_id(player):
    """ Returns the player ID(s) associated with the given player name.

    There are instances where players have the same name, so multiple
    player IDs may be associated with a single name.

    Parameters
    ----------
    player : str
        The desired player's name in 'Last Name, First Name' format.

    Returns
    -------
    player_id : numpy array
        The numpy array that contains the player ID(s).
    """
    players_df = get_all_player_ids("all_data")
    matches = players_df[players_df.DISPLAY_LAST_COMMA_FIRST == player]

    # No rows matched the requested name.
    if len(matches) == 0:
        raise ValueError(
            "Invalid player name passed or there is no player with that name.")

    return matches.PERSON_ID.values
Returns the player ID(s) associated with the player name that is passed in. There are instances where players have the same name so there are multiple player IDs associated with it. Parameters ---------- player : str The desired player's name in 'Last Name, First Name' format. Passing in a single name returns a numpy array containing all the player IDs associated with that name. Returns ------- player_id : numpy array The numpy array that contains the player ID(s).
def _headers(self, **kwargs):
    """ Return the base headers for all requests to the server, with the
    auth token (when present) and any keyword overrides applied. """
    result = dict(BASE_HEADERS)
    if self._token:
        result['X-Plex-Token'] = self._token
    result.update(kwargs)
    return result
Returns dict containing base headers for all requests to the server.
def save(self, filename, content):
    """Save content to a file; the default is a list of lines.

    Non-string iterables are joined with newlines; anything else
    (including a plain string) is written via ``str``.

    :param filename: Path of the file to (over)write.
    :param content: Iterable of lines, or a single object to stringify.
    """
    with open(filename, "w") as f:
        # Strings are iterable too, but must be written whole — the
        # original joined a string character-by-character with newlines.
        if hasattr(content, '__iter__') and not isinstance(content, str):
            f.write('\n'.join(content))
        else:
            f.write(str(content))
default is to save a file from list of lines
def update(self, data):
    """Updates the object information based on live data, if there were
    any changes made. Changes are applied to the object but not persisted;
    you must manually call `db.session.add(ami)` on the object.

    Args:
        data (bunch): Data fetched from AWS API

    Returns:
        True if there were any changes to the object, else false
    """
    changed = self.set_property('description', data.description)
    changed |= self.set_property('state', data.state)

    incoming = {t['Key']: t['Value'] for t in data.tags or {}}
    current = {t.key: t for t in self.tags}

    # Add or update tags present in the live data.
    for key, value in incoming.items():
        changed |= self.set_tag(key, value)

    # Drop tags that no longer exist upstream.
    for key in current:
        if key not in incoming:
            changed |= self.delete_tag(key)

    return changed
Updates the object information based on live data, if there were any changes made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(ami)` on the object. Args: data (bunch): Data fetched from AWS API Returns: True if there were any changes to the object, else false
def ontologyShapeTree(self):
    """
    Returns a dict representing the ontology tree.
    Top level = {0: [top shapes]}.
    Multi inheritance is represented explicitly (a shape may appear
    under several parents).
    """
    if not self.all_shapes:
        return {}
    tree = {0: self.toplayer_shapes}
    for shape in self.all_shapes:
        kids = shape.children()
        if kids:
            tree[shape] = kids
    return tree
Returns a dict representing the ontology tree Top level = {0:[top properties]} Multi inheritance is represented explicitly
def group_experiments(experiments: TomographyExperiment,
                      method: str = 'greedy') -> TomographyExperiment:
    """
    Group experiments that are diagonal in a shared tensor product basis
    (TPB) to minimize number of QPU runs.

    Given some PauliTerm operator, the 'natural' tensor product basis that
    diagonalizes it is the one diagonalizing each Pauli factor term-by-term.
    Two commuting operators need not be diagonal in each other's natural
    TPB, but a shared (possibly entangled) diagonalizing basis may exist;
    grouping settings that share a TPB lets them be measured together.

    Two grouping strategies are available:

    * "greedy" keeps a running set of buckets of grouped ExperimentSettings;
      each new setting joins the first applicable bucket, and a new bucket
      is created when none applies.
    * "clique-removal" maps the grouping problem onto the Max Clique graph
      problem via NetworkX's clique-removal algorithm. It can give
      marginally better groupings, but building the graph is slow, so
      "greedy" is the default.

    :param experiments: a tomography experiment
    :param method: method used for grouping; the allowed methods are one of
        ['greedy', 'clique-removal']
    :return: a tomography experiment with all the same settings, just
        grouped according to shared TPBs.
    """
    allowed_methods = ['greedy', 'clique-removal']
    assert method in allowed_methods, f"'method' should be one of {allowed_methods}."
    # Dispatch table instead of an if/elif chain.
    grouping_fns = {
        'greedy': group_experiments_greedy,
        'clique-removal': group_experiments_clique_removal,
    }
    return grouping_fns[method](experiments)
Group experiments that are diagonal in a shared tensor product basis (TPB) to minimize number of QPU runs. Background ---------- Given some PauliTerm operator, the 'natural' tensor product basis to diagonalize this term is the one which diagonalizes each Pauli operator in the product term-by-term. For example, X(1) * Z(0) would be diagonal in the 'natural' tensor product basis {(|0> +/- |1>)/Sqrt[2]} * {|0>, |1>}, whereas Z(1) * X(0) would be diagonal in the 'natural' tpb {|0>, |1>} * {(|0> +/- |1>)/Sqrt[2]}. The two operators commute but are not diagonal in each others 'natural' tpb (in fact, they are anti-diagonal in each others 'natural' tpb). This function tests whether two operators given as PauliTerms are both diagonal in each others 'natural' tpb. Note that for the given example of X(1) * Z(0) and Z(1) * X(0), we can construct the following basis which simultaneously diagonalizes both operators: -- |0>' = |0> (|+>) + |1> (|->) -- |1>' = |0> (|+>) - |1> (|->) -- |2>' = |0> (|->) + |1> (|+>) -- |3>' = |0> (-|->) + |1> (|+>) In this basis, X Z looks like diag(1, -1, 1, -1), and Z X looks like diag(1, 1, -1, -1). Notice however that this basis cannot be constructed with single-qubit operations, as each of the basis vectors are entangled states. Methods ------- The "greedy" method will keep a running set of 'buckets' into which grouped ExperimentSettings will be placed. Each new ExperimentSetting considered is assigned to the first applicable bucket and a new bucket is created if there are no applicable buckets. The "clique-removal" method maps the term grouping problem onto Max Clique graph problem. This method constructs a NetworkX graph where an edge exists between two settings that share an nTPB and then uses networkx's algorithm for clique removal. This method can give you marginally better groupings in certain circumstances, but constructing the graph is pretty slow so "greedy" is the default. 
:param experiments: a tomography experiment :param method: method used for grouping; the allowed methods are one of ['greedy', 'clique-removal'] :return: a tomography experiment with all the same settings, just grouped according to shared TPBs.
def _neg32(ins):
    """ Negates top of the stack (32 bits in DEHL) """
    asm = _32bit_oper(ins.quad[2])
    # Call the 32-bit negate routine, then push the result back (DE, HL).
    asm += ['call __NEG32', 'push de', 'push hl']
    REQUIRES.add('neg32.asm')
    return asm
Negates top of the stack (32 bits in DEHL)
def _process_model_dict(self, d):
    """
    Remove redundant items from a model's configuration dict.

    Parameters
    ----------
    d : dict
        Modified in place.

    Returns
    -------
    dict
        Modified `d`.
    """
    # Keys that are always redundant for the saved configuration.
    redundant_keys = (
        'model_type', 'sample_size', 'probability_mode', 'choice_mode',
        'choosers_fit_filters', 'choosers_predict_filters',
        'alts_fit_filters', 'alts_predict_filters',
        'interaction_predict_filters', 'estimation_sample_size',
        'prediction_sample_size', 'choice_column')
    for key in redundant_keys:
        del d[key]

    # The expression is only kept when it differs from the default.
    if d['model_expression'] == self.default_model_expr:
        del d['model_expression']

    d["name"] = yamlio.to_scalar_safe(d["name"])

    return d
Remove redundant items from a model's configuration dict. Parameters ---------- d : dict Modified in place. Returns ------- dict Modified `d`.
def _is_valid_integer(self, inpt, metadata): """Checks if input is a valid integer value""" if not isinstance(inpt, int): return False if metadata.get_minimum_integer() and inpt < metadata.get_maximum_integer(): return False if metadata.get_maximum_integer() and inpt > metadata.get_minimum_integer(): return False if metadata.get_integer_set() and inpt not in metadata.get_integer_set(): return False else: return True
Checks if input is a valid integer value
def received_message(self, message):
    '''
    Checks if the client has sent a ready message.

    A ready message causes ``send()`` to be called on the
    ``parent end`` of the pipe. Clients need to ensure that the pipe
    assigned to ``self.pipe`` is the ``parent end`` of a pipe.

    This ensures completion of the underlying websocket connection
    and can be used to synchronize parallel senders.
    '''
    text = message.data.decode('utf-8')
    if text != 'websocket client ready':
        return
    # Signal readiness to the parent process, then acknowledge the client.
    self.pipe.send(message)
    self.send('server received message', False)
Checks if the client has sent a ready message. A ready message causes ``send()`` to be called on the ``parent end`` of the pipe. Clients need to ensure that the pipe assigned to ``self.pipe`` is the ``parent end`` of a pipe. This ensures completion of the underlying websocket connection and can be used to synchronize parallel senders.
def _append_path(new_path): # type: (str) -> None """ Given a path string, append it to sys.path """ for path in sys.path: path = os.path.abspath(path) if new_path == path: return sys.path.append(new_path)
Given a path string, append it to sys.path
def avail_images(call=None):
    '''
    Return a dict of all available VM images on the cloud provider.
    '''
    # This function is only valid as a --function/-f call.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    conn = get_conn()
    options = conn.getCreateObjectOptions()

    images = {}
    for entry in options['operatingSystems']:
        description = entry['itemPrice']['item']['description']
        images[description] = {
            'name': description,
            'template': entry['template']['operatingSystemReferenceCode'],
        }
    return images
Return a dict of all available VM images on the cloud provider.
def unlink(self):
    """ Unregisters the Link by removing it from its source's registry,
    if present. """
    links = self.registry.get(self.source)
    if self in links:
        # remove() drops the first equal element, matching the original
        # pop(index(...)) behavior.
        links.remove(self)
Unregisters the Link
def level(self):
    """Returns the current output level by querying the remote controller.

    Schedules a level query via the query-waiters queue and waits up to
    one second for the response event before returning the cached value.

    NOTE(review): if the query times out, the previously cached
    ``self._level`` is returned silently — confirm this is intended.
    """
    ev = self._query_waiters.request(self.__do_query_level)
    ev.wait(1.0)
    return self._level
Returns the current output level by querying the remote controller.
def _log_likelihood_per_sample(X, means, covars):
    """
    Theta = (theta_1, theta_2, ... theta_M)
    Likelihood of mixture parameters given data:

        L(Theta | X) = product_i P(x_i | Theta)

    log likelihood:

        log L(Theta | X) = sum_i log(P(x_i | Theta))

    and note that p(x_i | Theta) = sum_j prior_j * p(x_i | theta_j)

    Probability of sample x being generated from component i:

        P(w_i | x) = P(x|w_i) * P(w_i) / P(X)

    where P(X) = sum_i P(x|w_i) * P(w_i)
    Here post_proba = P(w_i | x) and log_likelihood = log(P(x|w_i))
    """
    logden = _log_multivariate_density(X, means, covars)
    # log-sum-exp trick: subtract the per-sample max before exponentiating
    # to avoid overflow/underflow, then add it back afterwards.
    logden_max = logden.max(axis=1)
    # NOTE(review): Epsilon is added to every exponentiated term inside the
    # sum (per element, not once per sample) — confirm this is intended
    # rather than a single additive floor.
    log_likelihood = np.log(np.sum(np.exp(logden.T - logden_max) + Epsilon, axis=0))
    log_likelihood += logden_max
    # Posterior responsibility of each component for each sample.
    post_proba = np.exp(logden - log_likelihood[:, np.newaxis])
    return (log_likelihood, post_proba)
Theta = (theta_1, theta_2, ... theta_M) Likelihood of mixture parameters given data: L(Theta | X) = product_i P(x_i | Theta) log likelihood: log L(Theta | X) = sum_i log(P(x_i | Theta)) and note that p(x_i | Theta) = sum_j prior_j * p(x_i | theta_j) Probability of sample x being generated from component i: P(w_i | x) = P(x|w_i) * P(w_i) / P(X) where P(X) = sum_i P(x|w_i) * P(w_i) Here post_proba = P/(w_i | x) and log_likelihood = log(P(x|w_i))
def normalize_ident(ident):
    '''Splits a generic identifier.

    If ``ident`` is a pair (a tuple of length two), it is returned as
    ``(content_id, subtopic_id)``. Otherwise, ``(ident, None)`` is
    returned.
    '''
    if not (isinstance(ident, tuple) and len(ident) == 2):
        return ident, None
    content_id, subtopic_id = ident
    return content_id, subtopic_id
Splits a generic identifier. If ``ident`` is a tuple, then ``(ident[0], ident[1])`` is returned. Otherwise, ``(ident[0], None)`` is returned.
def _getColumnNeighborhood(self, centerColumn):
    """
    Gets a neighborhood of columns.

    Simply calls topology.neighborhood or topology.wrappingNeighborhood
    depending on the wrap-around setting. A subclass can insert different
    topology behavior by overriding this method.

    :param centerColumn (int) The center of the neighborhood.

    @returns (1D numpy array of integers) The columns in the neighborhood.
    """
    neighborhood_fn = (topology.wrappingNeighborhood if self._wrapAround
                       else topology.neighborhood)
    return neighborhood_fn(centerColumn,
                           self._inhibitionRadius,
                           self._columnDimensions)
Gets a neighborhood of columns. Simply calls topology.neighborhood or topology.wrappingNeighborhood A subclass can insert different topology behavior by overriding this method. :param centerColumn (int) The center of the neighborhood. @returns (1D numpy array of integers) The columns in the neighborhood.
def _build_resolver(cls, session: AppSession):
    '''Build resolver.'''
    args = session.args

    # An explicit --timeout overrides the DNS-specific timeout.
    dns_timeout = args.timeout if args.timeout else args.dns_timeout

    # Hard address-family requirements take precedence over preferences.
    if args.inet_family == 'IPv4':
        family = IPFamilyPreference.ipv4_only
    elif args.inet_family == 'IPv6':
        family = IPFamilyPreference.ipv6_only
    elif args.prefer_family == 'IPv6':
        family = IPFamilyPreference.prefer_ipv6
    elif args.prefer_family == 'IPv4':
        family = IPFamilyPreference.prefer_ipv4
    else:
        family = IPFamilyPreference.any

    resolver_class = session.factory.class_map['Resolver']
    cache = resolver_class.new_cache() if args.dns_cache else None

    return session.factory.new(
        'Resolver',
        family=family,
        timeout=dns_timeout,
        rotate=args.rotate_dns,
        cache=cache,
    )
Build resolver.
def multi_pop(d, *args):
    """ Pops multiple keys off a dict-like object; returns a dict of the
    keys that were present and their popped values. """
    return {key: d.pop(key) for key in args if key in d}
pops multiple keys off a dict like object
def read_unsigned_var_int(file_obj):
    """Read a value using the unsigned, variable int encoding: seven
    payload bits per byte, least-significant group first, high bit set
    on every byte except the last."""
    value = 0
    shift = 0
    while True:
        (octet,) = struct.unpack(b"<B", file_obj.read(1))
        value |= (octet & 0x7F) << shift
        if not (octet & 0x80):
            return value
        shift += 7
Read a value using the unsigned, variable int encoding.
def isEnabled( self ):
    """
    Returns whether or not this node is enabled, taking the owning
    layer's enabled state into account when configured to do so.
    """
    layer_ok = True
    if self._disableWithLayer and self._layer:
        layer_ok = self._layer.isEnabled()
    return self._enabled and layer_ok
Returns whether or not this node is enabled.
def robust_topological_sort(graph: Graph) -> list:
    """Identify strongly connected components then perform a topological
    sort of those components (the condensation of the graph)."""
    assert check_argument_types()
    components = strongly_connected_components(graph)

    # Map each node to the component that contains it.
    owner = {node: comp for comp in components for node in comp}

    # Build the condensation: edges between distinct components only.
    condensed = {comp: [] for comp in components}
    for node in graph:
        node_comp = owner[node]
        for successor in graph[node]:
            successor_comp = owner[successor]
            if node_comp != successor_comp:
                condensed[node_comp].append(successor_comp)

    return topological_sort(condensed)
Identify strongly connected components then perform a topological sort of those components.
def subscribe(self, stream, callback, transform=""):
    """Set up a subscription to ``stream``.

    Connects first if the connection is down or still being established.

    :param stream: name of the stream to subscribe to
    :param callback: callable invoked with messages for the subscription
    :param transform: optional server-side transform for the subscription
    :return bool: True if the subscribe command was sent, False if a
        connection could not be established
    """
    # (Re)connect if we are not currently connected.
    if self.status in ("disconnected", "disconnecting", "connecting"):
        self.connect()

    # BUG FIX: the original used `self.status is not "connected"`, an
    # identity comparison against a string literal, which relies on
    # interpreter-specific string interning; use equality instead.
    if self.status != "connected":
        return False

    logging.debug("Subscribing to %s", stream)
    self.send({"cmd": "subscribe", "arg": stream, "transform": transform})
    with self.subscription_lock:
        self.subscriptions[stream + ":" + transform] = callback
    return True
Given a stream, a callback and an optional transform, sets up the subscription
def do(self, arg):
    ".example - This is an example plugin for the command line debugger"
    # NOTE: Python 2 syntax (print statements) — this plugin targets the
    # Python 2 command-line debugger API.
    # Dump the example banner plus the debugger state available to
    # plugins: the last debug event, the command prefix, and the
    # tokenized argument string.
    print "This is an example command."
    print "%s.do(%r, %r):" % (__name__, self, arg)
    print " last event", self.lastEvent
    print " prefix", self.cmdprefix
    print " arguments", self.split_tokens(arg)
.example - This is an example plugin for the command line debugger
def get_code(self):
    """Opens the link and returns the response's content, caching the
    result so the URL is fetched at most once."""
    cached = self.code
    if cached is not None:
        return cached
    self.code = urlopen(self.url).read()
    return self.code
Opens the link and returns the response's content.
def tparse(instring, lenout=_default_len_out):
    """
    Parse a time string and return seconds past the J2000 epoch
    on a formal calendar.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tparse_c.html

    :param instring: Input time string, UTC.
    :type instring: str
    :param lenout: Available space in output error message string.
    :type lenout: int
    :return: Equivalent UTC seconds past J2000, Descriptive error message.
    :rtype: tuple
    """
    # Marshal arguments into ctypes form for the CSPICE call; errmsg is
    # an output buffer of lenout characters.
    errmsg = stypes.stringToCharP(lenout)
    lenout = ctypes.c_int(lenout)
    instring = stypes.stringToCharP(instring)
    sp2000 = ctypes.c_double()
    # tparse_c writes the parsed epoch into sp2000 and any parse error
    # text into errmsg.
    libspice.tparse_c(instring, lenout, ctypes.byref(sp2000), errmsg)
    return sp2000.value, stypes.toPythonString(errmsg)
Parse a time string and return seconds past the J2000 epoch on a formal calendar. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tparse_c.html :param instring: Input time string, UTC. :type instring: str :param lenout: Available space in output error message string. :type lenout: int :return: Equivalent UTC seconds past J2000, Descriptive error message. :rtype: tuple
def start(self, max):
    """
    Displays the progress bar for a given maximum value.

    :param float max: Maximum value of the progress bar.
    """
    try:
        self.widget.max = max
        display(self.widget)
    except Exception:
        # Best-effort: rendering is optional (e.g. when not running in a
        # notebook). A bare `except:` here would also swallow
        # KeyboardInterrupt/SystemExit, so catch Exception instead.
        pass
Displays the progress bar for a given maximum value. :param float max: Maximum value of the progress bar.
def get_metrics(self, name=None):
    """Get metrics for this operator.

    Args:
        name(str, optional): Only return metrics matching `name`, where `name` can be a
            regular expression.  If `name` is not supplied, then all metrics for this operator
            are returned.

    Returns:
        list(Metric): List of matching metrics.

    Retrieving a list of metrics whose name contains the string "temperatureSensor"
    could be performed as followed
    Example:
        >>> from streamsx import rest
        >>> sc = rest.StreamingAnalyticsConnection()
        >>> instances = sc.get_instances()
        >>> operator = instances[0].get_operators()[0]
        >>> metrics = op.get_metrics(name='*temperatureSensor*')
    """
    # Delegate to the generic REST element fetcher; `name` filtering is
    # applied there.
    return self._get_elements(self.metrics, 'metrics', Metric, name=name)
Get metrics for this operator. Args: name(str, optional): Only return metrics matching `name`, where `name` can be a regular expression. If `name` is not supplied, then all metrics for this operator are returned. Returns: list(Metric): List of matching metrics. Retrieving a list of metrics whose name contains the string "temperatureSensor" could be performed as followed Example: >>> from streamsx import rest >>> sc = rest.StreamingAnalyticsConnection() >>> instances = sc.get_instances() >>> operator = instances[0].get_operators()[0] >>> metrics = op.get_metrics(name='*temperatureSensor*')
def load_json(filename):
    """Load a json file as a dictionary.

    Returns a ``(data, error)`` pair: ``(parsed, None)`` on success,
    ``(None, message)`` on failure.
    """
    try:
        # Python 2 wants binary mode for json.load; Python 3 wants text.
        mode = 'rb' if PY2 else 'r'
        with open(filename, mode) as fid:
            return json.load(fid), None
    except Exception as err:
        return None, str(err)
Load a json file as a dictionary
def register_user(self, data):
    """ Parses input and register user

    Validates the submitted form fields (username, email, passwords),
    checks for an existing account, inserts the new user with hashed
    password and an activation token, and emails the activation link.

    :param data: dict of form fields: username, realname, email, passwd,
        passwd2.
    :return: tuple ``(msg, error)`` where ``msg`` is a user-facing status
        message and ``error`` is True when registration failed.
    """
    error = False
    msg = ""

    # Old Django-style email regex: dot-atom | quoted-string, then domain.
    # NOTE(review): `\\[\001-011...]` looks like it should be `\\[\001-\011...]`
    # (octal escape) as in the upstream Django regex — confirm before relying
    # on the quoted-string branch.
    email_re = re.compile(
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"  # dot-atom
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"'  # quoted-string
        r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE)  # domain

    # Check input format
    if re.match(r"^[-_|~0-9A-Z]{4,}$", data["username"], re.IGNORECASE) is None:
        error = True
        msg = _("Invalid username format.")
    elif email_re.match(data["email"]) is None:
        error = True
        msg = _("Invalid email format.")
    elif len(data["passwd"]) < 6:
        # Minimum password length of 6 characters.
        error = True
        msg = _("Password too short.")
    elif data["passwd"] != data["passwd2"]:
        error = True
        msg = _("Passwords don't match !")

    if not error:
        # Uniqueness check on both username and email in a single query.
        existing_user = self.database.users.find_one({"$or": [{"username": data["username"]}, {"email": data["email"]}]})
        if existing_user is not None:
            error = True
            if existing_user["username"] == data["username"]:
                msg = _("This username is already taken !")
            else:
                msg = _("This email address is already in use !")
        else:
            # Store a SHA-512 hash of the password; the activation token is
            # derived from 256 random bits.
            passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
            activate_hash = hashlib.sha512(str(random.getrandbits(256)).encode("utf-8")).hexdigest()
            self.database.users.insert({"username": data["username"],
                                        "realname": data["realname"],
                                        "email": data["email"],
                                        "password": passwd_hash,
                                        "activate": activate_hash,
                                        "bindings": {},
                                        "language": self.user_manager._session.get("language", "en")})
            try:
                web.sendmail(web.config.smtp_sendername, data["email"], _("Welcome on INGInious"),
                             _("""Welcome on INGInious !

To activate your account, please click on the following link :
""") + web.ctx.home + "/register?activate=" + activate_hash)
                msg = _("You are succesfully registered. An email has been sent to you for activation.")
            except:
                # Best-effort email: report failure to the user but the
                # account row has already been inserted at this point.
                error = True
                msg = _("Something went wrong while sending you activation email. Please contact the administrator.")

    return msg, error
Parses the submitted input and registers the user
def mutation_jwt_refresh_token_required(fn):
    """
    A decorator to protect a mutation.

    If you decorate a mutation with this, it will ensure that the requester
    has a valid refresh token before allowing the mutation to be called.
    """
    @wraps(fn)
    def wrapper(cls, *args, **kwargs):
        # The token arrives as a mutation argument; pop it so it is not
        # forwarded to the wrapped resolver.
        token = kwargs.pop(current_app.config['JWT_REFRESH_TOKEN_ARGUMENT_NAME'])
        try:
            verify_refresh_jwt_in_argument(token)
        except Exception as e:
            # Verification failures are returned as an AuthInfoField payload
            # instead of propagating through the GraphQL layer.
            return cls(AuthInfoField(message=str(e)))
        # NOTE(review): `cls` is not forwarded to `fn` here — confirm the
        # wrapped mutation does not expect it as its first argument.
        return fn(*args, **kwargs)
    return wrapper
A decorator to protect a mutation. If you decorate a mutation with this, it will ensure that the requester has a valid refresh token before allowing the mutation to be called.
def get_one(cls, db, *args, **kwargs):
    """
    Returns an object that corresponds to given query or ``None``.

    Example::

        item = Item.get_one(db, {'title': u'Hello'})
    """
    record = db[cls.collection].find_one(*args, **kwargs)
    return cls.wrap_incoming(record, db) if record else None
Returns an object that corresponds to given query or ``None``. Example:: item = Item.get_one(db, {'title': u'Hello'})
def get_handler(self, request):
    """
    Get callable from JSON RPC request

    :param RPCRequest request: JSON RPC request
    :return: Method
    :rtype: callable
    """
    method_name = request.method
    try:
        return self._json_rpc_methods[method_name]
    except (AttributeError, KeyError):  # pragma no coverage
        raise RPCMethodError("Received invalid method '{}'".format(method_name))
Get callable from JSON RPC request :param RPCRequest request: JSON RPC request :return: Method :rtype: callable
def set_data_matrix_chunk_size(df_shape, max_chunk_kb, elem_per_kb):
    """ Sets chunk size to use for writing data matrix.

    Note. Calculation used here is for compatibility with cmapM and cmapR.

    Input:
        - df_shape (tuple): shape of input data_df.
        - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy
        - elem_per_kb (int): Number of elements per kb

    Returns:
        chunk size (tuple) to use for chunking the data matrix
    """
    n_rows, n_cols = df_shape[0], df_shape[1]
    # Row dimension is capped at 1000 rows per chunk.
    row_chunk_size = n_rows if n_rows < 1000 else 1000
    # Column dimension fills up the remaining KB budget.
    max_elems_per_chunk = max_chunk_kb * elem_per_kb
    col_chunk_size = min(max_elems_per_chunk // row_chunk_size, n_cols)
    return (row_chunk_size, col_chunk_size)
Sets chunk size to use for writing data matrix. Note. Calculation used here is for compatibility with cmapM and cmapR. Input: - df_shape (tuple): shape of input data_df. - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - elem_per_kb (int): Number of elements per kb Returns: chunk size (tuple) to use for chunking the data matrix
def autobuild_arm_program(elfname, test_dir=os.path.join('firmware', 'test'), patch=True):
    """
    Build an ARM module for all targets and build all unit tests.

    If pcb files are given, also build those.

    :param elfname: Name of the ELF program to build.
    :param test_dir: Directory containing the unit tests (defaults to
        firmware/test).
    :param patch: Whether to patch the built program (forwarded to
        arm.build_program).
    """
    try:
        # Build for all targets
        family = utilities.get_family('module_settings.json')
        family.for_all_targets(family.tile.short_name,
                               lambda x: arm.build_program(family.tile, elfname, x, patch=patch))

        # Build all unit tests.
        # BUG FIX: the test_dir parameter was previously ignored in favor of a
        # hard-coded copy of its default value.
        unit_test.build_units(test_dir, family.targets(family.tile.short_name))

        Alias('release', os.path.join('build', 'output'))
        Alias('test', os.path.join('build', 'test', 'output'))
        Default(['release', 'test'])

        autobuild_release(family)

        if os.path.exists('doc'):
            autobuild_documentation(family.tile)
    except IOTileException as e:
        # Print a formatted error and abort the SCons build.
        print(e.format())
        sys.exit(1)
Build an ARM module for all targets and build all unit tests. If pcb files are given, also build those.
def mirrored(setup):
    """Convenience decorator for setUp in testcases::

        @mirrored
        def setUp(self, mirror, mock):
            ...

    is the same as::

        def setUp(self):
            self.mirror, self.mock = mirror()
            mirror, mock = self.mirror, self.mock
            ...
    """
    @wraps(setup)
    def wrapped_setup(self):
        pair = mirror()
        self.mirror, self.mock = pair
        return setup(self, pair[0], pair[1])
    return wrapped_setup
Convenience decorator for setUp in testcases:: @mirrored def setUp(self, mirror, mock): ... is the same as:: def setUp(self): self.mirror, self.mock = mirror() mirror, mock = self.mirror, self.mock ...
def permission_denied(request, template_name=None, extra_context=None):
    """
    Default 403 handler.

    Templates: `403.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    # Fall back to the app-level template pair when none is supplied.
    templates = template_name if template_name is not None else ('403.html', 'authority/403.html')
    context = {'request_path': request.path}
    context.update(extra_context or {})
    body = loader.render_to_string(
        template_name=templates,
        context=context,
        request=request,
    )
    return HttpResponseForbidden(body)
Default 403 handler. Templates: `403.html` Context: request_path The path of the requested URL (e.g., '/app/pages/bad_page/')
def _merge_sections(sec_a, sec_b): '''Merge two sections Merges sec_a into sec_b and sets sec_a attributes to default ''' sec_b.ids = list(sec_a.ids) + list(sec_b.ids[1:]) sec_b.ntype = sec_a.ntype sec_b.pid = sec_a.pid sec_a.ids = [] sec_a.pid = -1 sec_a.ntype = 0
Merge two sections Merges sec_a into sec_b and sets sec_a attributes to default
def bulk_create_posts(self, posts, post_categories, post_tags, post_media_attachments):
    """
    Actually do a db bulk creation of posts, and link up the many-to-many fields

    :param posts: the list of Post objects to bulk create
    :param post_categories: a mapping of Categories to add to newly created Posts
    :param post_tags: a mapping of Tags to add to newly created Posts
    :param post_media_attachments: a mapping of Medias to add to newly created Posts
    :return: None
    """
    Post.objects.bulk_create(posts)

    # attach many-to-ones
    # bulk_create does not return primary keys on all backends, so each
    # post is re-fetched by its (site_id, wp_id) natural key before the
    # m2m rows are added — one query per post (N+1 by design here).
    for post_wp_id, categories in six.iteritems(post_categories):
        Post.objects.get(site_id=self.site_id, wp_id=post_wp_id).categories.add(*categories)

    for post_id, tags in six.iteritems(post_tags):
        Post.objects.get(site_id=self.site_id, wp_id=post_id).tags.add(*tags)

    for post_id, attachments in six.iteritems(post_media_attachments):
        Post.objects.get(site_id=self.site_id, wp_id=post_id).attachments.add(*attachments)
Actually do a db bulk creation of posts, and link up the many-to-many fields :param posts: the list of Post objects to bulk create :param post_categories: a mapping of Categories to add to newly created Posts :param post_tags: a mapping of Tags to add to newly created Posts :param post_media_attachments: a mapping of Medias to add to newly created Posts :return: None
def _do_exit(self, cmd, args): """\ Exit shell. exit | C-D Exit to the parent shell. exit root | end Exit to the root shell. exit all Exit to the command line. """ if cmd == 'end': if not args: return 'root' else: self.stderr.write(textwrap.dedent('''\ end: unrecognized arguments: {} ''')).format(args) # Hereafter, cmd == 'exit'. if not args: return True if len(args) > 1: self.stderr.write(textwrap.dedent('''\ exit: too many arguments: {} ''')).format(args) exit_directive = args[0] if exit_directive == 'root': return 'root' if exit_directive == 'all': return 'all' self.stderr.write(textwrap.dedent('''\ exit: unrecognized arguments: {} ''')).format(args)
\ Exit shell. exit | C-D Exit to the parent shell. exit root | end Exit to the root shell. exit all Exit to the command line.
def evolved_transformer_decoder(decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, cache=None, decode_loop_step=None, name="decoder", nonpadding=None, save_weights_to=None, make_image_summary=True, losses=None): """Evolved Transformer decoder. See arxiv.org/abs/1901.11117 for more details. Args: decoder_input: a Tensor. encoder_output: a Tensor. decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()). encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()). hparams: hyperparameters for model. cache: dict, containing tensors which are the results of previous layers, used for fast decoding. decode_loop_step: An integer, step number of the decoding loop. Only used for inference on TPU. name: a string. nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This is used to mask out padding in convolutional layers. We generally only need this mask for "packed" datasets, because for ordinary datasets, no padding is ever followed by nonpadding. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. losses: Not supported. Returns: Decoder output tensor. 
""" del losses attention_dropout_broadcast_dims = ( common_layers.comma_separated_string_to_integer_list( getattr(hparams, "attention_dropout_broadcast_dims", ""))) with tf.variable_scope(name): hidden_state = decoder_input for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers): layer_name = "layer_%d" % layer layer_cache = cache[layer_name] if cache is not None else None with tf.variable_scope(layer_name): with tf.variable_scope(_SIXTEEN_HEAD_ATTENTION_NAME): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) attention_cache = layer_cache[ _SIXTEEN_HEAD_ATTENTION_NAME] if layer_cache is not None else None left_state = common_attention.multihead_attention( hidden_state, None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, _capped_double_heads(hparams.num_heads), hparams.attention_dropout, attention_type=hparams.self_attention_type, max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=attention_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, max_length=hparams.get("max_length"), decode_loop_step=decode_loop_step, vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32")) if encoder_output is not None: with tf.variable_scope(_FIRST_ATTEND_TO_ENCODER_NAME): attention_cache = ( layer_cache[_FIRST_ATTEND_TO_ENCODER_NAME] if layer_cache is not None else None) right_state = common_attention.multihead_attention( hidden_state, encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or 
hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=attention_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, max_length=hparams.get("max_length"), vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32")) left_state = tf.nn.dropout(left_state, 1 - hparams.layer_prepostprocess_dropout) right_state = tf.nn.dropout( right_state, 1 - hparams.layer_prepostprocess_dropout) hidden_state = residual_state + left_state + right_state else: hidden_state = common_layers.layer_postprocess( residual_state, left_state, hparams) with tf.variable_scope(_CONV_BRANCHES_NAME): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) if nonpadding is not None: # Mask padding from conv layers. mask = tf.tile( tf.expand_dims(nonpadding, 2), [1, 1, hparams.hidden_size]) hidden_state *= mask if layer_cache: if decode_loop_step is None: hidden_state = layer_cache[ _CONV_BRANCHES_FIRST_LAYER_NAME] = tf.concat( [ layer_cache[_CONV_BRANCHES_FIRST_LAYER_NAME], hidden_state ], axis=1)[:, -1 * _DECODER_LEFT_CONV_PADDING - 1:, :] left_state = hidden_state right_state = hidden_state[:, _DECODER_LEFT_CONV_PADDING - _DECODER_RIGHT_CONV_PADDING:, :] else: # Inplace update is required for inference on TPU. # Inplace_ops only supports inplace_update on the first dimension. 
tmp = tf.transpose( layer_cache[_CONV_BRANCHES_FIRST_LAYER_NAME], perm=[1, 0, 2]) tmp = tf.expand_dims(tmp, axis=1) tmp = inplace_ops.alias_inplace_update( tmp, decode_loop_step * tf.shape(hidden_state)[1] + _DECODER_LEFT_CONV_PADDING, tf.transpose(hidden_state, perm=[1, 0, 2])) tmp = tf.squeeze(tmp, axis=1) hidden_state = layer_cache[ _CONV_BRANCHES_FIRST_LAYER_NAME] = tf.transpose( tmp, perm=[1, 0, 2]) left_state_indexes = [ decode_loop_step + i for i in range(_DECODER_LEFT_CONV_PADDING + 1) ] left_state = tf.gather(hidden_state, left_state_indexes, axis=1) right_state_indexes = [ decode_loop_step + i + (_DECODER_LEFT_CONV_PADDING - _DECODER_RIGHT_CONV_PADDING) for i in range(_DECODER_RIGHT_CONV_PADDING + 1) ] right_state = tf.gather(hidden_state, right_state_indexes, axis=1) else: # No caching. left_state = tf.pad( hidden_state, paddings=[[0, 0], [_DECODER_LEFT_CONV_PADDING, 0], [0, 0]]) right_state = tf.pad( hidden_state, paddings=[[0, 0], [_DECODER_RIGHT_CONV_PADDING, 0], [0, 0]]) left_output_dim = int(hparams.hidden_size * 2) separable_conv_11x1 = tf.layers.SeparableConv1D( left_output_dim, 11, padding="VALID", name="separable_conv11x1", activation=tf.nn.relu) left_state = separable_conv_11x1.apply(left_state) left_state = tf.nn.dropout(left_state, 1 - hparams.layer_prepostprocess_dropout) right_output_dim = int(hparams.hidden_size / 2) separable_conv_7x1_1 = tf.layers.SeparableConv1D( right_output_dim, 7, padding="VALID", name="separable_conv_7x1_1") right_state = separable_conv_7x1_1.apply(right_state) right_state = tf.nn.dropout(right_state, 1 - hparams.layer_prepostprocess_dropout) right_state = tf.pad( right_state, [[0, 0], [0, 0], [0, left_output_dim - right_output_dim]], constant_values=0) hidden_state = left_state + right_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) if nonpadding is not None: # Mask padding from conv layers. 
mask = tf.tile( tf.expand_dims(nonpadding, 2), [1, 1, hparams.hidden_size * 2]) hidden_state *= mask if layer_cache: if decode_loop_step is None: hidden_state = layer_cache[ _CONV_BRANCHES_SECOND_LAYER_NAME] = tf.concat( [ layer_cache[_CONV_BRANCHES_SECOND_LAYER_NAME], hidden_state ], axis=1)[:, -1 * _DECODER_FINAL_CONV_PADDING - 1:, :] else: # Inplace update is required for inference on TPU. # Inplace_ops only supports inplace_update on the first dimension. tmp = tf.transpose( layer_cache[_CONV_BRANCHES_SECOND_LAYER_NAME], perm=[1, 0, 2]) tmp = tf.expand_dims(tmp, axis=1) tmp = inplace_ops.alias_inplace_update( tmp, (decode_loop_step + _DECODER_FINAL_CONV_PADDING) * tf.shape(hidden_state)[1], tf.transpose(hidden_state, perm=[1, 0, 2])) tmp = tf.squeeze(tmp, axis=1) hidden_state = layer_cache[ _CONV_BRANCHES_SECOND_LAYER_NAME] = tf.transpose( tmp, perm=[1, 0, 2]) hidden_state_indexes = [ decode_loop_step + i for i in range(_DECODER_FINAL_CONV_PADDING + 1) ] hidden_state = tf.gather( hidden_state, hidden_state_indexes, axis=1) else: hidden_state = tf.pad( hidden_state, paddings=[[0, 0], [_DECODER_FINAL_CONV_PADDING, 0], [0, 0]]) separable_conv_7x1_2 = tf.layers.SeparableConv1D( hparams.hidden_size, 7, padding="VALID", name="separable_conv_7x1_2") hidden_state = separable_conv_7x1_2.apply(hidden_state) hidden_state = common_layers.layer_postprocess( residual_state, hidden_state, hparams) with tf.variable_scope(_VANILLA_ATTENTION_NAME): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) attention_cache = layer_cache[ _VANILLA_ATTENTION_NAME] if layer_cache is not None else None hidden_state = common_attention.multihead_attention( hidden_state, None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, 
max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=attention_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, max_length=hparams.get("max_length"), decode_loop_step=decode_loop_step, vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32")) hidden_state = common_layers.layer_postprocess( residual_state, hidden_state, hparams) if encoder_output is not None: with tf.variable_scope(_SECOND_ATTEND_TO_ENCODER_NAME): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) attention_cache = ( layer_cache[_SECOND_ATTEND_TO_ENCODER_NAME] if layer_cache is not None else None) hidden_state = common_attention.multihead_attention( hidden_state, encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=attention_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, max_length=hparams.get("max_length"), vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32")) hidden_state = common_layers.layer_postprocess( residual_state, hidden_state, hparams) with tf.variable_scope("dense_layers"): residual_state = hidden_state hidden_state = common_layers.layer_preprocess(hidden_state, hparams) 
hidden_state = tf.layers.dense( hidden_state, int(hparams.hidden_size * 4), activation=tf.nn.swish) hidden_state = tf.nn.dropout(hidden_state, 1 - hparams.layer_prepostprocess_dropout) hidden_state = common_layers.layer_preprocess(hidden_state, hparams) hidden_state = tf.layers.dense(hidden_state, hparams.hidden_size) hidden_state = common_layers.layer_postprocess( residual_state, hidden_state, hparams) return common_layers.layer_preprocess(hidden_state, hparams)
Evolved Transformer decoder. See arxiv.org/abs/1901.11117 for more details. Args: decoder_input: a Tensor. encoder_output: a Tensor. decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()). encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()). hparams: hyperparameters for model. cache: dict, containing tensors which are the results of previous layers, used for fast decoding. decode_loop_step: An integer, step number of the decoding loop. Only used for inference on TPU. name: a string. nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This is used to mask out padding in convolutional layers. We generally only need this mask for "packed" datasets, because for ordinary datasets, no padding is ever followed by nonpadding. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. losses: Not supported. Returns: Decoder output tensor.
def _validate_name(name): """Pre-flight ``Bucket`` name validation. :type name: str or :data:`NoneType` :param name: Proposed bucket name. :rtype: str or :data:`NoneType` :returns: ``name`` if valid. """ if name is None: return # The first and las characters must be alphanumeric. if not all([name[0].isalnum(), name[-1].isalnum()]): raise ValueError("Bucket names must start and end with a number or letter.") return name
Pre-flight ``Bucket`` name validation. :type name: str or :data:`NoneType` :param name: Proposed bucket name. :rtype: str or :data:`NoneType` :returns: ``name`` if valid.
def compute_panel(cls, data, scales, params):
    """
    Positions must override this function

    Notes
    -----
    Make necessary adjustments to the columns in the dataframe.

    Create the position transformation functions and
    use self.transform_position() do the rest.

    See Also
    --------
    position_jitter.compute_panel
    """
    raise NotImplementedError(
        '{} needs to implement this method'.format(cls.__name__))
Positions must override this function Notes ----- Make necessary adjustments to the columns in the dataframe. Create the position transformation functions and use self.transform_position() do the rest. See Also -------- position_jitter.compute_panel
def _run_evolve(ssm_file, cnv_file, work_dir, data):
    """Run evolve.py to infer subclonal composition.

    :param ssm_file: simple somatic mutation input file for PhyloWGS.
    :param cnv_file: copy number variation input file for PhyloWGS.
    :param work_dir: working directory for outputs.
    :param data: sample information passed to file_transaction
        (presumably the bcbio sample dict — confirm with callers).
    :return: path to the ``top_k_trees`` output file.
    """
    # evolve.py is expected to be installed alongside the Python interpreter.
    exe = os.path.join(os.path.dirname(sys.executable), "evolve.py")
    assert os.path.exists(exe), "Could not find evolve script for PhyloWGS runs."
    out_dir = os.path.join(work_dir, "evolve")
    out_file = os.path.join(out_dir, "top_k_trees")
    # Skip the run if the output already exists and is newer than the input.
    if not utils.file_uptodate(out_file, cnv_file):
        with file_transaction(data, out_dir) as tx_out_dir:
            with utils.chdir(tx_out_dir):
                # "-r 42" fixes the random seed for reproducible runs.
                cmd = [sys.executable, exe, "-r", "42", ssm_file, cnv_file]
                do.run(cmd, "Run PhyloWGS evolution")
    return out_file
Run evolve.py to infer subclonal composition.
def invoke(self):
    """
    Run it, return whether to end training.

    Called once per iteration: when no improvement has been seen for
    ``_patience`` iterations (since the best iteration or the last
    anneal), either halve the learning rate and restore the best
    parameters, or — once the anneal budget is exhausted — stop training.
    """
    self._iter += 1
    # Measure staleness from whichever happened last: the best iteration
    # seen by the trainer, or the most recent anneal.
    if self._iter - max(self._trainer.best_iter, self._annealed_iter) >= self._patience:
        if self._annealed_times >= self._anneal_times:
            # Anneal budget exhausted: end training.
            logging.info("ending")
            self._trainer.exit()
        else:
            # Roll back to the best parameters and halve the learning rate
            # (a shared value accessed via get_value/set_value).
            self._trainer.set_params(*self._trainer.best_params)
            self._learning_rate.set_value(self._learning_rate.get_value() * 0.5)
            self._annealed_times += 1
            self._annealed_iter = self._iter
            logging.info("annealed learning rate to %f" % self._learning_rate.get_value())
Run it, return whether to end training.
def process_next_message(self, timeout):
    """Pull one message from the worker manager and route it.

    Acknowledgement messages mark a task as started on a worker;
    Result messages mark a task as done with its result.
    """
    msg = self.worker_manager.receive(timeout)

    if isinstance(msg, Acknowledgement):
        self.task_manager.task_start(msg.task, msg.worker)
    elif isinstance(msg, Result):
        self.task_manager.task_done(msg.task, msg.result)
Processes the next message coming from the workers.
def get_handlers(self, kind=None):
    """
    Retrieves the handlers of the given kind. If kind is None, all handlers
    are returned.

    :param kind: The kind of the handlers to return
    :return: A list of handlers, or an empty list
    """
    with self._lock:
        if kind is None:
            return self.__all_handlers.copy()
        # Copy so callers cannot mutate the registry; missing kind -> [].
        registered = self._handlers.get(kind)
        return list(registered) if registered is not None else []
Retrieves the handlers of the given kind. If kind is None, all handlers are returned. :param kind: The kind of the handlers to return :return: A list of handlers, or an empty list
def close(self):
    """
    Stop analyzing the current document and come up with a final
    prediction.

    :returns:  The ``result`` attribute, a ``dict`` with the keys
               `encoding`, `confidence`, and `language`.
    """
    # Don't bother with checks if we're already done
    if self.done:
        return self.result
    self.done = True

    if not self._got_data:
        self.logger.debug('no data received!')

    # Default to ASCII if it is all we've seen so far
    elif self._input_state == InputState.PURE_ASCII:
        self.result = {'encoding': 'ascii',
                       'confidence': 1.0,
                       'language': ''}

    # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
    elif self._input_state == InputState.HIGH_BYTE:
        prober_confidence = None
        max_prober_confidence = 0.0
        max_prober = None
        for prober in self._charset_probers:
            if not prober:
                continue
            prober_confidence = prober.get_confidence()
            if prober_confidence > max_prober_confidence:
                max_prober_confidence = prober_confidence
                max_prober = prober
        if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
            charset_name = max_prober.charset_name
            lower_charset_name = max_prober.charset_name.lower()
            confidence = max_prober.get_confidence()
            # Use Windows encoding name instead of ISO-8859 if we saw any
            # extra Windows-specific bytes
            if lower_charset_name.startswith('iso-8859'):
                if self._has_win_bytes:
                    charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
                                                        charset_name)
            self.result = {'encoding': charset_name,
                           'confidence': confidence,
                           'language': max_prober.language}

    # Log all prober confidences if none met MINIMUM_THRESHOLD
    if self.logger.getEffectiveLevel() == logging.DEBUG:
        if self.result['encoding'] is None:
            self.logger.debug('no probers hit minimum threshold')
            for group_prober in self._charset_probers:
                if not group_prober:
                    continue
                if isinstance(group_prober, CharSetGroupProber):
                    for prober in group_prober.probers:
                        self.logger.debug('%s %s confidence = %s',
                                          prober.charset_name,
                                          prober.language,
                                          prober.get_confidence())
                else:
                    # NOTE(review): this branch logs `prober`, which is the
                    # leftover variable from the earlier confidence loop,
                    # not `group_prober` — looks unintended; confirm against
                    # upstream before changing.
                    self.logger.debug('%s %s confidence = %s',
                                      prober.charset_name,
                                      prober.language,
                                      prober.get_confidence())
    return self.result
Stop analyzing the current document and come up with a final prediction. :returns: The ``result`` attribute, a ``dict`` with the keys `encoding`, `confidence`, and `language`.
def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
  """Helper to `maybe_call_fn_and_grads`.

  Normalizes `fn_arg_list` into a list of tensors, evaluates `fn` if
  `result` is not already supplied, and computes gradients of the result
  with respect to the arguments unless `grads` is already supplied.
  Returns a `(result, grads)` pair.
  """
  with tf.compat.v1.name_scope(name, 'value_and_gradients',
                               [fn_arg_list, result, grads]):

    def _convert_to_tensor(x, name):
      # `None` entries are passed through untouched; lists are converted
      # element-wise.
      ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(
          value=x_, name=name)
      return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)

    fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
                   else [fn_arg_list])
    fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')

    if result is None:
      result = fn(*fn_arg_list)
      if grads is None and tf.executing_eagerly():
        # Ensure we disable bijector cacheing in eager mode.
        # TODO(b/72831017): Remove this once bijector cacheing is fixed for
        # eager mode.
        fn_arg_list = [0 + x for x in fn_arg_list]

    result = _convert_to_tensor(result, 'fn_result')

    if grads is not None:
      # Caller already supplied gradients: just normalize and return.
      grads = _convert_to_tensor(grads, 'fn_grad')
      return result, grads

    if is_list_like(result) and len(result) == len(fn_arg_list):
      # Compute the block diagonal of Jacobian.
      # TODO(b/79158574): Guard this calculation by an arg which explicitly
      # requests block diagonal Jacobian calculation.
      def fn_slice(i):
        """Needed to prevent `cell-var-from-loop` pylint warning."""
        return lambda x: fn(*(fn_arg_list[:i] + [x] + fn_arg_list[i+1:]))

      grads = [
          tfp_math_value_and_gradients(fn_slice(i), fn_arg_list[i])[1]
          for i in range(len(result))
      ]
    else:
      _, grads = tfp_math_value_and_gradients(fn, fn_arg_list)

    return result, grads
Helper to `maybe_call_fn_and_grads`.
def _add_field_column(self, field):  # pragma: no cover
    """Add a column for a given label field.

    Registers a column named after `field` whose value per cluster comes
    from the cluster metadata.
    """
    # The inner function's name is part of the registration; do not rename.
    @self.add_column(name=field)
    def get_my_label(cluster_id):
        # cluster_meta.get(field, cluster_id) — presumably returns the
        # field's value for the given cluster; confirm its signature in
        # the cluster metadata implementation.
        return self.cluster_meta.get(field, cluster_id)
Add a column for a given label field.
def getVariable(self, name):
    """
    Get the variable with the corresponding name.

    Args:
        name: Name of the variable to be found.

    Raises:
        TypeError: if the specified variable does not exist.
    """
    def _fetch():
        return Variable(self._impl.getVariable(name))

    # Acquire the interpreter lock around the lookup.
    return lock_and_call(_fetch, self._lock)
Get the variable with the corresponding name. Args: name: Name of the variable to be found. Raises: TypeError: if the specified variable does not exist.
def get_owned_games(self, steamID, include_appinfo=1, include_played_free_games=0, appids_filter=None, format=None):
    """Request a list of games owned by a given steam id.

    steamID: The users id
    include_appinfo: boolean.
    include_played_free_games: boolean.
    appids_filter: a json encoded list of app ids.
    format: Return format. None defaults to json. (json, xml, vdf)
    """
    params = {
        'steamid': steamID,
        'include_appinfo': include_appinfo,
        'include_played_free_games': include_played_free_games,
    }
    # Optional arguments are only sent when explicitly supplied.
    if format is not None:
        params['format'] = format
    if appids_filter is not None:
        params['appids_filter'] = appids_filter

    url = self.create_request_url(self.interface, 'GetOwnedGames', 1, params)
    payload = self.retrieve_request(url)
    return self.return_data(payload, format=format)
Request a list of games owned by a given steam id. steamID: The users id include_appinfo: boolean. include_played_free_games: boolean. appids_filter: a json encoded list of app ids. format: Return format. None defaults to json. (json, xml, vdf)
def dAbr_dV(dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St):
    """ Partial derivatives of squared flow magnitudes w.r.t voltage.

    Computes partial derivatives of apparent power w.r.t active and
    reactive power flows.  Partial derivative must equal 1 for lines with
    zero flow to avoid division by zero errors (1 comes from L'Hopital).
    """
    # d(P^2 + Q^2)/dP = 2P and d(P^2 + Q^2)/dQ = 2Q, as diagonal matrices
    # for the "from" (f) and "to" (t) ends.
    two_Pf = spdiag(2 * Sf.real())
    two_Qf = spdiag(2 * Sf.imag())
    two_Pt = spdiag(2 * St.real())
    two_Qt = spdiag(2 * St.imag())

    # Chain rule through the flow sensitivities w.r.t. voltage angle.
    dAf_dVa = two_Pf * dSf_dVa.real() + two_Qf * dSf_dVa.imag()
    dAt_dVa = two_Pt * dSt_dVa.real() + two_Qt * dSt_dVa.imag()

    # Chain rule through the flow sensitivities w.r.t. voltage magnitude.
    dAf_dVm = two_Pf * dSf_dVm.real() + two_Qf * dSf_dVm.imag()
    dAt_dVm = two_Pt * dSt_dVm.real() + two_Qt * dSt_dVm.imag()

    return dAf_dVa, dAf_dVm, dAt_dVa, dAt_dVm
Partial derivatives of squared flow magnitudes w.r.t voltage. Computes partial derivatives of apparent power w.r.t active and reactive power flows. Partial derivative must equal 1 for lines with zero flow to avoid division by zero errors (1 comes from L'Hopital).
def build_error_handler(*tasks):
    """
    Provides a generic error function that packages a flask_buzz exception
    so that it can be handled nicely by the flask error handler::

        app.register_error_handler(
            FlaskBuzz, FlaskBuzz.build_error_handler(),
        )

    Additionally, extra tasks may be applied to the error prior to
    packaging::

        app.register_error_handler(
            FlaskBuzz,
            build_error_handler(print, lambda e: foo(e)),
        )

    This latter example will print the error to stdout and
    also call the foo() function with the error prior to packaging
    it for flask's handler
    """
    def _handler(error, tasks=()):
        # Use an immutable tuple as the default (a mutable [] default is
        # shared across calls) and a plain loop instead of building a
        # throwaway list via a side-effect comprehension.
        for task in tasks:
            task(error)
        return error.jsonify(), error.status_code, error.headers

    return functools.partial(_handler, tasks=tasks)
Provides a generic error function that packages a flask_buzz exception so that it can be handled nicely by the flask error handler:: app.register_error_handler( FlaskBuzz, FlaskBuzz.build_error_handler(), ) Additionally, extra tasks may be applied to the error prior to packaging:: app.register_error_handler( FlaskBuzz, build_error_handler(print, lambda e: foo(e)), ) This latter example will print the error to stdout and also call the foo() function with the error prior to packaging it for flask's handler
def find_multiline_pattern(self, regexp, cursor, findflag):
    """Reimplement QTextDocument's find method.

    Add support for *multiline* regular expressions by searching the
    whole document text with :mod:`re` instead of Qt's line-based find.

    regexp: QRegExp whose pattern is recompiled with :mod:`re`.
    cursor: QTextCursor marking the position to search from.
    findflag: QTextDocument.FindFlags; only FindBackward is honoured.

    Returns a QTextCursor selecting the match, or None when the pattern
    is invalid or nothing matches.
    """
    pattern = to_text_string(regexp.pattern())
    text = to_text_string(self.toPlainText())
    try:
        regobj = re.compile(pattern)
    except re.error:
        # re.error is the public name of the deprecated
        # sre_constants.error; an invalid pattern means "no match".
        return
    if findflag & QTextDocument.FindBackward:
        # Find backward: restrict the search to the text before the
        # selection and keep the last match.
        offset = min([cursor.selectionEnd(), cursor.selectionStart()])
        text = text[:offset]
        matches = list(regobj.finditer(text, 0, offset))
        if matches:
            match = matches[-1]
        else:
            return
    else:
        # Find forward, starting after the current selection.
        offset = max([cursor.selectionEnd(), cursor.selectionStart()])
        match = regobj.search(text, offset)
    if match:
        pos1, pos2 = match.span()
        fcursor = self.textCursor()
        fcursor.setPosition(pos1)
        fcursor.setPosition(pos2, QTextCursor.KeepAnchor)
        return fcursor
Reimplement QTextDocument's find method Add support for *multiline* regular expressions
def version_history(soup, html_flag=True):
    "extract the article version history details"

    def convert(xml_string):
        # Render node XML to HTML (or pass through) per html_flag;
        # a def instead of an assigned lambda (PEP 8 / E731).
        return xml_to_html(html_flag, xml_string)

    version_history = []
    related_object_tags = raw_parser.related_object(raw_parser.article_meta(soup))
    for tag in related_object_tags:
        article_version = OrderedDict()
        date_tag = first(raw_parser.date(tag))
        if date_tag:
            # Version label plus the individual date parts and a struct.
            copy_attribute(date_tag.attrs, 'date-type', article_version, 'version')
            (day, month, year) = ymd(date_tag)
            article_version['day'] = day
            article_version['month'] = month
            article_version['year'] = year
            article_version['date'] = date_struct_nn(year, month, day)
        copy_attribute(tag.attrs, 'xlink:href', article_version, 'xlink_href')
        set_if_value(article_version, "comment",
                     convert(node_contents_str(first(raw_parser.comment(tag)))))
        version_history.append(article_version)
    return version_history
extract the article version history details
def print_variables_info(self, output_file=sys.stdout):
    """Print variables information in human readable format.

    output_file: writable text stream the table is printed to
        (defaults to sys.stdout).
    """
    header = ('    name |    type size             \n' +
              '---------+-------------------------\n')
    # Build the rows with a single join instead of quadratic string +=,
    # and iterate the dict view directly (no list() materialization).
    rows = ''.join(
        '{:>8} | {:>6} {!s:<10}\n'.format(name, var_info[0], var_info[1])
        for name, var_info in self.variables.items())
    print(prefix_indent('variables: ', header + rows), file=output_file)
Print variables information in human readable format.
def locate_file(filename, env_var='', directory=''):
    """
    Locates a file given an environment variable or directory

    :param filename: filename to search for
    :param env_var: environment variable to look under
    :param directory: directory to look in
    :return: (string) absolute path to filename or None if not found
    """
    # Try the environment variable first, then fall back to the directory.
    candidate = locate_by_env(filename, env_var)
    if not candidate:
        candidate = locate_by_dir(filename, directory)
    if can_locate(candidate):
        return os.path.abspath(candidate)
    return None
Locates a file given an environment variable or directory :param filename: filename to search for :param env_var: environment variable to look under :param directory: directory to look in :return: (string) absolute path to filename or None if not found
def has_perm(self, perm):
    """
    Checks if key has the given django's auth Permission
    """
    # Build the lookup kwargs for a direct permission and for one granted
    # through a group, then check both querysets.
    if '.' in perm:
        app_label, codename = perm.split('.')
        direct_lookup = {
            'content_type__app_label': app_label,
            'codename': codename,
        }
        group_lookup = {
            'permissions__content_type__app_label': app_label,
            'permissions__codename': codename,
        }
    else:
        direct_lookup = {'codename': perm}
        group_lookup = {'permissions__codename': perm}
    return (self.permissions.filter(**direct_lookup).exists()
            or self.groups.filter(**group_lookup).exists())
Checks if key has the given django's auth Permission
def add_filter_rule(
        self, name, condition, filters, actions, active=1, way='in'):
    """
    :param: name filter name
    :param: condition allof or anyof
    :param: filters dict of filters
    :param: actions dict of actions
    :param: active 1 to enable the rule, 0 to disable it
    :param: way string describing if filter is for 'in' or 'out' messages
    :returns: list of user's zobjects.FilterRule
    :raises: ZimSOAPException if a rule with the same name already exists
    """
    # Copy before adding 'condition' so the caller's dict is not mutated.
    filter_tests = dict(filters)
    filter_tests['condition'] = condition
    new_rule = {
        'name': name,
        'active': active,
        'filterTests': filter_tests,
        'filterActions': actions
    }
    new_rules = [zobjects.FilterRule.from_dict(new_rule)]

    prev_rules = self.get_filter_rules(way=way)

    # if there is already some rules
    if prev_rules:
        for rule in prev_rules:
            # don't add rule if it already exist
            if rule.name == new_rules[0].name:
                raise ZimSOAPException(
                    'filter %s already exists' % rule.name)
        new_rules = new_rules + prev_rules

    content = {
        'filterRules': {
            'filterRule': [r._full_data for r in new_rules]
        }
    }
    if way == 'in':
        self.request('ModifyFilterRules', content)
    elif way == 'out':
        self.request('ModifyOutgoingFilterRules', content)
    return new_rules
:param: name filter name :param: condition allof or anyof :param: filters dict of filters :param: actions dict of actions :param: way string describing if filter is for 'in' or 'out' messages :returns: list of user's zobjects.FilterRule
def _flatten(self, element):
    """Recursively enter and extract text from all child elements."""
    parts = [element.text or '']
    alt = element.attrib.get('alt')
    if alt:
        # Symbol images carry their textual form in the 'alt' attribute.
        parts.append(Symbol(alt).textbox)
    for child in element:
        parts.append(self._flatten(child))
        parts.append(child.tail or '')
    # prevent reminder text from getting too close to mana symbols
    return ''.join(parts).replace('}(', '} (')
Recursively enter and extract text from all child elements.
def check_ok_button(self):
    """Helper to enable or not the OK button."""
    # Enable only when at least one layer is selected and every
    # connection field (login, password, url) is non-empty.
    credentials_filled = bool(
        self.login.text() and self.password.text() and self.url.text())
    self.ok_button.setEnabled(
        self.layers.count() >= 1 and credentials_filled)
Helper to enable or not the OK button.
def limit_spec(self, spec):
    """
    Whenever we do a Pseudo ID lookup from the database, we need to limit
    based on the memberships -> organization -> jurisdiction, so we scope
    the resolution.
    """
    if list(spec.keys()) == ['name']:
        # Name-only lookups also match any of the person's other names,
        # still scoped to this jurisdiction.
        name = spec['name']
        in_jurisdiction = Q(
            memberships__organization__jurisdiction_id=self.jurisdiction_id)
        return (Q(name=name) | Q(other_names__name=name)) & in_jurisdiction
    spec['memberships__organization__jurisdiction_id'] = self.jurisdiction_id
    return spec
Whenever we do a Pseudo ID lookup from the database, we need to limit based on the memberships -> organization -> jurisdiction, so we scope the resolution.
def return_self_updater(func):
    '''
    Run func, but still return v.

    Useful for using knowledge.update with operations like append,
    extend, etc.

    e.g. return_self(lambda k,v: v.append('newobj'))
    '''
    @functools.wraps(func)
    def wrapper(key, value):
        # Invoke for its side effect, then hand the value back.
        func(key, value)
        return value
    return wrapper
Run func, but still return v. Useful for using knowledge.update with operations like append, extend, etc. e.g. return_self(lambda k,v: v.append('newobj'))