code: string (lengths 75 to 104k)
docstring: string (lengths 1 to 46.9k)
def lstm_posterior_builder(getter, name, *args, **kwargs): """A builder for a particular diagonal Gaussian posterior. Args: getter: The `getter` passed to a `custom_getter`. Please see the documentation for `tf.get_variable`. name: The `name` argument passed to `tf.get_variable`. *args: Positional arguments forwarded by `tf.get_variable`. **kwargs: Keyword arguments forwarded by `tf.get_variable`. Returns: An instance of `tfp.distributions.Distribution` representing the posterior distribution over the variable in question. """ del args parameter_shapes = tfp.distributions.Normal.param_static_shapes( kwargs["shape"]) # The standard deviation of the scale mixture prior. prior_stddev = np.sqrt( FLAGS.prior_pi * np.square(FLAGS.prior_sigma1) + (1 - FLAGS.prior_pi) * np.square(FLAGS.prior_sigma2)) loc_var = getter( "{}/posterior_loc".format(name), shape=parameter_shapes["loc"], initializer=kwargs.get("initializer"), dtype=tf.float32) scale_var = getter( "{}/posterior_scale".format(name), initializer=tf.random_uniform( minval=np.log(np.exp(prior_stddev / 4.0) - 1.0), maxval=np.log(np.exp(prior_stddev / 2.0) - 1.0), dtype=tf.float32, shape=parameter_shapes["scale"])) return tfp.distributions.Normal( loc=loc_var, scale=tf.nn.softplus(scale_var) + 1e-5, name="{}/posterior_dist".format(name))
A builder for a particular diagonal Gaussian posterior. Args: getter: The `getter` passed to a `custom_getter`. Please see the documentation for `tf.get_variable`. name: The `name` argument passed to `tf.get_variable`. *args: Positional arguments forwarded by `tf.get_variable`. **kwargs: Keyword arguments forwarded by `tf.get_variable`. Returns: An instance of `tfp.distributions.Distribution` representing the posterior distribution over the variable in question.
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FunctionVersionContext for this FunctionVersionInstance :rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionContext """ if self._context is None: self._context = FunctionVersionContext( self._version, service_sid=self._solution['service_sid'], function_sid=self._solution['function_sid'], sid=self._solution['sid'], ) return self._context
Generate an instance context for the instance; the context is capable of performing various actions. All instance actions are proxied to the context. :returns: FunctionVersionContext for this FunctionVersionInstance :rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionContext
def _load_background_color(self): """ Loads the data related to the fill color """ url = self.build_url(self._endpoints.get('fill')) response = self.session.get(url) if not response: return None data = response.json() self._background_color = data.get('color', None)
Loads the data related to the fill color
def create_relocate_package(cls, package_name, shade_prefix=None, recursive=True): """Convenience constructor for a package relocation rule. Essentially equivalent to just using ``shading_relocate('package_name.**')``. :param string package_name: Package name to shade (eg, ``org.pantsbuild.example``). :param string shade_prefix: Optional prefix to apply to the package. Defaults to ``__shaded_by_pants__.``. :param bool recursive: Whether to rename everything under any subpackage of ``package_name``, or just direct children of the package. (Defaults to True). """ return cls.create_relocate(from_pattern=cls._format_package_glob(package_name, recursive), shade_prefix=shade_prefix)
Convenience constructor for a package relocation rule. Essentially equivalent to just using ``shading_relocate('package_name.**')``. :param string package_name: Package name to shade (eg, ``org.pantsbuild.example``). :param string shade_prefix: Optional prefix to apply to the package. Defaults to ``__shaded_by_pants__.``. :param bool recursive: Whether to rename everything under any subpackage of ``package_name``, or just direct children of the package. (Defaults to True).
def get_words_iterable( letters, tamil_only=False ): """ given a list of UTF-8 letters section them into words, grouping them at spaces """ # correct algorithm for get-tamil-words buf = [] for idx,let in enumerate(letters): if not let.isspace(): if istamil(let) or (not tamil_only): buf.append( let ) else: if len(buf) > 0: yield u"".join( buf ) buf = [] if len(buf) > 0: yield u"".join(buf)
Given a list of UTF-8 letters, section them into words, grouping them at spaces.
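A brief usage sketch for the generator above. Note that `istamil` is resolved inside the function's own module (assumed to be open-tamil's utf8 module), so the caller does not need to import it; with tamil_only=False, plain ASCII words pass through:

letters = list(u"hello world")
words = list(get_words_iterable(letters, tamil_only=False))
# words == [u"hello", u"world"]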
def add_slices(self, dashboard_id): """Add and save slices to a dashboard""" data = json.loads(request.form.get('data')) session = db.session() Slice = models.Slice # noqa dash = ( session.query(models.Dashboard).filter_by(id=dashboard_id).first()) check_ownership(dash, raise_if_false=True) new_slices = session.query(Slice).filter( Slice.id.in_(data['slice_ids'])) dash.slices += new_slices session.merge(dash) session.commit() session.close() return 'SLICES ADDED'
Add and save slices to a dashboard
def rooms(self): """ :rtype: twilio.rest.video.v1.room.RoomList """ if self._rooms is None: self._rooms = RoomList(self) return self._rooms
:rtype: twilio.rest.video.v1.room.RoomList
def runs_to_xml(self, runSet, runs, blockname=None): """ This function creates the XML structure for a list of runs """ # copy benchmarkinfo, limits, columntitles, systeminfo from xml_header runsElem = util.copy_of_xml_element(self.xml_header) runsElem.set("options", " ".join(runSet.options)) if blockname is not None: runsElem.set("block", blockname) runsElem.set("name", ((runSet.real_name + ".") if runSet.real_name else "") + blockname) elif runSet.real_name: runsElem.set("name", runSet.real_name) # collect XMLelements from all runs for run in runs: runsElem.append(run.xml) return runsElem
This function creates the XML structure for a list of runs
def p_If(p): ''' If : IfBlock | IfBlock ELSE COLON Terminator Block ''' if len(p) == 2: p[0] = If(p[1], None, None) else: p[0] = If(p[1], p[5], p[4])
If : IfBlock | IfBlock ELSE COLON Terminator Block
def generate(env): """Add Builders and construction variables for Ghostscript to an Environment.""" global GhostscriptAction # The following try-except block enables us to use the Tool # in standalone mode (without the accompanying pdf.py), # whenever we need an explicit call of gs via the Gs() # Builder ... try: if GhostscriptAction is None: GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR') from SCons.Tool import pdf pdf.generate(env) bld = env['BUILDERS']['PDF'] bld.add_action('.ps', GhostscriptAction) except ImportError as e: pass gsbuilder = SCons.Builder.Builder(action = SCons.Action.Action('$GSCOM', '$GSCOMSTR')) env['BUILDERS']['Gs'] = gsbuilder env['GS'] = gs env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=pdfwrite') env['GSCOM'] = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
Add Builders and construction variables for Ghostscript to an Environment.
def image_undo(): """ Undoes the last coarsen or smooth command. """ if len(image_undo_list) <= 0: print("no undos in memory") return [image, Z] = image_undo_list.pop(-1) image.set_array(Z) _pylab.draw()
Undoes the last coarsen or smooth command.
def isbn(self, fmt: Optional[ISBNFormat] = None, locale: str = 'en') -> str: """Generate ISBN for current locale. To change ISBN format, pass parameter ``fmt`` with needed value of the enum object :class:`~mimesis.enums.ISBNFormat` :param fmt: ISBN format. :param locale: Locale code. :return: ISBN. :raises NonEnumerableError: if fmt is not enum ISBNFormat. """ fmt_value = self._validate_enum(item=fmt, enum=ISBNFormat) mask = ISBN_MASKS[fmt_value].format( ISBN_GROUPS[locale]) return self.random.custom_code(mask)
Generate ISBN for current locale. To change ISBN format, pass parameter ``fmt`` with needed value of the enum object :class:`~mimesis.enums.ISBNFormat` :param fmt: ISBN format. :param locale: Locale code. :return: ISBN. :raises NonEnumerableError: if fmt is not enum ISBNFormat.
def get(self, section, option, as_list=False): ''' Adds an optional "as_list" argument to ensure a list is returned. This is helpful when iterating over an option which may or may not be a multivar. ''' ret = super(GitConfigParser, self).get(section, option) if as_list and not isinstance(ret, list): ret = [ret] return ret
Adds an optional "as_list" argument to ensure a list is returned. This is helpful when iterating over an option which may or may not be a multivar.
def photo_url(self): """Get the URL of the topic's avatar image. :return: topic avatar URL :rtype: str """ img = self.soup.find('a', id='zh-avartar-edit-form').img['src'] return img.replace('_m', '_r')
Get the URL of the topic's avatar image. :return: topic avatar URL :rtype: str
def do_size(self, w, h): """Record size.""" if (w is None): self.sw = self.rw self.sh = self.rh else: self.sw = w self.sh = h # Now we have region and size, generate the image image = Image.new("RGB", (self.sw, self.sh), self.gen.background_color) for y in range(0, self.sh): for x in range(0, self.sw): ix = int((x * self.rw) // self.sw + self.rx) iy = int((y * self.rh) // self.sh + self.ry) color = self.gen.pixel(ix, iy) if (color is not None): image.putpixel((x, y), color) self.image = image
Record size.
def collect(config, pconn): """ All the heavy lifting done here """ # initialize collection target # tar files if config.analyze_file: logger.debug("Client analyzing a compress filesystem.") target = {'type': 'compressed_file', 'name': os.path.splitext( os.path.basename(config.analyze_file))[0], 'location': config.analyze_file} # mountpoints elif config.analyze_mountpoint: logger.debug("Client analyzing a filesystem already mounted.") target = {'type': 'mountpoint', 'name': os.path.splitext( os.path.basename(config.analyze_mountpoint))[0], 'location': config.analyze_mountpoint} # image elif config.analyze_image_id: logger.debug("Client running in image mode.") logger.debug("Scanning for matching image.") from .containers import get_targets targets = get_targets(config) if len(targets) == 0: sys.exit(constants.sig_kill_bad) target = targets[0] # host, or inside container else: if config.analyze_container: logger.debug('Client running in container mode.') else: logger.debug("Host selected as scanning target.") target = constants.default_target branch_info = get_branch_info(config, pconn) pc = InsightsUploadConf(config) tar_file = None collection_rules = pc.get_conf_file() rm_conf = pc.get_rm_conf() if rm_conf: logger.warn("WARNING: Excluding data from files") # defaults archive = None container_connection = None mp = None compressed_filesystem = None try: # analyze docker images if target['type'] == 'docker_image': from .containers import open_image container_connection = open_image(target['name']) logging_name = 'Docker image ' + target['name'] if container_connection: mp = container_connection.get_fs() else: logger.error('Could not open %s for analysis', logging_name) return False # analyze compressed files elif target['type'] == 'compressed_file': logging_name = 'Compressed file ' + target['name'] + ' at location ' + target['location'] from .compressed_file import InsightsCompressedFile compressed_filesystem = InsightsCompressedFile(target['location']) if compressed_filesystem.is_tarfile is False: logger.debug("Could not access compressed tar filesystem.") return False mp = compressed_filesystem.get_filesystem_path() # analyze mountpoints elif target['type'] == 'mountpoint': logging_name = 'Filesystem ' + target['name'] + ' at location ' + target['location'] mp = config.analyze_mountpoint # analyze the host elif target['type'] == 'host': logging_name = determine_hostname() # nothing found to analyze else: logger.error('Unexpected analysis target: %s', target['type']) return False archive = InsightsArchive(compressor=config.compressor, target_name=target['name']) atexit.register(_delete_archive_internal, config, archive) # determine the target type and begin collection # we infer "docker_image" SPEC analysis for certain types if target['type'] in ["mountpoint", "compressed_file"]: target_type = "docker_image" else: target_type = target['type'] logger.debug("Inferring target_type '%s' for SPEC collection", target_type) logger.debug("Inferred from '%s'", target['type']) dc = DataCollector(config, archive, mountpoint=mp) logger.info('Starting to collect Insights data for %s', logging_name) dc.run_collection(collection_rules, rm_conf, branch_info) tar_file = dc.done(collection_rules, rm_conf) finally: # called on loop iter end or unexpected exit if container_connection: container_connection.close() # cleanup the temporary stuff for analyzing tar files if config.analyze_file is not None and compressed_filesystem is not None: compressed_filesystem.cleanup_temp_filesystem() return tar_file
All the heavy lifting is done here: determine the analysis target, fetch branch info and collection rules, run the data collector, and return the archive tar file.
def find_throat_facets(self, throats=None): r""" Finds the indices of the Voronoi nodes that define the facet or ridge between the Delaunay nodes connected by the given throat. Parameters ---------- throats : array_like The throats whose facets are sought. The given throats should be from the 'delaunay' network. If no throats are specified, all 'delaunay' throats are assumed. Notes ----- The method is not well optimized as it scans through each given throat inside a for-loop, so it could be slow for large networks. """ if throats is None: throats = self.throats('delaunay') temp = [] tvals = self['throat.interconnect'].astype(int) am = self.create_adjacency_matrix(weights=tvals, fmt='lil', drop_zeros=True) for t in throats: P12 = self['throat.conns'][t] Ps = list(set(am.rows[P12][0]).intersection(am.rows[P12][1])) temp.append(Ps) return sp.array(temp, dtype=object)
r""" Finds the indicies of the Voronoi nodes that define the facet or ridge between the Delaunay nodes connected by the given throat. Parameters ---------- throats : array_like The throats whose facets are sought. The given throats should be from the 'delaunay' network. If no throats are specified, all 'delaunay' throats are assumed. Notes ----- The method is not well optimized as it scans through each given throat inside a for-loop, so it could be slow for large networks.
def check_is_working(self): """ Returns True if the wash alert web interface seems to be working properly, or False otherwise. >>> l.check_is_working() """ try: r = requests.post("http://{}/".format(LAUNDRY_DOMAIN), timeout=60, data={ "locationid": "5faec7e9-a4aa-47c2-a514-950c03fac460", "email": "pennappslabs@gmail.com", "washers": 0, "dryers": 0, "locationalert": "OK" }) r.raise_for_status() return "The transaction log for database 'QuantumCoin' is full due to 'LOG_BACKUP'." not in r.text except requests.exceptions.HTTPError: return False
Returns True if the wash alert web interface seems to be working properly, or False otherwise. >>> l.check_is_working()
def read_in_chunks(file_obj, chunk_size): """Generator to read a file piece by piece.""" offset = 0 while True: data = file_obj.read(chunk_size) if not data: break yield data, offset offset += len(data)
Generator to read a file piece by piece.
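A brief usage sketch; the file name and 1 MiB chunk size are illustrative:

# Stream a file in 1 MiB chunks, reporting each chunk's offset.
with open("data.bin", "rb") as f:
    for chunk, offset in read_in_chunks(f, chunk_size=1024 * 1024):
        print("read {} bytes at offset {}".format(len(chunk), offset))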
def image_id(self): """ this container is created from image with id...""" try: # docker >= 1.9 image_id = self.data["ImageID"] except KeyError: # docker <= 1.8 image_id = self.metadata_get(["Image"]) return image_id
this container is created from image with id...
def info(self): """Formatted string to display the available choices""" if self.descriptions is None: choice_list = ['"{}"'.format(choice) for choice in self.choices] else: choice_list = [ '"{}" ({})'.format(choice, self.descriptions[choice]) for choice in self.choices ] if len(self.choices) == 2: return 'either {} or {}'.format(choice_list[0], choice_list[1]) return 'any of {}'.format(', '.join(choice_list))
Formatted string to display the available choices
def get_all(self): """Return the complete context as dict including the exported variables. For optimizations reasons this might not return an actual copy so be careful with using it. """ if not self.vars: return self.parent if not self.parent: return self.vars return dict(self.parent, **self.vars)
Return the complete context as dict including the exported variables. For optimizations reasons this might not return an actual copy so be careful with using it.
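The merge order matters: exported child variables shadow parent entries. A plain-dict sketch of the same semantics:

parent = {"a": 1, "b": 2}
child_vars = {"b": 20, "c": 3}
merged = dict(parent, **child_vars)  # child values win on key clashes
assert merged == {"a": 1, "b": 20, "c": 3}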
def _find_newest_ckpt(ckpt_dir): """Returns path to the most recent experiment_state checkpoint (max by filename sort).""" full_paths = [ os.path.join(ckpt_dir, fname) for fname in os.listdir(ckpt_dir) if fname.startswith("experiment_state") and fname.endswith(".json") ] return max(full_paths)
Returns path to the most recent experiment_state checkpoint (max by filename sort).
def headers(self): """Response headers. Response headers is a dict with all keys in lower case. >>> import urlfetch >>> response = urlfetch.get("http://docs.python.org/") >>> response.headers { 'content-length': '8719', 'x-cache': 'MISS from localhost', 'accept-ranges': 'bytes', 'vary': 'Accept-Encoding', 'server': 'Apache/2.2.16 (Debian)', 'last-modified': 'Tue, 26 Jun 2012 19:23:18 GMT', 'connection': 'close', 'etag': '"13cc5e4-220f-4c36507ded580"', 'date': 'Wed, 27 Jun 2012 06:50:30 GMT', 'content-type': 'text/html', 'x-cache-lookup': 'MISS from localhost:8080' } """ if py3k: return dict((k.lower(), v) for k, v in self.getheaders()) else: return dict(self.getheaders())
Response headers. Response headers is a dict with all keys in lower case. >>> import urlfetch >>> response = urlfetch.get("http://docs.python.org/") >>> response.headers { 'content-length': '8719', 'x-cache': 'MISS from localhost', 'accept-ranges': 'bytes', 'vary': 'Accept-Encoding', 'server': 'Apache/2.2.16 (Debian)', 'last-modified': 'Tue, 26 Jun 2012 19:23:18 GMT', 'connection': 'close', 'etag': '"13cc5e4-220f-4c36507ded580"', 'date': 'Wed, 27 Jun 2012 06:50:30 GMT', 'content-type': 'text/html', 'x-cache-lookup': 'MISS from localhost:8080' }
def get_boto_ses_connection(): """ Shortcut for instantiating and returning a boto SESConnection object. :rtype: boto.ses.SESConnection :returns: A boto SESConnection object, from which email sending is done. """ access_key_id = getattr( settings, 'CUCUMBER_SES_ACCESS_KEY_ID', getattr(settings, 'AWS_ACCESS_KEY_ID', None)) access_key = getattr( settings, 'CUCUMBER_SES_SECRET_ACCESS_KEY', getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)) region_name = getattr( settings, 'CUCUMBER_SES_REGION_NAME', getattr(settings, 'AWS_SES_REGION_NAME', None)) if region_name is not None: return boto.ses.connect_to_region( region_name, aws_access_key_id=access_key_id, aws_secret_access_key=access_key, ) else: return boto.connect_ses( aws_access_key_id=access_key_id, aws_secret_access_key=access_key, )
Shortcut for instantiating and returning a boto SESConnection object. :rtype: boto.ses.SESConnection :returns: A boto SESConnection object, from which email sending is done.
async def stream(self, event_type: Type[TStreamEvent], num_events: Optional[int] = None) -> AsyncGenerator[TStreamEvent, None]: """ Stream all events that match the specified event type. This returns an ``AsyncIterable[BaseEvent]`` which can be consumed through an ``async for`` loop. An optional ``num_events`` parameter can be passed to stop streaming after a maximum amount of events was received. """ queue: asyncio.Queue = asyncio.Queue() if event_type not in self._queues: self._queues[event_type] = [] self._queues[event_type].append(queue) i = None if num_events is None else 0 while True: try: yield await queue.get() except GeneratorExit: self._queues[event_type].remove(queue) break except asyncio.CancelledError: self._queues[event_type].remove(queue) break else: if i is None: continue i += 1 if i >= cast(int, num_events): self._queues[event_type].remove(queue) break
Stream all events that match the specified event type. This returns an ``AsyncIterable[BaseEvent]`` which can be consumed through an ``async for`` loop. An optional ``num_events`` parameter can be passed to stop streaming after a maximum number of events has been received.
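A usage sketch, assuming an endpoint instance `bus` and an event class `MyEvent` that follow this library's BaseEvent conventions (both names are hypothetical):

async def consume(bus):
    # num_events=3 makes the stream stop after three matching events.
    async for event in bus.stream(MyEvent, num_events=3):
        print(event)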
def from_database(cls, database): """Initialize migrator by db.""" if isinstance(database, PostgresqlDatabase): return PostgresqlMigrator(database) if isinstance(database, SqliteDatabase): return SqliteMigrator(database) if isinstance(database, MySQLDatabase): return MySQLMigrator(database) return super(SchemaMigrator, cls).from_database(database)
Initialize migrator by db.
def c_Duffy(z, m, h=h): """Concentration from c(M) relation published in Duffy et al. (2008). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. References ---------- Results from N-body simulations using WMAP5 cosmology, presented in: A.R. Duffy, J. Schaye, S.T. Kay, and C. Dalla Vecchia, "Dark matter halo concentrations in the Wilkinson Microwave Anisotropy Probe year 5 cosmology," Monthly Notices of the Royal Astronomical Society, Volume 390, Issue 1, pp. L64-L68, 2008. This calculation uses the parameters corresponding to the NFW model, the '200' halo definition, and the 'full' sample of halos spanning z = 0-2. This means the values of fitted parameters (A,B,C) = (5.71, -0.084,-0.47) in Table 1 of Duffy et al. (2008). """ z, m = _check_inputs(z, m) M_pivot = 2.e12 / h # [M_solar] A = 5.71 B = -0.084 C = -0.47 concentration = A * ((m / M_pivot)**B) * (1 + z)**C return concentration
Concentration from c(M) relation published in Duffy et al. (2008). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. References ---------- Results from N-body simulations using WMAP5 cosmology, presented in: A.R. Duffy, J. Schaye, S.T. Kay, and C. Dalla Vecchia, "Dark matter halo concentrations in the Wilkinson Microwave Anisotropy Probe year 5 cosmology," Monthly Notices of the Royal Astronomical Society, Volume 390, Issue 1, pp. L64-L68, 2008. This calculation uses the parameters corresponding to the NFW model, the '200' halo definition, and the 'full' sample of halos spanning z = 0-2. This means the values of fitted parameters (A,B,C) = (5.71, -0.084,-0.47) in Table 1 of Duffy et al. (2008).
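A quick numerical check, assuming the module-level Planck13 default for h: at the pivot mass and z = 0 both power-law factors equal one, so c200 reduces to A.

# (m / M_pivot)**B == 1 and (1 + z)**C == 1, hence c200 == 5.71.
c = c_Duffy(z=0.0, m=2.0e12 / h)
# c ≈ 5.71 (possibly wrapped in an array by _check_inputs)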
def _runOPF(self): """ Computes dispatch points and LMPs using OPF. """ if self.decommit: solver = UDOPF(self.case, dc=(self.locationalAdjustment == "dc")) elif self.locationalAdjustment == "dc": solver = OPF(self.case, dc=True) else: solver = OPF(self.case, dc=False, opt={"verbose": True}) self._solution = solver.solve() # for ob in self.offers + self.bids: # ob.f = solution["f"] return self._solution["converged"]
Computes dispatch points and LMPs using OPF.
def get_init(self): """Return initial name. """ suffix = self._separator + "%s" % str(self._counter_init) return self._base_name + suffix
Return initial name.
def _get_dvs_portgroup(dvs, portgroup_name): ''' Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object ''' for portgroup in dvs.portgroup: if portgroup.name == portgroup_name: return portgroup return None
Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object
def get_device_model(): """ Returns the Device model that is active in this project. """ try: return apps.get_model(settings.GCM_DEVICE_MODEL) except ValueError: raise ImproperlyConfigured("GCM_DEVICE_MODEL must be of the form 'app_label.model_name'") except LookupError: raise ImproperlyConfigured( "GCM_DEVICE_MODEL refers to model '%s' that has not been installed" % settings.GCM_DEVICE_MODEL )
Returns the Device model that is active in this project.
def from_shapely(geometry, label=None): """ Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon or a Shapely GeometryCollection. This also creates all necessary Polygons contained by this MultiPolygon. Parameters ---------- geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\ or shapely.geometry.collection.GeometryCollection The object to convert to a MultiPolygon. label : None or str, optional A label assigned to all Polygons within the MultiPolygon. Returns ------- imgaug.MultiPolygon The derived MultiPolygon. """ # load shapely lazily, which makes the dependency more optional import shapely.geometry if isinstance(geometry, shapely.geometry.MultiPolygon): return MultiPolygon([Polygon.from_shapely(poly, label=label) for poly in geometry.geoms]) elif isinstance(geometry, shapely.geometry.Polygon): return MultiPolygon([Polygon.from_shapely(geometry, label=label)]) elif isinstance(geometry, shapely.geometry.collection.GeometryCollection): ia.do_assert(all([isinstance(poly, shapely.geometry.Polygon) for poly in geometry.geoms])) return MultiPolygon([Polygon.from_shapely(poly, label=label) for poly in geometry.geoms]) else: raise Exception("Unknown datatype '%s'. Expected shapely.geometry.Polygon or " "shapely.geometry.MultiPolygon or " "shapely.geometry.collections.GeometryCollection." % (type(geometry),))
Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon or a Shapely GeometryCollection. This also creates all necessary Polygons contained by this MultiPolygon. Parameters ---------- geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\ or shapely.geometry.collection.GeometryCollection The object to convert to a MultiPolygon. label : None or str, optional A label assigned to all Polygons within the MultiPolygon. Returns ------- imgaug.MultiPolygon The derived MultiPolygon.
def from_masked_images(cls: Type[T], masked_images: Iterable[np.ndarray], n_subjects: int) -> T: """Create a new instance of MaskedMultiSubjecData from masked images. Parameters ---------- masked_images : iterator Images from multiple subjects to stack along 3rd dimension n_subjects : int Number of subjects; must match the number of images Returns ------- T A new instance of MaskedMultiSubjectData Raises ------ ValueError Images have different shapes. The number of images differs from n_subjects. """ images_iterator = iter(masked_images) first_image = next(images_iterator) first_image_shape = first_image.T.shape result = np.empty((first_image_shape[0], first_image_shape[1], n_subjects)) for n_images, image in enumerate(itertools.chain([first_image], images_iterator)): image = image.T if image.shape != first_image_shape: raise ValueError("Image {} has different shape from first " "image: {} != {}".format(n_images, image.shape, first_image_shape)) result[:, :, n_images] = image n_images += 1 if n_images != n_subjects: raise ValueError("n_subjects != number of images: {} != {}" .format(n_subjects, n_images)) return result.view(cls)
Create a new instance of MaskedMultiSubjectData from masked images. Parameters ---------- masked_images : iterator Images from multiple subjects to stack along 3rd dimension n_subjects : int Number of subjects; must match the number of images Returns ------- T A new instance of MaskedMultiSubjectData Raises ------ ValueError Images have different shapes. The number of images differs from n_subjects.
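A shape sketch, assuming MaskedMultiSubjectData subclasses numpy.ndarray as the view() call implies; the sizes are illustrative:

import numpy as np

# Two subjects, each a (10 TRs x 4 voxels) masked image; the result is
# stacked as (voxels, TRs, subjects) == (4, 10, 2).
images = [np.random.rand(10, 4) for _ in range(2)]
data = MaskedMultiSubjectData.from_masked_images(images, n_subjects=2)
assert data.shape == (4, 10, 2)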
def store(self, name, data, version, size=0, compressed=False, digest=None, logical_size=None): """Adds a new file to the storage. If the file with the same name existed before, it's not guaranteed that the link for the old version will exist until the operation completes, but it's guaranteed that the link will never point to an invalid blob. Args: name: name of the file being stored. May contain slashes that are treated as path separators. data: binary file-like object with file contents. Files with unknown length are supported for compatibility with WSGI interface: ``size`` parameter should be passed in these cases. version: new file "version" Link modification time will be set to this timestamp. If the link exists, and its modification time is higher, the file is not overwritten. size: length of ``data`` in bytes If not 0, this takes priority over internal ``data`` size. compressed: whether ``data`` is gzip-compressed If True, the compression is skipped, and file is written as-is. Note that the current server implementation sends 'Content-Encoding' header anyway, mandating client to decompress the file. digest: SHA256 digest of the file before compression If specified, the digest will not be computed again, saving resources. logical_size: if ``data`` is gzip-compressed, this parameter has to be set to decompressed file size. """ with _exclusive_lock(self._lock_path('links', name)): logger.debug('Acquired lock to link for %s.', name) link_path = self._link_path(name) if _path_exists(link_path) and _file_version(link_path) > version: logger.info( 'Tried to store older version of %s (%d < %d), ignoring.', name, version, _file_version(link_path)) return _file_version(link_path) # data is managed by contents now, and shouldn't be used directly with _InputStreamWrapper(data, size) as contents: if digest is None or logical_size is None: contents.save() if compressed: # This shouldn't occur if the request came from a proper # filetracker client, so we don't care if it's slow. logger.warning( 'Storing compressed stream without hints.') with gzip.open( contents.current_path, 'rb') as decompressed: digest = file_digest(decompressed) with gzip.open( contents.current_path, 'rb') as decompressed: logical_size = _read_stream_for_size(decompressed) else: digest = file_digest(contents.current_path) logical_size = os.stat(contents.current_path).st_size blob_path = self._blob_path(digest) with _exclusive_lock(self._lock_path('blobs', digest)): logger.debug('Acquired lock for blob %s.', digest) digest_bytes = digest.encode() with self._db_transaction() as txn: logger.debug('Started DB transaction (adding link).') link_count = int(self.db.get(digest_bytes, 0, txn=txn)) new_count = str(link_count + 1).encode() self.db.put(digest_bytes, new_count, txn=txn) if link_count == 0: self.db.put( '{}:logical_size'.format(digest).encode(), str(logical_size).encode(), txn=txn) logger.debug('Commiting DB transaction (adding link).') logger.debug('Committed DB transaction (adding link).') # Create a new blob if this isn't a duplicate. if link_count == 0: logger.debug('Creating new blob.') _create_file_dirs(blob_path) if compressed: contents.save(blob_path) else: contents.save() with open(contents.current_path, 'rb') as raw,\ gzip.open(blob_path, 'wb') as blob: shutil.copyfileobj(raw, blob) logger.debug('Released lock for blob %s.', digest) if _path_exists(link_path): # Lend the link lock to delete(). # Note that DB lock has to be released in advance, otherwise # deadlock is possible in concurrent scenarios. 
logger.info('Overwriting existing link %s.', name) self.delete(name, version, _lock=False) _create_file_dirs(link_path) rel_blob_path = os.path.relpath(blob_path, os.path.dirname(link_path)) os.symlink(rel_blob_path, link_path) logger.debug('Created link %s.', name) lutime(link_path, version) return version logger.debug('Released lock for link %s.', name)
Adds a new file to the storage. If the file with the same name existed before, it's not guaranteed that the link for the old version will exist until the operation completes, but it's guaranteed that the link will never point to an invalid blob. Args: name: name of the file being stored. May contain slashes that are treated as path separators. data: binary file-like object with file contents. Files with unknown length are supported for compatibility with WSGI interface: ``size`` parameter should be passed in these cases. version: new file "version" Link modification time will be set to this timestamp. If the link exists, and its modification time is higher, the file is not overwritten. size: length of ``data`` in bytes If not 0, this takes priority over internal ``data`` size. compressed: whether ``data`` is gzip-compressed If True, the compression is skipped, and file is written as-is. Note that the current server implementation sends 'Content-Encoding' header anyway, mandating client to decompress the file. digest: SHA256 digest of the file before compression If specified, the digest will not be computed again, saving resources. logical_size: if ``data`` is gzip-compressed, this parameter has to be set to decompressed file size.
def play(self): """Starts a playback""" if self._proc.state() == QProcess.Running: if self.isPlaying is False: self._execute("pause") self._changePlayingState(True) elif self._filePath is not None: self._kill() self._run(self._filePath) self._changePlayingState(True)
Starts a playback
def nfa_word_acceptance(nfa: dict, word: list) -> bool: """ Checks if a given word is accepted by a NFA. The word w is accepted by a NFA if exists at least an accepting run on w. :param dict nfa: input NFA; :param list word: list of symbols ∈ nfa['alphabet']; :return: *(bool)*, True if the word is accepted, False otherwise. """ current_level = set() current_level = current_level.union(nfa['initial_states']) next_level = set() for action in word: for state in current_level: if (state, action) in nfa['transitions']: next_level.update(nfa['transitions'][state, action]) if len(next_level) < 1: return False current_level = next_level next_level = set() if current_level.intersection(nfa['accepting_states']): return True else: return False
Checks if a given word is accepted by an NFA. The word w is accepted by an NFA if there exists at least one accepting run on w. :param dict nfa: input NFA; :param list word: list of symbols ∈ nfa['alphabet']; :return: *(bool)*, True if the word is accepted, False otherwise.
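An illustrative NFA over {'a', 'b'} that accepts exactly the words ending in 'b':

nfa = {
    'alphabet': {'a', 'b'},
    'states': {'q0', 'q1'},
    'initial_states': {'q0'},
    'accepting_states': {'q1'},
    'transitions': {
        ('q0', 'a'): {'q0'},
        ('q0', 'b'): {'q0', 'q1'},
    },
}
assert nfa_word_acceptance(nfa, ['a', 'b']) is True
assert nfa_word_acceptance(nfa, ['b', 'a']) is False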
def beginning_offsets(self, partitions): """Get the first offset for the given partitions. This method does not change the current consumer position of the partitions. Note: This method may block indefinitely if the partition does not exist. Arguments: partitions (list): List of TopicPartition instances to fetch offsets for. Returns: ``{TopicPartition: int}``: The earliest available offsets for the given partitions. Raises: UnsupportedVersionError: If the broker does not support looking up the offsets by timestamp. KafkaTimeoutError: If fetch failed in request_timeout_ms. """ offsets = self._fetcher.beginning_offsets( partitions, self.config['request_timeout_ms']) return offsets
Get the first offset for the given partitions. This method does not change the current consumer position of the partitions. Note: This method may block indefinitely if the partition does not exist. Arguments: partitions (list): List of TopicPartition instances to fetch offsets for. Returns: ``{TopicPartition: int}``: The earliest available offsets for the given partitions. Raises: UnsupportedVersionError: If the broker does not support looking up the offsets by timestamp. KafkaTimeoutError: If fetch failed in request_timeout_ms.
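A usage sketch with kafka-python; the broker address and topic name are illustrative:

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="localhost:9092")
tp = TopicPartition("my-topic", 0)
offsets = consumer.beginning_offsets([tp])
# e.g. {TopicPartition(topic='my-topic', partition=0): 0}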
def write(path, pid, timestamp): """ Write the contents of a LockFile. Arguments: path (str): Path to lockfile. pid (int): The integer process ID. timestamp (datetime): The time the lock was acquired. """ with open(path, "w") as lockfile: print(pid, timestamp, file=lockfile)
Write the contents of a LockFile. Arguments: path (str): Path to lockfile. pid (int): The integer process ID. timestamp (datetime): The time the lock was acquired.
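A usage sketch; the lock path is illustrative:

import datetime
import os

write("/tmp/example.lock", os.getpid(), datetime.datetime.now())
# The file now holds e.g. "12345 2024-01-01 12:00:00.000000"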
def uniform_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1), normalized = True): """ mean filter of given size Parameters ---------- data: 2 or 3 dimensional ndarray or OCLArray of type float32 input data size: scalar, tuple the size of the patch to consider res_g: OCLArray store result in buffer if given sub_blocks: perform over subblock tiling (only if data is ndarray) normalized: bool if True, the filter corresponds to mean if False, the filter corresponds to sum Returns ------- filtered image or None (if OCLArray) """ if normalized: if np.isscalar(size): norm = size else: norm = np.int32(np.prod(size))**(1./len(size)) FUNC = "res+val/%s"%norm else: FUNC = "res+val" if data.ndim == 2: _filt = make_filter(_generic_filter_gpu_2d(FUNC=FUNC, DEFAULT="0")) elif data.ndim == 3: _filt = make_filter(_generic_filter_gpu_3d(FUNC=FUNC, DEFAULT="0")) res = _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks) return res
mean filter of given size Parameters ---------- data: 2 or 3 dimensional ndarray or OCLArray of type float32 input data size: scalar, tuple the size of the patch to consider res_g: OCLArray store result in buffer if given sub_blocks: perform over subblock tiling (only if data is ndarray) normalized: bool if True, the filter corresponds to mean if False, the filter corresponds to sum Returns ------- filtered image or None (if OCLArray)
def sendData(self, data): """Write data to server""" # Not set up yet if self.client_log is None: self.client_log_buffer.append(data) else: self.client_log.write(data) self.transport.write(data)
Write data to server
def get_options(self): """Get the options that have been set. Called after the user has added all their own options and is ready to use the variables. """ (options, args) = self.parser.parse_args() # Set values from .visdkrc, but only if they haven't already been set visdkrc_opts = self.read_visdkrc() for opt in self.config_vars: if not getattr(options, opt): # Try and use value from visdkrc if visdkrc_opts: if opt in visdkrc_opts: setattr(options, opt, visdkrc_opts[opt]) # Ensure all the required options are set for opt in self.required_opts: if opt not in dir(options) or getattr(options, opt) is None: self.parser.error('%s must be set!' % opt) return options
Get the options that have been set. Called after the user has added all their own options and is ready to use the variables.
def CreateDevice(self, device_address): '''Create a new device ''' device_name = 'dev_' + device_address.replace(':', '_').upper() adapter_path = self.path path = adapter_path + '/' + device_name if path not in mockobject.objects: raise dbus.exceptions.DBusException( 'Could not create device for %s.' % device_address, name='org.bluez.Error.Failed') adapter = mockobject.objects[self.path] adapter.EmitSignal(ADAPTER_IFACE, 'DeviceCreated', 'o', [dbus.ObjectPath(path, variant_level=1)]) return dbus.ObjectPath(path, variant_level=1)
Create a new device
def set_policy_alert_threshold(self, policy_ids, alert_threshold): """Set the alert threshold for the given policies.""" for policy_id in policy_ids: self.logger.debug('Setting alert threshold for policy {0} to {1}'.format(policy_id, alert_threshold)) result = self.zap.ascan.set_policy_alert_threshold(policy_id, alert_threshold) if result != 'OK': raise ZAPError('Error setting alert threshold for policy with ID {0}: {1}'.format(policy_id, result))
Set the alert threshold for the given policies.
def support_autoupload_param_password(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras") autoupload_param = ET.SubElement(support, "autoupload-param") password = ET.SubElement(autoupload_param, "password") password.text = kwargs.pop('password') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def window_features(idx, window_size=100, overlap=10): """ Generate indexes for a sliding window with overlap :param array idx: The indexes that need to be windowed. :param int window_size: The size of the window. :param int overlap: How much should each window overlap. :return array view: The indexes for the windows with overlap. """ overlap = window_size - overlap sh = (idx.size - window_size + 1, window_size) st = idx.strides * 2 view = np.lib.stride_tricks.as_strided(idx, strides=st, shape=sh)[0::overlap] return view
Generate indexes for a sliding window with overlap :param array idx: The indexes that need to be windowed. :param int window_size: The size of the window. :param int overlap: How much should each window overlap. :return array view: The indexes for the windows with overlap.
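A usage sketch with concrete numbers: a window of 10 indexes sliding by 5 (i.e. overlapping by 5):

import numpy as np

idx = np.arange(20)
view = window_features(idx, window_size=10, overlap=5)
# view[0] -> [0 .. 9], view[1] -> [5 .. 14], view[2] -> [10 .. 19]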
def make_future_info(first_sid, root_symbols, years, notice_date_func, expiration_date_func, start_date_func, month_codes=None, multiplier=500): """ Create a DataFrame representing futures for `root_symbols` during `year`. Generates a contract per triple of (symbol, year, month) supplied to `root_symbols`, `years`, and `month_codes`. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. notice_date_func : (Timestamp) -> Timestamp Function to generate notice dates from first of the month associated with asset month code. Return NaT to simulate futures with no notice date. expiration_date_func : (Timestamp) -> Timestamp Function to generate expiration dates from first of the month associated with asset month code. start_date_func : (Timestamp) -> Timestamp, optional Function to generate start dates from first of the month associated with each asset month code. Defaults to a start_date one year prior to the month_code date. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Returns ------- futures_info : pd.DataFrame DataFrame of futures data suitable for passing to an AssetDBWriter. """ if month_codes is None: month_codes = CMES_CODE_TO_MONTH year_strs = list(map(str, years)) years = [pd.Timestamp(s, tz='UTC') for s in year_strs] # Pairs of string/date like ('K06', 2006-05-01) contract_suffix_to_beginning_of_month = tuple( (month_code + year_str[-2:], year + MonthBegin(month_num)) for ((year, year_str), (month_code, month_num)) in product( zip(years, year_strs), iteritems(month_codes), ) ) contracts = [] parts = product(root_symbols, contract_suffix_to_beginning_of_month) for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid): contracts.append({ 'sid': sid, 'root_symbol': root_sym, 'symbol': root_sym + suffix, 'start_date': start_date_func(month_begin), 'notice_date': notice_date_func(month_begin), 'expiration_date': expiration_date_func(month_begin), 'multiplier': multiplier, 'exchange': "TEST", }) return pd.DataFrame.from_records(contracts, index='sid')
Create a DataFrame representing futures for `root_symbols` during `year`. Generates a contract per triple of (symbol, year, month) supplied to `root_symbols`, `years`, and `month_codes`. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. notice_date_func : (Timestamp) -> Timestamp Function to generate notice dates from first of the month associated with asset month code. Return NaT to simulate futures with no notice date. expiration_date_func : (Timestamp) -> Timestamp Function to generate expiration dates from first of the month associated with asset month code. start_date_func : (Timestamp) -> Timestamp, optional Function to generate start dates from first of the month associated with each asset month code. Defaults to a start_date one year prior to the month_code date. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Returns ------- futures_info : pd.DataFrame DataFrame of futures data suitable for passing to an AssetDBWriter.
def __dbfHeader(self): """Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger""" if not self.dbf: raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)") dbf = self.dbf headerLength = self.__dbfHeaderLength() numFields = (headerLength - 33) // 32 for field in range(numFields): fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32))) name = 0 idx = 0 if b("\x00") in fieldDesc[name]: idx = fieldDesc[name].index(b("\x00")) else: idx = len(fieldDesc[name]) - 1 fieldDesc[name] = fieldDesc[name][:idx] fieldDesc[name] = u(fieldDesc[name]) fieldDesc[name] = fieldDesc[name].lstrip() fieldDesc[1] = u(fieldDesc[1]) self.fields.append(fieldDesc) terminator = dbf.read(1) assert terminator == b("\r") self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger
def _append_zeros_if_too_small(self, value): """ Appends zeros to the points stored if the value we are trying to fit is bigger """ size_diff = len(value) - len(self.array) if size_diff > 0: self.array = np.append( self.array, np.zeros(size_diff, dtype=self.array.dtype) )
Appends zeros to the points stored if the value we are trying to fit is bigger
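A standalone numpy sketch of the same padding step; all names are illustrative:

import numpy as np

arr = np.zeros(3)
value = np.ones(5)
size_diff = len(value) - len(arr)
if size_diff > 0:  # only grow, never shrink
    arr = np.append(arr, np.zeros(size_diff, dtype=arr.dtype))
assert len(arr) == len(value)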
def _check_module_attrs(self, node, module, module_names): """check that module_names (list of string) are accessible through the given module if the latest access name corresponds to a module, return it """ assert isinstance(module, astroid.Module), module while module_names: name = module_names.pop(0) if name == "__dict__": module = None break try: module = next(module.getattr(name)[0].infer()) if module is astroid.Uninferable: return None except astroid.NotFoundError: if module.name in self._ignored_modules: return None self.add_message( "no-name-in-module", args=(name, module.name), node=node ) return None except astroid.InferenceError: return None if module_names: # FIXME: other message if name is not the latest part of # module_names ? modname = module.name if module else "__dict__" self.add_message( "no-name-in-module", node=node, args=(".".join(module_names), modname) ) return None if isinstance(module, astroid.Module): return module return None
check that module_names (list of string) are accessible through the given module if the latest access name corresponds to a module, return it
def reorder(self, dst_order, arr, src_order=None): """Reorder the output array to match that needed by the viewer.""" if dst_order is None: dst_order = self.viewer.rgb_order if src_order is None: src_order = self.rgb_order if src_order != dst_order: arr = trcalc.reorder_image(dst_order, arr, src_order) return arr
Reorder the output array to match that needed by the viewer.
def _hrf_kernel(hrf_model, tr, oversampling=50, fir_delays=None): """ Given the specification of the hemodynamic model and time parameters, return the list of matching kernels Parameters ---------- hrf_model : string or None, identifier of the hrf model tr : float the repetition time in seconds oversampling : int, optional temporal oversampling factor to have a smooth hrf fir_delays : list of floats, list of delays for finite impulse response models Returns ------- hkernel : list of arrays samples of the hrf (the number depends on the hrf_model used) """ acceptable_hrfs = [ 'spm', 'spm + derivative', 'spm + derivative + dispersion', 'fir', 'glover', 'glover + derivative', 'glover + derivative + dispersion', None] if hrf_model == 'spm': hkernel = [spm_hrf(tr, oversampling)] elif hrf_model == 'spm + derivative': hkernel = [spm_hrf(tr, oversampling), spm_time_derivative(tr, oversampling)] elif hrf_model == 'spm + derivative + dispersion': hkernel = [spm_hrf(tr, oversampling), spm_time_derivative(tr, oversampling), spm_dispersion_derivative(tr, oversampling)] elif hrf_model == 'glover': hkernel = [glover_hrf(tr, oversampling)] elif hrf_model == 'glover + derivative': hkernel = [glover_hrf(tr, oversampling), glover_time_derivative(tr, oversampling)] elif hrf_model == 'glover + derivative + dispersion': hkernel = [glover_hrf(tr, oversampling), glover_time_derivative(tr, oversampling), glover_dispersion_derivative(tr, oversampling)] elif hrf_model == 'fir': hkernel = [np.hstack((np.zeros(f * oversampling), np.ones(oversampling))) for f in fir_delays] elif hrf_model is None: hkernel = [np.hstack((1, np.zeros(oversampling - 1)))] else: raise ValueError('"{0}" is not a known hrf model. Use one of {1}'. format(hrf_model, acceptable_hrfs)) return hkernel
Given the specification of the hemodynamic model and time parameters, return the list of matching kernels Parameters ---------- hrf_model : string or None, identifier of the hrf model tr : float the repetition time in seconds oversampling : int, optional temporal oversampling factor to have a smooth hrf fir_delays : list of floats, list of delays for finite impulse response models Returns ------- hkernel : list of arrays samples of the hrf (the number depends on the hrf_model used)
def send_script_sync(self, conn_id, data, progress_callback): """Synchronously send a script to this IOTile device Args: conn_id (int): A unique identifier that will refer to this connection data (string): the script to send to the device progress_callback (callable): A function to be called with status on our progress, called as: progress_callback(done_count, total_count) Returns: dict: a dict with the following two entries set 'success': a bool indicating whether we received a response to our attempted RPC 'failure_reason': a string with the reason for the failure if success == False """ done = threading.Event() result = {} def send_script_done(conn_id, adapter_id, status, reason): result['success'] = status result['failure_reason'] = reason done.set() self.send_script_async(conn_id, data, progress_callback, send_script_done) done.wait() return result
Synchronously send a script to this IOTile device Args: conn_id (int): A unique identifier that will refer to this connection data (string): the script to send to the device progress_callback (callable): A function to be called with status on our progress, called as: progress_callback(done_count, total_count) Returns: dict: a dict with the following two entries set 'success': a bool indicating whether we received a response to our attempted RPC 'failure_reason': a string with the reason for the failure if success == False
def findWalkthrough(self, name): """ Looks up the walkthrough based on the given name. :param name | <str> """ for walkthrough in self._walkthroughs: if walkthrough.name() == name: return walkthrough return None
Looks up the walkthrough based on the given name. :param name | <str>
def df_representative_structures(self): """DataFrame: Get a dataframe of representative protein structure information.""" rep_struct_pre_df = [] df_cols = ['gene', 'id', 'is_experimental', 'file_type', 'structure_file'] for g in self.genes_with_a_representative_structure: repdict = g.protein.representative_structure.get_dict(df_format=True, only_attributes=df_cols) repdict['gene'] = g.id rep_struct_pre_df.append(repdict) df = pd.DataFrame.from_records(rep_struct_pre_df, columns=df_cols).set_index('gene') if df.empty: log.warning('Empty dataframe') return df else: return ssbio.utils.clean_df(df)
DataFrame: Get a dataframe of representative protein structure information.
def r(self, **kwargs): """ Resolve the object. This returns default (if present) or fails on an Empty. """ # by using kwargs we ensure that usage of positional arguments, as if # this object were another kind of function, will fail-fast and raise # a TypeError if 'default' in kwargs: default = kwargs.pop('default') if kwargs: raise TypeError( "Unexpected argument: {}".format(repr(next(iter(kwargs)))) ) return default else: raise JSaneException( "Key does not exist: {}".format(repr(self._key_name)) )
Resolve the object. This returns default (if present) or fails on an Empty.
def delete(cls, cert_id, background=False): """ Delete a certificate.""" result = cls.call('cert.delete', cert_id) if background: return result cls.echo("Deleting your certificate.") cls.display_progress(result) cls.echo('Your certificate %s has been deleted.' % cert_id) return result
Delete a certificate.
def setModelData(self, editor, model, index): """ Gets data from the editor widget and stores it in the specified model at the item index. Does this by calling getEditorValue of the config tree item at the index. :type editor: QWidget :type model: ConfigTreeModel :type index: QModelIndex Reimplemented from QStyledItemDelegate. """ try: data = editor.getData() except InvalidInputError as ex: logger.warn(ex) else: # The value is set via the model so that signals are emitted logger.debug("ConfigItemDelegate.setModelData: {}".format(data)) model.setData(index, data, Qt.EditRole)
Gets data from the editor widget and stores it in the specified model at the item index. Does this by calling getEditorValue of the config tree item at the index. :type editor: QWidget :type model: ConfigTreeModel :type index: QModelIndex Reimplemented from QStyledItemDelegate.
def get_customer_group_by_id(cls, customer_group_id, **kwargs): """Find CustomerGroup Return single instance of CustomerGroup by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_customer_group_by_id(customer_group_id, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to return (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_customer_group_by_id_with_http_info(customer_group_id, **kwargs) else: (data) = cls._get_customer_group_by_id_with_http_info(customer_group_id, **kwargs) return data
Find CustomerGroup Return single instance of CustomerGroup by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_customer_group_by_id(customer_group_id, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to return (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread.
def findTextBackward(self, block, column, needle): """Search for a needle and return (block, column) Raise ValueError, if not found """ if column is not None: index = block.text()[:column].rfind(needle) else: index = block.text().rfind(needle) if index != -1: return block, index for block in self.iterateBlocksBackFrom(block.previous()): column = block.text().rfind(needle) if column != -1: return block, column raise ValueError('Not found')
Search for a needle and return (block, column) Raise ValueError, if not found
def get_key_names(self): """ Gets keys of all elements stored in this map. :return: a list with all map keys. """ names = [] for (k, _) in self.items(): names.append(k) return names
Gets keys of all elements stored in this map. :return: a list with all map keys.
def user_add_link(self): ''' Create link by user. ''' if self.check_post_role()['ADD']: pass else: return False post_data = self.get_post_data() post_data['user_name'] = self.get_current_user() cur_uid = tools.get_uudd(2) while MLink.get_by_uid(cur_uid): cur_uid = tools.get_uudd(2) MLink.create_link(cur_uid, post_data) self.redirect('/link/list')
Create link by user.
def with_ascendants_for_slug(self, slug, **kwargs): """ Given a slug, returns a list of pages from ascendants to descendants, that form the parent/child page relationships for that slug. The main concern is to do this in a single database query rather than querying the database for parents of a given page. Primarily used in ``PageMiddleware`` to provide the current page, which in the case of non-page views, won't match the slug exactly, but will likely match a page that has been created for linking to the entry point for the app, eg the blog page when viewing blog posts. Also used within ``Page.get_ascendants``, which gets called in the ``pages.views`` view, for building a list of possible templates that can be used for the page. If a valid chain of pages is found, we also assign the pages to the ``page._ascendants`` attr of the main/first/deepest page, so that when its ``get_ascendants`` method is called, the ascendants chain can be re-used without querying the database again. This occurs at least once, given the second use-case described above. """ if slug == "/": slugs = [home_slug()] else: # Create a list of slugs within this slug, # eg: ['about', 'about/team', 'about/team/mike'] parts = slug.split("/") slugs = ["/".join(parts[:i]) for i in range(1, len(parts) + 1)] # Find the deepest page that matches one of our slugs. # Sorting by "-slug" should ensure that the pages are in # descendant -> ascendant order. pages_for_user = self.published(**kwargs) pages = list(pages_for_user.filter(slug__in=slugs).order_by("-slug")) if not pages: return [] # Check to see if the other pages retrieved form a valid path # in the page tree, i.e. pages[0].parent == pages[1], # pages[1].parent == pages[2], and so on. If they do, assign # the ascendants to the main/first/deepest page, so that it # can be re-used on calls to its get_ascendants method. pages[0]._ascendants = [] for i, page in enumerate(pages): try: parent = pages[i + 1] except IndexError: # IndexError indicates that this is the last page in # the list, so it should have no parent. if page.parent_id: break # Invalid parent else: if page.parent_id != parent.id: break # Invalid parent else: # Valid parents pages[0]._ascendants = pages[1:] return pages
Given a slug, returns a list of pages from ascendants to descendants, that form the parent/child page relationships for that slug. The main concern is to do this in a single database query rather than querying the database for parents of a given page. Primarily used in ``PageMiddleware`` to provide the current page, which in the case of non-page views, won't match the slug exactly, but will likely match a page that has been created for linking to the entry point for the app, eg the blog page when viewing blog posts. Also used within ``Page.get_ascendants``, which gets called in the ``pages.views`` view, for building a list of possible templates that can be used for the page. If a valid chain of pages is found, we also assign the pages to the ``page._ascendants`` attr of the main/first/deepest page, so that when its ``get_ascendants`` method is called, the ascendants chain can be re-used without querying the database again. This occurs at least once, given the second use-case described above.
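The slug-expansion step above can be exercised on its own; a minimal sketch of just the pure-string logic (the Django queryset parts are omitted):

def ancestor_slugs(slug):
    # Expand "about/team/mike" into every ancestor slug, shallowest
    # first, exactly the list the slug__in filter above expects.
    parts = slug.split("/")
    return ["/".join(parts[:i]) for i in range(1, len(parts) + 1)]

print(ancestor_slugs("about/team/mike"))
# ['about', 'about/team', 'about/team/mike']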
def scale(self, w=1.0, h=1.0):
    """Resizes the layer to the given width and height.
    When the width w or height h is a floating-point number,
    scales proportionally; otherwise scales to the given size in pixels.
    """
    from types import FloatType
    w0, h0 = self.img.size
    if type(w) == FloatType: w = int(w*w0)
    if type(h) == FloatType: h = int(h*h0)
    self.img = self.img.resize((w,h), INTERPOLATION)
    self.w = w
    self.h = h
Resizes the layer to the given width and height. When the width w or height h is a floating-point number, scales proportionally; otherwise scales to the given size in pixels.
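A hedged sketch of the float-versus-int dispatch, assuming Pillow is installed and using Image.BILINEAR as a stand-in for the module-level INTERPOLATION constant:

from PIL import Image

INTERPOLATION = Image.BILINEAR  # assumption: any Pillow resampling filter

img = Image.new("RGB", (200, 100))
w, h = 0.5, 80                  # float -> fraction of size, int -> pixels
w0, h0 = img.size
w = int(w * w0) if isinstance(w, float) else w
h = int(h * h0) if isinstance(h, float) else h
resized = img.resize((w, h), INTERPOLATION)
print(resized.size)             # (100, 80)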
def base_style(self): """ Sibling CT_Style element this style is based on or |None| if no base style or base style not found. """ basedOn = self.basedOn if basedOn is None: return None styles = self.getparent() base_style = styles.get_by_id(basedOn.val) if base_style is None: return None return base_style
Sibling CT_Style element this style is based on or |None| if no base style or base style not found.
def configure(configFile=None, baseConfig="ProductionConfig",
              port=8000, extraConfig={}):
    """
    Configures the Flask application: loads settings from `baseConfig`,
    the GA4GH_CONFIGURATION environment variable, an optional config file
    and `extraConfig` (in that order, later sources overriding earlier
    ones), then initializes logging, the file handle cache, CORS, the
    backend, the response cache, peer networking and, if configured,
    Auth0/OIDC authentication. Assumes the module-level Flask `app`.
    """
    file_handler = StreamHandler()
    file_handler.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)
    configStr = 'ga4gh.server.serverconfig:{0}'.format(baseConfig)
    app.config.from_object(configStr)
    if os.environ.get('GA4GH_CONFIGURATION') is not None:
        app.config.from_envvar('GA4GH_CONFIGURATION')
    if configFile is not None:
        app.config.from_pyfile(configFile)
    app.config.update(extraConfig.items())
    # Setup file handle cache max size
    datamodel.fileHandleCache.setMaxCacheSize(
        app.config["FILE_HANDLE_CACHE_MAX_SIZE"])
    # Setup CORS
    try:
        cors.CORS(app, allow_headers='Content-Type')
    except AssertionError:
        pass
    app.serverStatus = ServerStatus()
    app.backend = _configure_backend(app)
    if app.config.get('SECRET_KEY'):
        app.secret_key = app.config['SECRET_KEY']
    elif app.config.get('OIDC_PROVIDER'):
        raise exceptions.ConfigurationException(
            'OIDC configuration requires a secret key')
    if app.config.get('CACHE_DIRECTORY'):
        app.cache_dir = app.config['CACHE_DIRECTORY']
    else:
        app.cache_dir = '/tmp/ga4gh'
    app.cache = FileSystemCache(
        app.cache_dir, threshold=5000, default_timeout=600, mode=384)
    # Peer service initialization
    network.initialize(
        app.config.get('INITIAL_PEERS'),
        app.backend.getDataRepository(),
        app.logger)
    app.oidcClient = None
    app.myPort = port
    if app.config.get('AUTH0_ENABLED'):
        emails = app.config.get('AUTH0_AUTHORIZED_EMAILS', '').split(',')
        [auth.authorize_email(e, app.cache) for e in emails]
    if "OIDC_PROVIDER" in app.config:
        # The oic client. If we're testing, we don't want to verify
        # SSL certificates
        app.oidcClient = oic.oic.Client(
            verify_ssl=('TESTING' not in app.config))
        try:
            app.oidcClient.provider_config(app.config['OIDC_PROVIDER'])
        except requests.exceptions.ConnectionError:
            configResponse = message.ProviderConfigurationResponse(
                issuer=app.config['OIDC_PROVIDER'],
                authorization_endpoint=app.config['OIDC_AUTHZ_ENDPOINT'],
                token_endpoint=app.config['OIDC_TOKEN_ENDPOINT'],
                revocation_endpoint=app.config['OIDC_TOKEN_REV_ENDPOINT'])
            app.oidcClient.handle_provider_config(
                configResponse, app.config['OIDC_PROVIDER'])
        # The redirect URI comes from the configuration.
        # If we are testing, then we allow the automatic creation of a
        # redirect uri if none is configured
        redirectUri = app.config.get('OIDC_REDIRECT_URI')
        if redirectUri is None and app.config.get('TESTING'):
            redirectUri = 'https://{0}:{1}/oauth2callback'.format(
                socket.gethostname(), app.myPort)
        if redirectUri is None:
            raise exceptions.ConfigurationException(
                'OIDC configuration requires a redirect uri')
        app.oidcClient.redirect_uris = [redirectUri]
        # We only support dynamic registration while testing.
        if ('registration_endpoint' in app.oidcClient.provider_info and
                app.config.get('TESTING')):
            app.oidcClient.register(
                app.oidcClient.provider_info["registration_endpoint"],
                redirect_uris=[redirectUri])
        else:
            response = message.RegistrationResponse(
                client_id=app.config['OIDC_CLIENT_ID'],
                client_secret=app.config['OIDC_CLIENT_SECRET'],
                redirect_uris=[redirectUri],
                verify_ssl=False)
            app.oidcClient.store_registration_info(response)
Configures the Flask application: loads settings from `baseConfig`, the GA4GH_CONFIGURATION environment variable, an optional config file and `extraConfig` (in that order, later sources overriding earlier ones), then initializes logging, the file handle cache, CORS, the backend, the response cache, peer networking and, if configured, Auth0/OIDC authentication. Assumes the module-level Flask `app`.
def force_atlas2_layout(graph, pos_list=None, node_masses=None, iterations=100,
                        outbound_attraction_distribution=False, lin_log_mode=False,
                        prevent_overlapping=False, edge_weight_influence=1.0,
                        jitter_tolerance=1.0, barnes_hut_optimize=False,
                        barnes_hut_theta=1.2, scaling_ratio=2.0,
                        strong_gravity_mode=False, multithread=False, gravity=1.0):
    """
    Position nodes using the ForceAtlas2 force-directed algorithm

    Parameters
    ----------
    graph: NetworkX graph
        A position will be assigned to every node in G.
    pos_list : dict or None, optional (default=None)
        Initial positions for nodes as a dictionary with node as keys and
        values as a coordinate list or tuple. If None, then use random
        initial positions.
    node_masses : dict or None, optional (default=None)
        Predefined masses for nodes with node as keys and masses as values.
        If None, then use the degree of nodes.
    iterations : int, optional (default=100)
        Number of iterations
    outbound_attraction_distribution : boolean
        Distributes attraction along outbound edges. Hubs attract less and
        thus are pushed to the borders. This mode is meant to grant
        authorities (nodes with a high indegree) a more central position
        than hubs (nodes with a high outdegree). This is useful for social
        networks and web networks, where authorities are sometimes
        considered more important than hubs.
    lin_log_mode: boolean
        Switch the ForceAtlas model from lin-lin to lin-log (tribute to
        Andreas Noack). Makes clusters tighter.
    prevent_overlapping: boolean
        With this mode enabled, the repulsion is modified so that the nodes
        do not overlap. The goal is to produce a more readable and
        aesthetically pleasing image.
    edge_weight_influence: float
        How much influence you give to the edge weights. 0 is “no
        influence” and 1 is “normal”.
    jitter_tolerance: float
        How much swinging you allow. Above 1 is discouraged. Lower gives
        less speed and more precision.
    barnes_hut_optimize: boolean
        Barnes-Hut optimization: n² complexity to n.ln(n); allows larger
        graphs.
    barnes_hut_theta: float
        Theta of the Barnes-Hut optimization.
    scaling_ratio: float
        How much repulsion you want. More makes a more sparse graph.
    strong_gravity_mode: boolean
        The “Strong gravity” option sets a force that attracts the nodes
        that are distant from the center more (d is this distance). This
        force has the drawback of being so strong that it is sometimes
        stronger than the other forces. It may result in a biased placement
        of the nodes. However, its advantage is to force a very compact
        layout, which may be useful for certain purposes.
    multithread: boolean
    gravity: float
        Attracts nodes to the center. Prevents islands from drifting away.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node
    """
    assert isinstance(graph, networkx.classes.graph.Graph), "Not a networkx graph"
    assert isinstance(pos_list, dict) or (pos_list is None), \
        "pos must be specified as a dictionary, as in networkx"
    assert multithread is False, "Not implemented yet"

    G = numpy.asarray(networkx.to_numpy_matrix(graph))
    pos = None
    if pos_list is not None:
        pos = numpy.asarray([pos_list[i] for i in graph.nodes()])
    masses = None
    if node_masses is not None:
        masses = numpy.asarray([node_masses[node] for node in graph.nodes()])

    assert G.shape == (G.shape[0], G.shape[0]), "G is not 2D square"
    assert numpy.all(G.T == G), "G is not symmetric."

    # speed and speed efficiency describe a scaling factor of dx and dy
    # before x and y are adjusted. These are modified as the
    # algorithm runs to help ensure convergence.
    speed = 1
    speed_efficiency = 1

    nodes = []
    for i in range(0, G.shape[0]):
        n = Node()
        if node_masses is None:
            n.mass = 1 + numpy.count_nonzero(G[i])
        else:
            n.mass = masses[i]
        n.old_dx = 0
        n.old_dy = 0
        n.dx = 0
        n.dy = 0
        if pos is None:
            n.x = random.random()
            n.y = random.random()
        else:
            n.x = pos[i][0]
            n.y = pos[i][1]
        nodes.append(n)

    edges = []
    es = numpy.asarray(G.nonzero()).T
    for e in es:
        if e[1] <= e[0]:
            continue  # Avoid duplicate edges
        edge = Edge()
        edge.node1 = e[0]  # The index of the first node in `nodes`
        edge.node2 = e[1]  # The index of the second node in `nodes`
        edge.weight = G[tuple(e)]
        edges.append(edge)

    repulsion = get_repulsion(prevent_overlapping, scaling_ratio)
    if strong_gravity_mode:
        gravity_force = get_strong_gravity(scaling_ratio)
    else:
        gravity_force = repulsion
    if outbound_attraction_distribution:
        outbound_att_compensation = numpy.mean([n.mass for n in nodes])
    attraction_coef = outbound_att_compensation if outbound_attraction_distribution else 1
    attraction = get_attraction(lin_log_mode, outbound_attraction_distribution,
                                prevent_overlapping, attraction_coef)

    # Main loop
    for _i in range(0, iterations):
        for n in nodes:
            n.old_dx = n.dx
            n.old_dy = n.dy
            n.dx = 0
            n.dy = 0

        # Barnes-Hut optimization
        root_region = None
        if barnes_hut_optimize:
            root_region = Quadtree(nodes)
            root_region.build()

        apply_repulsion(repulsion, nodes, barnes_hut_optimize=barnes_hut_optimize,
                        barnes_hut_theta=barnes_hut_theta, region=root_region)
        apply_gravity(gravity_force, nodes, gravity, scaling_ratio)
        apply_attraction(attraction, nodes, edges, edge_weight_influence)

        # Auto adjust speed.
        total_swinging = 0.0  # How much irregular movement
        total_effective_traction = 0.0  # How much useful movement
        for n in nodes:
            swinging = math.sqrt((n.old_dx - n.dx) * (n.old_dx - n.dx) +
                                 (n.old_dy - n.dy) * (n.old_dy - n.dy))
            total_swinging += n.mass * swinging
            total_effective_traction += .5 * n.mass * math.sqrt(
                (n.old_dx + n.dx) * (n.old_dx + n.dx) +
                (n.old_dy + n.dy) * (n.old_dy + n.dy))

        # Optimize jitter tolerance.
        # The 'right' jitter tolerance for this network.
        # Bigger networks need more tolerance. Denser networks need less
        # tolerance. Totally empiric.
        estimated_optimal_jitter_tolerance = .05 * math.sqrt(len(nodes))
        min_jt = math.sqrt(estimated_optimal_jitter_tolerance)
        max_jt = 10
        jt = jitter_tolerance * max(
            min_jt,
            min(max_jt,
                estimated_optimal_jitter_tolerance *
                total_effective_traction / (len(nodes) ** 2)))
        min_speed_efficiency = 0.05

        # Protect against erratic behavior
        if total_swinging / total_effective_traction > 2.0:
            if speed_efficiency > min_speed_efficiency:
                speed_efficiency *= .5
            jt = max(jt, jitter_tolerance)

        target_speed = jt * speed_efficiency * total_effective_traction / total_swinging

        if total_swinging > jt * total_effective_traction:
            if speed_efficiency > min_speed_efficiency:
                speed_efficiency *= .7
        elif speed < 1000:
            speed_efficiency *= 1.3

        # But the speed shouldn't rise too much too quickly, since it would
        # make the convergence drop dramatically.
        max_rise = .5
        speed = speed + min(target_speed - speed, max_rise * speed)

        # Apply forces.
        if prevent_overlapping:
            for n in nodes:
                swinging = n.mass * math.sqrt(
                    (n.old_dx - n.dx) * (n.old_dx - n.dx) +
                    (n.old_dy - n.dy) * (n.old_dy - n.dy))
                factor = 0.1 * speed / (1 + math.sqrt(speed * swinging))
                df = math.sqrt(math.pow(n.dx, 2) + n.dy ** 2)
                factor = min(factor * df, 10.) / df
                # apply the clamped displacement
                n.x = n.x + (n.dx * factor)
                n.y = n.y + (n.dy * factor)
        else:
            for n in nodes:
                swinging = n.mass * math.sqrt(
                    (n.old_dx - n.dx) * (n.old_dx - n.dx) +
                    (n.old_dy - n.dy) * (n.old_dy - n.dy))
                factor = speed / (1.0 + math.sqrt(speed * swinging))
                n.x = n.x + (n.dx * factor)
                n.y = n.y + (n.dy * factor)

    positions = [(n.x, n.y) for n in nodes]
    return dict(zip(graph.nodes(), positions))
Position nodes using the ForceAtlas2 force-directed algorithm Parameters ---------- graph: NetworkX graph A position will be assigned to every node in G. pos_list : dict or None, optional (default=None) Initial positions for nodes as a dictionary with node as keys and values as a coordinate list or tuple. If None, then use random initial positions. node_masses : dict or None, optional (default=None) Predefined masses for nodes with node as keys and masses as values. If None, then use the degree of nodes. iterations : int, optional (default=100) Number of iterations outbound_attraction_distribution : boolean Distributes attraction along outbound edges. Hubs attract less and thus are pushed to the borders. This mode is meant to grant authorities (nodes with a high indegree) a more central position than hubs (nodes with a high outdegree). This is useful for social networks and web networks, where authorities are sometimes considered more important than hubs. lin_log_mode: boolean Switch the ForceAtlas model from lin-lin to lin-log (tribute to Andreas Noack). Makes clusters tighter. prevent_overlapping: boolean With this mode enabled, the repulsion is modified so that the nodes do not overlap. The goal is to produce a more readable and aesthetically pleasing image. edge_weight_influence: float How much influence you give to the edge weights. 0 is “no influence” and 1 is “normal”. jitter_tolerance: float How much swinging you allow. Above 1 is discouraged. Lower gives less speed and more precision. barnes_hut_optimize: boolean Barnes-Hut optimization: n² complexity to n.ln(n); allows larger graphs. barnes_hut_theta: float Theta of the Barnes-Hut optimization. scaling_ratio: float How much repulsion you want. More makes a more sparse graph. strong_gravity_mode: boolean The “Strong gravity” option sets a force that attracts the nodes that are distant from the center more (d is this distance). This force has the drawback of being so strong that it is sometimes stronger than the other forces. It may result in a biased placement of the nodes. However, its advantage is to force a very compact layout, which may be useful for certain purposes. multithread: boolean gravity: float Attracts nodes to the center. Prevents islands from drifting away. Returns ------- pos : dict A dictionary of positions keyed by node
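A hedged usage sketch, assuming the function above and its module-level helpers (Node, Edge, the force factories) are importable, and that networkx < 3.0 is installed (networkx.to_numpy_matrix was removed in 3.0):

import networkx

G = networkx.karate_club_graph()
pos = force_atlas2_layout(G, iterations=100, scaling_ratio=2.0, gravity=1.0)

# pos maps each node to an (x, y) tuple, directly usable for drawing:
# networkx.draw(G, pos=pos)
print(len(pos) == G.number_of_nodes())  # True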
def _get_gecos(name, root=None): ''' Retrieve GECOS field info and return it in dictionary form ''' if root is not None and __grains__['kernel'] != 'AIX': getpwnam = functools.partial(_getpwnam, root=root) else: getpwnam = functools.partial(pwd.getpwnam) gecos_field = salt.utils.stringutils.to_unicode( getpwnam(_quote_username(name)).pw_gecos).split(',', 4) if not gecos_field: return {} else: # Assign empty strings for any unspecified trailing GECOS fields while len(gecos_field) < 5: gecos_field.append('') return {'fullname': salt.utils.data.decode(gecos_field[0]), 'roomnumber': salt.utils.data.decode(gecos_field[1]), 'workphone': salt.utils.data.decode(gecos_field[2]), 'homephone': salt.utils.data.decode(gecos_field[3]), 'other': salt.utils.data.decode(gecos_field[4])}
Retrieve GECOS field info and return it in dictionary form
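The split-and-pad logic can be shown without pwd or the Salt helpers; a self-contained sketch:

def parse_gecos(pw_gecos):
    # Split the raw GECOS string into its five conventional fields,
    # padding missing trailing fields with empty strings.
    fields = pw_gecos.split(',', 4)
    fields += [''] * (5 - len(fields))
    keys = ('fullname', 'roomnumber', 'workphone', 'homephone', 'other')
    return dict(zip(keys, fields))

print(parse_gecos('Jane Doe,42'))
# {'fullname': 'Jane Doe', 'roomnumber': '42', 'workphone': '',
#  'homephone': '', 'other': ''}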
def encrypted_score(self, x): """Compute the score of `x` by multiplying with the encrypted model, which is a vector of `paillier.EncryptedNumber`""" score = self.intercept _, idx = x.nonzero() for i in idx: score += x[0, i] * self.weights[i] return score
Compute the score of `x` by multiplying with the encrypted model, which is a vector of `paillier.EncryptedNumber`
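The access pattern is an intercept plus a sparse dot product; a sketch with plain floats standing in for the paillier.EncryptedNumber weights (x is a 1 x n scipy sparse row, as the x.nonzero()/x[0, i] indexing implies):

import numpy as np
from scipy.sparse import csr_matrix

intercept = 0.1
weights = np.array([0.5, -1.0, 2.0])          # would be encrypted numbers
x = csr_matrix(np.array([[3.0, 0.0, 1.0]]))   # one sparse feature row

score = intercept
_, idx = x.nonzero()              # column indices of nonzero features
for i in idx:
    score += x[0, i] * weights[i]
print(score)                      # 0.1 + 3.0*0.5 + 1.0*2.0 = 3.6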
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): """Return a dictionary that maps the items in *iterable* to categories defined by *keyfunc*, transforms them with *valuefunc*, and then summarizes them by category with *reducefunc*. *valuefunc* defaults to the identity function if it is unspecified. If *reducefunc* is unspecified, no summarization takes place: >>> keyfunc = lambda x: x.upper() >>> result = map_reduce('abbccc', keyfunc) >>> sorted(result.items()) [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] Specifying *valuefunc* transforms the categorized items: >>> keyfunc = lambda x: x.upper() >>> valuefunc = lambda x: 1 >>> result = map_reduce('abbccc', keyfunc, valuefunc) >>> sorted(result.items()) [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] Specifying *reducefunc* summarizes the categorized items: >>> keyfunc = lambda x: x.upper() >>> valuefunc = lambda x: 1 >>> reducefunc = sum >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) >>> sorted(result.items()) [('A', 1), ('B', 2), ('C', 3)] You may want to filter the input iterable before applying the map/reduce procedure: >>> all_items = range(30) >>> items = [x for x in all_items if 10 <= x <= 20] # Filter >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 >>> categories = map_reduce(items, keyfunc=keyfunc) >>> sorted(categories.items()) [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) >>> sorted(summaries.items()) [(0, 90), (1, 75)] Note that all items in the iterable are gathered into a list before the summarization step, which may require significant storage. The returned object is a :obj:`collections.defaultdict` with the ``default_factory`` set to ``None``, such that it behaves like a normal dictionary. """ valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc ret = defaultdict(list) for item in iterable: key = keyfunc(item) value = valuefunc(item) ret[key].append(value) if reducefunc is not None: for key, value_list in ret.items(): ret[key] = reducefunc(value_list) ret.default_factory = None return ret
Return a dictionary that maps the items in *iterable* to categories defined by *keyfunc*, transforms them with *valuefunc*, and then summarizes them by category with *reducefunc*. *valuefunc* defaults to the identity function if it is unspecified. If *reducefunc* is unspecified, no summarization takes place: >>> keyfunc = lambda x: x.upper() >>> result = map_reduce('abbccc', keyfunc) >>> sorted(result.items()) [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] Specifying *valuefunc* transforms the categorized items: >>> keyfunc = lambda x: x.upper() >>> valuefunc = lambda x: 1 >>> result = map_reduce('abbccc', keyfunc, valuefunc) >>> sorted(result.items()) [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] Specifying *reducefunc* summarizes the categorized items: >>> keyfunc = lambda x: x.upper() >>> valuefunc = lambda x: 1 >>> reducefunc = sum >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) >>> sorted(result.items()) [('A', 1), ('B', 2), ('C', 3)] You may want to filter the input iterable before applying the map/reduce procedure: >>> all_items = range(30) >>> items = [x for x in all_items if 10 <= x <= 20] # Filter >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 >>> categories = map_reduce(items, keyfunc=keyfunc) >>> sorted(categories.items()) [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) >>> sorted(summaries.items()) [(0, 90), (1, 75)] Note that all items in the iterable are gathered into a list before the summarization step, which may require significant storage. The returned object is a :obj:`collections.defaultdict` with the ``default_factory`` set to ``None``, such that it behaves like a normal dictionary.
def substitute_placeholders(inputstring, placeholders): """ Take a string with placeholders, and return the strings with substitutions. """ newst = inputstring.format(link=placeholders.link, filename=placeholders.filename, directory=placeholders.directory, fullpath=placeholders.fullpath, title=placeholders.title, filename_title=placeholders.filename_title, date=placeholders.date_string(), podcasttitle=placeholders.podcasttitle, filename_podcasttitle= placeholders.filename_podcasttitle, name=placeholders.name, subtitle=placeholders.sanitizedsubtitle, entrysummary=placeholders.entrysummary) return newst
Take a string with placeholders, and return the strings with substitutions.
def get_ajd_bound(mesh):
    """
    Determine triangular elements adjacent to the boundary elements
    """
    print('Get elements adjacent to boundaries')
    boundary_elements = []
    str_adj_boundaries = ''
    # for boundary in mesh['elements']['1']:
    boundaries = mesh['boundaries']['12'] + mesh['boundaries']['11']
    for boundary in boundaries:
        # now find the triangle ('2') with two nodes equal to this boundary
        indices = [nr if (boundary[0] in x and boundary[1] in x) else np.nan
                   for (nr, x) in enumerate(mesh['elements']['2'])]
        indices = np.array(indices)[~np.isnan(indices)]
        if len(indices) > 1:
            print('More than one neighbour found!')
        elif len(indices) == 0:
            print('No neighbour found!')
            continue
        boundary_elements.append(indices[0])
        str_adj_boundaries += '{0}\n'.format(int(indices[0]) + 1)
    return str_adj_boundaries, boundary_elements
Determine triangular elements adjacent to the boundary elements
def serialize(self, obj, *args, **kwargs): """Serialize user as per Meteor accounts serialization.""" # use default serialization, then modify to suit our needs. data = super(Users, self).serialize(obj, *args, **kwargs) # everything that isn't handled explicitly ends up in `profile` profile = data.pop('fields') profile.setdefault('name', obj.get_full_name()) fields = data['fields'] = { 'username': obj.get_username(), 'emails': [], 'profile': profile, 'permissions': sorted(self.model.get_all_permissions(obj)), } # clear out sensitive data for sensitive in [ 'password', 'user_permissions_ids', 'is_active', 'is_staff', 'is_superuser', 'groups_ids', ]: profile.pop(sensitive, None) # createdAt (default is django.contrib.auth.models.User.date_joined) try: fields['createdAt'] = profile.pop('date_joined') except KeyError: date_joined = getattr( obj, 'get_date_joined', lambda: getattr(obj, 'date_joined', None) )() if date_joined: fields['createdAt'] = date_joined # email (default is django.contrib.auth.models.User.email) try: email = profile.pop('email') except KeyError: email = getattr( obj, 'get_email', lambda: getattr(obj, 'email', None) )() if email: fields['emails'].append({'address': email, 'verified': True}) return data
Serialize user as per Meteor accounts serialization.
def list_device_data_sources(self, device_rid): """ List data sources of a portal device with rid 'device_rid'. http://docs.exosite.com/portals/#list-device-data-source """ headers = { 'User-Agent': self.user_agent(), } headers.update(self.headers()) r = requests.get( self.portals_url()+'/devices/'+device_rid+'/data-sources', headers=headers, auth=self.auth()) if HTTP_STATUS.OK == r.status_code: return r.json() else: print("Something went wrong: <{0}>: {1}".format( r.status_code, r.reason)) return None
List data sources of a portal device with rid 'device_rid'. http://docs.exosite.com/portals/#list-device-data-source
def _expand_parameters(specification, parameters, original=None):
    """Expand parameters inside commands for Serial workflow specifications.

    :param specification: Full valid Serial workflow specification.
    :param parameters: Parameters to be expanded in a Serial specification.
    :param original: Flag which determines the type of specification to
        return.
    :returns: If the 'original' flag is set, a copy of the specification
        without expanded parameters is returned. If 'original' is not set,
        a copy of the specification with expanded parameters (all $varname
        and ${varname} will be expanded with their value) is returned.
        An error is raised if the parameters cannot be expanded.
    :raises: jsonschema.ValidationError
    """
    expanded_specification = deepcopy(specification)
    try:
        for step_num, step in enumerate(expanded_specification['steps']):
            current_step = expanded_specification['steps'][step_num]
            for command_num, command in enumerate(step['commands']):
                current_step['commands'][command_num] = \
                    Template(command).substitute(parameters)
        # if the call is done from the client, original == True and the
        # original specification without applied parameters is returned.
        if original:
            return specification
        else:
            return expanded_specification
    except KeyError as e:
        raise ValidationError('Workflow parameter(s) could not '
                              'be expanded. Please take a look '
                              'at {params}'.format(params=str(e)))
Expand parameters inside commands for Serial workflow specifications. :param specification: Full valid Serial workflow specification. :param parameters: Parameters to be expanded in a Serial specification. :param original: Flag which determines the type of specification to return. :returns: If the 'original' flag is set, a copy of the specification without expanded parameters is returned. If 'original' is not set, a copy of the specification with expanded parameters (all $varname and ${varname} will be expanded with their value) is returned. An error is raised if the parameters cannot be expanded. :raises: jsonschema.ValidationError
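The expansion itself is plain string.Template substitution; a minimal sketch of one command:

from string import Template

command = 'python analyze.py --input $input_file --events ${num_events}'
parameters = {'input_file': 'data.csv', 'num_events': '1000'}
print(Template(command).substitute(parameters))
# python analyze.py --input data.csv --events 1000

# A missing parameter raises KeyError, which the function above turns
# into a ValidationError:
try:
    Template('$undefined').substitute({})
except KeyError as exc:
    print('could not expand parameter:', exc)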
def _set_data(self, action): """ capture Wikidata API response data """ if action == 'labels': self._set_labels() if action == 'wikidata': self._set_wikidata() self.get_labels(show=False)
capture Wikidata API response data
def optimize_for_size(self): ''' http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=90752 http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=69813 ''' self.optimization = 's' self.relax = True self.gc_sections = True self.ffunction_sections = True self.fdata_sections = True self.fno_inline_small_functions = True
http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=90752 http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=69813
def make_chart(self): ''' Returns ------- altair.Chart ''' task_df = self.get_task_df() import altair as alt chart = alt.Chart(task_df).mark_bar().encode( x='start', x2='end', y='term', ) return chart
Returns ------- altair.Chart
def update(self, z, R2=None):
    """
    Add a new measurement (z) to the Kalman filter. If z is None, nothing
    is changed.

    Parameters
    ----------
    z : np.array
        measurement for this update.

    R2 : np.array, scalar, or None
        Sqrt of measurement noise. Optionally provide to override the
        measurement noise for this one call, otherwise self.R2 will be
        used.
    """
    if z is None:
        self.z = np.array([[None]*self.dim_z]).T
        self.x_post = self.x.copy()
        self._P1_2_post = np.copy(self._P1_2)
        return

    if R2 is None:
        R2 = self._R1_2
    elif np.isscalar(R2):
        R2 = eye(self.dim_z) * R2

    # rename for convenience
    dim_z = self.dim_z
    M = self.M

    M[0:dim_z, 0:dim_z] = R2.T
    M[dim_z:, 0:dim_z] = dot(self.H, self._P1_2).T
    M[dim_z:, dim_z:] = self._P1_2.T

    _, self.S = qr(M)
    self.K = self.S[0:dim_z, dim_z:].T
    N = self.S[0:dim_z, 0:dim_z].T

    # y = z - Hx
    # error (residual) between measurement and prediction
    self.y = z - dot(self.H, self.x)

    # x = x + Ky
    # predict new x with residual scaled by the Kalman gain
    self.x += dot(self.K, pinv(N)).dot(self.y)
    self._P1_2 = self.S[dim_z:, dim_z:].T

    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self._P1_2_post = np.copy(self._P1_2)
Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. Parameters ---------- z : np.array measurement for this update. R2 : np.array, scalar, or None Sqrt of measurement noise. Optionally provide to override the measurement noise for this one call, otherwise self.R2 will be used.
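The pre-array/QR step can be checked in isolation: for any matrix M, qr(M) yields an upper-triangular S with SᵀS = MᵀM, so the top-left block of S is a square root of the innovation covariance R + H P Hᵀ. A numpy sketch with made-up P12, R12 and H:

import numpy as np

dim_x, dim_z = 2, 1
P12 = np.linalg.cholesky(np.array([[2.0, 0.3], [0.3, 1.0]]))  # sqrt of P
R12 = np.array([[0.5]])                                       # sqrt of R
H = np.array([[1.0, 0.0]])

M = np.zeros((dim_z + dim_x, dim_z + dim_x))
M[:dim_z, :dim_z] = R12.T
M[dim_z:, :dim_z] = (H @ P12).T
M[dim_z:, dim_z:] = P12.T

_, S = np.linalg.qr(M)
N = S[:dim_z, :dim_z]             # sqrt of the innovation covariance
expected = R12 @ R12.T + H @ P12 @ P12.T @ H.T
print(np.allclose(N.T @ N, expected))  # True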
def poster(self): """ Returns the considered user. """ user_model = get_user_model() return get_object_or_404(user_model, pk=self.kwargs[self.user_pk_url_kwarg])
Returns the considered user.
def equation(self):
    """Mix-in class that returns matrix rows for the condition that the
    difference in head between inside and outside equals zero.

    Returns matrix part (nunknowns, neq)
    Returns rhs part (nunknowns)
    """
    mat = np.empty((self.nunknowns, self.model.neq))
    rhs = np.zeros(self.nunknowns)  # Needs to be initialized to zero
    for icp in range(self.ncp):
        istart = icp * self.nlayers
        ieq = 0
        for e in self.model.elementlist:
            if e.nunknowns > 0:
                fluxin = self.intflux(e.disvecinflayers, self.xcin[icp],
                                      self.ycin[icp], self.xcin[icp + 1],
                                      self.ycin[icp + 1], self.layers,
                                      aq=self.aqin)
                fluxout = self.intflux(e.disvecinflayers, self.xcout[icp],
                                       self.ycout[icp], self.xcout[icp + 1],
                                       self.ycout[icp + 1], self.layers,
                                       aq=self.aqout)
                mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \
                    fluxin - fluxout
                ieq += e.nunknowns
            else:
                fluxin = self.intflux(e.disveclayers, self.xcin[icp],
                                      self.ycin[icp], self.xcin[icp + 1],
                                      self.ycin[icp + 1], self.layers,
                                      aq=self.aqin)
                fluxout = self.intflux(e.disveclayers, self.xcout[icp],
                                       self.ycout[icp], self.xcout[icp + 1],
                                       self.ycout[icp + 1], self.layers,
                                       aq=self.aqout)
                rhs[istart:istart + self.nlayers] -= fluxin - fluxout
    return mat, rhs
Mix-in class that returns matrix rows for the condition that the difference in head between inside and outside equals zero. Returns matrix part (nunknowns, neq) Returns rhs part (nunknowns)
def metaclass(*metaclasses):
    # type: (*type) -> Callable[[type], type]
    """Create the class using all metaclasses.

    Args:
        metaclasses: A tuple of metaclasses that will be used to generate
            and replace a specified class.

    Returns:
        A decorator that will recreate the class using the specified
        metaclasses.
    """
    def _inner(cls):
        # Deduplicate while preserving order; the class's own metaclass
        # goes last so user-supplied metaclasses take precedence.
        metabases = tuple(
            collections.OrderedDict(
                (c, None) for c in (metaclasses + (type(cls),))
            ).keys()
        )
        _Meta = metabases[0]
        for base in metabases[1:]:
            # Accumulate left-to-right; the running _Meta must come first
            # so that subclasses of `type` precede `type` itself, keeping
            # the MRO consistent.
            class _Meta(_Meta, base):  # pylint: disable=function-redefined
                pass
        return six.add_metaclass(_Meta)(cls)
    return _inner
Create the class using all metaclasses. Args: metaclasses: A tuple of metaclasses that will be used to generate and replace a specified class. Returns: A decorator that will recreate the class using the specified metaclasses.
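A usage sketch of the decorator with two sibling metaclasses (the names are made up for the demo; the decorator above must be in scope):

class TracingMeta(type):
    def __call__(cls, *args, **kwargs):
        print('instantiating', cls.__name__)
        return super(TracingMeta, cls).__call__(*args, **kwargs)

class RegistryMeta(type):
    registry = []
    def __init__(cls, name, bases, attrs):
        RegistryMeta.registry.append(name)
        super(RegistryMeta, cls).__init__(name, bases, attrs)

@metaclass(TracingMeta, RegistryMeta)
class Widget(object):
    pass

w = Widget()                  # prints "instantiating Widget"
print(RegistryMeta.registry)  # ['Widget']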
def get_import_status(self, sis_import): """ Get the status of an already created SIS import. https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.show """ if not self._canvas_account_id: raise MissingAccountID() url = SIS_IMPORTS_API.format( self._canvas_account_id) + "/{}.json".format(sis_import.import_id) return SISImportModel(data=self._get_resource(url))
Get the status of an already created SIS import. https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.show
def register_iq_request_handler(self, type_, payload_cls, cb, *,
                                with_send_reply=False):
    """
    Register a coroutine function or a function returning an awaitable to
    run when an IQ request is received.

    :param type_: IQ type to react to (must be a request type).
    :type type_: :class:`~aioxmpp.IQType`
    :param payload_cls: Payload class to react to (subclass of
        :class:`~xso.XSO`)
    :type payload_cls: :class:`~.XMLStreamClass`
    :param cb: Function or coroutine function to invoke
    :param with_send_reply: Whether to pass a function to send a reply
        to `cb` as second argument.
    :type with_send_reply: :class:`bool`
    :raises ValueError: if there is already a coroutine registered for this
        target
    :raises ValueError: if `type_` is not a request IQ type
    :raises ValueError: if `type_` is not a valid :class:`~.IQType` (and
        cannot be cast to a :class:`~.IQType`)

    The callback `cb` will be called whenever an IQ stanza with the given
    `type_` and payload being an instance of the `payload_cls` is received.

    The callback must either be a coroutine function or otherwise return an
    awaitable. The awaitable must evaluate to a valid value for the
    :attr:`.IQ.payload` attribute. That value will be set as the payload
    attribute value of an IQ response (with type :attr:`~.IQType.RESULT`)
    which is generated and sent by the stream.

    If the awaitable or the function raises an exception, it will be
    converted to a :class:`~.stanza.Error` object. That error object is
    then used as payload for an IQ response (with type
    :attr:`~.IQType.ERROR`) which is generated and sent by the stream.

    If the exception is a subclass of :class:`aioxmpp.errors.XMPPError`, it
    is converted to an :class:`~.stanza.Error` instance directly.
    Otherwise, it is wrapped in a :class:`aioxmpp.XMPPCancelError` with
    ``undefined-condition``.

    For this to work, `payload_cls` *must* be registered using
    :meth:`~.IQ.as_payload_class`. Otherwise, the payload will not be
    recognised by the stream parser and the IQ is automatically responded
    to with a ``feature-not-implemented`` error.

    .. warning::

        When using a coroutine function for `cb`, there is no guarantee
        that concurrent IQ handlers and other coroutines will execute in
        any defined order. This implies that the strong ordering guarantees
        normally provided by XMPP XML Streams are lost when using coroutine
        functions for `cb`. For this reason, the use of non-coroutine
        functions is allowed.

    .. note::

        Using a non-coroutine function for `cb` will generally lead to less
        readable code. For the sake of readability, it is recommended to
        prefer coroutine functions when strong ordering guarantees are not
        needed.

    .. versionadded:: 0.11

        When the argument `with_send_reply` is true `cb` will be called
        with two arguments: the IQ stanza to handle and a unary function
        `send_reply(result=None)` that sends a response to the IQ request
        and prevents that an automatic response is sent. If `result` is an
        instance of :class:`~aioxmpp.XMPPError` an error result is
        generated.

        This is useful when the handler function needs to execute actions
        which happen after the IQ result has been sent, for example,
        sending other stanzas.

    .. versionchanged:: 0.10

        Accepts an awaitable as last argument in addition to coroutine
        functions. Renamed from :meth:`register_iq_request_coro`.

    .. versionadded:: 0.6

        If the stream is :meth:`stop`\\ -ped (only if SM is not enabled) or
        :meth:`close`\\ ed, running IQ response coroutines are
        :meth:`asyncio.Task.cancel`\\ -led. To protect against that, fork
        from your coroutine using :func:`asyncio.ensure_future`.

    .. versionchanged:: 0.7

        The `type_` argument is now supposed to be a :class:`~.IQType`
        member.

    .. deprecated:: 0.7

        Passing a :class:`str` as `type_` argument is deprecated and will
        raise a :class:`TypeError` as of the 1.0 release. See the Changelog
        for :ref:`api-changelog-0.7` for further details on how to upgrade
        your code efficiently.
    """
    type_ = self._coerce_enum(type_, structs.IQType)
    if not type_.is_request:
        raise ValueError(
            "{!r} is not a request IQType".format(type_)
        )
    key = type_, payload_cls
    if key in self._iq_request_map:
        raise ValueError("only one listener is allowed per tag")
    self._iq_request_map[key] = cb, with_send_reply
    self._logger.debug(
        "iq request coroutine registered: type=%r, payload=%r",
        type_, payload_cls)
Register a coroutine function or a function returning an awaitable to run when an IQ request is received. :param type_: IQ type to react to (must be a request type). :type type_: :class:`~aioxmpp.IQType` :param payload_cls: Payload class to react to (subclass of :class:`~xso.XSO`) :type payload_cls: :class:`~.XMLStreamClass` :param cb: Function or coroutine function to invoke :param with_send_reply: Whether to pass a function to send a reply to `cb` as second argument. :type with_send_reply: :class:`bool` :raises ValueError: if there is already a coroutine registered for this target :raises ValueError: if `type_` is not a request IQ type :raises ValueError: if `type_` is not a valid :class:`~.IQType` (and cannot be cast to a :class:`~.IQType`) The callback `cb` will be called whenever an IQ stanza with the given `type_` and payload being an instance of the `payload_cls` is received. The callback must either be a coroutine function or otherwise return an awaitable. The awaitable must evaluate to a valid value for the :attr:`.IQ.payload` attribute. That value will be set as the payload attribute value of an IQ response (with type :attr:`~.IQType.RESULT`) which is generated and sent by the stream. If the awaitable or the function raises an exception, it will be converted to a :class:`~.stanza.Error` object. That error object is then used as payload for an IQ response (with type :attr:`~.IQType.ERROR`) which is generated and sent by the stream. If the exception is a subclass of :class:`aioxmpp.errors.XMPPError`, it is converted to an :class:`~.stanza.Error` instance directly. Otherwise, it is wrapped in a :class:`aioxmpp.XMPPCancelError` with ``undefined-condition``. For this to work, `payload_cls` *must* be registered using :meth:`~.IQ.as_payload_class`. Otherwise, the payload will not be recognised by the stream parser and the IQ is automatically responded to with a ``feature-not-implemented`` error. .. warning:: When using a coroutine function for `cb`, there is no guarantee that concurrent IQ handlers and other coroutines will execute in any defined order. This implies that the strong ordering guarantees normally provided by XMPP XML Streams are lost when using coroutine functions for `cb`. For this reason, the use of non-coroutine functions is allowed. .. note:: Using a non-coroutine function for `cb` will generally lead to less readable code. For the sake of readability, it is recommended to prefer coroutine functions when strong ordering guarantees are not needed. .. versionadded:: 0.11 When the argument `with_send_reply` is true `cb` will be called with two arguments: the IQ stanza to handle and a unary function `send_reply(result=None)` that sends a response to the IQ request and prevents that an automatic response is sent. If `result` is an instance of :class:`~aioxmpp.XMPPError` an error result is generated. This is useful when the handler function needs to execute actions which happen after the IQ result has been sent, for example, sending other stanzas. .. versionchanged:: 0.10 Accepts an awaitable as last argument in addition to coroutine functions. Renamed from :meth:`register_iq_request_coro`. .. versionadded:: 0.6 If the stream is :meth:`stop`\\ -ped (only if SM is not enabled) or :meth:`close`\\ ed, running IQ response coroutines are :meth:`asyncio.Task.cancel`\\ -led. To protect against that, fork from your coroutine using :func:`asyncio.ensure_future`. .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.IQType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently.
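A hedged registration sketch against the documented aioxmpp API (the namespace string is hypothetical, and `stream` would be an `aioxmpp.stream.StanzaStream`, e.g. `client.stream`):

import aioxmpp
import aioxmpp.xso as xso

@aioxmpp.IQ.as_payload_class
class Ping(xso.XSO):
    TAG = ('urn:example:ping', 'ping')  # hypothetical namespace

async def handle_ping(iq):
    # the returned payload becomes the IQType.RESULT payload
    return Ping()

def setup(stream):
    # stream: an aioxmpp.stream.StanzaStream
    stream.register_iq_request_handler(
        aioxmpp.IQType.GET, Ping, handle_ping)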
def start_prompt(self): """Start the interpreter.""" logger.show("Coconut Interpreter:") logger.show("(type 'exit()' or press Ctrl-D to end)") self.start_running() while self.running: try: code = self.get_input() if code: compiled = self.handle_input(code) if compiled: self.execute(compiled, use_eval=None) except KeyboardInterrupt: printerr("\nKeyboardInterrupt")
Start the interpreter.
def convert_padding(params, w_name, scope_name, inputs, layers, weights, names): """ Convert padding layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers """ print('Converting padding...') if params['mode'] == 'constant': # raise AssertionError('Cannot convert non-constant padding') if params['value'] != 0.0: raise AssertionError('Cannot convert non-zero padding') if names: tf_name = 'PADD' + random_string(4) else: tf_name = w_name + str(random.random()) # Magic ordering padding_name = tf_name padding_layer = keras.layers.ZeroPadding2D( padding=((params['pads'][2], params['pads'][6]), (params['pads'][3], params['pads'][7])), name=padding_name ) layers[scope_name] = padding_layer(layers[inputs[0]]) elif params['mode'] == 'reflect': def target_layer(x, pads=params['pads']): # x = tf.transpose(x, [0, 2, 3, 1]) layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT') # layer = tf.transpose(layer, [0, 3, 1, 2]) return layer lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert padding layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def transform_hits(hits): """ The list from pypi is really a list of versions. We want a list of packages with the list of versions stored inline. This converts the list from pypi into one we can use. """ packages = {} for hit in hits: name = hit['name'] summary = hit['summary'] version = hit['version'] score = hit['_pypi_ordering'] if score is None: score = 0 if name not in packages.keys(): packages[name] = { 'name': name, 'summary': summary, 'versions': [version], 'score': score, } else: packages[name]['versions'].append(version) # if this is the highest version, replace summary and score if version == highest_version(packages[name]['versions']): packages[name]['summary'] = summary packages[name]['score'] = score # each record has a unique name now, so we will convert the dict into a # list sorted by score package_list = sorted( packages.values(), key=lambda x: x['score'], reverse=True, ) return package_list
The list from pypi is really a list of versions. We want a list of packages with the list of versions stored inline. This converts the list from pypi into one we can use.
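A hedged demo, with a naive stand-in for the `highest_version` helper that lives alongside `transform_hits` in the original module:

def highest_version(versions):
    # simplistic stand-in: compare dotted numeric components only
    return max(versions,
               key=lambda v: [int(p) for p in v.split('.') if p.isdigit()])

hits = [
    {'name': 'foo', 'summary': 'old', 'version': '0.9', '_pypi_ordering': 1},
    {'name': 'foo', 'summary': 'new', 'version': '1.0', '_pypi_ordering': 2},
    {'name': 'bar', 'summary': 'other', 'version': '2.0', '_pypi_ordering': 5},
]
for package in transform_hits(hits):
    print(package['name'], package['versions'], package['summary'])
# bar ['2.0'] other
# foo ['0.9', '1.0'] new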
def _find_usage_vpc_links(self): """ Find usage on VPC Links. Update `self.limits`. """ logger.debug('Finding usage for VPC Links') link_count = 0 paginator = self.conn.get_paginator('get_vpc_links') for resp in paginator.paginate(): link_count += len(resp['items']) self.limits['VPC Links per account']._add_current_usage( link_count, aws_type='AWS::ApiGateway::VpcLink' )
Find usage on VPC Links. Update `self.limits`.
def fetch_search_document(self, *, index): """Fetch the object's document from a search index by id.""" assert self.pk, "Object must have a primary key before being indexed." client = get_client() return client.get(index=index, doc_type=self.search_doc_type, id=self.pk)
Fetch the object's document from a search index by id.
def enable_page_breakpoint(self, dwProcessId, address):
    """
    Enables the page breakpoint at the given address.

    @see:
        L{define_page_breakpoint},
        L{has_page_breakpoint},
        L{get_page_breakpoint},
        L{enable_one_shot_page_breakpoint},
        L{disable_page_breakpoint},
        L{erase_page_breakpoint}

    @type  dwProcessId: int
    @param dwProcessId: Process global ID.

    @type  address: int
    @param address: Memory address of breakpoint.
    """
    p = self.system.get_process(dwProcessId)
    bp = self.get_page_breakpoint(dwProcessId, address)
    if bp.is_running():
        self.__del_running_bp_from_all_threads(bp)
    bp.enable(p, None)
Enables the page breakpoint at the given address. @see: L{define_page_breakpoint}, L{has_page_breakpoint}, L{get_page_breakpoint}, L{enable_one_shot_page_breakpoint}, L{disable_page_breakpoint}, L{erase_page_breakpoint} @type dwProcessId: int @param dwProcessId: Process global ID. @type address: int @param address: Memory address of breakpoint.
def load(self): """Load the data file, do some basic type conversions """ df = pd.read_csv(self.input_file, encoding='utf8') df['wiki_id'] = df['painting'].str.split('/').str[-1] df['creator_wiki_id'] = df['creator'].str.split('/').str[-1] df['decade'] = (df['inception'].str[:4].astype(float) / 10.).astype(int) * 10 df['area'] = df['width'] * df['height'] return df
Load the data file, do some basic type conversions
def _deserialize_dict(cls, cls_target, dict_): """ :type cls_target: T|type :type dict_: dict :rtype: T """ instance = cls_target.__new__(cls_target) dict_deserialized = cls._deserialize_dict_attributes(cls_target, dict_) instance.__dict__ = cls._fill_default_values(cls_target, dict_deserialized) return instance
:type cls_target: T|type :type dict_: dict :rtype: T
def reset(self): """ Clear the active cells. """ self.bumpPhases = np.empty((2,0), dtype="float") self.phaseDisplacement = np.empty((0,2), dtype="float") self.cellsForActivePhases = np.empty(0, dtype="int") self.activeCells = np.empty(0, dtype="int") self.learningCells = np.empty(0, dtype="int") self.sensoryAssociatedCells = np.empty(0, dtype="int")
Clear the active cells.
def command_line_runner(): """ I run functions from the command-line! """ filename = sys.argv[-1] if not filename.endswith(".rst"): print("ERROR! Please enter a ReStructuredText filename!") sys.exit() print(rst_to_json(file_opener(filename)))
I run functions from the command-line!
def _AddSerializeToStringMethod(message_descriptor, cls):
    """Helper for _AddMessageMethods()."""

    def SerializeToString(self):
        # Check if the message has all of its required fields set.
        if not self.IsInitialized():
            raise message_mod.EncodeError(
                'Message %s is missing required fields: %s' % (
                    self.DESCRIPTOR.full_name,
                    ','.join(self.FindInitializationErrors())))
        return self.SerializePartialToString()
    cls.SerializeToString = SerializeToString
Helper for _AddMessageMethods().
def get_or_search(self) -> List[GridQubit]: """Starts the search or gives previously calculated sequence. Returns: The linear qubit sequence found. """ if not self._sequence: self._sequence = self._find_sequence() return self._sequence
Starts the search or gives previously calculated sequence. Returns: The linear qubit sequence found.
def __get_category(self, category, name, vivify=False):
    """
    Recursively gets the requested category; alternatively, if the
    **vivify** argument is set, the category will be created.

    :param category: Base category.
    :type category: dict
    :param name: Category to retrieve or vivify.
    :type name: unicode
    :param vivify: Vivify missing parents in the chain to the requested
        category.
    :type vivify: bool
    :return: Requested category.
    :rtype: dict
    """

    namespace = foundations.namespace.get_namespace(name, root_only=True)
    name = foundations.namespace.remove_namespace(name, root_only=True)
    if namespace:
        if vivify and namespace not in category:
            category[namespace] = {}
        return self.__get_category(category[namespace], name, vivify)
    else:
        if vivify and name not in category:
            category[name] = {}
        return category[name]
Recursively gets the requested category; alternatively, if the **vivify** argument is set, the category will be created. :param category: Base category. :type category: dict :param name: Category to retrieve or vivify. :type name: unicode :param vivify: Vivify missing parents in the chain to the requested category. :type vivify: bool :return: Requested category. :rtype: dict
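The recursion can be sketched without the foundations helpers; a stand-in that assumes a '.'-separated namespace (the original delegates the splitting to foundations.namespace):

def get_category(category, name, vivify=False):
    # Recursive lookup with optional autovivification of missing parents.
    head, _, rest = name.partition('.')
    if vivify and head not in category:
        category[head] = {}
    if rest:
        return get_category(category[head], rest, vivify)
    return category[head]

settings = {}
get_category(settings, 'ui.colors.theme', vivify=True)
print(settings)  # {'ui': {'colors': {'theme': {}}}}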
def accuracy(links_true, links_pred=None, total=None): """accuracy(links_true, links_pred, total) Compute the accuracy. The accuracy is given by (TP+TN)/(TP+FP+TN+FN). Parameters ---------- links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series The true (or actual) collection of links. links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series The predicted collection of links. total: int, pandas.MultiIndex The count of all record pairs (both links and non-links). When the argument is a pandas.MultiIndex, the length of the index is used. Returns ------- float The accuracy """ if _isconfusionmatrix(links_true): confusion_matrix = links_true v = (confusion_matrix[0, 0] + confusion_matrix[1, 1]) \ / numpy.sum(confusion_matrix) else: tp = true_positives(links_true, links_pred) tn = true_negatives(links_true, links_pred, total) v = (tp + tn) / total return float(v)
accuracy(links_true, links_pred, total) Compute the accuracy. The accuracy is given by (TP+TN)/(TP+FP+TN+FN). Parameters ---------- links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series The true (or actual) collection of links. links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series The predicted collection of links. total: int, pandas.MultiIndex The count of all record pairs (both links and non-links). When the argument is a pandas.MultiIndex, the length of the index is used. Returns ------- float The accuracy
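A worked example of the confusion-matrix branch, using the [[TP, FN], [FP, TN]] layout the function assumes:

import numpy

cm = numpy.array([[40, 10],
                  [5, 45]])
accuracy_value = (cm[0, 0] + cm[1, 1]) / numpy.sum(cm)
print(accuracy_value)  # (40 + 45) / 100 = 0.85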
def wait_for_string(self, expected_string, timeout=60): """Wait for string FSM.""" # 0 1 2 3 events = [self.syntax_error_re, self.connection_closed_re, expected_string, self.press_return_re, # 4 5 6 7 self.more_re, pexpect.TIMEOUT, pexpect.EOF, self.buffer_overflow_re] # add detected prompts chain events += self.device.get_previous_prompts() # without target prompt self.log("Expecting: {}".format(pattern_to_str(expected_string))) transitions = [ (self.syntax_error_re, [0], -1, CommandSyntaxError("Command unknown", self.device.hostname), 0), (self.connection_closed_re, [0], 1, a_connection_closed, 10), (pexpect.TIMEOUT, [0], -1, CommandTimeoutError("Timeout waiting for prompt", self.device.hostname), 0), (pexpect.EOF, [0, 1], -1, ConnectionError("Unexpected device disconnect", self.device.hostname), 0), (self.more_re, [0], 0, partial(a_send, " "), 10), (expected_string, [0, 1], -1, a_expected_prompt, 0), (self.press_return_re, [0], -1, a_stays_connected, 0), # TODO: Customize in XR driver (self.buffer_overflow_re, [0], -1, CommandSyntaxError("Command too long", self.device.hostname), 0) ] for prompt in self.device.get_previous_prompts(): transitions.append((prompt, [0, 1], 0, a_unexpected_prompt, 0)) fsm = FSM("WAIT-4-STRING", self.device, events, transitions, timeout=timeout) return fsm.run()
Wait for string FSM.
def resetPassword(self, userId): ''' Changes a user's password to a system-generated value. ''' self._setHeaders('resetPassword') return self._sforce.service.resetPassword(userId)
Changes a user's password to a system-generated value.