def remove(self, point, node=None):
    """ Removes the node with the given point from the tree

    Returns the new root node of the (sub)tree.

    If there are multiple points matching "point", only one is removed.

    The optional "node" parameter is used for checking the identity, once
    the removal candidate is decided. """

    # Recursion has reached an empty leaf node, nothing here to delete
    if not self:
        return

    # Recursion has reached the node to be deleted
    if self.should_remove(point, node):
        return self._remove(point)

    # Remove direct subnode
    if self.left and self.left.should_remove(point, node):
        self.left = self.left._remove(point)

    elif self.right and self.right.should_remove(point, node):
        self.right = self.right._remove(point)

    # Recurse to subtrees
    if point[self.axis] <= self.data[self.axis]:
        if self.left:
            self.left = self.left.remove(point, node)

    if point[self.axis] >= self.data[self.axis]:
        if self.right:
            self.right = self.right.remove(point, node)

    return self
def _limit_call_handler(self):
    """ Ensure we don't exceed the N requests a minute limit by leveraging
    a thread lock """
    # acquire a lock on our threading.Lock() object
    with self.limit_lock:
        # if we have no configured limit, exit. the lock releases based on scope
        if self.limit_per_min <= 0:
            return

        now = time.time()
        # self.limits is a list of query times + 60 seconds. In essence it is
        # a list of times that queries time out of the 60 second query window.
        # this check expires any limits that have passed
        self.limits = [l for l in self.limits if l > now]
        # and we tack on the current query
        self.limits.append(now + 60)

        # if we have more than our limit of queries (and remember, we call
        # this before we actually execute a query) we sleep until the oldest
        # query on the list (element 0 because we append new queries) times
        # out. We don't worry about cleanup because next time this routine
        # runs it will clean itself up.
        if len(self.limits) >= self.limit_per_min:
            time.sleep(self.limits[0] - now)
def output_sizes(self):
    """Returns a tuple of all output sizes of all the layers."""
    return tuple([l() if callable(l) else l for l in self._output_sizes])
def resend_invitations(self):
    """ Resends invites for an event. ::

        event = service.calendar().get_event(id='KEY HERE')
        event.resend_invitations()

    Anybody who has not declined this meeting will get a new invite.
    """
    if not self.id:
        raise TypeError(u"You can't send invites for an event that hasn't been created yet.")

    # Under the hood, this is just an .update() but with no attributes changed.
    # We're going to enforce that by checking if there are any changed
    # attributes and bail if there are
    if self._dirty_attributes:
        raise ValueError(u"There are unsaved changes to this invite - please update it first: %r" % self._dirty_attributes)

    self.refresh_change_key()
    body = soap_request.update_item(self, [], calendar_item_update_operation_type=u'SendOnlyToAll')
    self.service.send(body)

    return self
def _find_blob_start(self):
    """Find first blob from selection."""
    # Convert input frequencies into what their corresponding channel number would be.
    self._setup_chans()

    # Check which is the blob time offset
    blob_time_start = self.t_start

    # Check which is the blob frequency offset (in channels)
    blob_freq_start = self.chan_start_idx

    blob_start = blob_time_start * self.n_channels_in_file + blob_freq_start

    return blob_start
def _is_cif(string):
    """Test if input string is in CIF format.

    :param string: Input string.
    :type string: :py:class:`str` or :py:class:`bytes`
    :return: Input string if in CIF format or False otherwise.
    :rtype: :py:class:`str` or :py:obj:`False`
    """
    if (string[0:5] == u"data_" and u"_entry.id" in string) or \
            (string[0:5] == b"data_" and b"_entry.id" in string):
        return string
    return False
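# A minimal usage sketch for _is_cif, assuming it is importable as a plain
# function; the header string below is a made-up example, not a real entry.
header = u"data_1ABC\n_entry.id   1ABC\n"
assert _is_cif(header) == header
assert _is_cif(u"not a cif file") is False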
def watchlist(self, tubes):
    """Set the watchlist to the given tubes

    :param tubes: A list of tubes to watch

    Automatically un-watches any tubes that are not on the target list
    """
    tubes = set(tubes)
    for tube in tubes - self._watchlist:
        self.watch(tube)
    for tube in self._watchlist - tubes:
        self.ignore(tube)
def lasts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """ Lazily return the last x items from this iterable or default. """
    last_items = deque(iterable, maxlen=items)
    for _ in range(items - len(last_items)):
        yield default
    for y in last_items:
        yield y
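# A small usage sketch for lasts (assumes `from collections import deque`
# at module level). When the iterable is shorter than `items`, the result
# is left-padded with `default`.
assert list(lasts([1, 2, 3, 4, 5], items=2)) == [4, 5]
assert list(lasts([1], items=3, default=0)) == [0, 0, 1]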
def median2D(const, bin1, label1, bin2, label2, data_label,
             returnData=False):
    """Return a 2D average of data_label over a season and label1, label2.

    Parameters
    ----------
    const: Constellation or Instrument
    bin#: [min, max, number of bins]
    label#: string
        identifies data product for bin#
    data_label: list-like
        contains strings identifying data product(s) to be averaged

    Returns
    -------
    median : dictionary
        2D median accessed by data_label as a function of label1 and label2
        over the season delineated by bounds of passed instrument objects.
        Also includes 'count' and 'avg_abs_dev' as well as the values of
        the bin edges in 'bin_x' and 'bin_y'.
    """

    # const is either an Instrument or a Constellation, and we want to
    # iterate over it.
    # If it's a Constellation, then we can do that as is, but if it's
    # an Instrument, we just have to put that Instrument into something
    # that will yield that Instrument, like a list.
    if isinstance(const, pysat.Instrument):
        const = [const]
    elif not isinstance(const, pysat.Constellation):
        raise ValueError("Parameter must be an Instrument or a Constellation.")

    # create bins; these are the boundaries used for sorting into bins
    binx = np.linspace(bin1[0], bin1[1], bin1[2]+1)
    biny = np.linspace(bin2[0], bin2[1], bin2[2]+1)

    # how many bins are used
    numx = len(binx)-1
    numy = len(biny)-1
    # how many different data products
    numz = len(data_label)

    # create array to store all values before taking median;
    # the indices of the bins/data products, used for looping
    yarr = np.arange(numy)
    xarr = np.arange(numx)
    zarr = np.arange(numz)
    # 3d array: stores the data that is sorted into each bin - in a deque
    ans = [[[collections.deque() for i in xarr] for j in yarr] for k in zarr]

    for inst in const:
        # do loop to iterate over instrument season; probably iterates by
        # date but that all depends on the configuration of that particular
        # instrument. either way, it iterates over the instrument, loading
        # successive data between start and end bounds
        for inst in inst:
            # collect data in bins for averaging
            if len(inst.data) != 0:
                # sort the data into bins (x) based on label 1
                # (stores bin indexes in xind)
                xind = np.digitize(inst.data[label1], binx)-1
                # for each possible x index
                for xi in xarr:
                    # get the indices of those pieces of data in that bin
                    xindex, = np.where(xind == xi)
                    if len(xindex) > 0:
                        # look up the data along y (label2) at that set of
                        # indices (a given x)
                        yData = inst.data.iloc[xindex]
                        # digitize that, to sort data into bins along y
                        # (label2) (get bin indexes)
                        yind = np.digitize(yData[label2], biny)-1
                        # for each possible y index
                        for yj in yarr:
                            # select data with this y index (and we already
                            # filtered for this x index)
                            yindex, = np.where(yind == yj)
                            if len(yindex) > 0:
                                # for each data product label zk
                                for zk in zarr:
                                    # take the data (already filtered by x);
                                    # filter it by y and select the data
                                    # product, put it in a list, and extend
                                    # the deque
                                    ans[zk][yj][xi].extend(
                                        yData.ix[yindex, data_label[zk]].tolist())

    return _calc_2d_median(ans, data_label, binx, biny, xarr, yarr, zarr,
                           numx, numy, numz, returnData)
def diag_post_enable(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    diag = ET.SubElement(config, "diag", xmlns="urn:brocade.com:mgmt:brocade-diagnostics")
    post = ET.SubElement(diag, "post")
    enable = ET.SubElement(post, "enable")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def close(self):
    """ Close log stream and stream_lock. """
    try:
        self._close()
        if hasattr(self.stream_lock, 'closed') and not self.stream_lock.closed:
            self.stream_lock.close()
    finally:
        self.stream_lock = None
        if Handler:
            Handler.close(self)
def post_relationship(self, session, json_data, api_type, obj_id, rel_key):
    """
    Append to a relationship.

    :param session: SQLAlchemy session
    :param json_data: Request JSON Data
    :param api_type: Type of the resource
    :param obj_id: ID of the resource
    :param rel_key: Key of the relationship to fetch
    """
    model = self._fetch_model(api_type)
    resource = self._fetch_resource(session, api_type, obj_id,
                                    Permissions.EDIT)
    if rel_key not in resource.__jsonapi_map_to_py__.keys():
        raise RelationshipNotFoundError(resource, resource, rel_key)
    py_key = resource.__jsonapi_map_to_py__[rel_key]
    relationship = self._get_relationship(resource, py_key,
                                          Permissions.CREATE)
    if relationship.direction == MANYTOONE:
        raise ValidationError('Cannot post to to-one relationship')
    if not isinstance(json_data['data'], list):
        raise ValidationError('/data must be an array')

    remote_side = relationship.back_populates
    try:
        for item in json_data['data']:
            setter = get_rel_desc(resource, relationship.key,
                                  RelationshipActions.APPEND)
            if not isinstance(json_data['data'], list):
                raise BadRequestError(
                    '{} must be an array'.format(relationship.key))
            for item in json_data['data']:
                if {'type', 'id'} != set(item.keys()):
                    raise BadRequestError(
                        '{} must have type and id keys'
                        .format(relationship.key))
                to_relate = self._fetch_resource(
                    session, item['type'], item['id'], Permissions.EDIT)
                rem = to_relate.__mapper__.relationships[remote_side]
                if rem.direction == MANYTOONE:
                    check_permission(to_relate, remote_side,
                                     Permissions.EDIT)
                else:
                    check_permission(to_relate, remote_side,
                                     Permissions.CREATE)
                setter(resource, to_relate)
        session.add(resource)
        session.commit()
    except KeyError:
        raise ValidationError('Incompatible type provided')
    return self.get_relationship(
        session, {}, model.__jsonapi_type__, resource.id, rel_key)
def min_scalar_prod(x, y):
    """Permute vector to minimize scalar product

    :param x:
    :param y: x, y are vectors of same size
    :returns: min sum x[i] * y[sigma[i]] over all permutations sigma
    :complexity: O(n log n)
    """
    x = sorted(x)  # make copies
    y = sorted(y)  # to save arguments
    return sum(x[i] * y[-i - 1] for i in range(len(x)))
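# A small worked example: sorting one vector ascending and pairing it
# against the other sorted descending minimizes the scalar product.
# Here x -> [1, 2, 3] and y -> [6, 5, 4], giving 1*6 + 2*5 + 3*4 = 28.
assert min_scalar_prod([3, 1, 2], [4, 6, 5]) == 28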
def appendRecord(self, record):
    """
    Saves the record in the underlying csv file.

    :param record: a list of Python objects that will be string-ified
    """
    assert self._file is not None
    assert self._mode == self._FILE_WRITE_MODE
    assert isinstance(record, (list, tuple)), \
        "unexpected record type: " + repr(type(record))

    assert len(record) == self._fieldCount, \
        "len(record): %s, fieldCount: %s" % (len(record), self._fieldCount)

    # Write header if needed
    if self._recordCount == 0:
        # Write the header
        names, types, specials = zip(*self.getFields())
        for line in names, types, specials:
            self._writer.writerow(line)

    # Keep track of sequences, make sure time flows forward
    self._updateSequenceInfo(record)

    line = [self._adapters[i](f) for i, f in enumerate(record)]

    self._writer.writerow(line)
    self._recordCount += 1
def get_grade_mdata():
    """Return default mdata map for Grade"""
    return {
        'output_score': {
            'element_label': {
                'text': 'output score',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'instructions': {
                'text': 'enter a decimal value.',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_decimal_values': [None],
            'syntax': 'DECIMAL',
            'decimal_scale': None,
            'minimum_decimal': None,
            'maximum_decimal': None,
            'decimal_set': [],
        },
        'grade_system': {
            'element_label': {
                'text': 'grade system',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'instructions': {
                'text': 'accepts an osid.id.Id object',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        },
        'input_score_end_range': {
            'element_label': {
                'text': 'input score end range',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'instructions': {
                'text': 'enter a decimal value.',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_decimal_values': [None],
            'syntax': 'DECIMAL',
            'decimal_scale': None,
            'minimum_decimal': None,
            'maximum_decimal': None,
            'decimal_set': [],
        },
        'input_score_start_range': {
            'element_label': {
                'text': 'input score start range',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'instructions': {
                'text': 'enter a decimal value.',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_decimal_values': [None],
            'syntax': 'DECIMAL',
            'decimal_scale': None,
            'minimum_decimal': None,
            'maximum_decimal': None,
            'decimal_set': [],
        },
    }
async def kickban(self, channel, target, reason=None, range=0):
    """ Kick and ban user from channel. """
    await self.ban(channel, target, range)
    await self.kick(channel, target, reason)
def update_object(self, url, container, container_object, object_headers,
                  container_headers):
    """Update an existing object in a swift container.

    This method will place new headers on an existing object or container.

    :param url: full URL of the swift endpoint
    :param container: name of the target container
    :param container_object: name of the object within the container
    """
    headers, container_uri = self._return_base_data(
        url=url,
        container=container,
        container_object=container_object,
        container_headers=container_headers,
        object_headers=object_headers,
    )

    return self._header_poster(
        uri=container_uri,
        headers=headers
    )
def __doDownloadPage(self, *args, **kwargs):
    """Works like client.downloadPage(), but handles incoming headers
    """
    logger.debug("download page: %r, %r", args, kwargs)

    return self.__clientDefer(downloadPage(*args, **kwargs))
def encode(string):
    """ Encode the given string as an OID.

    >>> import snmp_passpersist as snmp
    >>> snmp.PassPersist.encode("hello")
    '5.104.101.108.108.111'
    >>>
    """
    result = ".".join([str(ord(s)) for s in string])
    return "%s." % (len(string)) + result
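# A hedged sketch of the inverse operation (not part of snmp_passpersist's
# API; the name `decode_oid` is made up here): the first OID component is
# the string length, the remaining components are character codes.
def decode_oid(oid):
    parts = oid.split(".")
    length = int(parts[0])
    chars = [chr(int(p)) for p in parts[1:]]
    assert len(chars) == length, "length prefix does not match payload"
    return "".join(chars)

assert decode_oid('5.104.101.108.108.111') == "hello"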
def _make_ipmi_payload(self, netfn, command, bridge_request=None, data=()):
    """This function generates the core ipmi payload that would be
    applicable for any channel (including KCS)
    """
    bridge_msg = []
    self.expectedcmd = command
    # in ipmi, the response netfn is always one higher than the request
    # payload; we assume we are always the requestor for now
    self.expectednetfn = netfn + 1
    seqincrement = 7  # IPMI spec forbids gaps bigger than 7 in seq number.
    # Risk the taboo rather than violate the rules
    while (not self.servermode and
            (netfn, command, self.seqlun) in self.tabooseq and
            self.tabooseq[(netfn, command, self.seqlun)] and seqincrement):
        # Allow taboo to eventually expire after a few rounds
        self.tabooseq[(self.expectednetfn, command, self.seqlun)] -= 1
        self.seqlun += 4  # the last two bits are lun, so add 4 to add 1
        self.seqlun &= 0xff  # we only have one byte, wrap when exceeded
        seqincrement -= 1

    if bridge_request:
        addr = bridge_request.get('addr', 0x0)
        channel = bridge_request.get('channel', 0x0)
        bridge_msg = self._make_bridge_request_msg(channel, netfn, command)
        # NOTE(fengqian): For bridge request, rsaddr is specified and
        # rqaddr is BMC address.
        rqaddr = constants.IPMI_BMC_ADDRESS
        rsaddr = addr
    else:
        rqaddr = self.rqaddr
        rsaddr = constants.IPMI_BMC_ADDRESS
    if self.servermode:
        rsaddr = self.clientaddr

    # figure 13-4, first two bytes are rsaddr and netfn; for a non-bridge
    # request, rsaddr is always 0x20 since we are addressing BMC, while
    # rsaddr is specified for a bridge request
    header = bytearray((rsaddr, netfn << 2))

    reqbody = bytearray((rqaddr, self.seqlun, command)) + data
    headsum = bytearray((_checksum(*header),))
    bodysum = bytearray((_checksum(*reqbody),))
    payload = header + headsum + reqbody + bodysum
    if bridge_request:
        payload = bridge_msg + payload
        # NOTE(fengqian): For bridge request, another check sum is needed.
        tail_csum = _checksum(*payload[3:])
        payload.append(tail_csum)

    if not self.servermode:
        self._add_request_entry((self.expectednetfn, self.seqlun, command))
    return payload
def sync(self, hooks=True, async_hooks=True):
    """Synchronize user repositories.

    :param bool hooks: True for syncing hooks.
    :param bool async_hooks: True for sending of an asynchronous task to
                             sync hooks.

    .. note::

        Syncing happens from GitHub's direction only. This means that we
        consider the information on GitHub as valid, and we overwrite our
        own state based on this information.
    """
    active_repos = {}
    github_repos = {repo.id: repo for repo in self.api.repositories()
                    if repo.permissions['admin']}
    for gh_repo_id, gh_repo in github_repos.items():
        active_repos[gh_repo_id] = {
            'id': gh_repo_id,
            'full_name': gh_repo.full_name,
            'description': gh_repo.description,
        }

    if hooks:
        self._sync_hooks(list(active_repos.keys()),
                         asynchronous=async_hooks)

    # Update changed names for repositories stored in DB
    db_repos = Repository.query.filter(
        Repository.user_id == self.user_id,
        Repository.github_id.in_(github_repos.keys())
    )
    for repo in db_repos:
        gh_repo = github_repos.get(repo.github_id)
        if gh_repo and repo.name != gh_repo.full_name:
            repo.name = gh_repo.full_name
            db.session.add(repo)

    # Remove ownership from repositories that the user has no longer
    # 'admin' permissions, or have been deleted.
    Repository.query.filter(
        Repository.user_id == self.user_id,
        ~Repository.github_id.in_(github_repos.keys())
    ).update(dict(user_id=None, hook=None), synchronize_session=False)

    # Update repos and last sync
    self.account.extra_data.update(dict(
        repos=active_repos,
        last_sync=iso_utcnow(),
    ))
    self.account.extra_data.changed()
    db.session.add(self.account)
def install_board(board_id, board_options, hwpack='arduino',
                  replace_existing=False):
    """install board in boards.txt.

    :param board_id: string identifier
    :param board_options: dict-like
    :param replace_existing: bool
    :rtype: None
    """
    doaction = 0
    if board_id in boards(hwpack).keys():
        log.debug('board already exists: %s', board_id)
        if replace_existing:
            log.debug('remove board: %s', board_id)
            remove_board(board_id)
            doaction = 1
    else:
        doaction = 1
    if doaction:
        lines = bunch2properties(board_id, board_options)
        boards_txt().write_lines([''] + lines, append=1)
def _gettables(self):
    """Return a list of hdf5 tables named PyMCsamples.
    """
    groups = self._h5file.list_nodes("/")
    if len(groups) == 0:
        return []
    else:
        return [gr.PyMCsamples for gr in groups
                if gr._v_name[:5] == 'chain']
def remove_jobs(self, mask):
    """Mark all jobs that match a mask as 'removed' """
    jobnames = self.table[mask]['jobname']
    jobkeys = self.table[mask]['jobkey']
    self.table[mask]['status'] = JobStatus.removed
    for jobname, jobkey in zip(jobnames, jobkeys):
        fullkey = JobDetails.make_fullkey(jobname, jobkey)
        self._cache.pop(fullkey).status = JobStatus.removed
    self.write_table_file()
def _should_send(self, rebuild, success, auto_canceled, manual_canceled):
    """Return True if any state in `self.send_on` meets given conditions,
    thus meaning that a notification mail should be sent.
    """
    should_send = False

    should_send_mapping = {
        self.MANUAL_SUCCESS: not rebuild and success,
        self.MANUAL_FAIL: not rebuild and not success,
        self.MANUAL_CANCELED: not rebuild and manual_canceled,
        self.AUTO_SUCCESS: rebuild and success,
        self.AUTO_FAIL: rebuild and not success,
        self.AUTO_CANCELED: rebuild and auto_canceled
    }

    for state in self.send_on:
        should_send |= should_send_mapping[state]
    return should_send
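# A tiny standalone illustration of the mapping pattern above, with
# made-up state names rather than the class's real constants: each
# configured state contributes one precomputed boolean, OR-ed together.
MANUAL_SUCCESS, AUTO_FAIL = 'manual_success', 'auto_fail'
send_on = {MANUAL_SUCCESS, AUTO_FAIL}
rebuild, success = False, True
mapping = {MANUAL_SUCCESS: not rebuild and success,
           AUTO_FAIL: rebuild and not success}
assert any(mapping[state] for state in send_on)  # a manual success fires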
def add_securitygroup_rule(self, group_id, remote_ip=None, remote_group=None,
                           direction=None, ethertype=None, port_max=None,
                           port_min=None, protocol=None):
    """Add a rule to a security group

    :param int group_id: The ID of the security group to add this rule to
    :param str remote_ip: The remote IP or CIDR to enforce the rule on
    :param int remote_group: The remote security group ID to enforce
                             the rule on
    :param str direction: The direction to enforce (egress or ingress)
    :param str ethertype: The ethertype to enforce (IPv4 or IPv6)
    :param int port_max: The upper port bound to enforce
                         (icmp code if the protocol is icmp)
    :param int port_min: The lower port bound to enforce
                         (icmp type if the protocol is icmp)
    :param str protocol: The protocol to enforce (icmp, udp, tcp)
    """
    rule = {'direction': direction}
    if ethertype is not None:
        rule['ethertype'] = ethertype
    if port_max is not None:
        rule['portRangeMax'] = port_max
    if port_min is not None:
        rule['portRangeMin'] = port_min
    if protocol is not None:
        rule['protocol'] = protocol
    if remote_ip is not None:
        rule['remoteIp'] = remote_ip
    if remote_group is not None:
        rule['remoteGroupId'] = remote_group
    return self.add_securitygroup_rules(group_id, [rule])
def connection_made(self, transport: asyncio.BaseTransport) -> None:
    """
    Configure write buffer limits.

    The high-water limit is defined by ``self.write_limit``.

    The low-water limit currently defaults to ``self.write_limit // 4`` in
    :meth:`~asyncio.WriteTransport.set_write_buffer_limits`, which should
    be all right for reasonable use cases of this library.

    This is the earliest point where we can get hold of the transport,
    which means it's the best point for configuring it.
    """
    logger.debug("%s - event = connection_made(%s)", self.side, transport)
    # mypy thinks transport is a BaseTransport, not a Transport.
    transport.set_write_buffer_limits(self.write_limit)  # type: ignore
    super().connection_made(transport)
def createEditor(self, parent, column, operator, value):
    """
    Creates a new editor for the system.
    """
    editor = super(EnumPlugin, self).createEditor(parent,
                                                  column,
                                                  operator,
                                                  value)
    editor.setEnum(column.enum())
    if operator in ('contains', 'does not contain'):
        editor.setCheckable(True)
    editor.setCurrentValue(value)
    return editor
def fetch(self):
    """
    Fetch & return a new `Action` object representing the action's
    current state

    :rtype: Action
    :raises DOAPIError: if the API endpoint replies with an error
    """
    api = self.doapi_manager
    return api._action(api.request(self.url)["action"])
def _identifier_filtered_iterator(graph):
    """Iterate over all (namespace, name) pairs appearing in the graph."""
    for data in graph:
        for pair in _get_node_names(data):
            yield pair

        for member in data.get(MEMBERS, []):
            for pair in _get_node_names(member):
                yield pair

    for ((_, _, data), side) in itt.product(graph.edges(data=True), (SUBJECT, OBJECT)):
        side_data = data.get(side)
        if side_data is None:
            continue

        modifier = side_data.get(MODIFIER)
        effect = side_data.get(EFFECT)

        if modifier == ACTIVITY and effect is not None and NAMESPACE in effect and NAME in effect:
            yield effect[NAMESPACE], effect[NAME]

        elif modifier == TRANSLOCATION and effect is not None:
            from_loc = effect.get(FROM_LOC)
            if from_loc is not None and NAMESPACE in from_loc and NAME in from_loc:
                yield from_loc[NAMESPACE], from_loc[NAME]

            to_loc = effect.get(TO_LOC)
            if to_loc is not None and NAMESPACE in to_loc and NAME in to_loc:
                yield to_loc[NAMESPACE], to_loc[NAME]

        location = side_data.get(LOCATION)
        if location is not None and NAMESPACE in location and NAME in location:
            yield location[NAMESPACE], location[NAME]
def check(self, state, when):
    """
    Checks state `state` to see if the breakpoint should fire.

    :param state:   The state.
    :param when:    Whether the check is happening before or after the event.
    :return:        A boolean representing whether the checkpoint should fire.
    """
    ok = self.enabled and (when == self.when or self.when == BP_BOTH)
    if not ok:
        return ok
    l.debug("... after enabled and when: %s", ok)

    for a in [_ for _ in self.kwargs if not _.endswith("_unique")]:
        current_expr = getattr(state.inspect, a)
        needed = self.kwargs.get(a, None)

        l.debug("... checking condition %s", a)

        if current_expr is None and needed is None:
            l.debug("...... both None, True")
            c_ok = True
        elif current_expr is not None and needed is not None:
            if state.solver.solution(current_expr, needed):
                l.debug("...... is_solution!")
                c_ok = True
            else:
                l.debug("...... not solution...")
                c_ok = False
            if c_ok and self.kwargs.get(a + '_unique', True):
                l.debug("...... checking uniqueness")
                if not state.solver.unique(current_expr):
                    l.debug("...... not unique")
                    c_ok = False
        else:
            l.debug("...... one None, False")
            c_ok = False

        ok = ok and c_ok
        if not ok:
            return ok
        l.debug("... after condition %s: %s", a, ok)

    ok = ok and (self.condition is None or self.condition(state))
    l.debug("... after condition func: %s", ok)
    return ok
def calibrate(self, dataset_id, pre_launch_coeffs=False, calib_coeffs=None):
    """Calibrate the data
    """
    tic = datetime.now()

    if calib_coeffs is None:
        calib_coeffs = {}

    units = {'reflectance': '%',
             'brightness_temperature': 'K',
             'counts': '',
             'radiance': 'W*m-2*sr-1*cm ?'}

    if dataset_id.name in ("3a", "3b") and self._is3b is None:
        # Is it 3a or 3b:
        is3b = np.expand_dims(
            np.bitwise_and(
                np.right_shift(self._data['scnlinbit'], 0), 1) == 1, 1)
        self._is3b = np.repeat(is3b,
                               self._data['hrpt'][0].shape[0], axis=1)

    try:
        vis_idx = ['1', '2', '3a'].index(dataset_id.name)
        ir_idx = None
    except ValueError:
        vis_idx = None
        ir_idx = ['3b', '4', '5'].index(dataset_id.name)

    if vis_idx is not None:
        coeffs = calib_coeffs.get('ch' + dataset_id.name)
        ds = create_xarray(
            _vis_calibrate(self._data,
                           vis_idx,
                           dataset_id.calibration,
                           pre_launch_coeffs,
                           coeffs,
                           mask=(dataset_id.name == '3a' and self._is3b)))
    else:
        ds = create_xarray(
            _ir_calibrate(self._header,
                          self._data,
                          ir_idx,
                          dataset_id.calibration,
                          mask=(dataset_id.name == '3b' and
                                np.logical_not(self._is3b))))

    if dataset_id.name == '3a' and np.all(np.isnan(ds)):
        raise ValueError("Empty dataset for channel 3A")
    if dataset_id.name == '3b' and np.all(np.isnan(ds)):
        raise ValueError("Empty dataset for channel 3B")

    ds.attrs['units'] = units[dataset_id.calibration]
    ds.attrs.update(dataset_id._asdict())

    logger.debug("Calibration time %s", str(datetime.now() - tic))

    return ds
def check(self, F):
    """Rough sanity checks on the input function.
    """
    assert F.ndim == 1, "checker only supports 1D"

    f = self.xfac * F
    fabs = np.abs(f)

    iQ1, iQ3 = np.searchsorted(fabs.cumsum(),
                               np.array([0.25, 0.75]) * fabs.sum())
    assert 0 != iQ1 != iQ3 != self.Nin, "checker giving up"

    fabs_l = fabs[:iQ1].mean()
    fabs_m = fabs[iQ1:iQ3].mean()
    fabs_r = fabs[iQ3:].mean()

    if fabs_l > fabs_m:
        warnings.warn("left wing seems heavy: {:.2g} vs {:.2g}, "
                      "change tilt and mind convergence".format(fabs_l, fabs_m),
                      RuntimeWarning)
    if fabs_m < fabs_r:
        warnings.warn("right wing seems heavy: {:.2g} vs {:.2g}, "
                      "change tilt and mind convergence".format(fabs_m, fabs_r),
                      RuntimeWarning)

    if fabs[0] > fabs[1]:
        warnings.warn("left tail may blow up: {:.2g} vs {:.2g}, "
                      "change tilt or avoid extrapolation".format(f[0], f[1]),
                      RuntimeWarning)
    if fabs[-2] < fabs[-1]:
        warnings.warn("right tail may blow up: {:.2g} vs {:.2g}, "
                      "change tilt or avoid extrapolation".format(f[-2], f[-1]),
                      RuntimeWarning)

    if f[0]*f[1] <= 0:
        warnings.warn("left tail looks wiggly: {:.2g} vs {:.2g}, "
                      "avoid extrapolation".format(f[0], f[1]),
                      RuntimeWarning)
    if f[-2]*f[-1] <= 0:
        warnings.warn("right tail looks wiggly: {:.2g} vs {:.2g}, "
                      "avoid extrapolation".format(f[-2], f[-1]),
                      RuntimeWarning)
def get_builds(self, project, definitions=None, queues=None, build_number=None,
               min_time=None, max_time=None, requested_for=None,
               reason_filter=None, status_filter=None, result_filter=None,
               tag_filters=None, properties=None, top=None,
               continuation_token=None, max_builds_per_definition=None,
               deleted_filter=None, query_order=None, branch_name=None,
               build_ids=None, repository_id=None, repository_type=None):
    """GetBuilds.
    Gets a list of builds.
    :param str project: Project ID or project name
    :param [int] definitions: A comma-delimited list of definition IDs. If specified, filters to builds for these definitions.
    :param [int] queues: A comma-delimited list of queue IDs. If specified, filters to builds that ran against these queues.
    :param str build_number: If specified, filters to builds that match this build number. Append * to do a prefix search.
    :param datetime min_time: If specified, filters to builds that finished/started/queued after this date based on the queryOrder specified.
    :param datetime max_time: If specified, filters to builds that finished/started/queued before this date based on the queryOrder specified.
    :param str requested_for: If specified, filters to builds requested for the specified user.
    :param str reason_filter: If specified, filters to builds that match this reason.
    :param str status_filter: If specified, filters to builds that match this status.
    :param str result_filter: If specified, filters to builds that match this result.
    :param [str] tag_filters: A comma-delimited list of tags. If specified, filters to builds that have the specified tags.
    :param [str] properties: A comma-delimited list of properties to retrieve.
    :param int top: The maximum number of builds to return.
    :param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of builds.
    :param int max_builds_per_definition: The maximum number of builds to return per definition.
    :param str deleted_filter: Indicates whether to exclude, include, or only return deleted builds.
    :param str query_order: The order in which builds should be returned.
    :param str branch_name: If specified, filters to builds that built branches that built this branch.
    :param [int] build_ids: A comma-delimited list that specifies the IDs of builds to retrieve.
    :param str repository_id: If specified, filters to builds that built from this repository.
    :param str repository_type: If specified, filters to builds that built from repositories of this type.
    :rtype: [Build]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if definitions is not None:
        definitions = ",".join(map(str, definitions))
        query_parameters['definitions'] = self._serialize.query('definitions', definitions, 'str')
    if queues is not None:
        queues = ",".join(map(str, queues))
        query_parameters['queues'] = self._serialize.query('queues', queues, 'str')
    if build_number is not None:
        query_parameters['buildNumber'] = self._serialize.query('build_number', build_number, 'str')
    if min_time is not None:
        query_parameters['minTime'] = self._serialize.query('min_time', min_time, 'iso-8601')
    if max_time is not None:
        query_parameters['maxTime'] = self._serialize.query('max_time', max_time, 'iso-8601')
    if requested_for is not None:
        query_parameters['requestedFor'] = self._serialize.query('requested_for', requested_for, 'str')
    if reason_filter is not None:
        query_parameters['reasonFilter'] = self._serialize.query('reason_filter', reason_filter, 'str')
    if status_filter is not None:
        query_parameters['statusFilter'] = self._serialize.query('status_filter', status_filter, 'str')
    if result_filter is not None:
        query_parameters['resultFilter'] = self._serialize.query('result_filter', result_filter, 'str')
    if tag_filters is not None:
        tag_filters = ",".join(tag_filters)
        query_parameters['tagFilters'] = self._serialize.query('tag_filters', tag_filters, 'str')
    if properties is not None:
        properties = ",".join(properties)
        query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    if continuation_token is not None:
        query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
    if max_builds_per_definition is not None:
        query_parameters['maxBuildsPerDefinition'] = self._serialize.query('max_builds_per_definition', max_builds_per_definition, 'int')
    if deleted_filter is not None:
        query_parameters['deletedFilter'] = self._serialize.query('deleted_filter', deleted_filter, 'str')
    if query_order is not None:
        query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
    if branch_name is not None:
        query_parameters['branchName'] = self._serialize.query('branch_name', branch_name, 'str')
    if build_ids is not None:
        build_ids = ",".join(map(str, build_ids))
        query_parameters['buildIds'] = self._serialize.query('build_ids', build_ids, 'str')
    if repository_id is not None:
        query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str')
    if repository_type is not None:
        query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str')
    response = self._send(http_method='GET',
                          location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[Build]', self._unwrap_collection(response))
def _cursor(self, *args, **kwargs):
    """A "tough" version of the method cursor()."""
    # The args and kwargs are not part of the standard,
    # but some database modules seem to use these.
    transaction = self._transaction
    if not transaction:
        self._ping_check(2)
    try:
        if self._maxusage:
            if self._usage >= self._maxusage:
                # the connection was used too often
                raise self._failure
        cursor = self._con.cursor(*args, **kwargs)  # try to get a cursor
    except self._failures as error:  # error in getting cursor
        try:  # try to reopen the connection
            con = self._create()
        except Exception:
            pass
        else:
            try:  # and try one more time to get a cursor
                cursor = con.cursor(*args, **kwargs)
            except Exception:
                pass
            else:
                self._close()
                self._store(con)
                if transaction:
                    raise error  # re-raise the original error again
                return cursor
            try:
                con.close()
            except Exception:
                pass
        if transaction:
            self._transaction = False
        raise error  # re-raise the original error again
    return cursor
def add_tenant_user_role(request, project=None, user=None, role=None,
                         group=None, domain=None):
    """Adds a role for a user on a tenant."""
    manager = keystoneclient(request, admin=True).roles
    if VERSIONS.active < 3:
        manager.add_user_role(user, role, project)
    else:
        manager.grant(role, user=user, project=project,
                      group=group, domain=domain)
def import_module(mod_str):
    """
    inspired by a post on stackoverflow
    :param mod_str: import path string like 'netshowlib.linux.provider_discovery'
    :return: module matching the import statement
    """
    _module = __import__(mod_str)
    _mod_parts = mod_str.split('.')
    for _mod_part in _mod_parts[1:]:
        _module = getattr(_module, _mod_part)
    return _module
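# A quick usage sketch: unlike a bare __import__, the helper walks down to
# the leaf module, so dotted paths resolve to the module itself.
mod = import_module('os.path')
assert mod.join('a', 'b') == __import__('os').path.join('a', 'b')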
def _debug_linter_status(linter, filename, show_lint_files):
    """Indicate that we are running this linter if required."""
    if show_lint_files:
        print("{linter}: {filename}".format(linter=linter, filename=filename))
def add_params(param_list_left, param_list_right):
    """Add two lists of parameters one by one

    :param param_list_left: list of numpy arrays
    :param param_list_right: list of numpy arrays
    :return: list of numpy arrays
    """
    res = []
    for x, y in zip(param_list_left, param_list_right):
        res.append(x + y)
    return res
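# A minimal usage sketch (assumes numpy is available as np): elementwise
# addition is applied pairwise across the two parameter lists.
import numpy as np
summed = add_params([np.ones(2), np.zeros(3)], [np.ones(2), np.ones(3)])
assert [a.tolist() for a in summed] == [[2.0, 2.0], [1.0, 1.0, 1.0]]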
def add_to_bashrc(self, line, match_regexp=None, note=None, loglevel=logging.DEBUG):
    """Takes care of adding a line to everyone's bashrc
    (/etc/bash.bashrc).

    @param line:          Line to add.
    @param match_regexp:  See add_line_to_file()
    @param note:          See send()

    @return:              See add_line_to_file()
    """
    shutit = self.shutit
    shutit.handle_note(note)
    if not shutit_util.check_regexp(match_regexp):
        shutit.fail('Illegal regexp found in add_to_bashrc call: ' + match_regexp)  # pragma: no cover
    if self.whoami() == 'root':
        shutit.add_line_to_file(line, '/root/.bashrc', match_regexp=match_regexp, loglevel=loglevel)
    else:
        shutit.add_line_to_file(line, '${HOME}/.bashrc', match_regexp=match_regexp, loglevel=loglevel)
    shutit.add_line_to_file(line, '/etc/bash.bashrc', match_regexp=match_regexp, loglevel=loglevel)
    return True
def configure_logger(self):
    """Configure the test batch runner logger
    """
    logger_name = 'brome_runner'

    self.logger = logging.getLogger(logger_name)

    format_ = BROME_CONFIG['logger_runner']['format']

    # Stream logger
    if BROME_CONFIG['logger_runner']['streamlogger']:
        sh = logging.StreamHandler()
        stream_formatter = logging.Formatter(format_)
        sh.setFormatter(stream_formatter)
        self.logger.addHandler(sh)

    # File logger
    if BROME_CONFIG['logger_runner']['filelogger'] and self.runner_dir:
        self.log_file_path = os.path.join(
            self.runner_dir,
            '%s.log' % logger_name
        )
        self.relative_log_file_path = os.path.join(
            self.relative_runner_dir,
            '%s.log' % logger_name
        )
        fh = logging.FileHandler(
            self.log_file_path
        )
        file_formatter = logging.Formatter(format_)
        fh.setFormatter(file_formatter)
        self.logger.addHandler(fh)

    self.logger.setLevel(
        getattr(
            logging,
            BROME_CONFIG['logger_runner']['level']
        )
    )
def or_has(self, relation, operator='>=', count=1):
    """
    Add a relationship count condition to the query with an "or".

    :param relation: The relation to count
    :type relation: str

    :param operator: The operator
    :type operator: str

    :param count: The count
    :type count: int

    :rtype: Builder
    """
    return self.has(relation, operator, count, 'or')
def to_dot(self, name='BDD'):  # pragma: no cover
    """Convert to DOT language representation.

    See the `DOT language reference
    <http://www.graphviz.org/content/dot-language>`_
    for details.
    """
    parts = ['graph', name, '{']
    for node in self.dfs_postorder():
        if node is BDDNODEZERO:
            parts += ['n' + str(id(node)), '[label=0,shape=box];']
        elif node is BDDNODEONE:
            parts += ['n' + str(id(node)), '[label=1,shape=box];']
        else:
            v = _VARS[node.root]
            parts.append('n' + str(id(node)))
            parts.append('[label="{}",shape=circle];'.format(v))
    for node in self.dfs_postorder():
        if node is not BDDNODEZERO and node is not BDDNODEONE:
            parts += ['n' + str(id(node)), '--',
                      'n' + str(id(node.lo)),
                      '[label=0,style=dashed];']
            parts += ['n' + str(id(node)), '--',
                      'n' + str(id(node.hi)),
                      '[label=1];']
    parts.append('}')
    return " ".join(parts)
def create(*args, **kwargs):
    """
    Create an SDR classifier factory.
    The implementation of the SDR Classifier can be specified with
    the "implementation" keyword argument.

    The SDRClassifierFactory uses the implementation as specified in
    `Default NuPIC Configuration <default-config.html>`_.
    """
    impl = kwargs.pop('implementation', None)
    if impl is None:
        impl = Configuration.get('nupic.opf.sdrClassifier.implementation')
    if impl == 'py':
        return SDRClassifier(*args, **kwargs)
    elif impl == 'cpp':
        return FastSDRClassifier(*args, **kwargs)
    elif impl == 'diff':
        return SDRClassifierDiff(*args, **kwargs)
    else:
        raise ValueError('Invalid classifier implementation (%r). Value must be '
                         '"py", "cpp" or "diff".' % impl)
def draw(self, **kwargs):
    """
    Draws the heatmap of the ranking matrix of variables.
    """
    # Set the axes aspect to be equal
    self.ax.set_aspect("equal")

    # Generate a mask for the upper triangle
    mask = np.zeros_like(self.ranks_, dtype=np.bool)
    mask[np.triu_indices_from(mask)] = True

    # Draw the heatmap
    # TODO: Move mesh to a property so the colorbar can be finalized
    data = np.ma.masked_where(mask, self.ranks_)
    mesh = self.ax.pcolormesh(data, cmap=self.colormap, vmin=-1, vmax=1)

    # Set the Axis limits
    self.ax.set(
        xlim=(0, data.shape[1]), ylim=(0, data.shape[0])
    )

    # Add the colorbar
    cb = self.ax.figure.colorbar(mesh, None, self.ax)
    cb.outline.set_linewidth(0)

    # Reverse the rows to get the lower left triangle
    self.ax.invert_yaxis()

    # Add ticks and tick labels
    self.ax.set_xticks(np.arange(len(self.ranks_)) + 0.5)
    self.ax.set_yticks(np.arange(len(self.ranks_)) + 0.5)
    if self.show_feature_names_:
        self.ax.set_xticklabels(self.features_, rotation=90)
        self.ax.set_yticklabels(self.features_)
    else:
        self.ax.set_xticklabels([])
        self.ax.set_yticklabels([])
def parse_timestamp(x):
    """Parse ISO8601 formatted timestamp."""
    dt = dateutil.parser.parse(x)
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=pytz.utc)
    return dt
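# A brief usage sketch (assumes `import dateutil.parser` and `import pytz`
# at module level): naive timestamps are coerced to UTC, aware ones keep
# their offset.
dt = parse_timestamp("2021-05-01T12:00:00")
assert dt.tzinfo is not None  # naive input becomes UTC-aware
assert parse_timestamp("2021-05-01T12:00:00+02:00").utcoffset().total_seconds() == 7200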
def create_global_secondary_index(table_name, global_index, region=None,
                                  key=None, keyid=None, profile=None):
    '''
    Creates a single global secondary index on a DynamoDB table.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_dynamodb.create_global_secondary_index table_name /
        index_name
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    table = Table(table_name, connection=conn)
    return table.create_global_secondary_index(global_index)
def reassign_label(cls, destination_cluster, label):
    """
    Reassign a label from one cluster to another.

    Args:
        `destination_cluster`: id/label of the cluster to move the label to

        `label`: label to be moved from the source cluster
    """
    conn = Qubole.agent(version=Cluster.api_version)
    data = {
        "destination_cluster": destination_cluster,
        "label": label
    }
    return conn.put(cls.rest_entity_path + "/reassign-label", data)
def import_wikipage(self, slug, content, **attrs):
    """
    Import a Wiki page and return a :class:`WikiPage` object.

    :param slug: slug of the :class:`WikiPage`
    :param content: content of the :class:`WikiPage`
    :param attrs: optional attributes for the :class:`WikiPage`
    """
    return WikiPages(self.requester).import_(
        self.id, slug, content, **attrs
    )
def upload(self, docs_base, release):
    """Upload docs in ``docs_base`` to the target of this uploader."""
    return getattr(self, '_to_' + self.target)(docs_base, release)
def ws_db996(self, value=None):
    """ Corresponds to IDD Field `ws_db996`
    Mean wind speed coincident with 99.6% dry-bulb temperature

    Args:
        value (float): value for IDD Field `ws_db996`
            Unit: m/s
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float '
                             'for field `ws_db996`'.format(value))
    self._ws_db996 = value
def random(self, namespace=0):
    """
    Returns query string for random page
    """
    query = self.LIST.substitute(
        WIKI=self.uri,
        ENDPOINT=self.endpoint,
        LIST='random')
    query += "&rnlimit=1&rnnamespace=%d" % namespace

    emoji = [
        u'\U0001f32f',  # burrito or wrap
        u'\U0001f355',  # slice of pizza
        u'\U0001f35c',  # steaming bowl of ramen
        u'\U0001f363',  # sushi
        u'\U0001f369',  # doughnut
        u'\U0001f36a',  # cookie
        u'\U0001f36d',  # lollipop
        u'\U0001f370',  # strawberry shortcake
    ]

    action = 'random'
    if namespace:
        action = 'random:%d' % namespace

    self.set_status(action, random.choice(emoji))
    return query
def _compare_variables_function_generator(
        method_string, aggregation_func):
    """Return a function usable as a comparison method for class |Variable|.

    Pass the specific method (e.g. `__eq__`) and the corresponding operator
    (e.g. `==`) as strings.  Also pass either |numpy.all| or |numpy.any|
    for aggregating multiple boolean values.
    """
    def comparison_function(self, other):
        """Wrapper for comparison functions for class |Variable|."""
        if self is other:
            return method_string in ('__eq__', '__le__', '__ge__')
        method = getattr(self.value, method_string)
        try:
            if hasattr(type(other), '__hydpy__get_value__'):
                other = other.__hydpy__get_value__()
            result = method(other)
            if result is NotImplemented:
                return result
            return aggregation_func(result)
        except BaseException:
            objecttools.augment_excmessage(
                f'While trying to compare variable '
                f'{objecttools.elementphrase(self)} with object '
                f'`{other}` of type `{objecttools.classname(other)}`')
    return comparison_function
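# A stripped-down sketch of the same factory pattern, with no hydpy
# dependencies (the Vec class and _make_cmp helper are made up here): one
# helper builds rich-comparison methods for a wrapper class, aggregating
# elementwise results with numpy.
import numpy as np

def _make_cmp(method_string, aggregation_func):
    def cmp(self, other):
        result = getattr(self.value, method_string)(getattr(other, 'value', other))
        return aggregation_func(result)
    return cmp

class Vec:
    def __init__(self, value):
        self.value = np.asarray(value)
    __eq__ = _make_cmp('__eq__', np.all)
    __lt__ = _make_cmp('__lt__', np.all)

assert Vec([1, 2]) == Vec([1, 2])
assert Vec([1, 2]) < Vec([3, 4])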
def atype_view_asset(self):
    """View the asset that is currently selected for the current assettype

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_atype:
        return
    i = self.atype_asset_treev.currentIndex()
    item = i.internalPointer()
    if item:
        asset = item.internal_data()
        if isinstance(asset, djadapter.models.Asset):
            self.view_asset(asset)
def add_peddy_information(config_data):
    """Add information from peddy outfiles to the individuals"""
    ped_info = {}
    ped_check = {}
    sex_check = {}
    relations = []

    if config_data.get('peddy_ped'):
        file_handle = open(config_data['peddy_ped'], 'r')
        for ind_info in parse_peddy_ped(file_handle):
            ped_info[ind_info['sample_id']] = ind_info

    if config_data.get('peddy_ped_check'):
        file_handle = open(config_data['peddy_ped_check'], 'r')
        for pair_info in parse_peddy_ped_check(file_handle):
            ped_check[(pair_info['sample_a'], pair_info['sample_b'])] = pair_info

    if config_data.get('peddy_sex_check'):
        file_handle = open(config_data['peddy_sex_check'], 'r')
        for ind_info in parse_peddy_sex_check(file_handle):
            sex_check[ind_info['sample_id']] = ind_info

    if not ped_info:
        return

    analysis_inds = {}
    for ind in config_data['samples']:
        ind_id = ind['sample_id']
        analysis_inds[ind_id] = ind

    for ind_id in analysis_inds:
        ind = analysis_inds[ind_id]

        # Check if peddy has inferred the ancestry
        if ind_id in ped_info:
            ind['predicted_ancestry'] = ped_info[ind_id].get('ancestry-prediction', 'UNKNOWN')

        # Check if peddy has inferred the sex
        if ind_id in sex_check:
            if sex_check[ind_id]['error']:
                ind['confirmed_sex'] = False
            else:
                ind['confirmed_sex'] = True

        # Check if peddy has confirmed parental relations
        for parent in ['mother', 'father']:
            # If we are looking at an individual with parents
            if ind[parent] != '0':
                # Check if the child/parent pair is in peddy data
                for pair in ped_check:
                    if ind_id in pair and ind[parent] in pair:
                        # If there is a parent error we mark that
                        if ped_check[pair]['parent_error']:
                            analysis_inds[ind[parent]]['confirmed_parent'] = False
                        else:
                            # Else if parent confirmation has not been done
                            if 'confirmed_parent' not in analysis_inds[ind[parent]]:
                                # Set confirmation to True
                                analysis_inds[ind[parent]]['confirmed_parent'] = True
def as_list(callable):
    """Convert a scalar validator into a list validator"""
    @wraps(callable)
    def wrapper(value_iter):
        return [callable(value) for value in value_iter]
    return wrapper
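# A short usage sketch (assumes `from functools import wraps` at module
# level): lifting a scalar validator over an iterable of values.
to_ints = as_list(int)
assert to_ints(["1", "2", "3"]) == [1, 2, 3]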
def createContactItem(self, person, notes):
    """
    Create a new L{Notes} associated with the given person based on the
    given string.

    @type person: L{Person}
    @param person: The person with whom to associate the new L{Notes}.

    @type notes: C{unicode}
    @param notes: The value to use for the I{notes} attribute of the newly
        created L{Notes}.  If C{''}, no L{Notes} will be created.

    @rtype: L{Notes} or C{NoneType}
    """
    if notes:
        return Notes(
            store=person.store,
            person=person,
            notes=notes)
def mass1_from_tau0_tau3(tau0, tau3, f_lower):
    r"""Returns the primary mass from the given :math:`\tau_0, \tau_3`."""
    mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower)
    eta = eta_from_tau0_tau3(tau0, tau3, f_lower)
    return mass1_from_mtotal_eta(mtotal, eta)
def download(self, files=None, destination=None, overwrite=False, callback=None):
    """Download file or files.

    :param files: file or files to download
    :param destination: destination path (defaults to user's home directory)
    :param overwrite: replace existing files?
    :param callback: callback function that will receive total file size
        and written bytes as arguments
    :type files: ``list`` of ``dict`` with file data from filemail
    :type destination: ``str`` or ``unicode``
    :type overwrite: ``bool``
    :type callback: ``func``
    """
    if files is None:
        files = self.files
    elif not isinstance(files, list):
        files = [files]

    if destination is None:
        destination = os.path.expanduser('~')

    for f in files:
        if not isinstance(f, dict):
            raise FMBaseError('File must be a <dict> with file data')

        self._download(f, destination, overwrite, callback)
Download file or files. :param files: file or files to download :param destination: destination path (defaults to users home directory) :param overwrite: replace existing files? :param callback: callback function that will receive total file size and written bytes as arguments :type files: ``list`` of ``dict`` with file data from filemail :type destination: ``str`` or ``unicode`` :type overwrite: ``bool`` :type callback: ``func``
def _gradient(self, diff, d, coords): """Compute the gradient. Args: diff (`array-like`): [`m`, `m`] matrix. `D` - `d` d (`array-like`): [`m`, `m`] matrix. coords (`array-like`): [`m`, `n`] matrix. Returns: `np.array`: Gradient, shape [`m`, `n`]. """ denom = np.copy(d) denom[denom == 0] = 1e-5 with np.errstate(divide='ignore', invalid='ignore'): K = -2 * diff / denom K[np.isnan(K)] = 0 g = np.empty_like(coords) for n in range(self.n): for i in range(self.m): # Vectorised version of (~70 times faster) # for j in range(self.m): # delta_g = ((coords[i, n] - coords[j, n]) * K[i, j]).sum() # g[i, n] += delta_g g[i, n] = ((coords[i, n] - coords[:, n]) * K[i, :]).sum() return g
Compute the gradient. Args: diff (`array-like`): [`m`, `m`] matrix. `D` - `d` d (`array-like`): [`m`, `m`] matrix. coords (`array-like`): [`m`, `n`] matrix. Returns: `np.array`: Gradient, shape [`m`, `n`].
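The commented-out double loop above is replaced by a row-wise vectorised sum; a standalone numpy check that the two formulations agree:

import numpy as np

rs = np.random.RandomState(0)
m, n = 4, 2
coords = rs.normal(size=(m, n))
K = rs.normal(size=(m, m))

# Explicit double loop, as in the original comment.
g_loop = np.zeros((m, n))
for dim in range(n):
    for i in range(m):
        for j in range(m):
            g_loop[i, dim] += (coords[i, dim] - coords[j, dim]) * K[i, j]

# Vectorised inner sum, as used in _gradient.
g_vec = np.empty_like(coords)
for dim in range(n):
    for i in range(m):
        g_vec[i, dim] = ((coords[i, dim] - coords[:, dim]) * K[i, :]).sum()

print(np.allclose(g_loop, g_vec))  # True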
def get_site_packages(venv): ''' Return the path to the site-packages directory of a virtualenv venv Path to the virtualenv. CLI Example: .. code-block:: bash salt '*' virtualenv.get_site_packages /path/to/my/venv ''' bin_path = _verify_virtualenv(venv) ret = __salt__['cmd.exec_code_all']( bin_path, 'from distutils import sysconfig; ' 'print(sysconfig.get_python_lib())' ) if ret['retcode'] != 0: raise CommandExecutionError('{stdout}\n{stderr}'.format(**ret)) return ret['stdout']
Return the path to the site-packages directory of a virtualenv venv Path to the virtualenv. CLI Example: .. code-block:: bash salt '*' virtualenv.get_site_packages /path/to/my/venv
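For reference, the probe code that the function injects into the virtualenv's interpreter can be run directly against the current interpreter:

from distutils import sysconfig

# Same probe as the exec_code_all call above, but for the running interpreter.
print(sysconfig.get_python_lib())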
def append(self, new: Statement) -> None:
    """Append a HistoryItem to the end of the History list

    :param new: command line to convert to HistoryItem and add to the end of the History list
    """
    new = HistoryItem(new)
    list.append(self, new)
    new.idx = len(self)
Append a HistoryItem to the end of the History list

:param new: command line to convert to HistoryItem and add to the end of the History list
def _compute_centers(self, X, sparse, rs): """Generate RBF centers""" # use supplied centers if present centers = self._get_user_components('centers') # use points taken uniformly from the bounding # hyperrectangle if (centers is None): n_features = X.shape[1] if (sparse): fxr = range(n_features) cols = [X.getcol(i) for i in fxr] min_dtype = X.dtype.type(1.0e10) sp_min = lambda col: np.minimum(min_dtype, np.min(col.data)) min_Xs = np.array(map(sp_min, cols)) max_dtype = X.dtype.type(-1.0e10) sp_max = lambda col: np.maximum(max_dtype, np.max(col.data)) max_Xs = np.array(map(sp_max, cols)) else: min_Xs = X.min(axis=0) max_Xs = X.max(axis=0) spans = max_Xs - min_Xs ctrs_size = (self.n_hidden, n_features) centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size) self.components_['centers'] = centers
Generate RBF centers
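A self-contained sketch of the dense branch above: candidate RBF centers are drawn uniformly from the bounding hyperrectangle of X (n_hidden stands in for self.n_hidden):

import numpy as np

rs = np.random.RandomState(0)
X = rs.normal(size=(100, 3))  # toy data
n_hidden = 5

min_Xs = X.min(axis=0)
max_Xs = X.max(axis=0)
spans = max_Xs - min_Xs
centers = min_Xs + spans * rs.uniform(0.0, 1.0, (n_hidden, X.shape[1]))
print(centers.shape)  # (5, 3)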
def command_packet(cmd): """ Build a command message. """ return message('Command', Container(string_length=len(cmd), string=bytes(cmd, ENCODING)), len(cmd) + 2)
Build a command message.
def results(self, **query_params): """Returns a streaming handle to this job's search results. To get a nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`, as in:: import splunklib.client as client import splunklib.results as results from time import sleep service = client.connect(...) job = service.jobs.create("search * | head 5") while not job.is_done(): sleep(.2) rr = results.ResultsReader(job.results()) for result in rr: if isinstance(result, results.Message): # Diagnostic messages may be returned in the results print '%s: %s' % (result.type, result.message) elif isinstance(result, dict): # Normal events are returned as dicts print result assert rr.is_preview == False Results are not available until the job has finished. If called on an unfinished job, the result is an empty event set. This method makes a single roundtrip to the server, plus at most two additional round trips if the ``autologin`` field of :func:`connect` is set to ``True``. :param query_params: Additional parameters (optional). For a list of valid parameters, see `GET search/jobs/{search_id}/results <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults>`_. :type query_params: ``dict`` :return: The ``InputStream`` IO handle to this job's results. """ query_params['segmentation'] = query_params.get('segmentation', 'none') return self.get("results", **query_params).body
Returns a streaming handle to this job's search results. To get a nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`, as in:: import splunklib.client as client import splunklib.results as results from time import sleep service = client.connect(...) job = service.jobs.create("search * | head 5") while not job.is_done(): sleep(.2) rr = results.ResultsReader(job.results()) for result in rr: if isinstance(result, results.Message): # Diagnostic messages may be returned in the results print '%s: %s' % (result.type, result.message) elif isinstance(result, dict): # Normal events are returned as dicts print result assert rr.is_preview == False Results are not available until the job has finished. If called on an unfinished job, the result is an empty event set. This method makes a single roundtrip to the server, plus at most two additional round trips if the ``autologin`` field of :func:`connect` is set to ``True``. :param query_params: Additional parameters (optional). For a list of valid parameters, see `GET search/jobs/{search_id}/results <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults>`_. :type query_params: ``dict`` :return: The ``InputStream`` IO handle to this job's results.
def reindex(self, comments=True, change_history=True, worklogs=True):
    """
    Reindex the Jira instance
    Kicks off a reindex. Need Admin permissions to perform this reindex.
    :param comments: Indicates that comments should also be reindexed. Not relevant for foreground reindex,
        where comments are always reindexed.
    :param change_history: Indicates that changeHistory should also be reindexed.
        Not relevant for foreground reindex, where changeHistory is always reindexed.
    :param worklogs: Indicates that worklogs should also be reindexed.
        Not relevant for foreground reindex, where worklogs are always reindexed.
    :return:
    """
    params = {}
    if not comments:
        params['indexComments'] = comments
    if not change_history:
        params['indexChangeHistory'] = change_history
    if not worklogs:
        params['indexWorklogs'] = worklogs
    return self.post('rest/api/2/reindex', params=params)
Reindex the Jira instance
Kicks off a reindex. Need Admin permissions to perform this reindex.
:param comments: Indicates that comments should also be reindexed. Not relevant for foreground reindex,
    where comments are always reindexed.
:param change_history: Indicates that changeHistory should also be reindexed.
    Not relevant for foreground reindex, where changeHistory is always reindexed.
:param worklogs: Indicates that worklogs should also be reindexed.
    Not relevant for foreground reindex, where worklogs are always reindexed.
:return:
def approvewitness(ctx, witnesses, account): """ Approve witness(es) """ pprint(ctx.peerplays.approvewitness(witnesses, account=account))
Approve witness(es)
def parse_content(self, content): """ Parses the output of the ``date`` and ``date --utc`` command. Sample: Fri Jun 24 09:13:34 CST 2016 Sample: Fri Jun 24 09:13:34 UTC 2016 Attributes ---------- datetime: datetime.datetime A native datetime.datetime of the parsed date string timezone: str The string portion of the date string containing the timezone Raises: DateParseException: Raised if any exception occurs parsing the content. """ self.data = get_active_lines(content, comment_char="COMMAND>")[0] parts = self.data.split() if not len(parts) == 6: msg = "Expected six date parts. Got [%s]" raise DateParseException(msg % self.data) try: self.timezone = parts[4] no_tz = ' '.join(parts[:4]) + ' ' + parts[-1] self.datetime = datetime.strptime(no_tz, '%a %b %d %H:%M:%S %Y') except: six.reraise(DateParseException, DateParseException(self.data), sys.exc_info()[2])
Parses the output of the ``date`` and ``date --utc`` command. Sample: Fri Jun 24 09:13:34 CST 2016 Sample: Fri Jun 24 09:13:34 UTC 2016 Attributes ---------- datetime: datetime.datetime A native datetime.datetime of the parsed date string timezone: str The string portion of the date string containing the timezone Raises: DateParseException: Raised if any exception occurs parsing the content.
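The parsing step can be exercised on its own; this sketch reproduces the timezone-stripping logic from the method above:

from datetime import datetime

data = 'Fri Jun 24 09:13:34 CST 2016'
parts = data.split()
timezone = parts[4]                              # 'CST'
no_tz = ' '.join(parts[:4]) + ' ' + parts[-1]    # drop the timezone token
dt = datetime.strptime(no_tz, '%a %b %d %H:%M:%S %Y')
print(timezone, dt)  # CST 2016-06-24 09:13:34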
def coffee(input, output, **kw): """Process CoffeeScript files""" subprocess.call([current_app.config.get('COFFEE_BIN'), '-c', '-o', output, input])
Process CoffeeScript files
def wrap_http_for_jwt_access(credentials, http): """Prepares an HTTP object's request method for JWT access. Wraps HTTP requests with logic to catch auth failures (typically identified via a 401 status code). In the event of failure, tries to refresh the token used and then retry the original request. Args: credentials: _JWTAccessCredentials, the credentials used to identify a service account that uses JWT access tokens. http: httplib2.Http, an http object to be used to make auth requests. """ orig_request_method = http.request wrap_http_for_auth(credentials, http) # The new value of ``http.request`` set by ``wrap_http_for_auth``. authenticated_request_method = http.request # The closure that will replace 'httplib2.Http.request'. def new_request(uri, method='GET', body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None): if 'aud' in credentials._kwargs: # Preemptively refresh token, this is not done for OAuth2 if (credentials.access_token is None or credentials.access_token_expired): credentials.refresh(None) return request(authenticated_request_method, uri, method, body, headers, redirections, connection_type) else: # If we don't have an 'aud' (audience) claim, # create a 1-time token with the uri root as the audience headers = _initialize_headers(headers) _apply_user_agent(headers, credentials.user_agent) uri_root = uri.split('?', 1)[0] token, unused_expiry = credentials._create_token({'aud': uri_root}) headers['Authorization'] = 'Bearer ' + token return request(orig_request_method, uri, method, body, clean_headers(headers), redirections, connection_type) # Replace the request method with our own closure. http.request = new_request # Set credentials as a property of the request method. http.request.credentials = credentials
Prepares an HTTP object's request method for JWT access. Wraps HTTP requests with logic to catch auth failures (typically identified via a 401 status code). In the event of failure, tries to refresh the token used and then retry the original request. Args: credentials: _JWTAccessCredentials, the credentials used to identify a service account that uses JWT access tokens. http: httplib2.Http, an http object to be used to make auth requests.
def PrimaryHDU(model): ''' Construct the primary HDU file containing basic header info. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=0) if 'KEPMAG' not in [c[0] for c in cards]: cards.append(('KEPMAG', model.mag, 'Kepler magnitude')) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # Create the HDU header = pyfits.Header(cards=cards) hdu = pyfits.PrimaryHDU(header=header) return hdu
Construct the primary HDU file containing basic header info.
def resolve_method(state, method_name, class_name, params=(), ret_type=None, include_superclasses=True, init_class=True, raise_exception_if_not_found=False): """ Resolves the method based on the given characteristics (name, class and params) The method may be defined in one of the superclasses of the given class (TODO: support interfaces). :rtype: archinfo.arch_soot.SootMethodDescriptor """ base_class = state.javavm_classloader.get_class(class_name) if include_superclasses: class_hierarchy = state.javavm_classloader.get_class_hierarchy(base_class) else: class_hierarchy = [base_class] # walk up in class hierarchy, until method is found for class_descriptor in class_hierarchy: java_binary = state.project.loader.main_object soot_method = java_binary.get_soot_method(method_name, class_descriptor.name, params, none_if_missing=True) if soot_method is not None: # init the class if init_class: state.javavm_classloader.init_class(class_descriptor) return SootMethodDescriptor.from_soot_method(soot_method) # method could not be found # => we are executing code that is not loaded (typically library code) # => fallback: continue with infos available from the invocation, so we # still can use SimProcedures if raise_exception_if_not_found: raise SootMethodNotLoadedException() else: return SootMethodDescriptor(class_name, method_name, params, ret_type=ret_type)
Resolves the method based on the given characteristics (name, class and params) The method may be defined in one of the superclasses of the given class (TODO: support interfaces). :rtype: archinfo.arch_soot.SootMethodDescriptor
def write_brackets(docgraph, output_file, layer='mmax'): """ converts a document graph into a plain text file with brackets. Parameters ---------- layer : str or None The layer from which the pointing chains/relations (i.e. coreference relations) should be extracted. If no layer is selected, all pointing relations will be considered. (This might lead to errors, e.g. when the document contains Tiger syntax trees with secondary edges.) """ bracketed_str = gen_bracketed_output(docgraph, layer=layer) assert isinstance(output_file, (str, file)) if isinstance(output_file, str): path_to_file = os.path.dirname(output_file) if not os.path.isdir(path_to_file): create_dir(path_to_file) with codecs.open(output_file, 'w', 'utf-8') as outfile: outfile.write(bracketed_str) else: # output_file is a file object output_file.write(bracketed_str)
converts a document graph into a plain text file with brackets. Parameters ---------- layer : str or None The layer from which the pointing chains/relations (i.e. coreference relations) should be extracted. If no layer is selected, all pointing relations will be considered. (This might lead to errors, e.g. when the document contains Tiger syntax trees with secondary edges.)
def request(endpoint, verb='GET', session_options=None, **options): """Performs a synchronous request. Uses a dedicated event loop and aiohttp.ClientSession object. Options: - endpoint: the endpoint to call - verb: the HTTP verb to use (defaults: GET) - session_options: a dict containing options to initialize the session (defaults: None) - options: extra options for the request (defaults: None) Returns a dict object with the following keys: - content: the content of the response - status: the status - headers: a dict with all the response headers """ req = functools.partial(_request, endpoint, verb, session_options, **options) return _run_in_fresh_loop(req)
Performs a synchronous request. Uses a dedicated event loop and aiohttp.ClientSession object. Options: - endpoint: the endpoint to call - verb: the HTTP verb to use (defaults: GET) - session_options: a dict containing options to initialize the session (defaults: None) - options: extra options for the request (defaults: None) Returns a dict object with the following keys: - content: the content of the response - status: the status - headers: a dict with all the response headers
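A hypothetical usage sketch, assuming the request() helper above is importable and the endpoint is reachable (httpbin.org is just an example host):

resp = request('https://httpbin.org/get', verb='GET')
print(resp['status'])                       # e.g. 200
print(resp['headers'].get('Content-Type'))  # e.g. 'application/json'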
async def put(self, cid): """Update description for content Accepts: Query string args: - "cid" - int Request body parameters: - message (signed dict): - "description" - str - "coinid" - str Returns: dict with following fields: - "confirmed": None - "txid" - str - "description" - str - "content" - str - "read_access" - int - "write_access" - int - "cid" - int - "txid" - str - "seller_pubkey" - str - "seller_access_string": None or str Verified: True """ if settings.SIGNATURE_VERIFICATION: super().verify() try: body = json.loads(self.request.body) except: self.set_status(400) self.write({"error":400, "reason":"Unexpected data format. JSON required"}) raise tornado.web.Finish # Get data from signed message public_key = body.get("public_key", None) if isinstance(body["message"], str): message = json.loads(body["message"]) elif isinstance(body["message"], dict): message = body["message"] descr = message.get("description") coinid = message.get("coinid") if not coinid in settings.bridges.keys(): self.set_status(400) self.write({"error":400, "reason":"Unknown coin id"}) raise tornado.web.Finish # Check if all required data exists if not all([public_key, descr, coinid]): self.set_status(400) self.write({"error":400, "reason":"Missed required fields"}) raise tornado.web.Finish owneraddr = self.account.validator[coinid](public_key) # Get content owner response = await self.account.blockchain.ownerbycid(cid=cid) if isinstance(response, dict): if "error" in response.keys(): error_code = response["error"] self.set_status(error_code) self.write({"error":error_code, "reason":response["error"]}) raise tornado.web.Finish # Check if current content belongs to current user if response != owneraddr: self.set_status(403) self.write({"error":403, "reason":"Owner does not match."}) raise tornado.web.Finish # Set fee fee = await billing.update_description_fee(owneraddr=owneraddr,cid=cid, description=descr) # Set bridge url if coinid in settings.bridges.keys(): self.account.blockchain.setendpoint(settings.bridges[coinid]) else: self.set_status(400) self.write({"error":400, "reason":"Invalid coinid"}) raise tornado.web.Finish # Set description for content. Make request to the bridge request = await self.account.blockchain.setdescrforcid(cid=cid, descr=descr, owneraddr=owneraddr) if "error" in request.keys(): self.set_status(request["error"]) self.write(request) raise tornado.web.Finish self.write({"cid":cid, "description":descr, "coinid":coinid, "owneraddr": owneraddr})
Update description for content Accepts: Query string args: - "cid" - int Request body parameters: - message (signed dict): - "description" - str - "coinid" - str Returns: dict with following fields: - "confirmed": None - "txid" - str - "description" - str - "content" - str - "read_access" - int - "write_access" - int - "cid" - int - "txid" - str - "seller_pubkey" - str - "seller_access_string": None or str Verified: True
def reset_sequence(self, topic): """Reset the expected sequence number for a topic If the topic is unknown, this does nothing. This behaviour is useful when you have wildcard topics that only create queues once they receive the first message matching the topic. Args: topic (string): The topic to reset the packet queue on """ if topic in self.queues: self.queues[topic].reset()
Reset the expected sequence number for a topic If the topic is unknown, this does nothing. This behaviour is useful when you have wildcard topics that only create queues once they receive the first message matching the topic. Args: topic (string): The topic to reset the packet queue on
def set_Y(self, Y):
    """
    Set the output data of the model

    :param Y: output observations
    :type Y: np.ndarray or ObsAr
    """
    assert isinstance(Y, (np.ndarray, ObsAr))
    state = self.update_model()
    self.update_model(False)
    if self.normalizer is not None:
        self.normalizer.scale_by(Y)
        self.Y_normalized = ObsAr(self.normalizer.normalize(Y))
        self.Y = Y
    else:
        self.Y = ObsAr(Y) if isinstance(Y, np.ndarray) else Y
        self.Y_normalized = self.Y
    self.update_model(state)
Set the output data of the model

:param Y: output observations
:type Y: np.ndarray or ObsAr
def self_inventory(self): """ Inventory output will only contain the server name and the session ID when a key is provided. Provide the same format as with the full inventory instead for consistency. """ if self.api_key is None: return {} if self._self_inventory: return self._self_inventory resp, self_inventory = self.get('Inventory?key=%s' % self.api_key) real_self_inventory = dict() for host in self_inventory: real_self_inventory[host[0]] = self.full_inventory[host[0]] self._self_inventory = real_self_inventory return self._self_inventory
Inventory output will only contain the server name and the session ID when a key is provided. Provide the same format as with the full inventory instead for consistency.
def applyslicer(array, slicer, pmask, cval = 0):
    r"""
    Apply a slicer returned by the iterator to a new array of the same
    dimensionality as the one used to initialize the iterator.

    Notes
    -----
    If ``array`` has more dimensions than ``slicer`` and ``pmask``, the first ones
    are sliced.

    Parameters
    ----------
    array : array_like
        A n-dimensional array.
    slicer : list
        List of `slice()` instances as returned by `next()`.
    pmask : narray
        The array mask as returned by `next()`.
    cval : number
        Value to fill undefined positions.

    Examples
    --------
    >>> import numpy
    >>> from medpy.iterators import CentredPatchIterator
    >>> arr = numpy.arange(0, 25).reshape((5,5))
    >>> for patch, pmask, _, slicer in CentredPatchIterator(arr, 3):
    ...     new_patch = CentredPatchIterator.applyslicer(arr, slicer, pmask)
    ...     print numpy.all(new_patch == patch)
    True
    ...
    """
    l = len(slicer)
    patch = numpy.zeros(list(pmask.shape[:l]) + list(array.shape[l:]), array.dtype)
    if not 0 == cval:
        patch.fill(cval)
    sliced = array[slicer]
    patch[pmask] = sliced.reshape([numpy.prod(sliced.shape[:l])] + list(sliced.shape[l:]))
    return patch
r""" Apply a slicer returned by the iterator to a new array of the same dimensionality as the one used to initialize the iterator. Notes ----- If ``array`` has more dimensions than ``slicer`` and ``pmask``, the first ones are sliced. Parameters ---------- array : array_like A n-dimensional array. slicer : list List if `slice()` instances as returned by `next()`. pmask : narray The array mask as returned by `next()`. cval : number Value to fill undefined positions. Experiments ----------- >>> import numpy >>> from medpy.iterators import CentredPatchIterator >>> arr = numpy.arange(0, 25).reshape((5,5)) >>> for patch, pmask, _, slicer in CentredPatchIterator(arr, 3): >>> new_patch = CentredPatchIterator.applyslicer(arr, slicer, pmask) >>> print numpy.all(new_patch == patch) True ...
def get_data_frame_transform(self, transform_id=None, params=None): """ `<https://www.elastic.co/guide/en/elasticsearch/reference/current/get-data-frame-transform.html>`_ :arg transform_id: The id or comma delimited list of id expressions of the transforms to get, '_all' or '*' implies get all transforms :arg from_: skips a number of transform configs, defaults to 0 :arg size: specifies a max number of transforms to get, defaults to 100 """ return self.transport.perform_request( "GET", _make_path("_data_frame", "transforms", transform_id), params=params )
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/get-data-frame-transform.html>`_ :arg transform_id: The id or comma delimited list of id expressions of the transforms to get, '_all' or '*' implies get all transforms :arg from_: skips a number of transform configs, defaults to 0 :arg size: specifies a max number of transforms to get, defaults to 100
async def reload_modules(self, pathlist):
    """
    Reload modules with a full path in the pathlist
    """
    loadedModules = []
    failures = []
    for path in pathlist:
        p, module = findModule(path, False)
        if module is not None and hasattr(module, '_instance') and module._instance.state != ModuleLoadStateChanged.UNLOADED:
            loadedModules.append(module)
    # Unload all modules
    ums = [ModuleLoadStateChanged.createMatcher(m, ModuleLoadStateChanged.UNLOADED) for m in loadedModules]
    for m in loadedModules:
        # Only unload the module itself, not its dependencies, since we will restart the module soon enough
        self.subroutine(self.unloadmodule(m, True), False)
    await self.wait_for_all(*ums)
    # Group modules by package
    grouped = {}
    for path in pathlist:
        dotpos = path.rfind('.')
        if dotpos == -1:
            raise ModuleLoadException('Must specify module with full path, including package name')
        package = path[:dotpos]
        classname = path[dotpos + 1:]
        mlist = grouped.setdefault(package, [])
        p, module = findModule(path, False)
        mlist.append((classname, module))
    for package, mlist in grouped.items():
        # Reload each package only once
        try:
            p = sys.modules[package]
            # Remove cache to ensure a clean import from source file
            removeCache(p)
            p = reload(p)
        except KeyError:
            try:
                p = __import__(package, fromlist=[m[0] for m in mlist])
            except Exception:
                self._logger.warning('Failed to import a package: %r, resume others', package, exc_info = True)
                failures.append('Failed to import: ' + package)
                continue
        except Exception:
            self._logger.warning('Failed to import a package: %r, resume others', package, exc_info = True)
            failures.append('Failed to import: ' + package)
            continue
        for cn, module in mlist:
            try:
                module2 = getattr(p, cn)
            except AttributeError:
                self._logger.warning('Cannot find module %r in package %r, resume others', package, cn)
                failures.append('Failed to import: ' + package + '.' + cn)
                continue
            if module is not None and module is not module2:
                # Update the references
                try:
                    lpos = loadedModules.index(module)
                    loaded = True
                except Exception:
                    loaded = False
                for d in module.depends:
                    # The new reference is automatically added on import, only remove the old reference
                    d.referencedBy.remove(module)
                    if loaded and hasattr(d, '_instance'):
                        try:
                            d._instance.dependedBy.remove(module)
                            d._instance.dependedBy.add(module2)
                        except ValueError:
                            pass
                if hasattr(module, 'referencedBy'):
                    for d in module.referencedBy:
                        pos = d.depends.index(module)
                        d.depends[pos] = module2
                        if not hasattr(module2, 'referencedBy'):
                            module2.referencedBy = []
                        module2.referencedBy.append(d)
                if loaded:
                    loadedModules[lpos] = module2
    # Start the reloaded modules
    for m in loadedModules:
        self.subroutine(self.loadmodule(m))
    if failures:
        raise ModuleLoadException('Following errors occurred during reloading, check log for more details:\n' + '\n'.join(failures))
Reload modules with a full path in the pathlist
def get_category_by_id(cls, category_id, **kwargs): """Find Category Return single instance of Category by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_category_by_id(category_id, async=True) >>> result = thread.get() :param async bool :param str category_id: ID of category to return (required) :return: Category If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_category_by_id_with_http_info(category_id, **kwargs) else: (data) = cls._get_category_by_id_with_http_info(category_id, **kwargs) return data
Find Category Return single instance of Category by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_category_by_id(category_id, async=True) >>> result = thread.get() :param async bool :param str category_id: ID of category to return (required) :return: Category If the method is called asynchronously, returns the request thread.
def _set_value(self, target, value, bitarray): ''' set given numeric value to target field in bitarray ''' # derive raw value rng = target.find('range') rng_min = float(rng.find('min').text) rng_max = float(rng.find('max').text) scl = target.find('scale') scl_min = float(scl.find('min').text) scl_max = float(scl.find('max').text) raw_value = (value - scl_min) * (rng_max - rng_min) / (scl_max - scl_min) + rng_min # store value in bitfield return self._set_raw(target, int(raw_value), bitarray)
set given numeric value to target field in bitarray
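The conversion is plain linear interpolation between the scale and range intervals; a worked numeric sketch with hypothetical bounds:

# Hypothetical field: scale 0..40 (e.g. degrees C), raw range 0..255.
scl_min, scl_max = 0.0, 40.0
rng_min, rng_max = 0.0, 255.0
value = 20.0
raw_value = (value - scl_min) * (rng_max - rng_min) / (scl_max - scl_min) + rng_min
print(int(raw_value))  # 127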
def stop(self): """ Stops the backend process. """ if self._process is None: return if self._shared: BackendManager.SHARE_COUNT -= 1 if BackendManager.SHARE_COUNT: return comm('stopping backend process') # close all sockets for s in self._sockets: s._callback = None s.close() self._sockets[:] = [] # prevent crash logs from being written if we are busy killing # the process self._process._prevent_logs = True while self._process.state() != self._process.NotRunning: self._process.waitForFinished(1) if sys.platform == 'win32': # Console applications on Windows that do not run an event # loop, or whose event loop does not handle the WM_CLOSE # message, can only be terminated by calling kill(). self._process.kill() else: self._process.terminate() self._process._prevent_logs = False self._heartbeat_timer.stop() comm('backend process terminated')
Stops the backend process.
async def acquire_async(self): """Acquire the :attr:`lock` asynchronously """ r = self.acquire(blocking=False) while not r: await asyncio.sleep(.01) r = self.acquire(blocking=False)
Acquire the :attr:`lock` asynchronously
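A self-contained sketch of the same polling pattern, with a plain threading.Lock standing in for the instance's lock attribute:

import asyncio
import threading

lock = threading.Lock()

async def acquire_async(lock):
    # Poll the non-async lock without blocking the event loop.
    while not lock.acquire(blocking=False):
        await asyncio.sleep(.01)

async def main():
    await acquire_async(lock)
    try:
        print('lock held')
    finally:
        lock.release()

asyncio.run(main())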
def now(format_string):
    """
    Displays the date, formatted according to the given string.

    Uses the same format as PHP's ``date()`` function; see http://php.net/date
    for all the possible values.

    Sample usage::

        It is {% now "jS F Y H:i" %}
    """
    from datetime import datetime
    from django.utils.dateformat import DateFormat
    return DateFormat(datetime.now()).format(format_string)
Displays the date, formatted according to the given string. Uses the same format as PHP's ``date()`` function; see http://php.net/date for all the possible values. Sample usage:: It is {% now "jS F Y H:i" %}
def build(ctx, builder="html", options=""): """Build docs with sphinx-build""" sourcedir = ctx.config.sphinx.sourcedir destdir = Path(ctx.config.sphinx.destdir or "build")/builder destdir = destdir.abspath() with cd(sourcedir): destdir_relative = Path(".").relpathto(destdir) command = "sphinx-build {opts} -b {builder} {sourcedir} {destdir}" \ .format(builder=builder, sourcedir=".", destdir=destdir_relative, opts=options) ctx.run(command)
Build docs with sphinx-build
def get_provider_token(self, provider_secret):
    """
    Obtain the service provider token

    https://work.weixin.qq.com/api/doc#90001/90143/91200

    :param provider_secret: The service provider's secret, visible in the service provider admin console
    :return: The returned JSON payload
    """
    return self._post(
        'service/get_provider_token',
        data={
            'corpid': self._client.corp_id,
            'provider_secret': provider_secret,
        }
    )
Obtain the service provider token

https://work.weixin.qq.com/api/doc#90001/90143/91200

:param provider_secret: The service provider's secret, visible in the service provider admin console
:return: The returned JSON payload
async def list_vms(self, preset_name):
    '''
    List VMs by preset name
    :arg preset_name: string
    '''
    response = await self.nova.servers.list(name=f'^{preset_name}$')
    result = []
    for server in response['servers']:
        result.append(self._map_vm_structure(server))
    return result
List VMs by preset name
:arg preset_name: string
def new(self, vd, ino, orig_len, csum): # type: (headervd.PrimaryOrSupplementaryVD, inode.Inode, int, int) -> None ''' A method to create a new boot info table. Parameters: vd - The volume descriptor to associate with this boot info table. ino - The Inode associated with this Boot Info Table. orig_len - The original length of the file before the boot info table was patched into it. csum - The checksum for the boot file, starting at the byte after the boot info table. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('This Eltorito Boot Info Table is already initialized') self.vd = vd self.orig_len = orig_len self.csum = csum self.inode = ino self._initialized = True
A method to create a new boot info table. Parameters: vd - The volume descriptor to associate with this boot info table. ino - The Inode associated with this Boot Info Table. orig_len - The original length of the file before the boot info table was patched into it. csum - The checksum for the boot file, starting at the byte after the boot info table. Returns: Nothing.
def relabel(self, catalogue): """Relabels results rows according to `catalogue`. A row whose work is labelled in the catalogue will have its label set to the label in the catalogue. Rows whose works are not labelled in the catalogue will be unchanged. :param catalogue: mapping of work names to labels :type catalogue: `Catalogue` """ for work, label in catalogue.items(): self._matches.loc[self._matches[constants.WORK_FIELDNAME] == work, constants.LABEL_FIELDNAME] = label
Relabels results rows according to `catalogue`. A row whose work is labelled in the catalogue will have its label set to the label in the catalogue. Rows whose works are not labelled in the catalogue will be unchanged. :param catalogue: mapping of work names to labels :type catalogue: `Catalogue`
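A standalone pandas sketch of the relabelling step; the WORK and LABEL column names stand in for constants.WORK_FIELDNAME and constants.LABEL_FIELDNAME:

import pandas as pd

matches = pd.DataFrame({'WORK': ['hamlet', 'macbeth'], 'LABEL': ['', '']})
catalogue = {'hamlet': 'tragedy'}  # works absent from the catalogue keep their label
for work, label in catalogue.items():
    matches.loc[matches['WORK'] == work, 'LABEL'] = label
print(matches)  # hamlet -> 'tragedy'; macbeth unchanged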
def patch_lines(x): """ Draw lines between groups """ for idx in range(len(x)-1): x[idx] = np.vstack([x[idx], x[idx+1][0,:]]) return x
Draw lines between groups
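A quick sketch, assuming patch_lines above is in scope: after patching, each group ends with the first point of the next group, so drawn segments connect:

import numpy as np

x = [np.array([[0., 0.], [1., 1.]]), np.array([[2., 2.], [3., 3.]])]
x = patch_lines(x)
print(x[0])  # rows [0,0], [1,1], [2,2] -- now shares an endpoint with x[1]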
def delete_dashboard(self, team_context, dashboard_id): """DeleteDashboard. [Preview API] Delete a dashboard given its ID. This also deletes the widgets associated with this dashboard. :param :class:`<TeamContext> <azure.devops.v5_0.dashboard.models.TeamContext>` team_context: The team context for the operation :param str dashboard_id: ID of the dashboard to delete. """ project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if dashboard_id is not None: route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str') self._send(http_method='DELETE', location_id='454b3e51-2e6e-48d4-ad81-978154089351', version='5.0-preview.2', route_values=route_values)
DeleteDashboard. [Preview API] Delete a dashboard given its ID. This also deletes the widgets associated with this dashboard. :param :class:`<TeamContext> <azure.devops.v5_0.dashboard.models.TeamContext>` team_context: The team context for the operation :param str dashboard_id: ID of the dashboard to delete.
def divsin_fc(fdata): """Apply divide by sine in the Fourier domain.""" nrows = fdata.shape[0] ncols = fdata.shape[1] L = int(nrows / 2) # Assuming nrows is even, which it should be. L2 = L - 2 # This is the last index in the recursion for division by sine. g = np.zeros([nrows, ncols], dtype=np.complex128) g[L2, :] = 2 * 1j * fdata[L - 1, :] for k in xrange(L2, -L2, -1): g[k - 1, :] = 2 * 1j * fdata[k, :] + g[k + 1, :] fdata[:, :] = g
Apply divide by sine in the Fourier domain.
def get_asset_by_name(self, publisher_name, extension_name, version, asset_type, account_token=None, accept_default=None, account_token_header=None, **kwargs): """GetAssetByName. [Preview API] :param str publisher_name: :param str extension_name: :param str version: :param str asset_type: :param str account_token: :param bool accept_default: :param String account_token_header: Header to pass the account token :rtype: object """ route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') if extension_name is not None: route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str') if version is not None: route_values['version'] = self._serialize.url('version', version, 'str') if asset_type is not None: route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str') query_parameters = {} if account_token is not None: query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str') if accept_default is not None: query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool') response = self._send(http_method='GET', location_id='7529171f-a002-4180-93ba-685f358a0482', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback)
GetAssetByName. [Preview API] :param str publisher_name: :param str extension_name: :param str version: :param str asset_type: :param str account_token: :param bool accept_default: :param String account_token_header: Header to pass the account token :rtype: object
def _certifi_where_for_ssl_version(): """Gets the right location for certifi certifications for the current SSL version. Older versions of SSL don't support the stronger set of root certificates. """ if not ssl: return if ssl.OPENSSL_VERSION_INFO < (1, 0, 2): warnings.warn( 'You are using an outdated version of OpenSSL that ' 'can\'t use stronger root certificates.') return certifi.old_where() return certifi.where()
Gets the right location for certifi certifications for the current SSL version. Older versions of SSL don't support the stronger set of root certificates.
def update_ethernet_settings(self, configuration, force=False, timeout=-1): """ Updates the Ethernet interconnect settings for the logical interconnect. Args: configuration: Ethernet interconnect settings. force: If set to true, the operation completes despite any problems with network connectivity or errors on the resource itself. The default is false. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Logical Interconnect. """ uri = "{}/ethernetSettings".format(self.data["uri"]) return self._helper.update(configuration, uri=uri, force=force, timeout=timeout)
Updates the Ethernet interconnect settings for the logical interconnect. Args: configuration: Ethernet interconnect settings. force: If set to true, the operation completes despite any problems with network connectivity or errors on the resource itself. The default is false. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Logical Interconnect.
def is_installed(self, pkgname): """Given a package name, returns whether it is installed in the environment :param str pkgname: The name of a package :return: Whether the supplied package is installed in the environment :rtype: bool """ return any(d for d in self.get_distributions() if d.project_name == pkgname)
Given a package name, returns whether it is installed in the environment :param str pkgname: The name of a package :return: Whether the supplied package is installed in the environment :rtype: bool
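A standalone sketch of the same membership test, using pkg_resources' global working set in place of self.get_distributions():

import pkg_resources

def is_installed(pkgname):
    # Same comparison as above, over the current interpreter's distributions.
    return any(d for d in pkg_resources.working_set
               if d.project_name == pkgname)

print(is_installed('setuptools'))  # True in most environments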
def search_article(self, keyword, page=1, timesn=WechatSogouConst.search_article_time.anytime,
                   article_type=WechatSogouConst.search_article_type.all, ft=None, et=None,
                   unlock_callback=None,
                   identify_image_callback=None,
                   decode_url=True):
    """Search articles

    When a captcha page appears, the caller may provide either of the following:
    1. A function unlock_callback, which handles the whole flow from the captcha appearing to its resolution
    2. Or only a function identify_image_callback, which takes the captcha image binary as input and returns
       the captcha text; the wechatsogou package handles the rest
    Note: only one of unlock_callback and identify_image_callback is needed; if both are provided,
    identify_image_callback has no effect

    Parameters
    ----------
    keyword : str or unicode
        the text to search for
    page : int, optional
        page number, the default is 1
    timesn : WechatSogouConst.search_article_time
        time range: anytime (no limit) / day (one day) / week (one week) / month (one month) /
        year (one year) / specific (custom range)
        the default is anytime
    article_type : WechatSogouConst.search_article_type
        content type: image (has images) / video (has video) / rich (has images and video) / all (everything)
    ft, et : datetime.date or None
        when timesn is specific, ft is the start date, e.g. 2017-07-01
        when timesn is specific, et is the end date, e.g. 2017-07-15
    unlock_callback : callable
        function that handles a captcha page, see unlock_callback_example
    identify_image_callback : callable
        captcha-solving function that takes the captcha image binary and returns the text,
        see identify_image_callback_example
    decode_url : bool
        whether to decode the url

    Returns
    -------
    list[dict]
        {
            'article': {
                'title': '',  # article title
                'url': '',  # article link
                'imgs': '',  # list of article images
                'abstract': '',  # article abstract
                'time': ''  # article publish time
            },
            'gzh': {
                'profile_url': '',  # link to the official account's 10 most recent mass-send posts
                'headimage': '',  # avatar
                'wechat_name': '',  # account name
                'isv': '',  # whether the account is verified
            }
        }

    Raises
    ------
    WechatSogouRequestsException
        requests error
    """
    url = WechatSogouRequest.gen_search_article_url(keyword, page, timesn, article_type, ft, et)
    session = requests.session()
    resp = self.__get_by_unlock(url, WechatSogouRequest.gen_search_article_url(keyword),
                                unlock_platform=self.__unlock_sogou,
                                unlock_callback=unlock_callback,
                                identify_image_callback=identify_image_callback,
                                session=session)

    article_list = WechatSogouStructuring.get_article_by_search(resp.text)
    for i in article_list:
        if decode_url:
            i['article']['url'] = self.__format_url(i['article']['url'], url, resp.text, unlock_callback=unlock_callback,
                                                    identify_image_callback=identify_image_callback, session=session)
            i['gzh']['profile_url'] = self.__format_url(i['gzh']['profile_url'], url, resp.text,
                                                        unlock_callback=unlock_callback,
                                                        identify_image_callback=identify_image_callback,
                                                        session=session)
        yield i
Search articles

When a captcha page appears, the caller may provide either of the following:
1. A function unlock_callback, which handles the whole flow from the captcha appearing to its resolution
2. Or only a function identify_image_callback, which takes the captcha image binary as input and returns
   the captcha text; the wechatsogou package handles the rest
Note: only one of unlock_callback and identify_image_callback is needed; if both are provided,
identify_image_callback has no effect

Parameters
----------
keyword : str or unicode
    the text to search for
page : int, optional
    page number, the default is 1
timesn : WechatSogouConst.search_article_time
    time range: anytime (no limit) / day (one day) / week (one week) / month (one month) /
    year (one year) / specific (custom range)
    the default is anytime
article_type : WechatSogouConst.search_article_type
    content type: image (has images) / video (has video) / rich (has images and video) / all (everything)
ft, et : datetime.date or None
    when timesn is specific, ft is the start date, e.g. 2017-07-01
    when timesn is specific, et is the end date, e.g. 2017-07-15
unlock_callback : callable
    function that handles a captcha page, see unlock_callback_example
identify_image_callback : callable
    captcha-solving function that takes the captcha image binary and returns the text,
    see identify_image_callback_example
decode_url : bool
    whether to decode the url

Returns
-------
list[dict]
    {
        'article': {
            'title': '',  # article title
            'url': '',  # article link
            'imgs': '',  # list of article images
            'abstract': '',  # article abstract
            'time': ''  # article publish time
        },
        'gzh': {
            'profile_url': '',  # link to the official account's 10 most recent mass-send posts
            'headimage': '',  # avatar
            'wechat_name': '',  # account name
            'isv': '',  # whether the account is verified
        }
    }

Raises
------
WechatSogouRequestsException
    requests error
async def on_raw_433(self, message): """ Nickname in use. """ if not self.registered: self._registration_attempts += 1 # Attempt to set new nickname. if self._attempt_nicknames: await self.set_nickname(self._attempt_nicknames.pop(0)) else: await self.set_nickname( self._nicknames[0] + '_' * (self._registration_attempts - len(self._nicknames)))
Nickname in use.