code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def gen_primes():
    """Yield prime numbers indefinitely using an incremental sieve.

    Maintains a mapping from each upcoming composite number to the
    list of primes that divide it (a lazy, unbounded variant of the
    sieve of Eratosthenes).
    """
    witnesses = {}  # composite -> primes known to divide it
    candidate = 2
    while True:
        divisors = witnesses.pop(candidate, None)
        if divisors is None:
            # No recorded divisor: candidate is prime. Its square is
            # the first composite not already marked by smaller primes.
            yield candidate
            witnesses[candidate * candidate] = [candidate]
        else:
            # Composite: advance every witness prime to its next multiple.
            for p in divisors:
                witnesses.setdefault(p + candidate, []).append(p)
        candidate += 1
Generate an infinite sequence of prime numbers.
def get_signed_area(self):
    """Return the signed area of a simple (non-self-intersecting) polygon.

    Assuming the y-axis points up, vertices winding anti-clockwise
    produce a negative value.
    """
    verts = self.verts
    n = len(verts)
    total = 0.0
    for i, (xi, yi) in enumerate(verts):
        xj, yj = verts[(i + 1) % n]
        total += xj * yi - xi * yj
    return total / 2
Return area of a simple (ie. non-self-intersecting) polygon. If verts wind anti-clockwise, this returns a negative number. Assume y-axis points up.
def find_dependencies_with_parent(self, dependent, parent):
    """Find all dependencies of the given revision caused by the given
    parent commit.

    This will be called multiple times for merge commits which have
    multiple parents.

    :param dependent: commit object whose dependencies are sought
    :param parent: one parent commit of ``dependent``
    """
    self.logger.info(" Finding dependencies of %s via parent %s" % (dependent.hex[:8], parent.hex[:8]))
    # Diff parent -> dependent; each hunk of the diff is blamed to find
    # which earlier commits the change depends on.
    diff = self.repo.diff(parent, dependent, context_lines=self.options.context_lines)
    for patch in diff:
        # Use the pre-change path, since blame runs against the parent side.
        path = patch.delta.old_file.path
        self.logger.info(" Examining hunks in %s" % path)
        for hunk in patch.hunks:
            self.blame_diff_hunk(dependent, parent, path, hunk)
Find all dependencies of the given revision caused by the given parent commit. This will be called multiple times for merge commits which have multiple parents.
def newEntity(self, name, type, ExternalID, SystemID, content):
    """Create a new entity.

    This differs from xmlAddDocEntity() in that if the document is
    None or has no internal subset defined, an unlinked entity
    structure is returned; it is then the responsibility of the
    caller to link it to the document later, or free it when it is
    no longer needed.
    """
    raw = libxml2mod.xmlNewEntity(self._o, name, type, ExternalID,
                                  SystemID, content)
    if raw is None:
        raise treeError('xmlNewEntity() failed')
    return xmlEntity(_obj=raw)
Create a new entity, this differs from xmlAddDocEntity() that if the document is None or has no internal subset defined, then an unlinked entity structure will be returned, it is then the responsibility of the caller to link it to the document later or free it when not needed anymore.
def _get_manifest_string(self):
    """Build the nextflow manifest config string for the config file
    from the information on the pipeline.

    Returns
    -------
    str
        Nextflow manifest configuration string
    """
    entries = [
        '\n\tname = "{}"'.format(self.pipeline_name),
        '\n\tmainScript = "{}"'.format(self.nf_file),
    ]
    return "".join(entries)
Returns the nextflow manifest config string to include in the config file from the information on the pipeline. Returns ------- str Nextflow manifest configuration string
def get_next_base26(prev=None):
    """Return the letter-based (base-26) ID that follows *prev*.

    IDs run 'a', 'b', ..., 'z', then 'aa', 'ab', ..., 'az', 'ba', ...
    like spreadsheet column names.

    Returns:
        str: Next base-26 ID.

    Raises:
        ValueError: If *prev* contains anything but lowercase letters.
    """
    if not prev:
        return 'a'
    if re.match("^[a-z]*$", prev) is None:
        raise ValueError("Invalid base26")
    if prev.endswith('z'):
        # Carry: increment the prefix and reset the last digit to 'a'.
        return get_next_base26(prev[:-1]) + 'a'
    return prev[:-1] + chr(ord(prev[-1]) + 1)
Increment letter-based IDs. Generates IDs like ['a', 'b', ..., 'z', 'aa', 'ab', ..., 'az', 'ba', ...] Returns: str: Next base-26 ID.
def sync(self, videoQuality, limit=None, unwatched=False, **kwargs):
    """ Add current Movie library section as sync item for specified device.
        See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering /
        sorting and :func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and
        possible exceptions.

        Parameters:
            videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in
                                :mod:`plexapi.sync` module.
            limit (int): maximum count of movies to sync, unlimited if `None`.
            unwatched (bool): if `True` watched videos wouldn't be synced.

        Returns:
            :class:`plexapi.sync.SyncItem`: an instance of created syncItem.

        Example:

            .. code-block:: python

                from plexapi import myplex
                from plexapi.sync import VIDEO_QUALITY_3_MBPS_720p

                c = myplex.MyPlexAccount()
                target = c.device('Plex Client')
                sync_items_wd = c.syncItems(target.clientIdentifier)
                srv = c.resource('Server Name').connect()
                section = srv.library.section('Movies')
                section.sync(VIDEO_QUALITY_3_MBPS_720p, client=target, limit=1, unwatched=True,
                             title='Next best movie', sort='rating:desc')
    """
    # Imported lazily to avoid a circular import at module load time.
    from plexapi.sync import Policy, MediaSettings
    # Translate the simple arguments into the settings/policy objects the
    # base-class sync() expects, then delegate.
    kwargs['mediaSettings'] = MediaSettings.createVideo(videoQuality)
    kwargs['policy'] = Policy.create(limit, unwatched)
    return super(MovieSection, self).sync(**kwargs)
Add current Movie library section as sync item for specified device. See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and :func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions. Parameters: videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in :mod:`plexapi.sync` module. limit (int): maximum count of movies to sync, unlimited if `None`. unwatched (bool): if `True` watched videos wouldn't be synced. Returns: :class:`plexapi.sync.SyncItem`: an instance of created syncItem. Example: .. code-block:: python from plexapi import myplex from plexapi.sync import VIDEO_QUALITY_3_MBPS_720p c = myplex.MyPlexAccount() target = c.device('Plex Client') sync_items_wd = c.syncItems(target.clientIdentifier) srv = c.resource('Server Name').connect() section = srv.library.section('Movies') section.sync(VIDEO_QUALITY_3_MBPS_720p, client=target, limit=1, unwatched=True, title='Next best movie', sort='rating:desc')
def _on_timeout(self, info: "str | None" = None) -> None:
    """Timeout callback of _HTTPConnection instance.

    Raises a `HTTPTimeoutError` (via the registered exception handler)
    when a timeout occurs.

    :param info: More detailed timeout information, appended to the
        error message when given.
    """
    # Annotation fixed: the default is None, so the type is optional
    # (kept as a string annotation to avoid new imports).
    self._timeout = None
    error_message = "Timeout {0}".format(info) if info else "Timeout"
    # Only report the timeout if the request has not already completed.
    if self.final_callback is not None:
        self._handle_exception(
            HTTPTimeoutError, HTTPTimeoutError(error_message), None
        )
Timeout callback of _HTTPConnection instance. Raise a `HTTPTimeoutError` when a timeout occurs. :info string key: More detailed timeout information.
def get(search="unsigned"):
    """List all available plugins.

    Walks /usr/lib/nagios/plugins recursively and returns every file
    name found.

    NOTE(review): the ``search`` parameter is currently unused; it is
    kept for backward compatibility with existing callers.

    :return: list of plugin file names
    """
    plugins = []
    for _dirpath, _dirnames, filenames in os.walk('/usr/lib/nagios/plugins'):
        # extend() replaces the original element-by-element append loop.
        plugins.extend(filenames)
    return plugins
List all available plugins
def filter_results(self, boxlist, num_classes): """Returns bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). """ # unwrap the boxlist to avoid additional overhead. # if we had multi-class NMS, we could perform this directly on the boxlist boxes = boxlist.bbox.reshape(-1, num_classes * 4) scores = boxlist.get_field("scores").reshape(-1, num_classes) device = scores.device result = [] # Apply threshold on detection probabilities and apply NMS # Skip j = 0, because it's the background class inds_all = scores > self.score_thresh for j in range(1, num_classes): inds = inds_all[:, j].nonzero().squeeze(1) scores_j = scores[inds, j] boxes_j = boxes[inds, j * 4 : (j + 1) * 4] boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy") boxlist_for_class.add_field("scores", scores_j) boxlist_for_class = boxlist_nms( boxlist_for_class, self.nms ) num_labels = len(boxlist_for_class) boxlist_for_class.add_field( "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device) ) result.append(boxlist_for_class) result = cat_boxlist(result) number_of_detections = len(result) # Limit to max_per_image detections **over all classes** if number_of_detections > self.detections_per_img > 0: cls_scores = result.get_field("scores") image_thresh, _ = torch.kthvalue( cls_scores.cpu(), number_of_detections - self.detections_per_img + 1 ) keep = cls_scores >= image_thresh.item() keep = torch.nonzero(keep).squeeze(1) result = result[keep] return result
Returns bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS).
def privmsg(self, target, message):
    """
    Sends a PRIVMSG to someone.

    Required arguments:
    * target - Who to send the message to.
    * message - Message to send.

    Returns a ('AWAY', away_text) tuple when the server answers with an
    RPL_AWAY (301) numeric; otherwise returns None.
    """
    with self.lock:
        self.send('PRIVMSG ' + target + ' :' + message)
        if self.readable():
            # Only a 301 (target is away) reply is expected here.
            msg = self._recv(expected_replies=('301',))
            if msg[0] == '301':
                # Strip the leading ':' from the away-message payload.
                return 'AWAY', msg[2].split(None, 1)[1].replace(':', '', 1)
Sends a PRIVMSG to someone. Required arguments: * target - Who to send the message to. * message - Message to send.
def _LowerBoundSearch(partitions, hash_value):
    """Searches the partition in the partition array using hashValue.

    Returns the index i such that partitions[i] <= hash_value <
    partitions[i+1] (per CompareTo); falls back to the last partition
    when no such pair exists.
    """
    # range() instead of the Python-2-only xrange() keeps this portable.
    for i in range(0, len(partitions) - 1):
        if partitions[i].CompareTo(hash_value) <= 0 and \
                partitions[i + 1].CompareTo(hash_value) > 0:
            return i
    return len(partitions) - 1
Searches the partition in the partition array using hashValue.
def _flatten(lst):
    """Recursively flatten arbitrarily nested lists/tuples into a flat list."""
    if isinstance(lst, (list, tuple)):
        flat = []
        for element in lst:
            flat += _flatten(element)
        return flat
    # Scalar: wrap it so callers can always concatenate the result.
    return [lst]
Flatten a nested list.
def get_synset_1000(self):
    """Load the class-number -> synset-id mapping from synsets.txt.

    Returns:
        dict: {cls_number: synset_id}
    """
    fname = os.path.join(self.dir, 'synsets.txt')
    assert os.path.isfile(fname)
    # Context manager closes the file promptly; the original left the
    # handle open until garbage collection.
    with open(fname) as f:
        lines = [x.strip() for x in f]
    return dict(enumerate(lines))
Returns: dict: {cls_number: synset_id}
def format_currency_field(__, prec, number, locale):
    """Formats a currency field.

    When *prec* is given, the locale's standard currency pattern is
    adjusted to exactly that many fractional digits; otherwise the
    locale's own currency digit handling applies.
    """
    locale = Locale.parse(locale)
    currency = get_territory_currencies(locale.territory)[0]
    if prec is None:
        pattern = None
        currency_digits = True
    else:
        prec = int(prec)
        pattern = modify_number_pattern(locale.currency_formats['standard'],
                                        frac_prec=(prec, prec))
        currency_digits = False
    return format_currency(number, currency, pattern, locale=locale,
                           currency_digits=currency_digits)
Formats a currency field.
def tagAttributes(fdef_master_list,node,depth=0):
    '''recursively tag objects with sizes, depths and path names

    Walks a nested list/dict tree; dict nodes whose 'name' matches an
    entry in fdef_master_list get 'path', 'depth' and (for leaves
    without "children") 'size' tags. Returns the tagged node.
    '''
    # NOTE(review): depth+=1 inside the loops accumulates across
    # siblings, so later siblings receive larger depths than earlier
    # ones at the same level — confirm this is intentional.
    if type(node)==list:
        for i in node:
            depth+=1
            tagAttributes(fdef_master_list,i,depth)
    if type(node)==dict:
        for x in fdef_master_list:
            if jsName(x.path,x.name)==node['name']:
                node['path']=x.path
                node['depth']=depth
                if "children" not in node:
                    # leaf node: record its weight as the display size
                    node["size"]=x.weight
        for i in node.values():
            depth+=1
            tagAttributes(fdef_master_list,i,depth)
    return node
recursively tag objects with sizes, depths and path names
def data(link):
    '''Returns a dictionary from requested link'''
    stripped = _remove_api_url_from_link(link)
    response = _get_from_dapi_or_mirror(stripped)
    return _process_req(response)
Returns a dictionary from requested link
def iterate_similarity_datasets(args):
    """Generator over all similarity evaluation datasets.

    Iterates over dataset names, keyword arguments for their creation
    and the created dataset.

    Yields:
        (dataset_name, kwargs, dataset) triples, one per combination
        of the dataset's declared parameter values.
    """
    for dataset_name in args.similarity_datasets:
        parameters = nlp.data.list_datasets(dataset_name)
        # Cartesian product over every combination of parameter values.
        for key_values in itertools.product(*parameters.values()):
            kwargs = dict(zip(parameters.keys(), key_values))
            yield dataset_name, kwargs, nlp.data.create(dataset_name, **kwargs)
Generator over all similarity evaluation datasets. Iterates over dataset names, keyword arguments for their creation and the created dataset.
def get_encoder_from_vocab(vocab_filepath):
    """Get encoder from vocab file.

    If vocab is not found in output dir, it will be copied there by
    copy_vocab_to_output_dir to clarify the vocab used to generate the data.

    Args:
        vocab_filepath: path to vocab, either local or cns

    Returns:
        A SubwordTextEncoder vocabulary object.

    Raises:
        ValueError: if the vocab file does not exist.
    """
    if not tf.gfile.Exists(vocab_filepath):
        raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath))
    tf.logging.info("Found vocab file: %s", vocab_filepath)
    encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
    return encoder
Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object. None if the output_parallel_text is set.
def ap_state(value, failure_string=None):
    """
    Converts a state's name, postal abbreviation or FIPS to A.P. style.

    Example usage:
        >> ap_state("California")
        'Calif.'

    On lookup failure returns *failure_string* when given, otherwise
    the original value unchanged.
    """
    try:
        return statestyle.get(value).ap
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit.
        if failure_string:
            return failure_string
        else:
            return value
Converts a state's name, postal abbreviation or FIPS to A.P. style. Example usage: >> ap_state("California") 'Calif.'
def modify_ip_prefixes(
        config,
        config_file,
        variable_name,
        dummy_ip_prefix,
        reconfigure_cmd,
        keep_changes,
        changes_counter,
        ip_version):
    """Modify IP prefixes in Bird configuration.

    Depending on the configuration either removes or reports IP prefixes
    found in Bird configuration for which we don't have a service check
    associated with them. Moreover, it adds the dummy IP prefix if it
    isn't present and ensures that the correct variable name is set.

    Arguments:
        config (obj): A configparser object which holds our configuration.
        config_file (str): The file name of bird configuration
        variable_name (str): The name of the variable set in bird
            configuration
        dummy_ip_prefix (str): The dummy IP prefix, which must be always
        reconfigure_cmd (str): The command to run to trigger a
            reconfiguration on Bird daemon upon successful configuration
            update
        keep_changes (boolean): To enable keeping a history of changes
            applied to bird configuration
        changes_counter (int): The number of configuration changes to keep
        ip_version (int): IP protocol version of Bird configuration
    """
    log = logging.getLogger(PROGRAM_NAME)
    services = config.sections()
    services.remove('daemon')  # not needed during sanity check for IP-Prefixes
    update_bird_conf = False
    try:
        ip_prefixes_in_bird = get_ip_prefixes_from_bird(config_file)
    except OSError as error:
        log.error("failed to open Bird configuration %s, this is a FATAL "
                  "error, thus exiting main program", error)
        sys.exit(1)

    # Ensure the expected variable name is present in the Bird config.
    _name = get_variable_name_from_bird(config_file)
    if _name is None:
        log.warning("failed to find variable name in %s, going to add it",
                    config_file)
        update_bird_conf = True
    elif _name != variable_name:
        log.warning("found incorrect variable name in %s, going to add the "
                    "correct one %s", _name, variable_name)
        update_bird_conf = True

    # The dummy prefix keeps the Bird list non-empty and must lead it.
    if dummy_ip_prefix not in ip_prefixes_in_bird:
        log.warning("dummy IP prefix %s is missing from bird configuration "
                    "%s, adding it", dummy_ip_prefix, config_file)
        ip_prefixes_in_bird.insert(0, dummy_ip_prefix)
        update_bird_conf = True

    # Find IP prefixes in Bird configuration without a check.
    ip_prefixes_with_check = get_ip_prefixes_from_config(
        config,
        services,
        ip_version)
    # dummy_ip_prefix doesn't have a config by design
    ip_prefixes_with_check.add(dummy_ip_prefix)

    ip_prefixes_without_check = set(ip_prefixes_in_bird).difference(
        ip_prefixes_with_check)
    if ip_prefixes_without_check:
        if config.getboolean('daemon', 'purge_ip_prefixes'):
            log.warning("removing IP prefix(es) %s from %s because they don't "
                        "have a service check configured",
                        ','.join(ip_prefixes_without_check),
                        config_file)
            # In-place slice assignment preserves the original list object.
            ip_prefixes_in_bird[:] = (ip
                                      for ip in ip_prefixes_in_bird
                                      if ip not in ip_prefixes_without_check)
            update_bird_conf = True
        else:
            log.warning("found IP prefixes %s in %s without a service "
                        "check configured",
                        ','.join(ip_prefixes_without_check),
                        config_file)

    if update_bird_conf:
        if keep_changes:
            archive_bird_conf(config_file, changes_counter)
        # Write to a temp file, then atomically replace the live config.
        tempname = write_temp_bird_conf(
            dummy_ip_prefix,
            config_file,
            variable_name,
            ip_prefixes_in_bird
        )
        try:
            os.rename(tempname, config_file)
        except OSError as error:
            msg = ("CRITICAL: failed to create Bird configuration {e}, "
                   "this is FATAL error, thus exiting main program"
                   .format(e=error))
            sys.exit("{m}".format(m=msg))
        else:
            log.info("Bird configuration for IPv%s is updated", ip_version)
        reconfigure_bird(reconfigure_cmd)
Modify IP prefixes in Bird configuration. Depending on the configuration either removes or reports IP prefixes found in Bird configuration for which we don't have a service check associated with them. Moreover, it adds the dummy IP prefix if it isn't present and ensures that the correct variable name is set. Arguments: config (obg): A configparser object which holds our configuration. config_file (str): The file name of bird configuration variable_name (str): The name of the variable set in bird configuration dummy_ip_prefix (str): The dummy IP prefix, which must be always reconfigure_cmd (str): The command to run to trigger a reconfiguration on Bird daemon upon successful configuration update keep_changes (boolean): To enable keeping a history of changes applied to bird configuration changes_counter (int): The number of configuration changes to keep ip_version (int): IP protocol version of Bird configuration
def available_edbg_ports(self):
    """ Finds available EDBG COM ports.

    :return: list of available (unique) port names
    """
    ports_available = sorted(list(list_ports.comports()))
    edbg_ports = []
    for iport in ports_available:
        port = iport[0]
        desc = iport[1]
        hwid = iport[2]
        if str(desc).startswith("EDBG Virtual COM Port") or \
           "VID:PID=03EB:2111" in str(hwid).upper():
            # print("%-10s: %s (%s)\n" % (port, desc, hwid))
            # Plain membership test replaces the original
            # list.index()/ValueError dance.
            if port in edbg_ports:
                print("There is multiple %s ports with same number!" % port)
            else:
                edbg_ports.append(port)
    # print("Detected %i DUT's" % len(edbg_ports))
    return edbg_ports
Finds available EDBG COM ports. :return: list of available ports
def _load_types(root):
    """Returns {name: Type}

    NOTE(review): despite the summary above, the returned keys are
    (name, api) tuples — api is None when the <type> element carries no
    api attribute. Confirm against callers.
    """
    def text(t):
        # Recursively flatten an XML <type> element into a template
        # string; <name> and <apientry> become format placeholders.
        if t.tag == 'name':
            return '{name}'
        elif t.tag == 'apientry':
            return '{apientry}'
        out = []
        if t.text:
            out.append(_escape_tpl_str(t.text))
        for x in t:
            out.append(text(x))
            if x.tail:
                out.append(_escape_tpl_str(x.tail))
        return ''.join(out)

    out_dict = collections.OrderedDict()
    for elem in root.findall('types/type'):
        # The type name is either an attribute or a child <name> element.
        name = elem.get('name') or elem.find('name').text
        template = text(elem)
        api = elem.get('api')
        if 'requires' in elem.attrib:
            required_types = set((elem.attrib['requires'],))
        else:
            required_types = set()
        comment = elem.get('comment')
        if api:
            k = (name, api)
        else:
            k = (name, None)
        out_dict[k] = Type(name, template, required_types, api, comment)
    return out_dict
Returns {name: Type}
def remove_group_from_favorites(self, id):
    """
    Remove group from favorites.

    Remove a group from the current user's favorites.

    :param id: the ID or SIS ID of the group to remove
    :return: the deleted favorite record from the API
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - id
    """the ID or SIS ID of the group to remove"""
    path["id"] = id

    self.logger.debug("DELETE /api/v1/users/self/favorites/groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/users/self/favorites/groups/{id}".format(**path), data=data, params=params, single_item=True)
Remove group from favorites. Remove a group from the current user's favorites.
def array_dualmap(ol, value_map_func, **kwargs):
    '''Map a list in two stages: indices first, then values.

    Each index is transformed with ``index_map_func`` (identity by
    default); the mapped index together with the element is then fed
    to ``value_map_func``.

    Keyword Args:
        index_map_func (callable): maps (index, *index_map_func_args).
        index_map_func_args (list): extra args for index_map_func.
        value_map_func_args (list): extra args for value_map_func.

    Example:
        from elist.elist import *
        ol = ['a','b','c','d']
        def index_map_func(index,prefix,suffix):
            s = prefix +str(index+97)+ suffix
            return(s)
        def value_map_func(mapped_index,ele,prefix,suffix):
            s = prefix+mapped_index+': ' + str(ele) + suffix
            return(s)
        rslt = array_dualmap(ol,index_map_func=index_map_func,index_map_func_args=[': ',' is '],value_map_func=value_map_func,value_map_func_args=['ord',' yes?'])
        pobj(rslt)
    '''
    # kwargs.get collapses the original if/else plumbing for each option.
    index_map_func_args = kwargs.get('index_map_func_args', [])
    value_map_func_args = kwargs.get('value_map_func_args', [])
    index_map_func = kwargs.get('index_map_func', lambda obj: obj)
    mapped_indexes = [index_map_func(i, *index_map_func_args)
                      for i in range(len(ol))]
    return [value_map_func(mapped_indexes[i], ele, *value_map_func_args)
            for i, ele in enumerate(ol)]
from elist.elist import * ol = ['a','b','c','d'] def index_map_func(index,prefix,suffix): s = prefix +str(index+97)+ suffix return(s) def value_map_func(mapped_index,ele,prefix,suffix): s = prefix+mapped_index+': ' + str(ele) + suffix return(s) #### rslt = array_dualmap2(ol,index_map_func=index_map_func,index_map_func_args=[': ',' is '],value_map_func=value_map_func,value_map_func_args=['ord',' yes?']) pobj(rslt)
def to_safe(self, word):
    '''Converts 'bad' characters in a string to underscores so they
    can be used as Ansible groups.
    '''
    # Raw strings: "\_" / "\-" in plain literals are invalid escape
    # sequences and warn (eventually error) on modern Python 3.
    regex = r"[^A-Za-z0-9\_"
    if not self.replace_dash_in_groups:
        regex += r"\-"
    return re.sub(regex + "]", "_", word)
Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
def make_tx(version, tx_ins, tx_outs, lock_time,
            expiry=None, value_balance=0,
            tx_shielded_spends=None, tx_shielded_outputs=None,
            tx_witnesses=None, tx_joinsplits=None,
            joinsplit_pubkey=None, joinsplit_sig=None,
            binding_sig=None):
    '''
    int, list(TxIn), list(TxOut), int, list(InputWitness) -> Tx

    Build a transaction object for the currently selected network.
    Dispatches on the network name: Decred, Zcash Sprout/Overwinter/
    Sapling, or plain (optionally segwit) Bitcoin-style transactions.
    '''
    n = riemann.get_current_network_name()
    if 'decred' in n:
        # NOTE(review): tx_witnesses is wrapped in a one-element list
        # here, unlike the other branches — confirm DecredTx expects that.
        return tx.DecredTx(
            version=utils.i2le_padded(version, 4),
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            expiry=utils.i2le_padded(expiry, 4),
            tx_witnesses=[tx_witnesses])
    if 'sprout' in n and tx_joinsplits is not None:
        return tx.SproutTx(
            version=version,
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [],
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig)
    if 'overwinter' in n:
        return tx.OverwinterTx(
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            expiry_height=utils.i2le_padded(expiry, 4),
            tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [],
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig)
    if 'sapling' in n:
        return tx.SaplingTx(
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            expiry_height=utils.i2le_padded(expiry, 4),
            value_balance=utils.i2le_padded(value_balance, 8),
            tx_shielded_spends=(tx_shielded_spends
                                if tx_shielded_spends is not None else []),
            tx_shielded_outputs=(tx_shielded_outputs
                                 if tx_shielded_outputs is not None else []),
            tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [],
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig,
            binding_sig=binding_sig)
    # Default Bitcoin-style path; the segwit flag is only set when
    # witnesses are supplied.
    flag = riemann.network.SEGWIT_TX_FLAG \
        if tx_witnesses is not None else None
    return tx.Tx(version=utils.i2le_padded(version, 4),
                 flag=flag,
                 tx_ins=tx_ins,
                 tx_outs=tx_outs,
                 tx_witnesses=tx_witnesses,
                 lock_time=utils.i2le_padded(lock_time, 4))
int, list(TxIn), list(TxOut), int, list(InputWitness) -> Tx
def download(self, packageName, versionCode=None, offerType=1, expansion_files=False):
    """Download an app and return its raw data (APK file). Free apps need
    to be "purchased" first, in order to retrieve the download cookie.
    If you want to download an already purchased app, use *delivery* method.

    Args:
        packageName (str): app unique ID (usually starting with 'com.')
        versionCode (int): version to download; latest when None
        offerType (int): different type of downloads (mostly unused for apks)
        expansion_files (bool): whether to also fetch expansion files

    Returns:
        Dictionary containing apk data and optional expansion files
        (see *delivery*)

    Raises:
        LoginError: when not authenticated.
        RequestError: when the purchase endpoint reports an error.
    """
    if self.authSubToken is None:
        raise LoginError("You need to login before executing any request")

    if versionCode is None:
        # pick up latest version
        appDetails = self.details(packageName).get('details').get('appDetails')
        versionCode = appDetails.get('versionCode')

    headers = self.getHeaders()
    params = {'ot': str(offerType),
              'doc': packageName,
              'vc': str(versionCode)}
    self.log(packageName)
    # "Purchase" the (free) app to obtain the download token.
    response = requests.post(PURCHASE_URL, headers=headers,
                             params=params, verify=ssl_verify,
                             timeout=60,
                             proxies=self.proxies_config)

    response = googleplay_pb2.ResponseWrapper.FromString(response.content)
    if response.commands.displayErrorMessage != "":
        raise RequestError(response.commands.displayErrorMessage)
    else:
        dlToken = response.payload.buyResponse.downloadToken
        # The actual bytes come from the delivery endpoint.
        return self.delivery(packageName, versionCode, offerType, dlToken,
                             expansion_files=expansion_files)
Download an app and return its raw data (APK file). Free apps need to be "purchased" first, in order to retrieve the download cookie. If you want to download an already purchased app, use *delivery* method. Args: packageName (str): app unique ID (usually starting with 'com.') versionCode (int): version to download offerType (int): different type of downloads (mostly unused for apks) downloadToken (str): download token returned by 'purchase' API progress_bar (bool): whether or not to print a progress bar to stdout Returns Dictionary containing apk data and optional expansion files (see *delivery*)
def edit_standard_fwl_rules(self, firewall_id, rules):
    """Edit the rules for standard firewall.

    :param integer firewall_id: the instance ID of the standard firewall
    :param dict rules: the rules to be pushed on the firewall
    """
    template = {
        'networkComponentFirewallId': firewall_id,
        'rules': rules,
    }
    return self.client['Network_Firewall_Update_Request'].createObject(template)
Edit the rules for standard firewall. :param integer firewall_id: the instance ID of the standard firewall :param dict rules: the rules to be pushed on the firewall
def get_mode(self, gpio):
    """
    Returns the gpio mode.

    gpio:= 0-53.

    Returns a value as follows

    . .
    0 = INPUT
    1 = OUTPUT
    2 = ALT5
    3 = ALT4
    4 = ALT0
    5 = ALT1
    6 = ALT2
    7 = ALT3
    . .

    ...
    print(pi.get_mode(0))
    4
    ...

    This is a generator-based coroutine (``yield from``).
    """
    res = yield from self._pigpio_aio_command(_PI_CMD_MODEG, gpio, 0)
    # _u2i converts the daemon's unsigned reply, raising on error codes.
    return _u2i(res)
Returns the gpio mode. gpio:= 0-53. Returns a value as follows . . 0 = INPUT 1 = OUTPUT 2 = ALT5 3 = ALT4 4 = ALT0 5 = ALT1 6 = ALT2 7 = ALT3 . . ... print(pi.get_mode(0)) 4 ...
def open_connection(self):
    """Open an sqlite connection to the metadata database.

    By default the metadata database will be used in the plugin dir,
    unless an explicit path has been set using setmetadataDbPath, or
    overridden in QSettings. If the db does not exist it will be
    created.

    :raises: An sqlite.Error or OSError is raised if anything goes wrong
    """
    self.connection = None
    base_directory = os.path.dirname(self.metadata_db_path)
    try:
        # makedirs(exist_ok=True) is race-free (no check-then-create
        # window) and also creates missing intermediate directories,
        # which the original os.mkdir could not.
        os.makedirs(base_directory, exist_ok=True)
    except OSError:
        LOGGER.exception(
            'Could not create directory for metadata cache.')
        raise
    try:
        self.connection = sqlite.connect(self.metadata_db_path)
    except (OperationalError, sqlite.Error):
        LOGGER.exception('Failed to open metadata cache database.')
        raise
Open an sqlite connection to the metadata database. By default the metadata database will be used in the plugin dir, unless an explicit path has been set using setmetadataDbPath, or overridden in QSettings. If the db does not exist it will be created. :raises: An sqlite.Error is raised if anything goes wrong
def _format_args(): """Get JSON dump indentation and separates.""" # Ensure we can run outside a application/request context. try: pretty_format = \ current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and \ not request.is_xhr except RuntimeError: pretty_format = False if pretty_format: return dict( indent=2, separators=(', ', ': '), ) else: return dict( indent=None, separators=(',', ':'), )
Get JSON dump indentation and separators.
def get_exif_tags(data, datetime_format='%c'):
    """Make a simplified version with common tags from raw EXIF data.

    :param data: raw EXIF tag mapping (tag name -> value; rational
        values arrive as (numerator, denominator) tuples)
    :param datetime_format: strftime format for the 'datetime' entry
    :return: dict with normalized keys such as 'Model', 'fstop',
        'focal', 'exposure', 'iso', 'dateobj', 'datetime' and 'gps'
    """
    logger = logging.getLogger(__name__)
    simple = {}

    # Plain string tags; some sources deliver them wrapped in a tuple.
    for tag in ('Model', 'Make', 'LensModel'):
        if tag in data:
            if isinstance(data[tag], tuple):
                simple[tag] = data[tag][0].strip()
            else:
                simple[tag] = data[tag].strip()

    if 'FNumber' in data:
        fnumber = data['FNumber']
        try:
            simple['fstop'] = float(fnumber[0]) / fnumber[1]
        except Exception:
            logger.debug('Skipped invalid FNumber: %r', fnumber,
                         exc_info=True)

    if 'FocalLength' in data:
        focal = data['FocalLength']
        try:
            simple['focal'] = round(float(focal[0]) / focal[1])
        except Exception:
            logger.debug('Skipped invalid FocalLength: %r', focal,
                         exc_info=True)

    if 'ExposureTime' in data:
        exptime = data['ExposureTime']
        if isinstance(exptime, tuple):
            try:
                # Keep exposure as an exact fraction string (e.g. '1/250').
                simple['exposure'] = str(fractions.Fraction(exptime[0],
                                                            exptime[1]))
            except ZeroDivisionError:
                logger.info('Invalid ExposureTime: %r', exptime)
        elif isinstance(exptime, int):
            simple['exposure'] = str(exptime)
        else:
            logger.info('Unknown format for ExposureTime: %r', exptime)

    if data.get('ISOSpeedRatings'):
        simple['iso'] = data['ISOSpeedRatings']

    if 'DateTimeOriginal' in data:
        # Remove null bytes at the end if necessary
        date = data['DateTimeOriginal'].rsplit('\x00')[0]
        try:
            simple['dateobj'] = datetime.strptime(date, '%Y:%m:%d %H:%M:%S')
            simple['datetime'] = simple['dateobj'].strftime(datetime_format)
        except (ValueError, TypeError) as e:
            logger.info('Could not parse DateTimeOriginal: %s', e)

    if 'GPSInfo' in data:
        info = data['GPSInfo']
        lat_info = info.get('GPSLatitude')
        lon_info = info.get('GPSLongitude')
        lat_ref_info = info.get('GPSLatitudeRef')
        lon_ref_info = info.get('GPSLongitudeRef')

        if lat_info and lon_info and lat_ref_info and lon_ref_info:
            try:
                lat = dms_to_degrees(lat_info)
                lon = dms_to_degrees(lon_info)
            except (ZeroDivisionError, ValueError, TypeError):
                logger.info('Failed to read GPS info')
            else:
                # Southern/western hemispheres are stored as negatives.
                simple['gps'] = {
                    'lat': - lat if lat_ref_info != 'N' else lat,
                    'lon': - lon if lon_ref_info != 'E' else lon,
                }
    return simple
Make a simplified version with common tags from raw EXIF data.
def read(filename, file_format=None):
    """Reads an unstructured mesh with added data.

    :param filename: The file to read from.
    :type filename: str
    :param file_format: Explicit format name; deduced from the file
        extension when falsy.
    :returns mesh{2,3}d: The mesh data.
    :raises TypeError: if filename is not a str.
    :raises ValueError: if the file format is unknown.
    """
    # https://stackoverflow.com/q/4843173/353337
    if not isinstance(filename, str):
        # Raise instead of assert so validation survives `python -O`.
        raise TypeError("filename must be a str, not {}".format(type(filename)))

    if not file_format:
        # deduce file format from extension
        file_format = _filetype_from_filename(filename)

    format_to_reader = {
        "ansys": ansys_io,
        "ansys-ascii": ansys_io,
        "ansys-binary": ansys_io,
        #
        "gmsh": msh_io,
        "gmsh-ascii": msh_io,
        "gmsh-binary": msh_io,
        "gmsh2": msh_io,
        "gmsh2-ascii": msh_io,
        "gmsh2-binary": msh_io,
        "gmsh4": msh_io,
        "gmsh4-ascii": msh_io,
        "gmsh4-binary": msh_io,
        #
        "med": med_io,
        "medit": medit_io,
        "dolfin-xml": dolfin_io,
        "permas": permas_io,
        "moab": h5m_io,
        "off": off_io,
        #
        "stl": stl_io,
        "stl-ascii": stl_io,
        "stl-binary": stl_io,
        #
        "vtu-ascii": vtu_io,
        "vtu-binary": vtu_io,
        #
        "vtk-ascii": vtk_io,
        "vtk-binary": vtk_io,
        #
        "xdmf": xdmf_io,
        "exodus": exodus_io,
        #
        "abaqus": abaqus_io,
        #
        "mdpa": mdpa_io,
    }

    if file_format not in format_to_reader:
        raise ValueError(
            "Unknown file format '{}' of '{}'.".format(file_format, filename)
        )
    return format_to_reader[file_format].read(filename)
Reads an unstructured mesh with added data. :param filenames: The files to read from. :type filenames: str :returns mesh{2,3}d: The mesh data.
def poe_map(self, src, s_sites, imtls, trunclevel, rup_indep=True):
    """
    :param src: a source object
    :param s_sites: a filtered SiteCollection of sites around the source
    :param imtls: intensity measure and levels
    :param trunclevel: truncation level
    :param rup_indep: True if the ruptures are independent
    :returns: a ProbabilityMap instance
    """
    # Start from 1 for independent ruptures (probabilities are
    # multiplied) and 0 for mutually exclusive ones (weights are summed).
    pmap = ProbabilityMap.build(
        len(imtls.array), len(self.gsims), s_sites.sids,
        initvalue=rup_indep)
    eff_ruptures = 0
    for rup, sctx, dctx in self.gen_rup_contexts(src, s_sites):
        eff_ruptures += 1
        with self.poe_mon:
            # pnes = probabilities of non-exceedance per affected site
            pnes = self._make_pnes(rup, sctx, dctx, imtls, trunclevel)
            for sid, pne in zip(sctx.sids, pnes):
                if rup_indep:
                    pmap[sid].array *= pne
                else:
                    pmap[sid].array += (1.-pne) * rup.weight
    if rup_indep:
        # Convert accumulated non-exceedance into exceedance (1 - prod).
        pmap = ~pmap
    pmap.eff_ruptures = eff_ruptures
    return pmap
:param src: a source object :param s_sites: a filtered SiteCollection of sites around the source :param imtls: intensity measure and levels :param trunclevel: truncation level :param rup_indep: True if the ruptures are independent :returns: a ProbabilityMap instance
def encode(arg, delimiter=None, encodeseq=None, encoded=tuple()):
    '''Encode a single argument for the file-system.

    Characters that collide with the queue delimiter, the escape sequence
    itself, or any extra character listed in ``encoded`` are replaced by
    the escape sequence followed by their two-digit hex code.
    '''
    arg = coerce_unicode(arg, _c.FSQ_CHARSET)
    new_arg = sep = u''
    delimiter, encodeseq = delimiter_encodeseq(
        _c.FSQ_DELIMITER if delimiter is None else delimiter,
        _c.FSQ_ENCODE if encodeseq is None else encodeseq,
        _c.FSQ_CHARSET)
    # validate encoded tuple: every extra encode-target must be ascii
    for enc in encoded:
        enc = coerce_unicode(enc, _c.FSQ_CHARSET)
        try:
            enc = enc.encode('ascii')
        except UnicodeEncodeError:
            raise FSQEncodeError(errno.EINVAL, u'invalid encoded value: {0}'\
                                 u' non-ascii'.format(enc))
    # char-wise encode walk
    for seq in arg:
        if seq == delimiter or seq == encodeseq or seq in _ENCODED + encoded:
            h_val = hex(ord(seq))
            # front-pad with zeroes so the escape is always 2 hex digits
            if 3 == len(h_val):
                h_val = sep.join([h_val[:2], u'0', h_val[2:]])
            if 4 != len(h_val):
                raise FSQEncodeError(errno.EINVAL, u'invalid hex ({0}) for'\
                                     ' encode-target: {1}'.format(h_val, seq))
            # drop the '0x' prefix and prepend the escape sequence
            seq = sep.join([encodeseq, h_val[2:]])
        new_arg = sep.join([new_arg, seq])
    return new_arg
Encode a single argument for the file-system
def bind(context, block=False):
    """
    Given the context, returns a decorator wrapper; the binder replaces
    the wrapped func with the value from the context OR puts this
    function in the context with the name.

    :param context: mapping of names to callables shared by the template
    :param block: when True, strip the ``__TK__block__`` marker from the
        function name before the context lookup
    :returns: a decorator that registers/replaces the wrapped function
    """
    # The two former branches differed only in how the name was derived;
    # unify them to avoid the duplicated decorator body.
    def decorate(func):
        name = func.__name__
        if block:
            name = name.replace('__TK__block__', '')
        if name not in context:
            context[name] = func
        return context[name]
    return decorate
Given the context, returns a decorator wrapper; the binder replaces the wrapped func with the value from the context OR puts this function in the context with the name.
def get_item_bank_id_metadata(self):
    """Return the metadata describing the item bank id field."""
    metadata_map = dict(self._item_bank_id_metadata)
    # expose the currently stored id values alongside the static metadata
    metadata_map['existing_id_values'] = \
        self.my_osid_object_form._my_map['itemBankId']
    return Metadata(**metadata_map)
get the metadata for item bank
def _finish(self):
    """
    Flush and close the subprocess's stdin, then wait for it to exit.
    """
    proc = self._process
    if proc.returncode is None:
        # still running: closing stdin signals EOF, then reap the process
        proc.stdin.flush()
        proc.stdin.close()
        proc.wait()
    self.closed = True
Closes and waits for subprocess to exit.
def run(self):
    '''
    Fire up halite!
    '''
    proc_title = type(self).__name__
    salt.utils.process.appendproctitle(proc_title)
    halite.start(self.hopts)
Fire up halite!
def add_error_class(klass, code):
    """
    Maps an exception class to a string code. Used to map remoting
    C{onStatus} objects to an exception class so that an exception can be
    built to represent that error.

    An example::
       >>> class AuthenticationError(Exception):
       ...     pass
       ...
       >>> pyamf.add_error_class(AuthenticationError, 'Auth.Failed')

    @param klass: Exception class
    @param code: Exception code
    @type code: C{str}
    @raise TypeError: C{klass} is not a class or not an Exception subclass.
    @raise ValueError: C{code} is already registered.
    @see: L{remove_error_class}
    """
    # normalize the code to text so registry keys are consistent
    if not isinstance(code, python.str_types):
        code = code.decode('utf-8')

    if not isinstance(klass, python.class_types):
        raise TypeError("klass must be a class type")

    mro = inspect.getmro(klass)

    # idiom fix: `X not in y` instead of `not X in y`
    if Exception not in mro:
        raise TypeError(
            'Error classes must subclass the __builtin__.Exception class')

    if code in ERROR_CLASS_MAP:
        raise ValueError('Code %s is already registered' % (code,))

    ERROR_CLASS_MAP[code] = klass
Maps an exception class to a string code. Used to map remoting C{onStatus} objects to an exception class so that an exception can be built to represent that error. An example:: >>> class AuthenticationError(Exception): ... pass ... >>> pyamf.add_error_class(AuthenticationError, 'Auth.Failed') >>> print pyamf.ERROR_CLASS_MAP {'TypeError': <type 'exceptions.TypeError'>, 'IndexError': <type 'exceptions.IndexError'>, 'Auth.Failed': <class '__main__.AuthenticationError'>, 'KeyError': <type 'exceptions.KeyError'>, 'NameError': <type 'exceptions.NameError'>, 'LookupError': <type 'exceptions.LookupError'>} @param klass: Exception class @param code: Exception code @type code: C{str} @see: L{remove_error_class}
def clean(self):
    """Validates the current instance."""
    super().clean()
    # Exactly one of the two targets must be set: a concrete user or the
    # anonymous-user flag.
    missing_target = self.user is None and not self.anonymous_user
    double_target = self.user and self.anonymous_user
    if missing_target or double_target:
        raise ValidationError(
            _('A permission should target either a user or an anonymous user'),
        )
Validates the current instance.
def _base_get_list(self, url, limit=None, *, query=None, order_by=None, batch=None):
    """ Returns a collection of drive items.

    :param str url: endpoint url to request
    :param int limit: maximum number of items to retrieve (None = no limit)
    :param query: a Query object or a raw OData ``$filter`` string
    :param str order_by: optional ``$orderby`` expression
    :param int batch: page size when paginating
    :returns: an iterator of drive items, or a Pagination object when the
        server returned a next-page link and batching is on
    """
    if limit is None or limit > self.protocol.max_top_value:
        # cap the page size at what the API allows and paginate instead
        batch = self.protocol.max_top_value

    params = {'$top': batch if batch else limit}

    if order_by:
        params['$orderby'] = order_by

    if query:
        if query.has_filters:
            # the provider rejects $filter on this endpoint, so drop them
            warnings.warn(
                'Filters are not allowed by the Api Provider '
                'in this method')
            query.clear_filters()
        if isinstance(query, str):
            params['$filter'] = query
        else:
            params.update(query.as_params())

    response = self.con.get(url, params=params)
    if not response:
        return iter(())

    data = response.json()

    # Everything received from cloud must be passed as self._cloud_data_key
    items = (
        self._classifier(item)(parent=self, **{self._cloud_data_key: item})
        for item in data.get('value', []))

    next_link = data.get(NEXT_LINK_KEYWORD, None)
    if batch and next_link:
        return Pagination(parent=self, data=items,
                          constructor=self._classifier,
                          next_link=next_link, limit=limit)
    else:
        return items
Returns a collection of drive items
def config_(dev=None, **kwargs):
    '''
    Show or update config of a bcache device.

    If no device is given, operate on the cache set itself.

    CLI example:

    .. code-block:: bash

        salt '*' bcache.config
        salt '*' bcache.config bcache1
        salt '*' bcache.config errors=panic journal_delay_ms=150
        salt '*' bcache.config bcache1 cache_mode=writeback writeback_percent=15

    :return: config or True/False
    '''
    # pick the sysfs directory: the cache set or the specific device
    if dev is None:
        spath = _fspath()
    else:
        spath = _bcpath(dev)

    # filter out 'hidden' kwargs added by our favourite orchestration system
    updates = dict([(key, val) for key, val in kwargs.items() if not key.startswith('__')])

    if updates:
        # write each requested attribute; NOTE(review): the return value is
        # True if at least one _sysfs_attr call reported success — presumably
        # _sysfs_attr returns a truthy value on success; confirm in its impl
        endres = 0
        for key, val in updates.items():
            endres += _sysfs_attr([spath, key], val,
                                  'warn', 'Failed to update {0} with {1}'.format(os.path.join(spath, key), val))
        return endres > 0
    else:
        # read-only path: collect and flatten the parsed sysfs sections
        result = {}
        data = _sysfs_parse(spath, config=True, internals=True, options=True)
        for key in ('other_ro', 'inter_ro'):
            if key in data:
                del data[key]
        for key in data:
            result.update(data[key])
        return result
Show or update config of a bcache device. If no device is given, operate on the cache set itself. CLI example: .. code-block:: bash salt '*' bcache.config salt '*' bcache.config bcache1 salt '*' bcache.config errors=panic journal_delay_ms=150 salt '*' bcache.config bcache1 cache_mode=writeback writeback_percent=15 :return: config or True/False
def serialize_rdf(update_graph, signing):
    '''Tweak rdflib's pretty-xml serialization of update_graph into the
    "indentical" representation as defined in http://mzl.la/x4XF6o

    When ``signing`` is True, only the dedented, sorted body is returned;
    otherwise the original XML prolog/epilog lines are kept around it.
    '''
    unsorted_s = update_graph.serialize(format = 'pretty-xml')
    # normalize rdflib's namespace spelling to the expected RDF: form
    unsorted_s = unsorted_s.replace('xmlns:rdf', 'xmlns:RDF')
    unsorted_s = unsorted_s.replace('rdf:', 'RDF:')
    unsorted_s = unsorted_s.replace('RDF:about', 'about')
    unsorted_s = unsorted_s.split('\n')
    # keep the first five and last two lines (prolog/epilog) aside
    start, end = unsorted_s[0:5], unsorted_s[-2:]
    unsorted_s = unsorted_s[5:-2]
    if signing:
        # dedent by two spaces and terminate with an empty line
        unsorted_s = [line[2:] for line in unsorted_s]
        unsorted_s.append('')
    sorting_s = []
    prev_leading_spaces = -2
    # Group lines by indentation depth and sort each sibling group so the
    # serialization is deterministic regardless of rdflib's output order.
    for line in unsorted_s:
        leading_spaces = len(line) - len(line.lstrip())
        if leading_spaces > prev_leading_spaces:
            sorting_s.append([line])
        elif leading_spaces == prev_leading_spaces:
            sorting_s[-1].append(line)
        elif leading_spaces < prev_leading_spaces:
            # dedent: fold the finished deeper group back into its parent
            tmp_line = sorting_s.pop()
            tmp_line = '\n'.join(sorted(tmp_line))
            tmp_line = [sorting_s[-1].pop(), tmp_line, line]
            tmp_line = '\n'.join(tmp_line)
            sorting_s[-1].append(tmp_line)
        prev_leading_spaces = leading_spaces
    if signing:
        sorted_s = '\n'.join(sorting_s[0])
    else:
        sorted_s = []
        sorted_s.extend(start)
        sorted_s.extend(sorting_s[0])
        sorted_s.extend(end)
        sorted_s = '\n'.join(sorted_s)
    return sorted_s
Tweak rdflib's pretty-xml serialization of update_graph into the "indentical" representation as defined in http://mzl.la/x4XF6o
def execute(self):
    """
    Migrate pickled cluster storage to the current format.

    Lists the ``*.pickle`` files in the storage path (optionally limited
    to the clusters named on the command line), reports the attribute
    patches each cluster needs, and — unless ``--dry-run`` — saves the
    patched clusters back to disk.
    """
    repo = repository.PickleRepository(self.params.storage_path)

    # i[:-7] strips the '.pickle' suffix (7 characters)
    clusters = [i[:-7] for i in os.listdir(self.params.storage_path) if i.endswith('.pickle')]

    if self.params.cluster:
        clusters = [x for x in clusters if x in self.params.cluster]

    if not clusters:
        print("No clusters")
        sys.exit(0)

    patch_cluster()
    for cluster in clusters:
        print("Cluster `%s`" % cluster)
        print("path: %s" % repo.storage_path + '/%s.pickle' % cluster)
        cl = repo.get(cluster)
        if cl._patches:
            print("Attributes changed: ")
            for attr, val in cl._patches.items():
                print("  %s: %s -> %s" % (attr, val[0], val[1]))
        else:
            print("No upgrade needed")
        print("")

        if not self.params.dry_run:
            if cl._patches:
                # drop the patch bookkeeping before persisting
                del cl._patches
                print("Changes saved to disk")
                cl.repository.save_or_update(cl)
migrate storage
def to_text_string(obj, encoding=None):
    """Convert `obj` to a (unicode) text string."""
    if PY2:
        # Python 2
        return unicode(obj) if encoding is None else unicode(obj, encoding)
    # Python 3
    if encoding is None:
        return str(obj)
    if isinstance(obj, str):
        # In case this function is not used properly, this could happen:
        # the value is already text, so return it unchanged.
        return obj
    return str(obj, encoding)
Convert `obj` to (unicode) text string
def format_duration(seconds):
    """Formats a number of seconds using the best units."""
    units, multiplier = get_time_units_and_multiplier(seconds)
    scaled = seconds * multiplier
    return "%.3f %s" % (scaled, units)
Formats a number of seconds using the best units.
def get_bucket():
    """ Print the name of every key in the configured S3 bucket. """
    args = parser.parse_args()
    bucket = s3_bucket(args.aws_access_key_id,
                       args.aws_secret_access_key,
                       args.bucket_name)
    for key in bucket.list():
        # replace non-ascii characters with spaces before printing
        print(''.join(ch if ord(ch) < 128 else ' ' for ch in key.name))
Get listing of S3 Bucket
def generate(input_path, output_path=None, in_memory=False, safe_mode=False,
             error_context=None):
    """Executes the Statik site generator using the given parameters."""
    project = StatikProject(
        input_path,
        safe_mode=safe_mode,
        error_context=error_context,
    )
    return project.generate(output_path=output_path, in_memory=in_memory)
Executes the Statik site generator using the given parameters.
def getCatalogPixels(self):
    """
    Return the catalog pixels spanned by this ROI.
    """
    filenames = self.config.getFilenames()
    coords = self.config.params['coords']
    nside_catalog = coords['nside_catalog']
    nside_pixel = coords['nside_pixel']

    # All possible catalog pixels spanned by the ROI
    superpix = np.unique(
        ugali.utils.skymap.superpixel(self.pixels, nside_pixel, nside_catalog))
    # Only catalog pixels that exist in catalog files
    return np.intersect1d(superpix, filenames['pix'].compressed())
Return the catalog pixels spanned by this ROI.
def lstm_seq2seq_internal_attention(inputs, targets, hparams, train, inputs_length, targets_length):
    """LSTM seq2seq model with attention, main step used for training.

    :param inputs: 4-D input Tensor (flattened to 3-D before encoding)
    :param targets: 4-D target Tensor
    :param hparams: model hyperparameters
    :param train: bool, whether we are in training mode
    :param inputs_length: per-example input lengths
    :param targets_length: per-example target lengths
    :returns: decoder outputs with an extra axis-2 dimension restored
    """
    with tf.variable_scope("lstm_seq2seq_attention"):
        # Flatten inputs.
        inputs = common_layers.flatten4d3d(inputs)

        # LSTM encoder.
        # Feed the source sequence in reverse (respecting true lengths).
        inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
        encoder_outputs, final_encoder_state = lstm(
            inputs, inputs_length, hparams, train, "encoder")

        # LSTM decoder with attention.
        shifted_targets = common_layers.shift_right(targets)
        # Add 1 to account for the padding added to the left from shift_right
        targets_length = targets_length + 1
        decoder_outputs = lstm_attention_decoder(
            common_layers.flatten4d3d(shifted_targets), hparams, train,
            "decoder", final_encoder_state, encoder_outputs,
            inputs_length, targets_length)
        return tf.expand_dims(decoder_outputs, axis=2)
LSTM seq2seq model with attention, main step used for training.
def _is_valid(self, log: Optional[Logger] = None) -> bool:
    """ Report whether the current contents pass validation. """
    valid, *_ = self._validate(self, log)
    return valid
Determine whether the current contents are valid
def get_exif_data(filename):
    """Return a dict with the raw EXIF data.

    Returns ``None`` when the EXIF block cannot be read. Tag ids are
    translated to human-readable names via PIL's TAGS/GPSTAGS maps.
    """
    logger = logging.getLogger(__name__)

    img = _read_image(filename)
    try:
        # NOTE(review): ZeroDivisionError presumably comes from PIL choking
        # on malformed rational EXIF values — confirm against PIL behavior.
        exif = img._getexif() or {}
    except ZeroDivisionError:
        logger.warning('Failed to read EXIF data.')
        return None

    # map numeric tag ids to readable names, keeping unknown ids as-is
    data = {TAGS.get(tag, tag): value for tag, value in exif.items()}

    if 'GPSInfo' in data:
        try:
            data['GPSInfo'] = {GPSTAGS.get(tag, tag): value
                               for tag, value in data['GPSInfo'].items()}
        except AttributeError:
            # GPSInfo was not a mapping; drop it rather than fail
            logger = logging.getLogger(__name__)
            logger.info('Failed to get GPS Info')
            del data['GPSInfo']
    return data
Return a dict with the raw EXIF data.
def _update_self_link(self, link, headers):
    '''Update the self link of this navigator.

    Merges ``link`` properties into the self link and records the
    content type reported by the server (falling back to the default
    when no Content-Type header was sent).
    '''
    self.self.props.update(link)
    # Set the self.type to the content_type of the returned document
    self.self.props['type'] = headers.get(
        'Content-Type', self.DEFAULT_CONTENT_TYPE)
    # fix: removed a trailing no-op expression statement `self.self.props`
    # left over from a previous edit — it had no effect.
Update the self link of this navigator
def _init_filename(self, filename=None, ext=None):
    """Initialize the current filename :attr:`FileUtils.real_filename` of the object.

    Bit of a hack.

    - The first invocation must have ``filename != None``; this will set a
      default filename with suffix :attr:`FileUtils.default_extension`
      unless another one was supplied.

    - Subsequent invocations either change the filename accordingly or
      ensure that the default filename is set with the proper suffix.
    """
    chosen = self.filename(filename,
                           ext=(ext or self.default_extension),
                           use_my_ext=True, set_default=True)
    #: Current full path of the object for reading and writing I/O.
    self.real_filename = os.path.realpath(chosen)
Initialize the current filename :attr:`FileUtils.real_filename` of the object. Bit of a hack. - The first invocation must have ``filename != None``; this will set a default filename with suffix :attr:`FileUtils.default_extension` unless another one was supplied. - Subsequent invocations either change the filename accordingly or ensure that the default filename is set with the proper suffix.
def ListingBox(listing, *args, **kwargs):
    " Delegate the boxing to the target's Box class. "
    publishable = listing.publishable
    return publishable.box_class(publishable, *args, **kwargs)
Delegate the boxing to the target's Box class.
def channel_info(self, channel):
    """Fetch information about a channel."""
    params = {self.PCHANNEL: channel}
    # delegate the HTTP round-trip to the shared fetch helper
    return self._fetch(self.RCHANNEL_INFO, params)
Fetch information about a channel.
def gradient(self, q, t=0.):
    """
    Compute the gradient of the potential at the given position(s).

    Parameters
    ----------
    q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
        The position to compute the value of the potential. If the
        input position object has no units (i.e. is an `~numpy.ndarray`),
        it is assumed to be in the same unit system as the potential.
    t : numeric, optional
        Time at which to evaluate the gradient (default 0.).

    Returns
    -------
    grad : `~astropy.units.Quantity`
        The gradient of the potential. Will have the same shape as
        the input position.
    """
    # strip units and coerce to the array layout the C layer expects
    q = self._remove_units_prepare_shape(q)
    orig_shape, q = self._get_c_valid_arr(q)
    t = self._validate_prepare_time(t, q)

    # the raw gradient is in [length/time^2] of the potential's unit system
    ret_unit = self.units['length'] / self.units['time']**2
    return (self._gradient(q, t=t).T.reshape(orig_shape) * ret_unit).to(self.units['acceleration'])
Compute the gradient of the potential at the given position(s). Parameters ---------- q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like The position to compute the value of the potential. If the input position object has no units (i.e. is an `~numpy.ndarray`), it is assumed to be in the same unit system as the potential. Returns ------- grad : `~astropy.units.Quantity` The gradient of the potential. Will have the same shape as the input position.
def create_ticket_from_albaran(pk, list_lines):
    """Create a sales ticket from the selected lines of a sales albaran.

    Delegates to ``SalesLines.create_document_from_another``, which
    presumably moves the given lines from the source ``SalesAlbaran``
    into a new ``SalesTicket`` — confirm in that helper.

    :param pk: primary key of the source ``SalesAlbaran``
    :param list_lines: list of ``SalesLines`` primary keys to convert
    :returns: context dict produced by ``create_document_from_another``
    """
    MODEL_SOURCE = SalesAlbaran
    MODEL_FINAL = SalesTicket
    url_reverse = 'CDNX_invoicing_ticketsaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a ticket")
    msg_error_not_found = _('Sales albaran not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a ticket')
    return SalesLines.create_document_from_another(pk, list_lines,
                                                   MODEL_SOURCE, MODEL_FINAL, url_reverse,
                                                   msg_error_relation, msg_error_not_found,
                                                   msg_error_line_not_found, False)
    # NOTE(review): the string below is an unreachable legacy implementation
    # (it sits after the return); kept verbatim for reference.
    """
    context = {}
    if list_lines:
        new_list_lines = SalesLines.objects.filter(
            pk__in=[int(x) for x in list_lines]
        ).exclude(
            invoice__isnull=True
        ).values_list('pk')
        if new_list_lines:
            new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first()
            if new_pk:
                context = SalesLines.create_ticket_from_order(new_pk, new_list_lines)
                return context
            else:
                error = _('Pedido no encontrado')
        else:
            error = _('Lineas no relacionadas con pedido')
    else:
        error = _('Lineas no seleccionadas')
    context['error'] = error
    return context
    """
context = {} if list_lines: new_list_lines = SalesLines.objects.filter( pk__in=[int(x) for x in list_lines] ).exclude( invoice__isnull=True ).values_list('pk') if new_list_lines: new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first() if new_pk: context = SalesLines.create_ticket_from_order(new_pk, new_list_lines) return context else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context
def connect(self):
    """
    Connect the instance to redis by checking the existence of its primary
    key. Do nothing if already connected.
    """
    if self.connected:
        return
    pk = self._pk
    if not self.exists(pk=pk):
        # reset state before failing so the instance is clearly unbound
        self._pk = None
        self._connected = False
        raise DoesNotExist("No %s found with pk %s" % (self.__class__.__name__, pk))
    self._connected = True
Connect the instance to redis by checking the existence of its primary key. Do nothing if already connected.
def write_string(value, buff, byteorder='big'):
    """Write a string to a file-like object."""
    payload = value.encode('utf-8')
    # length-prefixed: unsigned short byte count, then the UTF-8 payload
    write_numeric(USHORT, len(payload), buff, byteorder)
    buff.write(payload)
Write a string to a file-like object.
def run(self):
    '''
    Execute salt-run.

    Parses CLI args, sets up logging, optionally prints runner docs,
    then runs the requested runner (under the profiler when enabled)
    and exits with the runner's return code.
    '''
    import salt.runner
    self.parse_args()

    # Setup file logging!
    self.setup_logfile_logger()
    verify_log(self.config)
    profiling_enabled = self.options.profiling_enabled

    runner = salt.runner.Runner(self.config)
    if self.options.doc:
        runner.print_docs()
        self.exit(salt.defaults.exitcodes.EX_OK)

    # Run this here so SystemExit isn't raised anywhere else when
    # someone tries to use the runners via the python API
    try:
        if check_user(self.config['user']):
            pr = salt.utils.profile.activate_profile(profiling_enabled)
            try:
                ret = runner.run()
                # In older versions ret['data']['retcode'] was used
                # for signaling the return code. This has been
                # changed for the orchestrate runner, but external
                # runners might still use it. For this reason, we
                # also check ret['data']['retcode'] if
                # ret['retcode'] is not available.
                if isinstance(ret, dict) and 'retcode' in ret:
                    self.exit(ret['retcode'])
                elif isinstance(ret, dict) and 'retcode' in ret.get('data', {}):
                    self.exit(ret['data']['retcode'])
            finally:
                # always dump profiling stats, even when exiting early
                salt.utils.profile.output_profile(
                    pr,
                    stats_path=self.options.profiling_path,
                    stop=True)
    except SaltClientError as exc:
        raise SystemExit(six.text_type(exc))
Execute salt-run
def extract_status(self, status_headers):
    """
    Extract status code only from status line
    """
    code = status_headers.get_statuscode()
    if not code:
        # no status at all -> placeholder
        code = '-'
    elif code == '204' and 'Error' in status_headers.statusline:
        # a 204 whose status line carries an error marker is treated as
        # "no status" as well
        code = '-'
    self['status'] = code
Extract status code only from status line
def NumExpr(ex, signature=(), **kwargs): """ Compile an expression built using E.<variable> variables to a function. ex can also be specified as a string "2*a+3*b". The order of the input variables and their types can be specified using the signature parameter, which is a list of (name, type) pairs. Returns a `NumExpr` object containing the compiled function. """ # NumExpr can be called either directly by the end-user, in which case # kwargs need to be sanitized by getContext, or by evaluate, # in which case kwargs are in already sanitized. # In that case frame_depth is wrong (it should be 2) but it doesn't matter # since it will not be used (because truediv='auto' has already been # translated to either True or False). context = getContext(kwargs, frame_depth=1) threeAddrProgram, inputsig, tempsig, constants, input_names = precompile(ex, signature, context) program = compileThreeAddrForm(threeAddrProgram) return interpreter.NumExpr(inputsig.encode('ascii'), tempsig.encode('ascii'), program, constants, input_names)
Compile an expression built using E.<variable> variables to a function. ex can also be specified as a string "2*a+3*b". The order of the input variables and their types can be specified using the signature parameter, which is a list of (name, type) pairs. Returns a `NumExpr` object containing the compiled function.
def pwarning(*args, **kwargs):
    """print formatted output to stderr with indentation control"""
    if not should_msg(kwargs.get("groups", ["warning"])):
        return
    # initialize colorama only if uninitialized
    global colorama_init
    if not colorama_init:
        colorama_init = True
        colorama.init()
    lines = indent_text(*args, **kwargs)
    # write to stderr in yellow, then reset the color
    sys.stderr.write(colorama.Fore.YELLOW)
    sys.stderr.write("".join(lines))
    sys.stderr.write(colorama.Fore.RESET)
    sys.stderr.write("\n")
print formatted output to stderr with indentation control
def get_holdings(self, account: SEPAAccount): """ Retrieve holdings of an account. :param account: SEPAAccount to retrieve holdings for. :return: List of Holding objects """ # init dialog with self._get_dialog() as dialog: hkwpd = self._find_highest_supported_command(HKWPD5, HKWPD6) responses = self._fetch_with_touchdowns( dialog, lambda touchdown: hkwpd( account=hkwpd._fields['account'].type.from_sepa_account(account), touchdown_point=touchdown, ), 'HIWPD' ) holdings = [] for resp in responses: if type(resp.holdings) == bytes: holding_str = resp.holdings.decode() else: holding_str = resp.holdings mt535_lines = str.splitlines(holding_str) # The first line is empty - drop it. del mt535_lines[0] mt535 = MT535_Miniparser() holdings.extend(mt535.parse(mt535_lines)) if not holdings: logger.debug('No HIWPD response segment found - maybe account has no holdings?') return holdings
Retrieve holdings of an account. :param account: SEPAAccount to retrieve holdings for. :return: List of Holding objects
def parse(json, query_path, expected_vars=NO_VARS):
    """
    INTENDED TO TREAT JSON AS A STREAM; USING MINIMAL MEMORY WHILE IT ITERATES
    THROUGH THE STRUCTURE. ASSUMING THE JSON IS LARGE, AND HAS A HIGH LEVEL
    ARRAY STRUCTURE, IT WILL yield EACH OBJECT IN THAT ARRAY. NESTED ARRAYS
    ARE HANDLED BY REPEATING THE PARENT PROPERTIES FOR EACH MEMBER OF THE
    NESTED ARRAY. DEEPER NESTED PROPERTIES ARE TREATED AS PRIMITIVE VALUES;
    THE STANDARD JSON DECODER IS USED.

    LARGE MANY-PROPERTY OBJECTS CAN BE HANDLED BY `items()`

    :param json: SOME STRING-LIKE STRUCTURE THAT CAN ASSUME WE LOOK AT ONE
                 CHARACTER AT A TIME, IN ORDER
    :param query_path: A DOT-SEPARATED STRING INDICATING THE PATH TO THE
                       NESTED ARRAY OPTIONALLY, {"items":query_path} TO
                       FURTHER ITERATE OVER PROPERTIES OF OBJECTS FOUND AT
                       query_path
    :param expected_vars: REQUIRED PROPERTY NAMES, USED TO DETERMINE IF
                          MORE-THAN-ONE PASS IS REQUIRED
    :return: RETURNS AN ITERATOR OVER ALL OBJECTS FROM ARRAY LOCATED AT
             query_path
    """
    # Normalize the input into a List_usingStream so it can be indexed
    # one byte at a time regardless of whether a file, callable, or
    # generator was supplied.
    if hasattr(json, "read"):
        # ASSUME IT IS A STREAM
        temp = json
        def get_more():
            return temp.read(MIN_READ_SIZE)
        json = List_usingStream(get_more)
    elif hasattr(json, "__call__"):
        json = List_usingStream(json)
    elif isinstance(json, GeneratorType):
        json = List_usingStream(json.next)
    else:
        Log.error("Expecting json to be a stream, or a function that will return more bytes")

    def _iterate_list(index, c, parent_path, path, expected_vars):
        c, index = skip_whitespace(index)
        if c == b']':
            yield index
            return
        while True:
            if not path:
                index = _assign_token(index, c, expected_vars)
                c, index = skip_whitespace(index)
                if c == b']':
                    yield index
                    _done(parent_path)
                    return
                elif c == b',':
                    yield index
                    c, index = skip_whitespace(index)
            else:
                for index in _decode_token(index, c, parent_path, path, expected_vars):
                    c, index = skip_whitespace(index)
                    if c == b']':
                        yield index
                        _done(parent_path)
                        return
                    elif c == b',':
                        yield index
                        c, index = skip_whitespace(index)

    def _done(parent_path):
        if len(parent_path) < len(done[0]):
            done[0] = parent_path

    def _decode_object(index, c, parent_path, query_path, expected_vars):
        if "." in expected_vars:
            if len(done[0]) <= len(parent_path) and all(d == p for d, p in zip(done[0], parent_path)):
                Log.error("Can not pick up more variables, iterator is done")
            if query_path:
                Log.error("Can not extract objects that contain the iteration", var=join_field(query_path))
            index = _assign_token(index, c, expected_vars)
            # c, index = skip_whitespace(index)
            yield index
            return

        did_yield = False
        while True:
            c, index = skip_whitespace(index)
            if c == b',':
                continue
            elif c == b'"':
                name, index = simple_token(index, c)
                c, index = skip_whitespace(index)
                if c != b':':
                    Log.error("Expecting colon")
                c, index = skip_whitespace(index)

                child_expected = needed(name, expected_vars)
                child_path = parent_path + [name]
                if any(child_expected):
                    if not query_path:
                        index = _assign_token(index, c, child_expected)
                    elif query_path[0] == name:
                        for index in _decode_token(index, c, child_path, query_path[1:], child_expected):
                            did_yield = True
                            yield index
                    else:
                        if len(done[0]) <= len(child_path):
                            Log.error("Can not pick up more variables, iterator over {{path}} is done", path=join_field(done[0]))
                        index = _assign_token(index, c, child_expected)
                elif query_path and query_path[0] == name:
                    for index in _decode_token(index, c, child_path, query_path[1:], child_expected):
                        yield index
                else:
                    index = jump_to_end(index, c)
            elif c == b"}":
                if not did_yield:
                    yield index
                break

    def set_destination(expected_vars, value):
        for i, e in enumerate(expected_vars):
            if e is None:
                pass
            elif e == ".":
                destination[i] = value
            elif is_data(value):
                destination[i] = value[e]
            else:
                destination[i] = Null

    def _decode_object_items(index, c, parent_path, query_path, expected_vars):
        """
        ITERATE THROUGH THE PROPERTIES OF AN OBJECT
        """
        c, index = skip_whitespace(index)
        num_items = 0
        while True:
            if c == b',':
                c, index = skip_whitespace(index)
            elif c == b'"':
                name, index = simple_token(index, c)
                if "name" in expected_vars:
                    for i, e in enumerate(expected_vars):
                        if e == "name":
                            destination[i] = name

                c, index = skip_whitespace(index)
                if c != b':':
                    Log.error("Expecting colon")
                c, index = skip_whitespace(index)

                child_expected = needed("value", expected_vars)
                index = _assign_token(index, c, child_expected)
                c, index = skip_whitespace(index)
                DEBUG and not num_items % 1000 and Log.note("{{num}} items iterated", num=num_items)
                yield index
                num_items += 1
            elif c == b"}":
                break

    def _decode_token(index, c, parent_path, query_path, expected_vars):
        if c == b'{':
            if query_path and query_path[0] == "$items":
                if any(expected_vars):
                    for index in _decode_object_items(index, c, parent_path, query_path[1:], expected_vars):
                        yield index
                else:
                    index = jump_to_end(index, c)
                    yield index
            elif not any(expected_vars):
                index = jump_to_end(index, c)
                yield index
            else:
                for index in _decode_object(index, c, parent_path, query_path, expected_vars):
                    yield index
        elif c == b'[':
            for index in _iterate_list(index, c, parent_path, query_path, expected_vars):
                yield index
        else:
            index = _assign_token(index, c, expected_vars)
            yield index

    def _assign_token(index, c, expected_vars):
        if not any(expected_vars):
            return jump_to_end(index, c)

        value, index = simple_token(index, c)
        set_destination(expected_vars, value)

        return index

    def jump_to_end(index, c):
        """
        DO NOT PROCESS THIS JSON OBJECT, JUST RETURN WHERE IT ENDS
        """
        if c == b'"':
            while True:
                c = json[index]
                index += 1
                if c == b'\\':
                    index += 1
                elif c == b'"':
                    break
            return index
        elif c not in b"[{":
            while True:
                c = json[index]
                index += 1
                if c in b',]}':
                    break
            return index - 1

        # OBJECTS AND ARRAYS ARE MORE INVOLVED
        stack = [None] * 1024
        stack[0] = CLOSE[c]
        i = 0  # FOR INDEXING THE STACK
        while True:
            c = json[index]
            index += 1

            if c == b'"':
                while True:
                    c = json[index]
                    index += 1
                    if c == b'\\':
                        index += 1
                    elif c == b'"':
                        break
            elif c in b'[{':
                i += 1
                stack[i] = CLOSE[c]
            elif c == stack[i]:
                i -= 1
                if i == -1:
                    return index  # FOUND THE MATCH!  RETURN
            elif c in b']}':
                Log.error("expecting {{symbol}}", symbol=stack[i])

    def simple_token(index, c):
        if c == b'"':
            json.mark(index - 1)
            while True:
                c = json[index]
                index += 1
                if c == b"\\":
                    index += 1
                elif c == b'"':
                    break
            return json_decoder(json.release(index).decode("utf8")), index
        elif c in b"{[":
            json.mark(index-1)
            index = jump_to_end(index, c)
            value = wrap(json_decoder(json.release(index).decode("utf8")))
            return value, index
        elif c == b"t" and json.slice(index, index + 3) == b"rue":
            return True, index + 3
        elif c == b"n" and json.slice(index, index + 3) == b"ull":
            return None, index + 3
        elif c == b"f" and json.slice(index, index + 4) == b"alse":
            return False, index + 4
        else:
            json.mark(index-1)
            while True:
                c = json[index]
                if c in b',]}':
                    break
                index += 1
            text = json.release(index)
            try:
                return float(text), index
            except Exception:
                Log.error("Not a known JSON primitive: {{text|quote}}", text=text)

    def skip_whitespace(index):
        """
        RETURN NEXT NON-WHITESPACE CHAR, AND ITS INDEX
        """
        c = json[index]
        while c in WHITESPACE:
            index += 1
            c = json[index]
        return c, index + 1

    if is_data(query_path) and query_path.get("items"):
        path_list = split_field(query_path.get("items")) + ["$items"]  # INSERT A MARKER SO THAT OBJECT IS STREAM DECODED
    else:
        path_list = split_field(query_path)

    destination = [None] * len(expected_vars)
    c, index = skip_whitespace(0)
    done = [path_list + [None]]
    for _ in _decode_token(index, c, [], path_list, expected_vars):
        output = Data()
        for i, e in enumerate(expected_vars):
            output[e] = destination[i]
        yield output
INTENDED TO TREAT JSON AS A STREAM; USING MINIMAL MEMORY WHILE IT ITERATES THROUGH THE STRUCTURE. ASSUMING THE JSON IS LARGE, AND HAS A HIGH LEVEL ARRAY STRUCTURE, IT WILL yield EACH OBJECT IN THAT ARRAY. NESTED ARRAYS ARE HANDLED BY REPEATING THE PARENT PROPERTIES FOR EACH MEMBER OF THE NESTED ARRAY. DEEPER NESTED PROPERTIES ARE TREATED AS PRIMITIVE VALUES; THE STANDARD JSON DECODER IS USED. LARGE MANY-PROPERTY OBJECTS CAN BE HANDLED BY `items()` :param json: SOME STRING-LIKE STRUCTURE THAT CAN ASSUME WE LOOK AT ONE CHARACTER AT A TIME, IN ORDER :param query_path: A DOT-SEPARATED STRING INDICATING THE PATH TO THE NESTED ARRAY OPTIONALLY, {"items":query_path} TO FURTHER ITERATE OVER PROPERTIES OF OBJECTS FOUND AT query_path :param expected_vars: REQUIRED PROPERTY NAMES, USED TO DETERMINE IF MORE-THAN-ONE PASS IS REQUIRED :return: RETURNS AN ITERATOR OVER ALL OBJECTS FROM ARRAY LOCATED AT query_path
def start(check_time: int = 500) -> None:
    """Begins watching source files for changes.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been
       removed.
    """
    loop = ioloop.IOLoop.current()
    if loop in _io_loops:
        # already watching on this IOLoop
        return
    _io_loops[loop] = True
    if len(_io_loops) > 1:
        gen_log.warning("tornado.autoreload started more than once in the same process")
    modify_times = {}  # type: Dict[str, float]
    periodic = ioloop.PeriodicCallback(
        functools.partial(_reload_on_update, modify_times), check_time)
    periodic.start()
Begins watching source files for changes. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed.
async def run_task(self) -> None:
    '''Initialize the queue and spawn extra worker tasks if this is the
    first task.  Then wait for work items to enter the task queue, and
    execute the `run()` method with the current work item.'''
    while self.running:
        try:
            item = self.QUEUE.get_nowait()
            Log.debug('%s processing work item', self.name)
            await self.run(item)
            Log.debug('%s completed work item', self.name)
            self.QUEUE.task_done()
        except asyncio.QueueEmpty:
            if self.OPEN:
                # The queue may still receive items; back off briefly.
                await self.sleep(0.05)
            else:
                Log.debug('%s queue closed and empty, stopping', self.name)
                return
        except CancelledError:
            # Bug fix: the original call passed no argument for the '%s'
            # placeholder, so the literal format string was logged.
            Log.debug('%s cancelled, dropping work item', self.name)
            self.QUEUE.task_done()
            raise
        except Exception:
            Log.exception('%s failed work item', self.name)
            self.QUEUE.task_done()
Initialize the queue and spawn extra worker tasks if this is the first task. Then wait for work items to enter the task queue, and execute the `run()` method with the current work item.
def profile_write(self, profile, outfile=None):
    """Write the profile to the output directory.

    Args:
        profile (dict): The dictionary containing the profile settings.
        outfile (str, optional): Defaults to None. The filename for the
            profile; derived from the profile name when omitted.
    """
    if outfile is None:
        # Derive a filename from the profile name: spaces become underscores.
        outfile = '{}.json'.format(
            profile.get('profile_name').replace(' ', '_').lower())
    fqpn = os.path.join(self.profile_dir, outfile)

    if not os.path.isfile(fqpn):
        # No existing file: create it with a one-element profile list.
        print('Create File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, fqpn))
        with open(fqpn, 'w') as fh:
            fh.write(json.dumps([profile], indent=2, sort_keys=True))
        return

    # File already exists: load, append the new profile, rewrite in place.
    print('Append to File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, fqpn))
    with open(fqpn, 'r+') as fh:
        try:
            data = json.load(fh, object_pairs_hook=OrderedDict)
        except ValueError as e:
            self.handle_error('Can not parse JSON data ({}).'.format(e))
        data.append(profile)
        fh.seek(0)
        fh.write(json.dumps(data, indent=2, sort_keys=True))
        fh.truncate()
Write the profile to the output directory. Args: profile (dict): The dictionary containing the profile settings. outfile (str, optional): Defaults to None. The filename for the profile.
def cleanup(self):
    """Free the GL buffer and vertex-array resources for a render model."""
    # NOTE(review): the guard tests m_glVertBuffer but the first deletion is
    # the index buffer; presumably all three handles are created together so
    # one guard covers them all -- confirm against the allocation path.
    if self.m_glVertBuffer != 0:
        glDeleteBuffers(1, (self.m_glIndexBuffer,))
        glDeleteVertexArrays( 1, (self.m_glVertArray,) )
        glDeleteBuffers(1, (self.m_glVertBuffer,))
        # Zero the handles so a second cleanup() call is a no-op.
        self.m_glIndexBuffer = 0
        self.m_glVertArray = 0
        self.m_glVertBuffer = 0
Purpose: Frees the GL resources for a render model
def _read_configfile(self):
    """Read the config file and store it (when valid)."""
    path = self.config_filename
    if not os.path.isabs(path):
        # Relative names are resolved against the user's home directory.
        path = os.path.join(os.path.expanduser('~'), self.config_filename)
    # If there is a setup.cfg in the package, parse it as well.
    candidates = [name for name in (path, 'setup.cfg') if os.path.exists(name)]
    if not candidates:
        self.config = None
        return
    parsed = ConfigParser()
    parsed.read(candidates)
    self.config = parsed
    if not (self.is_old_pypi_config() or self.is_new_pypi_config()):
        # Safety valve: drop configurations we do not recognize.
        self.config = None
Read the config file and store it (when valid)
def render_context_with_title(self, context):
    """Render a page title and insert it into the context.

    Renders the ``page_title`` template attribute against the given
    context and stores the result under the 'page_title' key.  If the
    received context already defines 'page_title', it is left untouched.
    """
    if "page_title" in context:
        return context
    # NOTE(sambetts): Use force_text to ensure lazy translations
    # are handled correctly.
    title_template = template.Template(encoding.force_text(self.page_title))
    context["page_title"] = title_template.render(template.Context(context))
    return context
Render a page title and insert it into the context. This function takes in a context dict and uses it to render the page_title variable. It then appends this title to the context using the 'page_title' key. If there is already a page_title key defined in context received then this function will do nothing.
def account_setup(remote, token=None, response=None, account_setup=None):
    """Setup user account."""
    gh = GitHubAPI(user_id=token.remote_account.user_id)
    with db.session.begin_nested():
        gh.init_account()
        # Create user <-> external id link for the GitHub identity.
        external_identity = dict(
            id=str(gh.account.extra_data['id']),
            method="github",
        )
        oauth_link_external_id(token.remote_account.user, external_identity)
Setup user account.
def truepath_relative(path, otherpath=None):
    """
    Return *path* normalized and expressed relative to *otherpath*
    (the current working directory by default).

    Args:
        path (str): path to file or directory
        otherpath (None): (default = None)

    Returns:
        str: path_

    CommandLine:
        python -m utool.util_path --exec-truepath_relative --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> import utool as ut
        >>> path = 'C:/foobar/foobiz'
        >>> otherpath = 'C:/foobar'
        >>> path_ = truepath_relative(path, otherpath)
        >>> result = ('path_ = %s' % (ut.repr2(path_),))
        >>> print(result)
        path_ = 'foobiz'
    """
    base = otherpath if otherpath is not None else os.getcwd()
    base = truepath(base)
    return normpath(relpath(path, base))
Normalizes and returns absolute path with so specs Args: path (str): path to file or directory otherpath (None): (default = None) Returns: str: path_ CommandLine: python -m utool.util_path --exec-truepath_relative --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> path = 'C:/foobar/foobiz' >>> otherpath = 'C:/foobar' >>> path_ = truepath_relative(path, otherpath) >>> result = ('path_ = %s' % (ut.repr2(path_),)) >>> print(result) path_ = 'foobiz'
def get_publication_date(self, **kwargs):
    """Determine the creation date for the publication date.

    Tries a full Y/M/D match first, then year+month, then bare year;
    returns a formatted date string or None when nothing matches or the
    matched values do not form a valid date.
    """
    date_string = kwargs.get('content', '')

    full_match = CREATION_DATE_REGEX.match(date_string)
    if full_match:
        year, month, day = full_match.groups('')
        try:
            created = datetime.date(int(year), int(month), int(day))
        except ValueError:
            return None
        return '%s/%s/%s' % (
            format_date_string(created.month),
            format_date_string(created.day),
            created.year,
        )

    month_match = CREATION_MONTH_REGEX.match(date_string)
    if month_match:
        year, month = month_match.groups('')
        try:
            # Day defaults to 1 only to validate the year/month pair.
            created = datetime.date(int(year), int(month), 1)
        except ValueError:
            return None
        return '%s/%s' % (
            format_date_string(created.month),
            created.year,
        )

    year_match = CREATION_YEAR_REGEX.match(date_string)
    if year_match:
        return year_match.groups('')[0]

    return None
Determine the creation date for the publication date.
def post_values(self, values):
    """ Method for `Post Data Stream Values <https://m2x.att.com/developer/documentation/v2/device#Post-Data-Stream-Values>`_ endpoint.

    :param values: Values to post, see M2X API docs for details
    :type values: dict

    :return: The API response, see M2X API docs for details
    :rtype: dict

    :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
    """
    endpoint = self.subpath('/values')
    payload = {'values': values}
    return self.api.post(endpoint, data=payload)
Method for `Post Data Stream Values <https://m2x.att.com/developer/documentation/v2/device#Post-Data-Stream-Values>`_ endpoint. :param values: Values to post, see M2X API docs for details :type values: dict :return: The API response, see M2X API docs for details :rtype: dict :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
def readline(self, timeout=1):  # pylint: disable=unused-argument
    """Read one buffered line from the reader thread, if available.

    :param timeout: Timeout, not used
    :return: Line read or None
    """
    if not self.read_thread:
        return None
    # The timeout value is ignored: return immediately when the reader
    # thread has no complete line queued.
    line = self.read_thread.readline()
    if line and self.__print_io:
        self.logger.info(line, extra={'type': '<--'})
    return line
Readline implementation. :param timeout: Timeout, not used :return: Line read or None
def sample_less_than_condition(choices_in, condition):
    """Draw a without-replacement random sample from *choices_in* such
    that each drawn element is strictly less than the corresponding
    entry of *condition* (which should be in ascending order).
    """
    remaining = copy.deepcopy(choices_in)
    n_out = min(condition.shape[0], choices_in.shape[0])
    output = np.zeros(n_out)
    for i in range(n_out):
        # Indices of the still-available choices that satisfy the bound.
        eligible = np.where(remaining < condition[i])[0]
        pick = np.random.choice(eligible)
        output[i] = remaining[pick]
        # Without replacement: discard the chosen value for later draws.
        remaining = np.delete(remaining, pick)
    return output
Creates a random sample from choices without replacement, subject to the condition that each element of the output is greater than the corresponding element of the condition array. condition should be in ascending order.
def write(self, output=None):
    """Write association table to a file.

    Builds a FITS binary-table association (MEMNAME/MEMTYPE/MEMPRSNT plus
    per-member XOFFSET/YOFFSET/XDELTA/YDELTA/ROTATION/SCALE columns) from
    this association's members and writes it to ``<output>_asn.fits``, or
    to the explicitly supplied ``output`` filename.  A final row describes
    the dither product itself.  When the first member names a reference
    image, a [wcs] extension is appended and referenced from the primary
    header.
    """
    if not output:
        # Default name derives from the association's own 'output' entry.
        outfile = self['output']+'_asn.fits'
        output = self['output']
    else:
        outfile = output

    # Delete the file if it exists.
    if os.path.exists(outfile):
        warningmsg = "\n#########################################\n"
        warningmsg += "# #\n"
        warningmsg += "# WARNING: #\n"
        warningmsg += "# The existing association table, #\n"
        warningmsg += " " + str(outfile) + '\n'
        warningmsg += "# is being replaced. #\n"
        warningmsg += "# #\n"
        warningmsg += "#########################################\n\n"
    fasn = fits.HDUList()

    # Compute maximum length of MEMNAME for table column definition
    _maxlen = 0
    for _fname in self['order']:
        if len(_fname) > _maxlen:
            _maxlen = len(_fname)
    # Enforce a mimimum size of 24
    if _maxlen < 24:
        _maxlen = 24
    namelen_str = str(_maxlen+2)+'A'

    self.buildPrimary(fasn, output=output)

    # Column data: one entry per member plus a trailing row for the
    # dither product (identity shift/rotation/scale).
    mname = self['order'][:]
    mname.append(output)
    mtype = ['EXP-DTH' for l in self['order']]
    mtype.append('PROD-DTH')
    mprsn = [True for l in self['order']]
    mprsn.append(False)
    xoff = [self['members'][l]['xoff'] for l in self['order']]
    xoff.append(0.0)
    yoff = [self['members'][l]['yoff'] for l in self['order']]
    yoff.append(0.0)
    xsh = [self['members'][l]['xshift'] for l in self['order']]
    xsh.append(0.0)
    ysh = [self['members'][l]['yshift'] for l in self['order']]
    ysh.append(0.0)
    rot = [self['members'][l]['rot'] for l in self['order']]
    rot.append(0.0)
    scl = [self['members'][l]['scale'] for l in self['order']]
    scl.append(1.0)

    memname = fits.Column(name='MEMNAME',format=namelen_str,array=N.char.array(mname))
    memtype = fits.Column(name='MEMTYPE',format='14A',array=N.char.array(mtype))
    memprsn = fits.Column(name='MEMPRSNT', format='L', array=N.array(mprsn).astype(N.uint8))
    xoffset = fits.Column(name='XOFFSET', format='E', array=N.array(xoff))
    yoffset = fits.Column(name='YOFFSET', format='E', array=N.array(yoff))
    xdelta = fits.Column(name='XDELTA', format='E', array=N.array(xsh))
    ydelta = fits.Column(name='YDELTA', format='E', array=N.array(ysh))
    rotation = fits.Column(name='ROTATION', format='E', array=N.array(rot))
    scale = fits.Column(name='SCALE', format='E', array=N.array(scl))

    cols = fits.ColDefs([memname,memtype,memprsn,xoffset,yoffset,xdelta,ydelta,rotation,scale])
    hdu = fits.BinTableHDU.from_columns(cols)
    fasn.append(hdu)
    # Astropy renamed the 'clobber' keyword to 'overwrite' in 1.3.
    if ASTROPY_VER_GE13:
        fasn.writeto(outfile, overwrite=True)
    else:
        fasn.writeto(outfile, clobber=True)
    fasn.close()

    mem0 = self['order'][0]
    refimg = self['members'][mem0]['refimage']
    if refimg is not None:
        # Append a reference WCS extension and point the primary header
        # at it via the 'refimage' keyword.
        whdu = wcsutil.WCSObject(refimg)
        whdu.createReferenceWCS(outfile,overwrite=False)
        ftab = fits.open(outfile)
        ftab['primary'].header['refimage'] = outfile+"[wcs]"
        ftab.close()
        del whdu
Write association table to a file.
def input(self, data):
    """Append *data* to the internal buffer and reassemble complete,
    length-prefixed packets, yielding the payload of each one.

    The wire format is a fixed-size length header (HEADER_SIZE bytes,
    packed as a native int) followed by the payload.
    """
    self.buf += data
    while len(self.buf) > HEADER_SIZE:
        (body_len,) = struct.unpack('i', self.buf[0:HEADER_SIZE])
        if len(self.buf) < body_len + HEADER_SIZE:
            # Packet incomplete: wait for more data.
            break
        payload = self.buf[HEADER_SIZE:body_len + HEADER_SIZE]
        self.buf = self.buf[body_len + HEADER_SIZE:]
        yield payload
小数据片段拼接成完整数据包 如果内容足够则yield数据包
def guess_external_url(local_host, port):
    """Return a URL that is most likely to route to `local_host` from outside.

    The point is that we may be running on a remote host from the user's
    point of view, so they can't access `local_host` from a Web browser
    just by typing ``http://localhost:12345/``.
    """
    host = local_host
    if host in ('0.0.0.0', '::'):
        # Listening on all interfaces, so pick one; the system's FQDN is
        # our best hint.
        host = socket.getfqdn()
        # https://github.com/vfaronov/turq/issues/9
        v4 = IPV4_REVERSE_DNS.match(host)
        if v4:
            host = '.'.join(reversed(v4.groups()))
        else:
            v6 = IPV6_REVERSE_DNS.match(host)
            if v6:
                address_as_int = int(''.join(reversed(v6.groups())), 16)
                host = str(IPv6Address(address_as_int))
    if ':' in host:
        # Looks like an IPv6 literal, which must be bracketed in a URL.
        # A zone ID ("%3") could be encoded per RFC 6874, but dropping it
        # altogether worked better in practice -- this is only a guess.
        host = '[%s]' % host.rsplit('%', 1)[0]
    return 'http://%s:%d/' % (host, port)
Return a URL that is most likely to route to `local_host` from outside. The point is that we may be running on a remote host from the user's point of view, so they can't access `local_host` from a Web browser just by typing ``http://localhost:12345/``.
def join(self, word_blocks, float_part):
    """Join the word lists of the given blocks into one number phrase.

    :param word_blocks: tuple
    :rtype: str
    """
    words = []
    last = len(word_blocks) - 1
    begin = 0
    # A leading "1" in a two-block number reads as "seribu" (one thousand).
    if last == 1 and word_blocks[0][0] == '1':
        words.append('seribu')
        begin = 1
    for idx in range(begin, last + 1):
        words.extend(word_blocks[idx][1])
        if not word_blocks[idx][1]:
            # Empty block contributes no scale word either.
            continue
        if idx == last:
            break
        words.append(self.TENS_TO[(last - idx) * 3])
    return ' '.join(words) + float_part
join the words by first join lists in the tuple :param word_blocks: tuple :rtype: str
def apply_augments(self, auglist, p_elem, pset):
    """Handle substatements of augments from `auglist`.

    The augments are applied in the context of `p_elem`. `pset` is a patch
    set containing patches that may be applicable to descendants.
    """
    for a in auglist:
        par = a.parent
        if a.search_one("when") is None:
            # Unconditional augment: emit directly into the parent element.
            wel = p_elem
        else:
            # Conditional ("when") augment: wrap in an interleave/group node
            # matching the parent's interleave mode and occurrence.
            if p_elem.interleave:
                kw = "interleave"
            else:
                kw = "group"
            wel = SchemaNode(kw, p_elem, interleave=p_elem.interleave)
            wel.occur = p_elem.occur
        if par.keyword == "uses":
            # Augments inside "uses" keep the current prefix context.
            self.handle_substmts(a, wel, pset)
            continue
        # Resolve the (sub)module that defines this augment so its prefix
        # can be made current while expanding the substatements.
        if par.keyword == "submodule":
            mnam = par.i_including_modulename
        else:
            mnam = par.arg
        if self.prefix_stack[-1] == self.module_prefixes[mnam]:
            self.handle_substmts(a, wel, pset)
        else:
            # Temporarily push the augmenting module's prefix.
            self.prefix_stack.append(self.module_prefixes[mnam])
            self.handle_substmts(a, wel, pset)
            self.prefix_stack.pop()
Handle substatements of augments from `auglist`. The augments are applied in the context of `p_elem`. `pset` is a patch set containing patches that may be applicable to descendants.
def logged_api_call(func):
    """
    Function decorator that causes the decorated API function or method to
    log calls to itself to a logger.

    The logger's name is the dotted module name of the module defining the
    decorated function (e.g. 'zhmcclient._cpc').

    Parameters:

      func (function object): The original function being decorated.

    Returns:

      function object: The function wrapping the original function being
      decorated.

    Raises:

      TypeError: The @logged_api_call decorator must be used on a function
        or method (and not on top of the @property decorator).
    """
    # Note that in this decorator function, we are in a module loading context,
    # where the decorated functions are being defined. When this decorator
    # function is called, its call stack represents the definition of the
    # decorated functions. Not all global definitions in the module have been
    # defined yet, and methods of classes that are decorated with this
    # decorator are still functions at this point (and not yet methods).
    module = inspect.getmodule(func)
    if not inspect.isfunction(func) or not hasattr(module, '__name__'):
        raise TypeError("The @logged_api_call decorator must be used on a "
                        "function or method (and not on top of the @property "
                        "decorator)")
    try:
        # We avoid the use of inspect.getouterframes() because it is slow,
        # and use the pointers up the stack frame, instead.
        this_frame = inspect.currentframe()  # this decorator function here
        apifunc_frame = this_frame.f_back  # the decorated API function
        apifunc_owner = inspect.getframeinfo(apifunc_frame)[2]
    finally:
        # Recommended way to deal with frame objects to avoid ref cycles
        del this_frame
        del apifunc_frame

    # TODO: For inner functions, show all outer levels instead of just one.
    if apifunc_owner == '<module>':
        # The decorated API function is defined globally (at module level)
        apifunc_str = '{func}()'.format(func=func.__name__)
    else:
        # The decorated API function is defined in a class or in a function
        apifunc_str = '{owner}.{func}()'.format(owner=apifunc_owner,
                                                func=func.__name__)
    logger = get_logger(API_LOGGER_NAME)

    def is_external_call():
        """
        Return a boolean indicating whether the call to the decorated API
        function is an external call (vs. being an internal call).
        """
        try:
            # We avoid the use of inspect.getouterframes() because it is slow,
            # and use the pointers up the stack frame, instead.
            log_it_frame = inspect.currentframe()  # this log_it() function
            log_api_call_frame = log_it_frame.f_back  # the log_api_call() func
            apifunc_frame = log_api_call_frame.f_back  # the decorated API func
            apicaller_frame = apifunc_frame.f_back  # caller of API function
            apicaller_module = inspect.getmodule(apicaller_frame)
            if apicaller_module is None:
                apicaller_module_name = "<unknown>"
            else:
                apicaller_module_name = apicaller_module.__name__
        finally:
            # Recommended way to deal with frame objects to avoid ref cycles
            del log_it_frame
            del log_api_call_frame
            del apifunc_frame
            del apicaller_frame
            del apicaller_module
        # Log only if the caller is not from the zhmcclient package
        return apicaller_module_name.split('.')[0] != 'zhmcclient'

    def log_api_call(func, *args, **kwargs):
        """
        Log entry to and exit from the decorated function, at the debug level.

        Note that this wrapper function is called every time the decorated
        function/method is called, but that the log message only needs to be
        constructed when logging for this logger and for this log level is
        turned on. Therefore, we do as much as possible in the decorator
        function, plus we use %-formatting and lazy interpolation provided by
        the log functions, in order to save resources in this function here.

        Parameters:

          func (function object): The decorated function.

          *args: Any positional arguments for the decorated function.

          **kwargs: Any keyword arguments for the decorated function.
        """
        # Note that in this function, we are in the context where the
        # decorated function is actually called.
        _log_it = is_external_call() and logger.isEnabledFor(logging.DEBUG)
        if _log_it:
            logger.debug("Called: {}, args: {:.500}, kwargs: {:.500}".
                         format(apifunc_str, log_escaped(repr(args)),
                                log_escaped(repr(kwargs))))
        result = func(*args, **kwargs)
        if _log_it:
            logger.debug("Return: {}, result: {:.1000}".
                         format(apifunc_str, log_escaped(repr(result))))
        return result

    if 'decorate' in globals():
        return decorate(func, log_api_call)
    else:
        return decorator(log_api_call, func)
Function decorator that causes the decorated API function or method to log calls to itself to a logger. The logger's name is the dotted module name of the module defining the decorated function (e.g. 'zhmcclient._cpc'). Parameters: func (function object): The original function being decorated. Returns: function object: The function wrapping the original function being decorated. Raises: TypeError: The @logged_api_call decorator must be used on a function or method (and not on top of the @property decorator).
def attempt_file_write(
        path: str,
        contents: typing.Union[str, bytes],
        mode: str = 'w',
        offset: int = 0
) -> typing.Union[None, Exception]:
    """
    Attempts to write the specified contents to a file and returns None if
    successful, or the raised exception if writing failed.

    :param path:
        The path to the file that will be written
    :param contents:
        The contents of the file to write
    :param mode:
        The mode in which the file will be opened when written
    :param offset:
        The byte offset in the file where the contents should be written.
        If the value is zero, the offset information will be ignored and
        the operation will write entirely based on mode. Note that if you
        indicate an append write mode and an offset, the mode will be
        forced to write instead of append.
    :return:
        None if the write operation succeeded. Otherwise, the exception
        that was raised by the failed write action (including failures
        while reading the existing file for an offset write).
    """
    # Normalize to bytes; non-str contents are passed through unchanged and
    # will surface a write error below if they are not bytes-like.
    if isinstance(contents, str):
        data = contents.encode()  # type: typing.Union[str, bytes]
    else:
        data = contents

    append = 'a' in mode
    # An offset write always rewrites the file from the start, so append
    # mode is forced to write mode in that case.
    write_mode = 'wb' if offset > 0 or not append else 'ab'

    try:
        if offset > 0:
            # Bug fix: this read used to sit outside the try block, so a
            # missing/unreadable file raised instead of being returned as
            # the documented error result.
            with open(path, 'rb') as f:
                existing = f.read(offset)
        else:
            existing = None

        with open(path, write_mode) as f:
            if existing is not None:
                # Re-emit the preserved prefix, then the new contents.
                f.write(existing)
            f.write(data)
        return None
    except Exception as error:
        return error
Attempts to write the specified contents to a file and returns None if successful, or the raised exception if writing failed. :param path: The path to the file that will be written :param contents: The contents of the file to write :param mode: The mode in which the file will be opened when written :param offset: The byte offset in the file where the contents should be written. If the value is zero, the offset information will be ignored and the operation will write entirely based on mode. Note that if you indicate an append write mode and an offset, the mode will be forced to write instead of append. :return: None if the write operation succeeded. Otherwise, the exception that was raised by the failed write action.
def close(self):
    """Close the cursor by exhausting all remaining result sets."""
    if self.connection is None:
        # Already closed.
        return
    try:
        # Drain every remaining result set before releasing the link.
        while self.nextset():
            pass
    finally:
        self.connection = None
Closing a cursor just exhausts all remaining data.
def _find_base_tds_url(catalog_url): """Identify the base URL of the THREDDS server from the catalog URL. Will retain URL scheme, host, port and username/password when present. """ url_components = urlparse(catalog_url) if url_components.path: return catalog_url.split(url_components.path)[0] else: return catalog_url
Identify the base URL of the THREDDS server from the catalog URL. Will retain URL scheme, host, port and username/password when present.
def __do_query_into_hash(conn, sql_str):
    '''
    Perform the query that is passed to it (sql_str).

    Returns:
        results in a dict.
    '''
    mod = sys._getframe().f_code.co_name
    log.debug('%s<--(%s)', mod, sql_str)

    rtn_results = []

    try:
        cursor = conn.cursor()
    except MySQLdb.MySQLError:
        # Bug fix: no cursor was created here, so there is nothing to
        # close -- the old cursor.close() call raised NameError and
        # masked the real connection error.
        log.error('%s: Can\'t get cursor for SQL->%s', mod, sql_str)
        log.debug('%s-->', mod)
        return rtn_results

    try:
        _execute(cursor, sql_str)
    except MySQLdb.MySQLError:
        log.error('%s: try to execute : SQL->%s', mod, sql_str)
        cursor.close()
        log.debug('%s-->', mod)
        return rtn_results

    qrs = cursor.fetchall()

    for row_data in qrs:
        # Map each column name (from cursor.description) to its value.
        row = {}
        for col_cnt, col_data in enumerate(cursor.description):
            row[col_data[0]] = row_data[col_cnt]
        rtn_results.append(row)

    cursor.close()
    log.debug('%s-->', mod)
    return rtn_results
Perform the query that is passed to it (sql_str). Returns: results in a dict.
def _CronJobFromRow(self, row):
    """Creates a cronjob object from a database result row."""
    (serialized, create_time, enabled, forced_run_requested, last_run_status,
     last_run_time, current_run_id, state, leased_until, leased_by) = row

    job = rdf_cronjobs.CronJob.FromSerializedString(serialized)
    job.current_run_id = db_utils.IntToCronJobRunID(current_run_id)
    job.enabled = enabled
    job.forced_run_requested = forced_run_requested
    job.last_run_status = last_run_status
    job.last_run_time = mysql_utils.TimestampToRDFDatetime(last_run_time)
    if state:
        # Only deserialize the state blob when one was stored.
        job.state = rdf_protodict.AttributedDict.FromSerializedString(state)
    job.created_at = mysql_utils.TimestampToRDFDatetime(create_time)
    job.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
    job.leased_by = leased_by
    return job
Creates a cronjob object from a database result row.
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the
        data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Notes
    -----
    NOTE(review): some repairs rebind the local name (real-part truncation,
    square sub-matrix selection, symmetrization) while others mutate the
    caller's array in place (value clipping, unit diagonal), and nothing is
    returned -- confirm whether a ``return similarities`` is missing or
    callers rely solely on the in-place effects.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Drop imaginary components, if any.
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # Force a square matrix by keeping the largest leading square block.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        # Clip entries into the valid similarity range [0, 1].
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        # Symmetrize by averaging with the transpose.
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        # Enforce unit self-similarity on the diagonal.
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
Check that a matrix is a proper similarity matrix and bring appropriate changes if applicable. Parameters ---------- similarities : array of shape (n_samples, n_samples) A matrix of pairwise similarities between (sub)-samples of the data-set. verbose : Boolean, optional (default = False) Alerts of any issue with the similarities matrix provided and of any step possibly taken to remediate such problem.
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports 'userpass' (username/password) and 'sspi' (GSSAPI token)
    login mechanisms.  On SSL certificate-verification failures it retries
    with certificate verification disabled.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate that the chosen mechanism has its mandatory parameters.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older PyVmomi versions do not accept the b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First retry: disable certificate verification via an
            # unverified SSL context.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context',
                                       getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second retry: explicit TLSv1 context with CERT_NONE.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:  # pylint: disable=broad-except
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleanly closed at interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object.
def get_vardict_command(data):
    """
    convert variantcaller specification to proper vardict command,
    handling string or list specification
    """
    vcaller = dd.get_variantcaller(data)
    if not vcaller:
        return None
    if isinstance(vcaller, list):
        matches = [x for x in vcaller if "vardict" in x]
        if not matches:
            return None
        name = matches[0]
    else:
        name = vcaller
    # "-perl" names map to the legacy perl executable; everything else
    # uses the java implementation.
    return "vardict" if name.endswith("-perl") else "vardict-java"
convert variantcaller specification to proper vardict command, handling string or list specification
def find_action(self, action_name):
    """Search every service offered by the Server for an action by name.

    Convenience method that returns the first matching Action instance,
    or None when no service offers an action with the given name.
    """
    for svc in self.services:
        found = svc.find_action(action_name)
        if found is not None:
            return found
    return None
Find an action by name. Convenience method that searches through all the services offered by the Server for an action and returns an Action instance. If the action is not found, returns None. If multiple actions with the same name are found it returns the first one.
def get_photo_url(photo_id):
    """Request the photo download url with the photo id

    :param photo_id: The photo id of flickr
    :type photo_id: str
    :return: Photo download url
    :rtype: str
    """
    # NOTE: mutates the module-level ``image_size_mode`` as a side effect.
    global image_size_mode
    args = _get_request_args(
        'flickr.photos.getSizes',
        photo_id=photo_id
    )
    resp = requests.post(API_URL, data=args)
    resp_json = json.loads(resp.text.encode('utf-8'))
    logger.debug(json.dumps(resp_json, indent=2))
    size_list = resp_json['sizes']['size']
    # Clamp the shared size mode so it never exceeds the number of sizes
    # this particular photo actually offers.
    image_size_mode = min(len(size_list), image_size_mode)
    # Index from the end: larger image_size_mode selects a smaller size.
    return size_list[-image_size_mode]['source']
Request the photo download url with the photo id :param photo_id: The photo id of flickr :type photo_id: str :return: Photo download url :rtype: str
def verify(backup_path, fast):
    """Check an existing backup for consistency."""
    # Imported lazily so merely loading this module stays cheap.
    from PyHardLinkBackup.phlb.verify import verify_backup as run_verify
    run_verify(backup_path, fast)
Verify an existing backup
def fit(self, X, y=None, **kwargs):
    """Fit encoder according to X and y.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.
    y : array-like, shape = [n_samples]
        Target values.

    Returns
    -------
    self : encoder
        Returns self.
    """
    # All of the real fitting work is delegated to the wrapped base-N
    # encoder; returning self keeps the sklearn-style fluent interface.
    encoder = self.base_n_encoder
    encoder.fit(X, y, **kwargs)
    return self
Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self.
def num2term(num, fs, conj=False):
    """Convert *num* into a min/max term in an N-dimensional Boolean space.

    The *fs* argument is a sequence of :math:`N` Boolean functions.
    There are :math:`2^N` points in the corresponding Boolean space.
    The dimension number of each function is its index in the sequence.

    The *num* argument is an int in range :math:`[0, 2^N)`.

    If *conj* is ``False``, return a minterm.
    Otherwise, return a maxterm.

    For example, consider the 3-dimensional space formed by functions
    :math:`f`, :math:`g`, :math:`h`.
    Each vertex corresponds to a min/max term as summarized by the table::

                6-----------7  ===== ======= ========== ==========
               /|          /|   num   f g h    minterm    maxterm
              / |         / |  ===== ======= ========== ==========
             /  |        /  |    0    0 0 0   f' g' h'   f  g  h
            4-----------5   |    1    1 0 0   f  g' h'   f' g  h
            |   |       |   |    2    0 1 0   f' g  h'   f  g' h
            |   |       |   |    3    1 1 0   f  g  h'   f' g' h
            |   2-------|---3    4    0 0 1   f' g' h    f  g  h'
            |  /        |  /     5    1 0 1   f  g' h    f' g  h'
        h g | /         | /      6    0 1 1   f' g  h    f  g' h'
        |/  |/          |/       7    1 1 1   f  g  h    f' g' h'
        +-f 0-----------1      ===== ======= ========= ===========

    .. note::
       The ``f g h`` column is the binary representation of *num*
       written in little-endian order.
    """
    if not isinstance(num, int):
        raise TypeError(
            "expected num to be an int, got {0.__name__}".format(type(num)))

    space = 2 ** len(fs)
    if not 0 <= num < space:
        raise ValueError(
            "expected num to be in range [0, {}), got {}".format(space, num))

    # Bit i of num decides whether dimension i appears complemented;
    # conjunctive (maxterm) form inverts that choice.
    literals = []
    for pos, func in enumerate(fs):
        asserted = bit_on(num, pos)
        if conj:
            literals.append(~func if asserted else func)
        else:
            literals.append(func if asserted else ~func)
    return tuple(literals)
Convert *num* into a min/max term in an N-dimensional Boolean space. The *fs* argument is a sequence of :math:`N` Boolean functions. There are :math:`2^N` points in the corresponding Boolean space. The dimension number of each function is its index in the sequence. The *num* argument is an int in range :math:`[0, 2^N)`. If *conj* is ``False``, return a minterm. Otherwise, return a maxterm. For example, consider the 3-dimensional space formed by functions :math:`f`, :math:`g`, :math:`h`. Each vertex corresponds to a min/max term as summarized by the table:: 6-----------7 ===== ======= ========== ========== /| /| num f g h minterm maxterm / | / | ===== ======= ========== ========== / | / | 0 0 0 0 f' g' h' f g h 4-----------5 | 1 1 0 0 f g' h' f' g h | | | | 2 0 1 0 f' g h' f g' h | | | | 3 1 1 0 f g h' f' g' h | 2-------|---3 4 0 0 1 f' g' h f g h' | / | / 5 1 0 1 f g' h f' g h' h g | / | / 6 0 1 1 f' g h f g' h' |/ |/ |/ 7 1 1 1 f g h f' g' h' +-f 0-----------1 ===== ======= ========= =========== .. note:: The ``f g h`` column is the binary representation of *num* written in little-endian order.
def predecessors(self, node):
    """Returns list of the predecessors of a node as DAGNodes."""
    resolved = node
    if isinstance(resolved, int):
        # Integer node ids are a deprecated calling convention; warn the
        # caller and translate the id into the DAGNode it refers to.
        warnings.warn('Calling predecessors() with a node id is deprecated,'
                      ' use a DAGNode instead',
                      DeprecationWarning, 2)
        resolved = self._id_to_node[resolved]
    return self._multi_graph.predecessors(resolved)
Returns list of the predecessors of a node as DAGNodes.