def apply_config(self, config):
    """ Applies config """
    self.hash_name = config['hash_name']
    self.dim = config['dim']
    self.projection_count = config['projection_count']
    self.normals = config['normals']
    self.tree_root = config['tree_root']
    self.minimum_result_size = config['minimum_result_size']
def scroll_up(self, n, pre_dl=None, post_dl=None):
    """Scroll up ``n`` times (scroll the mouse wheel up n times)."""
    self.delay(pre_dl)
    self.m.scroll(vertical=n)
    self.delay(post_dl)
def install_cub(mb_inc_path):
    """ Downloads and installs cub into mb_inc_path """
    cub_url = 'https://github.com/NVlabs/cub/archive/1.6.4.zip'
    cub_sha_hash = '0d5659200132c2576be0b3959383fa756de6105d'
    cub_version_str = 'Current release: v1.6.4 (12/06/2016)'
    cub_zip_file = 'cub.zip'
    cub_zip_dir = 'cub-1.6.4'
    cub_unzipped_path = os.path.join(mb_inc_path, cub_zip_dir)
    cub_new_unzipped_path = os.path.join(mb_inc_path, 'cub')
    cub_header = os.path.join(cub_new_unzipped_path, 'cub', 'cub.cuh')
    cub_readme = os.path.join(cub_new_unzipped_path, 'README.md')

    # Check for a reasonably valid install
    cub_installed, _ = is_cub_installed(cub_readme, cub_header,
                                        cub_version_str)
    if cub_installed:
        log.info("NVIDIA cub installation found "
                 "at '{}'".format(cub_new_unzipped_path))
        return

    log.info("No NVIDIA cub installation found")

    # Do we already have a valid cub zip file
    have_valid_cub_file = (os.path.exists(cub_zip_file)
                           and os.path.isfile(cub_zip_file)
                           and sha_hash_file(cub_zip_file) == cub_sha_hash)

    if have_valid_cub_file:
        log.info("Valid NVIDIA cub archive found '{}'".format(cub_zip_file))
    # Download if we don't have a valid file
    else:
        log.info("Downloading cub archive '{}'".format(cub_url))
        dl_cub(cub_url, cub_zip_file)
        cub_file_sha_hash = sha_hash_file(cub_zip_file)

        # Compare against our supplied hash
        if cub_sha_hash != cub_file_sha_hash:
            msg = ('Hash of file %s downloaded from %s '
                   'is %s and does not match the expected '
                   'hash of %s. Please manually download '
                   'as per the README.md instructions.') % (
                       cub_zip_file, cub_url,
                       cub_file_sha_hash, cub_sha_hash)
            raise InstallCubException(msg)

    # Unzip into montblanc/include/cub
    with zipfile.ZipFile(cub_zip_file, 'r') as zip_file:
        # Remove any existing installs
        shutil.rmtree(cub_unzipped_path, ignore_errors=True)
        shutil.rmtree(cub_new_unzipped_path, ignore_errors=True)

        # Unzip
        zip_file.extractall(mb_inc_path)

        # Rename. cub_unzipped_path is mb_inc_path/cub_zip_dir
        shutil.move(cub_unzipped_path, cub_new_unzipped_path)

    log.info("NVIDIA cub archive unzipped into '{}'".format(
        cub_new_unzipped_path))

    there, reason = is_cub_installed(cub_readme, cub_header, cub_version_str)
    if not there:
        raise InstallCubException(reason)
def confirm(message="", title="", default=False, ok=False, cancel=False,
            parent=None):
    "Ask for confirmation (yes/no or ok and cancel), returns True or False"
    style = wx.CENTRE
    if ok:
        style |= wx.OK
    else:
        style |= wx.YES | wx.NO
    if default:
        style |= wx.YES_DEFAULT
    else:
        style |= wx.NO_DEFAULT
    if cancel:
        style |= wx.CANCEL
    result = dialogs.messageDialog(parent, message, title, style)
    if cancel and result.returned == wx.ID_CANCEL:
        return None
    return result.accepted
def transfer_data_from_mongo(self, index, doc_type, use_mongo_id=False,
                             indexed_flag_field_name='',
                             mongo_query_params={},
                             mongo_host=default.MONGO_HOST,
                             mongo_port=default.MONGO_PORT,
                             mongo_db=default.MONGO_DB,
                             mongo_collection=default.MONGO_COLLECTION):
    """
    Transfer data from MongoDB into Elasticsearch. The hostname, port,
    database and collection name of MongoDB default to the values loaded
    from default.py.

    :param index: The name of the index
    :param doc_type: The type of the document
    :param use_mongo_id: Use the MongoDB id in Elasticsearch if true,
        otherwise the id is generated automatically
    :param indexed_flag_field_name: the name of a field of the document;
        documents whose value for this field is False will be synchronized
    :param mongo_query_params: The dictionary of query params for MongoDB
    :param mongo_host: The hostname of MongoDB
    :param mongo_port: The port of MongoDB
    :param mongo_db: The name of the MongoDB database
    :param mongo_collection: The name of the MongoDB collection
    :return: (success, failed) counts from the bulk helper
    """
    mongo_client = MongoClient(host=mongo_host, port=int(mongo_port))
    # Joint actions of Elasticsearch for execute bulk api
    actions = []
    id_array = []
    try:
        collection = mongo_client[mongo_db][mongo_collection]
        if indexed_flag_field_name != '':
            mongo_query_params.update({indexed_flag_field_name: False})
        mongo_docs = collection.find(mongo_query_params)
        # Build the actions while the connection is still open, since the
        # cursor is lazy and cannot be consumed after the client is closed.
        for doc in mongo_docs:
            action = {
                '_op_type': 'index',
                '_index': index,
                '_type': doc_type
            }
            id_array.append(doc['_id'])
            if not use_mongo_id:
                doc.pop('_id')
            else:
                doc['id'] = str(doc['_id'])
                doc.pop('_id')
            action['_source'] = doc
            actions.append(action)
    finally:
        mongo_client.close()

    success, failed = es_helpers.bulk(self.client, actions,
                                      request_timeout=60 * 60)
    logger.info(
        'Transfer data from MongoDB(%s:%s) into the Elasticsearch(%s) '
        'success: %s, failed: %s' % (
            mongo_host, mongo_port, self.client, success, failed))

    # Back update flag
    if indexed_flag_field_name != '':
        t = threading.Thread(target=ElasticsearchClient._back_update_mongo,
                             args=(self, mongo_host, mongo_port, mongo_db,
                                   mongo_collection, id_array,
                                   {indexed_flag_field_name: True}),
                             name='mongodb_back_update')
        t.start()
    return success, failed
def identify(self, access_token, leeway=10.0):
    """
    Gather identifying information about a user via an authorized token.

    :Parameters:
        access_token : `AccessToken`
            A token representing an authorized user.  Obtained from
            `complete()`.
        leeway : `int` | `float`
            The number of seconds of leeway to account for when examining
            a token's "issued at" timestamp.

    :Returns:
        A dictionary containing identity information.
    """
    return identify(self.mw_uri, self.consumer_token, access_token,
                    leeway=leeway, user_agent=self.user_agent)
def format_coord(self, x, y):
    """Format displayed coordinates during mouseover of axes."""
    p, b = stereonet_math.geographic2plunge_bearing(x, y)
    s, d = stereonet_math.geographic2pole(x, y)
    pb = u'P/B={:0.0f}\u00b0/{:03.0f}\u00b0'.format(p[0], b[0])
    sd = u'S/D={:03.0f}\u00b0/{:0.0f}\u00b0'.format(s[0], d[0])
    return u'{}, {}'.format(pb, sd)
def goBack(self):
    """
    Goes up one level if possible and returns the url at the current
    level.  If it cannot go up, then a blank string will be returned.

    :return     <str>
    """
    if not self.canGoBack():
        return ''

    self._blockStack = True
    self._index -= 1
    self.emitCurrentChanged()
    self._blockStack = False

    return self.currentUrl()
def ProduceExtractionWarning(self, message, path_spec=None):
    """Produces an extraction warning.

    Args:
        message (str): message of the warning.
        path_spec (Optional[dfvfs.PathSpec]): path specification, where
            None will use the path specification of current file entry set
            in the mediator.

    Raises:
        RuntimeError: when storage writer is not set.
    """
    if not self._storage_writer:
        raise RuntimeError('Storage writer not set.')

    if not path_spec and self._file_entry:
        path_spec = self._file_entry.path_spec

    parser_chain = self.GetParserChain()
    warning = warnings.ExtractionWarning(
        message=message, parser_chain=parser_chain, path_spec=path_spec)
    self._storage_writer.AddWarning(warning)
    self._number_of_warnings += 1

    self.last_activity_timestamp = time.time()
def with_item(self, context, as_opt):
    """(2.7, 3.1-) with_item: test ['as' expr]"""
    if as_opt:
        as_loc, optional_vars = as_opt
        return ast.withitem(context_expr=context,
                            optional_vars=optional_vars,
                            as_loc=as_loc,
                            loc=context.loc.join(optional_vars.loc))
    else:
        return ast.withitem(context_expr=context, optional_vars=None,
                            as_loc=None, loc=context.loc)
def add(self, count, timestamp=None):
    """Add a value at the specified time to the series.

    :param count: The number of work items ready at the specified time.
    :param timestamp: The timestamp to add.  Defaults to None, meaning
        current time.  It should be strictly greater (newer) than the
        last added timestamp.
    """
    if timestamp is None:
        timestamp = time.time()
    if self.last_data >= timestamp:
        raise ValueError("Time {} >= {} in load average calculation".format(
            self.last_data, timestamp))
    self.last_data = timestamp
    for meta in self.intervals.values():
        meta.push(count, timestamp)
def groups(self):
    """Return the names of groups in the file

    Note that there is not necessarily a TDMS object associated with
    each group name.

    :rtype: List of strings.
    """
    # Split paths into components and take the first (group) component.
    object_paths = (
        path_components(path) for path in self.objects)
    group_names = (path[0] for path in object_paths if len(path) > 0)

    # Use an ordered dict as an ordered set to find unique
    # groups in order.
    groups_set = OrderedDict()
    for group in group_names:
        groups_set[group] = None
    return list(groups_set)
def handle(self, request):
    """Handle an HTTP request for executing an API call.

    This method authenticates the request checking its signature, and then
    calls the C{execute} method, passing it a L{Call} object set with the
    principal for the authenticated user and the generic parameters
    extracted from the request.

    @param request: The L{HTTPRequest} to handle.
    """
    request.id = str(uuid4())
    deferred = maybeDeferred(self._validate, request)
    deferred.addCallback(self.execute)

    def write_response(response):
        request.setHeader("Content-Length", str(len(response)))
        request.setHeader("Content-Type", self.content_type)
        # Prevent browsers from trying to guess a different content type.
        request.setHeader("X-Content-Type-Options", "nosniff")
        request.write(response)
        request.finish()
        return response

    def write_error(failure):
        if failure.check(APIError):
            status = failure.value.status

            # Don't log the stack traces for 4xx responses.
            if status < 400 or status >= 500:
                log.err(failure)
            else:
                log.msg("status: %s message: %s" % (
                    status, safe_str(failure.value)))

            body = failure.value.response
            if body is None:
                body = self.dump_error(failure.value, request)
        else:
            # If the error is a generic one (not an APIError), log the
            # message, but don't send it back to the client, as it could
            # contain sensitive information. Send a generic server error
            # message instead.
            log.err(failure)
            body = "Server error"
            status = 500
        request.setResponseCode(status)
        write_response(body)

    deferred.addCallback(write_response)
    deferred.addErrback(write_error)
    return deferred
def summary(self):
    """
    Creates a text string representing the current query and its children
    for this item.

    :return     <str>
    """
    child_text = []
    for c in range(self.childCount()):
        child = self.child(c)
        text = [child.text(0), child.text(1), child.text(2), child.text(3)]
        # list() keeps the result mutable on Python 3, where map() returns
        # an iterator.
        text = list(map(str, text))
        while '' in text:
            text.remove('')
        child_text.append(' '.join(text))
    return ' '.join(child_text)
def queryMore(self, queryLocator):
    '''
    Retrieves the next batch of objects from a query.
    '''
    self._setHeaders('queryMore')
    return self._sforce.service.queryMore(queryLocator)
def axis_angle_to_rotation_matrix(v, theta):
    """Convert rotation from axis-angle to rotation matrix

    Parameters
    ---------------
    v : (3,) ndarray
        Rotation axis (normalized)
    theta : float
        Rotation angle (radians)

    Returns
    ----------------
    R : (3,3) ndarray
        Rotation matrix
    """
    if np.abs(theta) < np.spacing(1):
        return np.eye(3)
    else:
        # Flatten so the indexing below yields scalars and vx is a plain
        # (3, 3) array rather than a (3, 3, 1) one.
        v = v.reshape(3)
        np.testing.assert_almost_equal(np.linalg.norm(v), 1.)
        vx = np.array([[0, -v[2], v[1]],
                       [v[2], 0, -v[0]],
                       [-v[1], v[0], 0]])
        vvt = np.outer(v, v)
        R = np.eye(3) * np.cos(theta) + \
            (1 - np.cos(theta)) * vvt + vx * np.sin(theta)
        return R
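A quick sanity check of the Rodrigues formula above (hypothetical values; assumes numpy is imported as np, as in the function body):

# Rotate the x unit vector 90 degrees about the z axis; expect the y unit vector.
R = axis_angle_to_rotation_matrix(np.array([0.0, 0.0, 1.0]), np.pi / 2)
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))  # -> [0. 1. 0.]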
def substitute_any_type(type_: Type, basic_types: Set[BasicType]) -> List[Type]:
    """
    Takes a type and a set of basic types, and substitutes all instances of
    ANY_TYPE with all possible basic types and returns a list with all
    possible combinations.  Note that this substitution is unconstrained.
    That is, if you have a type with placeholders, <#1,#1> for example,
    this may substitute the placeholders with different basic types.  In
    that case, you'd want to use ``_substitute_placeholder_type`` instead.
    """
    if type_ == ANY_TYPE:
        return list(basic_types)
    if isinstance(type_, BasicType):
        return [type_]
    # If we've made it this far, we have a ComplexType, and we can just
    # call `type_.substitute_any_type()`.
    return type_.substitute_any_type(basic_types)
def get_chacra_repo(shaman_url):
    """
    From a Shaman URL, get the chacra url for a repository, read the
    contents that point to the repo and return it as a string.
    """
    shaman_response = get_request(shaman_url)
    chacra_url = shaman_response.geturl()
    chacra_response = get_request(chacra_url)
    return chacra_response.read()
def _enable(name, started, result=True, skip_verify=False, **kwargs):
    '''
    Enable the service
    '''
    ret = {}

    if not skip_verify:
        # is service available?
        try:
            if not _available(name, ret):
                return ret
        except CommandExecutionError as exc:
            ret['result'] = False
            ret['comment'] = exc.strerror
            return ret

    # Set default expected result
    ret['result'] = result

    # Check to see if this minion supports enable
    if 'service.enable' not in __salt__ or 'service.enabled' not in __salt__:
        if started is True:
            ret['comment'] = ('Enable is not available on this minion,'
                              ' service {0} started').format(name)
        elif started is None:
            ret['comment'] = ('Enable is not available on this minion,'
                              ' service {0} is in the desired state'
                              ).format(name)
        else:
            ret['comment'] = ('Enable is not available on this minion,'
                              ' service {0} is dead').format(name)
        return ret

    # Service can be enabled
    before_toggle_enable_status = __salt__['service.enabled'](name, **kwargs)
    if before_toggle_enable_status:
        # Service is enabled
        if started is True:
            ret['comment'] = ('Service {0} is already enabled,'
                              ' and is running').format(name)
        elif started is None:
            # always be sure in this case to reset the changes dict
            ret['changes'] = {}
            ret['comment'] = ('Service {0} is already enabled,'
                              ' and is in the desired state').format(name)
        else:
            ret['comment'] = ('Service {0} is already enabled,'
                              ' and is dead').format(name)
        return ret

    # Service needs to be enabled
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Service {0} set to be enabled'.format(name)
        return ret

    try:
        if __salt__['service.enable'](name, **kwargs):
            # Service has been enabled
            ret['changes'] = {}
            after_toggle_enable_status = __salt__['service.enabled'](
                name, **kwargs)
            # on upstart, certain services like apparmor will always return
            # False, even if correctly activated
            # do not trigger a change
            if before_toggle_enable_status != after_toggle_enable_status:
                ret['changes'][name] = True
            if started is True:
                ret['comment'] = ('Service {0} has been enabled,'
                                  ' and is running').format(name)
            elif started is None:
                ret['comment'] = ('Service {0} has been enabled,'
                                  ' and is in the desired state').format(name)
            else:
                ret['comment'] = ('Service {0} has been enabled,'
                                  ' and is dead').format(name)
            return ret
    except CommandExecutionError as exc:
        enable_error = exc.strerror
    else:
        enable_error = False

    # Service failed to be enabled
    ret['result'] = False
    if started is True:
        ret['comment'] = ('Failed when setting service {0} to start at boot,'
                          ' but the service is running').format(name)
    elif started is None:
        ret['comment'] = ('Failed when setting service {0} to start at boot,'
                          ' but the service was already running').format(name)
    else:
        ret['comment'] = ('Failed when setting service {0} to start at boot,'
                          ' and the service is dead').format(name)

    if enable_error:
        ret['comment'] += '. Additional information follows:\n\n{0}'.format(
            enable_error)
    return ret
def parse_version(version):
    """
    Return a comparable tuple from a version string.  We try to force the
    tuple to semver with versions like 1.2.0.

    Replaces pkg_resources.parse_version, which now displays a warning
    when used for comparing versions with tuples.

    :returns: Version string as comparable tuple
    """
    release_type_found = False
    version_infos = re.split(r'(\.|[a-z]+)', version)
    version = []
    for info in version_infos:
        if info == '.' or len(info) == 0:
            continue
        try:
            info = int(info)
            # We pad with zero to compare only on strings.  This avoids
            # issues when comparing versions with different lengths.
            version.append("%06d" % (info,))
        except ValueError:
            # Force to a version with three numbers
            if len(version) == 1:
                version.append("00000")
            if len(version) == 2:
                version.append("000000")
            # We want rc to be at lower level than dev version
            if info == 'rc':
                info = 'c'
            version.append(info)
            release_type_found = True

    if release_type_found is False:
        # Force to a version with three numbers
        if len(version) == 1:
            version.append("00000")
        if len(version) == 2:
            version.append("000000")
        version.append("final")
    return tuple(version)
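A quick sanity check of the padding scheme above (hypothetical inputs, not from the source project):

# Zero-padded tuples compare the way semver users expect, and 'rc' (mapped
# to 'c') sorts below the 'final' marker of a release.
assert parse_version("1.10.0") > parse_version("1.9.2")
assert parse_version("2.0.0rc1") < parse_version("2.0.0")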
def p_definition_list(p):
    """
    definition_list : definition definition_list
                    | definition
    """
    if len(p) == 3:
        p[0] = p[1] + p[2]
    elif len(p) == 2:
        p[0] = p[1]
    else:
        raise RuntimeError("Invalid production rules 'p_definition_list'")
def main(req_files, verbose=False, outdated=False, latest=False,
         verbatim=False, repo=None, path='requirements.txt', token=None,
         branch='master', url=None, delay=None):
    """Given a list of requirements files, reports which requirements are
    out of date.

    Everything is rather obvious:
    - verbose makes things a little louder
    - outdated forces piprot to only report out of date packages
    - latest outputs the requirements line with the latest version
    - verbatim outputs the requirements file as-is - with comments showing
      the latest versions (can be used with latest to output the latest
      with the old version in the comment)
    - delay specifies a time range during which an outdated package is
      allowed
    """
    requirements = []

    if repo:
        github_url = build_github_url(repo, branch, path, token)
        req_file = get_requirements_file_from_url(github_url)
        requirements.extend(parse_req_file(req_file))
    elif url:
        req_file = get_requirements_file_from_url(url)
        requirements.extend(parse_req_file(req_file))
    else:
        for req_file in req_files:
            requirements.extend(parse_req_file(req_file, verbatim=verbatim))
            req_file.close()

    total_time_delta = 0
    max_outdated_time = 0
    session = FuturesSession()

    results = []
    for req, version, ignore in requirements:
        if verbatim and not req:
            results.append(version)
        elif req:
            results.append({
                'req': req,
                'version': version,
                'ignore': ignore,
                'latest': session.get(get_pypi_url(req)),
                'specified': session.get(get_pypi_url(req, version))
            })

    for result in results:
        if isinstance(result, str):
            print(result.replace('\n', ''))
            continue
        if result['ignore']:
            if verbatim:
                print('{}=={} # norot'.format(result['req'],
                                              result['version']))
            else:
                print('Ignoring updates for {}. '.format(result['req']))
            continue

        req = result['req']
        version = result['version']

        latest_version, latest_release_date = get_version_and_release_date(
            req, verbose=verbose, response=result['latest'].result())
        specified_version, specified_release_date = \
            get_version_and_release_date(
                req, version, response=result['specified'].result())

        if latest_release_date and specified_release_date:
            time_delta = (latest_release_date - specified_release_date).days
            total_time_delta = total_time_delta + time_delta
            max_outdated_time = max(time_delta, max_outdated_time)

            if verbose:
                if time_delta > 0:
                    print('{} ({}) is {} days out of date. '
                          'Latest is {}'.format(req, version, time_delta,
                                                latest_version))
                elif version != latest_version:
                    print('{} ({}) is out of date. '
                          'Latest is {}'.format(req, version,
                                                latest_version))
                elif not outdated:
                    print('{} ({}) is up to date'.format(req, version))

            if latest and latest_version != specified_version:
                print('{}=={} # Updated from {}'.format(req, latest_version,
                                                        specified_version))
            elif verbatim and latest_version != specified_version:
                print('{}=={} # Latest {}'.format(req, specified_version,
                                                  latest_version))
            elif verbatim:
                print('{}=={}'.format(req, specified_version))

        elif verbatim:
            print('{}=={} # Error checking latest version'.format(req,
                                                                  version))

    verbatim_str = ""
    if verbatim:
        verbatim_str = "# Generated with piprot {}\n# ".format(VERSION)

    if total_time_delta > 0 and delay is None:
        print("{}Your requirements are {} "
              "days out of date".format(verbatim_str, total_time_delta))
        sys.exit(1)
    elif delay is not None and max_outdated_time > int(delay):
        print("{}At least one of your dependencies is {} "
              "days out of date which is more than the allowed "
              "{} days.".format(verbatim_str, max_outdated_time, delay))
        sys.exit(1)
    elif delay is not None and max_outdated_time <= int(delay):
        print("{}All of your dependencies are at most {} "
              "days out of date.".format(verbatim_str, delay))
        sys.exit(1)
    else:
        print("{}Looks like you've been keeping up to date, "
              "time for a delicious beverage!".format(verbatim_str))
def calculate_within_class_scatter_matrix(X, y):
    """Calculates the Within-Class Scatter matrix

    Parameters:
    -----------
    X : array-like, shape (m, n) - the samples
    y : array-like, shape (m, ) - the class labels

    Returns:
    --------
    within_class_scatter_matrix : array-like, shape (n, n)
    """
    mean_vectors = calculate_mean_vectors(X, y)
    n_features = X.shape[1]
    Sw = np.zeros((n_features, n_features))
    for cl, m in zip(np.unique(y), mean_vectors):
        Si = np.zeros((n_features, n_features))
        m = m.reshape(n_features, 1)
        for x in X[y == cl, :]:
            v = x.reshape(n_features, 1) - m
            Si += v @ v.T
        Sw += Si
    return Sw
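A small worked example for the computation above (hypothetical data; assumes numpy is imported as np and that calculate_mean_vectors returns the per-class means in np.unique(y) order):

# Two 2-D classes; Sw accumulates the per-class scatter Si = sum (x-m)(x-m)^T.
X = np.array([[1.0, 2.0], [1.2, 1.8], [3.0, 4.0], [3.2, 3.9]])
y = np.array([0, 0, 1, 1])
Sw = calculate_within_class_scatter_matrix(X, y)
print(Sw.shape)  # -> (2, 2); this is the S_W term used in Fisher's LDA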
def read(cls, data):
    """Reads data from URL or OrderedDict.

    Args:
        data: can be a URL pointing to a JSONstat file, a JSON string or
            an OrderedDict.

    Returns:
        An object of class Collection populated with data.
    """
    if isinstance(data, OrderedDict):
        return cls(data)
    elif (isinstance(data, basestring)
          and data.startswith(("http://", "https://",
                               "ftp://", "ftps://"))):
        return cls(request(data))
    elif isinstance(data, basestring):
        try:
            json_dict = json.loads(data, object_pairs_hook=OrderedDict)
            return cls(json_dict)
        except ValueError:
            raise
    else:
        try:
            json_dict = json.load(data, object_pairs_hook=OrderedDict)
            return cls(json_dict)
        except ValueError:
            raise
def _filter_attributes(self, keyset):
    """
    Return a copy of this object with a subset of its attributes set.
    """
    filtered = self._filter_keys(self.to_dict(), keyset)
    return Language.make(**filtered)
def _time_show(self):
    """Show the time marker window"""
    if not self._time_visible:
        self._time_visible = True
        self._time_window = tk.Toplevel(self)
        self._time_window.attributes("-topmost", True)
        self._time_window.overrideredirect(True)
        self._time_label = ttk.Label(self._time_window)
        self._time_label.grid()
        self._time_window.lift()
    x, y = self.master.winfo_pointerxy()
    geometry = "{0}x{1}+{2}+{3}".format(
        self._time_label.winfo_width(),
        self._time_label.winfo_height(),
        x - 15,
        self._canvas_ticks.winfo_rooty() - 10)
    self._time_window.wm_geometry(geometry)
    self._time_label.config(
        text=TimeLine.get_time_string(self.time, self._unit))
def status(name, runas=None):
    '''
    Status of a VM

    :param str name:
        Name/ID of VM whose status will be returned

    :param str runas:
        The user that the prlctl command will be run as

    Example:

    .. code-block:: bash

        salt '*' parallels.status macvm runas=macdev
    '''
    return prlctl('status', salt.utils.data.decode(name), runas=runas)
def match(self, *command_tokens, **command_env):
    """ :meth:`.WCommandProto.match` implementation """
    mutated_command_tokens = self.mutate_command_tokens(*command_tokens)
    if mutated_command_tokens is None:
        return False
    return self.selector().select(
        *mutated_command_tokens, **command_env) is not None
def scroll_to(self, selector, by=By.CSS_SELECTOR,
              timeout=settings.SMALL_TIMEOUT):
    ''' Fast scroll to destination '''
    if self.demo_mode:
        self.slow_scroll_to(selector, by=by, timeout=timeout)
        return
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    element = self.wait_for_element_visible(
        selector, by=by, timeout=timeout)
    try:
        self.__scroll_to_element(element)
    except (StaleElementReferenceException, ENI_Exception):
        self.wait_for_ready_state_complete()
        time.sleep(0.05)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=timeout)
        self.__scroll_to_element(element)
def cli(context, host, username, password):
    """FritzBox SmartHome Tool

    \b
    Provides the following functions:
    - An easy-to-use library for querying SmartHome actors
    - This CLI tool for testing
    - A carbon client for piping data into graphite
    """
    context.obj = FritzBox(host, username, password)
def disable_host_svc_notifications(self, host):
    """Disable service notifications for a host

    Format of the line that triggers function call::

        DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>

    :param host: host to edit
    :type host: alignak.objects.host.Host
    :return: None
    """
    for service_id in host.services:
        if service_id in self.daemon.services:
            service = self.daemon.services[service_id]
            self.disable_svc_notifications(service)
            self.send_an_element(service.get_update_status_brok())
def update_pypsa_bus_timeseries(network, timesteps=None):
    """
    Updates buses voltage time series in pypsa representation.

    This function overwrites v_mag_pu_set of buses_t attribute of pypsa
    network.  Be aware that if you call this function with `timesteps` and
    thus overwrite current time steps it may lead to inconsistencies in the
    pypsa network since only bus time series are updated but none of the
    other time series or the snapshots attribute of the pypsa network.  Use
    the function :func:`update_pypsa_timeseries` to change the time steps
    you want to analyse in the power flow analysis.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or \
        :pandas:`pandas.Timestamp<timestamp>`
        Timesteps specifies which time steps of the time series to export
        to pypsa representation.  If None all time steps currently existing
        in pypsa representation are updated.  If not None current time
        steps are overwritten by given time steps.  Default: None.

    """
    if timesteps is None:
        timesteps = network.pypsa.buses_t.v_mag_pu_set.index
    # check if timesteps is array-like, otherwise convert to list
    if not hasattr(timesteps, "__len__"):
        timesteps = [timesteps]
    buses = network.pypsa.buses.index
    v_mag_pu_set = _pypsa_bus_timeseries(network, buses, timesteps)
    network.pypsa.buses_t.v_mag_pu_set = v_mag_pu_set
def set_autocut_params(self, method, **params):
    """Set auto-cut parameters.

    Parameters
    ----------
    method : str
        Auto-cut algorithm.  A list of acceptable options can
        be obtained by :meth:`get_autocut_methods`.

    params : dict
        Algorithm-specific keywords and values.

    """
    self.logger.debug("Setting autocut params method=%s params=%s" % (
        method, str(params)))
    params = list(params.items())
    self.t_.set(autocut_method=method, autocut_params=params)
def connectSubsystem(connection, protocol, subsystem):
    """Connect a Protocol to a ssh subsystem channel"""
    deferred = connectSession(connection, protocol)

    @deferred.addCallback
    def requestSubsystem(session):
        return session.requestSubsystem(subsystem)

    return deferred
def Images(vent=True):
    """ Get images that are built, by default limited to vent images """
    images = []

    # TODO needs to also check images in the manifest that couldn't have
    # the label added
    try:
        d_client = docker.from_env()
        if vent:
            i = d_client.images.list(filters={'label': 'vent'})
        else:
            i = d_client.images.list()
        for image in i:
            images.append((image.tags[0], image.short_id))
    except Exception as e:  # pragma: no cover
        logger.error('Something with the Images went wrong ' + str(e))

    return images
def reference(self):
    """Return the Reference object for this Key.

    This is an entity_pb.Reference instance -- a protocol buffer class
    used by the lower-level API to the datastore.

    NOTE: The caller should not mutate the return value.
    """
    if self.__reference is None:
        self.__reference = _ConstructReference(self.__class__,
                                               pairs=self.__pairs,
                                               app=self.__app,
                                               namespace=self.__namespace)
    return self.__reference
def text_editor(file='', background=False, return_cmd=False):
    '''Starts the default graphical text editor.

    Start the user's preferred graphical text editor, optionally with a
    file.

    Args:
        file (str) : The file to be opened with the editor.  Defaults to
            an empty string (i.e. no file).
        background (bool): Runs the editor in the background, instead of
            waiting for completion.  Defaults to ``False``.
        return_cmd (bool): Returns the command (str) to run the editor
            instead of running it.  Defaults to ``False``.

    Returns:
        str: Only if ``return_cmd``, the command to run the editor is
        returned.  Else returns nothing.
    '''
    desktop_env = system.get_name()

    if desktop_env == 'windows':
        editor_cmd_str = system.get_cmd_out(
            ['ftype', 'textfile']).split('=', 1)[1]
    elif desktop_env == 'mac':
        editor_cmd_str = 'open -a' + system.get_cmd_out(
            ['def', 'read', 'com.apple.LaunchServices', 'LSHandlers'
             '-array' '{LSHandlerContentType=public.plain-text;}'])
    else:
        # Use default handler for MIME-type text/plain
        editor_cmd_str = system.get_cmd_out(
            ['xdg-mime', 'query', 'default', 'text/plain'])

        if '\n' in editor_cmd_str:
            # Sometimes locate returns multiple results
            # use first one
            editor_cmd_str = editor_cmd_str.split('\n')[0]

        if editor_cmd_str.endswith('.desktop'):
            # We don't use desktopfile.execute() in order to have working
            # return_cmd and background
            editor_cmd_str = desktopfile.parse(
                desktopfile.locate(editor_cmd_str)[0])['Exec']

            for i in editor_cmd_str.split():
                if i.startswith('%'):
                    # %-style formatters
                    editor_cmd_str = editor_cmd_str.replace(i, '')
                if i == '--new-document':
                    # Gedit
                    editor_cmd_str = editor_cmd_str.replace(i, '')

    if file:
        editor_cmd_str += ' {}'.format(shlex.quote(file))

    if return_cmd:
        return editor_cmd_str

    text_editor_proc = sp.Popen([editor_cmd_str], shell=True)

    if not background:
        text_editor_proc.wait()
def create_scan(self, host_ips):
    """
    Creates a scan with the given host ips.  Returns the scan id of the
    created object.
    """
    now = datetime.datetime.now()
    data = {
        "uuid": self.get_template_uuid(),
        "settings": {
            "name": "jackal-" + now.strftime("%Y-%m-%d %H:%M"),
            "text_targets": host_ips
        }
    }
    response = requests.post(self.url + 'scans', data=json.dumps(data),
                             verify=False, headers=self.headers)
    if response:
        result = json.loads(response.text)
        return result['scan']['id']
def filter_rep_set(inF, otuSet):
    """
    Parse the rep set file and remove all sequences not associated with
    unique OTUs.

    :@type inF: file
    :@param inF: The representative sequence set
    :@rtype: list
    :@return: The set of sequences associated with unique OTUs
    """
    seqs = []
    for record in SeqIO.parse(inF, "fasta"):
        if record.id in otuSet:
            seqs.append(record)
    return seqs
def netHours(self):
    '''
    For regular event staff, this is the net hours worked for financial
    purposes.  For Instructors, netHours is calculated net of any
    substitutes.
    '''
    if self.specifiedHours is not None:
        return self.specifiedHours
    elif self.category in [
        getConstant('general__eventStaffCategoryAssistant'),
        getConstant('general__eventStaffCategoryInstructor'),
    ]:
        return self.event.duration - sum(
            [sub.netHours for sub in self.replacementFor.all()])
    else:
        return sum([x.duration for x in
                    self.occurrences.filter(cancelled=False)])
def find_common_root(elements):
    """
    Find root which is common for all `elements`.

    Args:
        elements (list): List of double-linked HTMLElement objects.

    Returns:
        list: Vector of HTMLElement containing path to common root.
    """
    if not elements:
        raise UserWarning("Can't find common root - no elements supplied.")

    root_path = el_to_path_vector(elements.pop())

    for el in elements:
        el_path = el_to_path_vector(el)
        root_path = common_vector_root(root_path, el_path)

        if not root_path:
            raise UserWarning(
                "Vectors without common root:\n%s" % str(el_path))

    return root_path
def appendNullPadding(str, blocksize=AES_blocksize):
    'Pad with null bytes'
    pad_len = paddingLength(len(str), blocksize)
    padding = '\0' * pad_len
    return str + padding
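A short usage sketch (hypothetical; assumes AES_blocksize is 16 and that paddingLength returns the byte count needed to reach the next block boundary):

# 17 input bytes need 15 null bytes to reach the next 16-byte boundary.
padded = appendNullPadding('YELLOW SUBMARINE!', 16)
print(len(padded))  # -> 32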
def _field_name_from_uri(self, uri):
    """helper, returns the name of an attribute (without namespace prefix)"""
    # TODO - should use graph API
    uri = str(uri)
    parts = uri.split('#')
    if len(parts) == 1:
        return uri.split('/')[-1] or uri
    return parts[-1]
def import_results(log, pathToYamlFile):
    """
    *Import the results of the simulation (the filename is an argument of
    this model)*

    **Key Arguments:**
        - ``log`` -- logger
        - ``pathToYamlFile`` -- the path to the yaml file to be imported

    **Return:**
        - the unpacked simulation results (discovery times, SN types,
          redshifts, cadence dictionary, peak magnitudes, campaign lengths)
    """
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    import yaml
    ## LOCAL APPLICATION ##

    ################ > ACTION(S) ################
    fileName = pathToYamlFile
    stream = open(fileName, 'r')
    yamlContent = yaml.load(stream)
    snSurveyDiscoveryTimes = yamlContent[
        'Discoveries Relative to Survey Year']
    lightCurveDiscoveryTimes = yamlContent[
        'Discoveries Relative to Peak Magnitudes']
    snTypes = yamlContent['SN Types']
    redshifts = yamlContent['Redshifts']
    cadenceDictionary = yamlContent['Cadence Dictionary']
    peakAppMagList = yamlContent['Peak Apparent Magnitudes']
    snCampaignLengthList = yamlContent['Campaign Length']
    # log.info('yamlContent %s' % (yamlContent,))
    stream.close()

    return (snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes,
            redshifts, cadenceDictionary, peakAppMagList,
            snCampaignLengthList)
def get_arguments(self):
    """ Extracts the specific arguments of this CLI """
    PluginBase.get_arguments(self)
    if self.args.organizationName is not None:
        self.organizationName = self.args.organizationName
    if self.args.repositoryName is not None:
        self.repositoryName = self.args.repositoryName
    self.path = "v1/plugins/private/{0}/{1}/{2}".format(
        self.pluginName, self.organizationName, self.repositoryName)
def sequence_equal(self, second_iterable, equality_comparer=operator.eq):
    '''Determine whether two sequences are equal by elementwise comparison.

    Sequence equality is defined as the two sequences being equal length
    and corresponding elements being equal as determined by the equality
    comparer.

    Note: This method uses immediate execution.

    Args:
        second_iterable: The sequence which will be compared with the
            source sequence.

        equality_comparer: An optional binary predicate function which is
            used to compare corresponding elements.  Should return True if
            the elements are equal, otherwise False.  The default equality
            comparer is operator.eq which calls __eq__ on elements of the
            source sequence with the corresponding element of the second
            sequence as a parameter.

    Returns:
        True if the sequences are equal, otherwise False.

    Raises:
        ValueError: If the Queryable is closed.
        TypeError: If second_iterable is not in fact iterable.
        TypeError: If equality_comparer is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call sequence_equal() on a "
                         "closed Queryable.")

    if not is_iterable(second_iterable):
        raise TypeError("Cannot compute sequence_equal() with "
                        "second_iterable of non-iterable {type}".format(
                            type=str(type(second_iterable))[7: -1]))

    if not is_callable(equality_comparer):
        raise TypeError("sequence_equal() parameter equality_comparer="
                        "{equality_comparer} is not callable".format(
                            equality_comparer=repr(equality_comparer)))

    # Try to check the lengths directly as an optimization
    try:
        if len(self._iterable) != len(second_iterable):
            return False
    except TypeError:
        pass

    sentinel = object()
    for first, second in izip_longest(self, second_iterable,
                                      fillvalue=sentinel):
        if first is sentinel or second is sentinel:
            return False
        if not equality_comparer(first, second):
            return False
    return True
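For context, a hedged usage sketch of this Queryable method as exposed by the asq library's query() initiator (the entry point is an assumption, not shown in the source above):

from asq.initiators import query
print(query([1, 2, 3]).sequence_equal([1, 2, 3]))  # -> True
print(query([1, 2, 3]).sequence_equal([3, 2, 1]))  # -> False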
def weighted_choice(self, probabilities, key):
    """Makes a weighted choice between several options.

    Probabilities is a list of 2-tuples, (probability, option).  The
    probabilities don't need to add up to anything; they are
    automatically scaled.
    """
    try:
        choice = self.values[key].lower()
    except KeyError:
        # override not set.
        return super(RecordingParameters, self)\
            .weighted_choice(probabilities, key)

    # Find the matching key (case insensitive)
    for probability, option in probabilities:
        if str(option).lower() == choice:
            return option
    # for function or class-type choices, also check __name__
    for probability, option in probabilities:
        if option.__name__.lower() == choice:
            return option
    assert False, "Invalid value provided"
def initial(self, request, *args, **kwargs):
    """
    Custom initial method:
        * ensure node exists and store it in an instance attribute
        * change queryset to return only links of current node
    """
    super(NodeLinkList, self).initial(request, *args, **kwargs)

    # ensure node exists
    try:
        self.node = Node.objects.published()\
                        .accessible_to(request.user)\
                        .get(slug=self.kwargs.get('slug', None))
    except Node.DoesNotExist:
        raise Http404(_('Node not found.'))

    # check permissions on node (for link creation)
    self.check_object_permissions(request, self.node)

    # return only links of current node
    self.queryset = Link.objects.select_related('node_a', 'node_b')\
                        .accessible_to(self.request.user)\
                        .filter(Q(node_a_id=self.node.id) |
                                Q(node_b_id=self.node.id))
def to_element(self, root_name=None):
    """Serialize this `Resource` instance to an XML element."""
    if not root_name:
        root_name = self.nodename

    elem = ElementTreeBuilder.Element(root_name)
    for attrname in self.serializable_attributes():
        # Only use values that have been loaded into the internal
        # __dict__.  For retrieved objects we look into the XML response
        # at access time, so the internal __dict__ contains only the
        # elements that have been set on the client side.
        try:
            value = self.__dict__[attrname]
        except KeyError:
            continue

        if attrname in self.xml_attribute_attributes:
            elem.attrib[attrname] = six.text_type(value)
        else:
            sub_elem = self.element_for_value(attrname, value)
            elem.append(sub_elem)

    return elem
def get_all_assignable_users_for_project(self, project_key, start=0,
                                         limit=50):
    """
    Provide assignable users for project
    :param project_key:
    :param start: OPTIONAL: The start point of the collection to return.
        Default: 0.
    :param limit: OPTIONAL: The limit of the number of users to return,
        this may be restricted by fixed system limits.
        Default by built-in method: 50
    :return:
    """
    url = ('rest/api/2/user/assignable/search?'
           'project={project_key}&startAt={start}&maxResults={limit}'.format(
               project_key=project_key, start=start, limit=limit))
    return self.get(url)
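A hedged usage sketch (the Jira client class and its constructor arguments are assumptions; only the method call itself comes from the code above):

# Fetch the first ten users assignable to issues in project 'PROJ'.
jira = Jira(url='https://jira.example.com', username='bot', password='secret')
users = jira.get_all_assignable_users_for_project('PROJ', start=0, limit=10)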
def _bash_completion(self):
    """Prints all of the commands and options for bash-completion."""
    commands = set()
    options = set()
    for option, _action in self.parser._option_string_actions.items():
        options.add(option)
    for _name, _command in self.command_manager:
        commands.add(_name)
        cmd_factory = _command.load()
        cmd = cmd_factory(self, None)
        cmd_parser = cmd.get_parser('')
        for option, _action in cmd_parser._option_string_actions.items():
            options.add(option)
    print(' '.join(commands | options))
def _factory(slice_, axis, weighted):
    """return subclass for PairwiseSignificance, based on slice dimension
    types."""
    if slice_.dim_types[0] == DT.MR_SUBVAR:
        return _MrXCatPairwiseSignificance(slice_, axis, weighted)
    return _CatXCatPairwiseSignificance(slice_, axis, weighted)
def sign_data(self, data, expires_in=None, url_safe=True):
    """
    Safely sign user data.  It will be signed with the user key.

    :param data: mixed
    :param expires_in: The time for it to expire
    :param url_safe: bool.  If true it will allow it to be passed in URL
    :return: str - the token/signed data
    """
    if url_safe:
        return utils.sign_url_safe(data,
                                   secret_key=self.secret_key,
                                   salt=self.user_salt,
                                   expires_in=expires_in)
    else:
        return utils.sign_data(data,
                               secret_key=self.secret_key,
                               salt=self.user_salt,
                               expires_in=expires_in)
def linestring_to_utm(linestring: LineString) -> LineString:
    """
    Given a Shapely LineString in WGS84 coordinates, convert it to the
    appropriate UTM coordinates.
    """
    proj = lambda x, y: utm.from_latlon(y, x)[:2]
    return transform(proj, linestring)
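A quick usage sketch (hypothetical coordinates; assumes the shapely and utm packages the function relies on are installed):

from shapely.geometry import LineString
# Shapely stores coordinates as (longitude, latitude) pairs in WGS84.
line = LineString([(-73.99, 40.75), (-73.98, 40.76)])
print(linestring_to_utm(line))  # LINESTRING in UTM easting/northing (metres)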
def delete_resource(self, resource, resource_id, msg="resource",
                    max_wait=120):
    """Delete one openstack resource, such as one instance, keypair,
    image, volume, stack, etc., and confirm deletion within max wait time.

    :param resource: pointer to os resource type, ex:glance_client.images
    :param resource_id: unique name or id for the openstack resource
    :param msg: text to identify purpose in logging
    :param max_wait: maximum wait time in seconds
    :returns: True if successful, otherwise False
    """
    self.log.debug('Deleting OpenStack resource '
                   '{} ({})'.format(resource_id, msg))
    num_before = len(list(resource.list()))
    resource.delete(resource_id)

    tries = 0
    num_after = len(list(resource.list()))
    while num_after != (num_before - 1) and tries < (max_wait / 4):
        self.log.debug('{} delete check: '
                       '{} [{}:{}] {}'.format(msg, tries,
                                              num_before,
                                              num_after,
                                              resource_id))
        time.sleep(4)
        num_after = len(list(resource.list()))
        tries += 1

    self.log.debug('{}: expected, actual count = {}, '
                   '{}'.format(msg, num_before - 1, num_after))

    if num_after == (num_before - 1):
        return True
    else:
        self.log.error('{} delete timed out'.format(msg))
        return False
def shorten(text):
    """ Reduce text length for displaying / logging purposes. """
    if len(text) >= MAX_DISPLAY_LEN:
        text = text[:MAX_DISPLAY_LEN//2] + "..." + text[-MAX_DISPLAY_LEN//2:]
    return text
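A quick demonstration (hypothetical constant value; MAX_DISPLAY_LEN is defined elsewhere in the module):

MAX_DISPLAY_LEN = 20  # assumed value for the demo
print(shorten("abcdefghijklmnopqrstuvwxyz"))  # -> 'abcdefghij...qrstuvwxyz'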
def now(cls, Name=None):
    """
    Instantiate a Time element initialized to the current UTC time in the
    default format (ISO-8601).  The Name attribute will be set to the
    value of the Name parameter if given.
    """
    self = cls()
    if Name is not None:
        self.Name = Name
    self.pcdata = datetime.datetime.utcnow()
    return self
def fcoe_get_login_output_fcoe_login_list_interface_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    fcoe_get_login = ET.Element("fcoe_get_login")
    config = fcoe_get_login
    output = ET.SubElement(fcoe_get_login, "output")
    fcoe_login_list = ET.SubElement(output, "fcoe-login-list")
    fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list,
                                               "fcoe-login-session-mac")
    fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac')
    interface_name = ET.SubElement(fcoe_login_list, "interface-name")
    interface_name.text = kwargs.pop('interface_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def wsgi_app(self, environ, start_response):
    """A basic WSGI app"""

    @_LOCAL_MANAGER.middleware
    def _wrapped_app(environ, start_response):
        request = Request(environ)
        setattr(_local, _CURRENT_REQUEST_KEY, request)
        response = self._dispatch_request(request)
        return response(environ, start_response)

    return _wrapped_app(environ, start_response)
def shell_call(self, shellcmd):
    """Shell call with necessary setup first."""
    return subprocess.call(self.shellsetup + shellcmd, shell=True)
def edit(self, name, config, events=github.GithubObject.NotSet,
         add_events=github.GithubObject.NotSet,
         remove_events=github.GithubObject.NotSet,
         active=github.GithubObject.NotSet):
    """
    :calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
    :param name: string
    :param config: dict
    :param events: list of string
    :param add_events: list of string
    :param remove_events: list of string
    :param active: bool
    :rtype: None
    """
    assert isinstance(name, (str, unicode)), name
    assert isinstance(config, dict), config
    assert events is github.GithubObject.NotSet or all(
        isinstance(element, (str, unicode)) for element in events), events
    assert add_events is github.GithubObject.NotSet or all(
        isinstance(element, (str, unicode))
        for element in add_events), add_events
    assert remove_events is github.GithubObject.NotSet or all(
        isinstance(element, (str, unicode))
        for element in remove_events), remove_events
    assert active is github.GithubObject.NotSet or \
        isinstance(active, bool), active
    post_parameters = {
        "name": name,
        "config": config,
    }
    if events is not github.GithubObject.NotSet:
        post_parameters["events"] = events
    if add_events is not github.GithubObject.NotSet:
        post_parameters["add_events"] = add_events
    if remove_events is not github.GithubObject.NotSet:
        post_parameters["remove_events"] = remove_events
    if active is not github.GithubObject.NotSet:
        post_parameters["active"] = active
    headers, data = self._requester.requestJsonAndCheck(
        "PATCH",
        self.url,
        input=post_parameters
    )
    self._useAttributes(data)
def get_authorization_url(self, client_id=None, instance_id=None,
                          redirect_uri=None, region=None, scope=None,
                          state=None):
    """Generate authorization URL.

    Args:
        client_id (str): OAuth2 client ID.  Defaults to ``None``.
        instance_id (str): App Instance ID.  Defaults to ``None``.
        redirect_uri (str): Redirect URI.  Defaults to ``None``.
        region (str): App Region.  Defaults to ``None``.
        scope (str): Permissions.  Defaults to ``None``.
        state (str): UUID to detect CSRF.  Defaults to ``None``.

    Returns:
        str, str: Auth URL, state
    """
    client_id = client_id or self.client_id
    instance_id = instance_id or self.instance_id
    redirect_uri = redirect_uri or self.redirect_uri
    region = region or self.region
    scope = scope or self.scope
    state = state or str(uuid.uuid4())
    self.state = state
    return Request(
        'GET',
        self.auth_base_url,
        params={
            'client_id': client_id,
            'instance_id': instance_id,
            'redirect_uri': redirect_uri,
            'region': region,
            'response_type': 'code',
            'scope': scope,
            'state': state
        }
    ).prepare().url, state
def AddTableColumn(self, table, column):
    """Add column to table if it is not already there."""
    if column not in self._table_columns[table]:
        self._table_columns[table].append(column)
Add column to table if it is not already there.
def update_metric(self, metric, labels, pre_sliced=False): """Update evaluation metric with label and current outputs.""" for current_exec, (texec, islice) in enumerate(zip(self.train_execs, self.slices)): if not pre_sliced: labels_slice = [label[islice] for label in labels] else: labels_slice = labels[current_exec] metric.update(labels_slice, texec.outputs)
Update evaluation metric with label and current outputs.
def _paths_from_env(variable: str, default: List[Path]) -> List[Path]: """Read an environment variable as a list of paths. The environment variable with the specified name is read, and its value split on colons and returned as a list of paths. If the environment variable is not set, or set to the empty string, the default value is returned. Parameters ---------- variable : str Name of the environment variable. default : List[Path] Default value. Returns ------- List[Path] Value from environment or default. """ # TODO(srstevenson): Use assignment expression in Python 3.8. value = os.environ.get(variable) if value: return [Path(path) for path in value.split(":")] return default
Read an environment variable as a list of paths. The environment variable with the specified name is read, and its value split on colons and returned as a list of paths. If the environment variable is not set, or set to the empty string, the default value is returned. Parameters ---------- variable : str Name of the environment variable. default : List[Path] Default value. Returns ------- List[Path] Value from environment or default.
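A quick illustration, assuming the helper above is in scope; the XDG-style variable name is only an example.

import os
from pathlib import Path

os.environ["XDG_DATA_DIRS"] = "/usr/local/share:/usr/share"
print(_paths_from_env("XDG_DATA_DIRS", [Path("/usr/share")]))
# [PosixPath('/usr/local/share'), PosixPath('/usr/share')]

os.environ.pop("XDG_DATA_DIRS")
print(_paths_from_env("XDG_DATA_DIRS", [Path("/usr/share")]))
# falls back to the default: [PosixPath('/usr/share')]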
def dist( self, src, tar, word_approx_min=0.3, char_approx_min=0.73, tests=2 ** 12 - 1, ): """Return the normalized Synoname distance between two words. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison word_approx_min : float The minimum word approximation value to signal a 'word_approx' match char_approx_min : float The minimum character approximation value to signal a 'char_approx' match tests : int or Iterable Either an integer indicating tests to perform or a list of test names to perform (defaults to performing all tests) Returns ------- float Normalized Synoname distance """ return ( synoname(src, tar, word_approx_min, char_approx_min, tests, False) / 14 )
Return the normalized Synoname distance between two words. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison word_approx_min : float The minimum word approximation value to signal a 'word_approx' match char_approx_min : float The minimum character approximation value to signal a 'char_approx' match tests : int or Iterable Either an integer indicating tests to perform or a list of test names to perform (defaults to performing all tests) Returns ------- float Normalized Synoname distance
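A sketch of how the normalized distance is consumed, assuming the enclosing class (Synoname, as in abydos) instantiates with defaults; the names compared are illustrative.

cmp = Synoname()
# 0.0 is the strongest match class, 1.0 the weakest (no_match = 14/14)
d = cmp.dist('Brueghel', 'Breughel')
assert 0.0 <= d <= 1.0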
def read_data(self, size): """Receive data from the device. If the read fails for any reason, an :obj:`IOError` exception is raised. :param size: the number of bytes to read. :type size: int :return: the data received. :rtype: list(int) """ result = list() while size > 0: count = min(size, 8) buf = self.hid.read(count) if len(buf) < count: raise IOError( 'pywws.device_cython_hidapi.USBDevice.read_data failed') result += buf size -= count return result
Receive data from the device. If the read fails for any reason, an :obj:`IOError` exception is raised. :param size: the number of bytes to read. :type size: int :return: the data received. :rtype: list(int)
def _trim_xpath(self, xpath, prop): """ Removes primitive type tags from an XPATH """ xroot = self._get_xroot_for(prop) if xroot is None and isinstance(xpath, string_types): xtags = xpath.split(XPATH_DELIM) if xtags[-1] in _iso_tag_primitives: xroot = XPATH_DELIM.join(xtags[:-1]) return xroot
Removes primitive type tags from an XPATH
def close_other_windows(self):
    """
    Closes all windows except the current one.

    Useful for tests - after each test you can automatically close all
    extra windows.
    """
    main_window_handle = self.current_window_handle
    for window_handle in self.window_handles:
        if window_handle == main_window_handle:
            continue
        self.switch_to_window(window_handle)
        self.close()
    self.switch_to_window(main_window_handle)

Closes all windows except the current one. Useful for tests - after each test you can automatically close all extra windows.
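A teardown sketch, assuming `browser` mixes the method above into a Selenium-style WebDriver object.

def teardown(browser):
    # close any popups or extra tabs the test opened, keep the main window
    browser.close_other_windows()
    assert len(browser.window_handles) == 1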
def configure(self, config_file): """ Parse configuration, and setup objects to use it. """ cfg = configparser.RawConfigParser() try: cfg.readfp(open(config_file)) except IOError as err: logger.critical( 'Error while reading config file {}: {}'.format( config_file, err.strerror)) sys.exit(1) logger.info('Parsed config file ' + config_file) # Extract user-defined log level from configuration if cfg.has_option('milter', 'loglevel'): loglevel = cfg.get('milter', 'loglevel') loglevel_numeric = getattr(logging, loglevel.upper(), None) if not isinstance(loglevel_numeric, int): logger.critical( 'Config contains unsupported loglevel: ' + loglevel) exit(1) rl = logging.getLogger() rl.setLevel(loglevel_numeric) logger.debug( 'Config option applied: milter->loglevel: {}'.format(loglevel)) # Apply all config options to their respective classes section_class_map = { 'milter': self, 'dspam': DspamClient, 'classification': DspamMilter, } for section in cfg.sections(): try: class_ = section_class_map[section] except KeyError: logger.warning('Config contains unknown section: ' + section) continue logger.debug('Handling config section: ' + section) dict_options = [ 'headers', 'reject_classes', 'quarantine_classes', 'accept_classes' ] for option in cfg.options(section): # Kludge: static_user needs to be set on the milter, # not on the client if section == 'dspam' and option == 'static_user': value = cfg.get('dspam', 'static_user') DspamMilter.static_user = value logger.debug( 'Config option applied: dspam->static_user: {}'.format( value)) continue if not hasattr(class_, option): logger.warning( 'Config contains unknown option: {}->{}'.format( section, option)) continue value = cfg.get(section, option) if option in dict_options: value = utils.config_str2dict(value) elif value.lower() in ['false', 'no']: value = False elif value.lower() in ['true', 'yes']: value = True setattr(class_, option, value) logger.debug( 'Config option applied: {}->{}: {}'.format( section, option, value)) logger.debug('Configuration completed')
Parse configuration, and setup objects to use it.
async def peers(self): """Returns the current Raft peer set Returns: Collection: addresses of peers This endpoint retrieves the Raft peers for the datacenter in which the agent is running. It returns a collection of addresses, such as:: [ "10.1.10.12:8300", "10.1.10.11:8300", "10.1.10.10:8300" ] This list of peers is strongly consistent and can be useful in determining when a given server has successfully joined the cluster. """ response = await self._api.get("/v1/status/peers") if response.status == 200: return set(response.body)
Returns the current Raft peer set Returns: Collection: addresses of peers This endpoint retrieves the Raft peers for the datacenter in which the agent is running. It returns a collection of addresses, such as:: [ "10.1.10.12:8300", "10.1.10.11:8300", "10.1.10.10:8300" ] This list of peers is strongly consistent and can be useful in determining when a given server has successfully joined the cluster.
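An async usage sketch; `client` is a hypothetical instance of the enclosing status-endpoint wrapper.

import asyncio

async def wait_for_quorum(client, expected):
    # peers() is strongly consistent, so it is safe for join checks
    while len(await client.peers()) < expected:
        await asyncio.sleep(1)

# asyncio.get_event_loop().run_until_complete(wait_for_quorum(client, 3))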
async def set_agent_neighbors(self): '''Set neighbors for each agent in each cardinal direction. This method assumes that the neighboring :class:`GridEnvironment` of this grid environment have already been set. ''' for i in range(len(self.grid)): for j in range(len(self.grid[0])): agent = self.grid[i][j] xy = (self.origin[0] + i, self.origin[1] + j) nxy = _get_neighbor_xy('N', xy) exy = _get_neighbor_xy('E', xy) sxy = _get_neighbor_xy('S', xy) wxy = _get_neighbor_xy('W', xy) if j == 0: naddr = await self._get_xy_address_from_neighbor('N', nxy) else: naddr = self.get_xy(nxy, addr=True) if i == 0: waddr = await self._get_xy_address_from_neighbor('W', wxy) else: waddr = self.get_xy(wxy, addr=True) if j == len(self.grid[0]) - 1: saddr = await self._get_xy_address_from_neighbor('S', sxy) else: saddr = self.get_xy(sxy, addr=True) if i == len(self.grid) - 1: eaddr = await self._get_xy_address_from_neighbor('E', exy) else: eaddr = self.get_xy(exy, addr=True) agent.neighbors['N'] = naddr agent.neighbors['E'] = eaddr agent.neighbors['S'] = saddr agent.neighbors['W'] = waddr
Set neighbors for each agent in each cardinal direction. This method assumes that the neighboring :class:`GridEnvironment` of this grid environment have already been set.
def page(self, friendly_name=values.unset, evaluate_worker_attributes=values.unset, worker_sid=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of TaskQueueInstance records from the API. Request is executed immediately :param unicode friendly_name: Filter by a human readable description of a TaskQueue :param unicode evaluate_worker_attributes: Provide a Worker attributes expression, and this will return the list of TaskQueues that would distribute tasks to a worker with these attributes. :param unicode worker_sid: The worker_sid :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of TaskQueueInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueuePage """ params = values.of({ 'FriendlyName': friendly_name, 'EvaluateWorkerAttributes': evaluate_worker_attributes, 'WorkerSid': worker_sid, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return TaskQueuePage(self._version, response, self._solution)
Retrieve a single page of TaskQueueInstance records from the API. Request is executed immediately :param unicode friendly_name: Filter by a human readable description of a TaskQueue :param unicode evaluate_worker_attributes: Provide a Worker attributes expression, and this will return the list of TaskQueues that would distribute tasks to a worker with these attributes. :param unicode worker_sid: The worker_sid :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of TaskQueueInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueuePage
def get_aligned_adjacent_coords(x, y):
    '''
    returns the eight clockwise adjacent coordinates on a keypad, where
    each row is vertically aligned.
    '''
    return [(x-1, y), (x-1, y-1), (x, y-1), (x+1, y-1),
            (x+1, y), (x+1, y+1), (x, y+1), (x-1, y+1)]

returns the eight clockwise adjacent coordinates on a keypad, where each row is vertically aligned.
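A quick check of the neighbor ordering, assuming the function above is in scope: for (1, 1) the eight cells come back starting due west of the cell.

print(get_aligned_adjacent_coords(1, 1))
# [(0, 1), (0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2)]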
def _get_host_only_mac_address(): """Returns the MAC address assigned to the host-only adapter, using output from VBoxManage. Returned MAC address has no colons and is lower-cased.""" # Get the number of the host-only adapter vm_config = _get_vm_config() for line in vm_config: if line.startswith('hostonlyadapter'): adapter_number = int(line[15:16]) break else: raise ValueError('No host-only adapter is defined for the Dusty VM') for line in vm_config: if line.startswith('macaddress{}'.format(adapter_number)): return line.split('=')[1].strip('"').lower() raise ValueError('Could not find MAC address for adapter number {}'.format(adapter_number))
Returns the MAC address assigned to the host-only adapter, using output from VBoxManage. Returned MAC address has no colons and is lower-cased.
def _get_attach_id(self, key, value, attributes): """ Get the attach record ID and extra attributes. """ if isinstance(value, dict): key = list(value.keys())[0] attributes.update(value[key]) return [key, attributes] return value, attributes
Get the attach record ID and extra attributes.
def color(colors, export_type, output_file=None): """Export a single template file.""" all_colors = flatten_colors(colors) template_name = get_export_type(export_type) template_file = os.path.join(MODULE_DIR, "templates", template_name) output_file = output_file or os.path.join(CACHE_DIR, template_name) if os.path.isfile(template_file): template(all_colors, template_file, output_file) logging.info("Exported %s.", export_type) else: logging.warning("Template '%s' doesn't exist.", export_type)
Export a single template file.
def _determine_namespaces(self): """ Determine the names of all namespaces of the WBEM server, by communicating with it and enumerating the instances of a number of possible CIM classes that typically represent CIM namespaces. Their class names are defined in the :attr:`NAMESPACE_CLASSNAMES` class variable. If the namespaces could be determined, this method sets the following properties of this object: * :attr:`namespace_classname` * :attr:`namespaces` * :attr:`namespace_paths` Otherwise, it raises an exception. Note that there is at least one WBEM server that implements an Interop namespace but does not represent that with a CIM instance. In that case, the :attr:`namespaces` property will include the Interop namespace, but the :attr:`namespace_paths` property will not. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`. ModelError: An issue with the model implemented by the WBEM server. CIMError: CIM_ERR_NOT_FOUND, Interop namespace could not be determined. CIMError: CIM_ERR_NOT_FOUND, Namespace class could not be determined. """ ns_insts = None ns_classname = None interop_ns = self.interop_ns # Determines the Interop namespace for classname in self.NAMESPACE_CLASSNAMES: try: ns_insts = self._conn.EnumerateInstances( classname, namespace=interop_ns) except CIMError as exc: if exc.status_code in (CIM_ERR_INVALID_CLASS, CIM_ERR_NOT_FOUND): # Class is not implemented, try next one. continue else: # Some other error. raise else: # Found a namespace class that is implemented. ns_classname = classname break if ns_insts is None: # Exhausted the possible class names raise CIMError( CIM_ERR_NOT_FOUND, _format("Namespace class could not be determined " "(tried {0!A})", self.NAMESPACE_CLASSNAMES), conn_id=self.conn.conn_id) self._namespace_classname = ns_classname self._namespaces = [inst['Name'] for inst in ns_insts] self._namespace_paths = [inst.path for inst in ns_insts] # An old version of a Hitachi server supports an Interop namespace # named 'interop' but does not represent it with a CIM instance. namespaces_lower = [ns.lower() for ns in self._namespaces] if interop_ns.lower() not in namespaces_lower: warnings.warn( _format("Server at {0} has an Interop namespace {1!A}, but " "does not return it when enumerating class {2!A} " "- adding it to the 'namespaces' property", self.conn.url, interop_ns, ns_classname), ToleratedServerIssueWarning, stacklevel=2) self._namespaces.append(interop_ns)
Determine the names of all namespaces of the WBEM server, by communicating with it and enumerating the instances of a number of possible CIM classes that typically represent CIM namespaces. Their class names are defined in the :attr:`NAMESPACE_CLASSNAMES` class variable. If the namespaces could be determined, this method sets the following properties of this object: * :attr:`namespace_classname` * :attr:`namespaces` * :attr:`namespace_paths` Otherwise, it raises an exception. Note that there is at least one WBEM server that implements an Interop namespace but does not represent that with a CIM instance. In that case, the :attr:`namespaces` property will include the Interop namespace, but the :attr:`namespace_paths` property will not. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`. ModelError: An issue with the model implemented by the WBEM server. CIMError: CIM_ERR_NOT_FOUND, Interop namespace could not be determined. CIMError: CIM_ERR_NOT_FOUND, Namespace class could not be determined.
def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
    """
    A middleware that encodes the response body as JSON when the
    "Content-Type" header is "application/json".

    This middleware accepts an optional `encoder` parameter that allows
    the user to specify their own JSON encoder class.
    """
    if func is None:
        return functools.partial(wrap_json_response, encoder=encoder)

    @functools.wraps(func)
    def wrapper(request, *args, **kwargs):
        response = func(request, *args, **kwargs)
        if "Content-Type" in response.headers and response.headers['Content-Type'] is not None:
            ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
            if ctype == "application/json" and isinstance(response.body, (dict, list)):
                response.body = json.dumps(response.body, cls=encoder)
        return response
    return wrapper

A middleware that encodes the response body as JSON when the "Content-Type" header is "application/json". This middleware accepts an optional `encoder` parameter that allows the user to specify their own JSON encoder class.
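A usage sketch, assuming the decorator and its imports are in scope; the Response class is a hypothetical stand-in for the framework's response type.

class Response:  # hypothetical minimal response object
    def __init__(self, body, headers=None):
        self.body = body
        self.headers = headers or {}

@wrap_json_response
def handler(request):
    return Response({'ok': True}, headers={'Content-Type': 'application/json'})

resp = handler(None)
print(resp.body)  # '{"ok": true}' -- the dict was serialized by the middleware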
def _call_in_reactor_thread(self, f, *args, **kwargs): """Call the given function with args in the reactor thread.""" self._reactor.callFromThread(f, *args, **kwargs)
Call the given function with args in the reactor thread.
def process_terminals(self, word): """ Deal with terminal Es and Ls and convert any uppercase Ys back to lowercase. """ length = len(word) if word[length - 1] == 'e': if self.r2 <= (length - 1): word = word[:-1] elif self.r1 <= (length - 1): if not self.is_short(word[:-1]): word = word[:-1] elif word[length - 1] == 'l': if self.r2 <= (length - 1) and word[length - 2] == 'l': word = word[:-1] char_list = [x if x != 'Y' else 'y' for x in word] word = ''.join(char_list) return word
Deal with terminal Es and Ls and convert any uppercase Ys back to lowercase.
def _optimize(self, maxIter=1000, c1=1.193, c2=1.193, lookback=0.25, standard_dev=None):
    """
    :param maxIter: maximum number of swarm iterations
    :param c1: social weight
    :param c2: personal weight
    :param lookback: how many particles to assess when considering convergence
    :param standard_dev: the standard deviation of the last lookback # of particles
        used to determine convergence
    :return: list of global-best positions, one copy recorded after each
        swarm iteration
    """
    gBests = []
    for swarm in self._sample(maxIter, c1, c2, lookback, standard_dev):
        gBests.append(self._gbest.copy())
    return gBests

:param maxIter: maximum number of swarm iterations :param c1: social weight :param c2: personal weight :param lookback: how many particles to assess when considering convergence :param standard_dev: the standard deviation of the last lookback # of particles used to determine convergence :return: list of global-best positions, one copy recorded after each swarm iteration
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True): """ Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`. When ``schema`` is a list of column names, the type of each column will be inferred from ``data``. When ``schema`` is ``None``, it will try to infer the schema (column names and types) from ``data``, which should be an RDD of :class:`Row`, or :class:`namedtuple`, or :class:`dict`. When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match the real data, or an exception will be thrown at runtime. If the given schema is not :class:`pyspark.sql.types.StructType`, it will be wrapped into a :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value", each record will also be wrapped into a tuple, which can be converted to row later. If schema inference is needed, ``samplingRatio`` is used to determined the ratio of rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``. :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean, etc.), or :class:`list`, or :class:`pandas.DataFrame`. :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of column names, default is ``None``. The data type string format equals to :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use ``int`` as a short name for ``IntegerType``. :param samplingRatio: the sample ratio of rows used for inferring :param verifySchema: verify data types of every row against schema. :return: :class:`DataFrame` .. versionchanged:: 2.1 Added verifySchema. .. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental. >>> l = [('Alice', 1)] >>> spark.createDataFrame(l).collect() [Row(_1=u'Alice', _2=1)] >>> spark.createDataFrame(l, ['name', 'age']).collect() [Row(name=u'Alice', age=1)] >>> d = [{'name': 'Alice', 'age': 1}] >>> spark.createDataFrame(d).collect() [Row(age=1, name=u'Alice')] >>> rdd = sc.parallelize(l) >>> spark.createDataFrame(rdd).collect() [Row(_1=u'Alice', _2=1)] >>> df = spark.createDataFrame(rdd, ['name', 'age']) >>> df.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql import Row >>> Person = Row('name', 'age') >>> person = rdd.map(lambda r: Person(*r)) >>> df2 = spark.createDataFrame(person) >>> df2.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql.types import * >>> schema = StructType([ ... StructField("name", StringType(), True), ... StructField("age", IntegerType(), True)]) >>> df3 = spark.createDataFrame(rdd, schema) >>> df3.collect() [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP [Row(0=1, 1=2)] >>> spark.createDataFrame(rdd, "a: string, b: int").collect() [Row(a=u'Alice', b=1)] >>> rdd = rdd.map(lambda row: row[1]) >>> spark.createDataFrame(rdd, "int").collect() [Row(value=1)] >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... Py4JJavaError: ... 
""" SparkSession._activeSession = self self._jvm.SparkSession.setActiveSession(self._jsparkSession) if isinstance(data, DataFrame): raise TypeError("data is already a DataFrame") if isinstance(schema, basestring): schema = _parse_datatype_string(schema) elif isinstance(schema, (list, tuple)): # Must re-encode any unicode strings to be consistent with StructField names schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema] try: import pandas has_pandas = True except Exception: has_pandas = False if has_pandas and isinstance(data, pandas.DataFrame): from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() if self._wrapped._conf.pandasRespectSessionTimeZone(): timezone = self._wrapped._conf.sessionLocalTimeZone() else: timezone = None # If no schema supplied by user then get the names of columns only if schema is None: schema = [str(x) if not isinstance(x, basestring) else (x.encode('utf-8') if not isinstance(x, str) else x) for x in data.columns] if self._wrapped._conf.arrowEnabled() and len(data) > 0: try: return self._create_from_pandas_with_arrow(data, schema, timezone) except Exception as e: from pyspark.util import _exception_message if self._wrapped._conf.arrowFallbackEnabled(): msg = ( "createDataFrame attempted Arrow optimization because " "'spark.sql.execution.arrow.enabled' is set to true; however, " "failed by the reason below:\n %s\n" "Attempting non-optimization as " "'spark.sql.execution.arrow.fallback.enabled' is set to " "true." % _exception_message(e)) warnings.warn(msg) else: msg = ( "createDataFrame attempted Arrow optimization because " "'spark.sql.execution.arrow.enabled' is set to true, but has reached " "the error below and will not continue because automatic fallback " "with 'spark.sql.execution.arrow.fallback.enabled' has been set to " "false.\n %s" % _exception_message(e)) warnings.warn(msg) raise data = self._convert_from_pandas(data, schema, timezone) if isinstance(schema, StructType): verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True def prepare(obj): verify_func(obj) return obj elif isinstance(schema, DataType): dataType = schema schema = StructType().add("value", schema) verify_func = _make_type_verifier( dataType, name="field value") if verifySchema else lambda _: True def prepare(obj): verify_func(obj) return obj, else: prepare = lambda obj: obj if isinstance(data, RDD): rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio) else: rdd, schema = self._createFromLocal(map(prepare, data), schema) jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd()) jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json()) df = DataFrame(jdf, self._wrapped) df._schema = schema return df
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`. When ``schema`` is a list of column names, the type of each column will be inferred from ``data``. When ``schema`` is ``None``, it will try to infer the schema (column names and types) from ``data``, which should be an RDD of :class:`Row`, or :class:`namedtuple`, or :class:`dict`. When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match the real data, or an exception will be thrown at runtime. If the given schema is not :class:`pyspark.sql.types.StructType`, it will be wrapped into a :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value", each record will also be wrapped into a tuple, which can be converted to row later. If schema inference is needed, ``samplingRatio`` is used to determined the ratio of rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``. :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean, etc.), or :class:`list`, or :class:`pandas.DataFrame`. :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of column names, default is ``None``. The data type string format equals to :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use ``int`` as a short name for ``IntegerType``. :param samplingRatio: the sample ratio of rows used for inferring :param verifySchema: verify data types of every row against schema. :return: :class:`DataFrame` .. versionchanged:: 2.1 Added verifySchema. .. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental. >>> l = [('Alice', 1)] >>> spark.createDataFrame(l).collect() [Row(_1=u'Alice', _2=1)] >>> spark.createDataFrame(l, ['name', 'age']).collect() [Row(name=u'Alice', age=1)] >>> d = [{'name': 'Alice', 'age': 1}] >>> spark.createDataFrame(d).collect() [Row(age=1, name=u'Alice')] >>> rdd = sc.parallelize(l) >>> spark.createDataFrame(rdd).collect() [Row(_1=u'Alice', _2=1)] >>> df = spark.createDataFrame(rdd, ['name', 'age']) >>> df.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql import Row >>> Person = Row('name', 'age') >>> person = rdd.map(lambda r: Person(*r)) >>> df2 = spark.createDataFrame(person) >>> df2.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql.types import * >>> schema = StructType([ ... StructField("name", StringType(), True), ... StructField("age", IntegerType(), True)]) >>> df3 = spark.createDataFrame(rdd, schema) >>> df3.collect() [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP [Row(0=1, 1=2)] >>> spark.createDataFrame(rdd, "a: string, b: int").collect() [Row(a=u'Alice', b=1)] >>> rdd = rdd.map(lambda row: row[1]) >>> spark.createDataFrame(rdd, "int").collect() [Row(value=1)] >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... Py4JJavaError: ...
def fit(self, blocks, y=None): """ Args: blocks (List[Block]): as output by :class:`Blockifier.blockify` y (None): This isn't used, it's only here for API consistency. Returns: :class:`StandardizedFeature`: an instance of this class with the ``self.scaler`` attribute fit to the ``blocks`` data Note: When fitting the :class:`StandardScaler` object, you'll probably want to determine the mean and/or std of *multiple* HTML files' blocks, rather than just a single observation. To do that, just concatenate all of the blocks together in a single iterable. In contrast, you'll typically apply :meth:`transform` to a *single* HTML file's blocks at a time. """ feature_array = self.feature.fit_transform(blocks) self.scaler = self.scaler.fit(feature_array) return self
Args: blocks (List[Block]): as output by :class:`Blockifier.blockify` y (None): This isn't used, it's only here for API consistency. Returns: :class:`StandardizedFeature`: an instance of this class with the ``self.scaler`` attribute fit to the ``blocks`` data Note: When fitting the :class:`StandardScaler` object, you'll probably want to determine the mean and/or std of *multiple* HTML files' blocks, rather than just a single observation. To do that, just concatenate all of the blocks together in a single iterable. In contrast, you'll typically apply :meth:`transform` to a *single* HTML file's blocks at a time.
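A sketch of the fit-on-many, transform-on-one pattern the note above describes; `feat` and the per-page block lists are hypothetical.

# fit the scaler on blocks pooled from many HTML pages...
feat = feat.fit(blocks_page1 + blocks_page2 + blocks_page3)
# ...then standardize a single page's blocks at a time
X = feat.transform(blocks_page1)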
def generate_targets(self, local_go_targets=None): """Generate Go targets in memory to form a complete Go graph. :param local_go_targets: The local Go targets to fill in a complete target graph for. If `None`, then all local Go targets under the Go source root are used. :type local_go_targets: :class:`collections.Iterable` of :class:`pants.contrib.go.targets.go_local_source import GoLocalSource` :returns: A generation result if targets were generated, else `None`. :rtype: :class:`GoBuildgen.GenerationResult` """ # TODO(John Sirois): support multiple source roots like GOPATH does? # The GOPATH's 1st element is read-write, the rest are read-only; ie: their sources build to # the 1st element's pkg/ and bin/ dirs. go_roots_by_category = defaultdict(list) # TODO: Add "find source roots for lang" functionality to SourceRoots and use that instead. for sr in self.context.source_roots.all_roots(): if 'go' in sr.langs: go_roots_by_category[sr.category].append(sr.path) if go_roots_by_category[SourceRootCategories.TEST]: raise self.InvalidLocalRootsError('Go buildgen does not support test source roots.') if go_roots_by_category[SourceRootCategories.UNKNOWN]: raise self.InvalidLocalRootsError('Go buildgen does not support source roots of ' 'unknown category.') local_roots = go_roots_by_category[SourceRootCategories.SOURCE] if not local_roots: raise self.NoLocalRootsError('Can only BUILD gen if a Go local sources source root is ' 'defined.') if len(local_roots) > 1: raise self.InvalidLocalRootsError('Can only BUILD gen for a single Go local sources source ' 'root, found:\n\t{}' .format('\n\t'.join(sorted(local_roots)))) local_root = local_roots.pop() if local_go_targets: unrooted_locals = {t for t in local_go_targets if t.target_base != local_root} if unrooted_locals: raise self.UnrootedLocalSourceError('Cannot BUILD gen until the following targets are ' 'relocated to the source root at {}:\n\t{}' .format(local_root, '\n\t'.join(sorted(t.address.reference() for t in unrooted_locals)))) else: root = os.path.join(get_buildroot(), local_root) local_go_targets = self.context.scan(root=root).targets(self.is_local_src) if not local_go_targets: return None remote_roots = go_roots_by_category[SourceRootCategories.THIRDPARTY] if len(remote_roots) > 1: raise self.InvalidRemoteRootsError('Can only BUILD gen for a single Go remote library source ' 'root, found:\n\t{}' .format('\n\t'.join(sorted(remote_roots)))) remote_root = remote_roots.pop() if remote_roots else None generator = GoTargetGenerator(self.import_oracle, self.context.build_graph, local_root, self.get_fetcher_factory(), generate_remotes=self.get_options().remote, remote_root=remote_root) with self.context.new_workunit('go.buildgen', labels=[WorkUnitLabel.MULTITOOL]): try: generated = generator.generate(local_go_targets) return self.GenerationResult(generated=generated, local_root=local_root, remote_root=remote_root) except generator.GenerationError as e: raise self.GenerationError(e)
Generate Go targets in memory to form a complete Go graph. :param local_go_targets: The local Go targets to fill in a complete target graph for. If `None`, then all local Go targets under the Go source root are used. :type local_go_targets: :class:`collections.Iterable` of :class:`pants.contrib.go.targets.go_local_source import GoLocalSource` :returns: A generation result if targets were generated, else `None`. :rtype: :class:`GoBuildgen.GenerationResult`
def withColumnRenamed(self, existing, new): """Returns a new :class:`DataFrame` by renaming an existing column. This is a no-op if schema doesn't contain the given column name. :param existing: string, name of the existing column to rename. :param new: string, new name of the column. >>> df.withColumnRenamed('age', 'age2').collect() [Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')] """ return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
Returns a new :class:`DataFrame` by renaming an existing column. This is a no-op if schema doesn't contain the given column name. :param existing: string, name of the existing column to rename. :param new: string, new name of the column. >>> df.withColumnRenamed('age', 'age2').collect() [Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
def create_settings(sender, **kwargs): """ create user notification settings on user creation """ created = kwargs['created'] user = kwargs['instance'] if created: UserWebNotificationSettings.objects.create(user=user) UserEmailNotificationSettings.objects.create(user=user)
create user notification settings on user creation
def save(self, prepend_vault_id=''):
    """
    Adds or updates a user's CC to the vault.

    @prepend_vault_id: any string to prepend all vault id's with in case the same
                       braintree account is used by multiple projects/apps.
    """
    assert self.is_valid()

    cc_details_map = {  # cc details
        'number': self.cleaned_data['cc_number'],
        'cardholder_name': self.cleaned_data['name'],
        'expiration_date': '%s/%s' % (self.cleaned_data['expiration_month'],
                                      self.cleaned_data['expiration_year']),
        'cvv': self.cleaned_data['cvv'],
        'billing_address': {
            'postal_code': self.cleaned_data['zip_code'],
        }
    }

    if self.__user_vault:
        try:
            # get customer info, its credit card and then update that credit card
            response = Customer.find(self.__user_vault.vault_id)
            cc_info = response.credit_cards[0]
            return CreditCard.update(cc_info.token, params=cc_details_map)
        except Exception, e:
            logging.error('Was not able to get customer from vault. %s' % e)
            self.__user_vault.delete()  # delete the stale instance from our db

    # in case the above updating fails or user was never in the vault
    new_customer_vault_id = '%s%s' % (prepend_vault_id, md5_hash()[:24])
    response = Customer.create({  # creating a customer, but we really just want to store their CC details
        'id': new_customer_vault_id,  # vault id, uniquely identifies customer. We're not caring about tokens (used for storing multiple CC's per user)
        'credit_card': cc_details_map
    })

    if response.is_success:  # save a new UserVault instance
        UserVault.objects.create(user=self.__user, vault_id=new_customer_vault_id)

    return response

Adds or updates a user's CC to the vault. @prepend_vault_id: any string to prepend all vault id's with in case the same braintree account is used by multiple projects/apps.
def message(self, category, subject, msg_file): """Send message to all users in `category`.""" users = getattr(self.sub, category) if not users: print('There are no {} users on {}.'.format(category, self.sub)) return if msg_file: try: msg = open(msg_file).read() except IOError as error: print(str(error)) return else: print('Enter message:') msg = sys.stdin.read() print('You are about to send the following message to the users {}:' .format(', '.join([str(x) for x in users]))) print('---BEGIN MESSAGE---\n{}\n---END MESSAGE---'.format(msg)) if input('Are you sure? yes/[no]: ').lower() not in ['y', 'yes']: print('Message sending aborted.') return for user in users: user.send_message(subject, msg) print('Sent to: {}'.format(user))
Send message to all users in `category`.
def setBottomLeft(self, loc): """ Move this region so its bottom left corner is on ``loc`` """ offset = self.getBottomLeft().getOffset(loc) # Calculate offset from current bottom left return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
Move this region so its bottom left corner is on ``loc``
def merge_env(self, env):
    """
    Merge ``env`` into the current environment variables.

    :param env: dict of environment variables to merge in; new values
        override existing ones
    :return: None
    """
    # convert to dict to allow update
    current_env = dict(item.split('=') for item in self._env)
    # do validation and set new values.
    self.env = env
    # convert to dict to allow update
    new_env = dict(item.split('=') for item in self._env)
    # update old with new
    current_env.update(new_env)
    # apply updated values
    self.env = current_env

Merge ``env`` into the current environment variables. :param env: dict of environment variables to merge in; new values override existing ones :return: None
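A sketch of the merge semantics, assuming an object whose ``env`` property round-trips through a list of 'KEY=VALUE' strings, as the method above relies on; `obj` is hypothetical.

obj.env = {'PATH': '/usr/bin', 'LANG': 'C'}
obj.merge_env({'LANG': 'en_US.UTF-8', 'TERM': 'xterm'})
# obj.env now keeps PATH, adds TERM, and LANG is overridden to en_US.UTF-8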
def annotate_snapshot(self, snapshot): """ Store additional statistical data in snapshot. """ if hasattr(snapshot, 'classes'): return snapshot.classes = {} for classname in list(self.index.keys()): total = 0 active = 0 merged = Asized(0, 0) for tobj in self.index[classname]: _merge_objects(snapshot.timestamp, merged, tobj) total += tobj.get_size_at_time(snapshot.timestamp) if tobj.birth < snapshot.timestamp and \ (tobj.death is None or tobj.death > snapshot.timestamp): active += 1 try: pct = total * 100.0 / snapshot.total except ZeroDivisionError: # pragma: no cover pct = 0 try: avg = total / active except ZeroDivisionError: avg = 0 snapshot.classes[classname] = dict(sum=total, avg=avg, pct=pct, active=active) snapshot.classes[classname]['merged'] = merged
Store additional statistical data in snapshot.
def _check_link_completion(self, link, fail_pending=False, fail_running=False):
    """Internal function to check the completion of all the dispatched jobs

    Returns
    -------

    status_vect : `JobStatusVector`
        Vector that summarizes the number of jobs in various states.

    """
    status_vect = JobStatusVector()
    for job_key, job_details in link.jobs.items():
        # if job_details.status == JobStatus.failed:
        #    failed = True
        #    continue
        # elif job_details.status == JobStatus.done:
        #    continue
        if job_key.find(JobDetails.topkey) >= 0:
            continue
        job_details.status = self._interface.check_job(job_details)
        if job_details.status == JobStatus.pending:
            if fail_pending:
                job_details.status = JobStatus.failed
        elif job_details.status == JobStatus.running:
            if fail_running:
                job_details.status = JobStatus.failed
        status_vect[job_details.status] += 1
        link.jobs[job_key] = job_details
        link._set_status_self(job_details.jobkey, job_details.status)

    return status_vect

Internal function to check the completion of all the dispatched jobs Returns ------- status_vect : `JobStatusVector` Vector that summarizes the number of jobs in various states.
def validate_VALUERANGE(in_value, restriction):
    """
        Test to ensure that a value sits between a lower and upper bound.
        Parameters: A Decimal value and a tuple, containing a lower and upper
        bound, both as Decimal values.
    """
    if len(restriction) != 2:
        raise ValidationError("Template ERROR: Only two values can be specified in a value range.")

    value = _get_val(in_value)

    if type(value) is list:
        for subval in value:
            if type(subval) is tuple:
                subval = subval[1]
            validate_VALUERANGE(subval, restriction)
    else:
        min_val = Decimal(restriction[0])
        max_val = Decimal(restriction[1])

        val = Decimal(value)

        if val < min_val or val > max_val:
            raise ValidationError("VALUERANGE: %s, %s"%(min_val, max_val))
Test to ensure that a value sits between a lower and upper bound. Parameters: A Decimal value and a tuple, containing a lower and upper bound, both as Decimal values.
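A usage sketch, assuming validate_VALUERANGE, ValidationError, and the _get_val helper are in scope.

from decimal import Decimal

validate_VALUERANGE(Decimal('5'), ('0', '10'))      # in range: passes silently
try:
    validate_VALUERANGE(Decimal('15'), ('0', '10'))
except ValidationError as err:
    print(err)                                      # VALUERANGE: 0, 10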
def query(self, sql_query, return_as="dataframe"):
    """Execute a raw SQL query against the SQL DB.

    Args:
        sql_query (str): A raw SQL query to execute.

    Kwargs:
        return_as (str): Specify what type of object should be returned.
            The following are acceptable types:
            - "dataframe": pandas.DataFrame or None if no matching query
            - "result": sqlalchemy.engine.result.ResultProxy

    Returns:
        result (pandas.DataFrame or sqlalchemy ResultProxy): Query result
            as a DataFrame (default) or sqlalchemy result (specified with
            return_as="result")

    Raises:
        QueryDbError
    """
    if isinstance(sql_query, str):
        pass
    elif isinstance(sql_query, unicode):
        sql_query = str(sql_query)
    else:
        raise QueryDbError("query() requires a str or unicode input.")

    query = sqlalchemy.sql.text(sql_query)

    if return_as.upper() in ["DF", "DATAFRAME"]:
        return self._to_df(query, self._engine)
    elif return_as.upper() in ["RESULT", "RESULTPROXY"]:
        with self._engine.connect() as conn:
            result = conn.execute(query)
            return result
    else:
        raise QueryDbError("Other return types not implemented.")

Execute a raw SQL query against the SQL DB. Args: sql_query (str): A raw SQL query to execute. Kwargs: return_as (str): Specify what type of object should be returned. The following are acceptable types: - "dataframe": pandas.DataFrame or None if no matching query - "result": sqlalchemy.engine.result.ResultProxy Returns: result (pandas.DataFrame or sqlalchemy ResultProxy): Query result as a DataFrame (default) or sqlalchemy result (specified with return_as="result") Raises: QueryDbError
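A hypothetical session; the QueryDb constructor and table names are stand-ins, only the query() call mirrors the method above.

db = QueryDb('sqlite:///example.db')           # hypothetical constructor
df = db.query('SELECT * FROM users LIMIT 5')   # pandas.DataFrame by default
res = db.query('SELECT COUNT(*) FROM users', return_as='result')
print(res.fetchone())                          # sqlalchemy ResultProxy row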
async def nodes(self, *, dc=None, near=None, watch=None, consistency=None): """Lists nodes in a given DC Parameters: dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. near (str): Sort the node list in ascending order based on the estimated round trip time from that node. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: CollectionMeta: where value is a list It returns a body like this:: [ { "Node": "baz", "Address": "10.1.10.11", "TaggedAddresses": { "lan": "10.1.10.11", "wan": "10.1.10.11" } }, { "Node": "foobar", "Address": "10.1.10.12", "TaggedAddresses": { "lan": "10.1.10.11", "wan": "10.1.10.12" } } ] """ params = {"dc": dc, "near": near} response = await self._api.get("/v1/catalog/nodes", params=params, watch=watch, consistency=consistency) return consul(response)
Lists nodes in a given DC Parameters: dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. near (str): Sort the node list in ascending order based on the estimated round trip time from that node. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: CollectionMeta: where value is a list It returns a body like this:: [ { "Node": "baz", "Address": "10.1.10.11", "TaggedAddresses": { "lan": "10.1.10.11", "wan": "10.1.10.11" } }, { "Node": "foobar", "Address": "10.1.10.12", "TaggedAddresses": { "lan": "10.1.10.11", "wan": "10.1.10.12" } } ]
def _get_importer(path_name): """Python version of PyImport_GetImporter C API function""" cache = sys.path_importer_cache try: importer = cache[path_name] except KeyError: # Not yet cached. Flag as using the # standard machinery until we finish # checking the hooks cache[path_name] = None for hook in sys.path_hooks: try: importer = hook(path_name) break except ImportError: pass else: # The following check looks a bit odd. The trick is that # NullImporter throws ImportError if the supplied path is a # *valid* directory entry (and hence able to be handled # by the standard import machinery) try: importer = imp.NullImporter(path_name) except ImportError: return None cache[path_name] = importer return importer
Python version of PyImport_GetImporter C API function
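A quick look at the cache the helper consults and populates; runnable as-is.

import sys

for path in sys.path[:3]:
    # None is the 'use the built-in machinery' sentinel the function
    # stores while the path hooks are being checked
    print(path, '->', sys.path_importer_cache.get(path))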
def nworker(data, smpchunk, tests):
    """ The workhorse function. Not numba. """
    ## tell engines to limit threads
    #numba.config.NUMBA_DEFAULT_NUM_THREADS = 1

    ## open the seqarray view, the modified array is in bootsarr
    with h5py.File(data.database.input, 'r') as io5:
        seqview = io5["bootsarr"][:]
        maparr = io5["bootsmap"][:]

    ## create an N-mask array of all seq cols (this isn't really too slow)
    nall_mask = seqview[:] == 78

    ## tried numba compiling everything below here, but was not faster
    ## than making nmask w/ axis arg in numpy

    ## get the input arrays ready
    rquartets = np.zeros((smpchunk.shape[0], 4), dtype=np.uint16)
    rweights = None
    #rweights = np.ones(smpchunk.shape[0], dtype=np.float64)
    rdstats = np.zeros((smpchunk.shape[0], 4), dtype=np.uint32)

    #times = []
    ## fill arrays with results using numba funcs
    for idx in xrange(smpchunk.shape[0]):
        ## get seqchunk for 4 samples (4, ncols)
        sidx = smpchunk[idx]
        seqchunk = seqview[sidx]

        ## get N-containing columns in 4-array, and invariant sites.
        nmask = np.any(nall_mask[sidx], axis=0)
        nmask += np.all(seqchunk == seqchunk[0], axis=0)  ## <- do we need this?

        ## get matrices if there are any shared SNPs
        ## returns best-tree index, qscores, and qstats
        #bidx, qscores, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
        bidx, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)

        ## get weights from the three scores sorted.
        ## Only save to file if the quartet has information
        rdstats[idx] = qstats
        rquartets[idx] = smpchunk[idx][bidx]

    return rquartets, rweights, rdstats
The workhorse function. Not numba.
def numericalize(self, arr, device=None): """Turn a batch of examples that use this field into a Variable. If the field has include_lengths=True, a tensor of lengths will be included in the return value. Arguments: arr (List[List[str]], or tuple of (List[List[str]], List[int])): List of tokenized and padded examples, or tuple of List of tokenized and padded examples and List of lengths of each example if self.include_lengths is True. device (str or torch.device): A string or instance of `torch.device` specifying which device the Variables are going to be created on. If left as default, the tensors will be created on cpu. Default: None. """ if self.include_lengths and not isinstance(arr, tuple): raise ValueError("Field has include_lengths set to True, but " "input data is not a tuple of " "(data batch, batch lengths).") if isinstance(arr, tuple): arr, lengths = arr lengths = torch.tensor(lengths, dtype=self.dtype, device=device) if self.use_vocab: if self.sequential: arr = [[self.vocab.stoi[x] for x in ex] for ex in arr] else: arr = [self.vocab.stoi[x] for x in arr] if self.postprocessing is not None: arr = self.postprocessing(arr, self.vocab) else: if self.dtype not in self.dtypes: raise ValueError( "Specified Field dtype {} can not be used with " "use_vocab=False because we do not know how to numericalize it. " "Please raise an issue at " "https://github.com/pytorch/text/issues".format(self.dtype)) numericalization_func = self.dtypes[self.dtype] # It doesn't make sense to explicitly coerce to a numeric type if # the data is sequential, since it's unclear how to coerce padding tokens # to a numeric type. if not self.sequential: arr = [numericalization_func(x) if isinstance(x, six.string_types) else x for x in arr] if self.postprocessing is not None: arr = self.postprocessing(arr, None) var = torch.tensor(arr, dtype=self.dtype, device=device) if self.sequential and not self.batch_first: var.t_() if self.sequential: var = var.contiguous() if self.include_lengths: return var, lengths return var
Turn a batch of examples that use this field into a Variable. If the field has include_lengths=True, a tensor of lengths will be included in the return value. Arguments: arr (List[List[str]], or tuple of (List[List[str]], List[int])): List of tokenized and padded examples, or tuple of List of tokenized and padded examples and List of lengths of each example if self.include_lengths is True. device (str or torch.device): A string or instance of `torch.device` specifying which device the Variables are going to be created on. If left as default, the tensors will be created on cpu. Default: None.
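A sketch of the include_lengths path, assuming the legacy torchtext Field API this method belongs to; the toy batch is already tokenized and padded.

from torchtext.data import Field  # legacy (pre-0.9) torchtext API

field = Field(include_lengths=True)
field.build_vocab([['hello', 'world']])
batch = ([['hello', 'world'], ['world', '<pad>']], [2, 1])
tensor, lengths = field.numericalize(batch)
# tensor: (seq_len, batch) LongTensor; lengths: tensor([2, 1])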
def setRpms(self, package, build, build_ts, rpms): """Add/Update package rpm """ self._builds[package] = {"build": build, "build_ts": build_ts, "rpms": rpms}
Add/Update package rpm