code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def TableDrivenVacuumAgent():
    """Tabular vacuum-world agent [Fig. 2.3].

    Maps (truncated) percept histories to actions through an explicit
    lookup table and wraps the resulting program in an Agent.
    """
    percept_to_action = {
        ((loc_A, 'Clean'),): 'Right',
        ((loc_A, 'Dirty'),): 'Suck',
        ((loc_B, 'Clean'),): 'Left',
        ((loc_B, 'Dirty'),): 'Suck',
        ((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
        ((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
        # ...
        ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
        ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
        # ...
    }
    return Agent(TableDrivenAgentProgram(percept_to_action))
[Fig. 2.3]
def get(self, element, selected=False):
    """Look up the curses attribute code registered for *element*.

    When *selected* is true (or the theme itself is in selected mode),
    the '@'-prefixed variant of the element is looked up instead.
    """
    attribute_map = self._attribute_map
    if attribute_map is None:
        raise RuntimeError('Attempted to access theme attribute before '
                           'calling initialize_curses_theme()')
    key = '@{0}'.format(element) if (selected or self._selected) else element
    return attribute_map[key]
Returns the curses attribute code for the given element.
def _header_poster(self, uri, headers): """POST Headers on a specified object in the container. :param uri: ``str`` :param headers: ``dict`` """ resp = self.http.post(url=uri, body=None, headers=headers) self._resp_exception(resp=resp) return resp
POST Headers on a specified object in the container. :param uri: ``str`` :param headers: ``dict``
def load_index(self, filename, reindex=False):
    """Restore the Layout's index from a plaintext (JSON) file.

    Args:
        filename (str): Path to the plaintext index file.
        reindex (bool): If True, discard the entity values stored in the
            index and re-index every listed file against the entities
            defined in the config. If False (default), trust the entity
            definitions recorded in the index as-is.

    Note:
        Directory-specific config files are not serialized, so a
        reconstructed index is only reliable when the project uses a
        single layout spec.
    """
    self._reset_index()
    with open(filename, 'r') as fobj:
        index_data = json.load(fobj)
    for path, entry in index_data.items():
        stored_entities, domains = entry['entities'], entry['domains']
        root, fname = dirname(path), basename(path)
        if reindex:
            # Re-derive entity values from scratch against the config.
            self._index_file(root, fname, domains)
            continue
        # Trust the serialized entity values.
        file_obj = self._make_file_object(root, fname)
        file_obj.tags = {name: Tag(self.entities[name], value)
                         for name, value in stored_entities.items()}
        self.files[file_obj.path] = file_obj
        for ent_name, ent_val in file_obj.entities.items():
            self.entities[ent_name].add_file(file_obj.path, ent_val)
Load the Layout's index from a plaintext file. Args: filename (str): Path to the plaintext index file. reindex (bool): If True, discards entity values provided in the loaded index and instead re-indexes every file in the loaded index against the entities defined in the config. Default is False, in which case it is assumed that all entity definitions in the loaded index are correct and do not need any further validation. Note: At the moment, directory-specific config files aren't serialized. This means reconstructed indexes will only work properly in cases where there aren't multiple layout specs within a project.
def image_convert(fname, saveAs=True, showToo=False):
    """
    Convert weird TIF files into web-friendly versions.
    Auto contrast is applied, saturating the lowest 0.01% and highest
    0.01% of pixel values.

    make saveAs True to save as .TIF.png
    make saveAs False and it won't save at all
    make saveAs "someFile.jpg" to save it as a different path/format
    """
    # load the image
    #im = Image.open(fname) #PIL can't handle 12-bit TIFs well
    # NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2;
    # imageio.imread is the usual drop-in replacement — confirm before upgrading.
    im = scipy.ndimage.imread(fname)  # scipy does better with 12-bit TIFs
    im = np.array(im, dtype=float)  # now it's a numpy array

    # do all image enhancement here: clip the extreme 0.01% tails so a few
    # hot/dead pixels don't dominate the contrast stretch
    cutoffLow = np.percentile(im, .01)
    cutoffHigh = np.percentile(im, 99.99)
    im[np.where(im < cutoffLow)] = cutoffLow
    im[np.where(im > cutoffHigh)] = cutoffHigh

    # IMAGE FORMATTING
    im -= np.min(im)  # auto contrast (shift minimum to 0)
    peak = np.max(im)
    if peak > 0:  # guard: a flat single-valued image would divide by zero
        im /= peak  # normalize to [0, 1]
    im *= 255  # stretch contrast (8-bit)
    im = Image.fromarray(im)

    # IMAGE DRAWING: stamp filename and creation time onto the image,
    # drawn twice (offset dark + light) for legibility on any background
    msg = "Filename: %s\n" % os.path.basename(fname)
    timestamp = datetime.datetime.fromtimestamp(os.path.getctime(fname))
    msg += "Created: %s\n" % timestamp.strftime('%Y-%m-%d %H:%M:%S')
    d = ImageDraw.Draw(im)
    fnt = ImageFont.truetype("arial.ttf", 20)
    d.text((6, 6), msg, font=fnt, fill=0)
    d.text((4, 4), msg, font=fnt, fill=255)

    if showToo:
        im.show()
    if saveAs is False:
        return
    if saveAs is True:
        saveAs = fname + ".png"
    im.convert('RGB').save(saveAs)
Convert weird TIF files into web-friendly versions. Auto contrast is applied (saturating lower and upper 0.1%). make saveAs True to save as .TIF.png make saveAs False and it won't save at all make saveAs "someFile.jpg" to save it as a different path/format
def inverse_kinematic_optimization(chain, target_frame, starting_nodes_angles, regularization_parameter=None, max_iter=None):
    """Compute the inverse kinematics of ``chain`` toward ``target_frame``
    with scipy's L-BFGS-B optimizer.

    Parameters
    ----------
    chain: ikpy.chain.Chain
        The chain used for the inverse kinematics.
    target_frame: numpy.array
        The desired target (homogeneous transform); only the position
        part is used.
    starting_nodes_angles: numpy.array
        The initial pose of your chain.
    regularization_parameter: float
        Coefficient of the regularization term; None disables it.
    max_iter: int
        Maximum number of iterations for the optimisation algorithm.
    """
    # Only the translation component of the frame is targeted.
    target_position = target_frame[:3, 3]
    if starting_nodes_angles is None:
        raise ValueError("starting_nodes_angles must be specified")

    def distance_to_target(active_joints):
        # Distance between the end-effector position and the target.
        full_joints = chain.active_to_full(active_joints, starting_nodes_angles)
        return np.linalg.norm(chain.forward_kinematics(full_joints)[:3, -1] - target_position)

    if regularization_parameter is None:
        cost = distance_to_target
    else:
        def cost(active_joints):
            # Penalize drifting away from the starting pose.
            penalty = np.linalg.norm(active_joints - starting_nodes_angles[chain.first_active_joint:])
            return distance_to_target(active_joints) + regularization_parameter * penalty

    # Joint limits, restricted to the active links.
    active_bounds = chain.active_from_full([link.bounds for link in chain.links])

    options = {}
    if max_iter is not None:
        options["maxiter"] = max_iter

    # L-BFGS-B optimization
    res = scipy.optimize.minimize(cost, chain.active_from_full(starting_nodes_angles), method='L-BFGS-B', bounds=active_bounds, options=options)
    logs.manager.info("Inverse kinematic optimisation OK, done in {} iterations".format(res.nit))
    return chain.active_to_full(res.x, starting_nodes_angles)
Computes the inverse kinematic on the specified target with an optimization method Parameters ---------- chain: ikpy.chain.Chain The chain used for the Inverse kinematics. target_frame: numpy.array The desired target. starting_nodes_angles: numpy.array The initial pose of your chain. regularization_parameter: float The coefficient of the regularization. max_iter: int Maximum number of iterations for the optimisation algorithm.
def reverse_pivot_to_fact(self, staging_table, piv_column, piv_list, from_column, meas_names, meas_values, new_line):
    """
    For each column in the piv_list, append ALL from_column's using the
    group_list

    e.g.
    Input Table
    YEAR  Person  Q1     Q2
    2010  Fred    Spain  14
    2010  Jane    Spain  13.995

    Output Table
    Year  Person  Question  Result
    2010  Fred    Q1        Spain
    2010  Fred    Q2        14
    2010  Jane    Q1        Spain
    2010  Jane    Q2        13.995

    You would use:
    from_column = [YEAR, Person]
    pivot_column = 'Question'
    piv_list = [Q1, Q2]
    meas_names = [Result]
    meas_values = [Result]  # this can be SQL such as count(*) or count(distinct COL_NAME)

    To get the result:
    INSERT INTO C_UES2014_FT ( YEAR, Person, Question, Result, REC_EXTRACT_DATE) (
    SELECT YEAR, Person, 'Q1', Q1, SYSDATE FROM S_UES2014_RAW);
    INSERT INTO C_UES2014_FT ( YEAR, Person, Question, Result, REC_EXTRACT_DATE) (
    SELECT YEAR, Person, 'Q2', Q2, SYSDATE FROM S_UES2014_RAW);
    COMMIT;
    """
    self.sql_text += '\n-----------------------------\n--Reverse Pivot\n--------------------------\n\n'
    # num_chars_on_line wraps the generated column list at ~100 chars
    # purely for readability of the emitted SQL.
    num_chars_on_line = 0
    # One INSERT ... SELECT statement per pivoted column.
    for piv_num in range(len(piv_list)):
        # INSERT columns
        self.sql_text += 'INSERT INTO ' + self.fact_table + ' (' + new_line
        if piv_column not in from_column:
            self.sql_text += piv_column + ', '
            #pass
        for g in meas_names:
            self.sql_text += g + ', '
        for c in from_column:
            if c not in meas_names:
                if c == piv_column:  # dont insert the same column twice
                    # Hard stop: a pivot column listed among the fact
                    # columns would be emitted twice in the INSERT list.
                    print("Error - do NOT specify pivot column in the fact list")
                    exit(1)
                else:
                    self.sql_text += c + ', '
                    num_chars_on_line += len(c) + 2
                    if num_chars_on_line > 100:
                        self.sql_text += new_line
                        num_chars_on_line = 0
        # Extract-date audit column closes the INSERT column list.
        self.sql_text += '' + self.date_updated_col + ') (\n'
        # SELECT columns
        self.sql_text += 'SELECT ' + new_line
        num_chars_on_line = 0
        # The pivoted column's NAME becomes a string literal value.
        self.sql_text += "'" + piv_list[piv_num] + "', "
        for meas_num, _ in enumerate(meas_names):
            # Empty meas_value means "select the pivot column itself";
            # otherwise the caller-supplied SQL expression is used.
            if meas_values[meas_num] == '':
                self.sql_text += piv_list[piv_num] + ', '
            else:
                self.sql_text += meas_values[meas_num] + ', '
        for c in from_column:
            if c not in meas_names:  # dont insert the same column twice
                self.sql_text += c + ', '
                num_chars_on_line += len(c) + 2
                if num_chars_on_line > 100:
                    self.sql_text += new_line
                    num_chars_on_line = 0
        self.sql_text += 'SYSDATE \nFROM ' + staging_table
        self.sql_text += ');\n'
For each column in the piv_list, append ALL from_column's using the group_list e.g. Input Table YEAR Person Q1 Q2 2010 Fred Spain 14 2010 Jane Spain 13.995 Output Table Year Person Question Result 2010 Fred Q1 Spain 2010 Fred Q2 14 2010 Jane Q1 Spain 2010 Jane Q2 13.995 You would use: from_column = [YEAR, Person] pivot_column = 'Question' piv_list = [Q1, Q2] meas_names = [Result] meas_values = [Result] # this can be SQL such as count(*) or count(distinct COL_NAME) To get the result: INSERT INTO C_UES2014_FT ( YEAR, Person, Question, Result, REC_EXTRACT_DATE) ( SELECT YEAR, Person, 'Q1', Q1, SYSDATE FROM S_UES2014_RAW); INSERT INTO C_UES2014_FT ( YEAR, Person, Question, Result, REC_EXTRACT_DATE) ( SELECT YEAR, Person, 'Q2', Q2, SYSDATE FROM S_UES2014_RAW); COMMIT;
def hdbscan(X, min_cluster_size=5, min_samples=None, alpha=1.0,
            metric='minkowski', p=2, leaf_size=40,
            algorithm='best', memory=Memory(cachedir=None, verbose=0),
            approx_min_span_tree=True,
            gen_min_span_tree=False,
            core_dist_n_jobs=4,
            cluster_selection_method='eom',
            allow_single_cluster=False,
            match_reference_implementation=False, **kwargs):
    """Perform HDBSCAN clustering from a vector array or distance matrix.

    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
            array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.

    min_cluster_size : int, optional (default=5)
        The minimum number of samples in a group for that group to be
        considered a cluster; groupings smaller than this size will be
        left as noise.

    min_samples : int, optional (default=None)
        The number of samples in a neighborhood for a point to be
        considered as a core point. This includes the point itself.
        defaults to the min_cluster_size.

    alpha : float, optional (default=1.0)
        A distance scaling parameter as used in robust single linkage.
        See [2]_ for more information.

    metric : string or callable, optional (default='minkowski')
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter. If metric is "precomputed", X is assumed to be a
        distance matrix and must be square.

    p : int, optional (default=2)
        p value to use if using the minkowski metric.

    leaf_size : int, optional (default=40)
        Leaf size for trees responsible for fast nearest neighbour queries.

    algorithm : string, optional (default='best')
        Exactly which algorithm to use; hdbscan has variants specialised
        for different characteristics of the data. By default this is set
        to ``best`` which chooses the "best" algorithm given the nature of
        the data. You can force other options if you believe you know
        better. Options are:
            * ``best``
            * ``generic``
            * ``prims_kdtree``
            * ``prims_balltree``
            * ``boruvka_kdtree``
            * ``boruvka_balltree``

    memory : instance of joblib.Memory or string, optional
        Used to cache the output of the computation of the tree.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    approx_min_span_tree : bool, optional (default=True)
        Whether to accept an only approximate minimum spanning tree.
        For some algorithms this can provide a significant speedup, but
        the resulting clustering may be of marginally lower quality.
        If you are willing to sacrifice speed for correctness you may want
        to explore this; in general this should be left at the default
        True.

    gen_min_span_tree : bool, optional (default=False)
        Whether to generate the minimum spanning tree for later analysis.

    core_dist_n_jobs : int, optional (default=4)
        Number of parallel jobs to run in core distance computations (if
        supported by the specific algorithm). For ``core_dist_n_jobs``
        below -1, (n_cpus + 1 + core_dist_n_jobs) are used.

    cluster_selection_method : string, optional (default='eom')
        The method used to select clusters from the condensed tree. The
        standard approach for HDBSCAN* is to use an Excess of Mass
        algorithm to find the most persistent clusters. Alternatively you
        can instead select the clusters at the leaves of the tree -- this
        provides the most fine grained and homogeneous clusters. Options
        are:
            * ``eom``
            * ``leaf``

    allow_single_cluster : bool, optional (default=False)
        By default HDBSCAN* will not produce a single cluster, setting
        this to True will override this and allow single cluster results
        in the case that you feel this is a valid result for your dataset.
        (default False)

    match_reference_implementation : bool, optional (default=False)
        There exist some interpretational differences between this
        HDBSCAN* implementation and the original authors reference
        implementation in Java. This can result in very minor differences
        in clustering results. Setting this flag to True will, at some
        performance cost, ensure that the clustering results match the
        reference implementation.

    **kwargs : optional
        Arguments passed to the distance metric

    Returns
    -------
    labels : ndarray, shape (n_samples, )
        Cluster labels for each point. Noisy samples are given the label
        -1.

    probabilities : ndarray, shape (n_samples, )
        Cluster membership strengths for each point. Noisy samples are
        assigned 0.

    cluster_persistence : array, shape (n_clusters, )
        A score of how persistent each cluster is. A score of 1.0
        represents a perfectly stable cluster that persists over all
        distance scales, while a score of 0.0 represents a perfectly
        ephemeral cluster. These scores can be used to gauge the relative
        coherence of the clusters output by the algorithm.

    condensed_tree : record array
        The condensed cluster hierarchy used to generate clusters.

    single_linkage_tree : ndarray, shape (n_samples - 1, 4)
        The single linkage tree produced during clustering in scipy
        hierarchical clustering format
        (see http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html).

    min_spanning_tree : ndarray, shape (n_samples - 1, 3)
        The minimum spanning tree as an edgelist. If gen_min_span_tree
        was False this will be None.

    References
    ----------
    .. [1] Campello, R. J., Moulavi, D., & Sander, J. (2013, April).
       Density-based clustering based on hierarchical density estimates.
       In Pacific-Asia Conference on Knowledge Discovery and Data Mining
       (pp. 160-172). Springer Berlin Heidelberg.

    .. [2] Chaudhuri, K., & Dasgupta, S. (2010). Rates of convergence for
       the cluster tree. In Advances in Neural Information Processing
       Systems (pp. 343-351).
    """
    # --- Parameter validation -------------------------------------------
    if min_samples is None:
        min_samples = min_cluster_size

    if type(min_samples) is not int or type(min_cluster_size) is not int:
        raise ValueError('Min samples and min cluster size must be integers!')

    if min_samples <= 0 or min_cluster_size <= 0:
        raise ValueError('Min samples and Min cluster size must be positive'
                         ' integers')

    if min_cluster_size == 1:
        raise ValueError('Min cluster size must be greater than one')

    if not isinstance(alpha, float) or alpha <= 0.0:
        raise ValueError('Alpha must be a positive float value greater than'
                         ' 0!')

    if leaf_size < 1:
        raise ValueError('Leaf size must be greater than 0!')

    if metric == 'minkowski':
        if p is None:
            raise TypeError('Minkowski metric given but no p value supplied!')
        if p < 0:
            raise ValueError('Minkowski metric with negative p value is not'
                             ' defined!')

    if match_reference_implementation:
        # The reference (Java) implementation counts neighborhoods and
        # cluster sizes slightly differently; adjust to match.
        min_samples = min_samples - 1
        min_cluster_size = min_cluster_size + 1
        approx_min_span_tree = False

    if cluster_selection_method not in ('eom', 'leaf'):
        raise ValueError('Invalid Cluster Selection Method: %s\n'
                         'Should be one of: "eom", "leaf"\n')

    # Checks input and converts to an nd-array where possible
    if metric != 'precomputed' or issparse(X):
        X = check_array(X, accept_sparse='csr')
    else:
        # Only non-sparse, precomputed distance matrices are handled here
        # and thereby allowed to contain numpy.inf for missing distances
        check_precomputed_distance_matrix(X)

    # Python 2 and 3 compliant string_type checking
    if isinstance(memory, six.string_types):
        memory = Memory(cachedir=memory, verbose=0)

    # min_samples can never exceed the number of other points available.
    size = X.shape[0]
    min_samples = min(size - 1, min_samples)
    if min_samples == 0:
        min_samples = 1

    # --- Algorithm dispatch ---------------------------------------------
    if algorithm != 'best':
        # NOTE(review): this condition compares *metric* against
        # 'generic', but 'generic' is an algorithm name — it looks like
        # it should be ``algorithm != 'generic'``; confirm upstream.
        if metric != 'precomputed' and issparse(X) and metric != 'generic':
            raise ValueError("Sparse data matrices only support algorithm 'generic'.")

        if algorithm == 'generic':
            (single_linkage_tree, result_min_span_tree) = memory.cache(
                _hdbscan_generic)(X, min_samples, alpha, metric,
                                  p, leaf_size, gen_min_span_tree, **kwargs)
        elif algorithm == 'prims_kdtree':
            if metric not in KDTree.valid_metrics:
                raise ValueError("Cannot use Prim's with KDTree for this"
                                 " metric!")
            (single_linkage_tree, result_min_span_tree) = memory.cache(
                _hdbscan_prims_kdtree)(X, min_samples, alpha,
                                       metric, p, leaf_size,
                                       gen_min_span_tree, **kwargs)
        elif algorithm == 'prims_balltree':
            if metric not in BallTree.valid_metrics:
                raise ValueError("Cannot use Prim's with BallTree for this"
                                 " metric!")
            (single_linkage_tree, result_min_span_tree) = memory.cache(
                _hdbscan_prims_balltree)(X, min_samples, alpha,
                                         metric, p, leaf_size,
                                         gen_min_span_tree, **kwargs)
        elif algorithm == 'boruvka_kdtree':
            # NOTE(review): checks BallTree.valid_metrics although the
            # error message (and the KD-tree backend) suggest
            # KDTree.valid_metrics was intended — verify before changing.
            if metric not in BallTree.valid_metrics:
                raise ValueError("Cannot use Boruvka with KDTree for this"
                                 " metric!")
            (single_linkage_tree, result_min_span_tree) = memory.cache(
                _hdbscan_boruvka_kdtree)(X, min_samples, alpha,
                                         metric, p, leaf_size,
                                         approx_min_span_tree,
                                         gen_min_span_tree,
                                         core_dist_n_jobs, **kwargs)
        elif algorithm == 'boruvka_balltree':
            if metric not in BallTree.valid_metrics:
                raise ValueError("Cannot use Boruvka with BallTree for this"
                                 " metric!")
            (single_linkage_tree, result_min_span_tree) = memory.cache(
                _hdbscan_boruvka_balltree)(X, min_samples, alpha,
                                           metric, p, leaf_size,
                                           approx_min_span_tree,
                                           gen_min_span_tree,
                                           core_dist_n_jobs, **kwargs)
        else:
            raise TypeError('Unknown algorithm type %s specified' % algorithm)
    else:
        # algorithm == 'best': pick a backend from the data's properties.
        if issparse(X) or metric not in FAST_METRICS:
            # We can't do much with sparse matrices ...
            (single_linkage_tree, result_min_span_tree) = memory.cache(
                _hdbscan_generic)(X, min_samples,
                                  alpha, metric, p, leaf_size,
                                  gen_min_span_tree, **kwargs)
        elif metric in KDTree.valid_metrics:
            # TO DO: Need heuristic to decide when to go to boruvka;
            # still debugging for now
            if X.shape[1] > 60:
                (single_linkage_tree, result_min_span_tree) = memory.cache(
                    _hdbscan_prims_kdtree)(X, min_samples, alpha,
                                           metric, p, leaf_size,
                                           gen_min_span_tree, **kwargs)
            else:
                (single_linkage_tree, result_min_span_tree) = memory.cache(
                    _hdbscan_boruvka_kdtree)(X, min_samples, alpha,
                                             metric, p, leaf_size,
                                             approx_min_span_tree,
                                             gen_min_span_tree,
                                             core_dist_n_jobs, **kwargs)
        else:
            # Metric is a valid BallTree metric
            # TO DO: Need heuristic to decide when to go to boruvka;
            # still debugging for now
            if X.shape[1] > 60:
                (single_linkage_tree, result_min_span_tree) = memory.cache(
                    _hdbscan_prims_balltree)(X, min_samples, alpha,
                                             metric, p, leaf_size,
                                             gen_min_span_tree, **kwargs)
            else:
                (single_linkage_tree, result_min_span_tree) = memory.cache(
                    _hdbscan_boruvka_balltree)(X, min_samples, alpha,
                                               metric, p, leaf_size,
                                               approx_min_span_tree,
                                               gen_min_span_tree,
                                               core_dist_n_jobs, **kwargs)

    # Condense the single-linkage tree into cluster labels and append the
    # (possibly None) minimum spanning tree to the result tuple.
    return _tree_to_labels(X,
                           single_linkage_tree,
                           min_cluster_size,
                           cluster_selection_method,
                           allow_single_cluster,
                           match_reference_implementation) + \
        (result_min_span_tree,)
Perform HDBSCAN clustering from a vector array or distance matrix. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. min_cluster_size : int, optional (default=5) The minimum number of samples in a group for that group to be considered a cluster; groupings smaller than this size will be left as noise. min_samples : int, optional (default=None) The number of samples in a neighborhood for a point to be considered as a core point. This includes the point itself. defaults to the min_cluster_size. alpha : float, optional (default=1.0) A distance scaling parameter as used in robust single linkage. See [2]_ for more information. metric : string or callable, optional (default='minkowski') The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. p : int, optional (default=2) p value to use if using the minkowski metric. leaf_size : int, optional (default=40) Leaf size for trees responsible for fast nearest neighbour queries. algorithm : string, optional (default='best') Exactly which algorithm to use; hdbscan has variants specialised for different characteristics of the data. By default this is set to ``best`` which chooses the "best" algorithm given the nature of the data. You can force other options if you believe you know better. Options are: * ``best`` * ``generic`` * ``prims_kdtree`` * ``prims_balltree`` * ``boruvka_kdtree`` * ``boruvka_balltree`` memory : instance of joblib.Memory or string, optional Used to cache the output of the computation of the tree. By default, no caching is done. If a string is given, it is the path to the caching directory. 
approx_min_span_tree : bool, optional (default=True) Whether to accept an only approximate minimum spanning tree. For some algorithms this can provide a significant speedup, but the resulting clustering may be of marginally lower quality. If you are willing to sacrifice speed for correctness you may want to explore this; in general this should be left at the default True. gen_min_span_tree : bool, optional (default=False) Whether to generate the minimum spanning tree for later analysis. core_dist_n_jobs : int, optional (default=4) Number of parallel jobs to run in core distance computations (if supported by the specific algorithm). For ``core_dist_n_jobs`` below -1, (n_cpus + 1 + core_dist_n_jobs) are used. cluster_selection_method : string, optional (default='eom') The method used to select clusters from the condensed tree. The standard approach for HDBSCAN* is to use an Excess of Mass algorithm to find the most persistent clusters. Alternatively you can instead select the clusters at the leaves of the tree -- this provides the most fine grained and homogeneous clusters. Options are: * ``eom`` * ``leaf`` allow_single_cluster : bool, optional (default=False) By default HDBSCAN* will not produce a single cluster, setting this to t=True will override this and allow single cluster results in the case that you feel this is a valid result for your dataset. (default False) match_reference_implementation : bool, optional (default=False) There exist some interpretational differences between this HDBSCAN* implementation and the original authors reference implementation in Java. This can result in very minor differences in clustering results. Setting this flag to True will, at a some performance cost, ensure that the clustering results match the reference implementation. **kwargs : optional Arguments passed to the distance metric Returns ------- labels : ndarray, shape (n_samples, ) Cluster labels for each point. Noisy samples are given the label -1. 
probabilities : ndarray, shape (n_samples, ) Cluster membership strengths for each point. Noisy samples are assigned 0. cluster_persistence : array, shape (n_clusters, ) A score of how persistent each cluster is. A score of 1.0 represents a perfectly stable cluster that persists over all distance scales, while a score of 0.0 represents a perfectly ephemeral cluster. These scores can be guage the relative coherence of the clusters output by the algorithm. condensed_tree : record array The condensed cluster hierarchy used to generate clusters. single_linkage_tree : ndarray, shape (n_samples - 1, 4) The single linkage tree produced during clustering in scipy hierarchical clustering format (see http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html). min_spanning_tree : ndarray, shape (n_samples - 1, 3) The minimum spanning as an edgelist. If gen_min_span_tree was False this will be None. References ---------- .. [1] Campello, R. J., Moulavi, D., & Sander, J. (2013, April). Density-based clustering based on hierarchical density estimates. In Pacific-Asia Conference on Knowledge Discovery and Data Mining (pp. 160-172). Springer Berlin Heidelberg. .. [2] Chaudhuri, K., & Dasgupta, S. (2010). Rates of convergence for the cluster tree. In Advances in Neural Information Processing Systems (pp. 343-351).
def cli(ctx, feature_id, old_db, old_accession, new_db, new_accession, organism="", sequence=""):
    """Update a dbxref on a feature

    (The previous help text said "Delete", but this command delegates to
    ``update_dbxref``.)

    Output:

        A standard apollo feature dictionary ({"features": [{...}]})
    """
    return ctx.gi.annotations.update_dbxref(feature_id, old_db, old_accession, new_db, new_accession, organism=organism, sequence=sequence)
Update a dbxref on a feature Output: A standard apollo feature dictionary ({"features": [{...}]})
def get_service_account_token(request, service_account='default'):
    """Get the OAuth 2.0 access token for a service account.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        service_account (str): The string 'default' or a service account
            email address. This determines the service account for which
            to acquire an access token.

    Returns:
        Tuple[str, datetime]: The access token and its expiration time.

    Raises:
        google.auth.exceptions.TransportError: if an error occurred while
            retrieving metadata.
    """
    token_json = get(
        request,
        'instance/service-accounts/{0}/token'.format(service_account))
    # Expiration is reported relative to "now" in seconds; convert to an
    # absolute timestamp for the caller.
    token_expiry = _helpers.utcnow() + datetime.timedelta(
        seconds=token_json['expires_in'])
    return token_json['access_token'], token_expiry
Get the OAuth 2.0 access token for a service account. Args: request (google.auth.transport.Request): A callable used to make HTTP requests. service_account (str): The string 'default' or a service account email address. This determines the service account for which to acquire an access token. Returns: Tuple[str, datetime]: The access token and its expiration. Raises: google.auth.exceptions.TransportError: if an error occurred while retrieving metadata.
def _updateFrame(self): """ Updates the frame for the given sender. """ mov = self.movie() if mov: self.setIcon(QtGui.QIcon(mov.currentPixmap()))
Updates the frame for the given sender.
def write_string(self, string):
    """Serialize *string* as a length-prefixed UTF-8 byte buffer.

    The byte length is written first (via ``write_int``), followed by
    the raw UTF-8 bytes.
    """
    encoded = string.encode('UTF-8')
    self.write_int(len(encoded))
    self.write(encoded)
Writes a string to the underlying output file as a buffer of chars with UTF-8 encoding.
def _parse_byte_data(self, byte_data):
    """Populate the instance fields from a raw big-endian byte string."""
    # Header layout (28 bytes total): 4 ASCII type chars, run (i),
    # udp_sequence (i), timestamp (I), ns_ticks (I), dom_id (i),
    # dom status bits (I) — all big-endian.
    (raw_type, self.run, self.udp_sequence, self.timestamp,
     self.ns_ticks, self.dom_id, status_bits) = unpack('>4siiIIiI', byte_data[:28])
    self.data_type = raw_type.decode()
    # DOM status is stored as a 32-character binary string.
    self.dom_status = "{0:032b}".format(status_bits)
    self.human_readable_timestamp = datetime.datetime.fromtimestamp(
        int(self.timestamp), UTC_TZ
    ).strftime('%Y-%m-%d %H:%M:%S')
Extract the values from byte string.
def _update_record(self, identifier=None, rtype=None, name=None, content=None):  # pylint: disable=too-many-locals,too-many-branches
    """
    Connects to Hetzner account, changes an existing record and returns a
    boolean, if update was successful or not. Needed identifier or
    rtype & name to lookup over all records of the zone for exactly one
    record to update.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters: either resolve the identifier into
        # (type, name, content), or require all three to be given.
        if identifier:
            dtype, dname, dcontent = self._parse_identifier(identifier, ddata['zone']['data'])
            if dtype and dname and dcontent:
                # Explicit arguments override the identifier-derived values.
                rtype = rtype if rtype else dtype
                name = name if name else dname
                content = content if content else dcontent
            else:
                LOGGER.warning('Hetzner => Record with identifier \'%s\' does not exist',
                               identifier)
                return False
        elif rtype and name and content:
            dtype, dname, dcontent = rtype, name, None
        else:
            LOGGER.warning('Hetzner => Record has no rtype|name|content specified')
            return False

        # A zone behind a CNAME is addressed via that CNAME instead of
        # the fully-qualified record name.
        dname = ddata['cname'] if ddata['cname'] else self._fqdn_name(dname)
        records = self._list_records_in_zone(ddata['zone']['data'], dtype, dname, dcontent)
        # Only proceed when the lookup matched exactly one record.
        if len(records) == 1:
            # Remove record from zone: rebuild the rdataset without the
            # matched record's content (or drop the rdataset entirely).
            rrset = ddata['zone']['data'].get_rdataset(records[0]['name'] + '.',
                                                       rdtype=records[0]['type'])
            rdatas = []
            for rdata in rrset:
                if self._convert_content(records[0]['type'],
                                         records[0]['content']) != rdata.to_text():
                    rdatas.append(rdata.to_text())
            if rdatas:
                rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                       records[0]['ttl'], rdatas)
                ddata['zone']['data'].replace_rdataset(records[0]['name'] + '.', rdataset)
            else:
                ddata['zone']['data'].delete_rdataset(records[0]['name'] + '.',
                                                      records[0]['type'])
            # Add record to zone with the (possibly new) name and content.
            name = ddata['cname'] if ddata['cname'] else self._fqdn_name(name)
            rrset = ddata['zone']['data'].get_rdataset(name, rdtype=rtype, create=True)
            synced_change = False
            for rdata in rrset:
                if self._convert_content(rtype, content) == rdata.to_text():
                    LOGGER.info('Hetzner => Record with content \'%s\' already exists',
                                content)
                    synced_change = True
                    break
            if not synced_change:
                # Keep the existing TTL when it is lower than the
                # configured one; otherwise fall back to the config TTL.
                ttl = (rrset.ttl if 0 < rrset.ttl < self._get_lexicon_option('ttl')
                       else self._get_lexicon_option('ttl'))
                rdataset = dns.rdataset.from_text(rrset.rdclass, rrset.rdtype, ttl,
                                                  self._convert_content(rtype, content))
                rrset.update(rdataset)
            # Post zone to Hetzner
            synced_change = self._post_zone(ddata['zone'])
            if synced_change:
                # Wait for the change to be visible on the nameservers.
                self._propagated_record(rtype, name, self._convert_content(rtype, content),
                                        ddata['nameservers'])
            return synced_change
        LOGGER.warning('Hetzner => Record lookup has not only one match')
        return False
Connects to Hetzner account, changes an existing record and returns a boolean, if update was successful or not. Needed identifier or rtype & name to lookup over all records of the zone for exactly one record to update.
def Expandkey(key, clen):
    """Expand *key* into at least *clen* bytes of keystream material.

    Internal helper supporting encryption and decryption: SHA-1 is
    iterated over ``key + seed`` (the seed starts as the key itself) and
    the 20-byte digests are concatenated until at least ``clen`` bytes
    are produced.

    Args:
        key (bytes): The secret key material.
        clen (int): Minimum number of keystream bytes required.

    Returns:
        array.array: Typecode 'L' array built from the digest bytes.

    Note: ported from Python 2 (``sha`` -> ``hashlib.sha1``,
    ``string.join`` -> bytes concatenation, true division -> floor
    division) — the original no longer ran on Python 3.
    """
    import hashlib
    from array import array
    blocks = (clen + 19) // 20  # ceil(clen / 20); SHA-1 digests are 20 bytes
    digests = []
    seed = key
    for _ in range(blocks):
        seed = hashlib.sha1(key + seed).digest()
        digests.append(seed)
    # NOTE: array('L') itemsize is platform-dependent (4 or 8 bytes); the
    # total digest length must divide evenly, exactly as in the original.
    return array('L', b''.join(digests))
Internal method supporting encryption and decryption functionality.
async def redis(self) -> aioredis.RedisConnection:
    """Return the (lazily created) Redis connection.

    This property is awaitable; the first await establishes the
    connection, subsequent awaits reuse it.
    """
    # Guard the lazy initialization with the asyncio lock so concurrent
    # awaiters cannot open multiple connections.
    async with self._connection_lock:
        if self._redis is None:
            connection = await aioredis.create_connection(
                (self._host, self._port),
                db=self._db,
                password=self._password,
                ssl=self._ssl,
                loop=self._loop,
                **self._kwargs)
            self._redis = connection
    return self._redis
Get Redis connection This property is awaitable.
def init_array(store, shape, chunks=True, dtype=None, compressor='default', fill_value=None, order='C', overwrite=False, path=None, chunk_store=None, filters=None, object_codec=None): """Initialize an array store with the given configuration. Note that this is a low-level function and there should be no need to call this directly from user code. Parameters ---------- store : MutableMapping A mapping that supports string keys and bytes-like values. shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. If True, will be guessed from `shape` and `dtype`. If False, will be set to `shape`, i.e., single chunk for the whole array. dtype : string or dtype, optional NumPy dtype. compressor : Codec, optional Primary compressor. fill_value : object Default value to use for uninitialized portions of the array. order : {'C', 'F'}, optional Memory layout to be used within each chunk. overwrite : bool, optional If True, erase all data in `store` prior to initialisation. path : string, optional Path under which array is stored. chunk_store : MutableMapping, optional Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. object_codec : Codec, optional A codec to encode object arrays, only needed if dtype=object. 
Examples -------- Initialize an array store:: >>> from zarr.storage import init_array >>> store = dict() >>> init_array(store, shape=(10000, 10000), chunks=(1000, 1000)) >>> sorted(store.keys()) ['.zarray'] Array metadata is stored as JSON:: >>> print(store['.zarray'].decode()) { "chunks": [ 1000, 1000 ], "compressor": { "blocksize": 0, "clevel": 5, "cname": "lz4", "id": "blosc", "shuffle": 1 }, "dtype": "<f8", "fill_value": null, "filters": null, "order": "C", "shape": [ 10000, 10000 ], "zarr_format": 2 } Initialize an array using a storage path:: >>> store = dict() >>> init_array(store, shape=100000000, chunks=1000000, dtype='i1', path='foo') >>> sorted(store.keys()) ['.zgroup', 'foo/.zarray'] >>> print(store['foo/.zarray'].decode()) { "chunks": [ 1000000 ], "compressor": { "blocksize": 0, "clevel": 5, "cname": "lz4", "id": "blosc", "shuffle": 1 }, "dtype": "|i1", "fill_value": null, "filters": null, "order": "C", "shape": [ 100000000 ], "zarr_format": 2 } Notes ----- The initialisation process involves normalising all array metadata, encoding as JSON and storing under the '.zarray' key. """ # normalize path path = normalize_storage_path(path) # ensure parent group initialized _require_parent_group(path, store=store, chunk_store=chunk_store, overwrite=overwrite) _init_array_metadata(store, shape=shape, chunks=chunks, dtype=dtype, compressor=compressor, fill_value=fill_value, order=order, overwrite=overwrite, path=path, chunk_store=chunk_store, filters=filters, object_codec=object_codec)
Initialize an array store with the given configuration. Note that this is a low-level function and there should be no need to call this directly from user code. Parameters ---------- store : MutableMapping A mapping that supports string keys and bytes-like values. shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. If True, will be guessed from `shape` and `dtype`. If False, will be set to `shape`, i.e., single chunk for the whole array. dtype : string or dtype, optional NumPy dtype. compressor : Codec, optional Primary compressor. fill_value : object Default value to use for uninitialized portions of the array. order : {'C', 'F'}, optional Memory layout to be used within each chunk. overwrite : bool, optional If True, erase all data in `store` prior to initialisation. path : string, optional Path under which array is stored. chunk_store : MutableMapping, optional Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. object_codec : Codec, optional A codec to encode object arrays, only needed if dtype=object. 
Examples -------- Initialize an array store:: >>> from zarr.storage import init_array >>> store = dict() >>> init_array(store, shape=(10000, 10000), chunks=(1000, 1000)) >>> sorted(store.keys()) ['.zarray'] Array metadata is stored as JSON:: >>> print(store['.zarray'].decode()) { "chunks": [ 1000, 1000 ], "compressor": { "blocksize": 0, "clevel": 5, "cname": "lz4", "id": "blosc", "shuffle": 1 }, "dtype": "<f8", "fill_value": null, "filters": null, "order": "C", "shape": [ 10000, 10000 ], "zarr_format": 2 } Initialize an array using a storage path:: >>> store = dict() >>> init_array(store, shape=100000000, chunks=1000000, dtype='i1', path='foo') >>> sorted(store.keys()) ['.zgroup', 'foo/.zarray'] >>> print(store['foo/.zarray'].decode()) { "chunks": [ 1000000 ], "compressor": { "blocksize": 0, "clevel": 5, "cname": "lz4", "id": "blosc", "shuffle": 1 }, "dtype": "|i1", "fill_value": null, "filters": null, "order": "C", "shape": [ 100000000 ], "zarr_format": 2 } Notes ----- The initialisation process involves normalising all array metadata, encoding as JSON and storing under the '.zarray' key.
def mctransp(I,J,K,c,d,M):
    """mctransp -- model for solving the Multi-commodity Transportation Problem
    Parameters:
        - I: set of customers
        - J: set of facilities
        - K: set of commodities
        - c[i,j,k]: unit transportation cost on arc (i,j) for commodity k
        - d[i,k]: demand for commodity k at customer i
        - M[j]: capacity of facility j
    Returns a model, ready to be solved.
    """
    model = Model("multi-commodity transportation")

    # Create variables
    x = {}
    for (i,j,k) in c:
        # x[i,j,k]: continuous amount of commodity k shipped from facility j
        # to customer i; transportation cost goes directly into the objective.
        x[i,j,k] = model.addVar(vtype="C", name="x(%s,%s,%s)" % (i,j,k), obj=c[i,j,k])

    # tuplelist is a Gurobi data structure to manage lists of equal sized tuples - try itertools as alternative
    arcs = tuplelist([(i,j,k) for (i,j,k) in x])

    # Demand constraints: each (customer, commodity) demand must be met exactly
    for i in I:
        for k in K:
            model.addCons(sum(x[i,j,k] for (i,j,k) in arcs.select(i,"*",k)) == d[i,k], "Demand(%s,%s)" % (i,k))

    # Capacity constraints: total flow leaving each facility is bounded by M[j]
    for j in J:
        model.addCons(sum(x[i,j,k] for (i,j,k) in arcs.select("*",j,"*")) <= M[j], "Capacity(%s)" % j)

    # expose the variable dict for callers that inspect the solution
    model.data = x
    return model
mctransp -- model for solving the Multi-commodity Transportation Problem Parameters: - I: set of customers - J: set of facilities - K: set of commodities - c[i,j,k]: unit transportation cost on arc (i,j) for commodity k - d[i,k]: demand for commodity k at node i - M[j]: capacity Returns a model, ready to be solved.
def taf(wxdata: TafData, units: Units) -> TafTrans:
    """
    Translate the results of taf.parse

    Keys: Forecast, Min-Temp, Max-Temp

    Forecast keys: Wind, Visibility, Clouds, Altimeter, Wind-Shear,
    Turbulance, Icing, Other
    """
    forecast_lines = []
    for period in wxdata.forecast:
        data = shared(period, units)  # type: ignore
        data['wind'] = wind(period.wind_direction, period.wind_speed,
                            period.wind_gust, unit=units.wind_speed)
        data['wind_shear'] = wind_shear(period.wind_shear, units.altitude,
                                        units.wind_speed)
        data['turbulance'] = turb_ice(period.turbulance, units.altitude)
        data['icing'] = turb_ice(period.icing, units.altitude)
        # A 'BECMG' line with no cloud groups must not report 'Sky clear'
        if period.type == 'BECMG' and data['clouds'] == 'Sky clear':
            data['clouds'] = None  # type: ignore
        forecast_lines.append(TafLineTrans(**data))
    translations = {
        'forecast': forecast_lines,
        'min_temp': min_max_temp(wxdata.min_temp, units.temperature),
        'max_temp': min_max_temp(wxdata.max_temp, units.temperature),
        'remarks': remarks.translate(wxdata.remarks),
    }
    return TafTrans(**translations)  # type: ignore
Translate the results of taf.parse Keys: Forecast, Min-Temp, Max-Temp Forecast keys: Wind, Visibility, Clouds, Altimeter, Wind-Shear, Turbulance, Icing, Other
def validate_aead_otp(self, public_id, otp, key_handle, aead):
    """
    Ask YubiHSM to validate a YubiKey OTP using an AEAD and a key_handle to
    decrypt the AEAD.

    @param public_id: The six bytes public id of the YubiKey
    @param otp: The one time password (OTP) to validate
    @param key_handle: The key handle that can decrypt the AEAD
    @param aead: AEAD containing the cryptographic key and permission flags
    @type public_id: string
    @type otp: string
    @type key_handle: integer
    @type aead: string

    @returns: validation response
    @rtype: L{YHSM_ValidationResult}

    @see: L{pyhsm.validate_cmd.YHSM_Cmd_AEAD_Validate_OTP}
    """
    # The previous implementation used bare `assert()` statements (asserting
    # an empty tuple) as its type guard, which raised AssertionError with no
    # message.  Keep AssertionError for backwards compatibility, but make
    # the checks explicit and give them diagnostics.  NOTE: asserts are
    # stripped under `python -O`.
    assert isinstance(public_id, str), "public_id must be a string"
    assert isinstance(otp, str), "otp must be a string"
    assert isinstance(key_handle, int), "key_handle must be an integer"
    assert isinstance(aead, str), "aead must be a string"
    return pyhsm.validate_cmd.YHSM_Cmd_AEAD_Validate_OTP(
        self.stick, public_id, otp, key_handle, aead).execute()
Ask YubiHSM to validate a YubiKey OTP using an AEAD and a key_handle to decrypt the AEAD. @param public_id: The six bytes public id of the YubiKey @param otp: The one time password (OTP) to validate @param key_handle: The key handle that can decrypt the AEAD @param aead: AEAD containing the cryptographic key and permission flags @type public_id: string @type otp: string @type key_handle: integer or string @type aead: L{YHSM_GeneratedAEAD} or string @returns: validation response @rtype: L{YHSM_ValidationResult} @see: L{pyhsm.validate_cmd.YHSM_Cmd_AEAD_Validate_OTP}
def with_access_to(self, request, *args, **kwargs):  # pylint: disable=invalid-name,unused-argument
    """
    Returns the list of enterprise customers the user has a specified group
    permission access to.
    """
    self.queryset = self.queryset.order_by('name')
    params = self.request.query_params
    uuid_param = params.get('enterprise_id', None)
    slug_param = params.get('enterprise_slug', None)
    search_param = params.get('search', None)
    # Filters are mutually exclusive; the first one supplied wins.
    if uuid_param is not None:
        self.queryset = self.queryset.filter(uuid=uuid_param)
    elif slug_param is not None:
        self.queryset = self.queryset.filter(slug=slug_param)
    elif search_param is not None:
        self.queryset = self.queryset.filter(name__icontains=search_param)
    return self.list(request, *args, **kwargs)
Returns the list of enterprise customers the user has a specified group permission access to.
def header(text):
    '''Display a header line on stdout and flush immediately.'''
    print('{} {}'.format(blue('>>'), cyan(text)))
    sys.stdout.flush()
Display a header
def pool(n=None, dummy=False):
    """
    Create a multiprocessing pool.

    :param n: number of workers; defaults to ``cpu_count() - 1`` but never
        less than 1.
    :param dummy: if True, return a thread-based pool
        (``multiprocessing.dummy``) instead of a process pool.
    :return: a Pool instance.
    """
    if dummy:
        from multiprocessing.dummy import Pool
    else:
        from multiprocessing import Pool
    if n is None:
        import multiprocessing
        # Leave one core free, but never request an empty pool:
        # Pool(0) raises ValueError on single-core machines.
        n = max(1, multiprocessing.cpu_count() - 1)
    return Pool(n)
create a multiprocessing pool that responds to interrupts.
def Mean(self):
    """Computes the mean of a CDF.

    Returns:
        float mean
    """
    mean = 0.0
    prev_cum = 0
    for value, cum_prob in zip(self.xs, self.ps):
        # The probability mass at this value is the CDF increment.
        mean += (cum_prob - prev_cum) * value
        prev_cum = cum_prob
    return mean
Computes the mean of a CDF. Returns: float mean
def unapprove(self, **kwargs):
    """Unapprove the merge request.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabMRApprovalError: If the unapproval failed
    """
    path = '%s/%s/unapprove' % (self.manager.path, self.get_id())
    # The endpoint takes no payload; POST an empty body.
    server_data = self.manager.gitlab.http_post(path, post_data={}, **kwargs)
    self._update_attrs(server_data)
Unapprove the merge request. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabMRApprovalError: If the unapproval failed
def module_entry(yfile):
    """Add entry for one file containing YANG module text.

    Parses the (sub)module and records it in the module-level maps:
    submodules go into ``submodmap`` keyed by name, modules into ``modmap``
    keyed by (name, revision).

    Args:
        yfile (file): File containing a YANG module or submodule.
    """
    ytxt = yfile.read()
    mp = ModuleParser(ytxt)
    mst = mp.statement()
    submod = mst.keyword == "submodule"
    import_only = True          # stays True while no data-definition statement is seen
    rev = ""                    # first (latest) revision statement wins
    features = []
    includes = []
    rec = {}
    for sst in mst.substatements:
        # NOTE: elif ordering matters — the `submod: continue` guard below
        # skips namespace/include handling for submodules.
        if not rev and sst.keyword == "revision":
            rev = sst.argument
        elif import_only and sst.keyword in data_kws:
            # Any data-defining statement means the module is not import-only.
            import_only = False
        elif sst.keyword == "feature":
            features.append(sst.argument)
        elif submod:
            continue
        elif sst.keyword == "namespace":
            rec["namespace"] = sst.argument
        elif sst.keyword == "include":
            rd = sst.find1("revision-date")
            includes.append((sst.argument, rd.argument if rd else None))
    rec["import-only"] = import_only
    rec["features"] = features
    if submod:
        rec["revision"] = rev
        submodmap[mst.argument] = rec
    else:
        rec["includes"] = includes
        modmap[(mst.argument, rev)] = rec
Add entry for one file containing YANG module text. Args: yfile (file): File containing a YANG module or submodule.
def set_row_name(self, index, name):
    """
    Sets the row name.

    :param index: the 0-based row index
    :type index: int
    :param name: the name of the row
    :type name: str
    """
    # JNI signature "(ILjava/lang/String;)V": void setRowName(int, String)
    javabridge.call(self.jobject, "setRowName", "(ILjava/lang/String;)V", index, name)
Sets the row name. :param index: the 0-based row index :type index: int :param name: the name of the row :type name: str
def next(self):
    """
    Returns the next packet in the cap.
    If the capture's keep_packets flag is True, will also keep it in the
    internal packet list.
    """
    if not self.keep_packets:
        # Streaming mode: pull straight from the generator, nothing cached.
        return self._packet_generator.send(None)
    elif self._current_packet >= len(self._packets):
        # Cache miss: fetch one more packet and append it to the cache.
        packet = self._packet_generator.send(None)
        self._packets += [packet]
    # Serve (and advance past) the current packet via the base class.
    return super(FileCapture, self).next_packet()
Returns the next packet in the cap. If the capture's keep_packets flag is True, will also keep it in the internal packet list.
def _cleanstarlog(file_in): """ cleaning history.data or star.log file, e.g. to take care of repetitive restarts. private, should not be called by user directly Parameters ---------- file_in : string Typically the filename of the mesa output history.data or star.log file, creates a clean file called history.datasa or star.logsa. (thanks to Raphael for providing this tool) """ file_out=file_in+'sa' f = open(file_in) lignes = f.readlines() f.close() nb = np.array([],dtype=int) # model number nb = np.concatenate((nb ,[ int(lignes[len(lignes)-1].split()[ 0])])) nbremove = np.array([],dtype=int) # model number i=-1 for i in np.arange(len(lignes)-1,0,-1): line = lignes[i-1] if i > 6 and line != "" : if int(line.split()[ 0])>=nb[-1]: nbremove = np.concatenate((nbremove,[i-1])) else: nb = np.concatenate((nb ,[ int(line.split()[ 0])])) i=-1 for j in nbremove: lignes.remove(lignes[j]) fout = open(file_out,'w') for j in np.arange(len(lignes)): fout.write(lignes[j]) fout.close()
cleaning history.data or star.log file, e.g. to take care of repetitive restarts. private, should not be called by user directly Parameters ---------- file_in : string Typically the filename of the mesa output history.data or star.log file, creates a clean file called history.datasa or star.logsa. (thanks to Raphael for providing this tool)
def init_all_objects(self, data, target=None, single_result=True):
    """
    Initializes model instances from the given data.

    Returns a single instance when ``single_result`` is True, otherwise a
    list of instances.
    """
    if not single_result:
        return list(self.expand_models(target, data))
    return self.init_target_object(target, data)
Initializes model instances from given data. Returns single instance if single_result=True.
async def readobj(self):
    """
    Return a parsed Redis object or an exception when something wrong
    happened.

    Returns None implicitly when EOF is reached with no complete object
    buffered.
    """
    assert self._parser is not None, "set_parser must be called"
    while True:
        # The parser uses False as its "need more data" sentinel, so a
        # parsed None (Redis Nil) is still a valid return value here.
        obj = self._parser.gets()
        if obj is not False:
            # TODO: implement resume the read
            # Return any valid object and the Nil->None
            # case. When its False there is nothing there
            # to be parsed and we have to wait for more data.
            return obj
        if self._exception:
            raise self._exception
        if self._eof:
            break
        await self._wait_for_data('readobj')
Return a parsed Redis object or an exception when something wrong happened.
def send_put(self, mri, attribute_name, value):
    """Abstract method to dispatch a Put to the server

    Args:
        mri (str): The mri of the Block
        attribute_name (str): The name of the Attribute within the Block
        value: The value to put
    """
    # Puts target the attribute's "value" field.
    path = attribute_name + ".value"
    typ, value = convert_to_type_tuple_value(serialize_object(value))
    if isinstance(typ, tuple):
        # Structure, make into a Value
        _, typeid, fields = typ
        value = Value(Type(fields, typeid), value)
    try:
        self._ctxt.put(mri, {path: value}, path)
    except RemoteError:
        if attribute_name == "exports":
            # TODO: use a tag instead of a name
            # This will change the structure of the block
            # Wait for reconnect
            self._queues[mri].get(timeout=DEFAULT_TIMEOUT)
        else:
            # Not expected, raise
            raise
Abstract method to dispatch a Put to the server Args: mri (str): The mri of the Block attribute_name (str): The name of the Attribute within the Block value: The value to put
def get_activity_ids_by_objective_bank(self, objective_bank_id):
    """Gets the list of ``Activity``  ``Ids`` associated with an
    ``ObjectiveBank``.

    arg:    objective_bank_id (osid.id.Id): ``Id`` of the
            ``ObjectiveBank``
    return: (osid.id.IdList) - list of related activity ``Ids``
    raise:  NotFound - ``objective_bank_id`` is not found
    raise:  NullArgument - ``objective_bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resource_ids_by_bin
    activities = self.get_activities_by_objective_bank(objective_bank_id)
    return IdList([activity.get_id() for activity in activities])
Gets the list of ``Activity`` ``Ids`` associated with an ``ObjectiveBank``. arg: objective_bank_id (osid.id.Id): ``Id`` of the ``ObjectiveBank`` return: (osid.id.IdList) - list of related activity ``Ids`` raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def violations(self, src_path):
    """
    Return a list of Violations recorded in `src_path`.

    Results are cached in ``self.violations_dict``; the underlying quality
    tool is only invoked on a cache miss when no pre-generated reports were
    supplied.
    """
    # Files the driver does not understand produce no violations.
    if not any(src_path.endswith(ext) for ext in self.driver.supported_extensions):
        return []
    if src_path not in self.violations_dict:
        if self.reports:
            # Pre-generated reports provided: parse them all at once.
            self.violations_dict = self.driver.parse_reports(self.reports)
        else:
            # Lazily probe (once) whether the quality tool is available.
            if self.driver_tool_installed is None:
                self.driver_tool_installed = self.driver.installed()

            if not self.driver_tool_installed:
                raise EnvironmentError("{} is not installed".format(self.driver.name))
            command = copy.deepcopy(self.driver.command)
            if self.options:
                # NOTE(review): options are appended as a single argv element;
                # presumably self.options is one flag string — confirm that
                # multi-flag option strings are intended to be passed unsplit.
                command.append(self.options)
            if os.path.exists(src_path):
                command.append(src_path.encode(sys.getfilesystemencoding()))
            output, _ = execute(command, self.driver.exit_codes)
            self.violations_dict.update(self.driver.parse_reports([output]))

    return self.violations_dict[src_path]
Return a list of Violations recorded in `src_path`.
def send(self, use_open_peers=True, queue=True, **kw):
    """
    send a transaction immediately. Failed transactions are picked up by the
    TxBroadcaster

    :param ip: specific peer IP to send tx to
    :param port: port of specific peer
    :param use_open_peers: use Arky's broadcast method
    """
    if not use_open_peers:
        # Direct delivery to one explicitly addressed peer.
        ip = kw.get('ip')
        port = kw.get('port')
        peer = 'http://{}:{}'.format(ip, port)
        res = arky.rest.POST.peer.transactions(peer=peer, transactions=[self.tx.tx])
    else:
        # Broadcast through Arky's open-peer mechanism.
        res = arky.core.sendPayload(self.tx.tx)

    # NOTE(review): this compares the *previous* value of tx.success against
    # the string '0.0%' rather than inspecting `res` — presumably tx.success
    # holds a broadcast percentage from arky; confirm this is intentional.
    if self.tx.success != '0.0%':
        self.tx.error = None
        self.tx.success = True
    else:
        self.tx.error = res['messages']
        self.tx.success = False

    self.tx.tries += 1
    self.tx.res = res
    if queue:
        # Flag the tx so the TxBroadcaster will retry/track it.
        self.tx.send = True
    self.__save()
    return res
send a transaction immediately. Failed transactions are picked up by the TxBroadcaster :param ip: specific peer IP to send tx to :param port: port of specific peer :param use_open_peers: use Arky's broadcast method
def frequency_app(parser, cmd, args):  # pragma: no cover
    """
    Perform frequency analysis on a value.
    """
    parser.add_argument(
        'value',
        help='the value to analyse, read from stdin if omitted',
        nargs='?'
    )
    args = parser.parse_args(args)
    data = frequency(six.iterbytes(pwnypack.main.binary_value_or_stdin(args.value)))
    report = []
    for key, count in data.items():
        # Show the character itself only for printable bytes.
        if key >= 32 and chr(key) in string.printable:
            report.append('0x%02x (%c): %d' % (key, chr(key), count))
        else:
            report.append('0x%02x ---: %d' % (key, count))
    return '\n'.join(report)
perform frequency analysis on a value.
def subjects(self):
    """
    Return identifiers for all the subjects that are in the cache.

    :return: list of subject identifiers (deduplicated, arbitrary order)
    """
    unique_ids = {record["subject_id"] for record in self._cache.find()}
    return list(unique_ids)
Return identifiers for all the subjects that are in the cache. :return: list of subject identifiers
def __add_location(self, type, *args):
    r"""
    Defines the slot triggered by **Where_lineEdit** Widget when a context
    menu entry is clicked.

    Appends the chosen location to the comma-separated contents of the
    **Where_lineEdit** Widget.

    :param type: Location type.
    :type type: unicode
    :param \*args: Arguments.
    :type \*args: \*
    """
    # NOTE(review): if `type` matches none of the branches below, `location`
    # is unbound and the final statement raises — presumably callers only
    # pass the five known types; confirm.
    if type == "directory":
        location = umbra.ui.common.store_last_browsed_path((QFileDialog.getExistingDirectory(self,
                                                                                             "Add Directory:",
                                                                                             RuntimeGlobals.last_browsed_path)))
    elif type == "file":
        location = umbra.ui.common.store_last_browsed_path((QFileDialog.getOpenFileName(self,
                                                                                        "Add File:",
                                                                                        RuntimeGlobals.last_browsed_path,
                                                                                        "All Files (*)")))
    elif type == "editors":
        location = self.__targets_format.format(self.__default_target)
    elif type == "include_filter":
        location = self.__filters_in_format.format(self.__default_filter_in)
    elif type == "exclude_filter":
        location = self.__filters_out_format.format(self.__default_filter_out)
    # Only update the widget when a non-empty location was produced
    # (dialogs return an empty value when cancelled).
    location and self.Where_lineEdit.setText(", ".join(filter(bool, (foundations.strings.to_string(
        self.Where_lineEdit.text()), location))))
Defines the slot triggered by **Where_lineEdit** Widget when a context menu entry is clicked. :param type: Location type. :type type: unicode :param \*args: Arguments. :type \*args: \*
def get_settings(self):
    '''Get all settings for a client, if defined in config.

    Returns None when the client has no section in the user configuration.
    '''
    config = self._load_config_user()
    if self.name not in config:
        return None
    return config[self.name]
get all settings for a client, if defined in config.
def _iterslice(self, slice):
    """Yield records from a slice index.

    Yields one Record per selected row, preserving the slice's direction.
    """
    indices = range(*slice.indices(len(self._records)))
    if self.is_attached():
        rows = self._enum_attached_rows(indices)
        if slice.step is not None and slice.step < 0:
            # Attached rows are enumerated in storage order; materialise and
            # reverse to honour a negative-step slice.
            rows = reversed(list(rows))
    else:
        # Detached: pair each index with the already-sliced record data.
        rows = zip(indices, self._records[slice])
    fields = self.fields
    for i, row in rows:
        yield Record._make(fields, row, self, i)
Yield records from a slice index.
def rehash(path, algo='sha256', blocksize=1 << 20):
    """Return (hash, length) for path using hashlib.new(algo).

    The hash is formatted as ``<algo>=<urlsafe-b64-digest>`` with base64
    padding stripped (wheel RECORD style); length is the file size in bytes.
    """
    h = hashlib.new(algo)
    length = 0
    with open(path, 'rb') as f:
        # Stream the file in fixed-size chunks to bound memory use.
        block = f.read(blocksize)
        while block:
            length += len(block)
            h.update(block)
            block = f.read(blocksize)
    # Bug fix: the prefix previously hard-coded 'sha256=' even when a
    # different algorithm was requested; label the digest with the actual
    # algorithm instead.  Output is unchanged for the default algo.
    digest = algo + '=' + urlsafe_b64encode(
        h.digest()
    ).decode('latin1').rstrip('=')
    return (digest, length)
Return (hash, length) for path using hashlib.new(algo)
def update_local(self): """Update CPU stats using psutil.""" # Grab CPU stats using psutil's cpu_percent and cpu_times_percent # Get all possible values for CPU stats: user, system, idle, # nice (UNIX), iowait (Linux), irq (Linux, FreeBSD), steal (Linux 2.6.11+) # The following stats are returned by the API but not displayed in the UI: # softirq (Linux), guest (Linux 2.6.24+), guest_nice (Linux 3.2.0+) # Init new stats stats = self.get_init_value() stats['total'] = cpu_percent.get() cpu_times_percent = psutil.cpu_times_percent(interval=0.0) for stat in ['user', 'system', 'idle', 'nice', 'iowait', 'irq', 'softirq', 'steal', 'guest', 'guest_nice']: if hasattr(cpu_times_percent, stat): stats[stat] = getattr(cpu_times_percent, stat) # Additional CPU stats (number of events not as a %; psutil>=4.1.0) # ctx_switches: number of context switches (voluntary + involuntary) per second # interrupts: number of interrupts per second # soft_interrupts: number of software interrupts per second. Always set to 0 on Windows and SunOS. # syscalls: number of system calls since boot. Always set to 0 on Linux. cpu_stats = psutil.cpu_stats() # By storing time data we enable Rx/s and Tx/s calculations in the # XML/RPC API, which would otherwise be overly difficult work # for users of the API time_since_update = getTimeSinceLastUpdate('cpu') # Previous CPU stats are stored in the cpu_stats_old variable if not hasattr(self, 'cpu_stats_old'): # First call, we init the cpu_stats_old var self.cpu_stats_old = cpu_stats else: for stat in cpu_stats._fields: if getattr(cpu_stats, stat) is not None: stats[stat] = getattr(cpu_stats, stat) - getattr(self.cpu_stats_old, stat) stats['time_since_update'] = time_since_update # Core number is needed to compute the CTX switch limit stats['cpucore'] = self.nb_log_core # Save stats to compute next step self.cpu_stats_old = cpu_stats return stats
Update CPU stats using psutil.
def params_as_tensors(method):
    """
    Decorator that, inside the wrapped method, converts parameter
    representations into their unconstrained tensors and data holders into
    their data tensors.  The wrapped function must be a method of a
    `Parameterized` object.
    """
    @functools.wraps(method)
    def tensor_mode_wrapper(obj, *args, **kwargs):
        if not isinstance(obj, Parameterized):
            raise GPflowError(
                'Tensor mode works only for Parameterized object.')
        # Remember the previous mode so nested decorated calls restore it.
        prev_value = _params_as_tensors_enter(obj, True)
        try:
            result = method(obj, *args, **kwargs)
        finally:
            # Always leave tensor mode, even if the method raised.
            _params_as_tensors_exit(obj, prev_value)
        return result
    return tensor_mode_wrapper
The `params_as_tensors` decorator converts representation for parameters into their unconstrained tensors, and data holders to their data tensors inside wrapped function, subject to this function is a member of parameterized object.
def likes_count(obj):
    """
    Usage:
        {% likes_count obj %}
        or
        {% likes_count obj as var %}
        or
        {{ obj|likes_count }}
    """
    content_type = ContentType.objects.get_for_model(obj)
    likes = Like.objects.filter(
        receiver_content_type=content_type,
        receiver_object_id=obj.pk,
    )
    return likes.count()
Usage: {% likes_count obj %} or {% likes_count obj as var %} or {{ obj|likes_count }}
def _at_include(self, calculator, rule, scope, block):
    """
    Implements @include, for @mixins

    Resolves the named mixin (by name and arity), builds its namespace,
    and executes its body as a nested rule with the caller's @content
    attached.
    """
    caller_namespace = rule.namespace
    caller_calculator = self._make_calculator(caller_namespace)
    funct, caller_argspec = self._get_funct_def(rule, caller_calculator, block.argument)

    # Render the passed arguments, using the caller's namespace
    args, kwargs = caller_argspec.evaluate_call_args(caller_calculator)

    # Mixins are looked up by (name, arity).
    argc = len(args) + len(kwargs)
    try:
        mixin = caller_namespace.mixin(funct, argc)
    except KeyError:
        try:
            # TODO maybe? don't do this, once '...' works
            # Fallback to single parameter:
            mixin = caller_namespace.mixin(funct, 1)
        except KeyError:
            log.error("Mixin not found: %s:%d (%s)", funct, argc,
                      rule.file_and_line, extra={'stack': True})
            return
        else:
            # Collapse all positional args into a single comma list for the
            # one-parameter fallback.
            args = [List(args, use_comma=True)]
            # TODO what happens to kwargs?

    # Mixin record layout: (source_file, lineno, code, namespace, argspec).
    source_file = mixin[0]
    lineno = mixin[1]
    m_codestr = mixin[2]
    pristine_callee_namespace = mixin[3]
    callee_argspec = mixin[4]
    if caller_argspec.inject and callee_argspec.inject:
        # DEVIATION: Pass the ENTIRE local namespace to the mixin (yikes)
        callee_namespace = Namespace.derive_from(
            caller_namespace,
            pristine_callee_namespace)
    else:
        callee_namespace = pristine_callee_namespace.derive()

    # Bind the evaluated call arguments into the mixin's namespace.
    self._populate_namespace_from_call(
        "Mixin {0}".format(funct),
        callee_namespace, mixin, args, kwargs)

    _rule = SassRule(
        # These must be file and line in which the @include occurs
        source_file=rule.source_file,
        lineno=rule.lineno,

        # These must be file and line in which the @mixin was defined
        from_source_file=source_file,
        from_lineno=lineno,

        unparsed_contents=m_codestr,
        namespace=callee_namespace,

        # rule
        import_key=rule.import_key,
        legacy_compiler_options=rule.legacy_compiler_options,
        options=rule.options,
        properties=rule.properties,
        extends_selectors=rule.extends_selectors,
        ancestry=rule.ancestry,
        nested=rule.nested,
    )

    # Make the @include body available to @content inside the mixin.
    _rule.options['@content'] = block.unparsed_contents
    self.manage_children(_rule, scope)
Implements @include, for @mixins
def classpath(self, targets, classpath_prefix=None, classpath_product=None, exclude_scopes=None,
              include_scopes=None):
    """Builds a transitive classpath for the given targets.

    Optionally includes a classpath prefix or building from a non-default classpath product.

    :param targets: the targets for which to build the transitive classpath.
    :param classpath_prefix: optional additional entries to prepend to the classpath.
    :param classpath_product: an optional ClasspathProduct from which to build the classpath. if not
    specified, the runtime_classpath will be used.
    :param :class:`pants.build_graph.target_scopes.Scope` exclude_scopes: Exclude targets which
    have at least one of these scopes on the classpath.
    :param :class:`pants.build_graph.target_scopes.Scope` include_scopes: Only include targets which
    have at least one of these scopes on the classpath. Defaults to Scopes.JVM_RUNTIME_SCOPES.
    :return: a list of classpath strings.
    """
    # Default to the JVM runtime scopes / runtime_classpath product when not given.
    include_scopes = Scopes.JVM_RUNTIME_SCOPES if include_scopes is None else include_scopes
    classpath_product = classpath_product or self.context.products.get_data('runtime_classpath')
    # BFS transitive closure over the build graph, honouring scope filters
    # and intransitive dependencies.
    closure = BuildGraph.closure(targets, bfs=True, include_scopes=include_scopes,
                                 exclude_scopes=exclude_scopes, respect_intransitive=True)

    classpath_for_targets = ClasspathUtil.classpath(closure, classpath_product, self.confs)
    # Prefix entries (if any) come first on the resulting classpath.
    classpath = list(classpath_prefix or ())
    classpath.extend(classpath_for_targets)
    return classpath
Builds a transitive classpath for the given targets. Optionally includes a classpath prefix or building from a non-default classpath product. :param targets: the targets for which to build the transitive classpath. :param classpath_prefix: optional additional entries to prepend to the classpath. :param classpath_product: an optional ClasspathProduct from which to build the classpath. if not specified, the runtime_classpath will be used. :param :class:`pants.build_graph.target_scopes.Scope` exclude_scopes: Exclude targets which have at least one of these scopes on the classpath. :param :class:`pants.build_graph.target_scopes.Scope` include_scopes: Only include targets which have at least one of these scopes on the classpath. Defaults to Scopes.JVM_RUNTIME_SCOPES. :return: a list of classpath strings.
def _jsonfileconstructor(self, filename=None, filepath=None, logger=None):
    """
    Constructor method for the JsonFile object.

    :param filename: Name of the file; defaults to "default_file.json".
    :param filepath: Path to the file; defaults to the "session_data"
        directory next to the testcase module.
    :param logger: Optional logger; defaults to the bench logger.
    :return: JsonFile
    """
    if not filepath:
        # Locate the "session_data" directory relative to the testcase module.
        tc_dir = os.path.abspath(os.path.join(inspect.getfile(self.bench.__class__), os.pardir))
        path = os.path.abspath(os.path.join(tc_dir, os.pardir, "session_data"))
    else:
        path = filepath
    name = filename if filename else "default_file.json"
    log = logger if logger else self.bench.logger
    self.bench.logger.info("Setting json file location to: {}".format(path))
    return files.JsonFile(log, path, name)
Constructor method for the JsonFile object. :param filename: Name of the file :param filepath: Path to the file :param logger: Optional logger. :return: JsonFile
def value(self):
    """
    Returns the current value (adjusted for the time decay)

    :rtype: float
    """
    with self.lock:
        now = time.time()
        elapsed = now - self.t0
        self.t0 = now
        # Apply exponential decay/growth over the elapsed interval.
        self.p *= math.pow(math.e, self.r * elapsed)
        return self.p
Returns the current value (adjusted for the time decay) :rtype: float
def go_offline(connected=None):
    """
    Initialise plotly's offline (notebook) mode.

    connected : bool
            If True, the plotly.js library will be loaded
            from an online CDN. If False, the plotly.js library will be loaded locally
            from the plotly python package
            If None, the value is read from the config file's
            'offline_connected' key, defaulting to True.
    """
    from .auth import get_config_file
    if connected is None:
        # Deliberate best-effort fallback: any failure reading the config
        # (missing file/key) defaults to the CDN.
        try:
            connected=True if get_config_file()['offline_connected'] is None else get_config_file()['offline_connected']
        except:
            connected=True
    if run_from_ipython():
        try:
            py_offline.init_notebook_mode(connected)
        except TypeError:
            #For older versions of plotly
            py_offline.init_notebook_mode()
        py_offline.__PLOTLY_OFFLINE_INITIALIZED=True
connected : bool If True, the plotly.js library will be loaded from an online CDN. If False, the plotly.js library will be loaded locally from the plotly python package
def _lock_renewer(lockref, interval, stop):
    """
    Renew the lock key in redis every `interval` seconds for as long as
    `self._lock_renewal_thread.should_exit` is False.
    """
    log = getLogger("%s.lock_refresher" % __name__)
    while True:
        # wait() returns True once the stop event is set.
        if stop.wait(timeout=interval):
            break
        log.debug("Refreshing lock")
        lock = lockref()
        if lock is None:
            # The weakref died: the owning lock was garbage collected.
            log.debug("The lock no longer exists, "
                      "stopping lock refreshing")
            break
        lock.extend(expire=lock._expire)
        # Drop the strong reference so the lock can be collected between ticks.
        del lock

    log.debug("Exit requested, stopping lock refreshing")
Renew the lock key in redis every `interval` seconds for as long as `self._lock_renewal_thread.should_exit` is False.
def run(self):
    """
    Write the input/data files and run LAMMPS.

    :return: (stdout, stderr) bytes captured from the LAMMPS process.
    """
    cmd = self.lammps_bin + ['-in', self.input_filename]
    print("Running: {}".format(" ".join(cmd)))
    process = Popen(cmd, stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    return stdout, stderr
Write the input/data files and run LAMMPS.
def _discover_masters(self):
    '''
    Discover master(s) and decide where to connect, if SSDP is around.

    This modifies the configuration on the fly: ``self.opts['master']``
    may be replaced with a discovered master (or a list of them when
    multimaster is enabled).

    :return: None
    '''
    # Only attempt discovery when the master is still the default and
    # discovery has not been explicitly disabled.
    if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
        master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
        masters = {}
        # Retry discovery up to the configured number of attempts,
        # pausing between empty results.
        for att in range(self.opts['discovery'].get('attempts', 3)):
            try:
                att += 1
                log.info('Attempting %s time(s) to discover masters', att)
                masters.update(master_discovery_client.discover())
                if not masters:
                    time.sleep(self.opts['discovery'].get('pause', 5))
                else:
                    break
            except Exception as err:
                log.error('SSDP discovery failure: %s', err)
                break

        if masters:
            policy = self.opts.get('discovery', {}).get('match', 'any')
            if policy not in ['any', 'all']:
                log.error('SSDP configuration matcher failure: unknown value "%s". '
                          'Should be "any" or "all"', policy)
                return
            mapping = self.opts['discovery'].get('mapping', {})
            discovered = []
            for addr, mappings in masters.items():
                for proto_data in mappings:
                    # Count how many configured mapping entries the
                    # advertised master matches.
                    cnt = len([key for key, value in mapping.items()
                               if proto_data.get('mapping', {}).get(key) == value])
                    # 'any' accepts one matching entry; 'all' requires every one.
                    if policy == 'any' and bool(cnt) or cnt == len(mapping):
                        if self.opts['discovery'].get('multimaster'):
                            discovered.append(proto_data['master'])
                        else:
                            # Single-master mode: take the first match and stop.
                            self.opts['master'] = proto_data['master']
                            return
            self.opts['master'] = discovered
Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. :return:
def remove_index(self, field=None, value=None):
    """
    remove_index(field=None, value=None)

    Remove the specified field/value pair as an index on this object.

    :param field: The index field.
    :type field: string
    :param value: The index value.
    :type value: string or integer
    :rtype: :class:`RiakObject <riak.riak_object.RiakObject>`
    """
    if not field and not value:
        # No arguments: drop every index on the object.
        self.indexes.clear()
    elif field and not value:
        # Field only: drop every pair whose field matches.
        stale = [pair for pair in self.indexes if pair[0] == field]
        for pair in stale:
            self.indexes.remove(pair)
    elif field and value:
        self.indexes.remove((field, value))
    else:
        # A value without a field is ambiguous.
        raise RiakError("Cannot pass value without a field"
                        " name while removing index")
    return self._robject
remove_index(field=None, value=None) Remove the specified field/value pair as an index on this object. :param field: The index field. :type field: string :param value: The index value. :type value: string or integer :rtype: :class:`RiakObject <riak.riak_object.RiakObject>`
def is_password_valid(password):
    """ Check if a password is valid """
    # Accept 4-75 characters on a single line (``.`` excludes newlines).
    return re.match(r"^.{4,75}$", password) is not None
Check if a password is valid
def reshape(x, input_dim):
    '''
    Reshapes x into a matrix with input_dim columns
    '''
    arr = np.array(x)
    # A flat vector of exactly input_dim entries becomes a 1-row matrix;
    # anything else is returned converted but otherwise unchanged.
    if arr.size == input_dim:
        return arr.reshape((1, input_dim))
    return arr
Reshapes x into a matrix with input_dim columns
def chdir(path: str) -> Iterator[None]:
    """Context manager for changing dir and restoring previous workdir
    after exit.
    """
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        os.chdir(previous)
Context manager for changing dir and restoring previous workdir after exit.
def parse_delete(prs, conn):
    """Delete record.

    Arguments:

        prs:  parser object of argparse
        conn: dictionary of connection information
    """
    subparser = prs.add_parser(
        'delete', help='delete a record of specific zone')
    set_option(subparser, 'domain')
    conn_options(subparser, conn)
    subparser.set_defaults(func=delete)
Delete record. Arguments: prs: parser object of argparse conn: dictionary of connection information
def reload(self, client=None):
    """API call:  reload the config via a ``GET`` request.

    This method will reload the newest data for the config.

    See
    https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs/get

    :type client: :class:`google.cloud.runtimeconfig.client.Client`
    :param client:
        (Optional) The client to use.  If not passed, falls back to the
        client stored on the current config.
    """
    client = self._require_client(client)

    # The GET raises NotFound when the config does not exist, so no
    # explicit existence check is performed here.
    api_response = client._connection.api_request(method="GET", path=self.path)
    self._set_properties(api_response=api_response)
API call: reload the config via a ``GET`` request. This method will reload the newest data for the config. See https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs/get :type client: :class:`google.cloud.runtimeconfig.client.Client` :param client: (Optional) The client to use. If not passed, falls back to the client stored on the current config.
def schedule_in(self, job, timedelta):
    """ Schedule job to run at datetime.timedelta from now.

    :param job: the job to schedule.
    :param timedelta: a ``datetime.timedelta`` offset from now.
    """
    # int() auto-promotes to arbitrary precision on Python 2, so it is a
    # safe 2/3-compatible replacement for the former long() call.
    now = int(self._now() * 1e6)
    when = now + timedelta.total_seconds() * 1e6
    self.schedule(job, when)
Schedule job to run at datetime.timedelta from now.
def stop_process(self):
    """
    Stop the process.

    :raises: EnvironmentError if stopping fails due to unknown environment
    TestStepError if process stops with non-default returncode and return
    code is not ignored.
    """
    # Stop the reader thread first so it does not race the dying process.
    if self.read_thread is not None:
        self.logger.debug("stop_process::readThread.stop()-in")
        self.read_thread.stop()
        self.logger.debug("stop_process::readThread.stop()-out")
    returncode = None
    if self.proc:
        self.logger.debug("os.killpg(%d)", self.proc.pid)
        # Escalate through increasingly forceful signals until the
        # process exits.
        for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
            timeout = 5
            try:
                try:
                    self.logger.debug("Trying signal %s", sig)
                    os.killpg(self.proc.pid, sig)
                except AttributeError:
                    self.logger.debug("os.killpg::AttributeError")
                    # Failed most likely because in windows,
                    # so use taskkill to kill whole process tree of proc
                    if platform.system() == "Windows":
                        subprocess.call(['taskkill', '/F', '/T', '/PID',
                                         str(self.proc.pid)])
                    else:
                        self.logger.debug("os.killpg::unknown env")
                        raise EnvironmentError("Unknown platform, "
                                               "don't know how to terminate process")
                # Poll up to 5 seconds for the process to die before
                # escalating to the next signal.
                while self.proc.poll() is None and timeout > 0:
                    time.sleep(1)
                    timeout -= 1
                returncode = self.proc.poll()
                if returncode is not None:
                    break
            except OSError as error:
                # killpg can fail if the process already exited.
                self.logger.info("os.killpg::OSError: %s", error)
        self.proc = None
    if returncode is not None:
        self.logger.debug("Process stopped with returncode %s" % returncode)
        if returncode != self.default_retcode and not self.__ignore_return_code:
            raise TestStepError("Process stopped with returncode %d" % returncode)
    self.logger.debug("stop_process-out")
Stop the process. :raises: EnvironmentError if stopping fails due to unknown environment TestStepError if process stops with non-default returncode and return code is not ignored.
def tracer(object):
    """
    | Traces execution.
    | Any method / definition decorated will have it's execution traced.

    :param object: Object to decorate.
    :type object: object
    :return: Object.
    :rtype: object

    .. note:: Uses Python 2 function attributes (``func_code``,
        ``func_defaults``); not compatible with Python 3 as written.
    """

    @functools.wraps(object)
    @functools.partial(validate_tracer, object)
    def tracer_wrapper(*args, **kwargs):
        """
        Traces execution.

        :param \*args: Arguments.
        :type \*args: \*
        :param \*\*kwargs: Keywords arguments.
        :type \*\*kwargs: \*\*
        :return: Object.
        :rtype: object
        """

        code = object.func_code
        args_count = code.co_argcount
        # Names of the declared positional parameters.
        args_names = code.co_varnames[:args_count]
        function_defaults = object.func_defaults or list()
        # Map trailing parameter names to their default values.
        args_defaults = dict(zip(args_names[-len(function_defaults):], function_defaults))

        # Split the actual call into: positionals that were passed,
        # defaults that were not overridden, extra *args, and **kwargs.
        positional_args = map(format_argument, zip(args_names, args))
        defaulted_args = [format_argument((name, args_defaults[name]))
                          for name in args_names[len(args):] if name not in kwargs]
        nameless_args = map(repr, args[args_count:])
        keyword_args = map(format_argument, kwargs.items())
        sys.stdout.write("{0}({1})\n".format(get_trace_name(object),
                                             ", ".join(itertools.chain(positional_args,
                                                                       defaulted_args,
                                                                       nameless_args,
                                                                       keyword_args))))
        return object(*args, **kwargs)

    return tracer_wrapper
| Traces execution. | Any method / definition decorated will have it's execution traced. :param object: Object to decorate. :type object: object :return: Object. :rtype: object
def update_dimension(model, input_dimension):
    """
    Given a model that takes an array of dimension input_dimension,
    returns the output dimension.

    :param model: a fitted sklearn OneHotEncoder-like model (must expose
        ``active_features_`` and ``n_values_``).
    :param input_dimension: number of input features.
    :return: number of output features after one-hot expansion.
    :raises RuntimeError: when scikit-learn is not installed.
    """
    if not(_HAS_SKLEARN):
        raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')

    _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'active_features_'))
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'n_values_'))
    if model.categorical_features == 'all':
        # Every column is one-hot encoded.
        return len(model.active_features_)
    else:
        # Encoded columns expand to active_features_; the remaining
        # pass-through columns keep their original width.
        out_dimension = (len(model.active_features_) +
                         (input_dimension - len(model.n_values_)))
        return out_dimension
Given a model that takes an array of dimension input_dimension, returns the output dimension.
def driver(self, sim):
    """
    Push data to interface

    set vld high and wait on rd in high then pass new data

    :param sim: simulator instance providing read/write and scheduling.
    """
    r = sim.read

    # pop new data if there are not any pending
    if self.actualData is NOP and self.data:
        self.actualData = self.data.popleft()

    doSend = self.actualData is not NOP

    # update data on signals if is required
    if self.actualData is not self._lastWritten:
        if doSend:
            self.doWrite(sim, self.actualData)
        else:
            self.doWrite(sim, None)

        self._lastWritten = self.actualData

    en = self.notReset(sim)
    vld = int(en and doSend)
    # only touch vld when its value actually changes
    if self._lastVld is not vld:
        self.wrVld(sim.write, vld)
        self._lastVld = vld

    if not self._enabled:
        # we can not check rd it in this function because we can not wait
        # because we can be reactivated in this same time
        sim.add_process(self.checkIfRdWillBeValid(sim))
        return

    # wait of response of slave
    yield sim.waitOnCombUpdate()

    rd = self.isRd(r)
    assert rd.vldMask, (sim.now, self.intf, "rd signal in invalid state")
    if not vld:
        return

    if rd.val:
        # slave did read data, take new one
        if self._debugOutput is not None:
            self._debugOutput.write("%s, wrote, %d: %r\n" % (
                self.intf._getFullName(),
                sim.now,
                self.actualData))

        a = self.actualData
        # pop new data, because actual was read by slave
        if self.data:
            self.actualData = self.data.popleft()
        else:
            self.actualData = NOP

        # try to run onDriverWriteAck if there is any
        onDriverWriteAck = getattr(self, "onDriverWriteAck", None)
        if onDriverWriteAck is not None:
            onDriverWriteAck(sim)

        onDone = getattr(a, "onDone", None)
        if onDone is not None:
            onDone(sim)
Push data to interface set vld high and wait on rd in high then pass new data
def add_suffix(self, suffix):
    """
    Suffix labels with string `suffix`.

    For Series, the row labels are suffixed.
    For DataFrame, the column labels are suffixed.

    Parameters
    ----------
    suffix : str
        The string to add after each label.

    Returns
    -------
    Series or DataFrame
        New Series or DataFrame with updated labels.

    See Also
    --------
    Series.add_prefix: Prefix row labels with string `prefix`.
    DataFrame.add_prefix: Prefix column labels with string `prefix`.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.add_suffix('_item')
    0_item    1
    1_item    2
    2_item    3
    3_item    4
    dtype: int64
    """
    def _append_suffix(label):
        # Positional formatting stringifies non-string labels too.
        return "{}{}".format(label, suffix)

    # rename along the info axis: index for Series, columns for DataFrame.
    return self.rename(**{self._info_axis_name: _append_suffix})
Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6
def cd(self, path=None):
    """
    Get or set the current working directory from the underlying
    interpreter (see https://en.wikipedia.org/wiki/Working_directory).

    Args:
        path: New working directory or None (to display the working
        directory).

    Returns:
        Current working directory.
    """
    if path is None:
        operation = lambda: self._impl.cd()
    else:
        operation = lambda: self._impl.cd(path)
    # All interpreter access is serialized through the instance lock.
    return lock_and_call(operation, self._lock)
Get or set the current working directory from the underlying interpreter (see https://en.wikipedia.org/wiki/Working_directory). Args: path: New working directory or None (to display the working directory). Returns: Current working directory.
def _Close(self):
    """Closes the file system.

    Raises:
      IOError: if the close failed.
    """
    # Release the archive wrapper before the file object it reads from.
    self._cpio_archive_file.Close()
    self._cpio_archive_file = None

    self._file_object.close()
    self._file_object = None
Closes the file system. Raises: IOError: if the close failed.
def create_schema(self):
    """Create all necessary tables.

    Idempotent: probes for ``gauged_statistics`` first and returns
    immediately when the schema already exists.  The DO INSTEAD NOTHING
    rules emulate "insert if not exists" for keys, cache and metadata.
    """
    execute = self.cursor.execute
    try:
        # If this succeeds the schema already exists; nothing to do.
        return execute('SELECT 1 FROM gauged_statistics')
    except self.psycopg2.ProgrammingError:
        pass
    # The failed probe aborted the transaction; roll back before DDL.
    self.db.rollback()
    execute("""CREATE TABLE IF NOT EXISTS gauged_data (
            namespace integer NOT NULL,
            "offset" integer NOT NULL,
            key bigint NOT NULL,
            data bytea NOT NULL,
            flags integer NOT NULL,
            PRIMARY KEY ("offset", namespace, key));
        CREATE TABLE IF NOT EXISTS gauged_keys (
            id serial PRIMARY KEY,
            namespace integer NOT NULL,
            key varchar NOT NULL);
        CREATE UNIQUE INDEX ON gauged_keys (
            namespace, key);
        CREATE OR REPLACE RULE gauged_ignore_duplicate_keys AS
            ON INSERT TO gauged_keys WHERE EXISTS (
                SELECT 1 FROM gauged_keys
                WHERE key = NEW.key AND namespace = NEW.namespace)
            DO INSTEAD NOTHING;
        CREATE TABLE IF NOT EXISTS gauged_writer_history (
            id varchar PRIMARY KEY,
            timestamp bigint NOT NULL);
        CREATE TABLE IF NOT EXISTS gauged_cache (
            namespace integer NOT NULL,
            key bigint NOT NULL,
            "hash" bytea NOT NULL,
            length bigint NOT NULL,
            start bigint NOT NULL,
            value real,
            PRIMARY KEY(namespace, hash, length, start));
        CREATE OR REPLACE RULE gauged_ignore_duplicate_cache AS
            ON INSERT TO gauged_cache WHERE EXISTS (
                SELECT 1 FROM gauged_cache
                WHERE namespace = NEW.namespace AND "hash" = NEW.hash
                AND length = NEW.length AND start = NEW.start)
            DO INSTEAD NOTHING;
        CREATE TABLE IF NOT EXISTS gauged_statistics (
            namespace integer NOT NULL,
            "offset" integer NOT NULL,
            data_points integer NOT NULL,
            byte_count integer NOT NULL,
            PRIMARY KEY (namespace, "offset"));
        CREATE TABLE IF NOT EXISTS gauged_metadata (
            key varchar PRIMARY KEY,
            value varchar NOT NULL);
        CREATE OR REPLACE RULE gauged_ignore_duplicate_metadata AS
            ON INSERT TO gauged_metadata WHERE EXISTS (
                SELECT 1 FROM gauged_metadata
                WHERE key = NEW.key)
            DO INSTEAD NOTHING""")
    self.db.commit()
Create all necessary tables
def create_reference_server_flask_app(cfg):
    """Create reference server Flask application with one or more IIIF handlers.

    :param cfg: Config object providing ``debug`` and ``api_versions``.
    :return: configured :class:`flask.Flask` application.
    """
    # Create Flask app
    app = Flask(__name__)
    # Set the secret on the instance, not on the Flask class, so other
    # Flask apps in the same process are unaffected.
    app.secret_key = "SECRET_HERE"
    app.debug = cfg.debug
    # Install request handlers, one per configured API version.
    for api_version in cfg.api_versions:
        handler_config = Config(cfg)
        handler_config.api_version = api_version
        handler_config.klass_name = 'pil'
        handler_config.auth_type = 'none'
        # Set same prefix on local server as expected on iiif.io
        handler_config.prefix = "api/image/%s/example/reference" % (api_version)
        handler_config.client_prefix = handler_config.prefix
        add_handler(app, handler_config)
    return app
Create reference server Flask application with one or more IIIF handlers.
def use_comparative_bin_view(self):
    """Pass through to provider ResourceBinSession.use_comparative_bin_view"""
    self._bin_view = COMPARATIVE
    # self._get_provider_session('resource_bin_session')  # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_bin_view()
        except AttributeError:
            # Provider session does not support bin views; skip it.
            pass
Pass through to provider ResourceBinSession.use_comparative_bin_view
def func(self, node):
    'func = "(" parameters ")" _ "->" expr'
    # node is the parse-tree tuple for a lambda literal; keep the
    # parameter names and the unevaluated body expression.
    _, params, _, _, _, expr = node
    params = map(self.eval, params)
    def func(*args):
        # Python 2 idiom: dict(items-list + pairs-list) builds the new
        # environment; call-time args shadow the captured environment.
        env = dict(self.env.items() + zip(params, args))
        return Mini(env).eval(expr)
    return func
func = "(" parameters ")" _ "->" expr
def list_all(self):
    '''
    Return a list of the reactors

    :return: the reactor map (dict); empty when the reactor file could
        not be read or parsed.
    '''
    # Previously react_map was left unbound when reading the reactor
    # file failed, raising UnboundLocalError at the return below.
    react_map = {}
    if isinstance(self.minion.opts['reactor'], six.string_types):
        log.debug('Reading reactors from yaml %s', self.opts['reactor'])
        try:
            with salt.utils.files.fopen(self.opts['reactor']) as fp_:
                react_map = salt.utils.yaml.safe_load(fp_)
        except (OSError, IOError):
            log.error('Failed to read reactor map: "%s"', self.opts['reactor'])
        except Exception:
            log.error(
                'Failed to parse YAML in reactor map: "%s"',
                self.opts['reactor']
            )
    else:
        log.debug('Not reading reactors from yaml')
        react_map = self.minion.opts['reactor']
    return react_map
Return a list of the reactors
def shift_rows(state):
    """
    Transformation in the Cipher that processes the State by cyclically
    shifting the last three rows of the State by different offsets.

    NOTE(review): the input is reshaped to a 4x4 grid of 8-bit cells and
    re-emitted in a fixed permutation via ``fcat``; the diagonal access
    pattern (state[r][(r+c) % 4]) implements the AES row rotation on a
    column-major state layout -- confirm layout against the caller.
    """
    state = state.reshape(4, 4, 8)
    return fcat(
        state[0][0], state[1][1], state[2][2], state[3][3],
        state[1][0], state[2][1], state[3][2], state[0][3],
        state[2][0], state[3][1], state[0][2], state[1][3],
        state[3][0], state[0][1], state[1][2], state[2][3]
    )
Transformation in the Cipher that processes the State by cyclically shifting the last three rows of the State by different offsets.
def Missenard(T, P, Tc, Pc, kl):
    r'''Adjusts for pressure the thermal conductivity of a liquid using an
    empirical formula based on [1]_, but as given in [2]_.

    .. math::
        \frac{k}{k^*} = 1 + Q P_r^{0.7}

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    P : float
        Pressure of fluid [Pa]
    Tc: float
        Critical point of fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    kl : float
        Thermal conductivity of liquid at 1 atm or saturation, [W/m/K]

    Returns
    -------
    kl_dense : float
        Thermal conductivity of liquid at P, [W/m/K]

    Notes
    -----
    This equation is entirely dimensionless; all dimensions cancel.
    An interpolation routine is used here from tabulated values of Q.
    The original source has not been reviewed.

    Examples
    --------
    Example from [2]_, toluene; matches.

    >>> Missenard(304., 6330E5, 591.8, 41E5, 0.129)
    0.2198375777069657

    References
    ----------
    .. [1] Missenard, F. A., Thermal Conductivity of Organic Liquids of a
       Series or a Group of Liquids , Rev. Gen.Thermodyn., 101 649 (1970).
    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
       New York: McGraw-Hill Professional, 2000.
    '''
    # Reduced temperature and pressure select Q from the tabulated surface.
    reduced_T = T / Tc
    reduced_P = P / Pc
    Q = float(Qfunc_Missenard(reduced_P, reduced_T))
    return kl * (1. + Q * reduced_P**0.7)
r'''Adjusts for pressure the thermal conductivity of a liquid using an
    empirical formula based on [1]_, but as given in [2]_.

    .. math::
        \frac{k}{k^*} = 1 + Q P_r^{0.7}

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    P : float
        Pressure of fluid [Pa]
    Tc: float
        Critical point of fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    kl : float
        Thermal conductivity of liquid at 1 atm or saturation, [W/m/K]

    Returns
    -------
    kl_dense : float
        Thermal conductivity of liquid at P, [W/m/K]

    Notes
    -----
    This equation is entirely dimensionless; all dimensions cancel.
    An interpolation routine is used here from tabulated values of Q.
    The original source has not been reviewed.

    Examples
    --------
    Example from [2]_, toluene; matches.

    >>> Missenard(304., 6330E5, 591.8, 41E5, 0.129)
    0.2198375777069657

    References
    ----------
    .. [1] Missenard, F. A., Thermal Conductivity of Organic Liquids of a
       Series or a Group of Liquids , Rev. Gen.Thermodyn., 101 649 (1970).
    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
       New York: McGraw-Hill Professional, 2000.
def max_id_length(self):
    """
    Returns the maximum length of a todo ID, used for formatting purposes.
    """
    if config().identifiers() != "text":
        # Numeric IDs: digits needed to print the highest index.
        try:
            return math.ceil(math.log(len(self._todos), 10))
        except ValueError:
            # log() raises on an empty list; no width is needed then.
            return 0
    # Text identifiers delegate to the module-level helper of the same name.
    return max_id_length(len(self._todos))
Returns the maximum length of a todo ID, used for formatting purposes.
def __set_data(self, data):
    """
    Sets the given state data on the entity referenced by this instance.

    :param data: State data to set.
    :type data: Dictionary mapping attributes to attribute values.
    """
    entity = self.__entity_ref()
    self.set_state_data(entity, data)
Sets the given state data on the given entity of the given class. :param data: State data to set. :type data: Dictionary mapping attributes to attribute values. :param entity: Entity to receive the state data.
def reload(self, index):
    """Reload file from disk.

    :param index: position of the file in ``self.data``.

    Re-reads the file, replaces the editor text, clears the modified
    flags and restores the cursor position.
    """
    finfo = self.data[index]
    txt, finfo.encoding = encoding.read(finfo.filename)
    finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
    # Remember the caret so it can be restored after the text swap.
    position = finfo.editor.get_position('cursor')
    finfo.editor.set_text(txt)
    finfo.editor.document().setModified(False)
    finfo.editor.document().changed_since_autosave = False
    finfo.editor.set_cursor_position(position)

    #XXX CodeEditor-only: re-scan the whole text to rebuild outline
    # explorer data from scratch (could be optimized because
    # rehighlighting text means searching for all syntax coloring
    # patterns instead of only searching for class/def patterns which
    # would be sufficient for outline explorer data.
    finfo.editor.rehighlight()

    self._refresh_outlineexplorer(index)
Reload file from disk
def status(self):
    """Return the current status of the solve.

    Returns:
      SolverStatus: Enum representation of the state of the solver.
    """
    if self.request_list.conflict:
        return SolverStatus.failed

    if self.callback_return == SolverCallbackReturn.fail:
        # the solve has failed because a callback has nominated the most
        # recent failure as the reason.
        return SolverStatus.failed

    # The top of the phase stack is the most recent solve phase.
    st = self.phase_stack[-1].status
    if st == SolverStatus.cyclic:
        return SolverStatus.failed
    elif len(self.phase_stack) > 1:
        # Multiple phases: only a fully solved top phase means success.
        if st == SolverStatus.solved:
            return SolverStatus.solved
        else:
            return SolverStatus.unsolved
    elif st in (SolverStatus.pending, SolverStatus.exhausted):
        # Single phase that has not produced a result yet.
        return SolverStatus.unsolved
    else:
        return st
Return the current status of the solve. Returns: SolverStatus: Enum representation of the state of the solver.
def get_next(self,oid):
    """Return snmp value for the next OID.

    :param oid: dotted OID string to advance from.
    :return: the value of the lexically-next OID, or "NONE" at end of MIB.
    """
    try: # Nested try..except because of Python 2.4
        self.lock.acquire()
        try:
            # remove trailing zeroes from the oid
            while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
                oid = oid[:-2];
            # Advance one position in the sorted OID index.
            return self.get(self.data_idx[self.data_idx.index(oid)+1])
        except ValueError:
            # Not found: try to match partial oid
            for real_oid in self.data_idx:
                if real_oid.startswith(oid):
                    return self.get(real_oid)
            return "NONE" # Unknown OID
        except IndexError:
            return "NONE" # End of MIB
    finally:
        self.lock.release()
Return snmp value for the next OID.
def topoplot(values, locations, axes=None, offset=(0, 0), plot_locations=True,
             plot_head=True, **kwargs):
    """Wrapper function for :class:`Topoplot`.

    Builds a Topoplot, draws the interpolated map, then optionally the
    electrode locations and the head outline.
    """
    topo = Topoplot(**kwargs)
    topo.set_locations(locations)
    topo.set_values(values)
    topo.create_map()
    topo.plot_map(axes=axes, offset=offset)
    optional_layers = ((plot_locations, topo.plot_locations),
                       (plot_head, topo.plot_head))
    for enabled, draw in optional_layers:
        if enabled:
            draw(axes=axes, offset=offset)
    return topo
Wrapper function for :class:`Topoplot.
def groupby(self, group, squeeze: bool = True): """Returns a GroupBy object for performing grouped operations. Parameters ---------- group : str, DataArray or IndexVariable Array whose unique values should be used to group this array. If a string, must be the name of a variable contained in this dataset. squeeze : boolean, optional If "group" is a dimension of any arrays in this dataset, `squeeze` controls whether the subarrays have a dimension of length 1 along that dimension or if the dimension is squeezed out. Returns ------- grouped : GroupBy A `GroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. Examples -------- Calculate daily anomalies for daily data: >>> da = xr.DataArray(np.linspace(0, 1826, num=1827), ... coords=[pd.date_range('1/1/2000', '31/12/2004', ... freq='D')], ... dims='time') >>> da <xarray.DataArray (time: 1827)> array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03]) Coordinates: * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ... >>> da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time') <xarray.DataArray (time: 1827)> array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5]) Coordinates: * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ... dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ... See Also -------- core.groupby.DataArrayGroupBy core.groupby.DatasetGroupBy """ # noqa return self._groupby_cls(self, group, squeeze=squeeze)
Returns a GroupBy object for performing grouped operations. Parameters ---------- group : str, DataArray or IndexVariable Array whose unique values should be used to group this array. If a string, must be the name of a variable contained in this dataset. squeeze : boolean, optional If "group" is a dimension of any arrays in this dataset, `squeeze` controls whether the subarrays have a dimension of length 1 along that dimension or if the dimension is squeezed out. Returns ------- grouped : GroupBy A `GroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. Examples -------- Calculate daily anomalies for daily data: >>> da = xr.DataArray(np.linspace(0, 1826, num=1827), ... coords=[pd.date_range('1/1/2000', '31/12/2004', ... freq='D')], ... dims='time') >>> da <xarray.DataArray (time: 1827)> array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03]) Coordinates: * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ... >>> da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time') <xarray.DataArray (time: 1827)> array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5]) Coordinates: * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ... dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ... See Also -------- core.groupby.DataArrayGroupBy core.groupby.DatasetGroupBy
def _build_connstr(host, port, bucket):
    """
    Converts a 1.x host:port specification to a connection string
    """
    if isinstance(host, (tuple, list)):
        # Each entry is either a plain host string or a (host, port) pair.
        hostlist = [
            _fmthost(*entry) if isinstance(entry, (list, tuple)) else entry
            for entry in host
        ]
    else:
        hostlist = [_fmthost(host, port)]

    return 'http://{0}/{1}'.format(','.join(hostlist), bucket)
Converts a 1.x host:port specification to a connection string
def _screen(self, s, newline=False):
    """Print something on screen when self.verbose == True"""
    if not self.verbose:
        return
    # Trailing space keeps subsequent non-newline messages on one line.
    terminator = '\n' if newline else ' '
    print(s, end=terminator)
Print something on screen when self.verbose == True
def freefn(self, key, free_fn):
    """
    Set a free function for the specified hash table item. When the item is
    destroyed, the free function, if any, is called on that item.
    Use this when hash items are dynamically allocated, to ensure that
    you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
    Returns the item, or NULL if there is no such item.
    """
    item = lib.zhash_freefn(self._as_parameter_, key, free_fn)
    return c_void_p(item)
Set a free function for the specified hash table item. When the item is destroyed, the free function, if any, is called on that item. Use this when hash items are dynamically allocated, to ensure that you don't have memory leaks. You can pass 'free' or NULL as a free_fn. Returns the item, or NULL if there is no such item.
def which_roles_can(self, name):
    """Return the roles holding the named permission, e.g. SendMail."""
    permission = AuthPermission.objects(creator=self.client, name=name).first()
    return [{'role': group.role} for group in permission.groups]
Which role can SendMail?
def ping(self, reconnect=True):
    """
    Check if the server is alive.

    :param reconnect: If the connection is closed, reconnect.
    :raise Error: If the connection is closed and reconnect=False.
    """
    if self._sock is None:
        if reconnect:
            self.connect()
            # Connection was just (re)established; do not reconnect again
            # if the ping below still fails.
            reconnect = False
        else:
            raise err.Error("Already closed")
    try:
        self._execute_command(COMMAND.COM_PING, "")
        self._read_ok_packet()
    except Exception:
        if reconnect:
            # One retry after re-establishing the connection; the
            # recursive call uses reconnect=False so it cannot loop.
            self.connect()
            self.ping(False)
        else:
            raise
Check if the server is alive. :param reconnect: If the connection is closed, reconnect. :raise Error: If the connection is closed and reconnect=False.
def render(self, route=None):
    """
    Renders the application and returns the HTML unicode that would
    normally appear when visiting in the browser.
    """
    target = route if route is not None else '/'
    with self.test_client() as client:
        response = client.get(target, follow_redirects=True)
        # Decode using the charset the response reports.
        return response.data.decode(response.charset)
Renders the application and returns the HTML unicode that would normally appear when visiting in the browser.
def signed_tree_multiplier(A, B, reducer=adders.wallace_reducer,
                           adder_func=adders.kogge_stone):
    """Same as tree_multiplier, but uses two's-complement signed integers.

    :param A, B: signed input wire vectors (must be wider than 1 bit so a
        sign bit exists).
    :param reducer: partial-product reduction scheme.
    :param adder_func: final adder used by the tree multiplier.
    :raises pyrtl.PyrtlError: when either input is a single bit wide.
    """
    if len(A) == 1 or len(B) == 1:
        raise pyrtl.PyrtlError("sign bit required, one or both wires too small")

    # Multiply magnitudes, then restore the sign of the product.
    aneg, bneg = A[-1], B[-1]
    a = _twos_comp_conditional(A, aneg)
    b = _twos_comp_conditional(B, bneg)

    res = tree_multiplier(a[:-1], b[:-1]).zero_extended(len(A) + len(B))
    # Product is negative exactly when the operand signs differ.
    return _twos_comp_conditional(res, aneg ^ bneg)
Same as tree_multiplier, but uses two's-complement signed integers
def calc_crc(raw_mac):
    """
    This algorithm is the equivalent of esp_crc8() in ESP32 ROM code

    This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.

    :param raw_mac: exactly 6 bytes of MAC address data.
    :return: 8-bit CRC as an int.
    """
    crc = 0x00
    for byte in struct.unpack("B" * 6, raw_mac):
        crc ^= byte
        for _ in range(8):
            carry = crc & 1
            crc >>= 1
            if carry:
                # reflected polynomial 0x8C
                crc ^= 0x8c
    return crc
This algorithm is the equivalent of esp_crc8() in ESP32 ROM code This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.
def progress(status_code):
    """Translate PROGRESS status codes from GnuPG to messages.

    :param status_code: status keyword reported by GnuPG.
    :return: a human-readable message, or None for unknown codes.
    """
    lookup = {
        'pk_dsa': 'DSA key generation',
        'pk_elg': 'Elgamal key generation',
        'primegen': 'Prime generation',
        'need_entropy': 'Waiting for new entropy in the RNG',
        'tick': 'Generic tick without any special meaning - still working.',
        'starting_agent': 'A gpg-agent was started.',
        'learncard': 'gpg-agent or gpgsm is learning the smartcard data.',
        'card_busy': 'A smartcard is still working.'}
    # Direct dict lookup replaces the former linear scan over items();
    # like the original, unknown codes yield None.
    return lookup.get(str(status_code))
Translate PROGRESS status codes from GnuPG to messages.
def merge_objects(self, mujoco_objects):
    """Adds physical objects to the MJCF model.

    :param mujoco_objects: mapping of object name -> MujocoObject-like
        instance providing get_collision() and get_horizontal_radius().
    """
    self.mujoco_objects = mujoco_objects
    self.objects = {}  # xml manifestation
    self.max_horizontal_radius = 0

    for obj_name, obj_mjcf in mujoco_objects.items():
        # Merge meshes/materials before referencing them from the body.
        self.merge_asset(obj_mjcf)
        # Load object
        obj = obj_mjcf.get_collision(name=obj_name, site=True)
        # Free joint makes the object movable in the world.
        obj.append(new_joint(name=obj_name, type="free", damping="0.0005"))
        self.objects[obj_name] = obj
        self.worldbody.append(obj)

        # Track the widest object for placement computations.
        self.max_horizontal_radius = max(
            self.max_horizontal_radius, obj_mjcf.get_horizontal_radius()
        )
Adds physical objects to the MJCF model.
def isInstalledBuild(self):
    """
    Determines if the Engine is an Installed Build.

    Installed Builds are marked by a sentinel file that Epic ships at
    Engine/Build/InstalledBuild.txt under the engine root.
    """
    root = self.getEngineRoot()
    return os.path.exists(os.path.join(root, 'Engine', 'Build', 'InstalledBuild.txt'))
Determines if the Engine is an Installed Build
def setup_logger():
    """
    Set up and return the basic 'dockerstache' logger.

    Logs INFO-level messages to stdout. The stream handler is attached
    only if the logger has none yet, so calling this function more than
    once no longer duplicates every log line (the original added a new
    handler on each call).

    :return: the configured ``logging.Logger`` instance
    """
    logger = logging.getLogger('dockerstache')
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setLevel(logging.INFO)
        logger.addHandler(handler)
    return logger
setup basic logger
def update(self):
    """
    Write the current configuration settings back to the config file.
    """
    config_path = os.path.join(self.config_dir, CONFIG_FILE_NAME)
    with open(config_path, 'w') as handle:
        self.config.write(handle)
updates the configuration settings
def dict_fun(data, function):
    """
    Apply a function to all values in a dictionary, return a dictionary with results.

    Parameters
    ----------
    data : dict
        a dictionary whose values are adequate input to the second argument
        of this function.
    function : function
        a function that takes one argument

    Returns
    -------
    a dictionary with the same keys as data, such that
    result[key] = function(data[key])
    """
    # Dict comprehension replaces dict(generator); the list() wrapper around
    # items() in the original was redundant.
    return {key: function(value) for key, value in data.items()}
Apply a function to all values in a dictionary, return a dictionary with results. Parameters ---------- data : dict a dictionary whose values are adequate input to the second argument of this function. function : function a function that takes one argument Returns ------- a dictionary with the same keys as data, such that result[key] = function(data[key])
def update(self, key, value):
    """Updates a value of a given key and apply reduction"""
    try:
        metric = self.value[key]
    except KeyError:
        # First observation for this key: lazily create its reduced metric.
        metric = self.value[key] = ReducedMetric(self.reducer)
    metric.update(value)
Updates a value of a given key and apply reduction
def show_xys(self, xs, ys, max_len:int=70)->None:
    "Show the `xs` (inputs) and `ys` (targets). `max_len` is the maximum number of tokens displayed."
    # Imported lazily so the module works outside notebook environments.
    from IPython.display import display, HTML
    # Language-model data has no separate target column; show index + text instead.
    names = ['idx','text'] if self._is_lm else ['text','target']
    items = []
    for i, (x,y) in enumerate(zip(xs,ys)):
        # Truncate each input to max_len space-separated tokens (None = no truncation).
        # NOTE(review): assumes x has a .text attribute (fastai Text item) — confirm.
        txt_x = ' '.join(x.text.split(' ')[:max_len]) if max_len is not None else x.text
        items.append([i, txt_x] if self._is_lm else [txt_x, y])
    items = np.array(items)
    df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
    # -1 means "no column-width limit"; deprecated in newer pandas (use None) —
    # kept as-is for behavioral fidelity.
    with pd.option_context('display.max_colwidth', -1):
        display(HTML(df.to_html(index=False)))
Show the `xs` (inputs) and `ys` (targets). `max_len` is the maximum number of tokens displayed.
def cleanup_docstamp_output(output_dir=''):
    """ Remove the 'tmp*.aux', 'tmp*.out' and 'tmp*.log' files in `output_dir`.

    :param output_dir: directory to scan for temporary LaTeX build files
        (default: current directory).
    """
    # Plain loops instead of the original side-effect-only list comprehension;
    # also avoids shadowing the (Python 2) builtin name `file`.
    for suffix in ('aux', 'out', 'log'):
        for path in glob(os.path.join(output_dir, 'tmp*.{}'.format(suffix))):
            os.remove(path)
Remove the 'tmp*.aux', 'tmp*.out' and 'tmp*.log' files in `output_dir`. :param output_dir:
def main():
    """
    NAME
        orientation_magic.py

    DESCRIPTION
        takes tab delimited field notebook information and converts to MagIC formatted tables

    SYNTAX
        orientation_magic.py [command line options]

    OPTIONS
        -f FILE: specify input file, default is: orient.txt
        -Fsa FILE: specify output file, default is: er_samples.txt
        -Fsi FILE: specify output site location file, default is: er_sites.txt
        -app append/update these data in existing er_samples.txt, er_sites.txt files
        -ocn OCON: specify orientation convention, default is #1 below
        -dcn DCON [DEC]: specify declination convention, default is #1 below
            if DCON = 2, you must supply the declination correction
        -BCN don't correct bedding_dip_dir for magnetic declination -already corrected
        -ncn NCON: specify naming convention: default is #1 below
        -a: averages all bedding poles and uses average for all samples: default is NO
        -gmt HRS: specify hours to subtract from local time to get GMT: default is 0
        -mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM]
            FS-FD field sampling done with a drill
            FS-H field sampling done with hand samples
            FS-LOC-GPS field location done with GPS
            FS-LOC-MAP field location done with map
            SO-POM a Pomeroy orientation device was used
            SO-ASC an ASC orientation device was used
        -DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format.

    Orientation convention:
        Samples are oriented in the field with a "field arrow" and measured in the laboratory
        with a "lab arrow". The lab arrow is the positive X direction of the right handed
        coordinate system of the specimen measurements. The lab and field arrows may not be the
        same. In the MagIC database, we require the orientation (azimuth and plunge) of the X
        direction of the measurements (lab arrow). Here are some popular conventions that
        convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip
        in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and
        sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained
        below.

        [1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
            of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
            lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
        [2] Field arrow is the strike of the plane orthogonal to the drill direction,
            Field dip is the hade of the drill direction.
            Lab arrow azimuth = mag_azimuth-90 Lab arrow dip = -field_dip
        [3] Lab arrow is the same as the drill direction; hade was measured in the field.
            Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
        [4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
        [5] Same as AZDIP convention explained below - azimuth and inclination of the drill
            direction are mag_azimuth and field_dip; lab arrow is as in [1] above.
            lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
        [6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
        [7] all others you will have to either customize your self or e-mail ltauxe@ucsd.edu for help.

    Magnetic declination convention:
        [1] Use the IGRF value at the lat/long and date supplied [default]
        [2] Will supply declination correction
        [3] mag_az is already corrected in file
        [4] Correct mag_az but not bedding_dip_dir

    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation. e.g., TG001a is the
            first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
        [5] site name = sample name
        [6] site name entered in site_name column in the orient.txt format input file
        [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
        NB: all others you will have to either customize your self or e-mail ltauxe@ucsd.edu for help.

    OUTPUT
        output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt
        if using data model 3.0) - this will overwrite any existing files
    """
    args = sys.argv
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    else:
        # Per entry: [flag name (without '-'), required?, default value].
        info = [['WD', False, '.'], ['ID', False, ''], ['f', False, 'orient.txt'],
                ['app', False, False], ['ocn', False, 1], ['dcn', False, 1],
                ['BCN', False, True], ['ncn', False, '1'], ['gmt', False, 0],
                ['mcd', False, ''], ['a', False, False], ['DM', False, 3]]
        # output_dir_path, input_dir_path, orient_file, append, or_con,
        # dec_correction_con, samp_con, hours_from_gmt, method_codes, average_bedding
        # leave off -Fsa, -Fsi b/c defaults in command_line_extractor
        dataframe = extractor.command_line_dataframe(info)
        checked_args = extractor.extract_and_check_args(args, dataframe)
        # NOTE: the unpack order below must match the flag-name list exactly.
        output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, samp_file, site_file, data_model = extractor.get_vars(['WD', 'ID', 'f', 'app', 'ocn', 'dcn', 'BCN', 'ncn', 'gmt', 'mcd', 'a', 'Fsa', 'Fsi', 'DM'], checked_args)
        if input_dir_path == '.':
            input_dir_path = output_dir_path
        # "-dcn 2 <DEC>" arrives as a single string "2 <DEC>"; split it into the
        # convention number and the declination correction value.
        if not isinstance(dec_correction_con, int):
            if len(dec_correction_con) > 1:
                dec_correction = int(dec_correction_con.split()[1])
                dec_correction_con = int(dec_correction_con.split()[0])
            else:
                dec_correction = 0
        else:
            dec_correction = 0
        ipmag.orientation_magic(or_con, dec_correction_con, dec_correction, bed_correction, samp_con,
                                hours_from_gmt, method_codes, average_bedding, orient_file, samp_file,
                                site_file, output_dir_path, input_dir_path, append, data_model)
NAME orientation_magic.py DESCRIPTION takes tab delimited field notebook information and converts to MagIC formatted tables SYNTAX orientation_magic.py [command line options] OPTIONS -f FILE: specify input file, default is: orient.txt -Fsa FILE: specify output file, default is: er_samples.txt -Fsi FILE: specify output site location file, default is: er_sites.txt -app append/update these data in existing er_samples.txt, er_sites.txt files -ocn OCON: specify orientation convention, default is #1 below -dcn DCON [DEC]: specify declination convention, default is #1 below if DCON = 2, you must supply the declination correction -BCN don't correct bedding_dip_dir for magnetic declination -already corrected -ncn NCON: specify naming convention: default is #1 below -a: averages all bedding poles and uses average for all samples: default is NO -gmt HRS: specify hours to subtract from local time to get GMT: default is 0 -mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM] FS-FD field sampling done with a drill FS-H field sampling done with hand samples FS-LOC-GPS field location done with GPS FS-LOC-MAP field location done with map SO-POM a Pomeroy orientation device was used SO-ASC an ASC orientation device was used -DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format. Orientation convention: Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). 
Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below. [1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down) of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth; lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade. [2] Field arrow is the strike of the plane orthogonal to the drill direction, Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90 Lab arrow dip = -field_dip [3] Lab arrow is the same as the drill direction; hade was measured in the field. Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip [4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too [5] Same as AZDIP convention explained below - azimuth and inclination of the drill direction are mag_azimuth and field_dip; lab arrow is as in [1] above. lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90 [6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip [7] all others you will have to either customize your self or e-mail ltauxe@ucsd.edu for help. Magnetic declination convention: [1] Use the IGRF value at the lat/long and date supplied [default] [2] Will supply declination correction [3] mag_az is already corrected in file [4] Correct mag_az but not bedding_dip_dir Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. 
[default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name = sample name [6] site name entered in site_name column in the orient.txt format input file [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY NB: all others you will have to either customize your self or e-mail ltauxe@ucsd.edu for help. OUTPUT output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0) - this will overwrite any existing files
def _inject():
    """ Copy functions from OpenGL.GL into _pyopengl namespace.
    """
    namespace = _pyopengl2.__dict__
    for gl_name, our_name in _pyopengl2._functions_to_import:
        # Resolve the PyOpenGL implementation and expose it under our name.
        namespace[our_name] = _get_function_from_pyopengl(gl_name)
Copy functions from OpenGL.GL into _pyopengl namespace.
def load_ipa_data():
    """
    Load the IPA data from the built-in IPA database, creating the following globals:

    1. ``IPA_CHARS``: list of all IPAChar objects
    2. ``UNICODE_TO_IPA``: dict mapping a Unicode string (often, a single char) to an IPAChar
    3. ``UNICODE_TO_IPA_MAX_KEY_LENGTH``: length of a longest key in ``UNICODE_TO_IPA``
    4. ``IPA_TO_UNICODE``: map an IPAChar canonical representation to the corresponding Unicode string (or char)
    """
    ipa_signs = []
    unicode_to_ipa = {}
    ipa_to_unicode = {}
    max_key_length = 0
    # Ordered (keyword, class) pairs; order mirrors the original elif chain,
    # so e.g. "consonant" is tested before "vowel".
    type_table = (
        (u"consonant", IPAConsonant),
        (u"vowel", IPAVowel),
        (u"diacritic", IPADiacritic),
        (u"suprasegmental", IPASuprasegmental),
        (u"tone", IPATone),
    )
    for line in load_data_file(
        file_path=u"ipa.dat",
        file_path_is_relative=True,
        line_format=u"sU"
    ):
        # unpack data
        i_desc, i_unicode_keys = line
        # collapse runs of spaces in the description to form the name
        name = re.sub(r" [ ]*", " ", i_desc)
        # instantiate the first IPACharacter subclass whose keyword occurs in the description
        for keyword, ipa_cls in type_table:
            if keyword in i_desc:
                obj = ipa_cls(name=name, descriptors=i_desc)
                break
        else:
            raise ValueError("The IPA data file contains a bad line, defining an unknown type: '%s'" % (line))
        ipa_signs.append(obj)
        # map Unicode codepoint to object, if the former is available
        if len(i_unicode_keys) > 0:
            # the first key is the canonical Unicode string
            first_key = i_unicode_keys[0]
            ipa_to_unicode[obj.canonical_representation] = first_key
            obj.unicode_repr = first_key
            max_key_length = max(max_key_length, len(first_key))
            # register every Unicode string, rejecting duplicates
            for key in i_unicode_keys:
                if key in unicode_to_ipa:
                    raise ValueError("The IPA data file contains a bad line, redefining codepoint '%s': '%s'" % (key, line))
                unicode_to_ipa[key] = obj
    return ipa_signs, unicode_to_ipa, max_key_length, ipa_to_unicode
Load the IPA data from the built-in IPA database, creating the following globals: 1. ``IPA_CHARS``: list of all IPAChar objects 2. ``UNICODE_TO_IPA``: dict mapping a Unicode string (often, a single char) to an IPAChar 3. ``UNICODE_TO_IPA_MAX_KEY_LENGTH``: length of a longest key in ``UNICODE_TO_IPA`` 4. ``IPA_TO_UNICODE``: map an IPAChar canonical representation to the corresponding Unicode string (or char)