content
stringlengths
22
815k
id
int64
0
4.91M
def test_get_sensitive_hits_object_detail_when_invalid_values_are_provided(client, snapshot_id, snappable_id): """ Tests get_sensitive_hits_object_detail method of PolarisClient when invalid values are provided """ from rubrik_polaris.sonar.object import get_sensitive_hits_object_detail, ERROR_MESSAGES with pytest.raises(ValueError) as e: get_sensitive_hits_object_detail(client, snapshot_id=snapshot_id, snappable_id=snappable_id) assert str(e.value) == ERROR_MESSAGES['MISSING_PARAMETERS']
5,329,800
def qc(): """quick-command: a cli for remembering commands""" pass
5,329,801
def credentials(): """ Fixture to extract the user credentials for logging into the Tark database. Requires that there is a crd.json file in the root directory of the module. Returns ------- crd : dict Dictionary object of the user, password, host, port and db """ with open('crd.json') as file_handle: crd = json.loads(file_handle.read()) yield crd
5,329,802
def gunzip(content): """ Decompression is applied if the first to bytes matches with the gzip magic numbers. There is once chance in 65536 that a file that is not gzipped will be ungzipped. """ if len(content) == 0: raise DecompressionError('File contains zero bytes.') gzip_magic_numbers = [ 0x1f, 0x8b ] first_two_bytes = [ byte for byte in bytearray(content)[:2] ] if first_two_bytes != gzip_magic_numbers: raise DecompressionError('File is not in gzip format. Magic numbers {}, {} did not match {}, {}.'.format( hex(first_two_bytes[0]), hex(first_two_bytes[1]), hex(gzip_magic_numbers[0]), hex(gzip_magic_numbers[1]) )) return deflate.gzip_decompress(content)
5,329,803
def write_to_fits(file_name, data): """Write FITS file This method writes the output image array data to a FITS file. Parameters ---------- file_name : str Name of file with path data : np.ndarray Image data array """ fits.PrimaryHDU(data).writeto(file_name)
5,329,804
def get_rigid_elements_with_node_ids(model: BDF, node_ids): """ Gets the series of rigid elements that use specific nodes Parameters ---------- node_ids : List[int] the node ids to check Returns ------- rbes : List[int] the set of self.rigid_elements """ try: nids = set(node_ids) except TypeError: print(node_ids) raise rbes = [] for eid, rigid_element in model.rigid_elements.items(): if rigid_element.type in ['RBE3', 'RBE2', 'RBE1', 'RBAR', 'RSPLINE', 'RROD', 'RBAR1']: independent_nodes = set(rigid_element.independent_nodes) dependent_nodes = set(rigid_element.dependent_nodes) rbe_nids = independent_nodes | dependent_nodes if nids.intersection(rbe_nids): rbes.append(eid) elif rigid_element.type == 'RSSCON': msg = 'skipping card in get_rigid_elements_with_node_ids\n%s' % str(rigid_element) model.log.warning(msg) else: raise RuntimeError(rigid_element.type) return rbes
5,329,805
def genFileBase(f): """ Given a filename, generate a safe 'base' name for HTML and PNG filenames """ baseName = w2res.getBaseMulti(f) baseName = "R"+w2res.removeGDBCharacters(baseName) return baseName
5,329,806
def retrieve_results_average(query, index, k=10, verbose=False, tfidf=False): """ (NOT USED) Given a query, return most similar papers from the specified FAISS index. Also prunes the resulting papers by filtering out papers whose authors do not have tags. This uses the average paper representations per author. Parameters: query (string): The search query index (obj): The loaded FAISS index populated by paper embeddings k (int): The amount of papers to retrieve verbose (bool): Whether to output the debugging information or not tfidf (bool): Whether the tf-idf embeddings are used for retrieval instead of SBERT. Returns: pruned (list): pruned list of most similar papers to the query """ initial_retrieval = k*5 s = time.time() if tfidf: most_similar_raw = get_most_similar_ids(query, index, initial_retrieval, tfidf_clf) else: most_similar_raw = get_most_similar_ids(query, index, initial_retrieval) s1 = time.time() pruned = prune_results_for_authors_wo_tags_average(most_similar_raw, query, k) s2 = time.time() if verbose: print(f"Full search execution time: {time.time() - s} seconds") print(f"from which {s1-s} s. in the search and {s2 - s1} s. in the pruning.") print("===") print("Pruned author IDS, sorted by similarity:") print(pruned[0]) print('Similarity scores:') print(pruned[1]) return pruned
5,329,807
def part1(data): """ """
5,329,808
def _json_custom_hook(d): """Serialize NumPy arrays.""" if isinstance(d, dict) and '__ndarray__' in d: data = base64.b64decode(d['__ndarray__']) return np.frombuffer(data, d['dtype']).reshape(d['shape']) elif isinstance(d, dict) and '__qbytearray__' in d: return _decode_qbytearray(d['__qbytearray__']) return d
5,329,809
def basic_pyxll_function_3(x): """docstrings appear as help text in Excel""" return x
5,329,810
def log_mean_exp(x, dim=1): """ log(1/k * sum(exp(x))): this normalizes x. @param x: PyTorch.Tensor samples from gaussian @param dim: integer (default: 1) which dimension to take the mean over @return: PyTorch.Tensor mean of x """ m = torch.max(x, dim=dim, keepdim=True)[0] return m + torch.log(torch.mean(torch.exp(x - m), dim=dim, keepdim=True))
5,329,811
def get_shot_end_frame(shot_node): """ Returns the end frame of the given shot :param shot_node: str :return: int """ return maya.cmds.getAttr('{}.endFrame'.format(shot_node))
5,329,812
def query_iterator(query, limit=50): """Iterates over a datastore query while avoiding timeouts via a cursor. Especially helpful for usage in backend-jobs.""" cursor = None while True: bucket, cursor, more_objects = query.fetch_page(limit, start_cursor=cursor) if not bucket: break for entity in bucket: yield entity if not more_objects: break
5,329,813
def test_poll_for_activity_identity(monkeypatch, poll=poll): """Test that identity is passed to poll_for_activity. """ current_activity = activity_run(monkeypatch, poll) current_activity.poll_for_activity(identity='foo') current_activity.poll.assert_called_with(identity='foo')
5,329,814
def is_integer(): """ Generates a validator to validate if the value of a property is an integer. """ def wrapper(obj, prop): value = getattr(obj, prop) if value is None: return (True, None) try: int(value) except ValueError: return (False, (msg.INVALID_VALUE,)) except TypeError: return (False, (msg.INVALID_VALUE,)) return (True, None) return wrapper
5,329,815
def int_inputs(n): """An error handling function to get integer inputs from the user""" while True: try: option = int(input(Fore.LIGHTCYAN_EX + "\n >>> ")) if option not in range(1, n + 1): i_print_r("Invalid Entry :( Please Try Again.") continue else: return option except ValueError: i_print_r("Invalid Entry :( Please Try again") continue
5,329,816
def test_label_dict(): """@TODO: Docs. Contribution is welcome.""" dataset = TextClassificationDataset(texts, labels) label_dict = dataset.label_dict assert label_dict == {"negative": 0, "positive": 1}
5,329,817
def load_cElementTree(finder, module): """the cElementTree module implicitly loads the elementtree.ElementTree module; make sure this happens.""" finder.IncludeModule("elementtree.ElementTree")
5,329,818
def gen_binder_rst(fname, binder_conf): """Generate the RST + link for the Binder badge. Parameters ---------- fname: str The path to the `.py` file for which a Binder badge will be generated. binder_conf: dict | None If a dictionary it must have the following keys: 'url': The URL of the BinderHub instance that's running a Binder service. 'org': The GitHub organization to which the documentation will be pushed. 'repo': The GitHub repository to which the documentation will be pushed. 'branch': The Git branch on which the documentation exists (e.g., gh-pages). 'dependencies': A list of paths to dependency files that match the Binderspec. Returns ------- rst : str The reStructuredText for the Binder badge that links to this file. """ binder_url = gen_binder_url(fname, binder_conf) rst = ( "\n" " .. container:: binder-badge\n\n" " .. image:: https://static.mybinder.org/badge.svg\n" " :target: {}\n" " :width: 150 px\n").format(binder_url) return rst
5,329,819
def downgrade(): """Make refresh token field not nullable.""" bind = op.get_bind() session = Session(bind=bind) class CRUDMixin(object): """Mixin that adds convenience methods for CRUD (create, read, update, delete) ops.""" @classmethod def create_as(cls, current_user, **kwargs): """Create a new record and save it to the database as 'current_user'.""" assert hasattr(cls, 'modified_by') and hasattr(cls, 'created_by') instance = cls(**kwargs) return instance.save_as(current_user) @classmethod def create(cls, **kwargs): """Create a new record and save it to the database.""" instance = cls(**kwargs) return instance.save() def update_as(self, current_user, commit=True, preserve_modified=False, **kwargs): """Update specific fields of the record and save as 'current_user'.""" for attr, value in kwargs.items(): setattr(self, attr, value) return self.save_as(current_user, commit=commit, preserve_modified=preserve_modified) def update(self, commit=True, preserve_modified=False, **kwargs): """Update specific fields of a record.""" for attr, value in kwargs.items(): setattr(self, attr, value) return self.save(commit=commit, preserve_modified=preserve_modified) def save_as(self, current_user, commit=True, preserve_modified=False): """Save instance as 'current_user'.""" assert hasattr(self, 'modified_by') and hasattr(self, 'created_by') # noinspection PyUnresolvedReferences if current_user and not self.created_at: # noinspection PyAttributeOutsideInit self.created_by = current_user if current_user and not preserve_modified: # noinspection PyAttributeOutsideInit self.modified_by = current_user return self.save(commit=commit, preserve_modified=preserve_modified) def save(self, commit=True, preserve_modified=False): """Save the record.""" session.add(self) if commit: if preserve_modified and hasattr(self, 'modified_at'): modified_dt = self.modified_at session.commit() self.modified_at = modified_dt session.commit() return self def delete(self, commit=True): """Remove the record 
from the database.""" session.delete(self) return commit and session.commit() class Model(CRUDMixin, Base): """Base model class that includes CRUD convenience methods.""" __abstract__ = True @staticmethod def _get_rand_hex_str(length=32): """Create random hex string.""" return getencoder('hex')(urandom(length // 2))[0].decode('utf-8') class SurrogatePK(object): """A mixin that adds a surrogate integer primary key column to declarative-mapped class.""" __table_args__ = {'extend_existing': True} id = Column(Integer, primary_key=True) @classmethod def get_by_id(cls, record_id): """Get record by ID.""" if any((isinstance(record_id, basestring) and record_id.isdigit(), isinstance(record_id, (int, float))),): # noinspection PyUnresolvedReferences return cls.query.get(int(record_id)) else: return None def reference_col(tablename, nullable=False, pk_name='id', ondelete=None, **kwargs): """Column that adds primary key foreign key reference. Usage :: category_id = reference_col('category') category = relationship('Category', backref='categories') """ return Column( ForeignKey('{0}.{1}'.format(tablename, pk_name), ondelete=ondelete), nullable=nullable, **kwargs) class Client(Model): """An OAuth2 Client.""" __tablename__ = 'clients' client_id = Column(String(32), primary_key=True) client_secret = Column(String(256), unique=True, nullable=False) is_confidential = Column(Boolean(), default=True, nullable=False) _redirect_uris = Column(Text(), nullable=False) _default_scopes = Column(Text(), nullable=False) # Human readable info fields name = Column(String(64), nullable=False) description = Column(String(400), nullable=False) modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False) modified_by_id = reference_col('users', nullable=False) modified_by = relationship('User', foreign_keys=modified_by_id) created_at = Column(DateTime, default=datetime.utcnow, nullable=False) created_by_id = reference_col('users', nullable=False) created_by = 
relationship('User', foreign_keys=created_by_id) def __init__(self, redirect_uris=None, default_scopes=None, **kwargs): """Create instance.""" client_id = Client._get_rand_hex_str(32) client_secret = Client._get_rand_hex_str(256) Model.__init__(self, client_id=client_id, client_secret=client_secret, **kwargs) self.redirect_uris = redirect_uris self.default_scopes = default_scopes def __repr__(self): """Represent instance as a unique string.""" return '<Client({name!r})>'.format(name=self.name) class Collection(SurrogatePK, Model): """A collection of library stuff, a.k.a. 'a sigel'.""" __tablename__ = 'collections' code = Column(String(255), unique=True, nullable=False) friendly_name = Column(String(255), unique=False, nullable=False) category = Column(String(255), nullable=False) is_active = Column(Boolean(), default=True) permissions = relationship('Permission', back_populates='collection', lazy='joined') replaces = Column(String(255)) replaced_by = Column(String(255)) modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False) modified_by_id = reference_col('users', nullable=False) modified_by = relationship('User', foreign_keys=modified_by_id) created_at = Column(DateTime, default=datetime.utcnow, nullable=False) created_by_id = reference_col('users', nullable=False) created_by = relationship('User', foreign_keys=created_by_id) def __init__(self, code, friendly_name, category, **kwargs): """Create instance.""" Model.__init__(self, code=code, friendly_name=friendly_name, category=category, **kwargs) def __repr__(self): """Represent instance as a unique string.""" return '<Collection({code!r})>'.format(code=self.code) class Role(SurrogatePK, Model): """A role for a user.""" __tablename__ = 'roles' name = Column(String(80), unique=True, nullable=False) user_id = reference_col('users', nullable=True) user = relationship('User', back_populates='roles') def __init__(self, name, **kwargs): """Create instance.""" 
Model.__init__(self, name=name, **kwargs) def __repr__(self): """Represent instance as a unique string.""" return '<Role({name})>'.format(name=self.name) class PasswordReset(SurrogatePK, Model): """Password reset token for a user.""" __tablename__ = 'password_resets' user_id = reference_col('users', nullable=True) user = relationship('User', back_populates='password_resets') code = Column(String(32), unique=True, nullable=False) is_active = Column(Boolean(), default=True, nullable=False) expires_at = Column(DateTime, nullable=False, default=lambda: datetime.utcnow() + timedelta(hours=7 * 24)) modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) created_at = Column(DateTime, default=datetime.utcnow, nullable=False) def __init__(self, user, **kwargs): """Create instance.""" Model.__init__(self, user=user, code=self._get_rand_hex_str(32), **kwargs) def __repr__(self): """Represent instance as a unique string.""" return '<PasswordReset({email!r})>'.format(email=self.user.email) class User(UserMixin, SurrogatePK, Model): """A user of the app.""" __tablename__ = 'users' id = Column(Integer, primary_key=True) email = Column(String(255), unique=True, nullable=False) full_name = Column(String(255), unique=False, nullable=False) password = Column(LargeBinary(128), nullable=False) last_login_at = Column(DateTime, default=None) tos_approved_at = Column(DateTime, default=None) is_active = Column(Boolean(), default=False, nullable=False) is_admin = Column(Boolean(), default=False, nullable=False) permissions = relationship('Permission', back_populates='user', foreign_keys='Permission.user_id', lazy='joined') roles = relationship('Role', back_populates='user') password_resets = relationship('PasswordReset', back_populates='user') modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False) modified_by_id = reference_col('users', nullable=False) modified_by = relationship('User', remote_side=id, 
foreign_keys=modified_by_id) created_at = Column(DateTime, default=datetime.utcnow, nullable=False) created_by_id = reference_col('users', nullable=False) created_by = relationship('User', remote_side=id, foreign_keys=created_by_id) def __init__(self, email, full_name, password=None, **kwargs): """Create instance.""" Model.__init__(self, email=email, full_name=full_name, **kwargs) if password: self.set_password(password) else: self.set_password(hexlify(urandom(16))) def __repr__(self): """Represent instance as a unique string.""" return '<User({email!r})>'.format(email=self.email) class Permission(SurrogatePK, Model): """A permission on a Collection, granted to a User.""" __table_args__ = (UniqueConstraint('user_id', 'collection_id'), SurrogatePK.__table_args__) __tablename__ = 'permissions' user_id = reference_col('users', nullable=False) user = relationship('User', back_populates='permissions', foreign_keys=user_id, lazy='joined') collection_id = reference_col('collections', nullable=False) collection = relationship('Collection', back_populates='permissions', lazy='joined') registrant = Column(Boolean(), default=False, nullable=False) cataloger = Column(Boolean(), default=False, nullable=False) cataloging_admin = Column(Boolean(), default=False, nullable=False) modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False) modified_by_id = reference_col('users', nullable=False) modified_by = relationship('User', foreign_keys=modified_by_id) created_at = Column(DateTime, default=datetime.utcnow, nullable=False) created_by_id = reference_col('users', nullable=False) created_by = relationship('User', foreign_keys=created_by_id) def __init__(self, **kwargs): """Create instance.""" Model.__init__(self, **kwargs) def __repr__(self): """Represent instance as a unique string.""" return '<Permission({user!r}@{collection!r})>'.format(user=self.user, collection=self.collection) class Token(SurrogatePK, Model): """An OAuth2 Bearer token.""" 
__tablename__ = 'tokens' user_id = reference_col('users', nullable=False, ondelete='CASCADE') user = relationship('User') client_id = reference_col('clients', pk_name='client_id', nullable=False, ondelete='CASCADE') client = relationship('Client') token_type = Column(String(40), nullable=False, default='Bearer') access_token = Column(String(256), nullable=False, unique=True) refresh_token = Column(String(256), unique=True) expires_at = Column(DateTime, nullable=False, default=lambda: datetime.utcnow() + timedelta(seconds=3600)) _scopes = Column(Text, nullable=False) def __init__(self, scopes=None, **kwargs): """Create instance.""" Model.__init__(self, **kwargs) self.scopes = scopes @staticmethod def get_all_by_user(user): """Get all tokens for specified user.""" return Token.query.filter_by(user=user).all() @staticmethod def delete_all_by_user(user): """Delete all tokens for specified user.""" Token.query.filter_by(user=user).delete() @hybrid_property def expires(self): """Return 'expires_at'.""" return self.expires_at @hybrid_property def is_active(self): """Return still active (now < expires_at).""" return self.expires_at > datetime.utcnow() @hybrid_property def scopes(self): """Return scopes list.""" return self._scopes.split(' ') @scopes.setter def scopes(self, value): """Store scopes list as string.""" if isinstance(value, string_types): self._scopes = value elif isinstance(value, list): self._scopes = ' '.join(value) else: self._scopes = value def __repr__(self): """Represent instance as a unique string.""" return '<Token({user!r},{client!r})>'.format(user=self.user.email, client=self.client.name) # ensure all tokens have a refresh_token for token in session.query(Token).filter(Token.refresh_token == None).all(): # noqa: E711 token.refresh_token = Model._get_rand_hex_str() token.save(commit=True, preserve_modified=True) with op.batch_alter_table('tokens', schema=None) as batch_op: batch_op.alter_column('refresh_token', existing_type=sa.VARCHAR(length=256), 
nullable=False)
5,329,820
def height(grid): """Gets the height of the grid (stored in row-major order).""" return len(grid)
5,329,821
def test_blank_index_upload_missing_indexd_credentials_unable_to_load_json( app, client, auth_client, encoded_creds_jwt, user_client ): """ test BlankIndex upload call but unable to load json with a ValueError """ class MockArboristResponse: """ Mock response for requests lib for Arborist """ def __init__(self, data, status_code=200): """ Set up mock response """ self.data = data self.status_code = status_code def json(self): """ Mock json() call """ return self.data class MockResponse: """ Mock response for requests lib """ def __init__(self, data, status_code=200): """ Set up mock response """ self.data = data self.status_code = status_code def json(self): """ Mock json() call """ raise ValueError("unable to get json") def text(self): """ Mock text() call """ return self.data data_requests_mocker = mock.patch( "fence.blueprints.data.indexd.requests", new_callable=mock.Mock ) arborist_requests_mocker = mock.patch( "gen3authz.client.arborist.client.httpx.Client.request", new_callable=mock.Mock ) with data_requests_mocker as data_requests, arborist_requests_mocker as arborist_requests: data_requests.post.return_value = MockResponse( { "did": str(uuid.uuid4()), "rev": str(uuid.uuid4())[:8], "baseid": str(uuid.uuid4()), } ) data_requests.post.return_value.status_code = 401 arborist_requests.return_value = MockArboristResponse({"auth": True}) arborist_requests.return_value.status_code = 200 headers = { "Authorization": "Bearer " + encoded_creds_jwt.jwt, "Content-Type": "application/json", } file_name = "asdf" data = json.dumps({"file_name": file_name}) response = client.post("/data/upload", headers=headers, data=data) indexd_url = app.config.get("INDEXD") or app.config.get("BASE_URL") + "/index" endpoint = indexd_url + "/index/blank/" indexd_auth = (config["INDEXD_USERNAME"], config["INDEXD_PASSWORD"]) data_requests.post.assert_called_once_with( endpoint, auth=indexd_auth, json={"file_name": file_name, "uploader": user_client.username}, headers={}, ) assert 
response.status_code == 500, response assert not response.json
5,329,822
def parameterize(url): """Encode input URL as POST parameter. url: a string which is the URL to be passed to ur1.ca service. Returns the POST parameter constructed from the URL. """ return urllib.urlencode({"longurl": url})
5,329,823
def sum_ints(*args, **kwargs): """ This function is contrived to illustrate args in a function. """ print args return sum(args)
5,329,824
def set_have_mods(have_mods: bool) -> None: """set_have_mods(have_mods: bool) -> None (internal) """ return None
5,329,825
def test_from_db_file(datapath): """Test instantiate SearchDB from a yaml file""" searchdb = SearchDB.from_db_file(datapath / 'disp_db.yaml') assert searchdb.user is None assert searchdb.host == 'localhost' assert searchdb.database
5,329,826
def log(ui, repo, *pats, **opts): """show revision history of entire repository or files Print the revision history of the specified files or the entire project. If no revision range is specified, the default is ``tip:0`` unless --follow is set, in which case the working directory parent is used as the starting revision. File history is shown without following rename or copy history of files. Use -f/--follow with a filename to follow history across renames and copies. --follow without a filename will only show ancestors of the starting revision. By default this command prints revision number and changeset id, tags, non-trivial parents, user, date and time, and a summary for each commit. When the -v/--verbose switch is used, the list of changed files and full commit message are shown. With --graph the revisions are shown as an ASCII art DAG with the most recent changeset at the top. 'o' is a changeset, '@' is a working directory parent, '_' closes a branch, 'x' is obsolete, '*' is unstable, and '+' represents a fork where the changeset from the lines below is a parent of the 'o' merge on the same line. Paths in the DAG are represented with '|', '/' and so forth. ':' in place of a '|' indicates one or more revisions in a path are omitted. .. container:: verbose Use -L/--line-range FILE,M:N options to follow the history of lines from M to N in FILE. With -p/--patch only diff hunks affecting specified line range will be shown. This option requires --follow; it can be specified multiple times. Currently, this option is not compatible with --graph. This option is experimental. .. note:: :hg:`log --patch` may generate unexpected diff output for merge changesets, as it will only compare the merge changeset against its first parent. Also, only files different from BOTH parents will appear in files:. .. note:: For performance reasons, :hg:`log FILE` may omit duplicate changes made on branches and will not show removals or mode changes. 
To see all such changes, use the --removed switch. .. container:: verbose .. note:: The history resulting from -L/--line-range options depends on diff options; for instance if white-spaces are ignored, respective changes with only white-spaces in specified line range will not be listed. .. container:: verbose Some examples: - changesets with full descriptions and file lists:: hg log -v - changesets ancestral to the working directory:: hg log -f - last 10 commits on the current branch:: hg log -l 10 -b . - changesets showing all modifications of a file, including removals:: hg log --removed file.c - all changesets that touch a directory, with diffs, excluding merges:: hg log -Mp lib/ - all revision numbers that match a keyword:: hg log -k bug --template "{rev}\\n" - the full hash identifier of the working directory parent:: hg log -r . --template "{node}\\n" - list available log templates:: hg log -T list - check if a given changeset is included in a tagged release:: hg log -r "a21ccf and ancestor(1.9)" - find all changesets by some user in a date range:: hg log -k alice -d "may 2008 to jul 2008" - summary of all changesets after the last tag:: hg log -r "last(tagged())::" --template "{desc|firstline}\\n" - changesets touching lines 13 to 23 for file.c:: hg log -L file.c,13:23 - changesets touching lines 13 to 23 for file.c and lines 2 to 6 of main.c with patch:: hg log -L file.c,13:23 -L main.c,2:6 -p See :hg:`help dates` for a list of formats valid for -d/--date. See :hg:`help revisions` for more about specifying and ordering revisions. See :hg:`help templates` for more about pre-packaged styles and specifying custom templates. The default template used by the log command can be customized via the ``ui.logtemplate`` configuration setting. Returns 0 on success. 
""" opts = pycompat.byteskwargs(opts) linerange = opts.get('line_range') if linerange and not opts.get('follow'): raise error.Abort(_('--line-range requires --follow')) if linerange and pats: # TODO: take pats as patterns with no line-range filter raise error.Abort( _('FILE arguments are not compatible with --line-range option') ) repo = scmutil.unhidehashlikerevs(repo, opts.get('rev'), 'nowarn') revs, differ = logcmdutil.getrevs(repo, pats, opts) if linerange: # TODO: should follow file history from logcmdutil._initialrevs(), # then filter the result by logcmdutil._makerevset() and --limit revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts) getrenamed = None if opts.get('copies'): endrev = None if revs: endrev = revs.max() + 1 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev) ui.pager('log') displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ, buffered=True) if opts.get('graph'): displayfn = logcmdutil.displaygraphrevs else: displayfn = logcmdutil.displayrevs displayfn(ui, repo, revs, displayer, getrenamed)
5,329,827
def user_required(handler): """ Decorator for checking if there's a user associated with the current session. Will also fail if there's no session present. """ def check_login(self, *args, **kwargs): """ If handler has no login_url specified invoke a 403 error """ if self.request.query_string != '': query_string = '?' + self.request.query_string else: query_string = '' continue_url = self.request.path_url + query_string login_url = self.uri_for('login', **{'continue': continue_url}) try: auth = self.auth.get_user_by_session() if not auth: try: self.redirect(login_url, abort=True) except (AttributeError, KeyError), e: self.abort(403) except AttributeError, e: # avoid AttributeError when the session was delete from the server logging.error(e) self.auth.unset_session() self.redirect(login_url) return handler(self, *args, **kwargs) return check_login
5,329,828
def checkGlyphExistsInLayer(project): """Check whether a glyph in a layer exists, if the parent glyph specifies a layerName for a variation. """ for glyphSetName, glyphSet, glyphName, glyph in iterGlyphs(project): for layerName in getattr(glyph, "glyphNotInLayer", ()): yield f"'{glyphName}' does not exist in layer '{layerName}'"
5,329,829
def student_editapplication(request): """View allowing a student to edit and/or submit their saved application""" FSJ_user = get_FSJ_user(request.user.username) award_id = request.GET.get('award_id', '') try: award = Award.objects.get(awardid = award_id) application = Application.objects.get(award = award, student = FSJ_user) if (not application.award.is_active) or (not application.award.is_open()): return redirect('/awards/') if application.is_submitted: return redirect('/awards/') if request.method == "POST": form = ApplicationRestrictedForm(request.POST, request.FILES, instance = application) if form.is_valid(): application = form.save(commit = False) if '_save' in request.POST: application.is_submitted = False application.save() return redirect('/awards/') elif '_submit' in request.POST: if not award.is_open(): return redirect('/awards/') application.is_submitted = True if award.documents_needed == True and not application.application_file: messages.warning(request, 'Please upload a document.') else: application.save() return redirect('/awards/') elif '_delete' in request.POST: try: application = Application.objects.get(award=award, student=FSJ_user) if (not award.is_active) or (not award.is_open()): return redirect('/awards/') else: application.delete() except: pass return redirect('/awards/') else: form = ApplicationRestrictedForm(instance=application) context = get_standard_context(FSJ_user) template = loader.get_template("FSJ/student_apply.html") context["form"] = form context['award'] = award url = "/awards/edit/?award_id=" + str(award.awardid) context["url"] = url return HttpResponse(template.render(context, request)) except Application.DoesNotExist: return redirect('/awards/')
5,329,830
def create_and_calibrate(servers=None, nserver=8, npipeline_per_server=4, cal_directory='/home/ubuntu/mmanders'): """ Wraper to create a new BeamPointingControl instance and load bandpass calibration data from a directory. """ # Create the instance control_instance = BeamPointingControl(servers=servers, nserver=nserver, npipeline_per_server=npipeline_per_server, station=ovro) # Find the calibration files calfiles = glob.glob(os.path.join(cal_directory, '*.bcal')) calfiles.sort() if len(calfiles) == 0: warnings.warn(f"No calibration data found in '{cal_directory}'") # Load the calibration data, if found for calfile in calfiles: control_instance.set_beam1_calibration(calfile) # Start up the data flow control_instance.set_beam1_dest() control_instance.set_beam1_vlbi_dest() # Done return control_instance
5,329,831
def test_geometry_contains(feature_list, field_list):
    """ Assertions for 'contains' comparison operation

    :param feature_list: feature collection list
    :param field_list: feature field names
    """
    # The CQL text must parse into a spatial predicate AST node.
    cql_ast = get_ast('CONTAINS(geometry,POINT(-75 45))')
    assert cql_ast == SpatialPredicateNode(
        AttributeExpression('geometry'),
        LiteralExpression(Geometry('POINT(-75 45)')),
        'CONTAINS')
    # Evaluating the predicate against the fixture features is expected to
    # match nothing.
    result = spatial_test(cql_ast, feature_list, field_list)
    assert len(result) == 0
5,329,832
def get_haps_from_variants(translation_table_path: str, vcf_data: str, sample_id: str, solver: str = "CBC", config_path: str = None, phased = False) -> tuple:
    """
    Same as get_haps_from_vcf, but bypasses the VCF file so that you can
    provide formatted variants from another input.

    Get called haplotypes and additional information.

    Args:
        translation_table_path (str): path to the haplotype translation table
        vcf_data (str): pre-formatted variant data (in place of a VCF file)
        sample_id (str): identifier of the sample to call
        solver (str): MILP solver backend name. Defaults to "CBC".
        config_path (str, optional): path to a configuration file. Defaults to None.
        phased (bool): whether the input variants are phased. Defaults to False.

    Returns:
        tuple: translation_table_version, called_haplotypes,
            variants_associated_with_haplotype, matched_translation_table
    """
    config = get_config(config_path)
    gene = AbstractGene(translation_table_path,
                        variants = vcf_data,
                        solver = solver,
                        config = config,
                        phased = phased)
    haplotype = Haplotype(gene, sample_id, config = config)
    # Match observed variants to translation-table definitions before solving.
    haplotype.table_matcher()
    return haplotype.optimize_hap()
5,329,833
def _populate_number_fields(data_dict):
    """Return a copy of ``data_dict`` with the N_NODE / N_EDGE counts filled in.

    N_NODE is derived from the first dimension of the NODES field and N_EDGE
    from the RECEIVERS field; when the corresponding data field is ``None``
    the count defaults to 0.  Counts that are already present (non-``None``)
    are left untouched.

    Args:
        data_dict: An input `dict`.

    Returns:
        The data `dict` with number fields.
    """
    result = data_dict.copy()
    for count_key, data_key in ((N_NODE, NODES), (N_EDGE, RECEIVERS)):
        if result.get(count_key) is not None:
            continue
        data = result[data_key]
        count = data.size()[0] if data is not None else 0
        result[count_key] = torch.tensor(count, dtype=torch.int64)
    return result
5,329,834
def to_graph(e, recursive=True, verbose=False, arg_values=None, arg_types=None, partial_types=None):
    """Compile a Python entity into equivalent TensorFlow code.

    Currently supported entities:
        * functions
        * classes

    Classes are handled by converting all their methods into a new class.

    Args:
        e: A Python entity.
        recursive: Whether to recursively convert any functions that the
            decorator function may call.
        verbose: Whether to output the compiled code in the logs.
        arg_values: A dict containing value hints for symbols like function
            parameters.
        arg_types: A dict containing type hints for symbols like function
            parameters.
        partial_types: A set of types (e.g. classes) that will not be converted
            entirely. Calls to member functions for these types will be renamed
            independently.

    Returns:
        A function with a signature identical to `o`, but which when executed
        creates a TF graph that has the same functionality as the original
        entity.

    Raises:
        ValueError: If the converted function defines or refers to symbol
            names that are reserved for AutoGraph.
    """
    program_ctx = converter.ProgramContext(
        recursive=recursive,
        autograph_decorators=(convert, do_not_convert, converted_call),
        partial_types=partial_types,
        autograph_module=tf_inspect.getmodule(to_graph),
        uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
    _, name, namespace = conversion.entity_to_graph(e, program_ctx, arg_values, arg_types)

    # Dependencies are emitted in reverse so that callees precede callers in
    # the generated module.
    nodes = []
    for dep in reversed(program_ctx.dependency_cache.values()):
        nodes.extend(dep)
    compiled_module, compiled_src = compiler.ast_to_object(
        nodes,
        source_prefix=program_ctx.required_imports,
        include_source_map=True)

    # The compiled code should see everything the entry entity saw.
    # TODO(mdan): This might not work well if the call tree spans modules?
    for key, val in namespace.items():
        # Avoid overwriting entities that have been transformed.
        if key not in compiled_module.__dict__:
            compiled_module.__dict__[key] = val
    compiled = getattr(compiled_module, name)

    # Need this so the source_mapping attribute is available for the context
    # manager to access for runtime errors.
    #
    # Note that compiler.ast_to_object attaches the source map 'ag_source_map__'
    # symbol to the compiled module.
    # TODO(mdan): Record this statically in the generated code.
    # TODO(mdan): Rename this attribute to 'autograph_info__'
    source_map_attribute_name = 'ag_source_map'
    if getattr(compiled, source_map_attribute_name, None) is not None:
        raise ValueError('cannot convert %s because is has an attribute '
                         '"%s", which is reserved for AutoGraph.' %
                         (compiled, source_map_attribute_name))
    setattr(compiled, source_map_attribute_name,
            compiled_module.__dict__['ag_source_map__'])

    if verbose:
        logging.info('Compiled output of %s:\n\n%s\n', e, compiled_src)

    return compiled
5,329,835
def test_nullable_field_validation(e, m):
    """Detect if data source field breaks the contract.

    Data source cannot have nullable field if corresponding entity
    attribute is not annotated with Optional type.
    """
    # NOTE(review): `expected` is an empty string, so this test only passes
    # if MapperError carries an empty message.  This looks like a
    # placeholder -- confirm the intended error text.
    expected = ""

    with pytest.raises(MapperError) as exc_info:
        Mapper(e.Group, m.GroupModel, {"primary_key": "id"})

    message = str(exc_info.value)
    assert message == expected
5,329,836
def to_frame(nc):
    """ Convert netCDF4 dataset to pandas frames.

    Scalar per-beam parameters (s_params) are repeated for every valid range
    gate, and per-gate vector parameters (v_params) are flattened, producing
    one DataFrame row per (record, range gate) pair.  Adds 'dates'
    (datetime) and 'mdates' (matplotlib date numbers) columns and returns the
    frame sorted by 'dates'.
    """
    s_params = ["time", "bmnum", "noise.sky", "tfreq", "scan", "nrang",
                "intt.sc", "intt.us", "mppul", "scnum"]
    v_params = ["v", "w_l", "gflg", "p_l", "slist", "gflg_conv", "gflg_kde",
                "v_mad", "cluster_tag", "ribiero_gflg"]
    _dict_ = {k: [] for k in s_params + v_params}
    # Time decoding metadata from the netCDF 'time' variable attributes.
    tparam = {"units":nc.variables["time"].units,
              "calendar":nc.variables["time"].calendar,
              "only_use_cftime_datetimes":False}
    for i in range(nc.variables["slist"].shape[0]):
        sl = nc.variables["slist"][:][i,:]
        # NaN entries in slist mark unused gate slots; L is the valid count.
        idx = np.isnan(sl)
        L = len(sl[~idx])
        for k in s_params:
            _dict_[k].extend(L*[nc.variables[k][i]])
        for k in v_params:
            _dict_[k].extend(nc.variables[k][i,~idx])
    o = pd.DataFrame.from_dict(_dict_)
    # Decode numeric times -> cftime -> real datetimes -> naive UTC datetimes.
    time = o.time.apply(lambda x: num2date(x, tparam["units"], tparam["calendar"],
                                           only_use_cftime_datetimes=tparam["only_use_cftime_datetimes"])).tolist()
    time = np.array([x._to_real_datetime() for x in time]).astype("datetime64[ns]")
    time = [dt.datetime.utcfromtimestamp(x.astype(int) * 1e-9) for x in time]
    o["dates"] = time
    o["mdates"] = o.dates.apply(lambda x: mdates.date2num(x)).tolist()
    o = o.sort_values(by=["dates"])
    return o
5,329,837
def gather_grade_info(fctx, flow_session, answer_visits):
    """Aggregate per-page grades of a flow session into a single summary.

    :param fctx: flow context used to instantiate each page
    :param flow_session: the session whose pages are graded
    :param answer_visits: per-ordinal answer visits (entries may be None)
    :returns: a :class:`GradeInfo`
    """

    all_page_data = (FlowPageData.objects
            .filter(
                flow_session=flow_session,
                ordinal__isnull=False)
            .order_by("ordinal"))

    points = 0
    provisional_points = 0
    max_points = 0
    max_reachable_points = 0
    fully_correct_count = 0
    partially_correct_count = 0
    incorrect_count = 0
    unknown_count = 0

    for i, page_data in enumerate(all_page_data):
        page = instantiate_flow_page_with_ctx(fctx, page_data)

        assert i == page_data.ordinal

        if answer_visits[i] is None:
            # This is true in principle, but early code to deal with survey questions
            # didn't generate synthetic answer visits for survey questions, so this
            # can't actually be enforced.

            # assert not page.expects_answer()
            continue

        if not page.is_answer_gradable():
            continue

        grade = answer_visits[i].get_most_recent_grade()
        assert grade is not None

        feedback = get_feedback_for_grade(grade)

        max_points += grade.max_points

        if feedback is None or feedback.correctness is None:
            # A page without correctness makes the exact total unknowable:
            # `points` becomes None, but provisional/max tallies continue.
            unknown_count += 1
            points = None
            continue

        max_reachable_points += grade.max_points

        page_points = grade.max_points*feedback.correctness

        if points is not None:
            points += page_points

        provisional_points += page_points

        # Zero-point pages (e.g. surveys) don't affect correctness counts.
        if grade.max_points > 0:
            if feedback.correctness == 1:
                fully_correct_count += 1
            elif feedback.correctness == 0:
                incorrect_count += 1
            else:
                partially_correct_count += 1

    return GradeInfo(
            points=points,
            provisional_points=provisional_points,
            max_points=max_points,
            max_reachable_points=max_reachable_points,

            fully_correct_count=fully_correct_count,
            partially_correct_count=partially_correct_count,
            incorrect_count=incorrect_count,
            unknown_count=unknown_count)
5,329,838
def test_bool_str():
    """Test to string of bool literal renders inmanta true/false and not python."""
    statements = parse_code(
        """
val1 = true
val2 = false
"""
    )

    assert len(statements) == 2
    assert isinstance(statements[0], Assign)
    assert isinstance(statements[1], Assign)
    # Inmanta booleans must render lowercase, not Python's True/False.
    assert str(statements[0].rhs) == "true"
    assert str(statements[1].rhs) == "false"
5,329,839
def numericalSort(value):
    """Sort key that orders file names by the numbers embedded in them.

    Splits the given path on runs of digits and converts each run to
    ``int``, so that e.g. ``'file2'`` sorts before ``'file10'`` when used
    as ``sorted(paths, key=numericalSort)``.

    Parameters
    ----------
    value : str
        Path (or file name) to derive the key from.

    Returns
    -------
    list
        Alternating text fragments (str) and embedded numbers (int).
    """
    fragments = re.split(r'(\d+)', value)
    # Odd positions hold the captured digit runs; convert those to ints.
    return [int(part) if pos % 2 else part
            for pos, part in enumerate(fragments)]
5,329,840
def load_and_validate(response):
    """
    Loads JSON data from an HTTP response, then validates it.

    Delegates to ``validate_response`` for the actual checks; any
    validation failure is raised from there.  Returns None.
    """
    validate_response(response.json())
5,329,841
def add_markings(obj, marking, selectors):
    """
    Append a granular marking to the granular_markings collection.

    The method makes a best-effort attempt to distinguish between a
    marking-definition or language granular marking.

    Args:
        obj: An SDO or SRO object.
        marking: identifier or list of marking identifiers that apply to the
            properties selected by `selectors`.
        selectors: list of type string, selectors must be relative to the TLO
            in which the properties appear.

    Raises:
        InvalidSelectorError: If `selectors` fail validation.

    Returns:
        A new version of the given SDO or SRO with specified markings added.

    """
    selectors = utils.convert_to_list(selectors)
    marking = utils.convert_to_marking_list(marking)
    utils.validate(obj, selectors)

    granular_marking = []
    for m in marking:
        # Marking identifiers become 'marking_ref' entries; anything else is
        # treated as a language tag ('lang').
        if is_marking(m):
            granular_marking.append({'marking_ref': m, 'selectors': sorted(selectors)})
        else:
            granular_marking.append({'lang': m, 'selectors': sorted(selectors)})

    if obj.get('granular_markings'):
        granular_marking.extend(obj.get('granular_markings'))

    # Normalize: expand then re-compress so duplicate selectors/refs merge.
    granular_marking = utils.expand_markings(granular_marking)
    granular_marking = utils.compress_markings(granular_marking)
    return new_version(obj, granular_markings=granular_marking, allow_custom=True)
5,329,842
def test_custom_font():
    """Test figure using a custom font with fontspec in config file..."""
    # build_pypgf compiles the source via the PGF/LaTeX backend; return code
    # 0 means the build (including the fontspec setup) succeeded.
    with build_pypgf(srcdir, "custom_font.py") as res:
        assert res.returncode == 0, "Failed to build tests/sources/fonts/custom_font.py"
5,329,843
def add_to_list(str_to_add, dns_names):
    """Ensure ``str_to_add`` is present in ``dns_names`` and return its index.

    Appends the string only when it is missing, so existing entries keep
    their positions.

    Parameters
    ----------
    str_to_add : str
        Value to look up (and append if absent).
    dns_names : list
        Mutable list of names; modified in place when the value is new.

    Returns
    -------
    int
        Index of ``str_to_add`` within ``dns_names``.
    """
    try:
        return dns_names.index(str_to_add)
    except ValueError:
        # Not present yet -- append and return the new last index.
        dns_names.append(str_to_add)
        return len(dns_names) - 1
5,329,844
def check_icmp_path(sniffer, path, nodes, icmp_type = ipv6.ICMP_ECHO_REQUEST):
    """Verify icmp message is forwarded along the path.

    :param sniffer: packet capture providing get_messages_sent_by()
    :param path: ordered list of node identifiers the message should traverse
    :param nodes: mapping from node identifier to node object
    :param icmp_type: ICMPv6 message type to look for
    :returns: True if every hop forwarded to the expected next node
        (False only for an empty path -- each mismatched hop asserts)
    """
    len_path = len(path)

    # Verify icmp message is forwarded to the next node of the path.
    for i in range(0, len_path):
        node_msg = sniffer.get_messages_sent_by(path[i])
        node_icmp_msg = node_msg.get_icmp_message(icmp_type)

        if i < len_path - 1:
            # The MAC destination of this hop's ICMP message must be the
            # RLOC16 of the next node on the expected path.
            next_node = nodes[path[i + 1]]
            next_node_rloc16 = next_node.get_addr16()
            assert next_node_rloc16 == node_icmp_msg.mac_header.dest_address.rloc, "Error: The path is unexpected."
        else:
            # Last hop: nothing further to forward to.  NOTE(review): the
            # final node's message itself is fetched but not checked.
            return True

    return False
5,329,845
def ping():
    """Determine if the container is working and healthy. In this sample container, we declare
    it healthy if we can load the model successfully."""
    # Model loads successfully -> 200, otherwise 404 so the orchestrator
    # marks the container unhealthy.
    health = scoring_service.get_model() is not None  # You can insert a health check here

    status = 200 if health else 404
    return flask.Response(response="\n", status=status, mimetype="application/json")
5,329,846
def cyber_pose_to_carla_transform(cyber_pose):
    """
    Convert a Cyber pose to a carla transform.

    The pose's position is converted to a carla.Location and its quaternion
    orientation to a carla.Rotation via the respective helper functions.
    """
    return carla.Transform(
        cyber_point_to_carla_location(cyber_pose.position),
        cyber_quaternion_to_carla_rotation(cyber_pose.orientation))
5,329,847
def _is_predator_testcase(testcase):
    """Return bool and error message for whether this testcase is applicable
    to predator or not.

    A testcase qualifies when it is not built from a custom binary and has
    either regression 'NA' or a well-formed 'rev1:rev2' regression range.
    """
    if build_manager.is_custom_binary():
        return False, 'Not applicable to custom binaries.'

    regression = testcase.regression
    if regression == 'NA':
        return True, None

    # An empty range means the regression task has not completed yet.
    if not regression:
        return False, 'No regression range, wait for regression task to finish.'

    # A valid range is of the form "rev1:rev2".
    if ':' not in regression:
        return False, 'Invalid regression range %s.' % regression

    return True, None
5,329,848
def main():
    """
    How to run this script:
    python dump_tf_graph.py protxt_file_path
    """
    # argv[1] is a serialized GraphDef (binary protobuf) to visualize.
    with gfile.FastGFile(argv[1],'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    with tf.Graph().as_default() as graph:
        # name='' keeps the original node names (no import prefix).
        tf.import_graph_def(graph_def, name='')

    # Emit an event file under ./log for TensorBoard's graph view.
    tf.summary.FileWriter('./log', graph)
5,329,849
def _a_ij_Aij_Dij2(A):
    """A term that appears in the ASE of Kendall's tau and Somers' D."""
    # See `somersd` References [2] section 4:
    # Modified ASEs to test the null hypothesis...
    rows, cols = A.shape
    total = 0
    for r in range(rows):
        for c in range(cols):
            diff = _Aij(A, r, c) - _Dij(A, r, c)
            total += A[r, c] * diff * diff
    return total
5,329,850
def check_metadata_format(paramfile, is_file=True):
    """
    Checks HLSP files for compliance.  Logs errors and warnings to log file.

    :param paramfile: The parameter file created by 'precheck_data_format' and
        'select_data_templates', or a live HLSPFile object when called from
        the GUI (see ``is_file``).
    :type paramfile: str

    :param is_file: If True, ``paramfile`` is a path to load; otherwise it is
        used directly as an HLSPFile object.
    :type is_file: bool
    """

    # Read in all the YAML standard template files once to pass along.
    # templates_to_read = ["TEMPLATES/timeseries_mast.yml",
    #                      "TEMPLATES/timeseries_k2.yml"]
    templates_to_read = ["TEMPLATES/timeseries_k2.yml",
                         "TEMPLATES/timeseries_tess.yml",
                         # "TEMPLATES/image_hst.yml"
                         ]

    # Create FitsKeywordList object for each standard in all_standards array.
    # These are used to define the expected keywords for a given template
    # standard, but can have any part overwritten by the .hlsp file.
    # all_standards = numpy.asarray([])
    all_standards = []
    for ttr in templates_to_read:
        # Resolve template paths relative to this module, not the CWD.
        this_file = os.path.realpath(__file__)
        this_dir = "/".join(this_file.split("/")[:-1])
        ttr = os.path.join(this_dir, ttr)
        if os.path.isfile(ttr):
            with open(ttr, 'r') as istream:
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated (unsafe on untrusted input).  These are bundled
                # templates, but consider yaml.safe_load.
                yaml_data = yaml.load(istream)
                kw_list = FitsKeywordList(yaml_data['PRODUCT'],
                                          yaml_data['STANDARD'],
                                          yaml_data['KEYWORDS']
                                          )
                all_standards.append(kw_list)
                """
                all_standards = numpy.append(all_standards,
                                             FitsKeywordList(
                                                 yaml_data['PRODUCT'],
                                                 yaml_data['STANDARD'],
                                                 yaml_data['KEYWORDS']))
                """
        else:
            raise IOError("Template file not found: " + ttr)

    # Start logging to an output file.
    log_file_name = "check_metadata_format.log"
    metadata_log = new_logger(log_file_name)
    metadata_log.info('Started at ' + datetime.datetime.now().isoformat())

    # This will allow us to support running via script by default with a
    # previously saved metadata precheck file, or live via the GUI with an
    # HLSPFile class object.
    # param_data = (_read_from_file(paramfile) if is_file else paramfile)
    if is_file:
        param_data = HLSPFile(path=paramfile)
    else:
        param_data = paramfile
        print("<check_metadata_format> check_metadata_format() got:")
        param_data.fits_keywords().__display__()
        print("<<<>>>")

    # The root directory of the HLSP files is stored in the parameter file.
    file_base_dir = param_data.get_data_path()

    # Loop over each file ending.  Run the metadata checks on any file ending
    # marked to be checked for HLSP requirements.
    endings_to_check = param_data.get_check_extensions()
    """
    for ending in param_data['FileTypes']:
        this_key = [*ending][0]
        if ending[this_key]['RunCheck']:
            endings_to_check.append(ending)
    """

    # Pull any FITS keyword updates out of paramfile. (this can be either the
    # file created by precheck_data_format.py or the HLSPFile provided by
    # running through the GUI)
    try:
        kw_updates = param_data.keyword_updates
    except AttributeError:
        kw_updates = None
    print("type(kw_updates)={0}".format(type(kw_updates)))
    if isinstance(kw_updates, list):
        # Normalize a plain list of keyword updates into a FitsKeywordList.
        new_list = FitsKeywordList.empty_list()
        new_list.fill_from_list(kw_updates)
        kw_updates = new_list

    # Apply the metadata correction on the requested file endings.
    log_message_counts = apply_metadata_check(file_base_dir,
                                              param_data,
                                              all_standards
                                              )
    c = int(log_message_counts['files_checked'])
    del log_message_counts['files_checked']
    metadata_log.info('Finished at %s', datetime.datetime.now().isoformat())

    # Add a summary of the number of log messages to the top of the log file.
    with open(log_file_name, 'r') as ilogfile:
        all_log_messages = ilogfile.read()

    # Rewrite the log with the summary block prepended to the raw messages.
    with open(log_file_name, 'w') as ologfile:
        ologfile.write('# ------------------------------\n')
        ologfile.write('Total files checked: {0}\n'.format(c))
        ologfile.write('Message Summary (# Files: [Type] Message)\n')
        for dkey in log_message_counts:
            ologfile.write(str(log_message_counts[dkey]['count']) + ': [' +
                           log_message_counts[dkey]['type'] + '] ' + dkey +
                           '\n')
        ologfile.write('# ------------------------------\n')
        ologfile.write(all_log_messages)

    # results = HLSPFile(from_dict=param_data)
    param_data.toggle_ingest(2, state=True)
    param_data.save(caller=__file__)
5,329,851
def getWeekHouseMsg():
    """
    获取一周的房产信息 -- fetch this week's housing-market figures.

    Scrapes the weekly page (``week_host``) and extracts four values from
    the ``div.xfjj`` blocks.

    :return: tuple of (new-home average price, new-home sales count,
        second-hand average price, second-hand sales count) -- note the
        return order differs from the scrape order.
    """
    response = requests.get(url=week_host, headers=headers).text
    soup = BeautifulSoup(response, 'lxml')
    house_raw = soup.select('div[class=xfjj]')
    # 二手房均价 -- second-hand average price
    second_hand_price = house_raw[0].select('.f36')[0].string
    # 二手房成交数目 -- second-hand transaction count
    second_hand_num = house_raw[1].select('.f36')[0].string
    # 新手房均价 -- new-home average price
    new_house_price = house_raw[2].select('.f36')[0].string
    # 新房成交数目 -- new-home transaction count
    new_house_num = house_raw[3].select('.f36')[0].string
    # print(second_hand_price, second_hand_num, new_house_price, new_house_num)
    return new_house_price, new_house_num, second_hand_price, second_hand_num
5,329,852
def _match(x, y): """Returns an array of the positions of (first) matches of y in x This is similar to R's `match` or Matlab's `[Lia, Locb] = ismember` See https://stackoverflow.com/a/8251757 This assumes that all values in y are in x, but no check is made Parameters ---------- x : 1-d array y : 1-d array Returns ------- yindex : 1-d array np.all(x[yindex] == y) should be True """ index = np.argsort(x) sorted_index = np.searchsorted(x, y, sorter=index) yindex = index[sorted_index] return yindex
5,329,853
def add_type(s, validator):
    """Add a type. May override most existing types. Raises
    ConfigValueError if type is reserved or invalid, and ConfigTypeError
    if it's not a string. The validator is not, umm, validated."""
    # NOTE(review): `basestring` exists only on Python 2; on Python 3 this
    # raises NameError -- confirm the target interpreter.
    if not isinstance(s, basestring):
        raise ConfigTypeError('type must be a string')
    if '_' in s:
        raise ConfigValueError('type name may not contain underscores')
    # NOTE(review): membership in an empty tuple is always False, so this
    # reserved-type guard never fires -- presumably a placeholder for the
    # actual reserved names.
    if s in ():
        raise ConfigTypeError('may not override a reserved type')
    # `types` is the module-level type registry defined elsewhere.
    types[s] = validator
5,329,854
def create_default_support_dir(): """ create a empty panzer support directory """ # - create .panzer os.mkdir(const.DEFAULT_SUPPORT_DIR) info.log('INFO', 'panzer', 'created "%s"' % const.DEFAULT_SUPPORT_DIR) # - create subdirectories of .panzer subdirs = ['preflight', 'filter', 'lua-filter', 'postprocess', 'postflight', 'cleanup', 'template', 'styles'] for subdir in subdirs: target = os.path.join(const.DEFAULT_SUPPORT_DIR, subdir) os.mkdir(target) info.log('INFO', 'panzer', 'created "%s"' % target) # - create styles.yaml style_definitions = os.path.join(const.DEFAULT_SUPPORT_DIR, 'styles', 'styles.yaml') open(style_definitions, 'w').close() info.log('INFO', 'panzer', 'created empty "styles/styles.yaml"')
5,329,855
def download_mnist_tfrecords() -> str:
    """
    Return the path of a directory with the MNIST dataset in TFRecord format.
    The dataset will be downloaded into WORK_DIRECTORY, if it is not already
    present.
    """
    if not tf.gfile.Exists(WORK_DIRECTORY):
        tf.gfile.MakeDirs(WORK_DIRECTORY)

    filepath = os.path.join(WORK_DIRECTORY, MNIST_TF_RECORDS_FILE)
    if not tf.gfile.Exists(filepath):
        # Download the archive only once; subsequent runs reuse the cache.
        logging.info("Downloading {}".format(MNIST_TF_RECORDS_URL))

        r = requests.get(MNIST_TF_RECORDS_URL)
        with tf.gfile.Open(filepath, "wb") as f:
            f.write(r.content)
            logging.info("Downloaded {} ({} bytes)".format(MNIST_TF_RECORDS_FILE, f.size()))

        logging.info("Extracting {} to {}".format(MNIST_TF_RECORDS_FILE, WORK_DIRECTORY))
        with tarfile.open(filepath, mode="r:gz") as f:
            f.extractall(path=WORK_DIRECTORY)

    # The archive is expected to unpack into 'mnist-tfrecord'.
    data_dir = os.path.join(WORK_DIRECTORY, "mnist-tfrecord")
    assert tf.gfile.Exists(data_dir)
    return data_dir
5,329,856
def buildMeanAndCovMatFromRow(row):
    """
    Build a covariance matrix from a row

    Paramters
    ---------
    row : astropy Table row
        Entries: {X, Y, Z, U, V, W, dX, dY, ..., cXY, cXZ, ...}

    Return
    ------
    mean : [6] numpy array
        The mean position/velocity vector (X, Y, Z, U, V, W)
    cov_mat : [6,6] numpy array
        Diagonal elements are dX^2, dY^2, ...
        Off-diagonal elements are cXY*dX*dY, cXZ*dX*dZ, ...
    """
    dim = 6
    # CART_COL_NAMES = ['X', 'Y', 'Z', 'U', 'V', 'W',
    #                   'dX', 'dY',   'dZ',   'dU',   'dV',   'dW',
    #                   'c_XY', 'c_XZ', 'c_XU', 'c_XV', 'c_XW',
    #                           'c_YZ', 'c_YU', 'c_YV', 'c_YW',
    #                                   'c_ZU', 'c_ZV', 'c_ZW',
    #                                           'c_UV', 'c_UW',
    #                                                   'c_VW']
    # First 6 columns: mean position and velocity.
    mean = np.zeros(dim)
    for i, col_name in enumerate(CART_COL_NAMES[:6]):
        mean[i] = row[col_name]

    # Next 6 columns: standard deviations.
    std_vec = np.zeros(dim)
    for i, col_name in enumerate(CART_COL_NAMES[6:12]):
        std_vec[i] = row[col_name]
    corr_tri = np.zeros((dim,dim))
    # Insert upper triangle (top right) correlations
    for i, col_name in enumerate(CART_COL_NAMES[12:]):
        corr_tri[np.triu_indices(dim,1)[0][i],np.triu_indices(dim,1)[1][i]]\
            =row[col_name]

    # Build correlation matrix
    corr_mat = np.eye(6) + corr_tri + corr_tri.T
    # Multiply through by standard deviations
    cov_mat = corr_mat * std_vec * std_vec.reshape(6,1)
    return mean, cov_mat
5,329,857
def getNumArgs(obj):
    """Return the number of "normal" arguments a callable object takes.

    Counts positional-only and positional-or-keyword parameters; ``*args``,
    keyword-only parameters, and ``**kwargs`` are excluded.
    """
    positional_kinds = (inspect.Parameter.POSITIONAL_ONLY,
                        inspect.Parameter.POSITIONAL_OR_KEYWORD)
    parameters = inspect.signature(obj).parameters.values()
    return len([p for p in parameters if p.kind in positional_kinds])
5,329,858
def test_get_firmware_version() -> None:
    """Test that we can get the firmware version from the serial interface."""
    backend = SRV4MotorBoardHardwareBackend("COM0", serial_class=MotorSerial)
    serial = cast(MotorSerial, backend._serial)
    serial.check_data_sent_by_constructor()
    # A well-formed version reply ("...:3") parses to "3".
    assert backend.firmware_version == "3"
    serial.check_sent_data(b'\x01')
    # An unexpected firmware identifier must raise a CommunicationError.
    serial.append_received_data(b'PBV4C:5', newline=True)
    with pytest.raises(CommunicationError):
        backend.firmware_version
    serial.check_sent_data(b'\x01')
5,329,859
def min_distance_from_point(vec, p):
    """
    Minimal distance between a single point and each point along a vector
    (in N dimensions).

    Parameters
    ----------
    vec : numpy.ndarray
        Array of shape (n_points, n_dims), one point per row.
    p : numpy.ndarray
        Single point of shape (n_dims,), broadcast against every row.

    Returns
    -------
    float
        Smallest Euclidean distance between ``p`` and any row of ``vec``.
    """
    # Vectorized per-row norm replaces the much slower
    # np.apply_along_axis(np.linalg.norm, 1, ...) Python-level loop;
    # results are identical.
    return np.linalg.norm(vec - p, axis=1).min()
5,329,860
def create_environment(env_config):
    """Creates a simple sequential testing environment.

    :param env_config: dict with keys 'num_candidates' (must be >= 4),
        'slate_size', 'seed', 'starting_probs', and 'resample_documents'.
    :returns: a RecSimGymEnv wrapping the sequential environment
    :raises ValueError: if num_candidates is below 4
    """
    if env_config['num_candidates'] < 4:
        raise ValueError('num_candidates must be at least 4.')
    # Document ids run 0 .. num_candidates-1.
    SimpleSequentialResponse.MAX_DOC_ID = env_config['num_candidates'] - 1
    user_model = SimpleSequentialUserModel(
        env_config['slate_size'],
        seed=env_config['seed'],
        starting_probs=env_config['starting_probs'])
    document_sampler = SimpleSequentialDocumentSampler(seed=env_config['seed'])

    simple_seq_env = environment.Environment(
        user_model,
        document_sampler,
        env_config['num_candidates'],
        env_config['slate_size'],
        resample_documents=env_config['resample_documents'])

    # The two lambdas are no-op metrics aggregator/writer callbacks.
    return recsim_gym.RecSimGymEnv(simple_seq_env, total_reward,
                                   lambda _, __, ___: None, lambda _, __: None)
5,329,861
def nb_to_python(nb_path):
    """Convert the Jupyter notebook at ``nb_path`` to a Python script.

    Returns the generated source code as a string; the exporter's resource
    dictionary is discarded.
    """
    exporter = python.PythonExporter()
    source, _resources = exporter.from_filename(nb_path)
    return source
5,329,862
def add(data_source: DataSource) -> DataSource: """ Add a new data source to AuroraX Args: data_source: the data source to add (note: it must be a fully-defined DataSource object) Returns: the newly created data source Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXDuplicateException: duplicate data source, already exists """ # set up request request_data = { "program": data_source.program, "platform": data_source.platform, "instrument_type": data_source.instrument_type, "source_type": data_source.source_type, "display_name": data_source.display_name, "ephemeris_metadata_schema": data_source.ephemeris_metadata_schema, "data_product_metadata_schema": data_source.data_product_metadata_schema, "metadata": data_source.metadata } if (data_source.identifier is not None): request_data["identifier"] = data_source.identifier # make request req = AuroraXRequest(method="post", url=urls.data_sources_url, body=request_data) res = req.execute() # evaluate response if (res.status_code == 409): raise AuroraXDuplicateException("%s - %s" % (res.data["error_code"], res.data["error_message"])) # return try: return DataSource(**res.data) except Exception: raise AuroraXException("Could not create data source")
5,329,863
def list_installed(args): """List the installed models.""" # Find installed models, ignoring special folders like R. if os.path.exists(MLINIT): msg = " in '{}'.".format(MLINIT) models = [f for f in os.listdir(MLINIT) if os.path.isdir(os.path.join(MLINIT, f)) and f != "R" and not f.startswith('.')] else: msg = ". '{}' does not exist.".format(MLINIT) models = [] models.sort() # Only list model names if args.name_only: print('\n'.join(models)) return # Report on how many models we found installed. mcnt = len(models) plural = "s" if mcnt == 1: plural = "" print("Found {} model{} installed{}".format(mcnt, plural, msg)) # Report on each of the installed models. if mcnt > 0: print("") for p in models: entry = utils.load_description(p) utils.print_meta_line(entry) # Update available commands for the model for fast bash tab completion. utils.update_completion_list(COMPLETION_COMMANDS, set(entry['commands'])) # Suggest next step. if not args.quiet: if mcnt > 0: utils.print_next_step('installed', scenario='exist') else: utils.print_next_step('installed', scenario='none')
5,329,864
def initialize():
    """Do all necessary actions before input loop starts.

    Parses CLI arguments (udp, server, mode, msgfile, benchname, start,
    register, room), configures StateHolder/Benchmark, starts the output,
    command, and UDP subsystems, and queues any startup commands.

    :returns: True when a benchmark 'start' argument was provided
        (isbench), otherwise False.
    """
    isbench = False
    # udp,register,server,room
    arg_dict = get_args()
    # UDP listen port: use the argument only when it is a valid integer.
    if "udp" in arg_dict and arg_dict["udp"].isdigit():
        StateHolder.udp_listen_port = int(arg_dict["udp"])
    else:
        StateHolder.udp_listen_port = 5001
    if "server" in arg_dict:
        StateHolder.server_ip = arg_dict["server"]
    else:
        StateHolder.server_ip = '0.0.0.0:5000'
    # Room ordering mode: 'total' or (default) 'fifo'.
    if "mode" in arg_dict:
        if arg_dict["mode"] == "total":
            StateHolder.room_type = roomTotal
            Benchmark.room_type = "total"
        else: # arg_dict["mode"] == "fifo":
            StateHolder.room_type = roomFIFO
            Benchmark.room_type = "fifo"
    else:
        StateHolder.room_type = roomFIFO
        Benchmark.room_type = "fifo"
    if "msgfile" in arg_dict:
        Benchmark.msg_file = arg_dict["msgfile"]
    if "benchname" in arg_dict:
        Benchmark.bench_name = arg_dict["benchname"]
    if "start" in arg_dict:
        isbench = True
        # Only schedule the benchmark when 'start' parses as a float time.
        isfloat = False
        try:
            float(arg_dict["start"])
            isfloat = True
        except ValueError:
            pass
        if isfloat:
            Benchmark.bench_start = float(arg_dict["start"])
            Benchmark.schedule_benchmark()
    # StateHolder.server_ip = '0.0.0.0:5000'
    # StateHolder.udp_listen_port = 5001 if len(sys.argv) < 2 else int(sys.argv[1])
    OutputHandler.initialize()
    CommandHandler.initialize()
    UDPbroker.initialize()
    print(">> Welcome to the chat client! Press `!h` for help.")
    print(InputHandler.prompt_msg,end="")
    sys.stdout.flush()
    # Queue startup commands so they run once the input loop begins.
    if "register" in arg_dict:
        CommandHandler.pushCommand("!register {}".format(arg_dict["register"]))
    if "room" in arg_dict:
        CommandHandler.pushCommand("!j "+arg_dict["room"])
        CommandHandler.pushCommand("!w "+arg_dict["room"])
    # time.sleep(1)
    # CommandHandler.pushCommand("!register {}".format(arg_dict["register"]))
    return isbench
5,329,865
def jacobian_numba(coordinates, points, jac, greens_function):
    """
    Calculate the Jacobian matrix using numba to speed things up.

    It works both for Cartesian and spherical coordinates.  We need to pass
    the corresponding Green's function through the ``greens_function``
    argument.

    ``jac`` is filled in place: jac[i, j] is the Green's function evaluated
    between observation point i and source point j.
    """
    east, north, upward = coordinates[:]
    point_east, point_north, point_upward = points[:]
    # Plain nested loops: kept simple so numba can compile in nopython mode.
    for i in range(east.size):
        for j in range(point_east.size):
            jac[i, j] = greens_function(
                east[i],
                north[i],
                upward[i],
                point_east[j],
                point_north[j],
                point_upward[j],
            )
5,329,866
def read_text(file, num=False):
    """Read lines from txt [file].

    Parameters
    ----------
    file : str
        Path of the text file to read.
    num : bool, optional
        If True, the file holds numerical data and each line is converted
        to ``int``.

    Returns
    -------
    list
        One entry per line: str by default, int when ``num`` is True.
    """
    with open(file, 'r') as handle:
        lines = handle.read().splitlines()
    return [int(line) for line in lines] if num else lines
5,329,867
def p_base_access(p):
    """base_access : BASE MEMBERACCESS IDENTIFIER
                   | BASE LBRACKET expression_list RBRACKET
    """
    # The docstring above IS the PLY/yacc grammar rule for this production
    # and must not be edited as prose.  No semantic action is attached yet,
    # so the parser only recognizes 'base.member' and 'base[...]' accesses.
5,329,868
def interpolate_rbf(x, y, z, x_val, y_val, z_val):
    """Radial basis function interpolation.

    Fits one scattered-data RBF interpolant per component, using the three
    coordinate columns of each input as the interpolation sites.

    Parameters
    ----------
    x : np.ndarray
        x-faces or x-edges of a mesh
    y : np.ndarray
        y-faces or y-edges of a mesh
    z : np.ndarray
        z-faces or z-edges of a mesh
    x_val : np.ndarray
        curl values or electric field values in the x-direction
    y_val : np.ndarray
        curl values or electric field values in the y-direction
    z_val : np.ndarray
        curl values or electric field values in the z-direction

    Returns
    -------
    tuple of scipy.interpolate.rbf.Rbf
        The three radial basis function interpolation objects
        (x, y, z components, in that order).
    """
    def _fit(sites, values):
        # Columns 0..2 are the (x, y, z) coordinates of each site.
        return Rbf(sites[:, 0], sites[:, 1], sites[:, 2], values)

    return _fit(x, x_val), _fit(y, y_val), _fit(z, z_val)
5,329,869
def svm_loss_naive(W, X, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on
    minibatches of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c
      means that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    num_train = X.shape[0]
    num_classes = W.shape[1]
    loss = 0.0
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    # Loops are deliberate here ("naive" reference implementation); the
    # gradient is accumulated alongside the loss in the same pass.
    for sample in range(num_train):
        scores = X[sample].dot(W)
        correct_score = scores[y[sample]]
        for cls in range(num_classes):
            if cls == y[sample]:
                continue
            margin = scores[cls] - correct_score + 1  # note delta = 1
            if margin > 0:
                loss += margin
                dW[:, cls] += X[sample]
                dW[:, y[sample]] -= X[sample]

    # Average over the minibatch, then add L2 regularization.
    loss = loss / num_train + reg * np.sum(W * W)
    dW = dW / num_train + reg * W

    return loss, dW
5,329,870
def _chambollepock_tv2d(x, w, y, max_iters, info, **kwargs):
    """Chambolle and Pock's method for 2D TV proximity.

    Thin wrapper: delegates to the combined Condat/Chambolle-Pock driver
    with algorithm=1 selecting the Chambolle-Pock variant.  Extra keyword
    arguments are accepted for interface compatibility but ignored.
    """
    _condatchambollepock_tv2d(x, w, y, max_iters, info, algorithm=1)
5,329,871
def get_batch(image_files, width, height, mode='RGB'):
    """
    Load a batch of image files into a single float32 NumPy array.

    :param image_files: iterable of image file paths
    :param width: target image width passed to get_image
    :param height: target image height passed to get_image
    :param mode: color mode (default 'RGB')
    :return: 4-D array of shape (batch, height, width, channels); a trailing
        channel axis is appended for grayscale images.
    """
    images = [get_image(sample_file, width, height, mode)
              for sample_file in image_files]
    data_batch = np.asarray(images, dtype=np.float32)

    # Grayscale batches come back 3-D; add a channel axis so callers
    # always receive a 4-D array.
    if data_batch.ndim < 4:
        data_batch = data_batch.reshape(data_batch.shape + (1,))

    return data_batch
5,329,872
def ScriptProvenanceConst_get_decorator_type_name():
    """ScriptProvenanceConst_get_decorator_type_name() -> std::string"""
    # SWIG-generated wrapper: delegates straight to the compiled _RMF
    # extension module.  Do not edit by hand.
    return _RMF.ScriptProvenanceConst_get_decorator_type_name()
5,329,873
def load(dataset, trainset_name = ''):
    """Load training sets
    ======

    Add a new dataset to graph learning by saving the data and labels.

    Parameters
    ----------
    dataset : string
        Name of dataset.
    trainset_name : string (optional), default=''
        A modifier to uniquely identify different training sets for each dataset.

    Returns
    -------
    trainset
        The 'perm' array loaded from the downloaded/cached .npz file.
    """
    # e.g. "mnist" + "" -> "mnist_permutations.npz"
    dataFile = dataset.lower() + trainset_name.lower() +"_permutations.npz" #Change this eventually
    dataFile_path = os.path.join(trainset_dir, dataFile)

    #Check if Data directory exists
    if not os.path.exists(trainset_dir):
        os.makedirs(trainset_dir)

    #Download trainset if needed (cached on disk after the first call)
    if not os.path.exists(dataFile_path):
        urlpath = 'https://github.com/jwcalder/GraphLearning/raw/master/LabelPermutations/'+dataFile
        utils.download_file(urlpath, dataFile_path)

    trainset = utils.numpy_load(dataFile_path, 'perm')

    return trainset
5,329,874
def parse_annotation(parameter):
    """
    Tries to parse an internal annotation referencing ``Client`` or ``InteractionEvent``.

    Parameters
    ----------
    parameter : ``Parameter``
        The respective parameter's representation.

    Returns
    -------
    choices : `None` or `dict` of (`str` or `int`, `str`) items
        Parameter's choices.
    description : `None` or `str`
        Parameter's description.

        > Returned as `None` for internal parameters or if `description` could not be detected.
    name : `str`
        The parameter's name.
    type_ : `int`
        The parameter's internal type identifier.
    channel_types : `None` or `tuple` of `int`
        The accepted channel types.
    max_value : `None`, `int`, `float`
        The maximal accepted value.
    min_value : `None`, `int`, `float`
        The minimal accepted value.

    Raises
    ------
    ValueError
        - If `parameter` annotation tuple's length is out of range [2:3].
        - If `parameter` annotation tuple refers to an internal type.
    TypeError
        Parameter's type refers to an unknown type or string value.
    """
    if parameter.has_annotation:
        annotation_value = parameter.annotation
        if isinstance(annotation_value, tuple):
            # An empty tuple carries no information; fall back to the parameter's name.
            if len(annotation_value) == 0:
                annotation_value = parameter.name
            else:
                return parse_annotation_tuple(parameter)
        elif isinstance(annotation_value, SlashParameter):
            return parse_annotation_slash_parameter(annotation_value, parameter.name)
    else:
        # No annotation at all; the parameter's name doubles as the annotation value.
        annotation_value = parameter.name

    if not isinstance(annotation_value, (str, type)):
        # NOTE(review): the message lists `str` twice; the check accepts `type`
        # as well — the second `str` in the message looks like a typo.
        raise TypeError(f'Parameter `{parameter.name}` is not `tuple`, `str`, nor `str` instance '
            f'{annotation_value.__class__.__name__}; {annotation_value!r}.')
    else:
        annotation_type = parse_annotation_internal(annotation_value)
        if annotation_type is None:
            # Not an internal (Client / InteractionEvent) annotation; resolve the
            # type, choices and channel types from the annotation value itself.
            annotation_type, choices, channel_types = parse_annotation_type_and_choice(annotation_value,
                parameter.name)
        else:
            choices = None
            channel_types = None

    return choices, None, parameter.name, annotation_type, channel_types, None, None
5,329,875
def search(request):
    """
    Render search results for a raw-text substring query.
    """
    query = request.GET.get('query')
    matches = MsVerse.objects.filter(raw_text__icontains=query)
    # Order by book, chapter, verse, then manuscript Liste id.
    matches = matches.order_by('verse__chapter__book__num',
                               'verse__chapter__num',
                               'verse__num',
                               'hand__manuscript__liste_id')
    return default_response(request, 'search.html',
                            {'results': matches, 'query': query})
5,329,876
def _compare(pair: Tuple[List[int], List[int]]) -> float:
    """Unpack *pair* and delegate to ``fingerprints.compare``."""
    first, second = pair
    return fingerprints.compare(first, second)
5,329,877
def output_onto(conll_tokens, markstart_dict, markend_dict, file_name):
    """
    Outputs analysis results in OntoNotes .coref XML format

    :param conll_tokens: List of all processed ParsedToken objects in the document
    :param markstart_dict: Dictionary from markable starting token ids to Markable objects
    :param markend_dict: Dictionary from markable ending token ids to Markable objects
    :param file_name: Document name used for the DOCNO attribute
    :return: serialized XML
    """
    output_string = '<DOC DOCNO="' + file_name + '">\n<TEXT PARTNO="000">\n'
    for out_tok in conll_tokens:
        if int(out_tok.id) in markstart_dict:
            # Open longer-spanning markables first so nesting is well-formed.
            for out_mark in sorted(markstart_dict[int(out_tok.id)], key=operator.attrgetter('end'), reverse=True):
                output_string += '<COREF ID="' + str(out_mark.group) + '" ENTITY="' + out_mark.entity + '" INFSTAT="' + out_mark.infstat
                # TYPE is only emitted when the markable has an antecedent.
                if not out_mark.antecedent == "none":
                    output_string += '" TYPE="' + out_mark.coref_type
                output_string += '">'
        if int(out_tok.id) > 0:
            # Escape bare ampersands; text already containing ';' is assumed
            # to hold entities and is passed through unchanged.
            output_string += re.sub("&","&amp;",out_tok.text) if ";" not in out_tok.text else out_tok.text
        if int(out_tok.id) in markend_dict:
            for out_mark in markend_dict[int(out_tok.id)]:
                output_string += "</COREF>"
        if int(out_tok.id) > 0:
            output_string += ' '
    return output_string + "\n</TEXT>\n</DOC>\n"
5,329,878
def create_collection(zookeeper_quorum, solr_znode, collection, config_set, java64_home, user, group, shards = 1, replication_factor = 1, max_shards = 1, retry = 5, interval = 10):
  """
  Create Solr collection based on a configuration set in zookeeper.
  If this method called again the with higher shard number (or max_shard number), then it will indicate
  the cli tool to add new shards to the Solr collection. This can be useful after added a new Solr Cloud instance to the cluster.

  Runs the Solr Cloud CLI via the management framework's ``Execute`` as
  the given user/group; ``format`` interpolates the local variables into
  the command line.
  """
  solr_cli_prefix = __create_solr_cloud_cli_prefix(zookeeper_quorum, solr_znode, java64_home)
  if max_shards == 1: # if max shards is not specified use this strategy
    max_shards = replication_factor * shards
  Execute(format('{solr_cli_prefix} --create-collection -c {collection} -cs {config_set} -s {shards} -r {replication_factor} '\
    '-m {max_shards} -rt {retry} -i {interval}'),
    user=user,
    group=group
  )
5,329,879
def vertical() -> np.ndarray:
    """Return the Jones matrix for a vertical linear polarizer.

    Returns
    -------
    np.ndarray
        The 2x2 matrix [[0, 0], [0, 1]], which transmits only the
        vertical (y) polarization component.
    """
    # Docstring previously said "horizontal"; the matrix is the vertical polarizer.
    return np.asarray([[0, 0], [0, 1]])
5,329,880
def resource_id(d, i, r):
    """Extract the resource id from a meter reading.

    :param d: Report definition (dict); not used by this field extractor.
    :param i: Item definition (dict); not used by this field extractor.
    :param r: Meter reading (usage.reading.Reading).
    :return: the reading's ``resource_id`` attribute value.
    """
    attribute_name = 'resource_id'
    return _get_reading_attr(r, attribute_name)
5,329,881
def create_provider_router(neutron_client, project_id):
    """Create the provider router.

    :param neutron_client: Authenticated neutronclient
    :type neutron_client: neutronclient.Client object
    :param project_id: Project ID
    :type project_id: string
    :returns: Router object
    :rtype: dict
    """
    existing = neutron_client.list_routers(name='provider-router')['routers']
    # Idempotent: reuse an already-provisioned router when one exists.
    if existing:
        logging.warning('Router provider-router already exists.')
        return existing[0]

    logging.info('Creating provider router for external network access')
    body = {
        'router': {
            'name': 'provider-router',
            'tenant_id': project_id
        }
    }
    router = neutron_client.create_router(body)['router']
    logging.info('New router created: %s', (router['id']))
    return router
5,329,882
def prompt_roles_to_delete(role_for_the_profile_list: List[ProfileTuple]):
    """Print a numbered menu of roles so the user can pick one to delete.

    Parameters
    ----------
    role_for_the_profile_list : List[ProfileTuple]
        Roles to display; each entry only needs a ``name`` attribute.
    """
    # Prompt text is user-facing Japanese ("please choose the role to delete").
    print("\n削除するロールを選んでください")
    # enumerate replaces the manual counter; menu numbering starts at 1.
    for number, role in enumerate(role_for_the_profile_list, start=1):
        print("{}) {}".format(number, role.name))
5,329,883
def split(string: str) -> List[str]:
    """Tokenize a command string with shell-like quoting rules.

    Lets callers copy/paste command prefixes verbatim instead of
    writing out the argument list by hand.
    """
    tokens = shlex.split(string)
    return tokens
5,329,884
def compute_prevalence_percentage(df, groupby_fields):
    """Compute each group's proportional topic weight and attach labels.

    Sums ``norm_topic_weight`` per group, normalizes within the first
    grouping level so the weights become proportions, then joins the
    module-level ``labels`` frame on the second grouping column.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain the ``groupby_fields`` columns and ``norm_topic_weight``.
    groupby_fields : list of str
        Two grouping columns, e.g. ``['topic_id', 'year']``.

    Returns
    -------
    pd.DataFrame
        ``groupby_fields`` + ``proportional_weight`` + label columns.
    """
    grouped = df.groupby(groupby_fields).agg({'norm_topic_weight': 'sum'})
    # Normalize within the first grouping level so weights sum to 1 per group.
    proportions = grouped.groupby(level=0).apply(lambda x: x / x.sum()).reset_index()
    # Build the output column list without mutating the caller's list
    # (the original appended 'proportional_weight' in place).
    proportions.columns = list(groupby_fields) + ['proportional_weight']
    # NOTE(review): `labels` is a module-level frame defined elsewhere —
    # confirm its join key matches groupby_fields[1].
    proportions = proportions.merge(labels, on=groupby_fields[1])
    # Bug fix: previously returned the undefined name `merged_df` (NameError).
    return proportions
5,329,885
def get_token_history(address) -> pd.DataFrame:
    """Get info about token historical transactions. [Source: Ethplorer]

    Parameters
    ----------
    address: str
        Token e.g. 0xf3db5fa2c66b7af3eb0c0b782510816cbe4813b8

    Returns
    -------
    pd.DataFrame:
        DataFrame with token historical transactions.
    """
    response = make_request("getTokenHistory", address, limit=1000)
    all_operations = []
    operations = response["operations"]
    # Token metadata (name/symbol/decimals) is taken from the first operation;
    # an empty operations list falls through to the empty-string defaults.
    try:
        first_row = operations[0]["tokenInfo"]
        name, symbol, _ = (
            first_row.get("name"),
            first_row.get("symbol"),
            first_row.get("balance"),
        )
        decimals = first_row.get("decimals")
    except Exception:
        name, symbol = "", ""
        decimals = None
    for operation in operations:
        # Drop fields we re-derive or don't surface; convert epoch -> datetime.
        operation.pop("type")
        operation.pop("tokenInfo")
        operation["timestamp"] = datetime.fromtimestamp(operation["timestamp"])
        all_operations.append(operation)
    df = pd.DataFrame(all_operations)
    if df.empty:
        return df
    df[["name", "symbol"]] = name, symbol
    # Scale raw integer amounts by the token's decimals.
    # NOTE(review): if operations exist but "decimals" is absent, decimals is
    # None and int(decimals) raises — confirm the API always provides it.
    df["value"] = df["value"].astype(float) / (10 ** int(decimals))
    return df[["timestamp", "name", "symbol", "value", "from", "to", "transactionHash"]]
5,329,886
def top1_accuracy(pred, y):
    """Fraction of samples whose arg-max predicted class equals the label."""
    predicted_classes = pred.argmax(axis=1)
    correct = (predicted_classes == y).sum()
    return float(correct) / len(y)
5,329,887
def lookup_default_client_credentials_json() -> Optional[str]:
    """Locate the default JSON file holding the Mix client credentials.

    The current working directory is checked first; otherwise the lookup
    falls back to the MixCLI user home.

    :return: str or None, the path to the default Json file, or none if not found
    """
    candidate = os.path.realpath(
        os.path.join(os.getcwd(), DEFAULT_MIX_SERVICE_CRED_JSON))
    if os.path.isfile(candidate):
        return candidate
    return lookup_file_from_mixcli_userhome(DEFAULT_MIX_SERVICE_CRED_JSON)
5,329,888
def dividend_history(symbol):
    """Fetch the dividend payment history of a stock from the TCBS API.

    Args:
        symbol (:obj:`str`, required): 3 digits name of the desired stock.
    """
    url = ('https://apipubaws.tcbs.com.vn/tcanalysis/v1/company/'
           '{}/dividend-payment-histories?page=0&size=20').format(symbol)
    payload = requests.get(url).json()
    history = json_normalize(payload['listDividendPaymentHis'])
    return history.drop(columns=['no', 'ticker'])
5,329,889
def run(): """Requirements for Task 1E""" # Build list of stations stations = build_station_list() # Build set of rivers with stations rivers = rivers_with_station(stations) print("List of 9 rivers with most monitoring stations: ", rivers_by_station_number(stations, 9))
5,329,890
def reftype_to_pipelines(reftype, cal_ver=None, context=None):
    """Given `exp_type` and `cal_ver` and `context`,  locate the appropriate
    SYSTEM CRDSCFG reference file and determine the sequence of pipeline .cfgs
    required to process that exp_type.

    `context` and `cal_ver` default to the current values when omitted.
    Any failure is re-raised with the reftype/cal_ver context attached.
    """
    context = _get_missing_context(context)
    cal_ver = _get_missing_calver(cal_ver)
    with log.augment_exception("Failed determining required pipeline .cfgs for",
                               "EXP_TYPE", srepr(reftype), "CAL_VER", srepr(cal_ver)):
        config_manager = _get_config_manager(context, cal_ver)
        return config_manager.reftype_to_pipelines(reftype)
5,329,891
def get_forecast(filename):
    """Yield population-scenario records from a CSV under the raw data dir.

    Each yielded dict maps the file's 'timestep', 'lad_uk_2016' and
    'population' columns to 'year', 'lad' and 'population'.
    """
    path = os.path.join(DATA_RAW, 'population_scenarios', filename)
    with open(path, 'r') as source:
        for row in csv.DictReader(source):
            yield {
                'year': row['timestep'],
                'lad': row['lad_uk_2016'],
                'population': row['population'],
            }
5,329,892
def levy(x: np.ndarray):
    """Levy benchmark function.

    The function is usually evaluated on the hypercube xi ∈ [-10, 10],
    for all i = 1, …, d; its value is 0 at x = (1, ..., 1).

    :param x: c(x1, x2, ..., xd)
    :return: the y-value (float)
    """
    w = 1 + (x - 1) / 4  # same shape as x
    term1 = (np.sin(np.pi * w.T[0])) ** 2
    term3 = (w.T[-1] - 1) ** 2 * (1 + (np.sin(2 * np.pi * w.T[-1])) ** 2)

    # Middle sum runs over w_1 .. w_{d-1}; renamed from `sum`, which
    # shadowed the builtin.
    wi = w.T[:-1]
    middle = np.sum((wi - 1) ** 2 * (1 + 10 * (np.sin(np.pi * wi + 1)) ** 2))

    return term1 + middle + term3
5,329,893
def play(sequence, rate=30, bitrate=None, width=None, height=None,
         autoscale=True):
    """In an IPython notebook, display a sequence of images as an embedded video.

    N.B. If the quality and detail are insufficient, increase the bit rate.

    Parameters
    ----------
    sequence : any iterator or array of array-like images
        The images should have two dimensions plus an
        optional third dimensions representing color.
    rate : integer
        frame rate of output file, 30 by default
    bitrate : integer
        Video bitrate is crudely guessed if None is given.
    width : integer
        By default, set the width of the images.
    height : integer
        By default, set the height of the images. If width is specified
        and height is not, the height is autoscaled to maintain the aspect
        ratio.
    autoscale : boolean
        Linearly rescale the brightness to use the full gamut of black to
        white values. If the datatype of the images is not 'uint8', this must
        be set to True, as it is by default.
    """
    try:
        from IPython.display import display
    except ImportError:
        raise ImportError("This feature requires IPython.")
    with tempfile.NamedTemporaryFile(suffix='.webm') as temp:
        # Bug fix: `autoscale` was hardcoded to True here, silently
        # ignoring the caller's argument.
        # NOTE(review): bytes(temp.name) raises TypeError on a str under
        # Python 3 — this looks like Python 2 era code; confirm before use.
        export_pyav(sequence, bytes(temp.name), codec='libvpx', rate=rate,
                    width=width, height=height, bitrate=bitrate,
                    format='yuv420p', autoscale=autoscale)
        temp.flush()
        display(repr_video(temp.name, 'x-webm'))
5,329,894
def setup_plot_defaults():
    """
    Apply the project's default matplotlib settings for figures.

    Parameters
    ----------

    Returns
    -------
    """
    # rcParams is dict-like; update() validates each key just like
    # item-by-item assignment would.
    plt.rcParams.update({
        'ps.useafm': True,
        'pdf.use14corefonts': True,
        'text.usetex': True,
        'font.size': 14,
        'figure.subplot.hspace': 0.1,
    })
    plt.rc('font', family='sans-serif')
    plt.rc('font', serif='Helvetica')
5,329,895
def _select_programme(state, audio_programme=None):
    """Select an audioProgramme to render.

    If audio_programme_id is provided, use that to make the selection, otherwise
    select the only audioProgramme, or the one with the lowest id.

    Parameters:
        state (_ItemSelectionState): 'adm' must be set.
        audio_programme (AudioProgramme): audioProgramme to select if there are
            multiple programmes.

    Returns:
        _ItemSelectionState: state with audioProgramme set if one is found, None
        otherwise.
    """
    if audio_programme is None:
        if len(state.adm.audioProgrammes) > 1:
            # Ambiguous: pick deterministically but tell the user about it.
            warnings.warn("more than one audioProgramme; selecting the one with the lowest id")
            return evolve(state, audioProgramme=min(state.adm.audioProgrammes,
                                                    key=lambda programme: programme.id))
        elif len(state.adm.audioProgrammes) == 1:
            return evolve(state, audioProgramme=state.adm.audioProgrammes[0])
        else:
            # No programmes at all.
            return evolve(state, audioProgramme=None)
    else:
        # An explicit selection must actually belong to this ADM.
        assert in_by_id(audio_programme, state.adm.audioProgrammes), "selected audioProgramme not in ADM."
        return evolve(state, audioProgramme=audio_programme)
5,329,896
def puzzles():
    """Return the TOP95 puzzle strings, one per non-empty line."""
    lines = TOP95.split("\n")
    # filter(None, ...) drops the empty strings left by blank lines.
    return list(filter(None, lines))
5,329,897
def remove_shortcut(name: str):
    """Remove the shortcut identified by *name* and persist the remainder."""
    shortcuts = query_all_shortcuts()
    # A shortcut's name is the second-to-last path component of its entry.
    kept = [entry for entry in shortcuts if entry.split('/')[-2] != name]
    overwrite_shortcut_list(kept)
5,329,898
def test_declarative_sfc_obs_full(ccrs):
    """Test making a full surface observation plot.

    Builds a declarative PlotObs over a 15-minute window of the SFC_obs
    fixture data, with wind barbs and formatted station-model fields,
    and returns the figure for image comparison.
    """
    data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
                       infer_datetime_format=True, parse_dates=['valid'])

    obs = PlotObs()
    obs.data = data
    obs.time = datetime(1993, 3, 12, 13)
    obs.time_window = timedelta(minutes=15)
    obs.level = None
    obs.fields = ['tmpf', 'dwpf', 'emsl', 'cloud_cover', 'wxsym']
    obs.locations = ['NW', 'SW', 'NE', 'C', 'W']
    obs.colors = ['red', 'green', 'black', 'black', 'blue']
    # emsl rendered as the conventional 3-digit coded pressure.
    obs.formats = [None, None, lambda v: format(10 * v, '.0f')[-3:], 'sky_cover',
                   'current_weather']
    obs.vector_field = ('uwind', 'vwind')
    obs.reduce_points = 1

    # Panel for plot with Map features
    panel = MapPanel()
    panel.layout = (1, 1, 1)
    # The second `area` assignment intentionally overrides the lon/lat box
    # with the 'il' named region.
    panel.area = (-124, -72, 20, 53)
    panel.area = 'il'
    panel.projection = ccrs.PlateCarree()
    panel.layers = ['coastline', 'borders', 'states']
    panel.plots = [obs]

    # Bringing it all together
    pc = PanelContainer()
    pc.size = (10, 10)
    pc.panels = [panel]

    pc.draw()
    return pc.figure
5,329,899