hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace76534aa955f63db5061a94e6b3323dbc10949 | 935 | py | Python | examples/run_train.py | leilin-research/Time-series-prediction | 97ca6a7525e2c6329276b66ece1747124da8ab42 | [
"MIT"
] | 552 | 2019-07-23T10:17:49.000Z | 2022-03-23T13:37:46.000Z | examples/run_train.py | leilin-research/Time-series-prediction | 97ca6a7525e2c6329276b66ece1747124da8ab42 | [
"MIT"
] | 12 | 2020-05-16T04:22:09.000Z | 2022-03-23T13:38:45.000Z | examples/run_train.py | leilin-research/Time-series-prediction | 97ca6a7525e2c6329276b66ece1747124da8ab42 | [
"MIT"
] | 122 | 2019-09-09T11:34:19.000Z | 2022-03-16T08:06:24.000Z | # -*- coding: utf-8 -*-
# @author: Longxing Tan, tanlongxing888@163.com
# @date: 2020-01
import sys
import os
filePath = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.split(filePath)[0])
from data.load_data import DataLoader
from tfts.model import Model
from config import params
def main():
    """Build train/valid datasets per config and train the selected model.

    The dataset is split 80/20 for train/validation; the model family is
    chosen by ``params['use_model']`` (seq2seq, wavenet or transformer).
    """
    loader = DataLoader()
    # same loader invoked twice - only the sample fraction differs
    train_dataset = loader(
        params, data_dir=params['data_dir'], batch_size=params['batch_size'],
        training=True, sample=0.8)
    valid_dataset = loader(
        params, data_dir=params['data_dir'], batch_size=params['batch_size'],
        training=True, sample=0.2)
    # use_model: seq2seq, wavenet, transformer
    model = Model(
        params=params, use_model=params['use_model'], use_loss='mse',
        use_optimizer='adam', custom_model_params={})
    # mode: eager or fit
    model.train(
        train_dataset, valid_dataset, n_epochs=params['n_epochs'],
        mode='eager')
if __name__ == '__main__':
    main()
| 32.241379 | 128 | 0.729412 |
ace765bb376a351efb3ec3be0b4ae1c1ccec9206 | 915 | py | Python | webserver.py | styrup/simplepytonweb | 4db77a8e728bcdb26da19615fc05eb42688a0a04 | [
"Apache-2.0"
] | null | null | null | webserver.py | styrup/simplepytonweb | 4db77a8e728bcdb26da19615fc05eb42688a0a04 | [
"Apache-2.0"
] | null | null | null | webserver.py | styrup/simplepytonweb | 4db77a8e728bcdb26da19615fc05eb42688a0a04 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
# import the socket module directly - the original pulled `socket` out of
# http.server, which only works because that module happens to import it
import socket

from http.server import BaseHTTPRequestHandler, HTTPServer

# Listening port, configurable via the PORT_NUMBER env var (default 80)
portnumber = int(os.getenv('PORT_NUMBER', 80))
# HTTPRequestHandler class
class testHTTPServer_RequestHandler(BaseHTTPRequestHandler):
    """Minimal request handler: answers every GET with a connection-test
    message naming the serving host."""

    def do_GET(self):
        """Respond 200 with a small text/html body identifying this host."""
        # status line first, then headers, then body - per HTTP framing
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        body = "Webserver connection test succeed to " + socket.gethostname()
        self.wfile.write(body.encode("utf8"))
def run():
    """Bind an HTTP server on all interfaces at ``portnumber`` and block
    serving requests forever."""
    print('starting server on port ' + str(portnumber) + '...')
    # empty host string binds every local interface
    httpd = HTTPServer(('', portnumber), testHTTPServer_RequestHandler)
    print('running server...')
    httpd.serve_forever()
run() | 27.727273 | 80 | 0.686339 |
ace765f0201990791a95a1eb923c3761ef61685c | 35,325 | py | Python | portal/models/organization.py | pep8speaks/true_nth_usa_portal | 31ff755b0cfe61ab908e2a399e3c41ef17ca8c16 | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:25:20.000Z | 2019-03-11T12:25:20.000Z | portal/models/organization.py | pep8speaks/true_nth_usa_portal | 31ff755b0cfe61ab908e2a399e3c41ef17ca8c16 | [
"BSD-3-Clause"
] | null | null | null | portal/models/organization.py | pep8speaks/true_nth_usa_portal | 31ff755b0cfe61ab908e2a399e3c41ef17ca8c16 | [
"BSD-3-Clause"
] | null | null | null | """Model classes for organizations and related entities.
Designed around FHIR guidelines for representation of organizations, locations
and healthcare services which are used to describe hospitals and clinics.
"""
from datetime import datetime
from flask import abort, current_app, url_for
from sqlalchemy import UniqueConstraint, and_
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref
from werkzeug.exceptions import Unauthorized
from . import address
from ..database import db
from ..date_tools import FHIR_datetime
from ..dict_tools import strip_empties
from ..system_uri import IETF_LANGUAGE_TAG, SHORTNAME_ID, TRUENTH_RP_EXTENSION
from .app_text import (
ConsentByOrg_ATMA,
UndefinedAppText,
UnversionedResource,
VersionedResource,
app_text,
)
from .codeable_concept import CodeableConcept
from .coding import Coding
from .extension import CCExtension, TimezoneExtension
from .identifier import Identifier
from .reference import Reference
from .research_protocol import ResearchProtocol
from .role import ROLE, Role
from .telecom import ContactPoint, Telecom
# Bitmask flags packed into Organization.coding_options; the three
# *_CODINGS_MASK bits only apply when USE_SPECIFIC_CODINGS_MASK is set
USE_SPECIFIC_CODINGS_MASK = 0b0001
RACE_CODINGS_MASK = 0b0010
ETHNICITY_CODINGS_MASK = 0b0100
INDIGENOUS_CODINGS_MASK = 0b1000
class Organization(db.Model):
    """Model representing a FHIR organization
    Organizations represent a collection of people that have come together
    to achieve an objective. As an example, all the healthcare
    services provided by the same university hospital will belong to
    the organization representing said university hospital.
    Organizations can reference other organizations via the 'partOf_id',
    where children name their parent organization id.
    """
    __tablename__ = 'organizations'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    email = db.Column(db.String(120))
    phone_id = db.Column(
        db.Integer, db.ForeignKey('contact_points.id', ondelete='cascade'))
    type_id = db.Column(db.ForeignKey(
        'codeable_concepts.id', ondelete='cascade'))
    partOf_id = db.Column(db.ForeignKey('organizations.id'))
    # packed bitmask of the module-level *_CODINGS_MASK constants
    coding_options = db.Column(db.Integer, nullable=False, default=0)
    default_locale_id = db.Column(db.ForeignKey('codings.id'))
    # explicit timezone; the `timezone` property walks up the parent chain
    _timezone = db.Column('timezone', db.String(20))
    addresses = db.relationship(
        'Address', lazy='dynamic', secondary="organization_addresses")
    identifiers = db.relationship(
        'Identifier', lazy='dynamic', secondary="organization_identifiers")
    locales = db.relationship(
        'Coding', lazy='dynamic', secondary="organization_locales")
    _phone = db.relationship(
        'ContactPoint', foreign_keys=phone_id, cascade="save-update")
    research_protocols = association_proxy(
        "organization_research_protocols", "research_protocol",
        creator=lambda rp: OrganizationResearchProtocol(research_protocol=rp))
    type = db.relationship('CodeableConcept', cascade="save-update")
    def __init__(self, **kwargs):
        # 14 == 0b1110: race/ethnicity/indigenous bits on,
        # use_specific_codings bit off
        self.coding_options = 14
        super(Organization, self).__init__(**kwargs)
    def __str__(self):
        """Human readable summary including parent and addresses"""
        part_of = 'partOf {} '.format(self.partOf_id) if self.partOf_id else ''
        addresses = '; '.join([str(a) for a in self.addresses])
        return 'Organization {0.name} {0.type} {0.phone} {0.email} '.format(
            self) + part_of + addresses
    # truthy when this org controls its own coding visibility rather than
    # inheriting from its parent chain
    @hybrid_property
    def use_specific_codings(self):
        return self.coding_options & USE_SPECIFIC_CODINGS_MASK
    @use_specific_codings.setter
    def use_specific_codings(self, value):
        if value:
            self.coding_options = self.coding_options | USE_SPECIFIC_CODINGS_MASK
        else:
            self.coding_options = self.coding_options & ~USE_SPECIFIC_CODINGS_MASK
    @hybrid_property
    def race_codings(self):
        """Race coding flag; inherited from parent unless org is specific"""
        if self.use_specific_codings:
            return self.coding_options & RACE_CODINGS_MASK
        elif self.partOf_id:
            org = Organization.query.get(self.partOf_id)
            return org.race_codings
        else:
            return True
    @race_codings.setter
    def race_codings(self, value):
        if value:
            self.coding_options = self.coding_options | RACE_CODINGS_MASK
        else:
            self.coding_options = self.coding_options & ~RACE_CODINGS_MASK
    @hybrid_property
    def ethnicity_codings(self):
        """Ethnicity coding flag; inherited unless org is specific"""
        if self.use_specific_codings:
            return self.coding_options & ETHNICITY_CODINGS_MASK
        elif self.partOf_id:
            org = Organization.query.get(self.partOf_id)
            return org.ethnicity_codings
        else:
            return True
    @ethnicity_codings.setter
    def ethnicity_codings(self, value):
        if value:
            self.coding_options = self.coding_options | ETHNICITY_CODINGS_MASK
        else:
            self.coding_options = self.coding_options & ~ETHNICITY_CODINGS_MASK
    @hybrid_property
    def indigenous_codings(self):
        """Indigenous coding flag; inherited unless org is specific"""
        if self.use_specific_codings:
            return self.coding_options & INDIGENOUS_CODINGS_MASK
        elif self.partOf_id:
            org = Organization.query.get(self.partOf_id)
            return org.indigenous_codings
        else:
            return True
    @indigenous_codings.setter
    def indigenous_codings(self, value):
        if value:
            self.coding_options = self.coding_options | INDIGENOUS_CODINGS_MASK
        else:
            self.coding_options = self.coding_options & ~INDIGENOUS_CODINGS_MASK
    @property
    def phone(self):
        """Work phone value from the related ContactPoint, if any"""
        if self._phone:
            return self._phone.value
    @phone.setter
    def phone(self, val):
        if self._phone:
            self._phone.value = val
        else:
            self._phone = ContactPoint(system='phone', use='work', value=val)
    @property
    def default_locale(self):
        """Locale code for org; walks up parent chain until one is found"""
        coding = None
        org = self
        if org.default_locale_id:
            coding = Coding.query.get(org.default_locale_id)
        while org.partOf_id and not coding:
            org = Organization.query.get(org.partOf_id)
            if org.default_locale_id:
                coding = Coding.query.get(org.default_locale_id)
        if coding:
            return coding.code
    @default_locale.setter
    def default_locale(self, value):
        # only pre-existing IETF language tag codings may be referenced
        if not value:
            self.default_locale_id = None
        else:
            coding = Coding.query.filter_by(system=IETF_LANGUAGE_TAG, code=value).first()
            if not coding:
                raise ValueError(
                    "Can't find locale code {value} - constrained to "
                    "pre-existing values in the {system} system".format(
                        value=value, system=IETF_LANGUAGE_TAG))
            self.default_locale_id = coding.id
    @property
    def shortname(self):
        """Return shortname identifier if found, else the org name"""
        shortnames = [
            id for id in self.identifiers if id.system == SHORTNAME_ID]
        if len(shortnames) > 1:
            raise ValueError(
                "multiple shortname identifiers found for {}".format(self))
        return shortnames[0].value if shortnames else self.name
    @property
    def timezone(self):
        """Org timezone; inherits up the parent chain, 'UTC' as fallback"""
        org = self
        if org._timezone:
            return org._timezone
        while org.partOf_id:
            org = Organization.query.get(org.partOf_id)
            if org._timezone:
                return org._timezone
        # return 'UTC' if no parent inheritances found
        return 'UTC'
    @timezone.setter
    def timezone(self, value):
        self._timezone = value
    def rps_w_retired(self):
        """accessor to collate research protocols and retired_as_of values
        The SQLAlchemy association proxy doesn't provide easy access to
        `intermediary` table data - i.e. columns in the link table between
        a many:many association. This accessor collates the value stored
        in the intermediary table, `retired_as_of` with the research protocols
        for this organization.
        :returns: ready query for use in iteration or count or other methods.
            Query will produce a list of tuples (ResearchProtocol, retired_as_of)
            associated with the organization, ordered by `retired_as_of` dates
            with nulls last.
        """
        items = OrganizationResearchProtocol.query.join(
            ResearchProtocol).filter(
            OrganizationResearchProtocol.research_protocol_id ==
            ResearchProtocol.id).filter(
            OrganizationResearchProtocol.organization_id == self.id
        ).with_entities(
            ResearchProtocol,
            OrganizationResearchProtocol.retired_as_of).order_by(
            OrganizationResearchProtocol.retired_as_of.desc())
        return items
    def research_protocol(self, as_of_date):
        """Lookup research protocol for this org valid at as_of_date
        Complicated scenario as it may only be defined on the parent or
        further up the tree. Secondly, we keep history of research protocols
        in case backdated entry is necessary.
        :return: research protocol for org (or parent org) valid as_of_date
        """
        def rp_from_org(org):
            # last un-retired rp wins; otherwise last rp retired after
            # as_of_date (i.e. still active at that time)
            best_candidate = None
            for rp, retired_as_of in org.rps_w_retired():
                if not retired_as_of:
                    best_candidate = rp
                elif retired_as_of > as_of_date:
                    best_candidate = rp
            return best_candidate
        rp = rp_from_org(self)
        if rp:
            return rp
        org = self
        while org.partOf_id:
            org = Organization.query.get(org.partOf_id)
            rp = rp_from_org(org)
            if rp:
                return rp
    @classmethod
    def from_fhir(cls, data):
        """Instantiate a new Organization from FHIR resource data"""
        org = cls()
        return org.update_from_fhir(data)
    def update_from_fhir(self, data):
        """Update self from a FHIR Organization resource; returns self"""
        if 'id' in data:
            self.id = data['id']
        self.name = data.get('name')
        if 'telecom' in data:
            telecom = Telecom.from_fhir(data['telecom'])
            self.email = telecom.email
            telecom_cps = telecom.cp_dict()
            # prefer the work phone; fall back to one with no 'use'
            self.phone = telecom_cps.get(('phone', 'work')) \
                or telecom_cps.get(('phone', None))
        if 'address' in data:
            # an explicit empty address list clears existing addresses
            if not data.get('address'):
                for addr in self.addresses:
                    self.addresses.remove(addr)
            for addr in data['address']:
                self.addresses.append(address.Address.from_fhir(addr))
        self.type = (
            CodeableConcept.from_fhir(data['type']) if data.get('type')
            else None)
        self.partOf_id = (
            Reference.parse(data['partOf']).id if data.get('partOf')
            else None)
        for attr in (
                'use_specific_codings',
                'race_codings',
                'ethnicity_codings',
                'indigenous_codings',
        ):
            if attr in data:
                setattr(self, attr, data.get(attr))
        # apply every known extension adapter - absent extensions are
        # handed an empty {'url': ...} so adapters can clear stale state
        by_extension_url = {ext['url']: ext for ext in data.get('extension', [])}
        for kls in org_extension_classes:
            args = by_extension_url.get(kls.extension_url, {'url': kls.extension_url})
            instance = org_extension_map(self, args)
            instance.apply_fhir()
        if 'identifier' in data:
            # track current identifiers - must remove any not requested
            remove_if_not_requested = [i for i in self.identifiers]
            for id in data['identifier']:
                identifier = Identifier.from_fhir(id).add_if_not_found()
                if identifier not in self.identifiers.all():
                    self.identifiers.append(identifier)
                else:
                    remove_if_not_requested.remove(identifier)
            for obsolete in remove_if_not_requested:
                self.identifiers.remove(obsolete)
        self.default_locale = data.get('language')
        return self
    def as_fhir(self, include_empties=True):
        """Return JSON representation of organization
        :param include_empties: if True, returns entire object definition;
            if False, empty elements are removed from the result
        :return: JSON representation of a FHIR Organization resource
        """
        d = {}
        d['resourceType'] = 'Organization'
        d['id'] = self.id
        d['name'] = self.name
        telecom = Telecom(email=self.email, contact_points=[self._phone])
        d['telecom'] = telecom.as_fhir()
        d['address'] = []
        for addr in self.addresses:
            d['address'].append(addr.as_fhir())
        d['type'] = self.type.as_fhir() if self.type else None
        d['partOf'] = (
            Reference.organization(self.partOf_id).as_fhir() if
            self.partOf_id else None)
        for attr in ('use_specific_codings', 'race_codings',
                     'ethnicity_codings', 'indigenous_codings'):
            if getattr(self, attr):
                d[attr] = True
            else:
                d[attr] = False
        extensions = []
        for kls in org_extension_classes:
            instance = org_extension_map(self, {'url': kls.extension_url})
            data = instance.as_fhir(include_empties)
            if data:
                extensions.append(data)
        d['extension'] = extensions
        d['identifier'] = []
        for id in self.identifiers:
            d['identifier'].append(id.as_fhir())
        d['language'] = self.default_locale
        if not include_empties:
            return strip_empties(d)
        return d
    @classmethod
    def generate_bundle(cls, limit_to_ids=None, include_empties=True):
        """Generate a FHIR bundle of existing orgs ordered by ID
        :param limit_to_ids: if defined, only return the matching set, otherwise
            all organizations found
        :param include_empties: set to include empty attributes
        :return:
        """
        query = Organization.query.order_by(Organization.id)
        if limit_to_ids:
            query = query.filter(Organization.id.in_(limit_to_ids))
        orgs = [o.as_fhir(include_empties=include_empties) for o in query]
        bundle = {
            'resourceType': 'Bundle',
            'updated': FHIR_datetime.now(),
            'total': len(orgs),
            'type': 'searchset',
            'link': {
                'rel': 'self',
                'href': url_for(
                    'org_api.organization_search', _external=True),
            },
            'entry': orgs,
        }
        return bundle
    @staticmethod
    def consent_agreements(locale_code):
        """Return consent agreements for all top level organizations
        :param locale_code: preferred locale, typically user's.
        :return: dictionary keyed by top level organization id containing
          a VersionedResource for each organization IFF the organization
          has a custom consent agreement on file. The `organization_name`
          is also added to the versioned resource to simplify UI code.
        """
        from ..views.portal import stock_consent  # local avoids cycle
        agreements = {}
        for org_id in OrgTree().all_top_level_ids():
            org = Organization.query.get(org_id)
            # Not all organizations maintain consent agreements
            # include only those with such defined
            try:
                url = app_text(ConsentByOrg_ATMA.name_key(organization=org))
                resource = VersionedResource(url, locale_code=locale_code)
            except UndefinedAppText:
                # no consent found for this organization, provide
                # the dummy template
                url = url_for('portal.stock_consent', org_name=org.name,
                              _external=True)
                asset = stock_consent(org_name=org.shortname)
                resource = UnversionedResource(url=url, asset=asset)
            resource.organization_name = org.name
            resource.organization_shortname = org.shortname
            agreements[org.id] = resource
        return agreements
class OrganizationLocale(db.Model):
    """Link table: organization (n) : supported locale codings (n)"""
    __tablename__ = 'organization_locales'
    id = db.Column(db.Integer, primary_key=True)
    organization_id = db.Column(db.ForeignKey('organizations.id', ondelete='CASCADE'),
                                nullable=False)
    coding_id = db.Column(db.ForeignKey('codings.id'), nullable=False)
    __table_args__ = (UniqueConstraint('organization_id', 'coding_id',
                                       name='_organization_locale_coding'),)
class LocaleExtension(CCExtension):
    """CCExtension adapter for an organization's supported locales"""
    extension_url = "http://hl7.org/fhir/valueset/languages"

    def __init__(self, organization, extension):
        self.organization = organization
        self.extension = extension

    @property
    def children(self):
        # the locale codings CCExtension (de)serializes for this org
        return self.organization.locales
class OrganizationResearchProtocol(db.Model):
    """Link table: organization (n) : research protocols (n)

    Carries `retired_as_of` - the moment the protocol stopped applying
    to the organization (null means still active).
    """
    __tablename__ = 'organization_research_protocols'
    id = db.Column(db.Integer, primary_key=True)
    organization_id = db.Column(db.ForeignKey(
        'organizations.id', ondelete='CASCADE'), nullable=False)
    research_protocol_id = db.Column(db.ForeignKey(
        'research_protocols.id', ondelete='CASCADE'), nullable=False)
    retired_as_of = db.Column(db.DateTime, nullable=True)
    # bidirectional attribute/collection of
    # "organization"/"organization_research_protocols"
    organization = db.relationship(
        Organization, backref=backref(
            "organization_research_protocols", cascade="all, delete-orphan"))
    # reference to the "ResearchProtocol" object
    research_protocol = db.relationship("ResearchProtocol")
    __table_args__ = (UniqueConstraint(
        'organization_id', 'research_protocol_id',
        name='_organization_research_protocol'),)
    def __init__(self, research_protocol=None, organization=None, retired_as_of=None):
        # runtime type checks guard against association-proxy misuse
        if research_protocol:
            assert isinstance(research_protocol, ResearchProtocol)
        if organization:
            assert isinstance(organization, Organization)
        self.organization = organization
        self.research_protocol = research_protocol
        self.retired_as_of = retired_as_of
    def __repr__(self):
        return 'OrganizationResearchProtocol({}:{})'.format(
            self.organization, self.research_protocol)
class ResearchProtocolExtension(CCExtension):
    """CCExtension adapter (de)serializing an org's research protocols"""
    def __init__(self, organization, extension):
        self.organization, self.extension = organization, extension
    extension_url = TRUENTH_RP_EXTENSION
    def as_fhir(self, include_empties=True):
        """Serialize org's research protocols as a FHIR extension dict"""
        rps = []
        for rp, retired_as_of in self.organization.rps_w_retired():
            d = {'name': rp.name}
            if retired_as_of:
                d['retired_as_of'] = FHIR_datetime.as_fhir(retired_as_of)
            rps.append(d)
        if rps:
            return {'url': self.extension_url, 'research_protocols': rps}
        elif include_empties:
            return {'url': self.extension_url}
    def apply_fhir(self):
        """Sync org's research protocols to match the extension contents

        Protocols named in the extension are attached (they must already
        exist); previously attached protocols not named are removed.
        """
        if self.extension['url'] != self.extension_url:
            raise ValueError('invalid url for ResearchProtocolExtension')
        remove_if_not_requested = [
            rp for rp in self.organization.research_protocols]
        rps = self.extension.get('research_protocols', [])
        for rp in rps:
            name = rp.get('name')
            if not name:
                abort(400, "ResearchProtocol requires well defined name")
            existing = ResearchProtocol.query.filter_by(name=name).first()
            if not existing:
                abort(
                    404,
                    "ResearchProtocol with name {} not found".format(name))
            if existing not in self.organization.research_protocols:
                # Add the intermediary table type to include the
                # retired_as_of value. Magic of association proxy, bringing
                # one to life commits, and trying to add directly will fail
                OrganizationResearchProtocol(
                    research_protocol=existing, organization=self.organization,
                    retired_as_of=FHIR_datetime.parse(
                        rp.get('retired_as_of'), none_safe=True))
            else:
                remove_if_not_requested.remove(existing)
        # Unfortunately, the association proxy requires we now query for the
        # intermediary (link) table to check/set the value of `retired_as_of`
        # NOTE(review): this block runs AFTER the loop, so only the last
        # protocol's retired_as_of is refreshed, and it raises NameError when
        # `research_protocols` is empty - looks like it belongs inside the
        # loop's else branch; verify intent.
        o_rp = OrganizationResearchProtocol.query.filter(
            OrganizationResearchProtocol.organization_id ==
            self.organization.id).filter(
            OrganizationResearchProtocol.research_protocol_id ==
            existing.id).one()
        o_rp.retired_as_of = FHIR_datetime.parse(
            rp.get('retired_as_of'), none_safe=True)
        for obsolete in remove_if_not_requested:
            self.organization.research_protocols.remove(obsolete)
    @property
    def children(self):
        # research protocols aren't simple codings; base hook unused
        raise NotImplementedError
# Registry of extension adapters consulted by org_extension_map()
org_extension_classes = (LocaleExtension, TimezoneExtension,
                         ResearchProtocolExtension)
def org_extension_map(organization, extension):
    """Map the given extension to the Organization

    FHIR uses extensions for elements beyond base set defined. Lookup
    an adapter to handle the given extension for the organization.

    :param organization: the org to apply to or read the extension from
    :param extension: a dictionary with at least a 'url' key defining
        the extension.
    :returns: adapter implementing apply_fhir and as_fhir methods
    :raises :py:exc:`exceptions.ValueError`: if the extension isn't recognized

    """
    url = extension['url']
    for adapter_cls in org_extension_classes:
        if adapter_cls.extension_url == url:
            return adapter_cls(organization, extension)
    # no registered adapter claimed the URL
    raise ValueError("unknown extension: {}".format(url))
class UserOrganization(db.Model):
    """link table for users (n) : organizations (n)"""
    __tablename__ = 'user_organizations'
    id = db.Column(db.Integer, primary_key=True)
    organization_id = db.Column(db.ForeignKey(
        'organizations.id', ondelete='cascade'), nullable=False)
    user_id = db.Column(db.ForeignKey(
        'users.id', ondelete='cascade'), nullable=False)
    # a user may belong to a given organization at most once
    __table_args__ = (UniqueConstraint('user_id', 'organization_id',
                                       name='_user_organization'),)
    organization = db.relationship('Organization')
class OrganizationAddress(db.Model):
    """link table for organization : n addresses"""
    __tablename__ = 'organization_addresses'
    id = db.Column(db.Integer, primary_key=True)
    organization_id = db.Column(db.ForeignKey(
        'organizations.id', ondelete='cascade'), nullable=False)
    address_id = db.Column(db.ForeignKey(
        'addresses.id', ondelete='cascade'), nullable=False)
    # each address may be linked to a given organization at most once
    __table_args__ = (UniqueConstraint('organization_id', 'address_id',
                                       name='_organization_address'),)
class OrganizationIdentifier(db.Model):
    """link table for organization : n identifiers"""
    __tablename__ = 'organization_identifiers'
    id = db.Column(db.Integer, primary_key=True)
    organization_id = db.Column(db.ForeignKey(
        'organizations.id', ondelete='cascade'), nullable=False)
    identifier_id = db.Column(db.ForeignKey(
        'identifiers.id', ondelete='cascade'), nullable=False)
    # each identifier may be linked to a given organization at most once
    __table_args__ = (UniqueConstraint('organization_id', 'identifier_id',
                                       name='_organization_identifier'),)
class OrgNode(object):
    """Node in tree of organizations - used by org tree

    Simple tree implementation to house organizations in a hierarchical
    structure. One root - any number of nodes at each tier. The
    organization identifiers (integers referring to the database primary
    key) are used as reference keys.
    """

    def __init__(self, id, parent=None, children=None):
        self.id = id  # root node alone has id = None
        self.parent = parent
        self.children = children if children else {}
        if self.id is None:
            # only the dummy root may lack an id, and it has no parent
            assert self.parent is None

    def insert(self, id, partOf_id=None):
        """Insert new nodes into the org tree

        Designed for this special organization purpose, we expect the
        tree is built from the top (root) down, so no rebalancing is
        necessary.

        :param id: of organization to insert
        :param partOf_id: if organization has a parent - its identifier
        :returns: the newly inserted node
        :raises ValueError: on a null id or an id matching this node
        """
        if id is None:
            # Only allowed on root node - building top down, don't allow
            raise ValueError("only root node can have null id")
        if self.id == id:
            # Referring to self, don't allow
            raise ValueError("{} already in tree".format(id))
        if self.id != partOf_id:
            # Not adding a direct child of this node; only remaining legal
            # case is inserting a top-level org into the dummy root
            assert self.id is None and partOf_id is None
        # Adding child, confirm it's new
        assert id not in self.children
        node = OrgNode(id=id, parent=self)
        self.children[id] = node
        return node

    def top_level(self):
        """Lookup top_level organization id from the given node

        Use OrgTree.find() to locate starter node, if necessary

        :raises ValueError: when called on the (parentless) root node
        """
        if not self.parent:
            raise ValueError('popped off the top')
        if self.parent.id is None:
            return self.id
        return self.parent.top_level()
class OrgTree(object):
    """In-memory organizations tree for hierarchy and structure

    Organizations may define a 'partOf' in the database records to describe
    where the organization fits in a hierarchy. As there may be any
    number of organization tiers, and the need exists to lookup where
    an organization fits in this hierarchy. For example, needing to lookup
    the top level organization for any node, or all the organizations at or
    below a level for permission issues, etc.

    This singleton class will build up the tree when it's first needed (i.e.
    lazy load).

    Note, the root of the tree is a dummy object, so the first tier can be
    multiple `top-level` organizations.
    """
    # class-level (singleton) cache; see __reset_cache / invalidate_cache
    root = None
    lookup_table = None

    def __init__(self):
        # Maintain a singleton root object and lookup_table
        if not OrgTree.root:
            self.__reset_cache()

    def __reset_cache(self):
        # Internal method to manage cached org data
        OrgTree.root = OrgNode(id=None)
        OrgTree.lookup_table = {}
        self.populate_tree()

    @classmethod
    def invalidate_cache(cls):
        """Invalidate cache on org changes"""
        cls.root = None

    def populate_tree(self):
        """Recursively build tree from top down"""
        if self.root.children:  # Done if already populated
            return

        def add_descendents(node):
            partOf_id = node.id
            for org in Organization.query.filter(and_(
                    Organization.id != 0,  # none of the above doesn't apply
                    Organization.partOf_id == partOf_id)):
                new_node = node.insert(id=org.id, partOf_id=partOf_id)
                if org.id in self.lookup_table:
                    raise ValueError(
                        "Found cycle in org graph - can't add {} to table: {}"
                        "".format(org.id, self.lookup_table.keys()))
                self.lookup_table[org.id] = new_node
                if Organization.query.filter(
                        Organization.partOf_id == new_node.id).count():
                    add_descendents(new_node)

        # Add top level orgs first, recurse on down
        add_descendents(self.root)

    def find(self, organization_id):
        """Locates and returns node in OrgTree for given organization_id

        :param organization_id: primary key of organization to locate
        :return: OrgNode from OrgTree
        :raises: ValueError if not found - unexpected
        """
        organization_id = int(organization_id)
        if organization_id == 0:
            raise ValueError(
                "'none of the above' not found as it doesn't belong "
                "in OrgTree")
        if organization_id not in self.lookup_table:
            # Strange race condition - if this org id is found, reload
            # the lookup_table
            if Organization.query.get(organization_id):
                # `warning`, not the deprecated `warn` alias
                current_app.logger.warning(
                    "existing org not found in OrgTree. "
                    "lookup_table size {}".format(len(self.lookup_table)))
                self.__reset_cache()
            else:
                raise ValueError(
                    "{} not found in OrgTree".format(organization_id))
        return self.lookup_table[organization_id]

    def all_top_level_ids(self):
        """Return list of all top level organization identifiers"""
        return self.root.children.keys()

    def top_level_names(self):
        """Fetch org names for `all_top_level_ids`

        :returns: list of top level org names
        """
        results = Organization.query.filter(
            Organization.id.in_(self.all_top_level_ids())).with_entities(
            Organization.name).all()
        return [r[0] for r in results]

    def all_leaf_ids(self):
        """Return ids of every leaf organization in the tree"""
        nodes = set()
        for id in self.all_top_level_ids():
            nodes.update(self.all_leaves_below_id(id))
        return list(nodes)

    def all_leaves_below_id(self, organization_id):
        """Given org at arbitrary level, return list of leaf nodes below it"""
        arb = self.find(organization_id)

        def fetch_leaves(node):
            # iterative depth-first walk, yielding only childless nodes
            stack = [node]
            while stack:
                node = stack.pop()
                if not node.children:
                    yield node.id
                for child_node in node.children.values():
                    stack.append(child_node)

        return list(fetch_leaves(arb))

    def here_and_below_id(self, organization_id):
        """Given org at arbitrary level, return list at and below"""
        try:
            arb = self.find(organization_id)
        except ValueError:
            return []

        def fetch_nodes(node):
            # iterative depth-first walk, yielding every node visited
            stack = [node]
            while stack:
                node = stack.pop()
                yield node.id
                for child_node in node.children.values():
                    stack.append(child_node)

        return list(fetch_nodes(arb))

    def at_or_below_ids(self, organization_id, other_organizations):
        """Check if the other_organizations are at or below given organization

        :param organization_id: effective parent to check against
        :param other_organizations: iterable of organization_ids as potential
            children.

        :return: True if any org in other_organizations is equal to the
            given organization_id, or a child of it.
        """
        # hoisted out of the loop - the descendant set doesn't change
        children = self.here_and_below_id(organization_id)
        # work through list - shortcircuit out if a qualified node is found
        for other_organization_id in other_organizations:
            if organization_id == other_organization_id:
                return True
            if other_organization_id in children:
                return True

    def at_and_above_ids(self, organization_id):
        """Returns list of ids from any point in tree and up the parent stack

        :param organization_id: node in tree, will be included in return list
        :return: list of organization ids from the one given on up including
            every parent found in chain
        """
        ids = []
        node = self.find(organization_id)
        while node is not self.root:
            ids.append(node.id)
            node = node.parent
        return ids

    def find_top_level_orgs(self, organizations, first=False):
        """Returns top level organization(s) from those provided

        :param organizations: organizations against which top level
            organization(s) will be queried
        :param first: if set, return the first org in the result list
            rather than a set of orgs.
        :return: set of top level organization(s), or a single org if
            ``first`` is set.
        """
        results = set()
        for org in (o for o in organizations if o.id):
            top_org_id = self.find(org.id).top_level()
            results.add(Organization.query.get(top_org_id))
        if first:
            return next(iter(results)) if results else None
        return results

    @staticmethod
    def all_ids_with_rp(research_protocol):
        """Returns set of org IDs that are associated with Research Protocol

        As child orgs are considered to be associated if the parent org
        is, this will return the full list for optimized comparisons.
        """
        results = set()
        for o in Organization.query.all():
            if research_protocol == o.research_protocol:
                results.add(o.id)
        return results

    def visible_patients(self, staff_user):
        """Returns patient IDs for whom the current staff_user can view

        Staff users can view all patients at or below their own org
        level.

        NB - no patients should ever have a consent on file with the special
        organization 'none of the above' - said organization is ignored in the
        search.
        """
        from .user import User, UserRoles  # local to avoid cycle
        from .user_consent import UserConsent
        if not (
                staff_user.has_role(ROLE.STAFF.value) or
                staff_user.has_role(ROLE.STAFF_ADMIN.value)):
            raise Unauthorized("visible_patients() exclusive to staff use")
        staff_user_orgs = set()
        for org in (o for o in staff_user.organizations if o.id != 0):
            staff_user_orgs.update(self.here_and_below_id(org.id))
        if not staff_user_orgs:
            return []
        patient_role_id = Role.query.filter_by(
            name=ROLE.PATIENT.value).one().id
        now = datetime.utcnow()
        query = db.session.query(User.id).join(
            UserRoles).join(UserConsent).join(UserOrganization).filter(
            User.deleted_id.is_(None),
            UserRoles.role_id == patient_role_id,
            UserConsent.deleted_id.is_(None),
            UserConsent.expires > now,
            UserOrganization.organization_id.in_(staff_user_orgs))
        return [u[0] for u in query]  # flatten return tuples to list of ids
def add_static_organization():
    """Insert special `none of the above` org at index 0"""
    # Idempotent: only add the sentinel row when it isn't already present.
    if not Organization.query.get(0):
        db.session.add(Organization(id=0, name='none of the above'))
| 37.659915 | 89 | 0.634338 |
ace76855fe2d723c6686f20b3d31ab6eb1af22de | 671 | py | Python | Leetcode/1071. Greatest Common Divisor of Strings/solution2.py | asanoviskhak/Outtalent | c500e8ad498f76d57eb87a9776a04af7bdda913d | [
"MIT"
] | 51 | 2020-07-12T21:27:47.000Z | 2022-02-11T19:25:36.000Z | Leetcode/1071. Greatest Common Divisor of Strings/solution2.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | null | null | null | Leetcode/1071. Greatest Common Divisor of Strings/solution2.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | 32 | 2020-07-27T13:54:24.000Z | 2021-12-25T18:12:50.000Z | class Solution:
def gcd(self, a, b):
if b == 0:
return a
else:
return self.gcd(b, a % b)
def gcdOfStrings(self, str1: str, str2: str) -> str:
t1 = len(str1)
t2 = len(str2)
t = self.gcd(t1, t2)
if len(str1) > len(str2):
if str1 == str2 + str1[:t1 - t2]:
return str1[:t]
else:
return ""
if len(str1) < len(str2):
if str2 == str1 + str2[:t2 - t1]:
return str2[:t]
else:
return ""
if str1 == str2:
return str1[:t]
else:
return ""
| 22.366667 | 56 | 0.391952 |
ace7688d5191db51657afe6046019ea5ea0d875a | 4,463 | py | Python | main.py | psh150204/FBS | 40aed2d3a10d592b3f83f106a9bccafff1184ec3 | [
"MIT"
] | 3 | 2021-12-31T12:53:33.000Z | 2022-01-13T11:38:40.000Z | main.py | psh150204/FBS | 40aed2d3a10d592b3f83f106a9bccafff1184ec3 | [
"MIT"
] | 1 | 2021-12-31T12:54:42.000Z | 2022-01-11T05:33:07.000Z | main.py | psh150204/FBS | 40aed2d3a10d592b3f83f106a9bccafff1184ec3 | [
"MIT"
] | 2 | 2020-10-16T09:58:17.000Z | 2021-03-07T12:54:25.000Z | import os
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
from dataset import get_loader
from model import CifarNet
import utils
# Command-line options for the FBS (feature boosting & suppression) trainer.
parser = argparse.ArgumentParser()
# Whether to enable FBS; parsed via the project's str2bool helper.
parser.add_argument(
    '--fbs',
    type=utils.str2bool,
    default=False
)
# Fraction of channels kept when FBS is active (1.0 == no pruning).
parser.add_argument(
    '--sparsity_ratio',
    type=float,
    default=1.0
)
# Weight of the lasso regularization term added to the loss under FBS.
parser.add_argument(
    '--lasso_lambda',
    type=float,
    default=1e-8
)
parser.add_argument(
    '--epochs',
    type=int,
    default=500
)
parser.add_argument(
    '--batch_size',
    type=int,
    default=256
)
parser.add_argument(
    '--lr',
    type=float,
    default=1e-3
)
parser.add_argument(
    '--seed',
    type=int,
    default=1
)
parser.add_argument(
    '--num_worker',
    type=int,
    default=4
)
# Directory for checkpoints and the TSV training log.
parser.add_argument(
    '--ckpt_path',
    type=str,
    default='checkpoints'
)
# Checkpoint used to warm-start the FBS model (see init block below).
parser.add_argument(
    '--pretrained',
    type=str,
    default='checkpoints/best_False_1.0.pt'
)
args = parser.parse_args()
# Prepare output directory and (re)create the TSV log with its header row.
os.makedirs(args.ckpt_path, exist_ok=True)
with open(f'{args.ckpt_path}/train_log_{args.fbs}_{args.sparsity_ratio}.tsv', 'w') as log_file:
    log_file.write(
        'epoch\ttrain_loss\ttest_loss\ttrain_acc\ttest_acc\tbest_acc\n')
utils.set_seed(args.seed)
train_loader, test_loader = get_loader(args.batch_size, args.num_worker)
model = CifarNet(fbs=args.fbs, sparsity_ratio=args.sparsity_ratio).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# TODO: initialize current model parameters with previous model parameters
if args.fbs:
    if args.sparsity_ratio == 1.0 :
        # Warm-start only the conv layers from the non-FBS baseline.
        base_state_dict = torch.load(args.pretrained)
        model_state_dict = model.state_dict()
        for k, v in model_state_dict.items():
            if 'conv' in k:
                model_state_dict[k] = base_state_dict[k]
        model.load_state_dict(model_state_dict)
    else:
        # Sparser models copy all weights/biases from the pretrained model.
        base_state_dict = torch.load(args.pretrained)
        model_state_dict = model.state_dict()
        for k, v in model_state_dict.items():
            if 'weight' in k or 'bias' in k :
                model_state_dict[k] = base_state_dict[k]
        model.load_state_dict(model_state_dict)
# Main train/evaluate loop; tracks the best test accuracy and checkpoints it.
best_acc = 0.
for epoch in range(1, args.epochs+1):
    print(f'Epoch: {epoch}')
    train_loss = 0
    total_num = 0
    correct_num = 0
    total_step = len(train_loader)
    model.train()
    for img_batch, lb_batch in tqdm(train_loader, total=total_step):
        img_batch = img_batch.cuda()
        lb_batch = lb_batch.cuda()
        if not args.fbs:
            pred_batch = model(img_batch)
            loss = criterion(pred_batch, lb_batch)
        else:
            # FBS model also returns a lasso term, added as regularization.
            pred_batch, lasso = model(img_batch)
            loss = criterion(pred_batch, lb_batch) + lasso * args.lasso_lambda
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        _, pred_lb_batch = pred_batch.max(dim=1)
        total_num += lb_batch.shape[0]
        correct_num += pred_lb_batch.eq(lb_batch).sum().item()
    train_loss = train_loss / total_step
    train_acc = 100.*correct_num/total_num
    with torch.no_grad():
        test_loss = 0
        total_num = 0
        correct_num = 0
        total_step = len(test_loader)
        model.eval()
        for img_batch, lb_batch in tqdm(test_loader, total=len(test_loader)):
            img_batch = img_batch.cuda()
            lb_batch = lb_batch.cuda()
            if not args.fbs:
                pred_batch = model(img_batch)
                loss = criterion(pred_batch, lb_batch)
            else:
                # Second positional flag presumably switches the FBS model
                # to inference-mode gating -- TODO confirm against model.py.
                pred_batch, lasso = model(img_batch, True)
                loss = criterion(pred_batch, lb_batch) + lasso * args.lasso_lambda
            test_loss += loss.item()
            _, pred_lb_batch = pred_batch.max(dim=1)
            total_num += lb_batch.shape[0]
            correct_num += pred_lb_batch.eq(lb_batch).sum().item()
        test_loss = test_loss / total_step
        test_acc = 100.*correct_num/total_num
    # Save a checkpoint whenever test accuracy improves.
    if test_acc > best_acc:
        best_acc = test_acc
        torch.save(model.state_dict(),
                   f'{args.ckpt_path}/best_{args.fbs}_{args.sparsity_ratio}.pt')
    # Append this epoch's metrics to the TSV log.
    with open(f'{args.ckpt_path}/train_log_{args.fbs}_{args.sparsity_ratio}.tsv', 'a') as log_file:
        log_file.write(
            f'{epoch}\t{train_loss}\t{test_loss}\t{train_acc}\t{test_acc}\t{best_acc}\n')
ace769b94ce828635de61512aa5722cfe0cef2c9 | 4,521 | py | Python | examples/postprocess/NIMROD_RCI/parse_results_history.py | xinranzhu/GPTune-1 | 1e502295e790ab68990f657492243fd4fb3dfc0a | [
"BSD-3-Clause-LBNL"
] | null | null | null | examples/postprocess/NIMROD_RCI/parse_results_history.py | xinranzhu/GPTune-1 | 1e502295e790ab68990f657492243fd4fb3dfc0a | [
"BSD-3-Clause-LBNL"
] | null | null | null | examples/postprocess/NIMROD_RCI/parse_results_history.py | xinranzhu/GPTune-1 | 1e502295e790ab68990f657492243fd4fb3dfc0a | [
"BSD-3-Clause-LBNL"
] | null | null | null | import os
import os.path as osp
import argparse
import pickle
import numpy as np
from operator import itemgetter
import re
def parse_args():
    """Build the argument parser for the results-history script and parse
    ``sys.argv``.

    :return: argparse.Namespace with the parsed options
    """
    parser = argparse.ArgumentParser()
    # (flag, type, default, help) in the order options should be listed.
    option_specs = [
        ('-appstr', str, 'unknown', None),
        ('-ntask', int, 1, 'number of tasks'),
        ('-bmin', int, 1, 'minimum value for bandit budget'),
        ('-bmax', int, 8, 'maximum value for bandit budget'),
        ('-eta', int, 2, 'base value for bandit structure'),
        ('-Nloop', int, 1, 'number of bandit loops'),
        ('-expid', str, '0', None),
    ]
    for flag, flag_type, default, help_text in option_specs:
        if help_text is None:
            parser.add_argument(flag, type=flag_type, default=default)
        else:
            parser.add_argument(flag, type=flag_type, default=default, help=help_text)
    return parser.parse_args()
def main(args):
    """Parse a tuner comparison log into per-tuner convergence histories.

    Reads the text log produced by the experiment run, extracts one result
    entry per tuner/task (GPTuneBand results come pre-parsed from a pickle;
    hpbandster and the remaining tuners are parsed from the raw text), and
    pickles the aggregated ``summary`` list to ``save_path``.

    :param args: argparse.Namespace from :func:`parse_args`
    """
    summary = []
    # File naming convention shared with the experiment driver.
    my_source = f'./{args.appstr}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt'
    save_path = f'./{args.appstr}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.pkl'
    GPTuneBand_source = f'./{args.appstr}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}_parsed.pkl'
    with open(my_source, "r") as f:
        line = f.readline()
        while line:
            info = line.split()
            if (info[0] == 'Tuner:' and info[1] == "GPTuneBand"):
                # GPTuneBand: skip its raw lines; load pre-parsed pickle.
                results = []
                tunername = info[1]
                results.append(tunername)
                line = f.readline()
                line = f.readline().split()
                for _ in range(int(args.ntask)):
                    tid = int(line[1])
                    line = f.readline().split()
                    line = f.readline()
                    result = pickle.load(open(GPTuneBand_source, "rb"))
                    results.append(result)
                    if int(args.ntask) > 1:
                        line = f.readline().split()
                summary.append(results)
                line = f.readline()
            elif (info[0] == 'Tuner:' and info[1] == "hpbandster"):
                # hpbandster: parse (budget, loss) tuples from the raw line.
                results = []
                tunername = info[1]
                results.append(tunername)
                line = f.readline()
                line = f.readline().split()
                for _ in range(int(args.ntask)):
                    tid = int(line[1])
                    line = f.readline().split()
                    task = line[0][7:]
                    line = f.readline().strip(" Os ")
                    data = [[float(y) for y in x.split(", ")] for x in re.split('\[\[|\]\]|\), \(|\(|\)', line) if len(x) > 2]
                    # Drop failed evaluations recorded with infinite loss.
                    data = [y for y in data if y[1] < float("Inf")]
                    x = []
                    y = []
                    pre_fix = 0
                    max_num = -999
                    for info in data:
                        if info[0] > max_num:
                            max_num = info[0]
                    # Accumulate budget normalized by the maximum budget;
                    # keep only full-budget evaluations as history points.
                    for info in data:
                        pre_fix += info[0]/max_num
                        if np.isclose(info[0], max_num):
                            x.append(pre_fix)
                            y.append(info[1])
                    results.append([tid, task, [x, y]])
                    if int(args.ntask) > 1:
                        line = f.readline().split()
                summary.append(results)
                line = f.readline()
            else: # GPTune OpenTuner and TPE
                # These tuners log one flat history list per task.
                results = []
                tunername = info[1]
                results.append(tunername)
                line = f.readline()
                line = f.readline().split()
                for _ in range(int(args.ntask)):
                    tid = int(line[1])
                    line = f.readline().split()
                    task = [x for x in line]
                    line = f.readline().strip(' Os [ ]\n')
                    history = [float(x) for x in re.split('\], \[', line)]
                    x = list(np.arange(1,len(history)+1))
                    results.append([tid, task, [x,history]])
                    if int(args.ntask) > 1:
                        line = f.readline().split()
                summary.append(results)
                line = f.readline()
    print(summary[0])
    print(summary[1])
    print("Results saved to", save_path)
    pickle.dump(summary, open(save_path, "wb"))
if __name__ == "__main__":
    # Script entry point: parse CLI options, then parse and pickle results.
    main(parse_args())
| 44.323529 | 149 | 0.483521 |
ace76a580416d83a0c0774a3216c7de68628539b | 255 | py | Python | DataStructures/Trees/SegmentTree/SegmentTreeNode.py | Yarintop/Data-Structures-And-Algorithms-In-Python | 55db9e7f39211c42988171d51ef2659041df1aa1 | [
"MIT"
] | null | null | null | DataStructures/Trees/SegmentTree/SegmentTreeNode.py | Yarintop/Data-Structures-And-Algorithms-In-Python | 55db9e7f39211c42988171d51ef2659041df1aa1 | [
"MIT"
] | null | null | null | DataStructures/Trees/SegmentTree/SegmentTreeNode.py | Yarintop/Data-Structures-And-Algorithms-In-Python | 55db9e7f39211c42988171d51ef2659041df1aa1 | [
"MIT"
] | null | null | null | class SegmentTreeNode:
def __init__(self, minRange, maxRange, sum=0, left=None, right=None) -> None:
self.sum = sum
self.minRange = minRange
self.maxRange = maxRange
self.left = left
self.right = right
| 28.333333 | 81 | 0.596078 |
ace76a6af2c18088bbadae4bd9873191604bf602 | 1,226 | py | Python | tests/setup/init_omnicorp.py | ranking-agent/aragorn-ranker | a61fd208803d9871cbd2b87c122a0d2785722676 | [
"MIT"
] | null | null | null | tests/setup/init_omnicorp.py | ranking-agent/aragorn-ranker | a61fd208803d9871cbd2b87c122a0d2785722676 | [
"MIT"
] | 32 | 2020-10-08T15:08:59.000Z | 2022-02-04T13:53:30.000Z | tests/setup/init_omnicorp.py | ranking-agent/aragorn-ranker | a61fd208803d9871cbd2b87c122a0d2785722676 | [
"MIT"
] | null | null | null | """Initialize omnicorp testing instance."""
import os
from dotenv import load_dotenv
import psycopg2
# Load DB connection settings from the test suite's .env file.
file_path = os.path.dirname(os.path.realpath(__file__))
dotenv_path = os.path.abspath(os.path.join(file_path, '..', '.env'))
load_dotenv(dotenv_path=dotenv_path)
print('Connecting')
conn = psycopg2.connect(
    dbname=os.environ['OMNICORP_DB'],
    user=os.environ['OMNICORP_USER'],
    host=os.environ['OMNICORP_HOST'],
    port=os.environ['OMNICORP_PORT'],
    password=os.environ['OMNICORP_PASSWORD'])
print('Connected'
)
cur = conn.cursor()
# Build one multi-statement SQL script: schema, then per-curie-type table,
# indexes and a COPY load from the corresponding CSV fixture.
# Note: table names are interpolated from this hard-coded constant list,
# not from external input.
statement = f"CREATE SCHEMA IF NOT EXISTS omnicorp;\n"
curie_types: list = ['mesh', 'mondo', 'ncbigene', 'ncbitaxon', 'chebi', '"chembl.compound"']
for item in curie_types:
    print(f"working: {item}")
    statement += f"CREATE TABLE IF NOT EXISTS omnicorp.{item} (pubmedid varchar(255), curie varchar(255));\n"
    statement += f"CREATE INDEX ON omnicorp.{item} (pubmedid);\n"
    statement += f"CREATE INDEX ON omnicorp.{item} (curie);\n"
    # Strip quoting from names like '"chembl.compound"' to form the CSV path.
    new_item = item.replace('"', '')
    statement += f"COPY omnicorp.{item} (curie,pubmedid) FROM '/data/omnicorp_{new_item}.csv' DELIMITER ',' CSV HEADER;\n"
# Execute the accumulated script once, then commit and clean up.
cur.execute(statement)
cur.close()
conn.commit()
conn.close()
ace76ade906f57fc9aee004f4874eb596686657b | 1,218 | py | Python | tests/swarm_based/test_CSA.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | tests/swarm_based/test_CSA.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | tests/swarm_based/test_CSA.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Created by "Thieu" at 14:44, 20/03/2022 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
from mealpy.swarm_based import CSA
from mealpy.optimizer import Optimizer
import numpy as np
import pytest
@pytest.fixture(scope="module")  # scope: Call only 1 time at the beginning
def problem():
    """Module-scoped mealpy problem dict: minimize the 5-D sphere function."""
    def fitness_function(solution):
        # Sphere function: sum of squares, global minimum 0 at the origin.
        return np.sum(solution ** 2)
    problem = {
        "fit_func": fitness_function,
        "lb": [-10, -10, -10, -10, -10],
        "ub": [10, 10, 10, 10, 10],
        "minmax": "min",
        "log_to": None
    }
    return problem
def test_CSA_results(problem):
    """Run BaseCSA briefly and sanity-check the solver's return types."""
    models = [
        CSA.BaseCSA(problem, epoch=10, pop_size=50, p_a=0.3)
    ]
    for model in models:
        best_position, best_fitness = model.solve()
        # The solver must be an Optimizer and return a position matching
        # the problem's dimensionality.
        assert isinstance(model, Optimizer)
        assert isinstance(best_position, np.ndarray)
        assert len(best_position) == len(problem["lb"])
| 32.918919 | 132 | 0.504926 |
ace76bd9374c80a480f0ef33beaa4f145e25072d | 50,763 | py | Python | tests/utilities/test_cli.py | rongcuid/pytorch-lightning | 5841ca97825bd9786ab84d70f0abfa6e673528b4 | [
"Apache-2.0"
] | 3 | 2020-04-11T01:39:41.000Z | 2022-03-09T16:21:01.000Z | tests/utilities/test_cli.py | rongcuid/pytorch-lightning | 5841ca97825bd9786ab84d70f0abfa6e673528b4 | [
"Apache-2.0"
] | 1 | 2021-10-06T16:54:11.000Z | 2021-11-18T19:02:07.000Z | tests/utilities/test_cli.py | rongcuid/pytorch-lightning | 5841ca97825bd9786ab84d70f0abfa6e673528b4 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
import pickle
import sys
from argparse import Namespace
from contextlib import redirect_stdout
from io import StringIO
from typing import List, Optional, Union
from unittest import mock
from unittest.mock import ANY
import pytest
import torch
import yaml
from packaging import version
from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins.environments import SLURMEnvironment
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import _TPU_AVAILABLE
from pytorch_lightning.utilities.cli import (
CALLBACK_REGISTRY,
instantiate_class,
LightningArgumentParser,
LightningCLI,
LR_SCHEDULER_REGISTRY,
MODEL_REGISTRY,
OPTIMIZER_REGISTRY,
SaveConfigCallback,
)
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _TORCHVISION_AVAILABLE
from tests.helpers import BoringDataModule, BoringModel
from tests.helpers.runif import RunIf
from tests.helpers.utils import no_warning_call
# Parsed torchvision version ("0" when unavailable); used below to gate
# tests that need torchvision>=0.8.0.
torchvision_version = version.parse("0")
if _TORCHVISION_AVAILABLE:
    torchvision_version = version.parse(__import__("torchvision").__version__)
@mock.patch("argparse.ArgumentParser.parse_args")
def test_default_args(mock_argparse, tmpdir):
    """Tests default argument parser for Trainer."""
    # Force parse_args to return Trainer's defaults regardless of sys.argv.
    mock_argparse.return_value = Namespace(**Trainer.default_attributes())
    parser = LightningArgumentParser(add_help=False, parse_as_dict=False)
    args = parser.parse_args([])
    args.max_epochs = 5
    trainer = Trainer.from_argparse_args(args)
    assert isinstance(trainer, Trainer)
    assert trainer.max_epochs == 5
@pytest.mark.parametrize("cli_args", [["--accumulate_grad_batches=22"], ["--weights_save_path=./"], []])
def test_add_argparse_args_redefined(cli_args):
    """Redefines some default Trainer arguments via the cli and tests the Trainer initialization correctness."""
    parser = LightningArgumentParser(add_help=False, parse_as_dict=False)
    parser.add_lightning_class_args(Trainer, None)
    args = parser.parse_args(cli_args)
    # make sure we can pickle args
    pickle.dumps(args)
    # Check few deprecated args are not in namespace:
    for depr_name in ("gradient_clip", "nb_gpu_nodes", "max_nb_epochs"):
        assert depr_name not in args
    trainer = Trainer.from_argparse_args(args=args)
    # The resulting Trainer must also be picklable (e.g. for spawn-based launch).
    pickle.dumps(trainer)
    assert isinstance(trainer, Trainer)
@pytest.mark.parametrize("cli_args", [["--callbacks=1", "--logger"], ["--foo", "--bar=1"]])
def test_add_argparse_args_redefined_error(cli_args, monkeypatch):
    """Asserts error raised in case of passing not default cli arguments."""
    class _UnkArgError(Exception):
        pass
    def _raise():
        raise _UnkArgError
    parser = LightningArgumentParser(add_help=False, parse_as_dict=False)
    parser.add_lightning_class_args(Trainer, None)
    # Replace argparse's exit (which would call sys.exit) with our own error.
    monkeypatch.setattr(parser, "exit", lambda *args: _raise(), raising=True)
    with pytest.raises(_UnkArgError):
        parser.parse_args(cli_args)
@pytest.mark.parametrize(
    ["cli_args", "expected"],
    [
        ("--auto_lr_find=True --auto_scale_batch_size=power", dict(auto_lr_find=True, auto_scale_batch_size="power")),
        (
            "--auto_lr_find any_string --auto_scale_batch_size ON",
            dict(auto_lr_find="any_string", auto_scale_batch_size=True),
        ),
        ("--auto_lr_find=Yes --auto_scale_batch_size=On", dict(auto_lr_find=True, auto_scale_batch_size=True)),
        ("--auto_lr_find Off --auto_scale_batch_size No", dict(auto_lr_find=False, auto_scale_batch_size=False)),
        ("--auto_lr_find TRUE --auto_scale_batch_size FALSE", dict(auto_lr_find=True, auto_scale_batch_size=False)),
        ("--tpu_cores=8", dict(tpu_cores=8)),
        ("--tpu_cores=1,", dict(tpu_cores="1,")),
        ("--limit_train_batches=100", dict(limit_train_batches=100)),
        ("--limit_train_batches 0.8", dict(limit_train_batches=0.8)),
        ("--weights_summary=null", dict(weights_summary=None)),
        (
            "",
            dict(
                # These parameters are marked as Optional[...] in Trainer.__init__,
                # with None as default. They should not be changed by the argparse
                # interface.
                min_steps=None,
                max_steps=None,
                log_gpu_memory=None,
                distributed_backend=None,
                weights_save_path=None,
                resume_from_checkpoint=None,
                profiler=None,
            ),
        ),
    ],
)
def test_parse_args_parsing(cli_args, expected):
    """Test parsing simple types and None optionals not modified."""
    cli_args = cli_args.split(" ") if cli_args else []
    with mock.patch("sys.argv", ["any.py"] + cli_args):
        parser = LightningArgumentParser(add_help=False, parse_as_dict=False)
        parser.add_lightning_class_args(Trainer, None)
        args = parser.parse_args()
    for k, v in expected.items():
        assert getattr(args, k) == v
    # Skip Trainer instantiation for TPU args on machines without TPUs.
    if "tpu_cores" not in expected or _TPU_AVAILABLE:
        assert Trainer.from_argparse_args(args)
@pytest.mark.parametrize(
    ["cli_args", "expected", "instantiate"],
    [
        (["--gpus", "[0, 2]"], dict(gpus=[0, 2]), False),
        (["--tpu_cores=[1,3]"], dict(tpu_cores=[1, 3]), False),
        (['--accumulate_grad_batches={"5":3,"10":20}'], dict(accumulate_grad_batches={5: 3, 10: 20}), True),
    ],
)
def test_parse_args_parsing_complex_types(cli_args, expected, instantiate):
    """Test parsing complex types."""
    with mock.patch("sys.argv", ["any.py"] + cli_args):
        parser = LightningArgumentParser(add_help=False, parse_as_dict=False)
        parser.add_lightning_class_args(Trainer, None)
        args = parser.parse_args()
    for k, v in expected.items():
        assert getattr(args, k) == v
    # Only instantiate Trainer when the args don't require absent hardware.
    if instantiate:
        assert Trainer.from_argparse_args(args)
@pytest.mark.parametrize(["cli_args", "expected_gpu"], [("--gpus 1", [0]), ("--gpus 0,", [0]), ("--gpus 0,1", [0, 1])])
def test_parse_args_parsing_gpus(monkeypatch, cli_args, expected_gpu):
    """Test parsing of gpus and instantiation of Trainer."""
    # Pretend two CUDA devices exist so device-id resolution can run on CPU-only CI.
    monkeypatch.setattr("torch.cuda.device_count", lambda: 2)
    cli_args = cli_args.split(" ") if cli_args else []
    with mock.patch("sys.argv", ["any.py"] + cli_args):
        parser = LightningArgumentParser(add_help=False, parse_as_dict=False)
        parser.add_lightning_class_args(Trainer, None)
        args = parser.parse_args()
    trainer = Trainer.from_argparse_args(args)
    assert trainer.data_parallel_device_ids == expected_gpu
@pytest.mark.skipif(
    sys.version_info < (3, 7),
    reason="signature inspection while mocking is not working in Python < 3.7 despite autospec",
)
@pytest.mark.parametrize(
    ["cli_args", "extra_args"],
    [
        ({}, {}),
        (dict(logger=False), {}),
        (dict(logger=False), dict(logger=True)),
        (dict(logger=False), dict(checkpoint_callback=True)),
    ],
)
def test_init_from_argparse_args(cli_args, extra_args):
    """Checks from_argparse_args ignores unknown namespace keys but rejects unknown kwargs."""
    unknown_args = dict(unknown_arg=0)
    # unkown args in the argparser/namespace should be ignored
    with mock.patch("pytorch_lightning.Trainer.__init__", autospec=True, return_value=None) as init:
        trainer = Trainer.from_argparse_args(Namespace(**cli_args, **unknown_args), **extra_args)
    expected = dict(cli_args)
    expected.update(extra_args)  # extra args should override any cli arg
    init.assert_called_with(trainer, **expected)
    # passing in unknown manual args should throw an error
    with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'unknown_arg'"):
        Trainer.from_argparse_args(Namespace(**cli_args), **extra_args, **unknown_args)
class Model(LightningModule):
    """Minimal LightningModule with one constructor arg, used by the CLI tests."""
    def __init__(self, model_param: int):
        super().__init__()
        self.model_param = model_param
def _model_builder(model_param: int) -> Model:
    """Callable model factory used to exercise LightningCLI with functions."""
    return Model(model_param)
def _trainer_builder(
    limit_train_batches: int, fast_dev_run: bool = False, callbacks: Optional[Union[List[Callback], Callback]] = None
) -> Trainer:
    """Callable trainer factory used to exercise LightningCLI with functions."""
    return Trainer(limit_train_batches=limit_train_batches, fast_dev_run=fast_dev_run, callbacks=callbacks)
@pytest.mark.parametrize(["trainer_class", "model_class"], [(Trainer, Model), (_trainer_builder, _model_builder)])
def test_lightning_cli(trainer_class, model_class, monkeypatch):
    """Test that LightningCLI correctly instantiates model, trainer and calls fit."""
    expected_model = dict(model_param=7)
    expected_trainer = dict(limit_train_batches=100)
    def fit(trainer, model):
        # Stand-in for Trainer.fit: verify CLI args reached model and trainer.
        for k, v in expected_model.items():
            assert getattr(model, k) == v
        for k, v in expected_trainer.items():
            assert getattr(trainer, k) == v
        save_callback = [x for x in trainer.callbacks if isinstance(x, SaveConfigCallback)]
        assert len(save_callback) == 1
        save_callback[0].on_train_start(trainer, model)
    def on_train_start(callback, trainer, _):
        # Stand-in for SaveConfigCallback.on_train_start: check the dumped config.
        config_dump = callback.parser.dump(callback.config, skip_none=False)
        for k, v in expected_model.items():
            assert f" {k}: {v}" in config_dump
        for k, v in expected_trainer.items():
            assert f" {k}: {v}" in config_dump
        trainer.ran_asserts = True
    monkeypatch.setattr(Trainer, "fit", fit)
    monkeypatch.setattr(SaveConfigCallback, "on_train_start", on_train_start)
    with mock.patch("sys.argv", ["any.py", "fit", "--model.model_param=7", "--trainer.limit_train_batches=100"]):
        cli = LightningCLI(model_class, trainer_class=trainer_class, save_config_callback=SaveConfigCallback)
    assert hasattr(cli.trainer, "ran_asserts") and cli.trainer.ran_asserts
def test_lightning_cli_args_callbacks(tmpdir):
    """Callbacks passed as a JSON list of class_path/init_args are instantiated."""
    callbacks = [
        dict(
            class_path="pytorch_lightning.callbacks.LearningRateMonitor",
            init_args=dict(logging_interval="epoch", log_momentum=True),
        ),
        dict(class_path="pytorch_lightning.callbacks.ModelCheckpoint", init_args=dict(monitor="NAME")),
    ]
    class TestModel(BoringModel):
        def on_fit_start(self):
            # Assertions run inside fit so the fully-built Trainer is inspected.
            callback = [c for c in self.trainer.callbacks if isinstance(c, LearningRateMonitor)]
            assert len(callback) == 1
            assert callback[0].logging_interval == "epoch"
            assert callback[0].log_momentum is True
            callback = [c for c in self.trainer.callbacks if isinstance(c, ModelCheckpoint)]
            assert len(callback) == 1
            assert callback[0].monitor == "NAME"
            self.trainer.ran_asserts = True
    with mock.patch("sys.argv", ["any.py", "fit", f"--trainer.callbacks={json.dumps(callbacks)}"]):
        cli = LightningCLI(TestModel, trainer_defaults=dict(default_root_dir=str(tmpdir), fast_dev_run=True))
    assert cli.trainer.ran_asserts
@pytest.mark.parametrize("run", (False, True))
def test_lightning_cli_configurable_callbacks(tmpdir, run):
    """A callback registered via add_lightning_class_args is CLI-configurable."""
    class MyLightningCLI(LightningCLI):
        def add_arguments_to_parser(self, parser):
            parser.add_lightning_class_args(LearningRateMonitor, "learning_rate_monitor")
        def fit(self, **_):
            # No-op fit: only instantiation is under test here.
            pass
    cli_args = ["fit"] if run else []
    cli_args += [f"--trainer.default_root_dir={tmpdir}", "--learning_rate_monitor.logging_interval=epoch"]
    with mock.patch("sys.argv", ["any.py"] + cli_args):
        cli = MyLightningCLI(BoringModel, run=run)
    callback = [c for c in cli.trainer.callbacks if isinstance(c, LearningRateMonitor)]
    assert len(callback) == 1
    assert callback[0].logging_interval == "epoch"
def test_lightning_cli_args_cluster_environments(tmpdir):
    """A plugin given by class_path on the CLI replaces the default environment."""
    plugins = [dict(class_path="pytorch_lightning.plugins.environments.SLURMEnvironment")]
    class TestModel(BoringModel):
        def on_fit_start(self):
            # Ensure SLURMEnvironment is set, instead of default LightningEnvironment
            assert isinstance(self.trainer.accelerator_connector._cluster_environment, SLURMEnvironment)
            self.trainer.ran_asserts = True
    with mock.patch("sys.argv", ["any.py", "fit", f"--trainer.plugins={json.dumps(plugins)}"]):
        cli = LightningCLI(TestModel, trainer_defaults=dict(default_root_dir=str(tmpdir), fast_dev_run=True))
    assert cli.trainer.ran_asserts
def test_lightning_cli_args(tmpdir):
    """A full fit run saves a config.yaml that round-trips the parsed CLI args."""
    cli_args = [
        "fit",
        f"--data.data_dir={tmpdir}",
        f"--trainer.default_root_dir={tmpdir}",
        "--trainer.max_epochs=1",
        "--trainer.weights_summary=null",
        "--seed_everything=1234",
    ]
    with mock.patch("sys.argv", ["any.py"] + cli_args):
        cli = LightningCLI(BoringModel, BoringDataModule, trainer_defaults={"callbacks": [LearningRateMonitor()]})
    # SaveConfigCallback writes the config into the logger's version directory.
    config_path = tmpdir / "lightning_logs" / "version_0" / "config.yaml"
    assert os.path.isfile(config_path)
    with open(config_path) as f:
        loaded_config = yaml.safe_load(f.read())
    loaded_config = loaded_config["fit"]
    cli_config = cli.config["fit"]
    assert cli_config["seed_everything"] == 1234
    assert "model" not in loaded_config and "model" not in cli_config  # no arguments to include
    assert loaded_config["data"] == cli_config["data"]
    assert loaded_config["trainer"] == cli_config["trainer"]
def test_lightning_cli_save_config_cases(tmpdir):
    """Config saving is skipped under fast_dev_run and refuses to overwrite."""
    config_path = tmpdir / "config.yaml"
    cli_args = ["fit", f"--trainer.default_root_dir={tmpdir}", "--trainer.logger=False", "--trainer.fast_dev_run=1"]
    # With fast_dev_run!=False config should not be saved
    with mock.patch("sys.argv", ["any.py"] + cli_args):
        LightningCLI(BoringModel)
    assert not os.path.isfile(config_path)
    # With fast_dev_run==False config should be saved
    cli_args[-1] = "--trainer.max_epochs=1"
    with mock.patch("sys.argv", ["any.py"] + cli_args):
        LightningCLI(BoringModel)
    assert os.path.isfile(config_path)
    # If run again on same directory exception should be raised since config file already exists
    with mock.patch("sys.argv", ["any.py"] + cli_args), pytest.raises(RuntimeError):
        LightningCLI(BoringModel)
def test_lightning_cli_config_and_subclass_mode(tmpdir):
    """A YAML config with class_path entries drives subclass-mode model/data."""
    input_config = {
        "fit": {
            "model": {"class_path": "tests.helpers.BoringModel"},
            "data": {"class_path": "tests.helpers.BoringDataModule", "init_args": {"data_dir": str(tmpdir)}},
            "trainer": {"default_root_dir": str(tmpdir), "max_epochs": 1, "weights_summary": None},
        }
    }
    config_path = tmpdir / "config.yaml"
    with open(config_path, "w") as f:
        f.write(yaml.dump(input_config))
    with mock.patch("sys.argv", ["any.py", "--config", str(config_path)]):
        cli = LightningCLI(
            BoringModel,
            BoringDataModule,
            subclass_mode_model=True,
            subclass_mode_data=True,
            trainer_defaults={"callbacks": LearningRateMonitor()},
        )
    # The saved config should match what the CLI parsed from the input file.
    config_path = tmpdir / "lightning_logs" / "version_0" / "config.yaml"
    assert os.path.isfile(config_path)
    with open(config_path) as f:
        loaded_config = yaml.safe_load(f.read())
    loaded_config = loaded_config["fit"]
    cli_config = cli.config["fit"]
    assert loaded_config["model"] == cli_config["model"]
    assert loaded_config["data"] == cli_config["data"]
    assert loaded_config["trainer"] == cli_config["trainer"]
def any_model_any_data_cli():
    """Run a fully subclass-mode LightningCLI accepting any model/datamodule."""
    LightningCLI(LightningModule, LightningDataModule, subclass_mode_model=True, subclass_mode_data=True)
def test_lightning_cli_help():
    """--help output lists config options and every Trainer.__init__ parameter."""
    cli_args = ["any.py", "fit", "--help"]
    out = StringIO()
    # argparse --help exits; capture stdout and swallow the SystemExit.
    with mock.patch("sys.argv", cli_args), redirect_stdout(out), pytest.raises(SystemExit):
        any_model_any_data_cli()
    out = out.getvalue()
    assert "--print_config" in out
    assert "--config" in out
    assert "--seed_everything" in out
    assert "--model.help" in out
    assert "--data.help" in out
    skip_params = {"self"}
    for param in inspect.signature(Trainer.__init__).parameters.keys():
        if param not in skip_params:
            assert f"--trainer.{param}" in out
    # Class-specific help exposes that class's init args.
    cli_args = ["any.py", "fit", "--data.help=tests.helpers.BoringDataModule"]
    out = StringIO()
    with mock.patch("sys.argv", cli_args), redirect_stdout(out), pytest.raises(SystemExit):
        any_model_any_data_cli()
    assert "--data.init_args.data_dir" in out.getvalue()
def test_lightning_cli_print_config():
    """--print_config emits a YAML document reflecting the given CLI args."""
    cli_args = [
        "any.py",
        "predict",
        "--seed_everything=1234",
        "--model=tests.helpers.BoringModel",
        "--data=tests.helpers.BoringDataModule",
        "--print_config",
    ]
    out = StringIO()
    # --print_config exits after printing; capture stdout and parse the YAML.
    with mock.patch("sys.argv", cli_args), redirect_stdout(out), pytest.raises(SystemExit):
        any_model_any_data_cli()
    outval = yaml.safe_load(out.getvalue())
    assert outval["seed_everything"] == 1234
    assert outval["model"]["class_path"] == "tests.helpers.BoringModel"
    assert outval["data"]["class_path"] == "tests.helpers.BoringDataModule"
    assert outval["ckpt_path"] is None
def test_lightning_cli_submodules(tmpdir):
    """Nested LightningModule constructor args are instantiated from config."""
    class MainModule(BoringModel):
        def __init__(self, submodule1: LightningModule, submodule2: LightningModule, main_param: int = 1):
            super().__init__()
            self.submodule1 = submodule1
            self.submodule2 = submodule2
    config = """model:
        main_param: 2
        submodule1:
            class_path: tests.helpers.BoringModel
        submodule2:
            class_path: tests.helpers.BoringModel
    """
    config_path = tmpdir / "config.yaml"
    with open(config_path, "w") as f:
        f.write(config)
    cli_args = [f"--trainer.default_root_dir={tmpdir}", f"--config={str(config_path)}"]
    with mock.patch("sys.argv", ["any.py"] + cli_args):
        cli = LightningCLI(MainModule, run=False)
    assert cli.config["model"]["main_param"] == 2
    assert isinstance(cli.model.submodule1, BoringModel)
    assert isinstance(cli.model.submodule2, BoringModel)
@pytest.mark.skipif(torchvision_version < version.parse("0.8.0"), reason="torchvision>=0.8.0 is required")
def test_lightning_cli_torch_modules(tmpdir):
    """torch.nn.Module-typed constructor args are built from class_path config."""
    class TestModule(BoringModel):
        def __init__(self, activation: torch.nn.Module = None, transform: Optional[List[torch.nn.Module]] = None):
            super().__init__()
            self.activation = activation
            self.transform = transform
    config = """model:
        activation:
          class_path: torch.nn.LeakyReLU
          init_args:
            negative_slope: 0.2
        transform:
          - class_path: torchvision.transforms.Resize
            init_args:
              size: 64
          - class_path: torchvision.transforms.CenterCrop
            init_args:
              size: 64
    """
    config_path = tmpdir / "config.yaml"
    with open(config_path, "w") as f:
        f.write(config)
    cli_args = [f"--trainer.default_root_dir={tmpdir}", f"--config={str(config_path)}"]
    with mock.patch("sys.argv", ["any.py"] + cli_args):
        cli = LightningCLI(TestModule, run=False)
    assert isinstance(cli.model.activation, torch.nn.LeakyReLU)
    assert cli.model.activation.negative_slope == 0.2
    assert len(cli.model.transform) == 2
    assert all(isinstance(v, torch.nn.Module) for v in cli.model.transform)
class BoringModelRequiredClasses(BoringModel):
def __init__(self, num_classes: int, batch_size: int = 8):
super().__init__()
self.num_classes = num_classes
self.batch_size = batch_size
class BoringDataModuleBatchSizeAndClasses(BoringDataModule):
def __init__(self, batch_size: int = 8):
super().__init__()
self.batch_size = batch_size
self.num_classes = 5 # only available after instantiation
def test_lightning_cli_link_arguments(tmpdir):
class MyLightningCLI(LightningCLI):
def add_arguments_to_parser(self, parser):
parser.link_arguments("data.batch_size", "model.batch_size")
parser.link_arguments("data.num_classes", "model.num_classes", apply_on="instantiate")
cli_args = [f"--trainer.default_root_dir={tmpdir}", "--data.batch_size=12"]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = MyLightningCLI(BoringModelRequiredClasses, BoringDataModuleBatchSizeAndClasses, run=False)
assert cli.model.batch_size == 12
assert cli.model.num_classes == 5
class MyLightningCLI(LightningCLI):
def add_arguments_to_parser(self, parser):
parser.link_arguments("data.batch_size", "model.init_args.batch_size")
parser.link_arguments("data.num_classes", "model.init_args.num_classes", apply_on="instantiate")
cli_args[-1] = "--model=tests.utilities.test_cli.BoringModelRequiredClasses"
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = MyLightningCLI(
BoringModelRequiredClasses, BoringDataModuleBatchSizeAndClasses, subclass_mode_model=True, run=False
)
assert cli.model.batch_size == 8
assert cli.model.num_classes == 5
class EarlyExitTestModel(BoringModel):
def on_fit_start(self):
raise Exception("Error on fit start")
@pytest.mark.parametrize("logger", (False, True))
@pytest.mark.parametrize(
"trainer_kwargs",
(
dict(accelerator="ddp_cpu"),
dict(accelerator="ddp_cpu", plugins="ddp_find_unused_parameters_false"),
pytest.param({"tpu_cores": 1}, marks=RunIf(tpu=True)),
),
)
def test_cli_ddp_spawn_save_config_callback(tmpdir, logger, trainer_kwargs):
with mock.patch("sys.argv", ["any.py", "fit"]), pytest.raises(Exception, match=r"Error on fit start"):
LightningCLI(
EarlyExitTestModel,
trainer_defaults={
"default_root_dir": str(tmpdir),
"logger": logger,
"max_steps": 1,
"max_epochs": 1,
**trainer_kwargs,
},
)
if logger:
config_dir = tmpdir / "lightning_logs"
# no more version dirs should get created
assert os.listdir(config_dir) == ["version_0"]
config_path = config_dir / "version_0" / "config.yaml"
else:
config_path = tmpdir / "config.yaml"
assert os.path.isfile(config_path)
def test_cli_config_overwrite(tmpdir):
trainer_defaults = {"default_root_dir": str(tmpdir), "logger": False, "max_steps": 1, "max_epochs": 1}
argv = ["any.py", "fit"]
with mock.patch("sys.argv", argv):
LightningCLI(BoringModel, trainer_defaults=trainer_defaults)
with mock.patch("sys.argv", argv), pytest.raises(RuntimeError, match="Aborting to avoid overwriting"):
LightningCLI(BoringModel, trainer_defaults=trainer_defaults)
with mock.patch("sys.argv", argv):
LightningCLI(BoringModel, save_config_overwrite=True, trainer_defaults=trainer_defaults)
@pytest.mark.parametrize("run", (False, True))
def test_lightning_cli_optimizer(tmpdir, run):
class MyLightningCLI(LightningCLI):
def add_arguments_to_parser(self, parser):
parser.add_optimizer_args(torch.optim.Adam)
match = (
"BoringModel.configure_optimizers` will be overridden by "
"`MyLightningCLI.add_configure_optimizers_method_to_model`"
)
argv = ["fit", f"--trainer.default_root_dir={tmpdir}", "--trainer.fast_dev_run=1"] if run else []
with mock.patch("sys.argv", ["any.py"] + argv), pytest.warns(UserWarning, match=match):
cli = MyLightningCLI(BoringModel, run=run)
assert cli.model.configure_optimizers is not BoringModel.configure_optimizers
if not run:
optimizer = cli.model.configure_optimizers()
assert isinstance(optimizer, torch.optim.Adam)
else:
assert len(cli.trainer.optimizers) == 1
assert isinstance(cli.trainer.optimizers[0], torch.optim.Adam)
assert len(cli.trainer.lr_schedulers) == 0
def test_lightning_cli_optimizer_and_lr_scheduler(tmpdir):
class MyLightningCLI(LightningCLI):
def add_arguments_to_parser(self, parser):
parser.add_optimizer_args(torch.optim.Adam)
parser.add_lr_scheduler_args(torch.optim.lr_scheduler.ExponentialLR)
cli_args = ["fit", f"--trainer.default_root_dir={tmpdir}", "--trainer.fast_dev_run=1", "--lr_scheduler.gamma=0.8"]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = MyLightningCLI(BoringModel)
assert cli.model.configure_optimizers is not BoringModel.configure_optimizers
assert len(cli.trainer.optimizers) == 1
assert isinstance(cli.trainer.optimizers[0], torch.optim.Adam)
assert len(cli.trainer.lr_schedulers) == 1
assert isinstance(cli.trainer.lr_schedulers[0]["scheduler"], torch.optim.lr_scheduler.ExponentialLR)
assert cli.trainer.lr_schedulers[0]["scheduler"].gamma == 0.8
def test_lightning_cli_optimizer_and_lr_scheduler_subclasses(tmpdir):
class MyLightningCLI(LightningCLI):
def add_arguments_to_parser(self, parser):
parser.add_optimizer_args((torch.optim.SGD, torch.optim.Adam))
parser.add_lr_scheduler_args((torch.optim.lr_scheduler.StepLR, torch.optim.lr_scheduler.ExponentialLR))
optimizer_arg = dict(class_path="torch.optim.Adam", init_args=dict(lr=0.01))
lr_scheduler_arg = dict(class_path="torch.optim.lr_scheduler.StepLR", init_args=dict(step_size=50))
cli_args = [
"fit",
f"--trainer.default_root_dir={tmpdir}",
"--trainer.max_epochs=1",
f"--optimizer={json.dumps(optimizer_arg)}",
f"--lr_scheduler={json.dumps(lr_scheduler_arg)}",
]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = MyLightningCLI(BoringModel)
assert len(cli.trainer.optimizers) == 1
assert isinstance(cli.trainer.optimizers[0], torch.optim.Adam)
assert len(cli.trainer.lr_schedulers) == 1
assert isinstance(cli.trainer.lr_schedulers[0]["scheduler"], torch.optim.lr_scheduler.StepLR)
assert cli.trainer.lr_schedulers[0]["scheduler"].step_size == 50
@pytest.mark.parametrize("use_registries", [False, True])
def test_lightning_cli_optimizers_and_lr_scheduler_with_link_to(use_registries, tmpdir):
class MyLightningCLI(LightningCLI):
def add_arguments_to_parser(self, parser):
parser.add_optimizer_args(
OPTIMIZER_REGISTRY.classes if use_registries else torch.optim.Adam,
nested_key="optim1",
link_to="model.optim1",
)
parser.add_optimizer_args((torch.optim.ASGD, torch.optim.SGD), nested_key="optim2", link_to="model.optim2")
parser.add_lr_scheduler_args(
LR_SCHEDULER_REGISTRY.classes if use_registries else torch.optim.lr_scheduler.ExponentialLR,
link_to="model.scheduler",
)
class TestModel(BoringModel):
def __init__(self, optim1: dict, optim2: dict, scheduler: dict):
super().__init__()
self.optim1 = instantiate_class(self.parameters(), optim1)
self.optim2 = instantiate_class(self.parameters(), optim2)
self.scheduler = instantiate_class(self.optim1, scheduler)
cli_args = ["fit", f"--trainer.default_root_dir={tmpdir}", "--trainer.max_epochs=1", "--lr_scheduler.gamma=0.2"]
if use_registries:
cli_args += [
"--optim1",
"Adam",
"--optim1.weight_decay",
"0.001",
"--optim2=SGD",
"--optim2.lr=0.01",
"--lr_scheduler=ExponentialLR",
]
else:
cli_args += ["--optim2.class_path=torch.optim.SGD", "--optim2.init_args.lr=0.01"]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = MyLightningCLI(TestModel)
assert isinstance(cli.model.optim1, torch.optim.Adam)
assert isinstance(cli.model.optim2, torch.optim.SGD)
assert cli.model.optim2.param_groups[0]["lr"] == 0.01
assert isinstance(cli.model.scheduler, torch.optim.lr_scheduler.ExponentialLR)
@pytest.mark.parametrize("fn", [fn.value for fn in TrainerFn])
def test_lightning_cli_trainer_fn(fn):
class TestCLI(LightningCLI):
def __init__(self, *args, **kwargs):
self.called = []
super().__init__(*args, **kwargs)
def before_fit(self):
self.called.append("before_fit")
def fit(self, **_):
self.called.append("fit")
def after_fit(self):
self.called.append("after_fit")
def before_validate(self):
self.called.append("before_validate")
def validate(self, **_):
self.called.append("validate")
def after_validate(self):
self.called.append("after_validate")
def before_test(self):
self.called.append("before_test")
def test(self, **_):
self.called.append("test")
def after_test(self):
self.called.append("after_test")
def before_predict(self):
self.called.append("before_predict")
def predict(self, **_):
self.called.append("predict")
def after_predict(self):
self.called.append("after_predict")
def before_tune(self):
self.called.append("before_tune")
def tune(self, **_):
self.called.append("tune")
def after_tune(self):
self.called.append("after_tune")
with mock.patch("sys.argv", ["any.py", fn]):
cli = TestCLI(BoringModel)
assert cli.called == [f"before_{fn}", fn, f"after_{fn}"]
def test_lightning_cli_subcommands():
subcommands = LightningCLI.subcommands()
trainer = Trainer()
for subcommand, exclude in subcommands.items():
fn = getattr(trainer, subcommand)
parameters = list(inspect.signature(fn).parameters)
for e in exclude:
# if this fails, it's because the parameter has been removed from the associated `Trainer` function
# and the `LightningCLI` subcommand exclusion list needs to be updated
assert e in parameters
def test_lightning_cli_custom_subcommand():
class TestTrainer(Trainer):
def foo(self, model: LightningModule, x: int, y: float = 1.0):
"""Sample extra function.
Args:
model: A model
x: The x
y: The y
"""
class TestCLI(LightningCLI):
@staticmethod
def subcommands():
subcommands = LightningCLI.subcommands()
subcommands["foo"] = {"model"}
return subcommands
out = StringIO()
with mock.patch("sys.argv", ["any.py", "-h"]), redirect_stdout(out), pytest.raises(SystemExit):
TestCLI(BoringModel, trainer_class=TestTrainer)
out = out.getvalue()
assert "Sample extra function." in out
assert "{fit,validate,test,predict,tune,foo}" in out
out = StringIO()
with mock.patch("sys.argv", ["any.py", "foo", "-h"]), redirect_stdout(out), pytest.raises(SystemExit):
TestCLI(BoringModel, trainer_class=TestTrainer)
out = out.getvalue()
assert "A model" not in out
assert "Sample extra function:" in out
assert "--x X" in out
assert "The x (required, type: int)" in out
assert "--y Y" in out
assert "The y (type: float, default: 1.0)" in out
def test_lightning_cli_run():
with mock.patch("sys.argv", ["any.py"]):
cli = LightningCLI(BoringModel, run=False)
assert cli.trainer.global_step == 0
assert isinstance(cli.trainer, Trainer)
assert isinstance(cli.model, LightningModule)
with mock.patch("sys.argv", ["any.py", "fit"]):
cli = LightningCLI(BoringModel, trainer_defaults={"max_steps": 1, "max_epochs": 1})
assert cli.trainer.global_step == 1
assert isinstance(cli.trainer, Trainer)
assert isinstance(cli.model, LightningModule)
@OPTIMIZER_REGISTRY
class CustomAdam(torch.optim.Adam):
pass
@LR_SCHEDULER_REGISTRY
class CustomCosineAnnealingLR(torch.optim.lr_scheduler.CosineAnnealingLR):
pass
@CALLBACK_REGISTRY
class CustomCallback(Callback):
pass
def test_registries(tmpdir):
assert "SGD" in OPTIMIZER_REGISTRY.names
assert "RMSprop" in OPTIMIZER_REGISTRY.names
assert "CustomAdam" in OPTIMIZER_REGISTRY.names
assert "CosineAnnealingLR" in LR_SCHEDULER_REGISTRY.names
assert "CosineAnnealingWarmRestarts" in LR_SCHEDULER_REGISTRY.names
assert "CustomCosineAnnealingLR" in LR_SCHEDULER_REGISTRY.names
assert "EarlyStopping" in CALLBACK_REGISTRY.names
assert "CustomCallback" in CALLBACK_REGISTRY.names
with pytest.raises(MisconfigurationException, match="is already present in the registry"):
OPTIMIZER_REGISTRY.register_classes(torch.optim, torch.optim.Optimizer)
OPTIMIZER_REGISTRY.register_classes(torch.optim, torch.optim.Optimizer, override=True)
# test `_Registry.__call__` returns the class
assert isinstance(CustomCallback(), CustomCallback)
@MODEL_REGISTRY
class TestModel(BoringModel):
def __init__(self, foo, bar=5):
super().__init__()
self.foo = foo
self.bar = bar
MODEL_REGISTRY(cls=BoringModel)
def test_lightning_cli_model_choices():
with mock.patch("sys.argv", ["any.py", "fit", "--model=BoringModel"]), mock.patch(
"pytorch_lightning.Trainer._fit_impl"
) as run:
cli = LightningCLI(trainer_defaults={"fast_dev_run": 1})
assert isinstance(cli.model, BoringModel)
run.assert_called_once_with(cli.model, ANY, ANY, ANY)
with mock.patch("sys.argv", ["any.py", "--model=TestModel", "--model.foo", "123"]):
cli = LightningCLI(run=False)
assert isinstance(cli.model, TestModel)
assert cli.model.foo == 123
assert cli.model.bar == 5
@pytest.mark.parametrize("use_class_path_callbacks", [False, True])
def test_registries_resolution(use_class_path_callbacks):
"""This test validates registries are used when simplified command line are being used."""
cli_args = [
"--optimizer",
"Adam",
"--optimizer.lr",
"0.0001",
"--trainer.callbacks=LearningRateMonitor",
"--trainer.callbacks.logging_interval=epoch",
"--trainer.callbacks.log_momentum=True",
"--model=BoringModel",
"--trainer.callbacks=ModelCheckpoint",
"--trainer.callbacks.monitor=loss",
"--lr_scheduler",
"StepLR",
"--lr_scheduler.step_size=50",
]
extras = []
if use_class_path_callbacks:
callbacks = [
{"class_path": "pytorch_lightning.callbacks.Callback"},
{"class_path": "pytorch_lightning.callbacks.Callback", "init_args": {}},
]
cli_args += [f"--trainer.callbacks={json.dumps(callbacks)}"]
extras = [Callback, Callback]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = LightningCLI(run=False)
assert isinstance(cli.model, BoringModel)
optimizers, lr_scheduler = cli.model.configure_optimizers()
assert isinstance(optimizers[0], torch.optim.Adam)
assert optimizers[0].param_groups[0]["lr"] == 0.0001
assert lr_scheduler[0].step_size == 50
callback_types = [type(c) for c in cli.trainer.callbacks]
expected = [LearningRateMonitor, SaveConfigCallback, ModelCheckpoint] + extras
assert all(t in callback_types for t in expected)
def test_argv_transformation_noop():
base = ["any.py", "--trainer.max_epochs=1"]
argv = LightningArgumentParser._convert_argv_issue_85(CALLBACK_REGISTRY.classes, "trainer.callbacks", base)
assert argv == base
def test_argv_transformation_single_callback():
base = ["any.py", "--trainer.max_epochs=1"]
input = base + ["--trainer.callbacks=ModelCheckpoint", "--trainer.callbacks.monitor=val_loss"]
callbacks = [
{
"class_path": "pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint",
"init_args": {"monitor": "val_loss"},
}
]
expected = base + ["--trainer.callbacks", str(callbacks)]
argv = LightningArgumentParser._convert_argv_issue_85(CALLBACK_REGISTRY.classes, "trainer.callbacks", input)
assert argv == expected
def test_argv_transformation_multiple_callbacks():
base = ["any.py", "--trainer.max_epochs=1"]
input = base + [
"--trainer.callbacks=ModelCheckpoint",
"--trainer.callbacks.monitor=val_loss",
"--trainer.callbacks=ModelCheckpoint",
"--trainer.callbacks.monitor=val_acc",
]
callbacks = [
{
"class_path": "pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint",
"init_args": {"monitor": "val_loss"},
},
{
"class_path": "pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint",
"init_args": {"monitor": "val_acc"},
},
]
expected = base + ["--trainer.callbacks", str(callbacks)]
argv = LightningArgumentParser._convert_argv_issue_85(CALLBACK_REGISTRY.classes, "trainer.callbacks", input)
assert argv == expected
def test_argv_transformation_multiple_callbacks_with_config():
base = ["any.py", "--trainer.max_epochs=1"]
nested_key = "trainer.callbacks"
input = base + [
f"--{nested_key}=ModelCheckpoint",
f"--{nested_key}.monitor=val_loss",
f"--{nested_key}=ModelCheckpoint",
f"--{nested_key}.monitor=val_acc",
f"--{nested_key}=[{{'class_path': 'pytorch_lightning.callbacks.Callback'}}]",
]
callbacks = [
{
"class_path": "pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint",
"init_args": {"monitor": "val_loss"},
},
{
"class_path": "pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint",
"init_args": {"monitor": "val_acc"},
},
{"class_path": "pytorch_lightning.callbacks.Callback"},
]
expected = base + ["--trainer.callbacks", str(callbacks)]
nested_key = "trainer.callbacks"
argv = LightningArgumentParser._convert_argv_issue_85(CALLBACK_REGISTRY.classes, nested_key, input)
assert argv == expected
@pytest.mark.parametrize(
["args", "expected", "nested_key", "registry"],
[
(
["--optimizer", "Adadelta"],
{"class_path": "torch.optim.adadelta.Adadelta", "init_args": {}},
"optimizer",
OPTIMIZER_REGISTRY,
),
(
["--optimizer", "Adadelta", "--optimizer.lr", "10"],
{"class_path": "torch.optim.adadelta.Adadelta", "init_args": {"lr": "10"}},
"optimizer",
OPTIMIZER_REGISTRY,
),
(
["--lr_scheduler", "OneCycleLR"],
{"class_path": "torch.optim.lr_scheduler.OneCycleLR", "init_args": {}},
"lr_scheduler",
LR_SCHEDULER_REGISTRY,
),
(
["--lr_scheduler", "OneCycleLR", "--lr_scheduler.anneal_strategy=linear"],
{"class_path": "torch.optim.lr_scheduler.OneCycleLR", "init_args": {"anneal_strategy": "linear"}},
"lr_scheduler",
LR_SCHEDULER_REGISTRY,
),
],
)
def test_argv_transformations_with_optimizers_and_lr_schedulers(args, expected, nested_key, registry):
base = ["any.py", "--trainer.max_epochs=1"]
argv = base + args
new_argv = LightningArgumentParser._convert_argv_issue_84(registry.classes, nested_key, argv)
assert new_argv == base + [f"--{nested_key}", str(expected)]
def test_optimizers_and_lr_schedulers_reload(tmpdir):
base = ["any.py", "--trainer.max_epochs=1"]
input = base + [
"--lr_scheduler",
"OneCycleLR",
"--lr_scheduler.total_steps=10",
"--lr_scheduler.max_lr=1",
"--optimizer",
"Adam",
"--optimizer.lr=0.1",
]
# save config
out = StringIO()
with mock.patch("sys.argv", input + ["--print_config"]), redirect_stdout(out), pytest.raises(SystemExit):
LightningCLI(BoringModel, run=False)
# validate yaml
yaml_config = out.getvalue()
dict_config = yaml.safe_load(yaml_config)
assert dict_config["optimizer"]["class_path"] == "torch.optim.adam.Adam"
assert dict_config["optimizer"]["init_args"]["lr"] == 0.1
assert dict_config["lr_scheduler"]["class_path"] == "torch.optim.lr_scheduler.OneCycleLR"
# reload config
yaml_config_file = tmpdir / "config.yaml"
yaml_config_file.write_text(yaml_config, "utf-8")
with mock.patch("sys.argv", base + [f"--config={yaml_config_file}"]):
LightningCLI(BoringModel, run=False)
def test_optimizers_and_lr_schedulers_add_arguments_to_parser_implemented_reload(tmpdir):
class TestLightningCLI(LightningCLI):
def __init__(self, *args):
super().__init__(*args, run=False)
def add_arguments_to_parser(self, parser):
parser.add_optimizer_args(OPTIMIZER_REGISTRY.classes, nested_key="opt1", link_to="model.opt1_config")
parser.add_optimizer_args(
(torch.optim.ASGD, torch.optim.SGD), nested_key="opt2", link_to="model.opt2_config"
)
parser.add_lr_scheduler_args(LR_SCHEDULER_REGISTRY.classes, link_to="model.sch_config")
parser.add_argument("--something", type=str, nargs="+")
class TestModel(BoringModel):
def __init__(self, opt1_config: dict, opt2_config: dict, sch_config: dict):
super().__init__()
self.opt1_config = opt1_config
self.opt2_config = opt2_config
self.sch_config = sch_config
opt1 = instantiate_class(self.parameters(), opt1_config)
assert isinstance(opt1, torch.optim.Adam)
opt2 = instantiate_class(self.parameters(), opt2_config)
assert isinstance(opt2, torch.optim.ASGD)
sch = instantiate_class(opt1, sch_config)
assert isinstance(sch, torch.optim.lr_scheduler.OneCycleLR)
base = ["any.py", "--trainer.max_epochs=1"]
input = base + [
"--lr_scheduler",
"OneCycleLR",
"--lr_scheduler.total_steps=10",
"--lr_scheduler.max_lr=1",
"--opt1",
"Adam",
"--opt2.lr=0.1",
"--opt2",
"ASGD",
"--lr_scheduler.anneal_strategy=linear",
"--something",
"a",
"b",
"c",
]
# save config
out = StringIO()
with mock.patch("sys.argv", input + ["--print_config"]), redirect_stdout(out), pytest.raises(SystemExit):
TestLightningCLI(TestModel)
# validate yaml
yaml_config = out.getvalue()
dict_config = yaml.safe_load(yaml_config)
assert dict_config["opt1"]["class_path"] == "torch.optim.adam.Adam"
assert dict_config["opt2"]["class_path"] == "torch.optim.asgd.ASGD"
assert dict_config["opt2"]["init_args"]["lr"] == 0.1
assert dict_config["lr_scheduler"]["class_path"] == "torch.optim.lr_scheduler.OneCycleLR"
assert dict_config["lr_scheduler"]["init_args"]["anneal_strategy"] == "linear"
assert dict_config["something"] == ["a", "b", "c"]
# reload config
yaml_config_file = tmpdir / "config.yaml"
yaml_config_file.write_text(yaml_config, "utf-8")
with mock.patch("sys.argv", base + [f"--config={yaml_config_file}"]):
cli = TestLightningCLI(TestModel)
assert cli.model.opt1_config["class_path"] == "torch.optim.adam.Adam"
assert cli.model.opt2_config["class_path"] == "torch.optim.asgd.ASGD"
assert cli.model.opt2_config["init_args"]["lr"] == 0.1
assert cli.model.sch_config["class_path"] == "torch.optim.lr_scheduler.OneCycleLR"
assert cli.model.sch_config["init_args"]["anneal_strategy"] == "linear"
@RunIf(min_python="3.7.3") # bpo-17185: `autospec=True` and `inspect.signature` do not play well
def test_lightning_cli_config_with_subcommand():
config = {"test": {"trainer": {"limit_test_batches": 1}, "verbose": True, "ckpt_path": "foobar"}}
with mock.patch("sys.argv", ["any.py", f"--config={config}"]), mock.patch(
"pytorch_lightning.Trainer.test", autospec=True
) as test_mock:
cli = LightningCLI(BoringModel)
test_mock.assert_called_once_with(cli.trainer, cli.model, verbose=True, ckpt_path="foobar")
assert cli.trainer.limit_test_batches == 1
@RunIf(min_python="3.7.3")
def test_lightning_cli_config_before_subcommand():
config = {
"validate": {"trainer": {"limit_val_batches": 1}, "verbose": False, "ckpt_path": "barfoo"},
"test": {"trainer": {"limit_test_batches": 1}, "verbose": True, "ckpt_path": "foobar"},
}
with mock.patch("sys.argv", ["any.py", f"--config={config}", "test"]), mock.patch(
"pytorch_lightning.Trainer.test", autospec=True
) as test_mock:
cli = LightningCLI(BoringModel)
test_mock.assert_called_once_with(cli.trainer, model=cli.model, verbose=True, ckpt_path="foobar")
assert cli.trainer.limit_test_batches == 1
with mock.patch("sys.argv", ["any.py", f"--config={config}", "validate"]), mock.patch(
"pytorch_lightning.Trainer.validate", autospec=True
) as validate_mock:
cli = LightningCLI(BoringModel)
validate_mock.assert_called_once_with(cli.trainer, cli.model, verbose=False, ckpt_path="barfoo")
assert cli.trainer.limit_val_batches == 1
@RunIf(min_python="3.7.3")
def test_lightning_cli_config_before_subcommand_two_configs():
config1 = {"validate": {"trainer": {"limit_val_batches": 1}, "verbose": False, "ckpt_path": "barfoo"}}
config2 = {"test": {"trainer": {"limit_test_batches": 1}, "verbose": True, "ckpt_path": "foobar"}}
with mock.patch("sys.argv", ["any.py", f"--config={config1}", f"--config={config2}", "test"]), mock.patch(
"pytorch_lightning.Trainer.test", autospec=True
) as test_mock:
cli = LightningCLI(BoringModel)
test_mock.assert_called_once_with(cli.trainer, model=cli.model, verbose=True, ckpt_path="foobar")
assert cli.trainer.limit_test_batches == 1
with mock.patch("sys.argv", ["any.py", f"--config={config1}", f"--config={config2}", "validate"]), mock.patch(
"pytorch_lightning.Trainer.validate", autospec=True
) as validate_mock:
cli = LightningCLI(BoringModel)
validate_mock.assert_called_once_with(cli.trainer, cli.model, verbose=False, ckpt_path="barfoo")
assert cli.trainer.limit_val_batches == 1
@RunIf(min_python="3.7.3")
def test_lightning_cli_config_after_subcommand():
config = {"trainer": {"limit_test_batches": 1}, "verbose": True, "ckpt_path": "foobar"}
with mock.patch("sys.argv", ["any.py", "test", f"--config={config}"]), mock.patch(
"pytorch_lightning.Trainer.test", autospec=True
) as test_mock:
cli = LightningCLI(BoringModel)
test_mock.assert_called_once_with(cli.trainer, cli.model, verbose=True, ckpt_path="foobar")
assert cli.trainer.limit_test_batches == 1
@RunIf(min_python="3.7.3")
def test_lightning_cli_config_before_and_after_subcommand():
config1 = {"test": {"trainer": {"limit_test_batches": 1}, "verbose": True, "ckpt_path": "foobar"}}
config2 = {"trainer": {"fast_dev_run": 1}, "verbose": False, "ckpt_path": "foobar"}
with mock.patch("sys.argv", ["any.py", f"--config={config1}", "test", f"--config={config2}"]), mock.patch(
"pytorch_lightning.Trainer.test", autospec=True
) as test_mock:
cli = LightningCLI(BoringModel)
test_mock.assert_called_once_with(cli.trainer, model=cli.model, verbose=False, ckpt_path="foobar")
assert cli.trainer.limit_test_batches == 1
assert cli.trainer.fast_dev_run == 1
def test_lightning_cli_parse_kwargs_with_subcommands(tmpdir):
fit_config = {"trainer": {"limit_train_batches": 2}}
fit_config_path = tmpdir / "fit.yaml"
fit_config_path.write_text(str(fit_config), "utf8")
validate_config = {"trainer": {"limit_val_batches": 3}}
validate_config_path = tmpdir / "validate.yaml"
validate_config_path.write_text(str(validate_config), "utf8")
parser_kwargs = {
"fit": {"default_config_files": [str(fit_config_path)]},
"validate": {"default_config_files": [str(validate_config_path)]},
}
with mock.patch("sys.argv", ["any.py", "fit"]), mock.patch(
"pytorch_lightning.Trainer.fit", autospec=True
) as fit_mock:
cli = LightningCLI(BoringModel, parser_kwargs=parser_kwargs)
fit_mock.assert_called()
assert cli.trainer.limit_train_batches == 2
assert cli.trainer.limit_val_batches == 1.0
with mock.patch("sys.argv", ["any.py", "validate"]), mock.patch(
"pytorch_lightning.Trainer.validate", autospec=True
) as validate_mock:
cli = LightningCLI(BoringModel, parser_kwargs=parser_kwargs)
validate_mock.assert_called()
assert cli.trainer.limit_train_batches == 1.0
assert cli.trainer.limit_val_batches == 3
def test_lightning_cli_reinstantiate_trainer():
with mock.patch("sys.argv", ["any.py"]):
cli = LightningCLI(BoringModel, run=False)
assert cli.trainer.max_epochs == 1000
class TestCallback(Callback):
...
# make sure a new trainer can be easily created
trainer = cli.instantiate_trainer(max_epochs=123, callbacks=[TestCallback()])
# the new config is used
assert trainer.max_epochs == 123
assert {c.__class__ for c in trainer.callbacks} == {c.__class__ for c in cli.trainer.callbacks}.union(
{TestCallback}
)
# the existing config is not updated
assert cli.config_init["trainer"]["max_epochs"] is None
def test_cli_configure_optimizers_warning(tmpdir):
match = "configure_optimizers` will be overridden by `LightningCLI"
with mock.patch("sys.argv", ["any.py"]), no_warning_call(UserWarning, match=match):
LightningCLI(BoringModel, run=False)
with mock.patch("sys.argv", ["any.py", "--optimizer=Adam"]), pytest.warns(UserWarning, match=match):
LightningCLI(BoringModel, run=False)
| 38.750382 | 119 | 0.670882 |
ace76bf35c704392ed2844efdb5a59308d89c8fc | 777 | py | Python | cephprimarystorage/setup.py | zstackio/zstack-utility | 919d686d46c68836cbcad51ab0b8bf53bc88abda | [
"ECL-2.0",
"Apache-2.0"
] | 55 | 2017-02-10T07:55:21.000Z | 2021-09-01T00:59:36.000Z | cephprimarystorage/setup.py | zstackio/zstack-utility | 919d686d46c68836cbcad51ab0b8bf53bc88abda | [
"ECL-2.0",
"Apache-2.0"
] | 106 | 2017-02-13T09:58:27.000Z | 2022-02-15T09:51:48.000Z | cephprimarystorage/setup.py | zstackio/zstack-utility | 919d686d46c68836cbcad51ab0b8bf53bc88abda | [
"ECL-2.0",
"Apache-2.0"
] | 68 | 2017-02-13T11:02:01.000Z | 2021-12-16T11:02:01.000Z | from setuptools import setup, find_packages
import sys, os
version = '4.0.0'
setup(name='cephprimarystorage',
version=version,
description="ZStack ceph primary storage",
long_description="""\
ZStack ceph primary storage""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='zstack ceph',
author='Frank Zhang',
author_email='xing5820@gmail.com',
url='http://zstack.org',
license='Apache License 2',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| 28.777778 | 95 | 0.624196 |
ace76c2c803dc1d87fa1a3bed6397ca08df8f132 | 591 | py | Python | RecoCTPPS/TotemRPLocal/python/ctppsLocalTrackLiteProducer_cff.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | 1 | 2019-03-09T19:47:49.000Z | 2019-03-09T19:47:49.000Z | RecoCTPPS/TotemRPLocal/python/ctppsLocalTrackLiteProducer_cff.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | RecoCTPPS/TotemRPLocal/python/ctppsLocalTrackLiteProducer_cff.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | 1 | 2019-03-19T13:44:54.000Z | 2019-03-19T13:44:54.000Z | import FWCore.ParameterSet.Config as cms
from RecoCTPPS.TotemRPLocal.ctppsLocalTrackLiteDefaultProducer_cfi import ctppsLocalTrackLiteDefaultProducer
ctppsLocalTrackLiteProducer = ctppsLocalTrackLiteDefaultProducer.clone()
# enable the module for CTPPS era(s)
from Configuration.Eras.Modifier_ctpps_2016_cff import ctpps_2016
ctpps_2016.toModify(
ctppsLocalTrackLiteProducer,
doNothing = cms.bool(False)
)
from Configuration.Eras.Modifier_run2_miniAOD_80XLegacy_cff import run2_miniAOD_80XLegacy
run2_miniAOD_80XLegacy.toModify(ctppsLocalTrackLiteProducer, tagPixelTrack = "" )
| 34.764706 | 108 | 0.864636 |
ace76ce8a9bdabf2a5f7d8e9c13d95505608aaf4 | 55,097 | py | Python | tests/unit/utils/test_data.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/test_data.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 1 | 2017-07-10T21:44:39.000Z | 2017-07-10T21:44:39.000Z | tests/unit/utils/test_data.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | """
Tests for salt.utils.data
"""
import builtins
import logging
import salt.utils.data
import salt.utils.stringutils
from salt.utils.odict import OrderedDict
from tests.support.mock import patch
from tests.support.unit import LOREM_IPSUM, TestCase
log = logging.getLogger(__name__)
_b = lambda x: x.encode("utf-8")
_s = lambda x: salt.utils.stringutils.to_str(x, normalize=True)
# Some randomized data that will not decode
BYTES = b"1\x814\x10"
# This is an example of a unicode string with й constructed using two separate
# code points. Do not modify it.
EGGS = "\u044f\u0438\u0306\u0446\u0430"
class DataTestCase(TestCase):
test_data = [
"unicode_str",
_b("питон"),
123,
456.789,
True,
False,
None,
EGGS,
BYTES,
[123, 456.789, _b("спам"), True, False, None, EGGS, BYTES],
(987, 654.321, _b("яйца"), EGGS, None, (True, EGGS, BYTES)),
{
_b("str_key"): _b("str_val"),
None: True,
123: 456.789,
EGGS: BYTES,
_b("subdict"): {
"unicode_key": EGGS,
_b("tuple"): (123, "hello", _b("world"), True, EGGS, BYTES),
_b("list"): [456, _b("спам"), False, EGGS, BYTES],
},
},
OrderedDict([(_b("foo"), "bar"), (123, 456), (EGGS, BYTES)]),
]
def test_sorted_ignorecase(self):
test_list = ["foo", "Foo", "bar", "Bar"]
expected_list = ["bar", "Bar", "foo", "Foo"]
self.assertEqual(salt.utils.data.sorted_ignorecase(test_list), expected_list)
def test_mysql_to_dict(self):
test_mysql_output = [
"+----+------+-----------+------+---------+------+-------+------------------+",
"| Id | User | Host | db | Command | Time | State | Info |",
"+----+------+-----------+------+---------+------+-------+------------------+",
"| 7 | root | localhost | NULL | Query | 0 | init | show processlist |",
"+----+------+-----------+------+---------+------+-------+------------------+",
]
ret = salt.utils.data.mysql_to_dict(test_mysql_output, "Info")
expected_dict = {
"show processlist": {
"Info": "show processlist",
"db": "NULL",
"State": "init",
"Host": "localhost",
"Command": "Query",
"User": "root",
"Time": 0,
"Id": 7,
}
}
self.assertDictEqual(ret, expected_dict)
    def test_subdict_match(self):
        """
        Test matching colon-delimited expressions (including wildcards)
        against nested dicts and lists with salt.utils.data.subdict_match.
        """
        test_two_level_dict = {"foo": {"bar": "baz"}}
        test_two_level_comb_dict = {"foo": {"bar": "baz:woz"}}
        test_two_level_dict_and_list = {
            "abc": ["def", "ghi", {"lorem": {"ipsum": [{"dolor": "sit"}]}}],
        }
        test_three_level_dict = {"a": {"b": {"c": "v"}}}
        self.assertTrue(
            salt.utils.data.subdict_match(test_two_level_dict, "foo:bar:baz")
        )
        # In test_two_level_comb_dict, 'foo:bar' corresponds to 'baz:woz', not
        # 'baz'. This match should return False.
        self.assertFalse(
            salt.utils.data.subdict_match(test_two_level_comb_dict, "foo:bar:baz")
        )
        # This tests matching with the delimiter in the value part (in other
        # words, that the path 'foo:bar' corresponds to the string 'baz:woz').
        self.assertTrue(
            salt.utils.data.subdict_match(test_two_level_comb_dict, "foo:bar:baz:woz")
        )
        # This would match if test_two_level_comb_dict['foo']['bar'] was equal
        # to 'baz:woz:wiz', or if there was more deep nesting. But it does not,
        # so this should return False.
        self.assertFalse(
            salt.utils.data.subdict_match(
                test_two_level_comb_dict, "foo:bar:baz:woz:wiz"
            )
        )
        # This tests for cases when a key path corresponds to a list. The
        # value part 'ghi' should be successfully matched as it is a member of
        # the list corresponding to key path 'abc'. It is somewhat a
        # duplication of a test within test_traverse_dict_and_list, but
        # salt.utils.data.subdict_match() does more than just invoke
        # salt.utils.traverse_list_and_dict() so this particular assertion is a
        # sanity check.
        self.assertTrue(
            salt.utils.data.subdict_match(test_two_level_dict_and_list, "abc:ghi")
        )
        # This tests the use case of a dict embedded in a list, embedded in a
        # list, embedded in a dict. This is a rather absurd case, but it
        # confirms that match recursion works properly.
        self.assertTrue(
            salt.utils.data.subdict_match(
                test_two_level_dict_and_list, "abc:lorem:ipsum:dolor:sit"
            )
        )
        # Test four level dict match for reference
        self.assertTrue(salt.utils.data.subdict_match(test_three_level_dict, "a:b:c:v"))
        # Test regression in 2015.8 where 'a:c:v' would match 'a:b:c:v'
        self.assertFalse(salt.utils.data.subdict_match(test_three_level_dict, "a:c:v"))
        # Test wildcard match
        self.assertTrue(salt.utils.data.subdict_match(test_three_level_dict, "a:*:c:v"))
def test_subdict_match_with_wildcards(self):
"""
Tests subdict matching when wildcards are used in the expression
"""
data = {"a": {"b": {"ç": "d", "é": ["eff", "gee", "8ch"], "ĩ": {"j": "k"}}}}
assert salt.utils.data.subdict_match(data, "*:*:*:*")
assert salt.utils.data.subdict_match(data, "a:*:*:*")
assert salt.utils.data.subdict_match(data, "a:b:*:*")
assert salt.utils.data.subdict_match(data, "a:b:ç:*")
assert salt.utils.data.subdict_match(data, "a:b:*:d")
assert salt.utils.data.subdict_match(data, "a:*:ç:d")
assert salt.utils.data.subdict_match(data, "*:b:ç:d")
assert salt.utils.data.subdict_match(data, "*:*:ç:d")
assert salt.utils.data.subdict_match(data, "*:*:*:d")
assert salt.utils.data.subdict_match(data, "a:*:*:d")
assert salt.utils.data.subdict_match(data, "a:b:*:ef*")
assert salt.utils.data.subdict_match(data, "a:b:*:g*")
assert salt.utils.data.subdict_match(data, "a:b:*:j:*")
assert salt.utils.data.subdict_match(data, "a:b:*:j:k")
assert salt.utils.data.subdict_match(data, "a:b:*:*:k")
assert salt.utils.data.subdict_match(data, "a:b:*:*:*")
def test_traverse_dict(self):
test_two_level_dict = {"foo": {"bar": "baz"}}
self.assertDictEqual(
{"not_found": "nope"},
salt.utils.data.traverse_dict(
test_two_level_dict, "foo:bar:baz", {"not_found": "nope"}
),
)
self.assertEqual(
"baz",
salt.utils.data.traverse_dict(
test_two_level_dict, "foo:bar", {"not_found": "not_found"}
),
)
    def test_traverse_dict_and_list(self):
        """
        traverse_dict_and_list walks a colon-delimited key path through
        nested dicts and lists (numeric path components index into lists)
        and returns the supplied default when the path cannot be resolved.
        """
        test_two_level_dict = {"foo": {"bar": "baz"}}
        test_two_level_dict_and_list = {
            "foo": ["bar", "baz", {"lorem": {"ipsum": [{"dolor": "sit"}]}}]
        }
        # Check traversing too far: salt.utils.data.traverse_dict_and_list() returns
        # the value corresponding to a given key path, and baz is a value
        # corresponding to the key path foo:bar.
        self.assertDictEqual(
            {"not_found": "nope"},
            salt.utils.data.traverse_dict_and_list(
                test_two_level_dict, "foo:bar:baz", {"not_found": "nope"}
            ),
        )
        # Now check to ensure that foo:bar corresponds to baz
        self.assertEqual(
            "baz",
            salt.utils.data.traverse_dict_and_list(
                test_two_level_dict, "foo:bar", {"not_found": "not_found"}
            ),
        )
        # Check traversing too far
        self.assertDictEqual(
            {"not_found": "nope"},
            salt.utils.data.traverse_dict_and_list(
                test_two_level_dict_and_list, "foo:bar", {"not_found": "nope"}
            ),
        )
        # Check index 1 (2nd element) of list corresponding to path 'foo'
        self.assertEqual(
            "baz",
            salt.utils.data.traverse_dict_and_list(
                test_two_level_dict_and_list, "foo:1", {"not_found": "not_found"}
            ),
        )
        # Traverse a couple times into dicts embedded in lists
        self.assertEqual(
            "sit",
            salt.utils.data.traverse_dict_and_list(
                test_two_level_dict_and_list,
                "foo:lorem:ipsum:dolor",
                {"not_found": "not_found"},
            ),
        )
        # Traverse and match integer key in a nested dict
        # https://github.com/saltstack/salt/issues/56444
        self.assertEqual(
            "it worked",
            salt.utils.data.traverse_dict_and_list(
                {"foo": {1234: "it worked"}}, "foo:1234", "it didn't work",
            ),
        )
        # Make sure that we properly return the default value when the initial
        # attempt fails and YAML-loading the target key doesn't change its
        # value.
        self.assertEqual(
            "default",
            salt.utils.data.traverse_dict_and_list(
                {"foo": {"baz": "didn't work"}}, "foo:bar", "default",
            ),
        )
def test_issue_39709(self):
test_two_level_dict_and_list = {
"foo": ["bar", "baz", {"lorem": {"ipsum": [{"dolor": "sit"}]}}]
}
self.assertEqual(
"sit",
salt.utils.data.traverse_dict_and_list(
test_two_level_dict_and_list,
["foo", "lorem", "ipsum", "dolor"],
{"not_found": "not_found"},
),
)
def test_compare_dicts(self):
ret = salt.utils.data.compare_dicts(old={"foo": "bar"}, new={"foo": "bar"})
self.assertEqual(ret, {})
ret = salt.utils.data.compare_dicts(old={"foo": "bar"}, new={"foo": "woz"})
expected_ret = {"foo": {"new": "woz", "old": "bar"}}
self.assertDictEqual(ret, expected_ret)
def test_compare_lists_no_change(self):
ret = salt.utils.data.compare_lists(
old=[1, 2, 3, "a", "b", "c"], new=[1, 2, 3, "a", "b", "c"]
)
expected = {}
self.assertDictEqual(ret, expected)
def test_compare_lists_changes(self):
ret = salt.utils.data.compare_lists(
old=[1, 2, 3, "a", "b", "c"], new=[1, 2, 4, "x", "y", "z"]
)
expected = {"new": [4, "x", "y", "z"], "old": [3, "a", "b", "c"]}
self.assertDictEqual(ret, expected)
def test_compare_lists_changes_new(self):
ret = salt.utils.data.compare_lists(old=[1, 2, 3], new=[1, 2, 3, "x", "y", "z"])
expected = {"new": ["x", "y", "z"]}
self.assertDictEqual(ret, expected)
def test_compare_lists_changes_old(self):
ret = salt.utils.data.compare_lists(old=[1, 2, 3, "a", "b", "c"], new=[1, 2, 3])
expected = {"old": ["a", "b", "c"]}
self.assertDictEqual(ret, expected)
    def test_decode(self):
        """
        Companion to test_decode_to_str, they should both be kept up-to-date
        with one another.
        NOTE: This uses the helper "_b" defined above in the global scope,
        which encodes a string to a bytestring, assuming utf-8.
        """
        expected = [
            "unicode_str",
            "питон",
            123,
            456.789,
            True,
            False,
            None,
            "яйца",
            BYTES,
            [123, 456.789, "спам", True, False, None, "яйца", BYTES],
            (987, 654.321, "яйца", "яйца", None, (True, "яйца", BYTES)),
            {
                "str_key": "str_val",
                None: True,
                123: 456.789,
                "яйца": BYTES,
                "subdict": {
                    "unicode_key": "яйца",
                    "tuple": (123, "hello", "world", True, "яйца", BYTES),
                    "list": [456, "спам", False, "яйца", BYTES],
                },
            },
            OrderedDict([("foo", "bar"), (123, 456), ("яйца", BYTES)]),
        ]
        ret = salt.utils.data.decode(
            self.test_data,
            keep=True,
            normalize=True,
            preserve_dict_class=True,
            preserve_tuples=True,
        )
        self.assertEqual(ret, expected)
        # The binary data in the data structure should fail to decode, even
        # using the fallback, and raise an exception.
        self.assertRaises(
            UnicodeDecodeError,
            salt.utils.data.decode,
            self.test_data,
            keep=False,
            normalize=True,
            preserve_dict_class=True,
            preserve_tuples=True,
        )
        # Now munge the expected data so that we get what we would expect if we
        # disable preservation of dict class and tuples
        # (indexes 10-12 are the tuple, dict and OrderedDict entries above)
        expected[10] = [987, 654.321, "яйца", "яйца", None, [True, "яйца", BYTES]]
        expected[11]["subdict"]["tuple"] = [123, "hello", "world", True, "яйца", BYTES]
        expected[12] = {"foo": "bar", 123: 456, "яйца": BYTES}
        ret = salt.utils.data.decode(
            self.test_data,
            keep=True,
            normalize=True,
            preserve_dict_class=False,
            preserve_tuples=False,
        )
        self.assertEqual(ret, expected)
        # Now test single non-string, non-data-structure items, these should
        # return the same value when passed to this function
        for item in (123, 4.56, True, False, None):
            log.debug("Testing decode of %s", item)
            self.assertEqual(salt.utils.data.decode(item), item)
        # Test single strings (not in a data structure)
        self.assertEqual(salt.utils.data.decode("foo"), "foo")
        self.assertEqual(salt.utils.data.decode(_b("bar")), "bar")
        # EGGS is composed of separate code points; normalize=True composes it
        self.assertEqual(salt.utils.data.decode(EGGS, normalize=True), "яйца")
        self.assertEqual(salt.utils.data.decode(EGGS, normalize=False), EGGS)
        # Test binary blob
        self.assertEqual(salt.utils.data.decode(BYTES, keep=True), BYTES)
        self.assertRaises(UnicodeDecodeError, salt.utils.data.decode, BYTES, keep=False)
def test_circular_refs_dicts(self):
test_dict = {"key": "value", "type": "test1"}
test_dict["self"] = test_dict
ret = salt.utils.data._remove_circular_refs(ob=test_dict)
self.assertDictEqual(ret, {"key": "value", "type": "test1", "self": None})
def test_circular_refs_lists(self):
test_list = {
"foo": [],
}
test_list["foo"].append((test_list,))
ret = salt.utils.data._remove_circular_refs(ob=test_list)
self.assertDictEqual(ret, {"foo": [(None,)]})
def test_circular_refs_tuple(self):
test_dup = {"foo": "string 1", "bar": "string 1", "ham": 1, "spam": 1}
ret = salt.utils.data._remove_circular_refs(ob=test_dup)
self.assertDictEqual(
ret, {"foo": "string 1", "bar": "string 1", "ham": 1, "spam": 1}
)
    def test_decode_to_str(self):
        """
        Companion to test_decode, they should both be kept up-to-date with one
        another.
        NOTE: This uses the helper "_s" defined above in the global scope,
        which converts the string/bytestring to a str type.
        """
        expected = [
            _s("unicode_str"),
            _s("питон"),
            123,
            456.789,
            True,
            False,
            None,
            _s("яйца"),
            BYTES,
            [123, 456.789, _s("спам"), True, False, None, _s("яйца"), BYTES],
            (987, 654.321, _s("яйца"), _s("яйца"), None, (True, _s("яйца"), BYTES)),
            {
                _s("str_key"): _s("str_val"),
                None: True,
                123: 456.789,
                _s("яйца"): BYTES,
                _s("subdict"): {
                    _s("unicode_key"): _s("яйца"),
                    _s("tuple"): (
                        123,
                        _s("hello"),
                        _s("world"),
                        True,
                        _s("яйца"),
                        BYTES,
                    ),
                    _s("list"): [456, _s("спам"), False, _s("яйца"), BYTES],
                },
            },
            OrderedDict([(_s("foo"), _s("bar")), (123, 456), (_s("яйца"), BYTES)]),
        ]
        ret = salt.utils.data.decode(
            self.test_data,
            keep=True,
            normalize=True,
            preserve_dict_class=True,
            preserve_tuples=True,
            to_str=True,
        )
        self.assertEqual(ret, expected)
        # The binary data in the data structure should fail to decode, even
        # using the fallback, and raise an exception.
        self.assertRaises(
            UnicodeDecodeError,
            salt.utils.data.decode,
            self.test_data,
            keep=False,
            normalize=True,
            preserve_dict_class=True,
            preserve_tuples=True,
            to_str=True,
        )
        # Now munge the expected data so that we get what we would expect if we
        # disable preservation of dict class and tuples
        # (indexes 10-12 are the tuple, dict and OrderedDict entries above)
        expected[10] = [
            987,
            654.321,
            _s("яйца"),
            _s("яйца"),
            None,
            [True, _s("яйца"), BYTES],
        ]
        expected[11][_s("subdict")][_s("tuple")] = [
            123,
            _s("hello"),
            _s("world"),
            True,
            _s("яйца"),
            BYTES,
        ]
        expected[12] = {_s("foo"): _s("bar"), 123: 456, _s("яйца"): BYTES}
        ret = salt.utils.data.decode(
            self.test_data,
            keep=True,
            normalize=True,
            preserve_dict_class=False,
            preserve_tuples=False,
            to_str=True,
        )
        self.assertEqual(ret, expected)
        # Now test single non-string, non-data-structure items, these should
        # return the same value when passed to this function
        for item in (123, 4.56, True, False, None):
            log.debug("Testing decode of %s", item)
            self.assertEqual(salt.utils.data.decode(item, to_str=True), item)
        # Test single strings (not in a data structure)
        self.assertEqual(salt.utils.data.decode("foo", to_str=True), _s("foo"))
        self.assertEqual(salt.utils.data.decode(_b("bar"), to_str=True), _s("bar"))
        # Test binary blob
        self.assertEqual(salt.utils.data.decode(BYTES, keep=True, to_str=True), BYTES)
        self.assertRaises(
            UnicodeDecodeError, salt.utils.data.decode, BYTES, keep=False, to_str=True,
        )
def test_decode_fallback(self):
"""
Test fallback to utf-8
"""
with patch.object(builtins, "__salt_system_encoding__", "ascii"):
self.assertEqual(salt.utils.data.decode(_b("яйца")), "яйца")
    def test_encode(self):
        """
        NOTE: This uses the helper "_b" defined above in the global scope,
        which encodes a string to a bytestring, assuming utf-8.
        """
        expected = [
            _b("unicode_str"),
            _b("питон"),
            123,
            456.789,
            True,
            False,
            None,
            _b(EGGS),
            BYTES,
            [123, 456.789, _b("спам"), True, False, None, _b(EGGS), BYTES],
            (987, 654.321, _b("яйца"), _b(EGGS), None, (True, _b(EGGS), BYTES)),
            {
                _b("str_key"): _b("str_val"),
                None: True,
                123: 456.789,
                _b(EGGS): BYTES,
                _b("subdict"): {
                    _b("unicode_key"): _b(EGGS),
                    _b("tuple"): (123, _b("hello"), _b("world"), True, _b(EGGS), BYTES),
                    _b("list"): [456, _b("спам"), False, _b(EGGS), BYTES],
                },
            },
            OrderedDict([(_b("foo"), _b("bar")), (123, 456), (_b(EGGS), BYTES)]),
        ]
        # Both keep=True and keep=False should work because the BYTES data is
        # already bytes.
        ret = salt.utils.data.encode(
            self.test_data, keep=True, preserve_dict_class=True, preserve_tuples=True
        )
        self.assertEqual(ret, expected)
        ret = salt.utils.data.encode(
            self.test_data, keep=False, preserve_dict_class=True, preserve_tuples=True
        )
        self.assertEqual(ret, expected)
        # Now munge the expected data so that we get what we would expect if we
        # disable preservation of dict class and tuples
        # (indexes 10-12 are the tuple, dict and OrderedDict entries above)
        expected[10] = [
            987,
            654.321,
            _b("яйца"),
            _b(EGGS),
            None,
            [True, _b(EGGS), BYTES],
        ]
        expected[11][_b("subdict")][_b("tuple")] = [
            123,
            _b("hello"),
            _b("world"),
            True,
            _b(EGGS),
            BYTES,
        ]
        expected[12] = {_b("foo"): _b("bar"), 123: 456, _b(EGGS): BYTES}
        ret = salt.utils.data.encode(
            self.test_data, keep=True, preserve_dict_class=False, preserve_tuples=False
        )
        self.assertEqual(ret, expected)
        ret = salt.utils.data.encode(
            self.test_data, keep=False, preserve_dict_class=False, preserve_tuples=False
        )
        self.assertEqual(ret, expected)
        # Now test single non-string, non-data-structure items, these should
        # return the same value when passed to this function
        for item in (123, 4.56, True, False, None):
            log.debug("Testing encode of %s", item)
            self.assertEqual(salt.utils.data.encode(item), item)
        # Test single strings (not in a data structure)
        self.assertEqual(salt.utils.data.encode("foo"), _b("foo"))
        self.assertEqual(salt.utils.data.encode(_b("bar")), _b("bar"))
        # Test binary blob, nothing should happen even when keep=False since
        # the data is already bytes
        self.assertEqual(salt.utils.data.encode(BYTES, keep=True), BYTES)
        self.assertEqual(salt.utils.data.encode(BYTES, keep=False), BYTES)
    def test_encode_keep(self):
        """
        Whereas we tested the keep argument in test_decode, it is much easier
        to do a more comprehensive test of keep in its own function where we
        can force the encoding.
        """
        unicode_str = "питон"
        encoding = "ascii"
        # Test single string
        self.assertEqual(
            salt.utils.data.encode(unicode_str, encoding, keep=True), unicode_str
        )
        self.assertRaises(
            UnicodeEncodeError,
            salt.utils.data.encode,
            unicode_str,
            encoding,
            keep=False,
        )
        data = [
            unicode_str,
            [b"foo", [unicode_str], {b"key": unicode_str}, (unicode_str,)],
            {
                b"list": [b"foo", unicode_str],
                b"dict": {b"key": unicode_str},
                b"tuple": (b"foo", unicode_str),
            },
            ([b"foo", unicode_str], {b"key": unicode_str}, (unicode_str,)),
        ]
        # With keep=True the ascii-unencodable unicode_str is left as-is and
        # everything else is already a bytestring, so the return data should
        # be identical. We don't need to test recursive decoding, that has
        # already been tested in test_encode.
        self.assertEqual(
            salt.utils.data.encode(data, encoding, keep=True, preserve_tuples=True),
            data,
        )
        self.assertRaises(
            UnicodeEncodeError,
            salt.utils.data.encode,
            data,
            encoding,
            keep=False,
            preserve_tuples=True,
        )
        # The same holds for each top-level member individually.
        for index, _ in enumerate(data):
            self.assertEqual(
                salt.utils.data.encode(
                    data[index], encoding, keep=True, preserve_tuples=True
                ),
                data[index],
            )
            self.assertRaises(
                UnicodeEncodeError,
                salt.utils.data.encode,
                data[index],
                encoding,
                keep=False,
                preserve_tuples=True,
            )
def test_encode_fallback(self):
"""
Test fallback to utf-8
"""
with patch.object(builtins, "__salt_system_encoding__", "ascii"):
self.assertEqual(salt.utils.data.encode("яйца"), _b("яйца"))
with patch.object(builtins, "__salt_system_encoding__", "CP1252"):
self.assertEqual(salt.utils.data.encode("Ψ"), _b("Ψ"))
def test_repack_dict(self):
list_of_one_element_dicts = [
{"dict_key_1": "dict_val_1"},
{"dict_key_2": "dict_val_2"},
{"dict_key_3": "dict_val_3"},
]
expected_ret = {
"dict_key_1": "dict_val_1",
"dict_key_2": "dict_val_2",
"dict_key_3": "dict_val_3",
}
ret = salt.utils.data.repack_dictlist(list_of_one_element_dicts)
self.assertDictEqual(ret, expected_ret)
# Try with yaml
yaml_key_val_pair = "- key1: val1"
ret = salt.utils.data.repack_dictlist(yaml_key_val_pair)
self.assertDictEqual(ret, {"key1": "val1"})
# Make sure we handle non-yaml junk data
ret = salt.utils.data.repack_dictlist(LOREM_IPSUM)
self.assertDictEqual(ret, {})
def test_stringify(self):
self.assertRaises(TypeError, salt.utils.data.stringify, 9)
self.assertEqual(
salt.utils.data.stringify(["one", "two", "three", 4, 5]),
["one", "two", "three", "4", "5"],
)
def test_json_query(self):
# Raises exception if jmespath module is not found
with patch("salt.utils.data.jmespath", None):
self.assertRaisesRegex(
RuntimeError, "requires jmespath", salt.utils.data.json_query, {}, "@"
)
# Test search
user_groups = {
"user1": {"groups": ["group1", "group2", "group3"]},
"user2": {"groups": ["group1", "group2"]},
"user3": {"groups": ["group3"]},
}
expression = "*.groups[0]"
primary_groups = ["group1", "group1", "group3"]
self.assertEqual(
sorted(salt.utils.data.json_query(user_groups, expression)), primary_groups
)
class FilterFalseyTestCase(TestCase):
    """
    Test suite for salt.utils.data.filter_falsey

    filter_falsey removes falsey values from (optionally nested) containers,
    honoring the ``recurse_depth`` and ``ignore_types`` keyword arguments.
    """
    def test_nop(self):
        """
        Test cases where nothing will be done.
        """
        # Test with dictionary without recursion
        old_dict = {
            "foo": "bar",
            "bar": {"baz": {"qux": "quux"}},
            "baz": ["qux", {"foo": "bar"}],
        }
        new_dict = salt.utils.data.filter_falsey(old_dict)
        self.assertEqual(old_dict, new_dict)
        # Check returned type equality
        self.assertIs(type(old_dict), type(new_dict))
        # Test dictionary with recursion
        new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=3)
        self.assertEqual(old_dict, new_dict)
        # Test with list
        old_list = ["foo", "bar"]
        new_list = salt.utils.data.filter_falsey(old_list)
        self.assertEqual(old_list, new_list)
        # Check returned type equality
        self.assertIs(type(old_list), type(new_list))
        # Test with set
        old_set = {"foo", "bar"}
        new_set = salt.utils.data.filter_falsey(old_set)
        self.assertEqual(old_set, new_set)
        # Check returned type equality
        self.assertIs(type(old_set), type(new_set))
        # Test with OrderedDict
        old_dict = OrderedDict(
            [
                ("foo", "bar"),
                ("bar", OrderedDict([("qux", "quux")])),
                ("baz", ["qux", OrderedDict([("foo", "bar")])]),
            ]
        )
        new_dict = salt.utils.data.filter_falsey(old_dict)
        self.assertEqual(old_dict, new_dict)
        self.assertIs(type(old_dict), type(new_dict))
        # Test excluding int: 0 is falsey, but its type is in ignore_types
        old_list = [0]
        new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type(0)])
        self.assertEqual(old_list, new_list)
        # Test excluding str (or unicode) (or both)
        old_list = [""]
        new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type("")])
        self.assertEqual(old_list, new_list)
        # Test excluding list
        old_list = [[]]
        new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type([])])
        self.assertEqual(old_list, new_list)
        # Test excluding dict
        old_list = [{}]
        new_list = salt.utils.data.filter_falsey(old_list, ignore_types=[type({})])
        self.assertEqual(old_list, new_list)
def test_filter_dict_no_recurse(self):
"""
Test filtering a dictionary without recursing.
This will only filter out key-values where the values are falsey.
"""
old_dict = {
"foo": None,
"bar": {"baz": {"qux": None, "quux": "", "foo": []}},
"baz": ["qux"],
"qux": {},
"quux": [],
}
new_dict = salt.utils.data.filter_falsey(old_dict)
expect_dict = {
"bar": {"baz": {"qux": None, "quux": "", "foo": []}},
"baz": ["qux"],
}
self.assertEqual(expect_dict, new_dict)
self.assertIs(type(expect_dict), type(new_dict))
def test_filter_dict_recurse(self):
"""
Test filtering a dictionary with recursing.
This will filter out any key-values where the values are falsey or when
the values *become* falsey after filtering their contents (in case they
are lists or dicts).
"""
old_dict = {
"foo": None,
"bar": {"baz": {"qux": None, "quux": "", "foo": []}},
"baz": ["qux"],
"qux": {},
"quux": [],
}
new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=3)
expect_dict = {"baz": ["qux"]}
self.assertEqual(expect_dict, new_dict)
self.assertIs(type(expect_dict), type(new_dict))
def test_filter_list_no_recurse(self):
"""
Test filtering a list without recursing.
This will only filter out items which are falsey.
"""
old_list = ["foo", None, [], {}, 0, ""]
new_list = salt.utils.data.filter_falsey(old_list)
expect_list = ["foo"]
self.assertEqual(expect_list, new_list)
self.assertIs(type(expect_list), type(new_list))
# Ensure nested values are *not* filtered out.
old_list = [
"foo",
["foo"],
["foo", None],
{"foo": 0},
{"foo": "bar", "baz": []},
[{"foo": ""}],
]
new_list = salt.utils.data.filter_falsey(old_list)
self.assertEqual(old_list, new_list)
self.assertIs(type(old_list), type(new_list))
def test_filter_list_recurse(self):
"""
Test filtering a list with recursing.
This will filter out any items which are falsey, or which become falsey
after filtering their contents (in case they are lists or dicts).
"""
old_list = [
"foo",
["foo"],
["foo", None],
{"foo": 0},
{"foo": "bar", "baz": []},
[{"foo": ""}],
]
new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=3)
expect_list = ["foo", ["foo"], ["foo"], {"foo": "bar"}]
self.assertEqual(expect_list, new_list)
self.assertIs(type(expect_list), type(new_list))
def test_filter_set_no_recurse(self):
"""
Test filtering a set without recursing.
Note that a set cannot contain unhashable types, so recursion is not possible.
"""
old_set = {"foo", None, 0, ""}
new_set = salt.utils.data.filter_falsey(old_set)
expect_set = {"foo"}
self.assertEqual(expect_set, new_set)
self.assertIs(type(expect_set), type(new_set))
    def test_filter_ordereddict_no_recurse(self):
        """
        Test filtering an OrderedDict without recursing.
        """
        old_dict = OrderedDict(
            [
                ("foo", None),
                (
                    "bar",
                    OrderedDict(
                        [
                            (
                                "baz",
                                OrderedDict([("qux", None), ("quux", ""), ("foo", [])]),
                            )
                        ]
                    ),
                ),
                ("baz", ["qux"]),
                ("qux", {}),
                ("quux", []),
            ]
        )
        new_dict = salt.utils.data.filter_falsey(old_dict)
        # Without recursion only the top-level falsey entries ('foo', 'qux',
        # 'quux') are removed; the nested falsey values under 'bar' remain.
        expect_dict = OrderedDict(
            [
                (
                    "bar",
                    OrderedDict(
                        [
                            (
                                "baz",
                                OrderedDict([("qux", None), ("quux", ""), ("foo", [])]),
                            )
                        ]
                    ),
                ),
                ("baz", ["qux"]),
            ]
        )
        self.assertEqual(expect_dict, new_dict)
        self.assertIs(type(expect_dict), type(new_dict))
    def test_filter_ordereddict_recurse(self):
        """
        Test filtering an OrderedDict with recursing.
        """
        old_dict = OrderedDict(
            [
                ("foo", None),
                (
                    "bar",
                    OrderedDict(
                        [
                            (
                                "baz",
                                OrderedDict([("qux", None), ("quux", ""), ("foo", [])]),
                            )
                        ]
                    ),
                ),
                ("baz", ["qux"]),
                ("qux", {}),
                ("quux", []),
            ]
        )
        new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=3)
        # With recursion 'bar' becomes empty once its contents are filtered
        # and is dropped as well; only 'baz' survives.
        expect_dict = OrderedDict([("baz", ["qux"])])
        self.assertEqual(expect_dict, new_dict)
        self.assertIs(type(expect_dict), type(new_dict))
def test_filter_list_recurse_limit(self):
"""
Test filtering a list with recursing, but with a limited depth.
Note that the top-level is always processed, so a recursion depth of 2
means that two *additional* levels are processed.
"""
old_list = [None, [None, [None, [None]]]]
new_list = salt.utils.data.filter_falsey(old_list, recurse_depth=2)
self.assertEqual([[[[None]]]], new_list)
def test_filter_dict_recurse_limit(self):
"""
Test filtering a dict with recursing, but with a limited depth.
Note that the top-level is always processed, so a recursion depth of 2
means that two *additional* levels are processed.
"""
old_dict = {
"one": None,
"foo": {"two": None, "bar": {"three": None, "baz": {"four": None}}},
}
new_dict = salt.utils.data.filter_falsey(old_dict, recurse_depth=2)
self.assertEqual({"foo": {"bar": {"baz": {"four": None}}}}, new_dict)
def test_filter_exclude_types(self):
"""
Test filtering a list recursively, but also ignoring (i.e. not filtering)
out certain types that can be falsey.
"""
# Ignore int, unicode
old_list = [
"foo",
["foo"],
["foo", None],
{"foo": 0},
{"foo": "bar", "baz": []},
[{"foo": ""}],
]
new_list = salt.utils.data.filter_falsey(
old_list, recurse_depth=3, ignore_types=[type(0), type("")]
)
self.assertEqual(
["foo", ["foo"], ["foo"], {"foo": 0}, {"foo": "bar"}, [{"foo": ""}]],
new_list,
)
# Ignore list
old_list = [
"foo",
["foo"],
["foo", None],
{"foo": 0},
{"foo": "bar", "baz": []},
[{"foo": ""}],
]
new_list = salt.utils.data.filter_falsey(
old_list, recurse_depth=3, ignore_types=[type([])]
)
self.assertEqual(
["foo", ["foo"], ["foo"], {"foo": "bar", "baz": []}, []], new_list
)
# Ignore dict
old_list = [
"foo",
["foo"],
["foo", None],
{"foo": 0},
{"foo": "bar", "baz": []},
[{"foo": ""}],
]
new_list = salt.utils.data.filter_falsey(
old_list, recurse_depth=3, ignore_types=[type({})]
)
self.assertEqual(["foo", ["foo"], ["foo"], {}, {"foo": "bar"}, [{}]], new_list)
# Ignore NoneType
old_list = [
"foo",
["foo"],
["foo", None],
{"foo": 0},
{"foo": "bar", "baz": []},
[{"foo": ""}],
]
new_list = salt.utils.data.filter_falsey(
old_list, recurse_depth=3, ignore_types=[type(None)]
)
self.assertEqual(["foo", ["foo"], ["foo", None], {"foo": "bar"}], new_list)
class FilterRecursiveDiff(TestCase):
    """
    Test suite for salt.utils.data.recursive_diff

    recursive_diff returns {} for equivalent inputs and an
    {'old': ..., 'new': ...} mapping describing the differences otherwise.
    """
def test_list_equality(self):
"""
Test cases where equal lists are compared.
"""
test_list = [0, 1, 2]
self.assertEqual({}, salt.utils.data.recursive_diff(test_list, test_list))
test_list = [[0], [1], [0, 1, 2]]
self.assertEqual({}, salt.utils.data.recursive_diff(test_list, test_list))
def test_dict_equality(self):
"""
Test cases where equal dicts are compared.
"""
test_dict = {"foo": "bar", "bar": {"baz": {"qux": "quux"}}, "frop": 0}
self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_dict))
def test_ordereddict_equality(self):
"""
Test cases where equal OrderedDicts are compared.
"""
test_dict = OrderedDict(
[
("foo", "bar"),
("bar", OrderedDict([("baz", OrderedDict([("qux", "quux")]))])),
("frop", 0),
]
)
self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_dict))
def test_mixed_equality(self):
"""
Test cases where mixed nested lists and dicts are compared.
"""
test_data = {
"foo": "bar",
"baz": [0, 1, 2],
"bar": {"baz": [{"qux": "quux"}, {"froop", 0}]},
}
self.assertEqual({}, salt.utils.data.recursive_diff(test_data, test_data))
def test_set_equality(self):
"""
Test cases where equal sets are compared.
"""
test_set = {0, 1, 2, 3, "foo"}
self.assertEqual({}, salt.utils.data.recursive_diff(test_set, test_set))
# This is a bit of an oddity, as python seems to sort the sets in memory
# so both sets end up with the same ordering (0..3).
set_one = {0, 1, 2, 3}
set_two = {3, 2, 1, 0}
self.assertEqual({}, salt.utils.data.recursive_diff(set_one, set_two))
def test_tuple_equality(self):
"""
Test cases where equal tuples are compared.
"""
test_tuple = (0, 1, 2, 3, "foo")
self.assertEqual({}, salt.utils.data.recursive_diff(test_tuple, test_tuple))
def test_list_inequality(self):
"""
Test cases where two inequal lists are compared.
"""
list_one = [0, 1, 2]
list_two = ["foo", "bar", "baz"]
expected_result = {"old": list_one, "new": list_two}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(list_one, list_two)
)
expected_result = {"new": list_one, "old": list_two}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(list_two, list_one)
)
list_one = [0, "foo", 1, "bar"]
list_two = [1, "foo", 1, "qux"]
expected_result = {"old": [0, "bar"], "new": [1, "qux"]}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(list_one, list_two)
)
expected_result = {"new": [0, "bar"], "old": [1, "qux"]}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(list_two, list_one)
)
list_one = [0, 1, [2, 3]]
list_two = [0, 1, ["foo", "bar"]]
expected_result = {"old": [[2, 3]], "new": [["foo", "bar"]]}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(list_one, list_two)
)
expected_result = {"new": [[2, 3]], "old": [["foo", "bar"]]}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(list_two, list_one)
)
def test_dict_inequality(self):
"""
Test cases where two inequal dicts are compared.
"""
dict_one = {"foo": 1, "bar": 2, "baz": 3}
dict_two = {"foo": 2, 1: "bar", "baz": 3}
expected_result = {"old": {"foo": 1, "bar": 2}, "new": {"foo": 2, 1: "bar"}}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)
)
expected_result = {"new": {"foo": 1, "bar": 2}, "old": {"foo": 2, 1: "bar"}}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(dict_two, dict_one)
)
dict_one = {"foo": {"bar": {"baz": 1}}}
dict_two = {"foo": {"qux": {"baz": 1}}}
expected_result = {"old": dict_one, "new": dict_two}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)
)
expected_result = {"new": dict_one, "old": dict_two}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(dict_two, dict_one)
)
def test_ordereddict_inequality(self):
"""
Test cases where two inequal OrderedDicts are compared.
"""
odict_one = OrderedDict([("foo", "bar"), ("bar", "baz")])
odict_two = OrderedDict([("bar", "baz"), ("foo", "bar")])
expected_result = {"old": odict_one, "new": odict_two}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(odict_one, odict_two)
)
def test_set_inequality(self):
"""
Test cases where two inequal sets are compared.
Tricky as the sets are compared zipped, so shuffled sets of equal values
are considered different.
"""
set_one = {0, 1, 2, 4}
set_two = {0, 1, 3, 4}
expected_result = {"old": {2}, "new": {3}}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(set_one, set_two)
)
expected_result = {"new": {2}, "old": {3}}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(set_two, set_one)
)
# It is unknown how different python versions will store sets in memory.
# Python 2.7 seems to sort it (i.e. set_one below becomes {0, 1, 'foo', 'bar'}
# However Python 3.6.8 stores it differently each run.
# So just test for "not equal" here.
set_one = {0, "foo", 1, "bar"}
set_two = {"foo", 1, "bar", 2}
expected_result = {}
self.assertNotEqual(
expected_result, salt.utils.data.recursive_diff(set_one, set_two)
)
def test_mixed_inequality(self):
"""
Test cases where two mixed dicts/iterables that are different are compared.
"""
dict_one = {"foo": [1, 2, 3]}
dict_two = {"foo": [3, 2, 1]}
expected_result = {"old": {"foo": [1, 3]}, "new": {"foo": [3, 1]}}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)
)
expected_result = {"new": {"foo": [1, 3]}, "old": {"foo": [3, 1]}}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(dict_two, dict_one)
)
list_one = [1, 2, {"foo": ["bar", {"foo": 1, "bar": 2}]}]
list_two = [3, 4, {"foo": ["qux", {"foo": 1, "bar": 2}]}]
expected_result = {
"old": [1, 2, {"foo": ["bar"]}],
"new": [3, 4, {"foo": ["qux"]}],
}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(list_one, list_two)
)
expected_result = {
"new": [1, 2, {"foo": ["bar"]}],
"old": [3, 4, {"foo": ["qux"]}],
}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(list_two, list_one)
)
mixed_one = {"foo": {0, 1, 2}, "bar": [0, 1, 2]}
mixed_two = {"foo": {1, 2, 3}, "bar": [1, 2, 3]}
expected_result = {
"old": {"foo": {0}, "bar": [0, 1, 2]},
"new": {"foo": {3}, "bar": [1, 2, 3]},
}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two)
)
expected_result = {
"new": {"foo": {0}, "bar": [0, 1, 2]},
"old": {"foo": {3}, "bar": [1, 2, 3]},
}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one)
)
def test_tuple_inequality(self):
"""
Test cases where two tuples that are different are compared.
"""
tuple_one = (1, 2, 3)
tuple_two = (3, 2, 1)
expected_result = {"old": (1, 3), "new": (3, 1)}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(tuple_one, tuple_two)
)
def test_list_vs_set(self):
"""
Test case comparing a list with a set, will be compared unordered.
"""
mixed_one = [1, 2, 3]
mixed_two = {3, 2, 1}
expected_result = {}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(mixed_one, mixed_two)
)
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(mixed_two, mixed_one)
)
def test_dict_vs_ordereddict(self):
"""
Test case comparing a dict with an ordereddict, will be compared unordered.
"""
test_dict = {"foo": "bar", "bar": "baz"}
test_odict = OrderedDict([("foo", "bar"), ("bar", "baz")])
self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_odict))
self.assertEqual({}, salt.utils.data.recursive_diff(test_odict, test_dict))
test_odict2 = OrderedDict([("bar", "baz"), ("foo", "bar")])
self.assertEqual({}, salt.utils.data.recursive_diff(test_dict, test_odict2))
self.assertEqual({}, salt.utils.data.recursive_diff(test_odict2, test_dict))
def test_list_ignore_ignored(self):
"""
Test case comparing two lists with ignore-list supplied (which is not used
when comparing lists).
"""
list_one = [1, 2, 3]
list_two = [3, 2, 1]
expected_result = {"old": [1, 3], "new": [3, 1]}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(list_one, list_two, ignore_keys=[1, 3]),
)
def test_dict_ignore(self):
"""
Test case comparing two dicts with ignore-list supplied.
"""
dict_one = {"foo": 1, "bar": 2, "baz": 3}
dict_two = {"foo": 3, "bar": 2, "baz": 1}
expected_result = {"old": {"baz": 3}, "new": {"baz": 1}}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=["foo"]),
)
def test_ordereddict_ignore(self):
"""
Test case comparing two OrderedDicts with ignore-list supplied.
"""
odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)])
odict_two = OrderedDict([("baz", 1), ("bar", 2), ("foo", 3)])
# The key 'foo' will be ignored, which means the key from the other OrderedDict
# will always be considered "different" since OrderedDicts are compared ordered.
expected_result = {
"old": OrderedDict([("baz", 3)]),
"new": OrderedDict([("baz", 1)]),
}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(odict_one, odict_two, ignore_keys=["foo"]),
)
def test_dict_vs_ordereddict_ignore(self):
"""
Test case comparing a dict with an OrderedDict with ignore-list supplied.
"""
dict_one = {"foo": 1, "bar": 2, "baz": 3}
odict_two = OrderedDict([("foo", 3), ("bar", 2), ("baz", 1)])
expected_result = {"old": {"baz": 3}, "new": OrderedDict([("baz", 1)])}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(dict_one, odict_two, ignore_keys=["foo"]),
)
def test_mixed_nested_ignore(self):
"""
Test case comparing mixed, nested items with ignore-list supplied.
"""
dict_one = {"foo": [1], "bar": {"foo": 1, "bar": 2}, "baz": 3}
dict_two = {"foo": [2], "bar": {"foo": 3, "bar": 2}, "baz": 1}
expected_result = {"old": {"baz": 3}, "new": {"baz": 1}}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(dict_one, dict_two, ignore_keys=["foo"]),
)
def test_ordered_dict_unequal_length(self):
"""
Test case comparing two OrderedDicts of unequal length.
"""
odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)])
odict_two = OrderedDict([("foo", 1), ("bar", 2)])
expected_result = {"old": OrderedDict([("baz", 3)]), "new": {}}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(odict_one, odict_two)
)
def test_list_unequal_length(self):
"""
Test case comparing two lists of unequal length.
"""
list_one = [1, 2, 3]
list_two = [1, 2, 3, 4]
expected_result = {"old": [], "new": [4]}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(list_one, list_two)
)
def test_set_unequal_length(self):
"""
Test case comparing two sets of unequal length.
This does not do anything special, as it is unordered.
"""
set_one = {1, 2, 3}
set_two = {4, 3, 2, 1}
expected_result = {"old": set(), "new": {4}}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(set_one, set_two)
)
def test_tuple_unequal_length(self):
"""
Test case comparing two tuples of unequal length.
This should be the same as comparing two ordered lists.
"""
tuple_one = (1, 2, 3)
tuple_two = (1, 2, 3, 4)
expected_result = {"old": (), "new": (4,)}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(tuple_one, tuple_two)
)
def test_list_unordered(self):
"""
Test case comparing two lists unordered.
"""
list_one = [1, 2, 3, 4]
list_two = [4, 3, 2]
expected_result = {"old": [1], "new": []}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(list_one, list_two, ignore_order=True),
)
def test_mixed_nested_unordered(self):
"""
Test case comparing nested dicts/lists unordered.
"""
dict_one = {"foo": {"bar": [1, 2, 3]}, "bar": [{"foo": 4}, 0]}
dict_two = {"foo": {"bar": [3, 2, 1]}, "bar": [0, {"foo": 4}]}
expected_result = {}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(dict_one, dict_two, ignore_order=True),
)
expected_result = {
"old": {"foo": {"bar": [1, 3]}, "bar": [{"foo": 4}, 0]},
"new": {"foo": {"bar": [3, 1]}, "bar": [0, {"foo": 4}]},
}
self.assertEqual(
expected_result, salt.utils.data.recursive_diff(dict_one, dict_two)
)
def test_ordered_dict_unordered(self):
"""
Test case comparing OrderedDicts unordered.
"""
odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)])
odict_two = OrderedDict([("baz", 3), ("bar", 2), ("foo", 1)])
expected_result = {}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(odict_one, odict_two, ignore_order=True),
)
def test_ignore_missing_keys_dict(self):
"""
Test case ignoring missing keys on a comparison of dicts.
"""
dict_one = {"foo": 1, "bar": 2, "baz": 3}
dict_two = {"bar": 3}
expected_result = {"old": {"bar": 2}, "new": {"bar": 3}}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(
dict_one, dict_two, ignore_missing_keys=True
),
)
def test_ignore_missing_keys_ordered_dict(self):
"""
Test case not ignoring missing keys on a comparison of OrderedDicts.
"""
odict_one = OrderedDict([("foo", 1), ("bar", 2), ("baz", 3)])
odict_two = OrderedDict([("bar", 3)])
expected_result = {"old": odict_one, "new": odict_two}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(
odict_one, odict_two, ignore_missing_keys=True
),
)
def test_ignore_missing_keys_recursive(self):
"""
Test case ignoring missing keys on a comparison of nested dicts.
"""
dict_one = {"foo": {"bar": 2, "baz": 3}}
dict_two = {"foo": {"baz": 3}}
expected_result = {}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(
dict_one, dict_two, ignore_missing_keys=True
),
)
# Compare from dict-in-dict
dict_two = {}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(
dict_one, dict_two, ignore_missing_keys=True
),
)
# Compare from dict-in-list
dict_one = {"foo": ["bar", {"baz": 3}]}
dict_two = {"foo": ["bar", {}]}
self.assertEqual(
expected_result,
salt.utils.data.recursive_diff(
dict_one, dict_two, ignore_missing_keys=True
),
)
| 36.804943 | 91 | 0.523785 |
ace76cff57b3f07859fa375e1057987759ead49b | 33,036 | py | Python | test/ext/test_baked.py | kai3341/sqlalchemy | bb3560851280d338ffb03b72da25488f7db34d22 | [
"MIT"
] | null | null | null | test/ext/test_baked.py | kai3341/sqlalchemy | bb3560851280d338ffb03b72da25488f7db34d22 | [
"MIT"
] | null | null | null | test/ext/test_baked.py | kai3341/sqlalchemy | bb3560851280d338ffb03b72da25488f7db34d22 | [
"MIT"
] | null | null | null | import contextlib
import itertools
from sqlalchemy import bindparam
from sqlalchemy import event
from sqlalchemy import exc as sa_exc
from sqlalchemy import func
from sqlalchemy import testing
from sqlalchemy.ext import baked
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.query import Query
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not
from sqlalchemy.testing import mock
from sqlalchemy.testing.fixtures import fixture_session
from test.orm import _fixtures
class BakedTest(_fixtures.FixtureTest):
    """Common base for baked-query tests: shared fixture data plus a
    fresh bakery (baked-query cache) per test."""
    # Mappers and inserted rows are set up once for the whole class;
    # rows are never deleted between tests.
    run_setup_mappers = "once"
    run_inserts = "once"
    run_deletes = None
    def setup_test(self):
        # New bakery per test so cached queries cannot leak between tests.
        self.bakery = baked.bakery()
class StateChangeTest(BakedTest):
    """Tests for how a BakedQuery accumulates build steps and derives
    its cache key from the callables used to construct it."""
    @classmethod
    def setup_mappers(cls):
        User = cls.classes.User
        cls.mapper_registry.map_imperatively(User, cls.tables.users)
    def _assert_cache_key(self, key, elements):
        # The cache key is the tuple of the code objects of the lambdas
        # that built the query, in order.
        eq_(key, tuple(elem.__code__ for elem in elements))
    def test_initial_key(self):
        """The bakery seeds the cache key with the creation callable."""
        User = self.classes.User
        session = fixture_session()
        def l1():
            return session.query(User)
        q1 = self.bakery(l1)
        self._assert_cache_key(q1._cache_key, [l1])
        eq_(q1.steps, [l1])
    def test_inplace_add(self):
        """add_criteria() mutates the BakedQuery in place."""
        User = self.classes.User
        session = fixture_session()
        def l1():
            return session.query(User)
        def l2(q):
            return q.filter(User.name == bindparam("name"))
        q1 = self.bakery(l1)
        self._assert_cache_key(q1._cache_key, [l1])
        eq_(q1.steps, [l1])
        q2 = q1.add_criteria(l2)
        # in-place: same object returned
        is_(q2, q1)
        self._assert_cache_key(q1._cache_key, [l1, l2])
        eq_(q1.steps, [l1, l2])
    def test_inplace_add_operator(self):
        """`+=` behaves like add_criteria(): in-place mutation."""
        User = self.classes.User
        session = fixture_session()
        def l1():
            return session.query(User)
        def l2(q):
            return q.filter(User.name == bindparam("name"))
        q1 = self.bakery(l1)
        self._assert_cache_key(q1._cache_key, [l1])
        q1 += l2
        self._assert_cache_key(q1._cache_key, [l1, l2])
    def test_chained_add(self):
        """with_criteria() clones; the original cache key is untouched."""
        User = self.classes.User
        session = fixture_session()
        def l1():
            return session.query(User)
        def l2(q):
            return q.filter(User.name == bindparam("name"))
        q1 = self.bakery(l1)
        q2 = q1.with_criteria(l2)
        is_not(q2, q1)
        self._assert_cache_key(q1._cache_key, [l1])
        self._assert_cache_key(q2._cache_key, [l1, l2])
    def test_chained_add_operator(self):
        """`+` behaves like with_criteria(): returns a new BakedQuery."""
        User = self.classes.User
        session = fixture_session()
        def l1():
            return session.query(User)
        def l2(q):
            return q.filter(User.name == bindparam("name"))
        q1 = self.bakery(l1)
        q2 = q1 + l2
        is_not(q2, q1)
        self._assert_cache_key(q1._cache_key, [l1])
        self._assert_cache_key(q2._cache_key, [l1, l2])
class LikeQueryTest(BakedTest):
    """Exercises the Query-like result methods (first/one/one_or_none/
    get/scalar/count) on baked queries against the standard fixture data."""
    @classmethod
    def setup_mappers(cls):
        User = cls.classes.User
        cls.mapper_registry.map_imperatively(User, cls.tables.users)
    def test_first_no_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "asdf")
        eq_(bq(fixture_session()).first(), None)
    def test_first_multiple_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User.id))
        bq += lambda q: q.filter(User.name.like("%ed%")).order_by(User.id)
        eq_(bq(fixture_session()).first(), (8,))
    def test_one_or_none_no_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "asdf")
        eq_(bq(fixture_session()).one_or_none(), None)
    def test_one_or_none_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "ed")
        u1 = bq(fixture_session()).one_or_none()
        eq_(u1.name, "ed")
    def test_one_or_none_multiple_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name.like("%ed%"))
        assert_raises_message(
            orm_exc.MultipleResultsFound,
            "Multiple rows were found when one or none was required",
            bq(fixture_session()).one_or_none,
        )
    def test_one_no_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "asdf")
        assert_raises_message(
            orm_exc.NoResultFound,
            "No row was found when one was required",
            bq(fixture_session()).one,
        )
    def test_one_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "ed")
        u1 = bq(fixture_session()).one()
        eq_(u1.name, "ed")
    def test_one_multiple_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name.like("%ed%"))
        assert_raises_message(
            orm_exc.MultipleResultsFound,
            "Multiple rows were found when exactly one was required",
            bq(fixture_session()).one,
        )
    def test_get(self):
        """get() should emit SQL only when the identity map misses."""
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        sess = fixture_session()
        def go():
            u1 = bq(sess).get(7)
            eq_(u1.name, "jack")
        self.assert_sql_count(testing.db, go, 1)
        u1 = sess.query(User).get(7)  # noqa
        def go():
            u2 = bq(sess).get(7)
            eq_(u2.name, "jack")
        # already in the identity map: no SQL expected
        self.assert_sql_count(testing.db, go, 0)
        def go():
            u2 = bq(sess).get(8)
            eq_(u2.name, "ed")
        self.assert_sql_count(testing.db, go, 1)
    def test_scalar(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User.id))
        sess = fixture_session()
        bq += lambda q: q.filter(User.id == 7)
        eq_(bq(sess).scalar(), 7)
    def test_count(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        sess = fixture_session()
        eq_(bq(sess).count(), 4)
        bq += lambda q: q.filter(User.id.in_([8, 9]))
        eq_(bq(sess).count(), 2)
        # original query still works
        eq_(
            set([(u.id, u.name) for u in bq(sess).all()]),
            set([(8, "ed"), (9, "fred")]),
        )
    def test_count_with_bindparams(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        sess = fixture_session()
        eq_(bq(sess).count(), 4)
        bq += lambda q: q.filter(User.name == bindparam("uname"))
        # calling with *args
        eq_(bq(sess).params(uname="fred").count(), 1)
        # with multiple params, the **kwargs will be used
        bq += lambda q: q.filter(User.id == bindparam("anid"))
        eq_(bq(sess).params(uname="fred", anid=9).count(), 1)
        eq_(
            # wrong id, so 0 results:
            bq(sess).params(uname="fred", anid=8).count(),
            0,
        )
    def test_get_pk_w_null(self):
        """test the re-implementation of logic to do get with IS NULL."""
        class AddressUser(object):
            pass
        # composite PK over an outerjoin: the address side may be NULL
        self.mapper_registry.map_imperatively(
            AddressUser,
            self.tables.users.outerjoin(self.tables.addresses),
            properties={
                "id": self.tables.users.c.id,
                "address_id": self.tables.addresses.c.id,
            },
        )
        bq = self.bakery(lambda s: s.query(AddressUser))
        sess = fixture_session()
        def go():
            u1 = bq(sess).get((10, None))
            eq_(u1.name, "chuck")
        self.assert_sql_count(testing.db, go, 1)
        u1 = sess.query(AddressUser).get((10, None))  # noqa
        def go():
            u2 = bq(sess).get((10, None))
            eq_(u2.name, "chuck")
        self.assert_sql_count(testing.db, go, 0)
    def test_get_includes_getclause(self):
        # test issue #3597
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        for i in range(5):
            sess = fixture_session()
            u1 = bq(sess).get(7)
            eq_(u1.name, "jack")
            sess.close()
        eq_(len(bq._bakery), 2)
        # simulate race where mapper._get_clause
        # may be generated more than once
        from sqlalchemy import inspect
        del inspect(User).__dict__["_get_clause"]
        for i in range(5):
            sess = fixture_session()
            u1 = bq(sess).get(7)
            eq_(u1.name, "jack")
            sess.close()
        # this went from 4 to 3 as a result of #6055. by giving a name
        # to the bind param in mapper._get_clause, while the baked cache
        # here grows by one element, the SQL compiled_cache no longer
        # changes because the keys of the bindparam() objects are passed
        # explicitly as params to the execute() call as a result of
        # _load_on_pk_identity() (either the one in baked or the one in
        # loading.py), which then puts them
        # in column_keys which makes them part of the cache key. These
        # were previously anon names, now they are explicit so they
        # stay across resets
        eq_(len(bq._bakery), 3)
class ResultPostCriteriaTest(BakedTest):
    """Tests that with_post_criteria() modifications (here: execution
    options) are applied to the statement actually executed."""
    @classmethod
    def setup_mappers(cls):
        User = cls.classes.User
        Address = cls.classes.Address
        Order = cls.classes.Order
        cls.mapper_registry.map_imperatively(
            User,
            cls.tables.users,
            properties={
                "addresses": relationship(
                    Address, order_by=cls.tables.addresses.c.id
                ),
                "orders": relationship(Order, order_by=cls.tables.orders.c.id),
            },
        )
        cls.mapper_registry.map_imperatively(Address, cls.tables.addresses)
        cls.mapper_registry.map_imperatively(Order, cls.tables.orders)
    @contextlib.contextmanager
    def _fixture(self):
        # Yields (session, baked query); a before_execute hook asserts
        # that the "yes" execution option set via with_post_criteria()
        # reached the executed statement.
        from sqlalchemy import event
        User = self.classes.User
        with testing.db.connect() as conn:
            @event.listens_for(conn, "before_execute")
            def before_execute(
                conn, clauseelement, multiparams, params, execution_options
            ):
                # execution options are kind of moving around a bit,
                # test both places
                assert (
                    "yes" in clauseelement._execution_options
                    or "yes" in execution_options
                )
            bq = self.bakery(lambda s: s.query(User.id).order_by(User.id))
            sess = Session(conn)
            yield sess, bq
    def test_first(self):
        with self._fixture() as (sess, bq):
            result = bq(sess).with_post_criteria(
                lambda q: q.execution_options(yes=True)
            )
            eq_(result.first(), (7,))
    def test_iter(self):
        with self._fixture() as (sess, bq):
            result = bq(sess).with_post_criteria(
                lambda q: q.execution_options(yes=True)
            )
            eq_(list(result)[0], (7,))
    def test_spoiled(self):
        # spoil() disables caching; post-criteria must still apply
        with self._fixture() as (sess, bq):
            result = bq.spoil()(sess).with_post_criteria(
                lambda q: q.execution_options(yes=True)
            )
            eq_(list(result)[0], (7,))
    def test_get(self):
        User = self.classes.User
        with self._fixture() as (sess, bq):
            bq = self.bakery(lambda s: s.query(User))
            result = bq(sess).with_post_criteria(
                lambda q: q.execution_options(yes=True)
            )
            eq_(result.get(7), User(id=7))
class ResultTest(BakedTest):
    """End-to-end result tests for baked queries: caching across calls,
    conditional build steps, spoiling, to_query(), and eager loading."""
    __backend__ = True
    @classmethod
    def setup_mappers(cls):
        User = cls.classes.User
        Address = cls.classes.Address
        Order = cls.classes.Order
        cls.mapper_registry.map_imperatively(
            User,
            cls.tables.users,
            properties={
                "addresses": relationship(
                    Address, order_by=cls.tables.addresses.c.id
                ),
                "orders": relationship(Order, order_by=cls.tables.orders.c.id),
            },
        )
        cls.mapper_registry.map_imperatively(Address, cls.tables.addresses)
        cls.mapper_registry.map_imperatively(Order, cls.tables.orders)
    def test_cachekeys_on_constructor(self):
        # the extra constructor args (7 / 8) participate in the cache key,
        # so the same fn yields two distinct cached queries
        User = self.classes.User
        queue = [7, 8]
        def fn(s):
            return s.query(User.id).filter_by(id=queue.pop(0))
        bq1 = self.bakery(fn, 7)
        bq2 = self.bakery(fn, 8)
        for i in range(3):
            session = fixture_session()
            eq_(bq1(session).all(), [(7,)])
            eq_(bq2(session).all(), [(8,)])
    def test_no_steps(self):
        User = self.classes.User
        bq = self.bakery(
            lambda s: s.query(User.id, User.name).order_by(User.id)
        )
        for i in range(3):
            session = fixture_session()
            eq_(
                bq(session).all(),
                [(7, "jack"), (8, "ed"), (9, "fred"), (10, "chuck")],
            )
    def test_different_limits(self):
        # limit/offset as bindparams: one cached query, varying params
        User = self.classes.User
        bq = self.bakery(
            lambda s: s.query(User.id, User.name).order_by(User.id)
        )
        bq += lambda q: q.limit(bindparam("limit")).offset(bindparam("offset"))
        session = fixture_session()
        for i in range(4):
            for limit, offset, exp in [
                (2, 1, [(8, "ed"), (9, "fred")]),
                (3, 0, [(7, "jack"), (8, "ed"), (9, "fred")]),
                (1, 2, [(9, "fred")]),
            ]:
                eq_(bq(session).params(limit=limit, offset=offset).all(), exp)
    def test_disable_on_session(self):
        # enable_baked_queries=False: every build callable runs every time
        User = self.classes.User
        canary = mock.Mock()
        def fn1(s):
            canary.fn1()
            return s.query(User.id, User.name).order_by(User.id)
        def fn2(q):
            canary.fn2()
            return q.filter(User.id == bindparam("id"))
        def fn3(q):
            canary.fn3()
            return q
        for x in range(3):
            bq = self.bakery(fn1)
            bq += fn2
            sess = fixture_session(autocommit=True, enable_baked_queries=False)
            eq_(bq.add_criteria(fn3)(sess).params(id=7).all(), [(7, "jack")])
        eq_(
            canary.mock_calls,
            [
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
            ],
        )
    def test_spoiled_full_w_params(self):
        # spoil(full=True): all steps re-run on every invocation
        User = self.classes.User
        canary = mock.Mock()
        def fn1(s):
            canary.fn1()
            return s.query(User.id, User.name).order_by(User.id)
        def fn2(q):
            canary.fn2()
            return q.filter(User.id == bindparam("id"))
        def fn3(q):
            canary.fn3()
            return q
        for x in range(3):
            bq = self.bakery(fn1)
            bq += fn2
            sess = fixture_session()
            eq_(
                bq.spoil(full=True).add_criteria(fn3)(sess).params(id=7).all(),
                [(7, "jack")],
            )
        eq_(
            canary.mock_calls,
            [
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
            ],
        )
    def test_spoiled_half_w_params(self):
        # plain spoil(): cached steps run once, post-spoil steps every time
        User = self.classes.User
        canary = mock.Mock()
        def fn1(s):
            canary.fn1()
            return s.query(User.id, User.name).order_by(User.id)
        def fn2(q):
            canary.fn2()
            return q.filter(User.id == bindparam("id"))
        def fn3(q):
            canary.fn3()
            return q
        bq = self.bakery(fn1)
        bq += fn2
        for x in range(3):
            bq = self.bakery(fn1)
            bq += fn2
            sess = fixture_session()
            eq_(
                bq.spoil().add_criteria(fn3)(sess).params(id=7).all(),
                [(7, "jack")],
            )
        eq_(
            canary.mock_calls,
            [
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn3(),
                mock.call.fn3(),
            ],
        )
    def test_w_new_entities(self):
        """Test that the query can have its entities modified in
        an arbitrary callable, and that this new entity list is preserved
        when the query is invoked.
        """
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User.id, User.name))
        bq += lambda q: q._from_self().with_entities(func.count(User.id))
        for i in range(3):
            session = fixture_session()
            eq_(bq(session).all(), [(4,)])
    def test_conditional_step(self):
        """Test a large series of conditionals and assert that
        results remain correct between all of them within a series
        of loops.
        """
        User = self.classes.User
        base_bq = self.bakery(lambda s: s.query(User.id, User.name))
        base_bq += lambda q: q.order_by(User.id)
        for i in range(4):
            for cond1, cond2, cond3, cond4 in itertools.product(
                *[(False, True) for j in range(4)]
            ):
                bq = base_bq._clone()
                if cond1:
                    bq += lambda q: q.filter(User.name != "jack")
                    if cond2:
                        bq += lambda q: q.join(User.addresses)
                    else:
                        bq += lambda q: q.outerjoin(User.addresses)
                elif cond3:
                    bq += lambda q: q.filter(User.name.like("%ed%"))
                else:
                    bq += lambda q: q.filter(User.name == "jack")
                if cond4:
                    bq += lambda q: q._from_self().with_entities(
                        func.count(User.id)
                    )
                sess = fixture_session()
                result = bq(sess).all()
                if cond4:
                    if cond1:
                        if cond2:
                            eq_(result, [(4,)])
                        else:
                            eq_(result, [(5,)])
                    elif cond3:
                        eq_(result, [(2,)])
                    else:
                        eq_(result, [(1,)])
                else:
                    if cond1:
                        if cond2:
                            eq_(
                                result,
                                [(8, "ed"), (8, "ed"), (8, "ed"), (9, "fred")],
                            )
                        else:
                            eq_(
                                result,
                                [
                                    (8, "ed"),
                                    (8, "ed"),
                                    (8, "ed"),
                                    (9, "fred"),
                                    (10, "chuck"),
                                ],
                            )
                    elif cond3:
                        eq_(result, [(8, "ed"), (9, "fred")])
                    else:
                        eq_(result, [(7, "jack")])
                sess.close()
    def test_conditional_step_oneline(self):
        User = self.classes.User
        base_bq = self.bakery(lambda s: s.query(User.id, User.name))
        base_bq += lambda q: q.order_by(User.id)
        for i in range(4):
            for cond1 in (False, True):
                bq = base_bq._clone()
                # we were using (filename, firstlineno) as cache key,
                # which fails for this kind of thing!
                bq += (
                    (lambda q: q.filter(User.name != "jack"))
                    if cond1
                    else (lambda q: q.filter(User.name == "jack"))
                )  # noqa
                sess = fixture_session()
                result = bq(sess).all()
                if cond1:
                    eq_(result, [(8, u"ed"), (9, u"fred"), (10, u"chuck")])
                else:
                    eq_(result, [(7, "jack")])
                sess.close()
    def test_to_query_query(self):
        # to_query() called with another Query: correlated EXISTS subquery
        User = self.classes.User
        Address = self.classes.Address
        sub_bq = self.bakery(lambda s: s.query(User.name))
        sub_bq += (
            lambda q: q.filter(User.id == Address.user_id)
            .filter(User.name == "ed")
            .correlate(Address)
        )
        main_bq = self.bakery(lambda s: s.query(Address.id))
        main_bq += lambda q: q.filter(sub_bq.to_query(q).exists())
        main_bq += lambda q: q.order_by(Address.id)
        sess = fixture_session()
        result = main_bq(sess).all()
        eq_(result, [(2,), (3,), (4,)])
    def test_to_query_session(self):
        # to_query() called with a Session: scalar subquery in columns/criteria
        User = self.classes.User
        Address = self.classes.Address
        sub_bq = self.bakery(lambda s: s.query(User.name))
        sub_bq += lambda q: q.filter(User.id == Address.user_id).correlate(
            Address
        )
        main_bq = self.bakery(
            lambda s: s.query(Address.id, sub_bq.to_query(s).scalar_subquery())
        )
        main_bq += lambda q: q.filter(
            sub_bq.to_query(q).scalar_subquery() == "ed"
        )
        main_bq += lambda q: q.order_by(Address.id)
        sess = fixture_session()
        result = main_bq(sess).all()
        eq_(result, [(2, "ed"), (3, "ed"), (4, "ed")])
    def test_to_query_args(self):
        # to_query() argument validation
        User = self.classes.User
        sub_bq = self.bakery(lambda s: s.query(User.name))
        q = Query([], None)
        assert_raises_message(
            sa_exc.ArgumentError,
            "Given Query needs to be associated with a Session",
            sub_bq.to_query,
            q,
        )
        assert_raises_message(
            TypeError,
            "Query or Session object expected, got .*'int'.*",
            sub_bq.to_query,
            5,
        )
    def test_subquery_eagerloading(self):
        User = self.classes.User
        Address = self.classes.Address
        Order = self.classes.Order
        self.bakery = baked.bakery()
        base_bq = self.bakery(lambda s: s.query(User))
        base_bq += lambda q: q.options(
            subqueryload(User.addresses), subqueryload(User.orders)
        )
        base_bq += lambda q: q.order_by(User.id)
        assert_result = [
            User(
                id=7,
                addresses=[Address(id=1, email_address="jack@bean.com")],
                orders=[Order(id=1), Order(id=3), Order(id=5)],
            ),
            User(
                id=8,
                addresses=[
                    Address(id=2, email_address="ed@wood.com"),
                    Address(id=3, email_address="ed@bettyboop.com"),
                    Address(id=4, email_address="ed@lala.com"),
                ],
            ),
            User(
                id=9,
                addresses=[Address(id=5)],
                orders=[Order(id=2), Order(id=4)],
            ),
            User(id=10, addresses=[]),
        ]
        for i in range(4):
            for cond1, cond2 in itertools.product(
                *[(False, True) for j in range(2)]
            ):
                print("HI----")
                bq = base_bq._clone()
                sess = fixture_session()
                if cond1:
                    bq += lambda q: q.filter(User.name == "jack")
                else:
                    bq += lambda q: q.filter(User.name.like("%ed%"))
                if cond2:
                    ct = func.count(Address.id).label("count")
                    subq = (
                        sess.query(ct, Address.user_id)
                        .group_by(Address.user_id)
                        .having(ct > 2)
                        .subquery()
                    )
                    bq += lambda q: q.join(subq)
                if cond2:
                    if cond1:
                        def go():
                            result = bq(sess).all()
                            eq_([], result)
                        # empty result: the two subqueryloads don't fire
                        self.assert_sql_count(testing.db, go, 1)
                    else:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:2], result)
                        self.assert_sql_count(testing.db, go, 3)
                else:
                    if cond1:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[0:1], result)
                        self.assert_sql_count(testing.db, go, 3)
                    else:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:3], result)
                        self.assert_sql_count(testing.db, go, 3)
                sess.close()
    def test_subqueryload_post_context(self):
        User = self.classes.User
        Address = self.classes.Address
        assert_result = [
            User(
                id=7, addresses=[Address(id=1, email_address="jack@bean.com")]
            )
        ]
        self.bakery = baked.bakery()
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.options(subqueryload(User.addresses))
        bq += lambda q: q.order_by(User.id)
        bq += lambda q: q.filter(User.name == bindparam("name"))
        sess = fixture_session()
        def set_params(q):
            return q.params(name="jack")
        # test that the changes we make using with_post_criteria()
        # are also applied to the subqueryload query.
        def go():
            result = bq(sess).with_post_criteria(set_params).all()
            eq_(assert_result, result)
        self.assert_sql_count(testing.db, go, 2)
    @testing.fixture()
    def before_compile_nobake_fixture(self):
        # registers a before_compile hook (which disables baked caching
        # for the query) and removes it on teardown
        @event.listens_for(Query, "before_compile", retval=True)
        def _modify_query(query):
            query = query.enable_assertions(False)
            return query
        yield
        event.remove(Query, "before_compile", _modify_query)
    def test_subqueryload_post_context_w_cancelling_event(
        self, before_compile_nobake_fixture
    ):
        User = self.classes.User
        Address = self.classes.Address
        assert_result = [
            User(
                id=7, addresses=[Address(id=1, email_address="jack@bean.com")]
            )
        ]
        self.bakery = baked.bakery(size=3)
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.options(subqueryload(User.addresses))
        bq += lambda q: q.order_by(User.id)
        bq += lambda q: q.filter(User.name == bindparam("name"))
        sess = fixture_session()
        def set_params(q):
            return q.params(name="jack")
        # test that the changes we make using with_post_criteria()
        # are also applied to the subqueryload query.
        def go():
            result = bq(sess).with_post_criteria(set_params).all()
            eq_(assert_result, result)
        self.assert_sql_count(testing.db, go, 2)
# assert that the integration style illustrated in the dogpile.cache
# example works w/ baked
class CustomIntegrationTest(testing.AssertsCompiledSQL, BakedTest):
    """Verifies a dogpile.cache-style second-level cache integration
    (caching frozen results keyed by user-supplied cache keys) works
    both with plain queries and with baked queries."""
    run_setup_mappers = "each"
    def _o2m_fixture(self, lazy="select", **kw):
        # one-to-many User -> Address mapping with a configurable
        # relationship loading strategy
        User = self.classes.User
        Address = self.classes.Address
        self.mapper_registry.map_imperatively(
            User,
            self.tables.users,
            properties={
                "addresses": relationship(
                    Address,
                    order_by=self.tables.addresses.c.id,
                    lazy=lazy,
                    **kw
                )
            },
        )
        self.mapper_registry.map_imperatively(Address, self.tables.addresses)
        return User, Address
    def _query_fixture(self):
        # Session whose do_orm_execute hook serves results from a
        # class-level cache of frozen results, keyed either by a
        # user-defined option or by the "_cache_key" execution option.
        from sqlalchemy.orm.query import Query
        class CachingQuery(Query):
            cache = {}
            def set_cache_key(self, key):
                return self.execution_options(_cache_key=key)
            def set_cache_key_for_path(self, path, key):
                return self.execution_options(**{"_cache_key_%s" % path: key})
        def get_value(cache_key, cache, createfunc):
            if cache_key in cache:
                return cache[cache_key]()
            else:
                # cache the frozen result; invoking it replays the rows
                cache[cache_key] = retval = createfunc().freeze()
                return retval()
        s1 = fixture_session(query_cls=CachingQuery)
        @event.listens_for(s1, "do_orm_execute", retval=True)
        def do_orm_execute(orm_context):
            ckey = None
            for opt in orm_context.user_defined_options:
                ckey = opt.get_cache_key(orm_context)
                if ckey:
                    break
            else:
                if "_cache_key" in orm_context.execution_options:
                    ckey = orm_context.execution_options["_cache_key"]
            if ckey is not None:
                return get_value(
                    ckey,
                    CachingQuery.cache,
                    orm_context.invoke_statement,
                )
        return s1
    def _option_fixture(self):
        # Option that supplies a cache key only for relationship
        # (lazy-load) statements, not for the top-level query.
        from sqlalchemy.orm.interfaces import UserDefinedOption
        class RelationshipCache(UserDefinedOption):
            propagate_to_loaders = True
            def get_cache_key(self, orm_context):
                if orm_context.loader_strategy_path:
                    return "user7_addresses"
                else:
                    return None
        return RelationshipCache()
    def test_non_baked(self):
        User, Address = self._o2m_fixture()
        sess = self._query_fixture()
        q = sess._query_cls
        eq_(q.cache, {})
        q = sess.query(User).filter(User.id == 7).set_cache_key("user7")
        eq_(q.all(), [User(id=7, addresses=[Address(id=1)])])
        eq_(list(q.cache), ["user7"])
        eq_(q.all(), [User(id=7, addresses=[Address(id=1)])])
    def test_non_baked_tuples(self):
        User, Address = self._o2m_fixture()
        sess = self._query_fixture()
        q = sess._query_cls
        eq_(q.cache, {})
        q = sess.query(User).filter(User.id == 7).set_cache_key("user7")
        eq_(
            sess.execute(q.statement).all(),
            [(User(id=7, addresses=[Address(id=1)]),)],
        )
        eq_(list(q.cache), ["user7"])
        eq_(
            sess.execute(q.statement).all(),
            [(User(id=7, addresses=[Address(id=1)]),)],
        )
    def test_use_w_baked(self):
        User, Address = self._o2m_fixture()
        sess = self._query_fixture()
        q = sess._query_cls
        eq_(q.cache, {})
        base_bq = self.bakery(lambda s: s.query(User))
        base_bq += lambda q: q.filter(User.id == 7)
        base_bq += lambda q: q.set_cache_key("user7")
        eq_(base_bq(sess).all(), [User(id=7, addresses=[Address(id=1)])])
        eq_(list(q.cache), ["user7"])
        eq_(base_bq(sess).all(), [User(id=7, addresses=[Address(id=1)])])
    def test_plain_w_baked_lazyload(self):
        User, Address = self._o2m_fixture()
        opt = self._option_fixture()
        sess = self._query_fixture()
        q = sess._query_cls
        eq_(q.cache, {})
        q = sess.query(User).filter(User.id == 7).options(opt)
        u = q.first()
        eq_(u.addresses, [Address(id=1)])
        eq_(list(q.cache), ["user7_addresses"])
        sess.close()
        # ensure caching logic works after query has been baked
        q.cache.clear()
        u = q.first()
        eq_(u.addresses, [Address(id=1)])
        eq_(list(q.cache), ["user7_addresses"])
| 29.106608 | 79 | 0.51344 |
ace76d19ddcb0151cac44630d54f694ae9277280 | 61 | py | Python | wagtail_snippet_image/__init__.py | acrius/wagtail-snippet-image | 01823c72eab2c24aaaed155d9873a992bed89fa0 | [
"MIT"
] | 1 | 2021-06-14T20:22:19.000Z | 2021-06-14T20:22:19.000Z | wagtail_snippet_image/__init__.py | acrius/wagtail-snippet-image | 01823c72eab2c24aaaed155d9873a992bed89fa0 | [
"MIT"
] | null | null | null | wagtail_snippet_image/__init__.py | acrius/wagtail-snippet-image | 01823c72eab2c24aaaed155d9873a992bed89fa0 | [
"MIT"
] | null | null | null | from .fields import SnippetImageField
__version__ = '0.1.4'
| 15.25 | 37 | 0.770492 |
ace76e9f3d8dfcdbceb15f1fdf6db8902a4a43ee | 790 | py | Python | BackendBaggie/reviewproduct/permissions.py | Baggie-App/Updateapi | 80f200d7ffd4695e6348ce6bb9a7a31a6b821e77 | [
"MIT"
] | null | null | null | BackendBaggie/reviewproduct/permissions.py | Baggie-App/Updateapi | 80f200d7ffd4695e6348ce6bb9a7a31a6b821e77 | [
"MIT"
] | null | null | null | BackendBaggie/reviewproduct/permissions.py | Baggie-App/Updateapi | 80f200d7ffd4695e6348ce6bb9a7a31a6b821e77 | [
"MIT"
] | null | null | null | from rest_framework.permissions import BasePermission, SAFE_METHODS
class CanCreatePermissionforCustomer(BasePermission):
"""Client admins should be able to edit property they own"""
def has_permission(self, request, view):
if request.method in SAFE_METHODS:
return True
if user.role == 'customer' and user.is_authenticated:
return True
else:
return False
class CanUpdateDeletePermissionforVendor(BasePermission):
"""Client admins should be able to edit property they own"""
def has_permission(self, request, view):
if request.method in SAFE_METHODS:
return True
if user.role == 'vendor' and user.is_authenticated:
return True
else:
return False
| 28.214286 | 67 | 0.664557 |
ace76ea57d9b3703a1624f0f7a03b8e748c18d0c | 2,528 | py | Python | controller.py | elsayed5454/Signal-Flow-Graph-Solver | 4a37992c14bd659be1aad96fa9010d2a245e8fac | [
"MIT"
] | 1 | 2021-11-26T03:00:06.000Z | 2021-11-26T03:00:06.000Z | controller.py | elsayed5454/Signal-Flow-Graph-Solver | 4a37992c14bd659be1aad96fa9010d2a245e8fac | [
"MIT"
] | null | null | null | controller.py | elsayed5454/Signal-Flow-Graph-Solver | 4a37992c14bd659be1aad96fa9010d2a245e8fac | [
"MIT"
] | null | null | null | import networkx as nx
from matplotlib.pyplot import draw, show, clf
from mason import mason
def add_node(g, node):
    """Insert *node* into graph *g*, rejecting empty names.

    Returns a human-readable status message for the UI.
    """
    if node != "":
        g.add_node(node)
        return "Node added successfully"
    return "Add name to the node"
def remove_node(g, node):
    """Delete *node* from graph *g* if present; return a status message."""
    if not g.has_node(node):
        return "Node doesn't exist in graph"
    g.remove_node(node)
    return "Node removed successfully"
def add_edge(g, from_node, to_node, weight):
    """Add a weighted edge between two existing nodes.

    *weight* is a string from the UI; empty means the default weight '1'.
    Returns a status message. Bug fix: the original's non-digit-weight
    branch was a bare string expression (a no-op), so the function
    silently returned None; it now returns the error message.
    """
    if len(weight) == 0:
        weight = '1'  # default weight
    if g.has_node(from_node) and g.has_node(to_node):
        if weight.isdigit():
            g.add_weighted_edges_from([(from_node, to_node, int(weight))])
            return "Edge added successfully\nDefault weight is 1"
        else:
            return "The weight must be positive integer"
    else:
        return "One of the nodes is not in the graph"
def remove_edge(g, from_node, to_node, weight):
    """Remove an edge between two nodes, disambiguating parallel edges by
    their weight string. Returns a status message for the UI.
    """
    if g.has_node(from_node) and g.has_node(to_node):
        # NOTE(review): get_edge_data returns None (not {}) when no edge
        # exists in networkx, so len(...) would raise TypeError here --
        # confirm against the graph type used.
        if len(g.get_edge_data(from_node, to_node)) == 0:
            return "No edge exists"
        elif len(g.get_edge_data(from_node, to_node)) == 1:
            # NOTE(review): `remove_edge_clicked` looks like a leftover GUI
            # handler name; networkx graphs expose `remove_edge` -- verify.
            g.remove_edge_clicked(from_node, to_node)
            return "Edge removed successfully (Weight is neglected because it's the only edge between the nodes)"
        else:
            # Multiple parallel edges: the caller must name the weight.
            if len(weight) == 0:
                return "There are multiple edges, specify the weight"
            try:
                # NOTE(review): this filters by weight over ALL edges of the
                # graph, not only edges between from_node/to_node -- confirm
                # that is intended.
                to_remove = [(u, v, k) for u, v, k in g.edges(data=True) if k['weight'] == int(weight)]
                g.remove_edges_from(to_remove)
            except:
                # Bare except also hides int() parse errors; kept as-is.
                return "An exception occurred"
            return "Edge removed successfully"
    else:
        return "One of the nodes is not in the graph"
def refresh(g):
    """Redraw graph *g* in the matplotlib window with edge-weight labels."""
    clf()  # wipe the previous rendering
    layout = nx.spring_layout(g)
    nx.draw(g, layout, with_labels=True, connectionstyle='arc3, rad=0.1')
    # Annotate every edge with its weight.
    edge_labels = {(u, v): data['weight'] for u, v, data in g.edges(data=True)}
    nx.draw_networkx_edge_labels(g, layout, edge_labels=edge_labels, label_pos=0.3)
    draw()
    show()
def solve(g, source, sink):
    """Evaluate the signal-flow graph with Mason's gain formula.

    Empty *source*/*sink* strings default to the first/last node.
    Returns either the mason() result or an error message string.
    """
    nodes = list(g.nodes)
    if not nodes:
        return "The graph is empty"
    source = source or nodes[0]
    sink = sink or nodes[-1]
    if not (g.has_node(source) and g.has_node(sink)):
        return "One of the nodes is not in the graph"
    return mason(g, source, sink)
| 31.6 | 114 | 0.583861 |
ace76f89af28d918f2090cc571a4b8f75e04a4a8 | 3,353 | py | Python | tempest/tests/lib/services/compute/test_floating_ips_bulk_client.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 254 | 2015-01-05T19:22:52.000Z | 2022-03-29T08:14:54.000Z | tempest/tests/lib/services/compute/test_floating_ips_bulk_client.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 13 | 2015-03-02T15:53:04.000Z | 2022-02-16T02:28:14.000Z | tempest/tests/lib/services/compute/test_floating_ips_bulk_client.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 367 | 2015-01-07T15:05:39.000Z | 2022-03-04T09:50:35.000Z | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.tests.lib import fake_auth_provider
from tempest.lib.services.compute import floating_ips_bulk_client
from tempest.tests.lib.services import base
class TestFloatingIPsBulkClient(base.BaseServiceTest):
    """Unit tests for FloatingIPsBulkClient against canned REST responses."""

    # Two unassigned addresses from the 'nova' pool, identical except
    # for the address itself.
    FAKE_FIP_BULK_LIST = {"floating_ip_info": [
        {
            "address": address,
            "instance_uuid": None,
            "fixed_ip": None,
            "interface": "eth0",
            "pool": "nova",
            "project_id": None
        }
        for address in ("10.10.10.1", "10.10.10.2")]}

    def setUp(self):
        super(TestFloatingIPsBulkClient, self).setUp()
        auth_provider = fake_auth_provider.FakeAuthProvider()
        self.client = floating_ips_bulk_client.FloatingIPsBulkClient(
            auth_provider, 'compute', 'regionOne')

    def _test_list_floating_ips_bulk(self, bytes_body=False):
        # GET returns the canned bulk list.
        self.check_service_client_function(
            self.client.list_floating_ips_bulk,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_FIP_BULK_LIST,
            to_utf=bytes_body)

    def _test_create_floating_ips_bulk(self, bytes_body=False):
        # POST echoes the creation parameters back.
        expected = {"floating_ips_bulk_create": {
            "ip_range": "192.168.1.0/24", "pool": "nova", "interface": "eth0"}}
        self.check_service_client_function(
            self.client.create_floating_ips_bulk,
            'tempest.lib.common.rest_client.RestClient.post',
            expected,
            to_utf=bytes_body,
            ip_range="192.168.1.0/24", pool="nova", interface="eth0")

    def _test_delete_floating_ips_bulk(self, bytes_body=False):
        # PUT returns the deleted range.
        expected = {"floating_ips_bulk_delete": "192.168.1.0/24"}
        self.check_service_client_function(
            self.client.delete_floating_ips_bulk,
            'tempest.lib.common.rest_client.RestClient.put',
            expected,
            to_utf=bytes_body,
            ip_range="192.168.1.0/24")

    def test_list_floating_ips_bulk_with_str_body(self):
        self._test_list_floating_ips_bulk()

    def test_list_floating_ips_bulk_with_bytes_body(self):
        self._test_list_floating_ips_bulk(True)

    def test_create_floating_ips_bulk_with_str_body(self):
        self._test_create_floating_ips_bulk()

    def test_create_floating_ips_bulk_with_bytes_body(self):
        self._test_create_floating_ips_bulk(True)

    def test_delete_floating_ips_bulk_with_str_body(self):
        self._test_delete_floating_ips_bulk()

    def test_delete_floating_ips_bulk_with_bytes_body(self):
        self._test_delete_floating_ips_bulk(True)
| 37.674157 | 79 | 0.687146 |
ace76f9f6aa1992ec549b5b881519a5a35fba457 | 14,311 | py | Python | mycroft_ptt/speech/listener.py | JarbasAl/HiveMind-PTT | c20fd5be6327f61d460999b4e0738751dae405f4 | [
"Apache-2.0"
] | 2 | 2021-03-04T01:26:45.000Z | 2021-05-28T05:18:21.000Z | mycroft_ptt/speech/listener.py | JarbasAl/HiveMind-PTT | c20fd5be6327f61d460999b4e0738751dae405f4 | [
"Apache-2.0"
] | 2 | 2020-11-18T23:47:18.000Z | 2020-12-18T11:33:45.000Z | mycroft_ptt/speech/listener.py | JarbasAl/HiveMind-PTT | c20fd5be6327f61d460999b4e0738751dae405f4 | [
"Apache-2.0"
] | 1 | 2020-12-04T16:29:13.000Z | 2020-12-04T16:29:13.000Z | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import time
from time import sleep, time as get_time
import json
from threading import Thread
import speech_recognition as sr
import pyaudio
from pyee import EventEmitter
from requests import RequestException
from requests.exceptions import ConnectionError
from mycroft_ptt.configuration import CONFIGURATION
from mycroft_ptt.speech.mic import MutableMicrophone, \
ResponsiveRecognizer
from mycroft_ptt.playback import play_audio, play_mp3, play_ogg, play_wav, \
resolve_resource_file
from speech2text import STTFactory
from queue import Queue, Empty
from ovos_utils.log import LOG
# Give up after this many consecutive microphone restart attempts.
MAX_MIC_RESTARTS = 20

# Tags for items on the producer->consumer audio queue
# (see AudioStreamHandler and AudioConsumer.read).
AUDIO_DATA = 0    # a complete captured utterance
STREAM_START = 1  # begin streaming STT
STREAM_DATA = 2   # raw audio chunk for streaming STT
STREAM_STOP = 3   # end streaming STT
def find_input_device(device_name):
    """Locate an audio input device by name.

    Arguments:
        device_name: device name or regex pattern to match

    Returns: device_index (int) or None if no matching input device exists
    """
    LOG.info('Searching for input device: {}'.format(device_name))
    LOG.debug('Devices: ')
    pattern = re.compile(device_name)
    pa = pyaudio.PyAudio()
    for index in range(pa.get_device_count()):
        info = pa.get_device_info_by_index(index)
        name = info['name']
        LOG.debug(' {}'.format(name))
        # Only devices with input channels qualify.
        if pattern.match(name) and info['maxInputChannels'] > 0:
            LOG.debug(' ^-- matched')
            return index
    return None
class AudioStreamHandler:
    """Forwards STT streaming lifecycle events onto the shared audio queue."""

    def __init__(self, queue):
        self.queue = queue

    def _push(self, tag, payload=None):
        # Every queue entry is a (tag, payload) pair.
        self.queue.put((tag, payload))

    def stream_start(self):
        self._push(STREAM_START)

    def stream_chunk(self, chunk):
        self._push(STREAM_DATA, chunk)

    def stream_stop(self):
        self._push(STREAM_STOP)
class AudioProducer(Thread):
    """AudioProducer
    Given a mic and a recognizer implementation, continuously listens to the
    mic for potential speech chunks and pushes them onto the queue.
    """

    def __init__(self, state, queue, mic, recognizer, emitter, stream_handler):
        # state: shared RecognizerLoopState (its .running flag ends the loop)
        # queue: producer->consumer queue of (tag, payload) items
        # stream_handler: optional AudioStreamHandler for streaming STT
        super(AudioProducer, self).__init__()
        self.daemon = True
        self.state = state
        self.queue = queue
        self.mic = mic
        self.recognizer = recognizer
        self.emitter = emitter
        self.stream_handler = stream_handler

    def run(self):
        """Thread main loop: calibrate, then capture utterances until stopped."""
        restart_attempts = 0
        with self.mic as source:
            # One-time ambient noise calibration before listening starts.
            LOG.info("Adjusting for ambient noise, be silent!!!")
            self.recognizer.adjust_for_ambient_noise(source)
            LOG.info("Ambient noise profile has been created")
            while self.state.running:
                try:
                    audio = self.recognizer.listen(source, self.emitter,
                                                   self.stream_handler)
                    if audio is not None:
                        self.queue.put((AUDIO_DATA, audio))
                    else:
                        LOG.warning("Audio contains no data.")
                except IOError as e:
                    # IOError will be thrown if the read is unsuccessful.
                    # If self.recognizer.overflow_exc is False (default)
                    # input buffer overflow IOErrors due to not consuming the
                    # buffers quickly enough will be silently ignored.
                    LOG.exception('IOError Exception in AudioProducer')
                    if e.errno == pyaudio.paInputOverflowed:
                        pass  # Ignore overflow errors
                    elif restart_attempts < MAX_MIC_RESTARTS:
                        # restart the mic
                        restart_attempts += 1
                        LOG.info('Restarting the microphone...')
                        source.restart()
                        LOG.info('Restarted...')
                    else:
                        LOG.error('Restarting mic doesn\'t seem to work. '
                                  'Stopping...')
                        raise
                except Exception:
                    LOG.exception('Exception in AudioProducer')
                    raise
                else:
                    # Reset restart attempt counter on sucessful audio read
                    restart_attempts = 0
                finally:
                    if self.stream_handler is not None:
                        self.stream_handler.stream_stop()

    def stop(self):
        """Stop producer thread."""
        self.state.running = False
        self.recognizer.stop()
class AudioConsumer(Thread):
    """AudioConsumer
    Consumes AudioData chunks off the queue
    """

    # In seconds, the minimum audio size to be sent to remote STT
    MIN_AUDIO_SIZE = 0.5

    def __init__(self, state, queue, emitter, stt):
        # state: shared RecognizerLoopState; queue: producer->consumer queue
        # emitter: event bus for recognizer_loop:* events; stt: STT engine
        super(AudioConsumer, self).__init__()
        self.daemon = True
        self.queue = queue
        self.state = state
        self.emitter = emitter
        self.stt = stt
        # Where recorded utterances (and their JSON metadata) are saved
        # when the listener's record_utterances option is enabled.
        data_path = os.path.expanduser(CONFIGURATION["data_dir"])
        listener_config = CONFIGURATION["listener"]
        self.save_utterances = listener_config.get('record_utterances', False)
        self.saved_utterances_dir = os.path.join(data_path, 'utterances')
        if not os.path.isdir(data_path):
            os.makedirs(data_path)
        if not os.path.isdir(self.saved_utterances_dir):
            os.makedirs(self.saved_utterances_dir)
    def run(self):
        """Thread main loop: keep draining the queue until the loop stops."""
        while self.state.running:
            self.read()
    def read(self):
        """Pull one item off the queue and dispatch it by tag.

        Blocks at most 0.5 s so the loop can notice shutdown promptly.
        """
        try:
            audio = self.queue.get(timeout=0.5)
        except Empty:
            return
        if audio is None:
            return
        tag, data = audio
        if tag == AUDIO_DATA:
            # A complete utterance captured by the producer.
            if data is not None:
                self.process(data)
        elif tag == STREAM_START:
            self.stt.stream_start()
        elif tag == STREAM_DATA:
            self.stt.stream_data(data)
        elif tag == STREAM_STOP:
            self.stt.stream_stop()
        else:
            LOG.error("Unknown audio queue type %r" % audio)
@staticmethod
def _audio_length(audio):
return float(len(audio.frame_data)) / (
audio.sample_rate * audio.sample_width)
    # TODO: Localization
    def process(self, audio):
        """Transcribe one captured utterance and publish it on the bus.

        Chunks shorter than MIN_AUDIO_SIZE seconds are dropped.
        """
        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warning("Audio too short to be processed")
        else:
            transcription = self.transcribe(audio)
            if transcription:
                # STT succeeded, send the transcribed speech on for processing
                payload = {
                    'utterances': [transcription],
                    'lang': self.stt.lang
                }
                self.emitter.emit("recognizer_loop:utterance", payload)
def _compile_metadata(self, utterance):
timestamp = str(int(1000 * get_time()))
if utterance:
name = utterance.replace(" ", "_").lower() + "_" + timestamp + ".wav"
else:
name = "UNK_" + timestamp + ".wav"
return {
'name': name,
'transcript': utterance,
'engine': self.stt.__class__.__name__,
'time': timestamp
}
    def transcribe(self, audio):
        """Run STT on *audio*; return lower-cased text or None on failure.

        On success the utterance (and JSON metadata) may be recorded to
        disk; on failure an optional error sound is played.
        """
        def send_unknown_intent():
            """ Send message that nothing was transcribed. """
            self.emitter.emit('recognizer_loop:speech.recognition.unknown')

        try:
            # Invoke the STT engine on the audio clip
            text = self.stt.execute(audio)
            if text is not None:
                text = text.lower().strip()
                LOG.debug("STT: " + text)
            else:
                send_unknown_intent()
                LOG.info('no words were transcribed')
            if self.save_utterances:
                # Persist the wav plus a .json sidecar with its metadata.
                mtd = self._compile_metadata(text)
                filename = os.path.join(self.saved_utterances_dir, mtd["name"])
                with open(filename, 'wb') as f:
                    f.write(audio.get_wav_data())
                filename = os.path.join(self.saved_utterances_dir,
                                        mtd["name"].replace(".wav", ".json"))
                with open(filename, 'w') as f:
                    json.dump(mtd, f, indent=4)
            return text
        except sr.RequestError as e:
            LOG.error("Could not request Speech Recognition {0}".format(e))
        except ConnectionError as e:
            LOG.error("Connection Error: {0}".format(e))
            self.emitter.emit("recognizer_loop:no_internet")
        except RequestException as e:
            LOG.error(e.__class__.__name__ + ': ' + str(e))
        except Exception as e:
            send_unknown_intent()
            LOG.error(e)
            LOG.error("Speech Recognition could not understand audio")
        # If enabled, play a wave file with a short sound to audibly
        # indicate speech recognition failed
        sound = CONFIGURATION["listener"].get('error_sound')
        audio_file = resolve_resource_file(sound)
        try:
            if audio_file:
                if audio_file.endswith(".wav"):
                    play_wav(audio_file).wait()
                elif audio_file.endswith(".mp3"):
                    play_mp3(audio_file).wait()
                elif audio_file.endswith(".ogg"):
                    play_ogg(audio_file).wait()
                else:
                    play_audio(audio_file).wait()
        except Exception as e:
            LOG.warning(e)
        return None
        # NOTE(review): the two statements below are unreachable -- they
        # follow an unconditional return. Likely leftover "no internet"
        # feedback; placement may have been mangled upstream. Verify.
        dialog_name = 'not connected to the internet'
        self.emitter.emit('speak', {'utterance': dialog_name})
class RecognizerLoopState:
    """Mutable flag object shared by the producer/consumer threads."""

    def __init__(self):
        # Set True by RecognizerLoop.start_async(); both worker loops poll it.
        self.running = False
class RecognizerLoop(EventEmitter):
    """ EventEmitter loop running speech recognition.
    Local wake word recognizer and remote general speech recognition.
    """

    def __init__(self, config=None):
        # config: optional configuration dict; falls back to the global
        # CONFIGURATION when not given.
        super(RecognizerLoop, self).__init__()
        # Count of outstanding mute requests (see mute()/unmute()).
        self.mute_calls = 0
        self.config = config or CONFIGURATION
        self._load_config(config)
    def _load_config(self, config=None):
        """Load configuration parameters from configuration."""
        config = config or self.config
        # Keep the full config around; self.config becomes the listener section.
        self.config_core = config
        self.lang = config.get('lang')
        self.config = config.get('listener')
        rate = self.config.get('sample_rate')
        device_index = self.config.get('device_index')
        device_name = self.config.get('device_name')
        # A device name/regex is only consulted when no explicit index is set.
        if not device_index and device_name:
            device_index = find_input_device(device_name)
        LOG.debug('Using microphone (None = default): ' + str(device_index))
        # Preserve mute state across config reloads.
        self.microphone = MutableMicrophone(device_index, rate,
                                            mute=self.mute_calls > 0)
        self.responsive_recognizer = ResponsiveRecognizer()
        self.state = RecognizerLoopState()
    def start_async(self):
        """Start consumer and producer threads."""
        self.state.running = True
        stt = STTFactory.create(self.config_core["stt"])
        queue = Queue()
        # Streaming-capable STT engines get chunks forwarded as they arrive.
        stream_handler = None
        if stt.can_stream:
            stream_handler = AudioStreamHandler(queue)
        LOG.debug("Using STT engine: " + stt.__class__.__name__)
        self.producer = AudioProducer(self.state, queue, self.microphone,
                                      self.responsive_recognizer, self,
                                      stream_handler)
        self.producer.start()
        self.consumer = AudioConsumer(self.state, queue, self, stt)
        self.consumer.start()
    def stop(self):
        """Stop both worker threads and wait for them to exit."""
        # Clear the shared flag first so both loops see the shutdown.
        self.state.running = False
        self.producer.stop()
        # wait for threads to shutdown
        self.producer.join()
        self.consumer.join()
def mute(self):
"""Mute microphone and increase number of requests to mute."""
self.mute_calls += 1
if self.microphone:
self.microphone.mute()
def unmute(self):
"""Unmute mic if as many unmute calls as mute calls have been received.
"""
if self.mute_calls > 0:
self.mute_calls -= 1
if self.mute_calls <= 0 and self.microphone:
self.microphone.unmute()
self.mute_calls = 0
    def force_unmute(self):
        """Completely unmute mic regardless of the number of calls to mute."""
        # Zero the counter first so the following unmute() reaches the mic.
        self.mute_calls = 0
        self.unmute()
def is_muted(self):
if self.microphone:
return self.microphone.is_muted()
else:
return True # consider 'no mic' muted
    def sleep(self):
        # Enter standby; the flag's consumers are outside this file chunk.
        self.state.sleeping = True

    def awaken(self):
        # Leave standby.
        self.state.sleeping = False
    def run(self):
        """Start and reload mic and STT handling threads as needed.
        Wait for KeyboardInterrupt and shutdown cleanly.
        """
        try:
            self.start_async()
        except Exception:
            LOG.exception('Starting producer/consumer threads for listener '
                          'failed.')
            return

        # Handle reload of consumer / producer if config changes
        while self.state.running:
            try:
                time.sleep(1)
            except KeyboardInterrupt as e:
                LOG.error(e)
                self.stop()
                raise  # Re-raise KeyboardInterrupt
            except Exception:
                LOG.exception('Exception in RecognizerLoop')
                raise
    def reload(self):
        """Reload configuration and restart consumer and producer."""
        self.stop()
        # load config
        self._load_config()
        # restart
        self.start_async()
| 34.735437 | 81 | 0.580812 |
ace76ffa5b3efcd0e3db9ec55e2dec4976e76f33 | 2,565 | py | Python | app/recipe/tests/test_tags_api.py | jackyFeng/recipe-app-api | d6000758d880578f441f8c3d2b08ce72dabcbb4d | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | jackyFeng/recipe-app-api | d6000758d880578f441f8c3d2b08ce72dabcbb4d | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | jackyFeng/recipe-app-api | d6000758d880578f441f8c3d2b08ce72dabcbb4d | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Tests for the tags endpoint without authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """Unauthenticated requests must be rejected with 401."""
        response = self.client.get(TAGS_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Tests for the tags endpoint with an authenticated user."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            'jackyfeng1218@gmail.com',
            'password'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """Listing returns the user's tags, ordered by name descending."""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')
        response = self.client.get(TAGS_URL)
        expected = TagSerializer(
            Tag.objects.all().order_by('-name'), many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected.data)

    def test_tags_limited_to_user(self):
        """Only the authenticated user's own tags are returned."""
        other_user = get_user_model().objects.create_user(
            'other@gmail.com',
            'password'
        )
        Tag.objects.create(user=other_user, name='Fruity')
        own_tag = Tag.objects.create(user=self.user, name='Hotpot')
        response = self.client.get(TAGS_URL)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['name'], own_tag.name)

    def test_create_tag_successful(self):
        """POSTing a valid name creates a tag owned by the user."""
        payload = {'name': 'Test tag'}
        self.client.post(TAGS_URL, payload)
        created = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(created)

    def test_create_tag_invalid(self):
        """POSTing an empty name is rejected with 400."""
        payload = {'name': ''}
        response = self.client.post(TAGS_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| 30.176471 | 71 | 0.650292 |
ace7703ff861a670614164b899053fe22bb00987 | 1,221 | py | Python | pastepwn/analyzers/__init__.py | mikek2/pastepwn | cf40629f7f3a19b26a81db656ec41a2d5d19c661 | [
"MIT"
] | null | null | null | pastepwn/analyzers/__init__.py | mikek2/pastepwn | cf40629f7f3a19b26a81db656ec41a2d5d19c661 | [
"MIT"
] | null | null | null | pastepwn/analyzers/__init__.py | mikek2/pastepwn | cf40629f7f3a19b26a81db656ec41a2d5d19c661 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .alwaystrueanalyzer import AlwaysTrueAnalyzer
from .basicanalyzer import BasicAnalyzer
from .bcrypthashanalyzer import BcryptHashAnalyzer
from .md5hashanalyzer import MD5HashAnalyzer
from .shahashanalyzer import SHAHashAnalyzer
from .creditcardanalyzer import CreditCardAnalyzer
from .genericanalyzer import GenericAnalyzer
from .mailanalyzer import MailAnalyzer
from .pastebinurlanalyzer import PastebinURLAnalyzer
from .regexanalyzer import RegexAnalyzer
from .urlanalyzer import URLAnalyzer
from .wordanalyzer import WordAnalyzer
from .ibananalyzer import IBANAnalyzer
from .databasedumpanalyzer import DatabaseDumpAnalyzer
from .dbconnstringanalyzer import DBConnAnalyzer
from .privatekeyanalyzer import PrivateKeyAnalyzer
from .phonenumberanalyzer import PhoneNumberAnalyzer
# Public API of the analyzers package; mirrors the imports above.
__all__ = (
    'AlwaysTrueAnalyzer',
    'BasicAnalyzer',
    'BcryptHashAnalyzer',
    'MD5HashAnalyzer',
    'SHAHashAnalyzer',
    'CreditCardAnalyzer',
    'GenericAnalyzer',
    'MailAnalyzer',
    'PastebinURLAnalyzer',
    'RegexAnalyzer',
    'URLAnalyzer',
    'WordAnalyzer',
    'IBANAnalyzer',
    'DatabaseDumpAnalyzer',
    'DBConnAnalyzer',
    'PrivateKeyAnalyzer',
    'PhoneNumberAnalyzer'
)
ace770c847b931628fb7e2d5ab8a8fb4e0b16975 | 2,503 | py | Python | include/ClientGUIFrames.py | DonaldTsang/hydrus | 1ffd13469c0ea98ea78961ab377aff1c6325379b | [
"WTFPL"
] | 3 | 2019-06-19T09:29:50.000Z | 2019-12-18T14:17:21.000Z | include/ClientGUIFrames.py | DonaldTsang/hydrus | 1ffd13469c0ea98ea78961ab377aff1c6325379b | [
"WTFPL"
] | 50 | 2019-04-05T02:25:13.000Z | 2019-04-27T04:29:31.000Z | include/ClientGUIFrames.py | DonaldTsang/hydrus | 1ffd13469c0ea98ea78961ab377aff1c6325379b | [
"WTFPL"
] | null | null | null | from . import ClientConstants as CC
from . import ClientGUICommon
from . import ClientGUITopLevelWindows
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusGlobals as HG
import os
import wx
class ShowKeys( ClientGUITopLevelWindows.Frame ):
    """Parentless frame that displays registration/access keys as hex, one
    per line, with a 'save to file' option. Registration keys are prefixed
    with 'r'.
    """

    def __init__( self, key_type, keys ):
        # key_type: 'registration' or 'access'; keys: iterable of bytes-like
        # objects (each rendered with .hex()).
        # NOTE(review): any other key_type leaves `title` unbound -> NameError.
        if key_type == 'registration': title = 'Registration Keys'
        elif key_type == 'access': title = 'Access Keys'

        # give it no parent, so this doesn't close when the dialog is closed!
        ClientGUITopLevelWindows.Frame.__init__( self, None, HG.client_controller.PrepStringForDisplay( title ), float_on_parent = False )

        self._key_type = key_type
        self._keys = keys

        #

        # Read-only, non-wrapping text area holding the rendered keys.
        self._text_ctrl = ClientGUICommon.SaneMultilineTextCtrl( self, style = wx.TE_READONLY | wx.TE_DONTWRAP )

        self._save_to_file = wx.Button( self, label = 'save to file' )
        self._save_to_file.Bind( wx.EVT_BUTTON, self.EventSaveToFile )

        self._done = wx.Button( self, label = 'done' )
        self._done.Bind( wx.EVT_BUTTON, self.EventDone )

        #

        # Registration keys carry an 'r' prefix by convention.
        if key_type == 'registration': prepend = 'r'
        else: prepend = ''

        self._text = os.linesep.join( [ prepend + key.hex() for key in self._keys ] )

        self._text_ctrl.SetValue( self._text )

        #

        vbox = wx.BoxSizer( wx.VERTICAL )

        vbox.Add( self._text_ctrl, CC.FLAGS_EXPAND_BOTH_WAYS )
        vbox.Add( self._save_to_file, CC.FLAGS_LONE_BUTTON )
        vbox.Add( self._done, CC.FLAGS_LONE_BUTTON )

        self.SetSizer( vbox )

        # Enforce a sensible minimum window size.
        ( x, y ) = self.GetEffectiveMinSize()

        if x < 500: x = 500
        if y < 200: y = 200

        self.SetInitialSize( ( x, y ) )

        self.Show( True )

    def EventDone( self, event ):
        # 'done' button handler: just close the frame.
        self.Close()

    def EventSaveToFile( self, event ):
        # 'save to file' handler: prompt for a path and dump the key text.
        filename = 'keys.txt'

        with wx.FileDialog( self, style=wx.FD_SAVE, defaultFile = filename ) as dlg:

            if dlg.ShowModal() == wx.ID_OK:

                path = dlg.GetPath()

                with open( path, 'w', encoding = 'utf-8' ) as f:

                    f.write( self._text )
| 29.447059 | 138 | 0.535757 |
ace771b9c9330acfc5ee4349a380948bfe045ebf | 2,453 | py | Python | nsd1805/python/day13/deploy_web.py | MrWangwf/nsd1806 | 069e993b0bb64cb21adc2a25aa56f6da674453bc | [
"Apache-2.0"
] | null | null | null | nsd1805/python/day13/deploy_web.py | MrWangwf/nsd1806 | 069e993b0bb64cb21adc2a25aa56f6da674453bc | [
"Apache-2.0"
] | null | null | null | nsd1805/python/day13/deploy_web.py | MrWangwf/nsd1806 | 069e993b0bb64cb21adc2a25aa56f6da674453bc | [
"Apache-2.0"
] | null | null | null | import requests
import os
import hashlib
import tarfile
def download(url, fname):
    """Fetch *url* and write the response body to the local file *fname*.

    (Original docstring: url is the remote file location, fname is the
    local file path.)
    """
    response = requests.get(url)
    with open(fname, 'wb') as out:
        out.write(response.content)
def check_version(url, fname):
    """Return True when the server publishes a version newer than local.

    *url* is the remote live_version file; *fname* the local copy. When a
    new version is detected the local copy is refreshed as a side effect.
    """
    # No local version file at all: treat as a new version.
    if not os.path.isfile(fname):
        download(url, fname)
        return True
    remote = requests.get(url)
    with open(fname) as fobj:
        local_version = fobj.read()
    # Contents differ: a new version is available; refresh the local copy.
    if local_version != remote.text:
        download(url, fname)
        return True
    # Local and remote match: nothing new.
    return False
def check_md5(md5_url, fname):
    """Return True when the local file's MD5 matches the published value.

    *md5_url* serves the expected hex digest; *fname* is hashed in 4 KiB
    chunks so large archives don't need to fit in memory.
    """
    expected = requests.get(md5_url).text.strip()
    digest = hashlib.md5()
    with open(fname, 'rb') as fobj:
        for chunk in iter(lambda: fobj.read(4096), b''):
            digest.update(chunk)
    # Matching digests mean the download arrived intact.
    return digest.hexdigest() == expected
def deploy(app_path, deploy_dir, web_link='/var/www/html/nsd1805'):
    """Unpack the release tarball into *deploy_dir* and point *web_link*
    at the freshly extracted directory.

    Args:
        app_path: path to the archive, named like core_py_X.Y.tar.gz;
            it is expected to unpack into a directory named core_py_X.Y.
        deploy_dir: directory the archive is extracted into.
        web_link: symlink served by the web server (new optional parameter;
            defaults to the previously hard-coded path, so existing callers
            are unaffected).
    """
    os.chdir(deploy_dir)  # extract relative to the deployment directory
    # SECURITY NOTE: extractall() on an untrusted archive can write outside
    # deploy_dir (path traversal). The archive is md5-checked upstream but
    # that only detects corruption, not tampering.
    # Context manager ensures the tar handle is closed even if extraction
    # fails (the original leaked it on error).
    with tarfile.open(app_path, 'r:gz') as tar:
        tar.extractall()
    fname = os.path.basename(app_path)
    # Absolute path of the extracted release directory.
    src_dir = os.path.join(deploy_dir, fname.replace('.tar.gz', ''))
    # Use lexists, not exists: exists() follows symlinks and misses a
    # dangling link, which would then make os.symlink raise FileExistsError.
    if os.path.lexists(web_link):
        os.unlink(web_link)
    os.symlink(src_dir, web_link)
if __name__ == '__main__':
    version_url = 'http://192.168.4.3/deploy/live_version'
    local_ver = '/var/www/download/live_version'
    new = check_version(version_url, local_ver)
    if not new:
        print('没有发现新版本')
        exit(1)
    # Download the latest software version
    with open(local_ver) as fobj:
        version = fobj.read().strip()  # read the version number
    # Download the release archive
    app_url = 'http://192.168.4.3/deploy/packages/core_py_%s.tar.gz' % version
    app_path = '/var/www/download/core_py_%s.tar.gz' % version
    download(app_url, app_path)
    # Verify the md5 checksum; abort if the file is corrupted
    md5_url = 'http://192.168.4.3/deploy/packages/core_py_%s.tar.gz.md5' % version
    file_ok = check_md5(md5_url, app_path)
    if not file_ok:
        print('文件在下载过程中已损坏')
        # Park the version marker so the next run re-detects a new version.
        os.rename(local_ver, '/var/www/download/live_version.save')
        exit(2)
    # If the application archive is intact, deploy it to the server
    deploy_dir = '/var/www/deploy/'
    deploy(app_path, deploy_dir)
ace772f33ad0ed7538439d26d47b76bd41277b41 | 368 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/constants.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/constants.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/constants.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Constants and Enums used by Grading.
"""
class ScoreDatabaseTableEnum:
    """
    The various database tables that store scores.
    """
    # Short codes identifying which table a score row originated from.
    courseware_student_module = 'csm'
    submissions = 'submissions'
    overrides = 'overrides'
class GradeOverrideFeatureEnum:
    """Identifiers for the feature that produced a grade override."""
    proctoring = 'PROCTORING'
    gradebook = 'GRADEBOOK'
    grade_import = 'grade-import'
| 19.368421 | 50 | 0.690217 |
ace774e60cde13cf5320586c8c75bd9e57fde475 | 282 | py | Python | ultiplayground/closures/returner.py | UltiRequiem/python-playground | cb17138df7ce5a21f107ded5a97019491d37c8b2 | [
"MIT"
] | 4 | 2021-08-15T03:20:04.000Z | 2021-09-24T03:26:23.000Z | ultiplayground/closures/returner.py | UltiRequiem/python-playground | cb17138df7ce5a21f107ded5a97019491d37c8b2 | [
"MIT"
] | null | null | null | ultiplayground/closures/returner.py | UltiRequiem/python-playground | cb17138df7ce5a21f107ded5a97019491d37c8b2 | [
"MIT"
] | 3 | 2021-08-15T03:20:05.000Z | 2021-08-18T13:26:51.000Z | from typing import Callable
def return_msg(msg: str = "Hello") -> Callable:
    """Return a zero-argument closure that always yields *msg*."""
    def _emit() -> str:
        return msg
    return _emit
# Module-level examples: two closures capturing different messages.
say_cheese = return_msg("cheese")
say_hello = return_msg()

if __name__ == "__main__":
    print(say_hello())
    print(say_cheese())
| 15.666667 | 47 | 0.656028 |
ace774ece672420a7089ab63fd7873df99f35fd0 | 16,374 | py | Python | js2py/internals/prototypes/jsarray.py | renesugar/Js2Py | 0d12da37910aca94203d431d4c9fb9a8f41a5dea | [
"MIT"
] | 1,926 | 2015-01-17T05:57:22.000Z | 2022-03-28T09:24:41.000Z | js2py/internals/prototypes/jsarray.py | renesugar/Js2Py | 0d12da37910aca94203d431d4c9fb9a8f41a5dea | [
"MIT"
] | 691 | 2015-11-05T21:32:26.000Z | 2022-03-17T10:52:45.000Z | js2py/internals/prototypes/jsarray.py | renesugar/Js2Py | 0d12da37910aca94203d431d4c9fb9a8f41a5dea | [
"MIT"
] | 260 | 2015-04-07T12:05:31.000Z | 2022-03-22T18:15:35.000Z | from __future__ import unicode_literals
from ..conversions import *
from ..func_utils import *
from ..operations import strict_equality_op
import six
if six.PY3:
xrange = range
import functools
ARR_STACK = set({})
class ArrayPrototype:
def toString(this, args):
arr = to_object(this, args.space)
func = arr.get('join')
if not is_callable(func):
return u'[object %s]' % GetClass(arr)
return func.call(this, ())
    def toLocaleString(this, args):
        """Array.prototype.toLocaleString: join each element's
        toLocaleString() result with ','; undefined/null render as ''."""
        array = to_object(this, args.space)
        arr_len = js_arr_length(array)
        # separator is simply a comma ','
        if not arr_len:
            return ''
        res = []
        for i in xrange(arr_len):
            element = array.get(unicode(i))
            if is_undefined(element) or is_null(element):
                res.append('')
            else:
                # Each element must expose a callable toLocaleString.
                cand = to_object(element, args.space)
                str_func = cand.get('toLocaleString')
                if not is_callable(str_func):
                    raise MakeError(
                        'TypeError',
                        'toLocaleString method of item at index %d is not callable'
                        % i)
                res.append(to_string(str_func.call(cand, ())))
        return ','.join(res)
    def concat(this, args):
        """Array.prototype.concat: build a new array from `this` followed by
        the arguments, flattening one level of Array arguments (holes in a
        flattened array are skipped via has_property)."""
        array = to_object(this, args.space)
        items = [array]
        items.extend(tuple(args))
        A = []
        for E in items:
            if GetClass(E) == 'Array':
                # Copy the array's elements one by one, skipping holes.
                k = 0
                e_len = js_arr_length(E)
                while k < e_len:
                    if E.has_property(unicode(k)):
                        A.append(E.get(unicode(k)))
                    k += 1
            else:
                # Non-array arguments are appended as single elements.
                A.append(E)
        return args.space.ConstructArray(A)
def join(this, args):
ARR_STACK.add(this)
array = to_object(this, args.space)
separator = get_arg(args, 0)
arr_len = js_arr_length(array)
separator = ',' if is_undefined(separator) else to_string(separator)
elems = []
for e in xrange(arr_len):
elem = array.get(unicode(e))
if elem in ARR_STACK:
s = ''
else:
s = to_string(elem)
elems.append(
s if not (is_undefined(elem) or is_null(elem)) else '')
res = separator.join(elems)
ARR_STACK.remove(this)
return res
def pop(this, args): #todo check
array = to_object(this, args.space)
arr_len = js_arr_length(array)
if not arr_len:
array.put('length', float(arr_len))
return undefined
ind = unicode(arr_len - 1)
element = array.get(ind)
array.delete(ind)
array.put('length', float(arr_len - 1))
return element
def push(this, args):
array = to_object(this, args.space)
arr_len = js_arr_length(array)
to_put = tuple(args)
i = arr_len
for i, e in enumerate(to_put, arr_len):
array.put(unicode(i), e, True)
array.put('length', float(arr_len + len(to_put)), True)
return float(i)
def reverse(this, args):
array = to_object(this, args.space)
vals = js_array_to_list(array)
has_props = [
array.has_property(unicode(e))
for e in xrange(js_arr_length(array))
]
vals.reverse()
has_props.reverse()
for i, val in enumerate(vals):
if has_props[i]:
array.put(unicode(i), val)
else:
array.delete(unicode(i))
return array
def shift(this, args):
array = to_object(this, args.space)
arr_len = js_arr_length(array)
if not arr_len:
array.put('length', 0.)
return undefined
first = array.get('0')
for k in xrange(1, arr_len):
from_s, to_s = unicode(k), unicode(k - 1)
if array.has_property(from_s):
array.put(to_s, array.get(from_s))
else:
array.delete(to_s)
array.delete(unicode(arr_len - 1))
array.put('length', float(arr_len - 1))
return first
def slice(this, args): # todo check
array = to_object(this, args.space)
start = get_arg(args, 0)
end = get_arg(args, 1)
arr_len = js_arr_length(array)
relative_start = to_int(start)
k = max((arr_len + relative_start), 0) if relative_start < 0 else min(
relative_start, arr_len)
relative_end = arr_len if is_undefined(end) else to_int(end)
final = max((arr_len + relative_end), 0) if relative_end < 0 else min(
relative_end, arr_len)
res = []
n = 0
while k < final:
pk = unicode(k)
if array.has_property(pk):
res.append(array.get(pk))
k += 1
n += 1
return args.space.ConstructArray(res)
def sort(
this, args
): # todo: this assumes array continous (not sparse) - fix for sparse arrays
cmpfn = get_arg(args, 0)
if not GetClass(this) in ('Array', 'Arguments'):
return to_object(this, args.space) # do nothing
arr_len = js_arr_length(this)
if not arr_len:
return this
arr = [
(this.get(unicode(e)) if this.has_property(unicode(e)) else None)
for e in xrange(arr_len)
]
if not is_callable(cmpfn):
cmpfn = None
cmp = lambda a, b: sort_compare(a, b, cmpfn)
if six.PY3:
key = functools.cmp_to_key(cmp)
arr.sort(key=key)
else:
arr.sort(cmp=cmp)
for i in xrange(arr_len):
if arr[i] is None:
this.delete(unicode(i))
else:
this.put(unicode(i), arr[i])
return this
def splice(this, args):
# 1-8
array = to_object(this, args.space)
start = get_arg(args, 0)
deleteCount = get_arg(args, 1)
arr_len = js_arr_length(this)
relative_start = to_int(start)
actual_start = max(
(arr_len + relative_start), 0) if relative_start < 0 else min(
relative_start, arr_len)
actual_delete_count = min(
max(to_int(deleteCount), 0), arr_len - actual_start)
k = 0
A = args.space.NewArray(0)
# 9
while k < actual_delete_count:
if array.has_property(unicode(actual_start + k)):
A.put(unicode(k), array.get(unicode(actual_start + k)))
k += 1
# 10-11
items = list(args)[2:]
items_len = len(items)
# 12
if items_len < actual_delete_count:
k = actual_start
while k < (arr_len - actual_delete_count):
fr = unicode(k + actual_delete_count)
to = unicode(k + items_len)
if array.has_property(fr):
array.put(to, array.get(fr))
else:
array.delete(to)
k += 1
k = arr_len
while k > (arr_len - actual_delete_count + items_len):
array.delete(unicode(k - 1))
k -= 1
# 13
elif items_len > actual_delete_count:
k = arr_len - actual_delete_count
while k > actual_start:
fr = unicode(k + actual_delete_count - 1)
to = unicode(k + items_len - 1)
if array.has_property(fr):
array.put(to, array.get(fr))
else:
array.delete(to)
k -= 1
# 14-17
k = actual_start
while items:
E = items.pop(0)
array.put(unicode(k), E)
k += 1
array.put('length', float(arr_len - actual_delete_count + items_len))
return A
def unshift(this, args):
array = to_object(this, args.space)
arr_len = js_arr_length(array)
argCount = len(args)
k = arr_len
while k > 0:
fr = unicode(k - 1)
to = unicode(k + argCount - 1)
if array.has_property(fr):
array.put(to, array.get(fr))
else:
array.delete(to)
k -= 1
items = tuple(args)
for j, e in enumerate(items):
array.put(unicode(j), e)
array.put('length', float(arr_len + argCount))
return float(arr_len + argCount)
def indexOf(this, args):
array = to_object(this, args.space)
searchElement = get_arg(args, 0)
arr_len = js_arr_length(array)
if arr_len == 0:
return -1.
if len(args) > 1:
n = to_int(args[1])
else:
n = 0
if n >= arr_len:
return -1.
if n >= 0:
k = n
else:
k = arr_len - abs(n)
if k < 0:
k = 0
while k < arr_len:
if array.has_property(unicode(k)):
elementK = array.get(unicode(k))
if strict_equality_op(searchElement, elementK):
return float(k)
k += 1
return -1.
def lastIndexOf(this, args):
array = to_object(this, args.space)
searchElement = get_arg(args, 0)
arr_len = js_arr_length(array)
if arr_len == 0:
return -1.
if len(args) > 1:
n = to_int(args[1])
else:
n = arr_len - 1
if n >= 0:
k = min(n, arr_len - 1)
else:
k = arr_len - abs(n)
while k >= 0:
if array.has_property(unicode(k)):
elementK = array.get(unicode(k))
if strict_equality_op(searchElement, elementK):
return float(k)
k -= 1
return -1.
def every(this, args):
array = to_object(this, args.space)
callbackfn = get_arg(args, 0)
arr_len = js_arr_length(array)
if not is_callable(callbackfn):
raise MakeError('TypeError', 'callbackfn must be a function')
T = get_arg(args, 1)
k = 0
while k < arr_len:
if array.has_property(unicode(k)):
kValue = array.get(unicode(k))
if not to_boolean(
callbackfn.call(T, (kValue, float(k), array))):
return False
k += 1
return True
def some(this, args):
array = to_object(this, args.space)
callbackfn = get_arg(args, 0)
arr_len = js_arr_length(array)
if not is_callable(callbackfn):
raise MakeError('TypeError', 'callbackfn must be a function')
T = get_arg(args, 1)
k = 0
while k < arr_len:
if array.has_property(unicode(k)):
kValue = array.get(unicode(k))
if to_boolean(callbackfn.call(T, (kValue, float(k), array))):
return True
k += 1
return False
def forEach(this, args):
array = to_object(this, args.space)
callbackfn = get_arg(args, 0)
arr_len = js_arr_length(array)
if not is_callable(callbackfn):
raise MakeError('TypeError', 'callbackfn must be a function')
_this = get_arg(args, 1)
k = 0
while k < arr_len:
sk = unicode(k)
if array.has_property(sk):
kValue = array.get(sk)
callbackfn.call(_this, (kValue, float(k), array))
k += 1
return undefined
def map(this, args):
array = to_object(this, args.space)
callbackfn = get_arg(args, 0)
arr_len = js_arr_length(array)
if not is_callable(callbackfn):
raise MakeError('TypeError', 'callbackfn must be a function')
_this = get_arg(args, 1)
k = 0
A = args.space.NewArray(0)
while k < arr_len:
Pk = unicode(k)
if array.has_property(Pk):
kValue = array.get(Pk)
mappedValue = callbackfn.call(_this, (kValue, float(k), array))
A.define_own_property(
Pk, {
'value': mappedValue,
'writable': True,
'enumerable': True,
'configurable': True
}, False)
k += 1
return A
def filter(this, args):
array = to_object(this, args.space)
callbackfn = get_arg(args, 0)
arr_len = js_arr_length(array)
if not is_callable(callbackfn):
raise MakeError('TypeError', 'callbackfn must be a function')
_this = get_arg(args, 1)
k = 0
res = []
while k < arr_len:
if array.has_property(unicode(k)):
kValue = array.get(unicode(k))
if to_boolean(
callbackfn.call(_this, (kValue, float(k), array))):
res.append(kValue)
k += 1
return args.space.ConstructArray(res)
def reduce(this, args):
array = to_object(this, args.space)
callbackfn = get_arg(args, 0)
arr_len = js_arr_length(array)
if not is_callable(callbackfn):
raise MakeError('TypeError', 'callbackfn must be a function')
if not arr_len and len(args) < 2:
raise MakeError('TypeError',
'Reduce of empty array with no initial value')
k = 0
accumulator = undefined
if len(args) > 1: # initial value present
accumulator = args[1]
else:
kPresent = False
while not kPresent and k < arr_len:
kPresent = array.has_property(unicode(k))
if kPresent:
accumulator = array.get(unicode(k))
k += 1
if not kPresent:
raise MakeError('TypeError',
'Reduce of empty array with no initial value')
while k < arr_len:
if array.has_property(unicode(k)):
kValue = array.get(unicode(k))
accumulator = callbackfn.call(
undefined, (accumulator, kValue, float(k), array))
k += 1
return accumulator
def reduceRight(this, args):
array = to_object(this, args.space)
callbackfn = get_arg(args, 0)
arr_len = js_arr_length(array)
if not is_callable(callbackfn):
raise MakeError('TypeError', 'callbackfn must be a function')
if not arr_len and len(args) < 2:
raise MakeError('TypeError',
'Reduce of empty array with no initial value')
k = arr_len - 1
accumulator = undefined
if len(args) > 1: # initial value present
accumulator = args[1]
else:
kPresent = False
while not kPresent and k >= 0:
kPresent = array.has_property(unicode(k))
if kPresent:
accumulator = array.get(unicode(k))
k -= 1
if not kPresent:
raise MakeError('TypeError',
'Reduce of empty array with no initial value')
while k >= 0:
if array.has_property(unicode(k)):
kValue = array.get(unicode(k))
accumulator = callbackfn.call(
undefined, (accumulator, kValue, float(k), array))
k -= 1
return accumulator
def sort_compare(a, b, comp):
    """Comparator for Array.prototype.sort.

    Ordering: holes (Python None) sort last, then JS undefined values, then
    either the user-supplied JS comparator ``comp`` or default string order.
    Returns a negative/zero/positive int like a classic cmp function.
    """
    # Holes always lose to real values; two holes compare equal.
    if a is None or b is None:
        if a is None and b is None:
            return 0
        return 1 if a is None else -1
    # undefined values sort after every defined value.
    if is_undefined(a) or is_undefined(b):
        if is_undefined(a) and is_undefined(b):
            return 0
        return 1 if is_undefined(a) else -1
    # User comparator wins when provided; its JS result is coerced to int.
    if comp is not None:
        return to_int(comp.call(undefined, (a, b)))
    # Default: compare the string conversions lexicographically.
    x, y = to_string(a), to_string(b)
    if x < y:
        return -1
    return 1 if x > y else 0
| 33.416327 | 83 | 0.50684 |
ace775274fd8ede70d6040ccd1f3edb722365ff5 | 527 | py | Python | 15/solution.py | msagi/advent-of-code-2020 | e0269f9db9a1bb7d04bef3d2084d612c256ed953 | [
"Apache-2.0"
] | null | null | null | 15/solution.py | msagi/advent-of-code-2020 | e0269f9db9a1bb7d04bef3d2084d612c256ed953 | [
"Apache-2.0"
] | null | null | null | 15/solution.py | msagi/advent-of-code-2020 | e0269f9db9a1bb7d04bef3d2084d612c256ed953 | [
"Apache-2.0"
] | null | null | null | # https://adventofcode.com/2020/day/15
infile = open('input.txt', 'r')
line = infile.readline()
infile.close()
numbers = [int(i) for i in line.split(',')]
last_turn = 30000000
while len(numbers) < last_turn:
new_number = True
number = numbers[len(numbers) - 1]
for j in range(len(numbers) - 2, -1, -1):
if numbers[j] == number:
new_number = False
numbers.append(len(numbers) - 1 - j)
break
if new_number:
numbers.append(0)
print(numbers[len(numbers) - 1])
| 25.095238 | 48 | 0.59962 |
ace775bbce431c7f942ab9bba08266ee0048b348 | 836 | py | Python | flexget/plugins/output/mock_output.py | tvcsantos/Flexget | e08ce2957dd4f0668911d1e56347369939e4d0a5 | [
"MIT"
] | null | null | null | flexget/plugins/output/mock_output.py | tvcsantos/Flexget | e08ce2957dd4f0668911d1e56347369939e4d0a5 | [
"MIT"
] | 1 | 2018-06-09T18:03:35.000Z | 2018-06-09T18:03:35.000Z | flexget/plugins/output/mock_output.py | tvcsantos/Flexget | e08ce2957dd4f0668911d1e56347369939e4d0a5 | [
"MIT"
] | null | null | null | import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('mock_output')
class MockOutput(object):
"""
Debugging plugin which records a copy of all accepted entries into a list stored in `mock_output` attribute
of the task.
"""
schema = {'type': 'boolean'}
def on_task_start(self, task, config):
task.mock_output = []
def on_task_output(self, task, config):
task.mock_output.extend(e.copy() for e in task.all_entries if e.accepted)
def on_task_exit(self, task, config):
log.verbose('The following titles were output during this task run: %s' %
', '.join(e['title'] for e in task.mock_output))
@event('plugin.register')
def register_plugin():
plugin.register(MockOutput, 'mock_output', debug=True, api_ver=2)
| 28.827586 | 111 | 0.679426 |
ace77627acd2419ca5ec9e75a7dad57867259194 | 226 | py | Python | listaExercicios4/ex7.py | GabrielSouzaGit/PythonStudies | 49ec26d4ae45999695ab32f8e1f27587adb5ca4b | [
"MIT"
] | null | null | null | listaExercicios4/ex7.py | GabrielSouzaGit/PythonStudies | 49ec26d4ae45999695ab32f8e1f27587adb5ca4b | [
"MIT"
] | null | null | null | listaExercicios4/ex7.py | GabrielSouzaGit/PythonStudies | 49ec26d4ae45999695ab32f8e1f27587adb5ca4b | [
"MIT"
] | null | null | null | '''Elabore um programa que efetue a leitura de cinco números inteiros, adicione-os a uma lista e mostre-a.'''
lista = []
for i in range(5):
numero = int(input('Digite o um número: '))
lista.append(numero)
print(lista) | 32.285714 | 109 | 0.690265 |
ace777091a5fb8255477fe05ee70202f1d9267d7 | 4,815 | py | Python | pyccel/parser/parser.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | pyccel/parser/parser.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | pyccel/parser/parser.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import OrderedDict
from pyccel.parser.base import get_filename_from_import
from pyccel.parser.syntactic import SyntaxParser
from pyccel.parser.semantic import SemanticParser
# TODO to be modified as a function
class Parser(object):
    """Driver for two-stage (syntactic, then semantic) analysis of one file.

    Imports are followed recursively: each imported file gets its own Parser,
    stored in ``d_parsers``.  Files imported by this one are its *sons*;
    files that import it are its *parents*.
    """

    def __init__(self, filename, **kwargs):
        """Store the target filename and options; no parsing happens here.

        Parameters
        ----------
        filename : str
            Path of the source file to parse.
        output_folder : str, optional
            Folder used to resolve imported modules (popped from kwargs).
        """
        self._filename = filename
        self._kwargs = kwargs

        # we use it to store the imports
        self._parents = []

        # a Parser can have parents, who are importing it.
        # imports are then its sons.
        self._sons = []
        self._d_parsers = OrderedDict()

        self._syntax_parser = None
        self._semantic_parser = None

        # NOTE: kwargs is stored above by reference, so this pop also removes
        # 'output_folder' from self._kwargs (original behavior, kept).
        self._output_folder = kwargs.pop('output_folder', '')

    @property
    def filename(self):
        """Path of the file handled by this parser.

        Added because ``_parse_sons``/``_annotate_parents`` log ``p.filename``
        in verbose mode, which previously raised AttributeError (only the
        private ``_filename`` existed).
        """
        return self._filename

    @property
    def d_parsers(self):
        """OrderedDict mapping import source name -> Parser for each dependency."""
        return self._d_parsers

    @property
    def parents(self):
        """Parsers of the files that import this one."""
        return self._parents

    @property
    def sons(self):
        """Parsers of the files imported by this one."""
        return self._sons

    @property
    def metavars(self):
        # Prefer the semantic stage's view once annotation has run.
        if self._semantic_parser:
            return self._semantic_parser.metavars
        else:
            return self._syntax_parser.metavars

    @property
    def namespace(self):
        # Prefer the semantic stage's namespace once annotation has run.
        if self._semantic_parser:
            return self._semantic_parser.namespace
        else:
            return self._syntax_parser.namespace

    @property
    def imports(self):
        """Imports collected by the syntactic stage (semantic stage: not implemented)."""
        if self._semantic_parser:
            raise NotImplementedError('TODO')
        else:
            return self._syntax_parser.namespace.imports['imports']

    @property
    def fst(self):
        """Full syntax tree produced by the syntactic parser."""
        return self._syntax_parser.fst

    def parse(self, d_parsers=None, verbose=False):
        """Run syntactic analysis on this file and, recursively, its imports.

        Returns the syntactic AST of this file.
        """
        parser = SyntaxParser(self._filename, **self._kwargs)
        self._syntax_parser = parser

        if d_parsers is None:
            d_parsers = OrderedDict()
        self._d_parsers = self._parse_sons(d_parsers, verbose=verbose)

        return parser.ast

    def annotate(self, **settings):
        """Run semantic analysis (after annotating all sons) and return the
        resulting SemanticParser."""

        # we first treat all sons to get imports
        verbose = settings.pop('verbose', False)
        self._annotate_parents(verbose=verbose)

        parser = SemanticParser(self._syntax_parser,
                                d_parsers=self.d_parsers,
                                parents=self.parents,
                                **settings)
        self._semantic_parser = parser
        return parser

    def append_parent(self, parent):
        """Record a parser that imports this one."""
        # TODO check parent is not in parents
        self._parents.append(parent)

    def append_son(self, son):
        """Record a parser imported by this one."""
        # TODO check son is not in sons
        self._sons.append(son)

    def _parse_sons(self, d_parsers, verbose=False):
        """Recursive algorithm for syntax analysis on a given file and its
        dependencies.

        This function always terminates with an OrderedDict that contains parsers
        for all involved files.
        """
        # Only treat imports not already parsed in a previous pass.
        treated = set(d_parsers.keys())
        imports = set(self.imports.keys())
        imports = imports.difference(treated)
        if not imports:
            return d_parsers

        for source in imports:
            if verbose:
                print ('>>> treating :: {}'.format(source))

            # get the absolute path corresponding to source
            filename = get_filename_from_import(source, self._output_folder)

            q = Parser(filename)
            q.parse(d_parsers=d_parsers)
            d_parsers[source] = q

        # link self to its sons
        imports = list(self.imports.keys())
        for source in imports:
            d_parsers[source].append_parent(self)
            self.append_son(d_parsers[source])

        return d_parsers

    def _annotate_parents(self, **settings):
        """Annotate the sons bottom-up: leaves (no imports) first, then the rest."""

        verbose = settings.pop('verbose', False)

        # we first treat sons that have no imports
        for p in self.sons:
            if not p.sons:
                if verbose:
                    print ('>>> treating :: {}'.format(p.filename))
                p.annotate(**settings)

        # finally we treat the remaining sons recursively
        for p in self.sons:
            if p.sons:
                if verbose:
                    print ('>>> treating :: {}'.format(p.filename))
                p.annotate(**settings)
#==============================================================================
if __name__ == '__main__':
    import sys

    # Require exactly one positional argument: the file to parse.  The
    # original bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    # narrow it to the only failure sys.argv[1] can raise.
    try:
        filename = sys.argv[1]
    except IndexError:
        raise ValueError('Expecting an argument for filename')

    pyccel = Parser(filename)
    pyccel.parse(verbose=True)

    settings = {}
    pyccel.annotate(**settings)
| 26.75 | 81 | 0.575701 |
ace777f2259b5f68041abef067a2600b32932045 | 51,632 | py | Python | python-3.4.4.amd64/Lib/unittest/test/test_loader.py | CSnap/photogate | 208272ef39f4e86f40d431da2ca523e21701f789 | [
"CC0-1.0"
] | null | null | null | python-3.4.4.amd64/Lib/unittest/test/test_loader.py | CSnap/photogate | 208272ef39f4e86f40d431da2ca523e21701f789 | [
"CC0-1.0"
] | null | null | null | python-3.4.4.amd64/Lib/unittest/test/test_loader.py | CSnap/photogate | 208272ef39f4e86f40d431da2ca523e21701f789 | [
"CC0-1.0"
] | null | null | null | import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEqual(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegex(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('unittest.sdasfasfasdf')
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('', unittest)
except AttributeError as e:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''], unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# #14971: Make sure the dotted name resolution works even if the actual
# function doesn't have the same name as is used to find it.
def test_loadTestsFromName__function_with_different_name_than_method(self):
# lambdas have the name '<lambda>'.
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
test = lambda: 1
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
    def test_loadTestsFromNames__callable__call_staticmethod(self):
        """loadTestsFromNames() must call through a staticmethod specifier
        and wrap the TestCase instance it returns in a sub-suite."""
        m = types.ModuleType('m')

        class Test1(unittest.TestCase):
            def test(self):
                pass
        testcase_1 = Test1('test')

        class Foo(unittest.TestCase):
            # The staticmethod is the "callable object" the dotted name
            # 'Foo.foo' resolves to; it returns an already-built TestCase.
            @staticmethod
            def foo():
                return testcase_1
        m.Foo = Foo

        loader = unittest.TestLoader()
        suite = loader.loadTestsFromNames(['Foo.foo'], m)
        self.assertIsInstance(suite, loader.suiteClass)

        # Each name in the list contributes one sub-suite to the result.
        ref_suite = unittest.TestSuite([testcase_1])
        self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest.TestSuite()])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest.TestSuite([tests_2])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertEqual(loader.testMethodPrefix, 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
# Since cmp is now defunct, we simply verify that the results
# occur in the same order as they would with the default sort.
def test_sortTestMethodsUsing__default_value(self):
loader = unittest.TestLoader()
class Foo(unittest.TestCase):
def test_2(self): pass
def test_3(self): pass
def test_1(self): pass
test_names = ['test_2', 'test_3', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names))
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertIs(loader.suiteClass, unittest.TestSuite)
if __name__ == "__main__":
unittest.main()
| 39.323686 | 92 | 0.622443 |
ace77806822203eb743f72f95109d3b07dfe711c | 1,776 | py | Python | workflows/common/ext/EQ-Py/eqpy.py | mdorier/Supervisor | f1e43b2b33fb2cf9e03ea3ac49378aba37bd9839 | [
"MIT"
] | 10 | 2017-03-14T14:36:19.000Z | 2021-01-21T00:39:36.000Z | workflows/common/ext/EQ-Py/eqpy.py | mdorier/Supervisor | f1e43b2b33fb2cf9e03ea3ac49378aba37bd9839 | [
"MIT"
] | 58 | 2017-03-03T21:07:53.000Z | 2021-07-19T18:51:03.000Z | src/eqpy.py | emews/EQ-Py | 9322cb07a71f264fd581474b880154e77d8b1795 | [
"BSD-3-Clause"
] | 21 | 2017-03-08T16:07:47.000Z | 2020-11-24T04:23:00.000Z | import threading
import sys
import importlib, traceback
EQPY_ABORT = "EQPY_ABORT"
try:
import queue as q
except ImportError:
# queue is Queue in python 2
import Queue as q
input_q = q.Queue()
output_q = q.Queue()
p = None
aborted = False
wait_info = None
class WaitInfo:
    """Supplies a queue-polling timeout that grows by one second per
    request, capped at 60 seconds."""

    def __init__(self):
        # Start below the cap; the first getWait() call returns 5.
        self.wait = 4

    def getWait(self):
        """Return the next timeout, incrementing it until the 60 s cap."""
        current = self.wait
        if current < 60:
            current += 1
            self.wait = current
        return current
class ThreadRunner(threading.Thread):
    """Thread that calls ``runnable.run()`` and records any raised
    exception's traceback in ``self.exc`` instead of letting the thread
    die silently."""

    def __init__(self, runnable):
        super().__init__()
        self.runnable = runnable
        # Overwritten with the formatted traceback text if run() raises.
        self.exc = "Exited normally"

    def run(self):
        try:
            self.runnable.run()
        except BaseException:
            # Capture type, value and traceback as one string for the
            # consumer to inspect after the thread finishes.
            self.exc = traceback.format_exc()
def init(pkg):
    """Import the package named *pkg* and start running it on a background
    thread.

    The imported module is handed to ThreadRunner, whose run() invokes
    ``pkg.run()``; any exception it raises is captured in the runner's
    ``exc`` attribute rather than crashing the caller.  Also resets the
    module-level WaitInfo used by output_q_get().
    """
    global p, wait_info
    wait_info = WaitInfo()
    imported_pkg = importlib.import_module(pkg)
    p = ThreadRunner(imported_pkg)
    p.start()
def output_q_get():
    """Return the next item from the output queue, polling while the
    worker thread is alive.

    If the worker dies without producing output, returns EQPY_ABORT on the
    first such call and the worker's formatted exception text on later
    calls (``aborted`` tracks which state we are in).  The poll timeout
    grows via wait_info.getWait().
    """
    global output_q, aborted
    wait = wait_info.getWait()
    # The thread's runnable might put work on the queue and then finish,
    # so it may no longer be alive while something still remains on the
    # queue to be pulled.
    while p.is_alive() or not output_q.empty():
        try:
            result = output_q.get(True, wait)
            break
        except q.Empty:
            pass
    else:
        # Loop ended without a break: the worker is gone and the queue is
        # empty.  If we haven't yet signalled the abort, return the abort
        # marker; otherwise return the formatted exception.
        if aborted:
            result = p.exc
        else:
            result = EQPY_ABORT
        aborted = True
    return result
def OUT_put(string_params):
    """Put *string_params* on the output queue (consumed by output_q_get)."""
    output_q.put(string_params)
def IN_get():
    """Block until a value is available on the input queue and return it."""
    result = input_q.get()
    return result
| 21.925926 | 62 | 0.618806 |
ace77877c03b4089336d676f016001217801ec09 | 3,478 | py | Python | cumulusci/tasks/salesforce/tests/test_insert_record.py | SalesforceFoundation/CumulusCI | f7811a881939ef86c10ccb9b2d7ec8d7af73fd1c | [
"BSD-3-Clause"
] | 109 | 2015-01-20T14:28:48.000Z | 2018-08-31T12:12:39.000Z | cumulusci/tasks/salesforce/tests/test_insert_record.py | SalesforceFoundation/CumulusCI | f7811a881939ef86c10ccb9b2d7ec8d7af73fd1c | [
"BSD-3-Clause"
] | 365 | 2015-01-07T19:54:25.000Z | 2018-09-11T15:10:02.000Z | cumulusci/tasks/salesforce/tests/test_insert_record.py | SalesforceFoundation/CumulusCI | f7811a881939ef86c10ccb9b2d7ec8d7af73fd1c | [
"BSD-3-Clause"
] | 125 | 2015-01-17T16:05:39.000Z | 2018-09-06T19:05:00.000Z | import re
from unittest import mock
import pytest
import responses
from simple_salesforce.exceptions import SalesforceError
from cumulusci.core.exceptions import SalesforceException
from cumulusci.tasks.salesforce.insert_record import InsertRecord
from cumulusci.tests.util import CURRENT_SF_API_VERSION
from .util import create_task
class TestCreateRecord:
    """Unit tests for the InsertRecord task.

    Covers the success path through both the regular REST API (``sf``)
    and the Tooling API (``tooling``), plus two failure shapes: an error
    payload returned without an exception, and a SalesforceError raised
    by simple_salesforce itself.
    """

    def test_run_task(self):
        """A 'key:value,...' string in `values` is parsed into a dict and
        passed to sf.<object>.create()."""
        task = create_task(
            InsertRecord,
            {
                "object": "PermissionSet",
                "values": "Name:HardDelete,PermissionsBulkApiHardDelete:true",
            },
        )
        create = mock.Mock()
        task.sf = mock.Mock(create=create)
        # Simulate a successful insert response from the REST API.
        task.sf.PermissionSet.create.return_value = {
            "id": "0PS3D000000MKTqWAO",
            "success": True,
            "errors": [],
        }
        task._run_task()
        # The string option must arrive at the API as a parsed dict
        # (values stay strings when given in string form).
        task.sf.PermissionSet.create.assert_called_with(
            {"Name": "HardDelete", "PermissionsBulkApiHardDelete": "true"}
        )

    def test_run_task__dict_tooling(self):
        """With `tooling: True` and a dict `values`, the record is created
        via the Tooling API and values keep their original types."""
        task = create_task(
            InsertRecord,
            {
                "object": "PermissionSet",
                "tooling": True,
                "values": {"Name": "HardDelete", "PermissionsBulkApiHardDelete": True},
            },
        )
        create = mock.Mock()
        task.tooling = mock.Mock(create=create)
        # Simulate a successful insert response from the Tooling API.
        task.tooling.PermissionSet.create.return_value = {
            "id": "0PS3D000000MKTqWAO",
            "success": True,
            "errors": [],
        }
        task._run_task()
        # Note the boolean True is passed through unchanged, unlike the
        # string form used in test_run_task.
        task.tooling.PermissionSet.create.assert_called_with(
            {"Name": "HardDelete", "PermissionsBulkApiHardDelete": True}
        )

    def test_salesforce_error_returned_by_simple_salesforce(self):
        "Tests the just-in-case path where SimpleSalesforce does not raise an exception"
        task = create_task(
            InsertRecord,
            {
                "object": "PermissionSet",
                "values": "Name:HardDelete,PermissionsBulkApiHardDelete:true",
            },
        )
        create = mock.Mock()
        task.sf = mock.Mock(create=create)
        # The API reports failure in the payload instead of raising; the
        # task must turn that into a SalesforceException itself.
        task.sf.PermissionSet.create.return_value = {
            "success": False,
            "errors": [
                {
                    "errorCode": "NOT_FOUND",
                    "message": "The requested resource does not exist",
                }
            ],
        }
        with pytest.raises(SalesforceException):
            task._run_task()

    @responses.activate
    def test_salesforce_error_raised_by_simple_salesforce(self):
        """A 404 from the real HTTP layer makes simple_salesforce raise a
        SalesforceError, which the task lets propagate."""
        task = create_task(
            InsertRecord,
            {
                "object": "PermissionSet",
                "values": "Name:HardDelete,PermissionsBulkApiHardDelete:true",
            },
        )
        # Stub every POST under the current API version with a 404 error
        # payload so simple_salesforce raises instead of returning.
        responses.add(
            responses.POST,
            re.compile(
                rf"https://test.salesforce.com/services/data/v{CURRENT_SF_API_VERSION}/.*"
            ),
            content_type="application/json",
            status=404,
            json={
                "success": False,
                "errors": [
                    {
                        "errorCode": "NOT_FOUND",
                        "message": "The requested resource does not exist",
                    }
                ],
            },
        )
        task._init_task()
        with pytest.raises(SalesforceError):
            task._run_task()
| 31.333333 | 90 | 0.541403 |
ace7788c1556004e9184bc04e7043509bb1a4d8f | 2,539 | py | Python | sine.py | ParticleBert/sinetable | 9472c8282d19a15044701870c258c2f62568b5ea | [
"MIT"
] | null | null | null | sine.py | ParticleBert/sinetable | 9472c8282d19a15044701870c258c2f62568b5ea | [
"MIT"
] | null | null | null | sine.py | ParticleBert/sinetable | 9472c8282d19a15044701870c258c2f62568b5ea | [
"MIT"
] | null | null | null | import math
import sys
# Output sample rate of the generated table, in Hz.
SAMPLEFREQUENCY = 192000
# Frequency of the sine tone to generate, in Hz.
AUDIOFREQUENCY = 1000
# Resolution of one audio sample, in bits (samples are scaled to this width).
AUDIOBITS = 24
# Width of one memory word in the generated table, in bits.
MEMORYBITS = 32
def tc(val, bits=24):
    """
    Return the two's-complement representation of ``val`` as an unsigned
    integer of the given bit width (default 24, matching AUDIOBITS).

    Non-negative values are returned unchanged; negative values are mapped
    onto the upper half of the unsigned range.  This replaces the previous
    hard-coded ``0xFFFFFF + val + 1`` (which equals ``2**24 + val``) with
    the equivalent, clearer formula and makes the width a parameter.
    """
    if val >= 0:
        return val
    # Two's complement of a negative number: 2**bits + val.
    return (1 << bits) + val
# for norm in range(0,1920):
# print(norm, norm / 1919, math.sin(2*math.pi*norm/1919), math.sin(2*math.pi*norm/1919) * 8388608, twos_complement(math.sin(2*math.pi*norm/1919) * 8388608) )
print("Samplefrequency: ", SAMPLEFREQUENCY, "Hz")
print("Audiofrequency: ", AUDIOFREQUENCY, "Hz")
samples = (1/AUDIOFREQUENCY) / (1/SAMPLEFREQUENCY) # Calculate the number of samples needed for one loop
# FIXME check if the result of the division is not integer. As soon as any decimal places exist
# the program will not work.
samples = int(samples) # Convert to integer
print(samples, "samples needed.")
print(samples * MEMORYBITS, "kBit needed.") # FIXME If the memory needed is bigger than the 36k the Spartan-7 has there should be at least a warning.
x = []
y = []
twos = [] # Create a list
for var in range(0, samples):
x.append(var) # Backup var for display on the x-axis
var = var / samples # Normalize var between 0 and 1
var = math.sin(2*math.pi*var) # Generate a sine between -1 and 1
var = round(var * 2 ** (AUDIOBITS-2), 0) # Scale up to the maximum amplitude of AUDIOBITS.
# It's AUDIOBITS-2, because the sine goes from -1 to +1
y.append(var) # Make a Backup
var = int(var) # Convert to integer
var = tc(var) # Convert to Two's complement
var = hex(var) # Convert from int to string
var = var.replace("0x","") # Delete "0x"
var = var.replace("-","") # Delete "-"
var = var.rjust(8,"0") # Stuff with zeroes
twos.append(var) # Append to the list
print(twos)
zeilen = int(samples / 8) # The destination text file will get 8 samples per line
file = open(f'table_{AUDIOFREQUENCY}.txt','w') # FIXME Please, some basic error check...
for outer in range(0, zeilen):
addr_hex = hex(outer)
addr_hex = addr_hex.replace("0x", "")
addr_hex = addr_hex.rjust(2, "0")
file.write(f'INIT_{addr_hex} => X"')
for inner in reversed(range(8)):
file.write(twos[outer*8+inner])
file.write("\",\n")
file.close | 39.061538 | 159 | 0.634502 |
ace778ece3c1bc827caea308678457e4d83911ee | 7,106 | py | Python | main.py | denypradana/TelePhy | bb024b3aa972536bbfac652e4f681ff6b25276de | [
"MIT"
] | 1 | 2020-03-31T08:17:11.000Z | 2020-03-31T08:17:11.000Z | main.py | denypradana/TelePhy | bb024b3aa972536bbfac652e4f681ff6b25276de | [
"MIT"
] | null | null | null | main.py | denypradana/TelePhy | bb024b3aa972536bbfac652e4f681ff6b25276de | [
"MIT"
] | null | null | null | from time import sleep
import function
import login
import telegram
# Fungsi utama untuk menghandle semua perintah masuk
def handle(msg):
    """Dispatch one incoming Telegram message to the matching bot action.

    The first whitespace-separated token selects the command; later tokens
    are arguments (a device name, a password, ...).  All user-facing
    replies are deliberately written in Indonesian.
    """
    chat_id = msg['chat']['id']
    command = msg['text']
    # Tokenise on spaces so command arguments can be read by index.
    command_split = command.split(' ')
    command_cmd = command_split[0]

    # Log every received command with server date/time and the sender id.
    print("Pada " + str(function.tanggal()) + " " + str(function.jam())
          + ", user id " + str(chat_id)
          + " memberikan perintah '" + str(command) + "'.")

    # Reply used by every command that requires a prior /pwd login.
    not_logged_in = ("Anda belum login, harap login "
                     "dahulu dengan perintah "
                     "'/pwd Password_Anda'.")

    def logged_in():
        # True when the sender has authenticated with the current password.
        return function.cekpass(chat_id, login.password_sekarang)

    if command == '/start':
        bot.sendMessage(chat_id,
                        "Selamat Datang di RB07-Pi, "
                        "saya sebagai bot siap melayani anda. "
                        "Ketik '/bantuan' untuk melihat bantuan "
                        "atau ketik '/pwd Password_Anda' "
                        " untuk login.")
    elif command == '/waktu':
        bot.sendMessage(chat_id,
                        "Waktu server menunjukkan sekarang tanggal "
                        + function.tanggal()
                        + " dan pukul "
                        + function.jam()
                        + " WIB.")
    elif command == '/bantuan':
        function.bantuan(chat_id)
    elif command_cmd == '/pwd':
        if function.cekpass(chat_id, command_split[1]):
            bot.sendMessage(chat_id, "Login Sukses.")
        else:
            bot.sendMessage(chat_id,
                            "Maaf, password salah atau user "
                            "tidak terdaftar, "
                            "silahkan coba lagi.")
    elif command_cmd == '/on':
        if not logged_in():
            bot.sendMessage(chat_id, not_logged_in)
        elif command_split[1] == 'semua':
            function.onalldevice(chat_id)
        else:
            bot.sendMessage(chat_id, function.ondevice(command_split[1]))
    elif command_cmd == '/off':
        if not logged_in():
            bot.sendMessage(chat_id, not_logged_in)
        elif command_split[1] == 'semua':
            function.offalldevice(chat_id)
        else:
            bot.sendMessage(chat_id, function.offdevice(command_split[1]))
    elif command_cmd == '/info':
        if not logged_in():
            bot.sendMessage(chat_id, not_logged_in)
        elif command_split[1] == 'suhu':
            bot.sendMessage(chat_id, function.suhu())
        else:
            bot.sendMessage(chat_id, "Sensor tidak ada.")
    elif command_cmd == '/status':
        if not logged_in():
            bot.sendMessage(chat_id, not_logged_in)
        elif command_split[1] == 'semua':
            function.statalldevice(chat_id)
        else:
            bot.sendMessage(chat_id, function.statdevice(command_split[1]))
    elif command_cmd == '/listdevice':
        if not logged_in():
            bot.sendMessage(chat_id, not_logged_in)
        else:
            function.listdevice(chat_id)
    else:
        bot.sendMessage(chat_id,
                        "Perintah tidak ditemukan, gunakan "
                        "perintah '/bantuan' untuk melihat "
                        "bantuan yang ada.")
# Inisialisasi bot telegram
bot = telegram.bot
bot.message_loop(handle)
# Menampilkan Banner Program
print("################################################")
print("# TelePhy versi 1.0 #")
print("# kontrol RaspberryPi anda dengan bot telegram #")
print("# https://github.com/denypradana/TelePhy #")
print("# #")
print("# dibuat oleh Deny Pradana #")
print("# https://denypradana.com #")
print("# email : dp@denypradana.com #")
print("################################################")
# Menampilkan keterangan kapan program mulai berjalan
print("Program mulai berjalan pada tanggal "
+ function.tanggal()
+ " pukul "
+ function.jam()
+ ".")
print("Menunggu Perintah...")
"""
Menjaga agar program tetap berjalan dan hanya akan berhenti
apabila ada perintah CTRL + C
"""
try:
while 1:
sleep(10)
except KeyboardInterrupt:
print('Sambungan Terputus') | 39.477778 | 76 | 0.41275 |
ace77938e78bdbd1e96801065b5114fbb3d379e0 | 399 | py | Python | kubernetes_typed/client/models/v2beta1_container_resource_metric_source.py | nikhiljha/kubernetes-typed | 4f4b969aa400c88306f92560e56bda6d19b2a895 | [
"Apache-2.0"
] | null | null | null | kubernetes_typed/client/models/v2beta1_container_resource_metric_source.py | nikhiljha/kubernetes-typed | 4f4b969aa400c88306f92560e56bda6d19b2a895 | [
"Apache-2.0"
] | null | null | null | kubernetes_typed/client/models/v2beta1_container_resource_metric_source.py | nikhiljha/kubernetes-typed | 4f4b969aa400c88306f92560e56bda6d19b2a895 | [
"Apache-2.0"
] | null | null | null | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V2beta1ContainerResourceMetricSourceDict generated type."""
from typing import TypedDict
# Generated TypedDict ("DO NOT EDIT" per the module header) mirroring the
# Kubernetes V2beta1ContainerResourceMetricSource API object.
V2beta1ContainerResourceMetricSourceDict = TypedDict(
    "V2beta1ContainerResourceMetricSourceDict",
    {
        "container": str,
        "name": str,
        "targetAverageUtilization": int,
        "targetAverageValue": str,
    },
    # total=False: every key is optional.
    total=False,
)
| 26.6 | 62 | 0.701754 |
ace77968e3b38c3e16fc1afab999d5734160f2f1 | 541 | py | Python | examples/customization/normal.py | eLBati/pyxb | 14737c23a125fd12c954823ad64fc4497816fae3 | [
"Apache-2.0"
] | 123 | 2015-01-12T06:43:22.000Z | 2022-03-20T18:06:46.000Z | examples/customization/normal.py | eLBati/pyxb | 14737c23a125fd12c954823ad64fc4497816fae3 | [
"Apache-2.0"
] | 103 | 2015-01-08T18:35:57.000Z | 2022-01-18T01:44:14.000Z | examples/customization/normal.py | eLBati/pyxb | 14737c23a125fd12c954823ad64fc4497816fae3 | [
"Apache-2.0"
] | 54 | 2015-02-15T17:12:00.000Z | 2022-03-07T23:02:32.000Z | # Demonstrate alternatives for bindings customization
# Traditional customization
from raw.custom import *
import raw.custom as raw_custom
# Subclass the generated binding class and register it as the superseding
# class, so the generated code instantiates this customized version instead.
class ta0 (raw_custom.ta0):
    def xa0 (self):
        # Extension method added on top of the generated ta0 binding.
        return 'extend ta0'
raw_custom.ta0._SetSupersedingClass(ta0)
class tc01 (raw_custom.tc01):
    def xc01 (self):
        # Extension method added on top of the generated tc01 binding.
        return 'extend tc01'
raw_custom.tc01._SetSupersedingClass(tc01)
# tc02 also mixes in the customized ta0, so it inherits xa0 as well.
class tc02 (raw_custom.tc02, ta0):
    def xc02 (self):
        return 'extend tc02'
raw_custom.tc02._SetSupersedingClass(tc02)
| 24.590909 | 53 | 0.74122 |
ace779951df3e0a96174931ebc812e78ac971199 | 871 | py | Python | datamart/unit_tests/test_fbi_materializer.py | liangmuxin/datamart | 495a21588db39c9ad239409208bec701dca07f30 | [
"MIT"
] | 7 | 2018-10-02T01:32:23.000Z | 2020-10-08T00:42:35.000Z | datamart/unit_tests/test_fbi_materializer.py | liangmuxin/datamart | 495a21588db39c9ad239409208bec701dca07f30 | [
"MIT"
] | 47 | 2018-10-02T05:41:13.000Z | 2021-02-02T21:50:31.000Z | datamart/unit_tests/test_fbi_materializer.py | liangmuxin/datamart | 495a21588db39c9ad239409208bec701dca07f30 | [
"MIT"
] | 19 | 2018-10-01T22:27:20.000Z | 2019-02-28T18:59:53.000Z | import unittest
import json
import pandas as pd
import os
from datamart.materializers.fbi_materializer import FbiMaterializer
from datamart.utilities.utils import Utils
resources_path = os.path.join(os.path.dirname(__file__), "./resources")
class TestFbiMaterializer(unittest.TestCase):
    """Tests for FbiMaterializer against a stored sample result file."""

    def setUp(self):
        self.materializer = FbiMaterializer()
        self.sample_result_file = os.path.join(resources_path, "CIUS_2009_table_8.csv")

    @Utils.test_print
    def test_get(self):
        """The materialized FBI table must equal the expected CSV exactly."""
        url = "https://www2.fbi.gov/ucr/cius2009/data/documents/09tbl08.xls"
        metadata = {"materialization": {"arguments": {"url": url}}}
        materialized = self.materializer.get(metadata)
        expected = pd.read_csv(self.sample_result_file)
        self.assertTrue(materialized.equals(expected))
ace77a4d6faa22ef820edf36e7d927f912c6b8e4 | 2,440 | py | Python | problems/trees-and-graphs/01_matrix.py | andrenbrandao/algorithm-problems | a6413cc7b3810008dd80766ae0d398a7edc58407 | [
"MIT"
] | null | null | null | problems/trees-and-graphs/01_matrix.py | andrenbrandao/algorithm-problems | a6413cc7b3810008dd80766ae0d398a7edc58407 | [
"MIT"
] | null | null | null | problems/trees-and-graphs/01_matrix.py | andrenbrandao/algorithm-problems | a6413cc7b3810008dd80766ae0d398a7edc58407 | [
"MIT"
] | null | null | null | """
LeetCode 542. 01 Matrix
https://leetcode.com/problems/01-matrix/
"""
"""
We want to find the distance of the nearest 0 for each cell.
0 cells always have a distance of 0 to the nearest zero.
1 cells will have at least a distance of 1.
What if we have a matrix with all 1s?
- The contraints tell us there is a least one zero in the matrix.
With all zeroes?
- All have a zero distance
[1 1 1]
[1 1 1]
[1 1 0]
Output:
[4 3 2]
[3 2 1]
[2 1 0]
Brute Force:
- Iterate over the matrix
- When we find a 1, we run a BFS to find the nearest 0 and calculate the distance
TC: O((m*n)^2)
Better Approach:
We could add all zeroes to a queue and run a BFS to calculate the distance
to the neighbors.
This could be optimized by only adding the 1s neighbors to zeroes and start them
with distance equal to 1. Then, run a BFS.
Algorithm:
- Initialize a result matrix with zero as the distances.
- Iterate over the matrix and when we find a zero, add the neighbors that are 1 to
a queue. Mark them as already visited to make sure they are not also added by other zeroes.
- Run BFS with these added 1s and increase the distance.
- Store this distances in the result matrix.
Time Complexity: O(m*n)
Space Complexity: O(m*n)
"""
from typing import List
from collections import deque
class Solution:
    def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:
        """Return, for every cell, the distance to the nearest 0 in ``mat``.

        Multi-source BFS seeded with all 0-cells; each cell is visited once,
        so time and space are O(m*n).

        Fix over the previous version: the input matrix is no longer
        mutated (it used to mark visited 1-cells with -1, permanently
        corrupting the caller's data).

        :param mat: binary matrix of 0s and 1s (at least one 0 present)
        :returns: matrix of the same shape with the BFS distances
        """
        height = len(mat)
        width = len(mat[0])
        result = [[0 for _ in range(width)] for _ in range(height)]
        # Visited flags are kept separately so ``mat`` stays untouched.
        seen = [[False] * width for _ in range(height)]
        queue = deque()

        # Seed the BFS with every 0-cell; their distance is 0.
        for i in range(height):
            for j in range(width):
                if mat[i][j] == 0:
                    seen[i][j] = True
                    queue.append((i, j))

        # Expand outwards one layer at a time; the first time a cell is
        # reached, its distance is minimal.
        while queue:
            i, j = queue.popleft()
            for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
                if 0 <= ni < height and 0 <= nj < width and not seen[ni][nj]:
                    seen[ni][nj] = True
                    result[ni][nj] = result[i][j] + 1
                    queue.append((ni, nj))

        return result
| 26.813187 | 91 | 0.621721 |
ace77b4f0d881759ef6098d71e641e190a191f06 | 13,003 | py | Python | recognition/image_iter.py | 994374821/face-recognition- | e5cdc04bc88cc9630e25050888f17c059b790be0 | [
"MIT"
] | 6 | 2019-09-22T11:46:00.000Z | 2019-12-26T02:51:09.000Z | recognition/image_iter.py | 994374821/face-recognition- | e5cdc04bc88cc9630e25050888f17c059b790be0 | [
"MIT"
] | 1 | 2019-10-24T10:32:51.000Z | 2019-10-24T10:32:51.000Z | recognition/image_iter.py | 994374821/face-recognition- | e5cdc04bc88cc9630e25050888f17c059b790be0 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import logging
import sys
import numbers
import math
import sklearn
import datetime
import numpy as np
import cv2
from PIL import Image
from io import BytesIO
import mxnet as mx
from mxnet import ndarray as nd
from mxnet import io
from mxnet import recordio
logger = logging.getLogger()
class FaceImageIter(io.DataIter):
def __init__(self, batch_size, data_shape,
path_imgrec = None,
shuffle=False, aug_list=None, mean = None,
rand_mirror = False, cutoff = 0, color_jittering = 0,
images_filter = 0,
data_name='data', label_name='softmax_label', **kwargs):
super(FaceImageIter, self).__init__()
assert path_imgrec
if path_imgrec:
logging.info('loading recordio %s...',
path_imgrec)
path_imgidx = path_imgrec[0:-4]+".idx"
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
s = self.imgrec.read_idx(0)
header, _ = recordio.unpack(s)
if header.flag>0:
print('header0 label', header.label)
self.header0 = (int(header.label[0]), int(header.label[1]))
#assert(header.flag==1)
#self.imgidx = range(1, int(header.label[0]))
self.imgidx = []
self.id2range = {}
self.seq_identity = range(int(header.label[0]), int(header.label[1]))
for identity in self.seq_identity:
s = self.imgrec.read_idx(identity)
header, _ = recordio.unpack(s)
a,b = int(header.label[0]), int(header.label[1])
count = b-a
if count<images_filter:
continue
self.id2range[identity] = (a,b)
self.imgidx += range(a, b)
print('id2range', len(self.id2range))
else:
self.imgidx = list(self.imgrec.keys)
if shuffle:
self.seq = self.imgidx
self.oseq = self.imgidx
print(len(self.seq))
else:
self.seq = None
self.mean = mean
self.nd_mean = None
if self.mean:
self.mean = np.array(self.mean, dtype=np.float32).reshape(1,1,3)
self.nd_mean = mx.nd.array(self.mean).reshape((1,1,3))
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
self.batch_size = batch_size
self.data_shape = data_shape
self.shuffle = shuffle
self.image_size = '%d,%d'%(data_shape[1],data_shape[2])
self.rand_mirror = rand_mirror
print('rand_mirror', rand_mirror)
self.cutoff = cutoff
self.color_jittering = color_jittering
self.CJA = mx.image.ColorJitterAug(0.125, 0.125, 0.125)
self.provide_label = [(label_name, (batch_size,))]
#print(self.provide_label[0][1])
self.cur = 0
self.nbatch = 0
self.is_init = False
def reset(self):
"""Resets the iterator to the beginning of the data."""
print('call reset()')
self.cur = 0
if self.shuffle:
random.shuffle(self.seq)
if self.seq is None and self.imgrec is not None:
self.imgrec.reset()
def num_samples(self):
return len(self.seq)
def next_sample(self):
"""Helper function for reading in next sample."""
#set total batch size, for example, 1800, and maximum size for each people, for example 45
if self.seq is not None:
while True:
if self.cur >= len(self.seq):
raise StopIteration
idx = self.seq[self.cur]
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
return label, img, None, None
else:
label, fname, bbox, landmark = self.imglist[idx]
return label, self.read_image(fname), bbox, landmark
else:
s = self.imgrec.read()
if s is None:
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img, None, None
def brightness_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
src *= alpha
return src
def contrast_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = nd.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
src *= alpha
src += gray
return src
def saturation_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = nd.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = nd.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
def color_aug(self, img, x):
#augs = [self.brightness_aug, self.contrast_aug, self.saturation_aug]
#random.shuffle(augs)
#for aug in augs:
# #print(img.shape)
# img = aug(img, x)
# #print(img.shape)
#return img
return self.CJA(img)
def mirror_aug(self, img):
_rd = random.randint(0,1)
if _rd==1:
for c in xrange(img.shape[2]):
img[:,:,c] = np.fliplr(img[:,:,c])
return img
def compress_aug(self, img):
buf = BytesIO()
img = Image.fromarray(img.asnumpy(), 'RGB')
q = random.randint(2, 20)
img.save(buf, format='JPEG', quality=q)
buf = buf.getvalue()
img = Image.open(BytesIO(buf))
return nd.array(np.asarray(img, 'float32'))
def next(self):
if not self.is_init:
self.reset()
self.is_init = True
"""Returns the next batch of data."""
#print('in next', self.cur, self.labelcur)
self.nbatch+=1
batch_size = self.batch_size
c, h, w = self.data_shape
batch_data = nd.empty((batch_size, c, h, w))
if self.provide_label is not None:
batch_label = nd.empty(self.provide_label[0][1])
i = 0
try:
while i < batch_size:
label, s, bbox, landmark = self.next_sample()
_data = self.imdecode(s)
if _data.shape[0]!=self.data_shape[1]:
_data = mx.image.resize_short(_data, self.data_shape[1])
if self.rand_mirror:
_rd = random.randint(0,1)
if _rd==1:
_data = mx.ndarray.flip(data=_data, axis=1)
if self.color_jittering>0:
if self.color_jittering>1:
_rd = random.randint(0,1)
if _rd==1:
_data = self.compress_aug(_data)
#print('do color aug')
_data = _data.astype('float32', copy=False)
#print(_data.__class__)
_data = self.color_aug(_data, 0.125)
if self.nd_mean is not None:
_data = _data.astype('float32', copy=False)
_data -= self.nd_mean
_data *= 0.0078125
if self.cutoff>0:
_rd = random.randint(0,1)
if _rd==1:
#print('do cutoff aug', self.cutoff)
centerh = random.randint(0, _data.shape[0]-1)
centerw = random.randint(0, _data.shape[1]-1)
half = self.cutoff//2
starth = max(0, centerh-half)
endh = min(_data.shape[0], centerh+half)
startw = max(0, centerw-half)
endw = min(_data.shape[1], centerw+half)
#print(starth, endh, startw, endw, _data.shape)
_data[starth:endh, startw:endw, :] = 128
data = [_data]
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
#print('aa',data[0].shape)
#data = self.augmentation_transform(data)
#print('bb',data[0].shape)
for datum in data:
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
#print(datum.shape)
batch_data[i][:] = self.postprocess_data(datum)
batch_label[i][:] = label
i += 1
except StopIteration:
if i<batch_size:
raise StopIteration
return io.DataBatch([batch_data], [batch_label], batch_size - i)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
img = mx.image.imdecode(s) #mx.ndarray
return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = [ret for src in data for ret in aug(src)]
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
class FaceImageIterList(io.DataIter):
def __init__(self, iter_list):
assert len(iter_list)>0
self.provide_data = iter_list[0].provide_data
self.provide_label = iter_list[0].provide_label
self.iter_list = iter_list
self.cur_iter = None
def reset(self):
self.cur_iter.reset()
def next(self):
self.cur_iter = random.choice(self.iter_list)
while True:
try:
ret = self.cur_iter.next()
except StopIteration:
self.cur_iter.reset()
continue
return ret
if __name__=="__main__":
import json
import matplotlib.pyplot as plt
# """
train_dataiter = FaceImageIter(
batch_size=4,
data_shape=(3, 112, 112),
path_imgrec="/home/gaomingda/insightface/datasets/ms1m-retinaface-t1/train.rec",
shuffle=True,
rand_mirror=False,
mean=None,
cutoff=False,
color_jittering=0,
images_filter=0,
)
data_nums = train_dataiter.num_samples()
max_id = max(train_dataiter.seq)
min_id = min(train_dataiter.seq)
label, img = train_dataiter.next()
# test train dataset
# import cv2
#
# for i in range(1000):
# label, img, box, landmark = train_dataiter.next_sample()
# img_ = train_dataiter.imdecode(img)
# cv2.imwrite('/home/gaomingda/insightface/recognition/data/imgs_show/{}.jpg'.format(train_dataiter.cur),
# np.array(img_.asnumpy(), dtype=np.uint8))
labels_cnt = {}
labels = []
for i in range(data_nums):
label, img, box, landmark = train_dataiter.next_sample()
labels.append(int(label))
if label in labels_cnt:
labels_cnt[label] += 1
else:
labels_cnt[label] = 1
labels_set = set(labels)
max_label = max(labels_set)
print('max label: ', max_label)
len_label_id = len(labels_set)
print('len label_id: ', len_label_id)
counts = np.bincount(np.array(labels))
# """
# show counts
# count = counts.tolist()
# plt.bar(range(len(count)), count)
# plt.show()
with open('/home/gaomingda/insightface/datasets/label_cnt.txt', 'wb') as f:
for i in range(counts.shape[0]):
f.write(str(counts[i]))
f.write('\n')
json.dump(labels_cnt, open("/home/gaomingda/insightface/datasets/label_cnt.json", 'wb'))
print("saved label_cnt to json file")
| 34.582447 | 126 | 0.556718 |
ace77cd185938304f732662254e622b4eb2d00ac | 557 | py | Python | hermes_demo_helper/hermes_demo_helper/slots.py | snipsco/snips-skill-coffee | 4353feee77211df8157c47ae709947dfc5e30816 | [
"MIT"
] | null | null | null | hermes_demo_helper/hermes_demo_helper/slots.py | snipsco/snips-skill-coffee | 4353feee77211df8157c47ae709947dfc5e30816 | [
"MIT"
] | null | null | null | hermes_demo_helper/hermes_demo_helper/slots.py | snipsco/snips-skill-coffee | 4353feee77211df8157c47ae709947dfc5e30816 | [
"MIT"
] | 11 | 2019-04-06T02:37:42.000Z | 2020-06-22T04:29:21.000Z | class Slots:
def __init__(self, data):
self.data = data.get('slot', None)
def get_value(self, group):
res = self.data.get(group, {"name":None}).get("name")
if (res is None):
return None
if (isinstance(res, (str))):
return res
return None
def get_threshold(self, group):
res = self.data.get(group, {"threshold": 0}).get("threshold")
if (res is None):
return 0
if (isinstance(res, (int, float))):
return res
return 0
| 27.85 | 69 | 0.517056 |
ace77d53ad84b7e323ea6670116b3365f31da8e5 | 798 | py | Python | devilry/devilry_account/middleware.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | [
"BSD-3-Clause"
] | null | null | null | devilry/devilry_account/middleware.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | [
"BSD-3-Clause"
] | null | null | null | devilry/devilry_account/middleware.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | [
"BSD-3-Clause"
] | null | null | null | from django.utils import translation
from django.utils.deprecation import MiddlewareMixin
from devilry.devilry_account.models import User
class LocalMiddleware(MiddlewareMixin):
    """Activate the user's preferred language for every request.

    NOTE(review): ``request.user.is_authenticated()`` is called as a
    method here; confirm this matches the Django version in use.
    """

    def process_request(self, request):
        """Pick and activate a language code before the view runs."""
        if request.user.is_authenticated():
            # Authenticated: use the language stored on the Devilry user.
            languagecode = self.__lookup_devilry_user(request).languagecode
        else:
            # Anonymous: fall back to the session-selected language (may be None).
            languagecode = request.session.get('SELECTED_LANGUAGE_CODE')
        translation.activate(languagecode)
        request.LANGUAGE_CODE = translation.get_language()

    def __lookup_devilry_user(self, request):
        # Re-fetch as the project's User model by id.
        return User.objects.get(id=request.user.id)

    def process_response(self, request, response):
        """Advertise the active language on the outgoing response."""
        response['Content-Language'] = translation.get_language()
        return response
ace77d592fe6bb52b967b44ff9e43739e6a66507 | 7,249 | py | Python | scripts/drs_out_name.py | jonseddon/primavera-dmt | 1239044e37f070b925a3d06db68351f285df780c | [
"BSD-3-Clause"
] | null | null | null | scripts/drs_out_name.py | jonseddon/primavera-dmt | 1239044e37f070b925a3d06db68351f285df780c | [
"BSD-3-Clause"
] | 49 | 2018-11-14T17:00:03.000Z | 2021-12-20T11:04:22.000Z | scripts/drs_out_name.py | jonseddon/primavera-dmt | 1239044e37f070b925a3d06db68351f285df780c | [
"BSD-3-Clause"
] | 2 | 2018-07-04T10:58:43.000Z | 2018-09-29T14:55:08.000Z | #!/usr/bin/env python
"""
drs_out_name.py
This script is used to update the directory structure so that the variable in
the structure is out_name rather than cmor_name. All files must be online and
in the same directory.
"""
from __future__ import print_function
import argparse
import logging.config
import os
import shutil
import sys
import django
django.setup()
from pdata_app.models import DataRequest, Settings
from pdata_app.utils.common import construct_drs_path, get_gws, is_same_gws
__version__ = '0.1.0b'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
# The top-level directory to write output data to
BASE_OUTPUT_DIR = Settings.get_solo().base_output_dir
def construct_old_drs_path(data_file):
    """
    Make the old-style CMIP6 DRS directory path for the specified file,
    i.e. the path whose variable component is the variable's cmor_name
    (rather than its out_name).

    :param pdata_app.models.DataFile data_file: the file
    :returns: A string containing the DRS directory structure
    """
    components = [
        data_file.project.short_name,
        data_file.activity_id.short_name,
        data_file.institute.short_name,
        data_file.climate_model.short_name,
        data_file.experiment.short_name,
        data_file.rip_code,
        data_file.variable_request.table_name,
        data_file.variable_request.cmor_name,
        data_file.grid,
        data_file.version,
    ]
    return os.path.join(*components)
def parse_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Change DRS directory to out_name')
    parser.add_argument('request_id', help="the data request's id")
    parser.add_argument('-m', '--move', type=int,
                        help='move the data request to this GWS number')
    parser.add_argument('-l', '--log-level',
                        help='set logging level to one of debug, info, warn '
                             '(the default), or error')
    parser.add_argument('--version', action='version',
                        version='%(prog)s {}'.format(__version__))
    return parser.parse_args()
def main(args):
    """
    Main entry point
    Data request IDs are in the form:
    <climate-model>_<experiment>_<variant-label>_<table>_<variable>
    e.g.:
    HadGEM3-GC31-LM_highresSST-present_r1i1p1f1_Amon_psl
    """
    # Split the request id into its five facets (raises ValueError if the
    # id does not contain exactly four underscores).
    (source_id, experiment_id, variant_label, table_id, variable_id) = (
        args.request_id.split('_')
    )
    # Look up the single matching DataRequest; exit with an error if there
    # is no match or more than one match.
    try:
        data_req = DataRequest.objects.get(
            climate_model__short_name=source_id,
            experiment__short_name=experiment_id,
            rip_code=variant_label,
            variable_request__table_name=table_id,
            variable_request__cmor_name=variable_id
        )
    except django.core.exceptions.ObjectDoesNotExist:
        logger.error('No data requests found for {}'.format(args.request_id))
        sys.exit(1)
    except django.core.exceptions.MultipleObjectsReturned:
        logger.error('Multiple data requests found for {}'.
                     format(args.request_id))
        sys.exit(1)
    # Some quick checks that all files are online, in the same directory and
    # that we need to make a change.
    if data_req.online_status() != 'online':
        logger.error('Not all files are online')
        sys.exit(1)
    directories = data_req.directories()
    if None in directories:
        logger.error('None in directories')
        sys.exit(1)
    if len(directories) != 1:
        logger.error('Length of directories is not 1')
        sys.exit(1)
    # If out_name is not set then cmor_name is already the directory name
    # and there is nothing to rename.
    first_file = data_req.datafile_set.first()
    var_req = first_file.variable_request
    if var_req.out_name is None:
        logger.error('out_name is None. Nothing needs to be done.')
        sys.exit(1)
    # Refuse to touch datasets whose most recent ESGF dataset has already
    # been handed over for publication.
    if data_req.esgfdataset_set.count():
        esgf = data_req.esgfdataset_set.order_by('-version').first()
        if esgf.status in ['SUBMITTED', 'AT_CEDA', 'PUBLISHED', 'REJECTED']:
            logger.error('Already submitted to CREPP')
            sys.exit(1)
    # Construct the new directory names
    directory = directories[0]
    existing_dir = construct_old_drs_path(first_file)
    if not directory.endswith(existing_dir):
        logger.error(f'Directory does not end with {existing_dir}. '
                     f'It is {directory}')
        sys.exit(1)
    new_drs_dir = os.path.join(get_gws(directory),
                               construct_drs_path(first_file))
    if not os.path.exists(new_drs_dir):
        os.makedirs(new_drs_dir)
    # Set-up for sym links if required, i.e. when the data lives in a
    # different group workspace from the primary output directory
    do_sym_links = False
    if not is_same_gws(BASE_OUTPUT_DIR, directory):
        do_sym_links = True
        sym_link_dir = os.path.join(BASE_OUTPUT_DIR,
                                    construct_old_drs_path(first_file))
        new_sym_link_dir = os.path.join(BASE_OUTPUT_DIR,
                                        construct_drs_path(first_file))
        if not os.path.exists(new_sym_link_dir):
            os.makedirs(new_sym_link_dir)
    logger.debug('All checks complete. Starting to move files.')
    # Move the files
    for data_file in data_req.datafile_set.order_by('name'):
        new_path = os.path.join(new_drs_dir, data_file.name)
        shutil.move(
            os.path.join(data_file.directory, data_file.name),
            new_path
        )
        # Record the file's new location in the database.
        data_file.directory = new_drs_dir
        data_file.save()
        if do_sym_links:
            # Remove the old link (warn if something unexpected is there)
            # and create a link at the new, out_name-based path.
            old_link_path = os.path.join(sym_link_dir, data_file.name)
            if os.path.lexists(old_link_path):
                if os.path.islink(old_link_path):
                    os.remove(old_link_path)
                else:
                    logger.warning(f"{old_link_path} exists but isn't a link")
            os.symlink(
                new_path,
                os.path.join(new_sym_link_dir, data_file.name)
            )
    # Delete empty directories
    if not os.listdir(directory):
        os.rmdir(directory)
    if do_sym_links:
        if not os.listdir(sym_link_dir):
            os.rmdir(sym_link_dir)
if __name__ == "__main__":
    cmd_args = parse_args()
    # determine the log level from the command-line option, falling back
    # to the module default; an unknown name is rejected with an error
    if cmd_args.log_level:
        try:
            log_level = getattr(logging, cmd_args.log_level.upper())
        except AttributeError:
            logger.setLevel(logging.WARNING)
            logger.error('log-level must be one of: debug, info, warn or error')
            sys.exit(1)
    else:
        log_level = DEFAULT_LOG_LEVEL
    # configure the logger: a single stream handler on the root logger
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': DEFAULT_LOG_FORMAT,
            },
        },
        'handlers': {
            'default': {
                'level': log_level,
                'class': 'logging.StreamHandler',
                'formatter': 'standard'
            },
        },
        'loggers': {
            '': {
                'handlers': ['default'],
                'level': log_level,
                'propagate': True
            }
        }
    })
    # run the code
    main(cmd_args)
| 32.217778 | 80 | 0.61443 |
ace77dc3bb55806de6da01e4d6f16bef79d5279d | 10,299 | py | Python | project2/tests/q3_2_1.py | DrRossTaylor/intro-DS-Assignments | 88f0747b89869cae4e4227e6f3a936f0f1583937 | [
"CC0-1.0"
] | null | null | null | project2/tests/q3_2_1.py | DrRossTaylor/intro-DS-Assignments | 88f0747b89869cae4e4227e6f3a936f0f1583937 | [
"CC0-1.0"
] | null | null | null | project2/tests/q3_2_1.py | DrRossTaylor/intro-DS-Assignments | 88f0747b89869cae4e4227e6f3a936f0f1583937 | [
"CC0-1.0"
] | null | null | null | test = {
'name': 'q3_2_1',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(0, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(0, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(1, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(1, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(2, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(2, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(3, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(3, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(4, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(4, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(5, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(5, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(6, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(6, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(7, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(7, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(8, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(8, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(9, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(9, 11)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(10, 5)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from collections import Counter;
>>> g = train_movies.column('Genre');
>>> def check(r, k):
... t = test_20.row(r)
... return classify(t, train_20, g, k) == Counter(np.take(g, np.argsort(fast_distances(t, train_20))[:k])).most_common(1)[0][0];
>>> check(10, 11)
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| 34.215947 | 142 | 0.430624 |
ace77f4976901172bcdf7e631a70607f6d74c46b | 421 | py | Python | web3/gas_strategies/rpc.py | bhardwajRahul/web3.py | efecadcdea64f9481fcace558a8ea103462e2923 | [
"MIT"
] | null | null | null | web3/gas_strategies/rpc.py | bhardwajRahul/web3.py | efecadcdea64f9481fcace558a8ea103462e2923 | [
"MIT"
] | null | null | null | web3/gas_strategies/rpc.py | bhardwajRahul/web3.py | efecadcdea64f9481fcace558a8ea103462e2923 | [
"MIT"
] | null | null | null | from typing import (
Optional,
)
from web3 import Web3
from web3._utils.rpc_abi import (
RPC,
)
from web3.types import (
TxParams,
Wei,
)
def rpc_gas_price_strategy(
    w3: Web3, transaction_params: Optional[TxParams] = None
) -> Wei:
    """
    Gas price strategy that simply asks the connected node via the
    ``eth_gasPrice`` JSON-RPC method. ``transaction_params`` is accepted
    for interface compatibility but is not consulted.
    """
    gas_price = w3.manager.request_blocking(RPC.eth_gasPrice, [])
    return gas_price
| 19.136364 | 88 | 0.686461 |
ace77f6aed6f6bc8234fcfcc17f039ac1eb23149 | 2,645 | py | Python | sumo/tests/sikulixTestRunner.py | iltempe/osmosi | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | [
"MIT"
] | null | null | null | sumo/tests/sikulixTestRunner.py | iltempe/osmosi | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | [
"MIT"
] | null | null | null | sumo/tests/sikulixTestRunner.py | iltempe/osmosi | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | [
"MIT"
] | 2 | 2017-12-14T16:41:59.000Z | 2020-10-16T17:51:27.000Z | #!/usr/bin/env python
"""
@file sikulixTestRunner.py
@date 2016-0-01
@author Pablo Alvarez Lopez
@version $Id$
Wrapper script for running gui tests with SikuliX and TextTest.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
import socket
import os
import sys
# Get environment values (defaults allow running outside TextTest).
SUMOFolder = os.environ.get('SUMO_HOME', '.')
neteditApp = os.environ.get('NETEDIT_BINARY', 'netedit')
textTestSandBox = os.environ.get('TEXTTEST_SANDBOX', '.')
# Write the environment variables to currentEnvironment.tmp so that the
# SikuliX test script can pick them up ("with" guarantees the handle is
# closed; the old code left an open module-level "file", shadowing the
# builtin of the same name).
with open(SUMOFolder + "/tests/netedit/currentEnvironment.tmp", "w") as env_file:
    env_file.write(neteditApp + "\n" + textTestSandBox)
# Check the status of the SikuliX server.
statusSocket = socket.socket()
try:
    statusSocket.connect(("localhost", 50001))
    statusSocket.send("GET / HTTP/1.1\n\n")
    statusReceived = statusSocket.recv(1024)
    statusSocket.close()
except socket.error:
    # Cannot connect to the SikulixServer, so it isn't running at all.
    sys.exit("Sikulix server isn't running")
# If the reply contains "200 OK" the Sikulix server is ready, otherwise it
# is occupied. BUG FIX: this check used to live inside the try block with a
# bare "except:", which caught the SystemExit raised by sys.exit() below
# and mis-reported "not ready" as "isn't running". Only connection errors
# are caught now, and the readiness check happens after the try block.
if "200 OK" not in statusReceived:
    sys.exit("Sikulix server not ready")
# IMAGES: register the image resources folder with the server.
imagesSocket = socket.socket()
imagesSocket.connect(("localhost", 50001))
# From Sikuli 1.1.1 the path has to be fixed (Windows drive prefix removed).
SUMOFolderPathFixed = SUMOFolder.replace("c:\\", "")
imagesSocket.send("GET /images/" + SUMOFolderPathFixed +
                  "/tests/netedit/imageResources HTTP/1.1\n\n")
imagesReceived = (imagesSocket.recv(1024))
imagesSocket.close()
if "200 OK" not in imagesReceived:
    sys.exit("Error adding imageResources folder '" +
             SUMOFolderPathFixed + "/tests/netedit/imageResources'")
# SCRIPT: register the TextTest sandbox folder containing test.sikuli.
scriptSocket = socket.socket()
scriptSocket.connect(("localhost", 50001))
scriptSocket.send("GET /scripts/" + textTestSandBox + " HTTP/1.1\n\n")
scriptReceived = (scriptSocket.recv(1024))
scriptSocket.close()
if "200 OK" not in scriptReceived:
    sys.exit("Error adding script folder '" + textTestSandBox + "'")
# RUN: ask the server to execute the test script.
# NOTE(review): sending str over a socket works on Python 2 only; on
# Python 3 these sends would need bytes — confirm the intended interpreter.
runSocket = socket.socket()
runSocket.connect(("localhost", 50001))
runSocket.send("GET /run/test.sikuli HTTP/1.1\n\n")
runReceived = (runSocket.recv(1024))
runSocket.close()
if "200 OK" not in runReceived:
    sys.exit("error running 'test.sikuli' %s" % runReceived)
| 33.910256 | 78 | 0.72552 |
ace77fc5bc24609ecfa9a2c4b8a92b1fda20a21a | 2,359 | py | Python | cumulusci/tasks/salesforce/tests/test_InstallPackageVersion.py | hamedizadpanah-ibm/CumulusCI | eb93723e2da1ca66a7639b3197e6fab02d1bd24a | [
"BSD-3-Clause"
] | null | null | null | cumulusci/tasks/salesforce/tests/test_InstallPackageVersion.py | hamedizadpanah-ibm/CumulusCI | eb93723e2da1ca66a7639b3197e6fab02d1bd24a | [
"BSD-3-Clause"
] | null | null | null | cumulusci/tasks/salesforce/tests/test_InstallPackageVersion.py | hamedizadpanah-ibm/CumulusCI | eb93723e2da1ca66a7639b3197e6fab02d1bd24a | [
"BSD-3-Clause"
] | null | null | null | import base64
import io
from unittest import mock
import unittest
import zipfile
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.salesforce_api.exceptions import MetadataApiError
from cumulusci.tasks.salesforce import InstallPackageVersion
from cumulusci.tests.util import create_project_config
from .util import create_task
class TestInstallPackageVersion(unittest.TestCase):
    """Tests for the InstallPackageVersion Salesforce task."""
    def test_run_task_with_retry(self):
        """The task retries when the package is not yet available.

        The first API call raises a 'not yet available' MetadataApiError
        and the second succeeds, so the API is invoked exactly twice.
        """
        project_config = create_project_config()
        project_config.get_latest_version = mock.Mock(return_value="1.0")
        project_config.config["project"]["package"]["namespace"] = "ns"
        task = create_task(InstallPackageVersion, {"version": "latest"}, project_config)
        not_yet = MetadataApiError("This package is not yet available", None)
        api = mock.Mock(side_effect=[not_yet, None])
        task.api_class = mock.Mock(return_value=api)
        task()
        self.assertEqual(2, api.call_count)
    def test_run_task__options(self):
        """Install options are rendered into the installedPackage metadata."""
        project_config = create_project_config()
        project_config.get_latest_version = mock.Mock(return_value="1.0 (Beta 1)")
        project_config.config["project"]["package"]["namespace"] = "ns"
        task = create_task(
            InstallPackageVersion,
            {
                "version": "latest_beta",
                "activateRSS": True,
                "password": "astro",
                "security_type": "NONE",
            },
            project_config,
        )
        api = task._get_api()
        # The package zip is base64-encoded; decode it and inspect the
        # generated installedPackages entry for the namespace.
        zf = zipfile.ZipFile(io.BytesIO(base64.b64decode(api.package_zip)), "r")
        package_xml = zf.read("installedPackages/ns.installedPackage")
        self.assertIn(b"<activateRSS>true</activateRSS", package_xml)
        self.assertIn(b"<password>astro</password>", package_xml)
        self.assertIn(b"<securityType>NONE</securityType>", package_xml)
    def test_run_task__bad_security_type(self):
        """An invalid security_type option raises TaskOptionsError."""
        project_config = create_project_config()
        project_config.get_latest_version = mock.Mock(return_value="1.0")
        project_config.config["project"]["package"]["namespace"] = "ns"
        with self.assertRaises(TaskOptionsError):
            create_task(
                InstallPackageVersion,
                {"version": "latest", "security_type": "BOGUS"},
                project_config,
            )
| 41.385965 | 88 | 0.666808 |
ace77fcae84d75e5e564cc4c62109d90bfdb7842 | 3,843 | bzl | Python | internal/common/check_bazel_version.bzl | Aghassi/rules_nodejs | 3eb42603c440f7e8496f2e6812337eb47827ff6a | [
"Apache-2.0"
] | 645 | 2017-08-22T22:18:51.000Z | 2022-03-31T11:50:53.000Z | internal/common/check_bazel_version.bzl | bolitt/rules_nodejs | ba9f82103c6122bb316614734489e44552d3d266 | [
"Apache-2.0"
] | 2,172 | 2017-08-26T23:52:39.000Z | 2022-03-31T23:51:29.000Z | internal/common/check_bazel_version.bzl | acidburn0zzz/rules_nodejs | d49a0390ff65a04d5ba8bed212afd2a282e83387 | [
"Apache-2.0"
] | 570 | 2017-08-24T19:57:44.000Z | 2022-03-29T12:09:04.000Z | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check Bazel version
We recommend forcing all users to update to at least the same version of Bazel
as the continuous integration, so they don't trip over incompatibilities with
rules used in the project.
"""
load(":check_version.bzl", "check_version", "check_version_range")
# From https://github.com/tensorflow/tensorflow/blob/5541ef4fbba56cf8930198373162dd3119e6ee70/tensorflow/workspace.bzl#L44
# Check that a specific bazel version is being used.
# Args: minimum_bazel_version in the form "<major>.<minor>.<patch>"
def check_bazel_version(minimum_bazel_version, message = ""):
    """Verify that the running Bazel is at least `minimum_bazel_version`.

    Intended for rule implementations that depend on newer Bazel behaviour,
    so users get a clear error instead of an obscure incompatibility. Do not
    call this from user WORKSPACE files; pin Bazel with a .bazelversion file
    there instead.

    Args:
      minimum_bazel_version: a string "<major>.<minor>.<patch>"
      message: optional extra text appended to the failure message,
          e.g. instructions on how to update
    """
    if "bazel_version" not in dir(native):
        # Very old Bazel (< 0.2.1) does not expose native.bazel_version.
        fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" %
             minimum_bazel_version)
    current = native.bazel_version
    if current and not check_version(current, minimum_bazel_version):
        fail("\nCurrent Bazel version is {}, expected at least {}\n{}".format(
            current,
            minimum_bazel_version,
            message,
        ))
# Check that a bazel version being used is in the version range.
# Args:
# minimum_bazel_version in the form "<major>.<minor>.<patch>"
# maximum_bazel_version in the form "<major>.<minor>.<patch>"
def check_bazel_version_range(minimum_bazel_version, maximum_bazel_version, message = ""):
    """Verify that the running Bazel version lies within the given range.

    This should be called from the `WORKSPACE` file so that the build fails
    as early as possible. For example:
    ```
    # in WORKSPACE:
    load("@build_bazel_rules_nodejs//:index.bzl", "check_bazel_version_range")
    check_bazel_version_range("0.11.0", "0.22.0")
    ```

    Args:
      minimum_bazel_version: a string "<major>.<minor>.<patch>"
      maximum_bazel_version: a string "<major>.<minor>.<patch>"
      message: optional extra text appended to the failure message
    """
    if "bazel_version" not in dir(native):
        # Very old Bazel (< 0.2.1) does not expose native.bazel_version.
        fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" %
             minimum_bazel_version)
    current = native.bazel_version
    if not current:
        # Unreleased Bazel builds report an empty version string; warn only.
        print("\nCurrent Bazel is not a release version, cannot check for " +
              "compatibility.")
        print("Make sure that you are running at least Bazel %s.\n" % minimum_bazel_version)
        return
    if not check_version_range(current, minimum_bazel_version, maximum_bazel_version):
        fail("\nCurrent Bazel version is {}, expected >= {} and <= {}\n{}".format(
            current,
            minimum_bazel_version,
            maximum_bazel_version,
            message,
        ))
| 41.322581 | 122 | 0.706479 |
ace780148b1b88503c9dc58fd6b663e5cb8ea6c4 | 12,578 | py | Python | datumaro/plugins/camvid_format.py | samrathkumawat1/cvat-json-dataumaro | 02e225814aecd54204ec14d6207d2655ff5f6f55 | [
"MIT"
] | null | null | null | datumaro/plugins/camvid_format.py | samrathkumawat1/cvat-json-dataumaro | 02e225814aecd54204ec14d6207d2655ff5f6f55 | [
"MIT"
] | null | null | null | datumaro/plugins/camvid_format.py | samrathkumawat1/cvat-json-dataumaro | 02e225814aecd54204ec14d6207d2655ff5f6f55 | [
"MIT"
] | null | null | null |
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
import os.path as osp
from collections import OrderedDict
from enum import Enum
from glob import glob
import numpy as np
from datumaro.components.converter import Converter
from datumaro.components.extractor import (AnnotationType, CompiledMask,
DatasetItem, Importer, LabelCategories, Mask,
MaskCategories, SourceExtractor)
from datumaro.util import find, str_to_bool
from datumaro.util.image import save_image
from datumaro.util.mask_tools import lazy_mask, paint_mask, generate_colormap
# Default CamVid palette: the 32 label names mapped to their RGB mask colours.
# 'Void' must stay first with colour (0, 0, 0) (see make_camvid_categories).
CamvidLabelMap = OrderedDict([
    ('Void', (0, 0, 0)),
    ('Animal', (64, 128, 64)),
    ('Archway', (192, 0, 128)),
    ('Bicyclist', (0, 128, 192)),
    ('Bridge', (0, 128, 64)),
    ('Building', (128, 0, 0)),
    ('Car', (64, 0, 128)),
    ('CartLuggagePram', (64, 0, 192)),
    ('Child', (192, 128, 64)),
    ('Column_Pole', (192, 192, 128)),
    ('Fence', (64, 64, 128)),
    ('LaneMkgsDriv', (128, 0, 192)),
    ('LaneMkgsNonDriv', (192, 0, 64)),
    ('Misc_Text', (128, 128, 64)),
    ('MotorcycycleScooter', (192, 0, 192)),
    ('OtherMoving', (128, 64, 64)),
    ('ParkingBlock', (64, 192, 128)),
    ('Pedestrian', (64, 64, 0)),
    ('Road', (128, 64, 128)),
    ('RoadShoulder', (128, 128, 192)),
    ('Sidewalk', (0, 0, 192)),
    ('SignSymbol', (192, 128, 128)),
    ('Sky', (128, 128, 128)),
    ('SUVPickupTruck', (64, 128, 192)),
    ('TrafficCone', (0, 0, 64)),
    ('TrafficLight', (0, 64, 64)),
    ('Train', (192, 64, 128)),
    ('Tree', (128, 128, 0)),
    ('Truck_Bus', (192, 128, 192)),
    ('Tunnel', (64, 0, 64)),
    ('VegetationMisc', (192, 192, 0)),
    ('Wall', (64, 192, 0))
])
class CamvidPath:
    # Constants describing the on-disk layout of a CamVid dataset.
    LABELMAP_FILE = 'label_colors.txt'  # colour map file in the dataset root
    SEGM_DIR = "annot"  # suffix appended to a subset name for its mask dir
    IMAGE_EXT = '.png'  # extension used for both images and masks
def parse_label_map(path):
    """Read a label_colors.txt file into an OrderedDict of name -> colour.

    Each significant line is either "R G B name" (colour entry) or a bare
    "name" (no predefined colour, stored as None). Blank lines and lines
    starting with '#' are ignored. Returns None when no path is given;
    raises ValueError on a duplicate label name.
    """
    if not path:
        return None
    label_map = OrderedDict()
    with open(path, 'r') as f:
        for raw_line in f:
            entry = raw_line.strip()
            # Skip blank lines and comments.
            if not entry or entry.startswith('#'):
                continue
            fields = entry.split()
            if len(fields) > 2:
                # "R G B name" style entry.
                name = fields[3]
                color = tuple(int(channel) for channel in fields[:-1])
            else:
                # Bare label name without a colour.
                name = fields[0]
                color = None
            if name in label_map:
                raise ValueError("Label '%s' is already defined" % name)
            label_map[name] = color
    return label_map
def write_label_map(path, label_map):
    """Write *label_map* (name -> RGB tuple or None) in label_colors.txt form.

    Each line is "R G B name"; colourless entries get an empty colour field.
    """
    with open(path, 'w') as f:
        for name, color in label_map.items():
            color_field = ' '.join(str(channel) for channel in color) if color else ''
            f.write('%s %s\n' % (color_field, name))
def make_camvid_categories(label_map=None):
    """
    Build Datumaro categories from a label map.

    :param label_map: ordered mapping of label name -> RGB tuple (or None
        for labels without a predefined colour); defaults to CamvidLabelMap
    :returns: dict with AnnotationType.label and AnnotationType.mask entries
    """
    if label_map is None:
        label_map = CamvidLabelMap
    # There must always be a label with color (0, 0, 0) at index 0
    bg_label = find(label_map.items(), lambda x: x[1] == (0, 0, 0))
    if bg_label is not None:
        bg_label = bg_label[0]
    else:
        # No (0, 0, 0) entry exists: insert a 'background' label and move
        # it to the front of the (ordered) map.
        bg_label = 'background'
        if bg_label not in label_map:
            has_colors = any(v is not None for v in label_map.values())
            color = (0, 0, 0) if has_colors else None
            label_map[bg_label] = color
        label_map.move_to_end(bg_label, last=False)
    categories = {}
    label_categories = LabelCategories()
    for label, desc in label_map.items():
        label_categories.add(label)
    categories[AnnotationType.label] = label_categories
    has_colors = any(v is not None for v in label_map.values())
    if not has_colors: # generate new colors
        colormap = generate_colormap(len(label_map))
    else: # only copy defined colors
        # NOTE(review): this branch assumes every label has a colour when
        # any does; a partially coloured map would fail on desc[0] — confirm.
        label_id = lambda label: label_categories.find(label)[0]
        colormap = { label_id(name): (desc[0], desc[1], desc[2])
            for name, desc in label_map.items() }
    mask_categories = MaskCategories(colormap)
    # Accessed for its side effect (the pragma marks the statement as
    # deliberately "pointless").
    mask_categories.inverse_colormap # pylint: disable=pointless-statement
    categories[AnnotationType.mask] = mask_categories
    return categories
class CamvidExtractor(SourceExtractor):
    """Reads one CamVid subset from a <subset>.txt list file."""
    def __init__(self, path):
        # *path* is the subset list file, e.g. <dataset_dir>/train.txt;
        # the subset name is taken from the file name.
        assert osp.isfile(path), path
        self._path = path
        self._dataset_dir = osp.dirname(path)
        super().__init__(subset=osp.splitext(osp.basename(path))[0])
        self._categories = self._load_categories(self._dataset_dir)
        self._items = list(self._load_items(path).values())
    def _load_categories(self, path):
        """Use label_colors.txt from the dataset root if present,
        otherwise fall back to the default CamVid label map."""
        label_map = None
        label_map_path = osp.join(path, CamvidPath.LABELMAP_FILE)
        if osp.isfile(label_map_path):
            label_map = parse_label_map(label_map_path)
        else:
            label_map = CamvidLabelMap
        self._labels = [label for label in label_map]
        return make_camvid_categories(label_map)
    def _load_items(self, path):
        """Parse the subset file; each line is '<image> [<mask>]'."""
        items = {}
        with open(path, encoding='utf-8') as f:
            for line in f:
                objects = line.split()
                image = objects[0]
                # Item id: image path with the first two path components
                # (e.g. '/<subset>') and the extension stripped.
                item_id = ('/'.join(image.split('/')[2:]))[:-len(CamvidPath.IMAGE_EXT)]
                # Drop a leading '/' before joining with the dataset dir.
                image_path = osp.join(self._dataset_dir,
                    (image, image[1:])[image[0] == '/'])
                item_annotations = []
                if 1 < len(objects):
                    # A mask path is present: load it and emit one Mask
                    # annotation per class found in it.
                    gt = objects[1]
                    gt_path = osp.join(self._dataset_dir,
                        (gt, gt[1:]) [gt[0] == '/'])
                    inverse_cls_colormap = \
                        self._categories[AnnotationType.mask].inverse_colormap
                    mask = lazy_mask(gt_path, inverse_cls_colormap)
                    # loading mask through cache
                    mask = mask()
                    classes = np.unique(mask)
                    labels = self._categories[AnnotationType.label]._indices
                    labels = { labels[label_name]: label_name
                        for label_name in labels }
                    for label_id in classes:
                        if labels[label_id] in self._labels:
                            image = self._lazy_extract_mask(mask, label_id)
                            item_annotations.append(Mask(image=image, label=label_id))
                items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                    image=image_path, annotations=item_annotations)
        return items
    @staticmethod
    def _lazy_extract_mask(mask, c):
        # Deferred boolean mask for a single class id.
        return lambda: mask == c
class CamvidImporter(Importer):
    @classmethod
    def find_sources(cls, path):
        """Find subset .txt files under *path*, excluding the labelmap file."""
        return cls._find_sources_recursive(path, '.txt', 'camvid',
            file_filter=lambda p: osp.basename(p) != CamvidPath.LABELMAP_FILE)
# How the output label map is chosen on export (see CamvidConverter):
# 'camvid' uses the default palette, 'source' derives it from the input.
LabelmapType = Enum('LabelmapType', ['camvid', 'source'])
class CamvidConverter(Converter):
    """Writes a Datumaro dataset out in CamVid format."""
    DEFAULT_IMAGE_EXT = '.png'
    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        """Extend the base parser with CamVid-specific options."""
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('--apply-colormap', type=str_to_bool, default=True,
            help="Use colormap for class masks (default: %(default)s)")
        # NOTE(review): cls._get_labelmap is not defined in this class;
        # presumably inherited or intended to be added — confirm.
        parser.add_argument('--label-map', type=cls._get_labelmap, default=None,
            help="Labelmap file path or one of %s" % \
                ', '.join(t.name for t in LabelmapType))
        # BUG FIX: the parser was built but never returned, so callers
        # received None.
        return parser
    def __init__(self, extractor, save_dir,
            apply_colormap=True, label_map=None, **kwargs):
        """
        :param apply_colormap: paint class masks with the colormap when True
        :param label_map: labelmap file path, a dict, or a LabelmapType
            name; defaults to 'source'
        """
        super().__init__(extractor, save_dir, **kwargs)
        self._apply_colormap = apply_colormap
        if label_map is None:
            label_map = LabelmapType.source.name
        self._load_categories(label_map)
    def apply(self):
        """Write each subset: masks, optional images, and the list files."""
        subset_dir = self._save_dir
        os.makedirs(subset_dir, exist_ok=True)
        for subset_name, subset in self._extractor.subsets().items():
            # segm_list records, per item id, whether a mask was written.
            segm_list = {}
            for item in subset:
                masks = [a for a in item.annotations
                    if a.type == AnnotationType.mask]
                if masks:
                    # Flatten the instance masks into one class mask,
                    # remapping labels into the output label space.
                    compiled_mask = CompiledMask.from_instance_masks(masks,
                        instance_labels=[self._label_id_mapping(m.label)
                            for m in masks])
                    self.save_segm(osp.join(subset_dir,
                            subset_name + CamvidPath.SEGM_DIR,
                            item.id + CamvidPath.IMAGE_EXT),
                        compiled_mask.class_mask)
                    segm_list[item.id] = True
                else:
                    segm_list[item.id] = False
                if self._save_images:
                    self._save_image(item, osp.join(subset_dir, subset_name,
                        item.id + CamvidPath.IMAGE_EXT))
            self.save_segm_lists(subset_name, segm_list)
        self.save_label_map()
    def save_segm(self, path, mask, colormap=None):
        """Save a class mask, painted with the colormap when enabled."""
        if self._apply_colormap:
            if colormap is None:
                colormap = self._categories[AnnotationType.mask].colormap
            mask = paint_mask(mask, colormap)
        save_image(path, mask, create_dir=True)
    def save_segm_lists(self, subset_name, segm_list):
        """Write <subset>.txt with '<image> <mask>' lines.

        The mask field is empty for items that have no mask.
        """
        if not segm_list:
            return
        ann_file = osp.join(self._save_dir, subset_name + '.txt')
        with open(ann_file, 'w') as f:
            for item in segm_list:
                if segm_list[item]:
                    path_mask = '/%s/%s' % (subset_name + CamvidPath.SEGM_DIR,
                        item + CamvidPath.IMAGE_EXT)
                else:
                    path_mask = ''
                f.write('/%s/%s %s\n' % (subset_name,
                    item + CamvidPath.IMAGE_EXT, path_mask))
    def save_label_map(self):
        """Write the label map used for this export to label_colors.txt."""
        path = osp.join(self._save_dir, CamvidPath.LABELMAP_FILE)
        labels = self._extractor.categories()[AnnotationType.label]._indices
        # Drop the synthetic 'background' entry if make_camvid_categories
        # added one that is not a real source label.
        if len(self._label_map) > len(labels):
            self._label_map.pop('background')
        write_label_map(path, self._label_map)
    def _load_categories(self, label_map_source):
        """Resolve *label_map_source* into categories and an id mapping."""
        if label_map_source == LabelmapType.camvid.name:
            # use the default Camvid colormap
            label_map = CamvidLabelMap
        elif label_map_source == LabelmapType.source.name and \
                AnnotationType.mask not in self._extractor.categories():
            # generate colormap for input labels
            labels = self._extractor.categories() \
                .get(AnnotationType.label, LabelCategories())
            label_map = OrderedDict((item.name, None)
                for item in labels.items)
        elif label_map_source == LabelmapType.source.name and \
                AnnotationType.mask in self._extractor.categories():
            # use source colormap
            labels = self._extractor.categories()[AnnotationType.label]
            colors = self._extractor.categories()[AnnotationType.mask]
            label_map = OrderedDict()
            for idx, item in enumerate(labels.items):
                color = colors.colormap.get(idx)
                if color is not None:
                    label_map[item.name] = color
        elif isinstance(label_map_source, dict):
            label_map = OrderedDict(
                sorted(label_map_source.items(), key=lambda e: e[0]))
        elif isinstance(label_map_source, str) and osp.isfile(label_map_source):
            label_map = parse_label_map(label_map_source)
        else:
            raise Exception("Wrong labelmap specified, "
                "expected one of %s or a file path" % \
                ', '.join(t.name for t in LabelmapType))
        self._categories = make_camvid_categories(label_map)
        self._label_map = label_map
        self._label_id_mapping = self._make_label_id_map()
    def _make_label_id_map(self):
        """Return a function mapping source label ids to output label ids.

        Source labels that are absent from the output map fall back to 0.
        """
        source_labels = {
            id: label.name for id, label in
            enumerate(self._extractor.categories().get(
                AnnotationType.label, LabelCategories()).items)
        }
        target_labels = {
            label.name: id for id, label in
            enumerate(self._categories[AnnotationType.label].items)
        }
        id_mapping = {
            src_id: target_labels.get(src_label, 0)
            for src_id, src_label in source_labels.items()
        }
        def map_id(src_id):
            return id_mapping.get(src_id, 0)
        return map_id
| 36.994118 | 87 | 0.582207 |
ace780a4599ef0531e444ac725896a770ffcc9df | 527 | py | Python | Chapter03/c3_25_sort_Python.py | andrewjcoxon/Hands-On-Data-Science-with-Anaconda | 82504a059ecd284b3599fa9af2b3eb6bbd6e28f3 | [
"MIT"
] | 25 | 2018-06-25T16:21:09.000Z | 2022-02-08T09:28:29.000Z | Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter03/c3_25_sort_Python.py | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | null | null | null | Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter03/c3_25_sort_Python.py | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | 17 | 2018-06-15T02:55:30.000Z | 2022-03-09T15:24:42.000Z | """
Name : c3_25_sort_python.py
Book : Hands-on Data Science with Anaconda)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan and James Yan
Date : 1/15/2018
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import pandas as pd
a = pd.DataFrame([[8,3],[8,2],[1,-1]],columns=['X','Y'])
print(a)
# sort by X ascending, then Y descending (1/0 act as True/False flags)
b= a.sort_values(['X', 'Y'], ascending=[1, 0])
print(b)
# sort by X and Y, both ascending
c= a.sort_values(['X', 'Y'], ascending=[1, 1])
print(c) | 26.35 | 56 | 0.618596 |
ace780cc81d90545cb7ff4dcf2fe77934d7e0a75 | 18 | py | Python | main/libxt/update.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | 46 | 2021-06-10T02:27:32.000Z | 2022-03-27T11:33:24.000Z | main/libxt/update.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | 58 | 2021-07-03T13:58:20.000Z | 2022-03-13T16:45:35.000Z | main/libxt/update.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | 6 | 2021-07-04T10:46:40.000Z | 2022-01-09T00:03:59.000Z | pkgname = "libXt"
| 9 | 17 | 0.666667 |
ace781be8beaad25854e71644055e987fa28bf17 | 3,449 | py | Python | spectro_data/lamost.py | ktfm2/Kai_updates | f731922d3e140c1f16ea9b4b45f39232fe19a1ba | [
"MIT"
] | 1 | 2020-03-30T02:33:45.000Z | 2020-03-30T02:33:45.000Z | spectro_data/lamost.py | ktfm2/Kai_updates | f731922d3e140c1f16ea9b4b45f39232fe19a1ba | [
"MIT"
] | null | null | null | spectro_data/lamost.py | ktfm2/Kai_updates | f731922d3e140c1f16ea9b4b45f39232fe19a1ba | [
"MIT"
] | 2 | 2018-09-26T05:15:33.000Z | 2020-09-27T21:10:11.000Z | from utils import *
import sys
sys.path.append('../')
import cross_match
def format_columns(data):
    """Rename LAMOST-specific columns onto the common spectroscopic schema.

    Also stringifies the index (``rename(index=str)``), matching the original
    behaviour.  Returns a new DataFrame; the input is left unmodified.
    """
    renames = {
        'teff_err': 'e_teff',
        'logg_err': 'e_logg',
        'feh_err': 'e_fe_h',
        'feh': 'fe_h',
        'rv': 'hrv',
        'rv_err': 'e_hrv',
    }
    return data.rename(index=str, columns=renames)
def load_data(add_masses=True):
    """Fetch the LAMOST DR5 catalogue, join the DR4 VAC and photometry, and
    optionally merge in Cannon masses.

    :param add_masses: when True, merge masses from the Cannon results file;
        stars without a mass get mass=0 and mass_error=-1.
    :return: assembled pandas DataFrame
    """
    # Main LAMOST DR5 stellar parameter table from the remote database.
    data = sqlutil.get("""select * from lamost_dr5.stellar as l""",
                       host='cappc127', user='jason_sanders',
                       password=wsdbpassword,
                       preamb='set enable_seqscan to off; ' +
                       'set enable_mergejoin to off; ' +
                       'set enable_hashjoin to off;', asDict=True,
                       strLength=30)
    df = pd.DataFrame(columns=data.keys())
    for k in data.keys():
        df[k] = data[k]
    # Observation id used to join DR5 with the DR4 value-added catalogue:
    # <date>-<plan>-<spectrograph>-<fiber>.
    df['matchid']=df['obsdate'].str.replace('-','')+'-'+df['planid']+'-'+\
        df['spid'].astype(str)+'-'+df['fiberid'].astype(str)
    data_VAC = sqlutil.get("""select * from lamost_dr4.vac as l""",
                           host='cappc127', user='jason_sanders',
                           password=wsdbpassword,
                           preamb='set enable_seqscan to off; ' +
                           'set enable_mergejoin to off; ' +
                           'set enable_hashjoin to off;', asDict=True,
                           strLength=30)
    df_VAC = pd.DataFrame(columns=data_VAC.keys())
    for k in data_VAC.keys():
        df_VAC[k] = data_VAC[k]
    df_VAC['matchid'] = df_VAC['date']+'-'+df_VAC['plate']+'-'+\
        df_VAC['sp_id'].astype(str)+'-'+df_VAC['fibre_id'].astype(str)
    df = df.merge(df_VAC,on='matchid',how='left',suffixes=('','_vac'))
    # Attach 2MASS (quality-filtered) and SDSS photometry by sky position.
    cm2MASS = crossmatch_2MASS(df.ra.values, df.dec.values)
    cm2MASS = quality_2MASS_phot(cm2MASS)
    cmSDSS = crossmatch_SDSS(df.ra.values, df.dec.values)
    for k in ['J', 'eJ', 'H', 'eH', 'K', 'eK']:
        df[k] = cm2MASS[k].values
    for k in cmSDSS.keys():
        df[k] = cmSDSS[k]
    # Magnitudes to use downstream; correlation terms default to zero.
    df['mag_use'] = [np.array(['J', 'H', 'K', 'G']) for i in range(len(df))]
    df['rho_Tg'] = 0.
    df['rho_TZ'] = 0.
    df['rho_gZ'] = 0.
    if not add_masses:
        return df
    output_file = '/data/jls/GaiaDR2/spectro/lamost_cannon/LAMOST_results.hdf5'
    t = pd.read_hdf(output_file)
    # Quality cuts on the Cannon results (giant-branch parameter box).
    fltr = (t.TEFF>4000.)&(t.TEFF<5250.)&(t.LOGG>1.)&(t.LOGG<3.3)&(t.M_H>-1.5)&(t.M_H<0.5)
    fltr &= (t.r_chi_sq < 3.) #& (t.in_convex_hull == True)
    t = t[fltr].reset_index(drop=True)
    df = df.merge(t[['obsid','mass','mass_error']], how='left')
    # NaN != NaN selects rows that got no Cannon mass in the merge.
    fltr = df.mass!=df.mass
    df.loc[fltr,'mass'] = 0.
    df.loc[fltr,'mass_error'] = -1.
    return df
def load_and_match(output_file='/data/jls/GaiaDR2/spectro/LAMOST_input.hdf5',
                   use_dr1=False):
    """Return the LAMOST input catalogue, building and caching it if absent."""
    cached, catalogue = check_and_load(
        output_file, 'LAMOST DR5 A, F, G, K catalogue')
    if cached:
        return catalogue

    catalogue = format_columns(load_data())
    # Secondary Gaia match (5 arcsec, proper motions as configured) used only
    # as a cross-check on the primary match's source ids.
    alt = cross_match.crossmatch_gaia_spectro(catalogue, no_proper_motion=False, dist_max=5.)
    catalogue = cross_match.crossmatch_gaia_spectro(catalogue, dr1=use_dr1, epoch=2000.)
    disagree = (alt.source_id > 0) & (catalogue.source_id != alt.source_id)
    if np.count_nonzero(disagree) > 0:
        # Prefer the secondary match wherever the two disagree.
        catalogue.loc[disagree] = alt.loc[disagree]
    catalogue = catalogue.reset_index(drop=True)

    write_input_file(catalogue, output_file, 'LAMOST DR5 A,F,G,K catalogue')
    return catalogue
| 34.148515 | 90 | 0.563352 |
ace781d8a32e831f3b2479a0c82689a0acc20742 | 19,821 | py | Python | imagepath.py | gitwipo/path_utils | 99cc8f49d8faaa00341febd89fad839619204c04 | [
"Apache-2.0"
] | null | null | null | imagepath.py | gitwipo/path_utils | 99cc8f49d8faaa00341febd89fad839619204c04 | [
"Apache-2.0"
] | null | null | null | imagepath.py | gitwipo/path_utils | 99cc8f49d8faaa00341febd89fad839619204c04 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2019 Wilfried Pollan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# # http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
file=imagepath.py
Seperate image into parts.
Get image values
Get/Set base name
Get/Set frame
Get/Set version
"""
__version__ = '3.0.0'
__author__ = 'Wilfried Pollan'
# MODULES
import os.path
import re
class Image(object):
    """
    Manipulates vfx image path values: reads and rewrites the base name,
    frame number and version embedded in an image file path.
    """
    # Full path of the image currently represented.
    IMAGE = None
    # Cached parsed components, as returned by get_image_values().
    IMAGE_DICT = None
    def __init__(self, image=None):
        """
        Init the Image class.
        It sets up all basic variables for the input image.

        :params image: path to an image file
        :type image: str
        """
        # Path components, filled in by _get_basic_parts().
        self.image_path = None
        self.image_name = None
        self.name = None
        self.ext = None
        # Full input path; every parser below reads from this attribute.
        self.IMAGE = image
        # Split IMAGE into directory / file name / name / extension.
        self._get_basic_parts()
        # [base name, frame prefix, frame, digits, %0Nd notation, #### hash]
        self._name_list = self._split_name()
        # Cache of all parsed components.
        self.IMAGE_DICT = self.get_image_values()
# REGEX FUNCTIONS
def _regex_version(self):
"""
Create version regex string.
:return: re_major_version, re_prefix_major_version,
re_prefix_major_minor_version
:rtype: tuple(str)
"""
re_major_version = r'^([v|V])(\d+)'
re_prefix_major_version = r'([.|_|-])([v|V])(\d+)*'
re_prefix_major_minor_version = r'([.|_|-])([v|V])(\d+)([.|_|-])(\d+)'
return (re_major_version, re_prefix_major_version,
re_prefix_major_minor_version)
def _regex_frame(self):
"""
Create frame regex string
:return: re_frame, re_frame_only
:rtype: tuple(str)
"""
re_frame = r'([.|_|-])((\d+)|(%0\dd)|(#+))\Z'
re_frame_only = r'^((\d+)|(%0\dd)|(#+))\Z'
return re_frame, re_frame_only
def _re_compile_version(self):
"""
Compile re version object.
:return: re_major_version, re_prefix_major_version,
re_prefix_major_minor_version
:rtype: tuple(re object)
"""
re_major_version = re.compile(self._regex_version()[0])
re_prefix_major_version = re.compile(self._regex_version()[1])
re_prefix_major_minor_version = re.compile(self._regex_version()[2])
return (re_major_version, re_prefix_major_version,
re_prefix_major_minor_version)
def _re_compile_frame(self):
"""
Compile re frame object.
:return: re_frame, re_frame_only
:rtype: tuple(re object)
"""
re_frame = re.compile(self._regex_frame()[0])
re_frame_only = re.compile(self._regex_frame()[1])
return re_frame, re_frame_only
# HELPER FUNCTIONS
def _set_padded_number(self, number, padding):
"""
Set padded number.
:params number:
:type number: int
:params padding:
:type padding: int
:return: padded number string
:rtype: str
"""
return '%0{}d'.format(padding) % number
# FUNCTIONS
def _get_basic_parts(self):
"""
Get path, name, ext
:return: [dirname, name, ext]
:rtype: list(str)
"""
self.image_path = os.path.dirname(self.IMAGE)
self.image_name = os.path.basename(self.IMAGE)
self.name, self.ext = os.path.splitext(self.image_name)
def _split_name(self):
"""
Split image into base name, prefix & frame part
:return: [basename, frame_prefix, frame]
or if frame_parts=True:
[basename, frame_prefix, frame,
frame_digit, frame_notation, frame_hash]
:rtype: list
"""
re_frame, re_frame_only = self._re_compile_frame()
self._get_basic_parts()
name_list = []
try:
name_list = re_frame.split(self.name)
if len(name_list) == 1:
name_list = re_frame_only.split(self.name)
if len(name_list) > 1:
name_list.insert(0, None)
else:
name_list.extend([None, None])
name_list = name_list[:6]
except IndexError:
pass
name_list = [None if v is '' else v for v in name_list]
return name_list
def get_b_name(self):
"""
Get image base name.
:return: base name
:rtype: str
"""
return self._name_list[0]
def set_b_name(self, new_name):
"""
Set image base name.
:params new_name: base name to use for the rename
:type new_name: str
:return: image
:rtype: str
"""
name_list = self._name_list
name_list = ['' if v is None else v for v in name_list]
new_name = new_name + ''.join(name_list[1:3])
self.IMAGE = os.path.join(self.image_path, new_name) + self.ext
self._name_list = self._split_name()
return self.IMAGE
    def get_frame(self):
        """
        Get image frame values.

        :return: frame_dict = {'frame_prefix': frame_prefix,
                               'frame': frame,
                               'frame_padding': padding,
                               'frame_digit': frame_digit,
                               'frame_notation':frame_notation,
                               'frame_hash': frame_hash
                               }
        :rtype: dict
        """
        frame_prefix, frame = None, None
        frame_digit, frame_notation, frame_hash = None, None, None
        # _name_list[2] holds the matched frame token (digits, %0Nd or ####).
        if self._name_list[2]:
            frame_prefix = self._name_list[1]
            frame = self._name_list[2]
            frame_digit = self._name_list[3]
            frame_notation = self._name_list[4]
            frame_hash = self._name_list[5]
        # GET FRAME PADDING
        padding = None
        if frame_digit:
            padding = len(frame)
        elif frame_notation:
            # third char of '%0Nd' - handles single-digit paddings only
            padding = int(frame_notation[2])
        elif frame_hash:
            padding = len(frame_hash)
        # FRAME NOTATION, HASH - derive the missing representations.
        if padding:
            if frame:
                # frame is always set whenever padding is, so this branch
                # always runs; the elifs below are defensive only.
                frame_notation = '%0' + str(padding) + 'd'
                frame_hash = '#' * padding
            elif frame_notation:
                frame_hash = '#' * padding
            elif frame_hash:
                frame_notation = '%0' + str(padding) + 'd'
        frame_dict = {'frame_prefix': frame_prefix,
                      'frame': frame,
                      'frame_padding': padding,
                      'frame_digit': frame_digit,
                      'frame_notation':frame_notation,
                      'frame_hash': frame_hash
                      }
        return frame_dict
    def set_frame(self, new_frame, prefix=None):
        """
        Set image frame value. Can also set the prefix if given.

        :params new_frame: new frame number (digits, %0Nd notation or ####)
        :type new_frame: str
        :params prefix: character to use before the frame e.g. _
        :type prefix: str
        :return: image
        :rtype: str
        :raises ValueError: on a malformed frame or non-string prefix
        """
        new_frame = str(new_frame)
        re_frame, re_frame_only = self._re_compile_frame()
        name_list = self._name_list
        # Check input values
        parm = None
        value = None
        if not re_frame_only.search(new_frame):
            parm = 'new_frame'
            value = new_frame
            error_msg = '{} \"{}\" must be given as frame hash/frame,\
                        notation/digit.'.format(parm, value)
            raise ValueError(error_msg)
        elif prefix and not isinstance(prefix, str):
            parm = 'prefix'
            value = str(prefix)
            error_msg = '{} \"{}\" must be given as string.'.format(parm, value)
            raise ValueError(error_msg)
        # CONVERT NONE TO EMPTY STRING
        name_list = ['' if v is None else v for v in name_list]
        # An existing prefix wins over the *prefix* argument.
        frame_prefix = None
        if name_list[1]:
            frame_prefix = name_list[1]
        elif prefix:
            frame_prefix = prefix
        else:
            frame_prefix = ''
        # Assign with existing frame
        self.name = name_list[0] + frame_prefix + new_frame
        self.IMAGE = os.path.join(self.image_path, self.name) + self.ext
        # Replace values in internal var and refresh the cached dict.
        self._name_list = self._split_name()
        self.IMAGE_DICT = self.get_image_values()
        return self.IMAGE
    def get_version(self, major_minor=False):
        """
        Get all version strings, from both the file name and its folders.

        :params major_minor: Set to True if the image is using two style version
                             convention; default to False
        :type major_minor: bool
        :return: version_dict = {'version_folder_level': version_folder_level,
                                 'version_folder_prefix': version_folder_prefix,
                                 'version_folder': version_folder,
                                 'version_prefix': version_prefix,
                                 'version': version,
                                 'version_sep': version_sep
                                 }
                 version_folder_level is None when no folder carries a version.
        :rtype: dict
        """
        re_version_all = self._re_compile_version()
        re_version_only = re_version_all[0]
        re_version = re_version_all[1]
        re_major_minor_version = re_version_all[2]
        version_folder_prefix = None
        version_folder = None
        version_prefix = None
        version = None
        version_sep = None

        def get_version_result(value):
            """
            Extract (prefix+v, digits) from *value* via the prefixed pattern.
            Raises AttributeError when there is no match.
            """
            re_version_result = re_version.search(value)
            version_prefix = ''.join(re_version_result.group(1, 2))
            version = re_version_result.group(3)
            return version_prefix, version

        def get_version_only_result(value):
            """
            Extract (v, digits) when *value* starts with the version itself.
            Raises AttributeError when there is no match.
            """
            re_version_result = re_version_only.search(value)
            version_prefix = re_version_result.group(1)
            version = re_version_result.group(2)
            return version_prefix, version

        # Get file version (AttributeError = pattern did not match)
        if major_minor:
            try:
                re_version_result_image = re_major_minor_version.search(self.name)
                version_prefix = ''.join(re_version_result_image.group(1, 2))
                version = re_version_result_image.group(3, 5)
                version_sep = re_version_result_image.group(4)
            except AttributeError:
                pass
        else:
            try:
                version_prefix, version = get_version_result(self.name)
            except AttributeError:
                try:
                    version_prefix, version = get_version_only_result(self.name)
                except AttributeError:
                    pass

        # Get folder version: walk up from the innermost folder.
        level = 1
        while level < len(self.image_path.split(os.sep))-1:
            image_folder = self.image_path.split(os.sep)[-level]
            try:
                version_folder_prefix, version_folder = get_version_result(image_folder)
            except AttributeError:
                try:
                    version_folder_prefix, version_folder = get_version_only_result(image_folder)
                except AttributeError:
                    pass
            if version_folder:
                break
            level += 1
        if not version_folder:
            level = None

        version_dict = {'version_folder_level': level,
                        'version_folder_prefix': version_folder_prefix,
                        'version_folder': version_folder,
                        'version_prefix': version_prefix,
                        'version': version,
                        'version_sep': version_sep
                        }
        return version_dict
def set_version(self, new_version, set_folder=True, major_minor=False,
prefix=None, sep=None):
"""
Set the given version.
:params new_version: version as a string without the prefix
:type new_version: str
:params set_folder: Set the version in the folder
:type set_folder: bool
:params major_minor: Set to True if the version is using
major, minor version style
:type major_minor: bool
:params prefix: character to use before the version
:type prefix: str
:params sep: separator to use for major, minor version style
:type sep: str
:return: image
:rtype: str
"""
# Init self.regex
re_version_all = self._re_compile_version()
re_version_only = re_version_all[0]
re_version = re_version_all[1]
re_major_minor_version = re_version_all[2]
# Get current version
version_dict = self.get_version(major_minor)
version_folder_level = version_dict['version_folder_level']
version_folder_prefix = version_dict['version_folder_prefix']
version_folder = version_dict['version_folder']
version_prefix = version_dict['version_prefix']
version = version_dict['version']
version_sep = version_dict['version_sep']
if version_folder_level > 1:
folder_split = self.image_path.split(os.sep)
image_root = os.sep.join(folder_split[:-(version_folder_level)])
image_folder = folder_split[-version_folder_level]
sub_folder = os.sep.join(folder_split[-(version_folder_level-1):])
else:
image_root = os.path.dirname(self.image_path)
image_folder = os.path.basename(self.image_path)
sub_folder = ''
# Assign input parameter
if prefix:
version_prefix = prefix
if version_folder_prefix:
version_folder_prefix = prefix
if sep:
version_sep = sep
# Set version
try:
# Set version in file
if version:
if major_minor:
if isinstance(new_version, (list, tuple)):
sub_major = version_prefix + str(new_version[0])
sub_minor = version_sep + str(new_version[1])
substition = sub_major + sub_minor
self.name = re_major_minor_version.sub(substition, self.name)
else:
substition = version_prefix + str(new_version)
self.name = re_major_minor_version.sub(substition, self.name)
else:
if re_version.search(self.name):
substition = version_prefix + str(new_version)
self.name = re_version.sub(substition, self.name)
elif re_version_only.search(self.name):
substition = version_prefix + str(new_version)
self.name = re_version_only.sub(substition, self.name)
# Set version in folder
if set_folder:
if isinstance(new_version, (list, tuple)):
new_version = new_version[0]
if version_folder:
if re_version.search(image_folder):
substition = version_folder_prefix + str(new_version)
image_folder = re_version.sub(substition, image_folder)
elif re_version_only.search(image_folder):
substition = version_folder_prefix + str(new_version)
image_folder = re_version_only.sub(substition, image_folder)
# Generate image string
self.image_path = os.path.join(image_root, image_folder, sub_folder)
self.IMAGE = os.path.join(self.image_path, self.name) + self.ext
self._name_list = self._split_name()
return self.IMAGE
except (AttributeError, TypeError) as err:
error_msg = 'Wrong input. Error: {}'.format(err)
raise ValueError(error_msg)
def get_image_values(self, major_minor=False):
"""
Get all image part values.
:params major_minor: Set to True if the version is using
major, minor version style
:type major_minor: bool
:return: image_dict = {'path': image_path,
'name': b_name,
'ext': ext,
'version_folder_level': version_folder_level,
'version_folder_prefix': version_folder_prefix,
'version_folder': version_folder,
'version_prefix': version_prefix,
'version': version,
'version_sep': version_sep,
'frame_prefix': frame_prefix,
'frame': frame,
'frame_padding': padding,
'frame_notation': frame_notation,
'frame_hash': frame_hash
}
:rtype: dict
"""
# FRAME
frame_dict = self.get_frame()
# VERSION
version_dict = self.get_version(major_minor)
# GENERATE IMAGE DICT
image_dict = {'path': self.image_path,
'name': self._name_list[0],
'ext': self.ext,
'version_folder_level': version_dict['version_folder_level'],
'version_folder_prefix': version_dict['version_folder_prefix'],
'version_folder': version_dict['version_folder'],
'version_prefix': version_dict['version_prefix'],
'version': version_dict['version'],
'version_sep': version_dict['version_sep'],
'frame_prefix': frame_dict['frame_prefix'],
'frame': frame_dict['frame'],
'frame_padding': frame_dict['frame_padding'],
'frame_notation': frame_dict['frame_notation'],
'frame_hash': frame_dict['frame_hash']
}
return image_dict
| 34.591623 | 97 | 0.539277 |
ace782191d3300670c59744172f3cd5e6abf7e61 | 1,603 | py | Python | utils/mergeMultiloc.py | ClaudiaRaffaelli/Protein-subcellular-localization | 38a40c7389ee717954c254114959368223a55e43 | [
"MIT"
] | null | null | null | utils/mergeMultiloc.py | ClaudiaRaffaelli/Protein-subcellular-localization | 38a40c7389ee717954c254114959368223a55e43 | [
"MIT"
] | null | null | null | utils/mergeMultiloc.py | ClaudiaRaffaelli/Protein-subcellular-localization | 38a40c7389ee717954c254114959368223a55e43 | [
"MIT"
] | 1 | 2021-08-25T07:50:43.000Z | 2021-08-25T07:50:43.000Z | from Bio import SeqIO, SeqRecord
from Bio.Seq import Seq
import glob
# Map each source fasta file to its subcellular-location label.
labels_dic_location = {
    'plasma_membrane.fasta': 'Cell.membrane',
    'cytoplasmic.fasta': 'Cytoplasm',
    'ER.fasta': 'Endoplasmic.reticulum',
    'Golgi.fasta': 'Golgi.apparatus',
    'vacuolar.fasta': 'Lysosome/Vacuole',
    'lysosomal.fasta': 'Lysosome/Vacuole',
    'mitochondrial.fasta': 'Mitochondrion',
    'nuclear.fasta': 'Nucleus',
    'peroxisomal.fasta': 'Peroxisome',
    'chloroplast.fasta': 'Plastid',
    'extracellular.fasta': 'Extracellular'
}

# we want to reserve 3 parts for train, 1 for test and 1 for validation
# NOTE(review): only the test flag is actually written below - confirm
# whether validation was meant to be marked too.
part = 1
sequences_list = []
# running through all fasta files in order to merge them into one
for fasta_path in glob.glob("dataset/DeepLoc/multiloc/original/*.fasta"):
    fasta_sequences = SeqIO.parse(fasta_path, 'fasta')
    for sequence in fasta_sequences:
        description, seq = sequence.description, str(sequence.seq)
        print("description: {}, sequence: {}".format(description, seq))
        # update the description in this format "id location [test]"
        split = description.split()  # something of the form "id location number number".
        new_description = split[0] + " " + labels_dic_location[fasta_path.split("/")[-1]] + "-U"
        # Simplified from ((((part - 1) % 5 + 5) % 5) + 1) == 1: with part >= 1
        # both reduce to "every 5th sequence, starting with the first".
        if (part - 1) % 5 == 0:
            new_description = new_description + " test"
        new_sequence = SeqRecord.SeqRecord(seq=Seq(seq), id=split[0], description=new_description)
        sequences_list.append(new_sequence)
        part += 1

with open("../dataset/DeepLoc/multiloc/merged_multiloc.fasta", "w") as output_handle:
    SeqIO.write(sequences_list, output_handle, "fasta")
| 35.622222 | 92 | 0.721148 |
ace7845965bf69360bf4c446f6d503c6cf019531 | 1,562 | py | Python | airflow/migrations/versions/86770d1215c0_add_kubernetes_scheduler_uniqueness.py | jacky-nirvana/incubator-airflow | 2318cea74d4f71fba353eaca9bb3c4fd3cdb06c0 | [
"Apache-2.0"
] | 1 | 2019-05-16T02:21:21.000Z | 2019-05-16T02:21:21.000Z | airflow/migrations/versions/86770d1215c0_add_kubernetes_scheduler_uniqueness.py | jacky-nirvana/incubator-airflow | 2318cea74d4f71fba353eaca9bb3c4fd3cdb06c0 | [
"Apache-2.0"
] | 6 | 2018-02-10T20:25:16.000Z | 2019-11-20T03:01:03.000Z | airflow/migrations/versions/86770d1215c0_add_kubernetes_scheduler_uniqueness.py | jacky-nirvana/incubator-airflow | 2318cea74d4f71fba353eaca9bb3c4fd3cdb06c0 | [
"Apache-2.0"
] | 2 | 2019-09-16T06:48:41.000Z | 2019-09-16T06:56:32.000Z | # flake8: noqa
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add kubernetes scheduler uniqueness
Revision ID: 86770d1215c0
Revises: 27c6a30d7c24
Create Date: 2018-04-03 15:31:20.814328
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '86770d1215c0'
down_revision = '27c6a30d7c24'
branch_labels = None
depends_on = None

# Name of the single-row table that stores the Kubernetes worker uuid.
RESOURCE_TABLE = "kube_worker_uuid"
def upgrade():
    """Create the single-row kube_worker_uuid table and seed its one row."""
    table_definition = [
        # one_row_id is the primary key, defaults to true and is check-
        # constrained to be true, so the table can only ever hold one row.
        sa.Column("one_row_id", sa.Boolean, server_default=sa.true(), primary_key=True),
        sa.Column("worker_uuid", sa.String(255)),
        sa.CheckConstraint("one_row_id", name="kube_worker_one_row_id"),
    ]
    uuid_table = op.create_table(RESOURCE_TABLE, *table_definition)
    op.bulk_insert(uuid_table, [{"worker_uuid": ""}])
def downgrade():
    # Reverse of upgrade(): drop the single-row worker-uuid table.
    op.drop_table(RESOURCE_TABLE)
| 28.925926 | 88 | 0.737516 |
ace785d57182fbf8142f7eb708b5c18dbbde06fa | 141,540 | py | Python | app/views.py | nyimbi/caseke | ce4a0fa44cd383bc23900e42f81656f089c8fdd9 | [
"MIT"
] | 1 | 2019-06-03T16:20:35.000Z | 2019-06-03T16:20:35.000Z | app/views.py | nyimbi/caseke | ce4a0fa44cd383bc23900e42f81656f089c8fdd9 | [
"MIT"
] | 20 | 2020-01-28T22:02:29.000Z | 2022-03-29T22:28:34.000Z | app/views.py | nyimbi/caseke | ce4a0fa44cd383bc23900e42f81656f089c8fdd9 | [
"MIT"
] | 1 | 2019-06-10T17:20:48.000Z | 2019-06-10T17:20:48.000Z | # coding: utf-8
# views.py AUTOGENERATED BY gen_script.sh from kp4.py
# Copyright (C) Nyimbi Odero, Sun Aug 13 05:04:25 EAT 2017
import calendar
from flask import redirect, flash, url_for, Markup
from flask import render_template
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.views import ModelView, BaseView, MasterDetailView, MultipleView, RestCRUDView, CompactCRUDMixin
from flask_appbuilder import ModelView, CompactCRUDMixin, aggregate_count, action, expose, BaseView, has_access
from flask_appbuilder.charts.views import ChartView, TimeChartView, GroupByChartView
from flask_appbuilder.models.group import aggregate_count
from flask_appbuilder.widgets import ListThumbnail, ListWidget
from flask_appbuilder.widgets import FormVerticalWidget, FormInlineWidget, FormHorizontalWidget, ShowBlockWidget
from flask_appbuilder.models.sqla.filters import FilterStartsWith, FilterEqualFunction as FA
from app import appbuilder, db
from .models import *
# Basic Lists
# Audit columns (AuditMixin) that views commonly hide from their lists.
hide_list = ['created_by', 'changed_by', 'created_on', 'changed_on']

# Pretty printers for PersonMixin date values.
def pretty_month_year(value):
    """Format a date as e.g. 'August 2017' (English month name plus year)."""
    month_name = calendar.month_name[value.month]
    return '{} {}'.format(month_name, value.year)
def pretty_year(value):
    """Return only the year of a date, as a string."""
    return '{}'.format(value.year)
def fill_gender():
    """Seed the Gender lookup table with its two base rows.

    Safe to call repeatedly: if the rows already exist, the failed commit
    is rolled back and the table is left unchanged.
    """
    try:
        db.session.add(Gender(name='Male'))
        db.session.add(Gender(name='Female'))
        db.session.commit()
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; Exception still covers all database errors.
        db.session.rollback()
class BailView(ModelView):
    """CRUD view for Bail records.

    Personal/biometric columns are kept out of the search form and the
    audit columns out of the add/edit forms.  The standard Flask-AppBuilder
    options (titles, widgets, fieldsets, base_order, base_filters,
    related_views, templates, ...) can be enabled here as needed.
    """
    datamodel=SQLAInterface(Bail, db.session)
    # Keep personal/biometric data out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit fields are maintained automatically by the framework.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        # Bulk-delete the selected rows, then return to the list view.
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class CaseView(ModelView):
    """General CRUD view for Case records.

    Lists only summary columns; personal/biometric data and the delay
    fields are excluded from search, audit columns from the add/edit forms.
    Other Flask-AppBuilder options (widgets, ordering, related views, the
    bulk-delete action, ...) are currently disabled.
    """
    datamodel=SQLAInterface(Case, db.session)
    # Hide personal/biometric data and internal delay fields from search.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns +['start_delay','end_delay']
    # Audit fields are maintained automatically by the framework.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    # Summary columns shown in the list view.
    list_columns = ['policestation','ob_number','report_date','casecategory']
    # Shared case fieldset layout for both add and edit forms.
    edit_fieldsets = add_fieldsets = casey_fieldset
class CaseCourtView(ModelView):
    """Court-oriented view of the same Case model.

    Identical configuration to CaseView except that the list shows the
    court-specific column set (case_columns_court).
    """
    datamodel=SQLAInterface(Case, db.session)
    # Keep personal/biometric data out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit fields are maintained automatically by the framework.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    # Court-specific list columns.
    list_columns = case_columns_court
    # Shared case fieldset layout for both add and edit forms.
    edit_fieldsets = add_fieldsets = casey_fieldset
class CasePoliceView(ModelView):
    """Police-oriented view of the same Case model.

    Identical configuration to CaseView except that the list shows the
    police-specific column set (case_columns_police).
    """
    datamodel=SQLAInterface(Case, db.session)
    # Keep personal/biometric data out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit fields are maintained automatically by the framework.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    # Police-specific list columns.
    list_columns = case_columns_police
    # Shared case fieldset layout for both add and edit forms.
    edit_fieldsets = add_fieldsets = casey_fieldset
class CasecategoryView(ModelView):
    """CRUD view for Casecategory reference data."""

    datamodel = SQLAInterface(Casecategory, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ref_columns

    # Generic reference layout plus the category-specific flags.
    add_fieldsets = ref_fieldset + [
        ('County', {'fields': ['indictable', 'is_criminal'], 'expanded': True})
    ]
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class CauseofactionView(CompactCRUDMixin, ModelView):
    """Compact CRUD view for Causeofaction reference data."""

    datamodel = SQLAInterface(Causeofaction, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ref_columns

    # Both forms share the generic reference-data layout.
    add_fieldsets = ref_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class CommitaltypeView(ModelView):
    """CRUD view for Commitaltype reference data."""

    datamodel = SQLAInterface(Commitaltype, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ref_columns

    # Both forms share the generic reference-data layout.
    add_fieldsets = ref_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class ConstituencyView(ModelView):
    """CRUD view for Constituency reference data.

    The form leads with the parent county selector, then the generic
    reference fields.
    """

    datamodel = SQLAInterface(Constituency, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ref_columns

    add_fieldsets = [
        ('Parents', {'fields': ['county1'], 'expanded': True})
    ] + ref_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class CountyView(ModelView):
    """CRUD view for County reference data, listed alphabetically."""

    datamodel = SQLAInterface(County, db.session)

    # Alphabetical listing by county name.
    base_order = ("name", "asc")

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ref_columns

    # Both forms share the generic reference-data layout.
    add_fieldsets = ref_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class CourtView(ModelView):
    """CRUD view for Court records.

    The form leads with the court-station selector, then the generic
    reference fields.
    """

    datamodel = SQLAInterface(Court, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ref_columns

    add_fieldsets = [
        ('Court Station', {'fields': ['courtstation'], 'expanded': True})
    ] + ref_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class CourtlevelView(ModelView):
    """CRUD view for Courtlevel reference data."""

    datamodel = SQLAInterface(Courtlevel, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ref_columns

    # Both forms share the generic reference-data layout.
    add_fieldsets = ref_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class CourtstationView(ModelView):
    """CRUD view for Courtstation records.

    The form leads with the parent town and court level, then the shared
    place-details layout.
    """

    datamodel = SQLAInterface(Courtstation, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ref_columns

    add_fieldsets = [
        ('Parents', {'fields': ['town1', 'courtlevel'], 'expanded': True})
    ] + place_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class DefendantView(ModelView):
    """CRUD view for Defendant person records.

    The form combines the full set of person-related fieldsets: identity,
    contacts, documents, employment, biometrics and medical details.
    """

    datamodel = SQLAInterface(Defendant, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = person_list_columns

    add_fieldsets = (
        person_fieldset
        + contact_fieldset
        + person_docs_fieldset
        + employment_fieldset
        + biometric_fieldset
        + medical_fieldset
    )
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class DisciplineView(ModelView):
    """CRUD view for Discipline records against a defendant."""

    datamodel = SQLAInterface(Discipline, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Form leads with the defendant selector, then the activity details.
    add_fieldsets = [
        ('Person', {'fields': ['defendant1'], 'expanded': True})
    ] + activity_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class DocarchiveView(ModelView):
    """CRUD view for archived document records."""

    datamodel = SQLAInterface(Docarchive, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ['name', 'scandate']

    add_fieldsets = [
        ('Doc Archive', {'fields': ['name', 'scandate', 'archival', 'doc'], 'expanded': True}),
        ('Tags', {'fields': ['tag'], 'expanded': True}),
    ]
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class DoctemplateView(ModelView):
    """CRUD view for document templates.

    The full-text ``search_vector`` column is excluded from both search
    and forms — it is database-maintained.
    """

    datamodel = SQLAInterface(Doctemplate, db.session)

    # Keep bookkeeping columns and the FTS vector out of the search form.
    search_exclude_columns = (
        person_exclude_columns
        + biometric_columns
        + person_search_exclude_columns
        + ['search_vector']
    )
    # Audit columns and the FTS vector are system-maintained.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns + ['search_vector']

    list_columns = ref_columns

    add_fieldsets = ref_fieldset + doc_edit_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class DocumentView(ModelView):
    """CRUD view for Document records."""

    datamodel = SQLAInterface(Document, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = doc_columns

    add_fieldsets = doc_edit_fieldset + [
        ('Tags', {'fields': ['confidential','locked','pagecount','tag'], 'expanded': True})
    ]
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class EventlogView(ModelView):
    """CRUD view for Eventlog records; relies on ModelView defaults for
    columns and fieldsets."""

    datamodel = SQLAInterface(Eventlog, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class FilingView(ModelView):
    """CRUD view for Filing records.

    Bug fix: the original chained two fieldsets ('Character' and
    'Filing') whose field lists overlapped — ``case1``, ``urgent`` and
    ``urgentreason`` appeared in both, so the same form fields were
    rendered twice. The redundant 'Filing' section (a strict subset of
    'Character') is removed.
    """

    datamodel = SQLAInterface(Filing, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Single section: each field listed exactly once.
    edit_fieldsets = add_fieldsets = [
        ('Character', {'fields': ['case1', 'filingtype', 'urgent', 'urgentreason', 'uploaddate'],
                       'expanded': True}),
    ]

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class FilingMultiView(MasterDetailView):
    """Master-detail view: Filing master with related Documents.

    Bug fix: as in ``FilingView``, the original chained two fieldsets
    whose field lists overlapped (``case1``, ``urgent``,
    ``urgentreason`` appeared in both sections), rendering duplicate
    form fields. The redundant 'Filing' section is removed.
    """

    datamodel = SQLAInterface(Filing, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Detail pane: documents attached to the selected filing.
    related_views = [DocumentView]

    # Single section: each field listed exactly once.
    edit_fieldsets = add_fieldsets = [
        ('Character', {'fields': ['case1', 'filingtype', 'urgent', 'urgentreason', 'uploaddate'],
                       'expanded': True}),
    ]

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class FilingtypeView(ModelView):
    """CRUD view for Filingtype reference data."""

    datamodel = SQLAInterface(Filingtype, db.session)

    # Keep person/biometric bookkeeping columns out of the search form.
    search_exclude_columns = (
        person_exclude_columns + biometric_columns + person_search_exclude_columns
    )
    # Audit columns are system-maintained, never user-editable.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    list_columns = ref_columns

    # Both forms share the generic reference-data layout.
    add_fieldsets = ref_fieldset
    edit_fieldsets = add_fieldsets

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the caller page."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class GateregisterView(ModelView):
    """CRUD view for Gateregister records."""

    datamodel = SQLAInterface(Gateregister, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class GenderView(ModelView):
    """CRUD view for Gender reference records."""

    datamodel = SQLAInterface(Gender, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = \
        ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class HearingView(ModelView):
    """CRUD view for Hearing records."""

    datamodel = SQLAInterface(Hearing, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class HearingtypeView(ModelView):
    """CRUD view for Hearingtype reference records."""

    datamodel = SQLAInterface(Hearingtype, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = \
        ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class InvestigationView(ModelView):
    """CRUD view for Investigation records."""

    datamodel = SQLAInterface(Investigation, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class JoRankView(ModelView):
    """CRUD view for JoRank (judicial officer rank) reference records."""

    datamodel = SQLAInterface(JoRank, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = \
        ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class JudicialofficerView(ModelView):
    """CRUD view for Judicialofficer records."""

    datamodel = SQLAInterface(Judicialofficer, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class LawfirmView(ModelView):
    """CRUD view for Lawfirm records, with place/address fields on the forms."""

    datamodel = SQLAInterface(Lawfirm, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    edit_fieldsets = add_fieldsets = \
        ref_fieldset + place_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class LawyerView(ModelView):
    """CRUD view for Lawyer records."""

    datamodel = SQLAInterface(Lawyer, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class MedeventView(ModelView):
    """CRUD view for Medevent records."""

    datamodel = SQLAInterface(Medevent, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class NatureofsuitView(ModelView):
    """CRUD view for Natureofsuit reference records, listed alphabetically."""

    datamodel = SQLAInterface(Natureofsuit, db.session)
    base_order = ("name", "asc")
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = \
        ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class PaymentView(ModelView):
    """CRUD view for Payment records."""

    datamodel = SQLAInterface(Payment, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class PaymentmethodView(ModelView):
    """CRUD view for Paymentmethod reference records."""

    datamodel = SQLAInterface(Paymentmethod, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = \
        ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class PlaintiffView(ModelView):
    """CRUD view for Plaintiff records, using person and contact fieldsets."""

    datamodel = SQLAInterface(Plaintiff, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    list_columns = person_list_columns
    edit_fieldsets = add_fieldsets = \
        person_fieldset + contact_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class PolicerankView(ModelView):
    """CRUD view for Policerank reference records."""

    datamodel = SQLAInterface(Policerank, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = \
        ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class PoliceroleView(ModelView):
    """CRUD view for Policerole reference records, listed alphabetically."""

    datamodel = SQLAInterface(Policerole, db.session)
    base_order = ("name", "asc")
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = \
        ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class PolicestationView(ModelView):
    """CRUD view for Policestation records, showing the station's town and type."""

    datamodel = SQLAInterface(Policestation, db.session)
    # Keep audit/person/biometric housekeeping columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-maintained; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns
    list_columns = ref_columns + ['town1', 'policestationtype']
    edit_fieldsets = add_fieldsets = \
        [('Town', {'fields': ['town1', 'policestationtype']})] + ref_fieldset + place_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Delete the selected record(s) and return to the previous page.

        *items* is a list when the action is triggered from the list view,
        but a single model instance when triggered from the show view
        (Flask-AppBuilder actions default to single=True).
        """
        if isinstance(items, list):
            self.datamodel.delete_all(items)
            self.update_redirect()
        else:
            self.datamodel.delete(items)
        return redirect(self.get_redirect())
class PolicestationtypeView(ModelView):
    """CRUD view for Policestationtype lookup records."""

    datamodel = SQLAInterface(Policestationtype, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Simple lookup table: only the shared reference columns are listed/edited.
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class PolofficerView(ModelView):
    """CRUD view for Polofficer (police officer) records."""

    datamodel = SQLAInterface(Polofficer, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Officers are people: reuse the shared person listing columns.
    list_columns = person_list_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class PrisonView(ModelView):
    """CRUD view for Prison records.

    NOTE(review): unlike the sibling views, the usual
    ``search_exclude_columns`` / ``add_exclude_columns`` assignments were
    commented out here — confirm whether exposing all columns is intentional.
    """

    datamodel = SQLAInterface(Prison, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class PrisoncellView(ModelView):
    """CRUD view for Prisoncell records.

    NOTE(review): the usual column-exclusion assignments are commented out
    in this view (unlike most siblings) — confirm this is deliberate.
    """

    datamodel = SQLAInterface(Prisoncell, db.session)

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class PrisoncommitalView(ModelView):
    """CRUD view for Prisoncommital records."""

    datamodel = SQLAInterface(Prisoncommital, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class PrisonerpropertyView(ModelView):
    """CRUD view for Prisonerproperty records."""

    datamodel = SQLAInterface(Prisonerproperty, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class ProsecutorView(ModelView):
    """CRUD view for Prosecutor records."""

    datamodel = SQLAInterface(Prosecutor, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class ProsecutorteamView(ModelView):
    """CRUD view for Prosecutorteam records."""

    datamodel = SQLAInterface(Prosecutorteam, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Listing shows the shared reference columns plus the team's prosecutors.
    list_columns = ref_columns + ['prosecutors']

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class RemissionView(ModelView):
    """CRUD view for Remission records."""

    datamodel = SQLAInterface(Remission, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class SecurityrankView(ModelView):
    """CRUD view for Securityrank records, ordered by name."""

    datamodel = SQLAInterface(Securityrank, db.session)

    # Default listing order: alphabetical by name.
    base_order = ("name", "asc")

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Listing shows only the shared reference columns.
    list_columns = ref_columns

    # Add/edit forms: reference fields first, then the related prisons group.
    edit_fieldsets = add_fieldsets = (
        ref_fieldset + [('Prisons', {'fields': ['prisons'], 'expanded': True})]
    )

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class SubcountyView(ModelView):
    """CRUD view for Subcounty records, ordered by name."""

    datamodel = SQLAInterface(Subcounty, db.session)

    # Default listing order: alphabetical by name.
    base_order = ("name", "asc")

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Listing shows the shared reference columns plus the parent county.
    list_columns = ref_columns + ['county1']

    # Add/edit forms: parent county first, then the shared reference fields.
    edit_fieldsets = add_fieldsets = (
        [('County', {'fields': ['county1'], 'expanded': True})] + ref_fieldset
    )

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class SuretyView(ModelView):
    """CRUD view for Surety records."""

    datamodel = SQLAInterface(Surety, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class TagView(ModelView):
    """CRUD view for Tag lookup records."""

    datamodel = SQLAInterface(Tag, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Simple lookup table: only the shared reference columns are listed/edited.
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class TownView(ModelView):
    """CRUD view for Town records."""

    datamodel = SQLAInterface(Town, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Listing shows only the shared reference columns.
    list_columns = ref_columns

    # Add/edit forms: parent subcounty first, then the shared reference fields.
    edit_fieldsets = add_fieldsets = (
        [('Parents', {'fields': ['subcounty1'], 'expanded': True})] + ref_fieldset
    )

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class VisitView(ModelView):
    """CRUD view for Visit records."""

    datamodel = SQLAInterface(Visit, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class VisitorView(ModelView):
    """CRUD view for Visitor records."""

    datamodel = SQLAInterface(Visitor, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class WarderView(ModelView):
    """CRUD view for Warder records."""

    datamodel = SQLAInterface(Warder, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class WarderrankView(ModelView):
    """CRUD view for Warderrank lookup records."""

    datamodel = SQLAInterface(Warderrank, db.session)

    # Keep person/biometric detail columns out of the search form.
    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    # Audit columns are system-managed; exclude them from add/edit forms.
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    # Simple lookup table: only the shared reference columns are listed/edited.
    list_columns = ref_columns
    edit_fieldsets = add_fieldsets = ref_fieldset

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected records and redirect back to the list."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
class WitnesView(ModelView):
    """CRUD view for witnesses (Witnes model).

    Uses the ModelView defaults for columns/fieldsets; person, biometric and
    audit columns are excluded from search and forms. (Commented-out FAB
    option scaffolding removed — see ModelView docs for the available
    attributes.)
    """
    datamodel = SQLAInterface(Witnes, db.session)

    search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
    add_exclude_columns = edit_exclude_columns = audit_exclude_columns

    @action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
    def muldelete(self, items):
        """Bulk-delete the selected rows, then return to the list view."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
def _make_age_chart_view(model, modelview, name):
    """Build the 'age' pie-chart view class for *model*.

    Returns a GroupByChartView subclass (3-D PieChart) with one series
    grouped by 'age_today' and one grouped by 'gender', counting rows.
    The class is created with three-argument type() so its __name__ is
    '<name>ChartView' — identical to the hand-written classes it replaces,
    which keeps Flask-AppBuilder URLs and permissions unchanged.
    """
    return type('{}ChartView'.format(name), (GroupByChartView,), {
        'datamodel': SQLAInterface(model, db.session),
        'chart_title': 'Grouped {} by Birth'.format(name),
        'chart_3d': 'true',
        'label_columns': modelview.label_columns,
        'chart_type': 'PieChart',
        'definitions': [
            {'group': 'age_today',
             'series': [(aggregate_count, 'age_today')]},
            {'group': 'gender',
             'series': [(aggregate_count, 'age_today')]},
        ],
    })


def _make_time_chart_view(model, modelview, name):
    """Build the 'time' area-chart view class for *model*.

    Returns a GroupByChartView subclass (3-D AreaChart) with the
    'age_today' column bucketed by month/year via the pretty_month_year
    and pretty_year formatters; class name is '<name>TimeChartView'.
    """
    return type('{}TimeChartView'.format(name), (GroupByChartView,), {
        'datamodel': SQLAInterface(model, db.session),
        'chart_title': 'Grouped Birth {}'.format(name),
        'chart_type': 'AreaChart',
        'chart_3d': 'true',
        'label_columns': modelview.label_columns,
        'definitions': [
            {'group': 'age_today',
             'formatter': pretty_month_year,
             'series': [(aggregate_count, 'age_today')]},
            {'group': 'age_today',
             'formatter': pretty_year,
             'series': [(aggregate_count, 'age_today')]},
        ],
    })


# (model, model view, chart name) triples, in the original definition order.
# The chart name drives the generated class names and titles; note the one
# irregular case: model Polofficer -> PoliceofficerChartView.
_CHART_SUBJECTS = [
    (Lawyer, LawyerView, 'Lawyer'),
    (Plaintiff, PlaintiffView, 'Plaintiff'),
    (Witnes, WitnesView, 'Witnes'),
    (Surety, SuretyView, 'Surety'),
    (Prosecutor, ProsecutorView, 'Prosecutor'),
    (Polofficer, PolofficerView, 'Policeofficer'),
    (Judicialofficer, JudicialofficerView, 'Judicialofficer'),
    (Defendant, DefendantView, 'Defendant'),
    (Visitor, VisitorView, 'Visitor'),
    (Warder, WarderView, 'Warder'),
    (Visit, VisitView, 'Visit'),
    (Discipline, DisciplineView, 'Discipline'),
    (Medevent, MedeventView, 'Medevent'),
    (Hearing, HearingView, 'Hearing'),
    (Prisoncommital, PrisoncommitalView, 'Prisoncommital'),
    (Case, CaseView, 'Case'),
]

# Bind LawyerChartView, LawyerTimeChartView, ... at module level, replacing
# 500+ lines of copy-pasted, otherwise-identical class definitions.
for _model, _modelview, _name in _CHART_SUBJECTS:
    globals()['{}ChartView'.format(_name)] = _make_age_chart_view(_model, _modelview, _name)
    globals()['{}TimeChartView'.format(_name)] = _make_time_chart_view(_model, _modelview, _name)
# How to create a MasterDetailView
#class DetailView(ModelView):
# datamodel = SQLAInterface(DetailTable, db.session)
#class MasterView(MasterDetailView):
# datamodel = SQLAInterface(MasterTable, db.session)
# related_views = [DetailView]
# How to create a MultipleView
#class MultipleViewsExp(MultipleView):
# views = [GroupModelView, ContactModelView]
#View Registration
# Module import side effects: finalize the SQLAlchemy mappers, create any
# missing tables, and seed the gender reference table.
db.configure_mappers() #very important!
db.create_all()
fill_gender()
# Menu structure
#1. Setup
#2. Categories
#3. People
#4. Processes
#5. Reports
#6. Help
# Setup
def _register_views(category, views, icon="fa-folder-open-o"):
    """Register each (view class, menu label) pair under *category*, in menu order."""
    for viewclass, label in views:
        appbuilder.add_view(viewclass(), label, icon=icon, category=category)

# Setup
_register_views("Setup", [
    (CountyView, "Counties"),
    (SubcountyView, "Sub Counties"),
    (ConstituencyView, "Constituencies"),
    (TownView, "Towns"),
    (CourtstationView, "Court Stations"),
    (CourtView, "Courts"),
    (PaymentmethodView, "Payment Methods"),
    (LawfirmView, "Law Firms"),
    (PolicestationView, "Police Stations"),
    (PrisonView, "Prisons"),
    (PrisoncellView, "Prison Cells"),
])

# Categories
_register_views("Categories", [
    (CourtlevelView, "Court Levels"),
    (CauseofactionView, "Causes of Action"),
    (CasecategoryView, "Case Categories"),
    (NatureofsuitView, "Nature of Suit"),
    (CommitaltypeView, "Commital Types"),
    (FilingtypeView, "Filing Types"),
    (GenderView, "Genders"),
    (HearingtypeView, "Hearing Types"),
    (JoRankView, "Judicial Ranks"),
    (PolicerankView, "Police Ranks"),
    (WarderrankView, "Warder Ranks"),
    (PoliceroleView, "Police Roles"),
    (PolicestationtypeView, "Police Station Types"),
    (ProsecutorteamView, "Prosecutor Teams"),
    (SecurityrankView, "Prison Security Ranks"),
    (TagView, "Tags"),
    (DoctemplateView, "Doc Templates"),
])

# People
_register_views("People", [
    (PlaintiffView, "Plaintiffs"),
    (DefendantView, "Defendants"),
    (WitnesView, "Witnesses"),
    (JudicialofficerView, "Judicial Officers"),
    (LawyerView, "Lawyers"),
    (PolofficerView, "Police Officers"),
    (ProsecutorView, "Prosecutors"),
    (VisitorView, "Visitors"),
    (WarderView, "Warders"),
    (SuretyView, "Sureties"),
])

# Process
_register_views("Process", [
    (CaseView, "Cases"),
    (InvestigationView, "Investigations"),
    (FilingView, "Filings"),
    (FilingMultiView, "File & Docs"),
    (HearingView, "Hearings"),
    (BailView, "Bail"),
    (DocumentView, "Documents"),
    (DocarchiveView, "Doc Archives"),
    (PrisoncommitalView, "Prison Commital"),
    (GateregisterView, "Gate Registers"),
    (DisciplineView, "Disciplinary"),
    (MedeventView, "Medical Events"),
    (PaymentView, "Payments"),
    (PrisonerpropertyView, "Prisoner Property"),
    (RemissionView, "Remissions"),
    (VisitView, "Visits"),
])

# Charts & Reports
appbuilder.add_view(EventlogView(), "Eventlogs", icon="fa-folder-open-o", category="Reports")
# Register the age (pie) and time (area) chart pair for each subject, in the
# same order as the original one-call-per-view code.
for _chart in ('Lawyer', 'Plaintiff', 'Witnes', 'Surety', 'Prosecutor',
               'Policeofficer', 'Judicialofficer', 'Defendant', 'Visitor',
               'Warder', 'Visit', 'Discipline', 'Medevent', 'Hearing',
               'Prisoncommital', 'Case'):
    appbuilder.add_view(globals()['{}ChartView'.format(_chart)](),
                        '{} Age Chart'.format(_chart), icon='fa-dashboard', category='Reports')
    appbuilder.add_view(globals()['{}TimeChartView'.format(_chart)](),
                        '{} Time Chart'.format(_chart), icon='fa-dashboard', category='Reports')

#appbuilder.add_separator("Setup")
#appbuilder.add_separator("My Views")
#appbuilder.add_link(name, href, icon='', label='', category='', category_icon='', category_label='', baseview=None)
| 45.482005 | 277 | 0.719973 |
ace78682d97a95e70d9eeeeb2e3293eae764cc27 | 15,097 | py | Python | py/fastspecfit/templates/sample.py | desihub/fastspecf | 5fe2e3520f0bbf0d98c5ed4a64e4937e2f7ad417 | [
"BSD-3-Clause"
] | 7 | 2020-12-22T15:26:36.000Z | 2022-03-26T13:31:09.000Z | py/fastspecfit/templates/sample.py | desihub/fastspecf | 5fe2e3520f0bbf0d98c5ed4a64e4937e2f7ad417 | [
"BSD-3-Clause"
] | 48 | 2020-12-29T22:13:34.000Z | 2022-03-24T10:31:43.000Z | py/fastspecfit/templates/sample.py | desihub/fastspecfit | 5fe2e3520f0bbf0d98c5ed4a64e4937e2f7ad417 | [
"BSD-3-Clause"
] | null | null | null | """
fastspecfit.templates.sample
============================
Code for defining samples and reading the data needed to build templates for the
various target classes. Called by bin/desi-templates.
"""
import pdb # for debugging
import os
import numpy as np
import fitsio
from astropy.table import Table, Column
from desiutil.log import get_logger
log = get_logger()
# SV1 tile IDs reserved for visual inspection (VI), per target class; these
# are dropped by select_tiles(..., remove_vi=True).
VITILES_TARGETCLASS = {'lrg': [80605, 80609],
                       'elg': [80606, 80608, 80610],
                       'bgs': [80613]}

# Per-target-class sample definitions: redshift range, normalization
# wavelength (presumably Angstrom — confirm against the template builder),
# and the absolute magnitude / rest-frame color used to characterize the
# sample, plus the LaTeX labels for plotting.
SAMPLE_PROPERTIES = {
    'lrg': {'zminmax': (0.1, 1.1), 'normwave': 4500.0, 'absmag': 'Mr', 'color': 'rW1',
            'absmag_band': 'R', 'color_band1': 'R', 'color_band2': 'W1',
            'absmag_label': 'M_{{0.0r}}', 'color_label': '^{{0.0}}(r-W1)'},
    'elg': {'zminmax': (0.6, 1.5), 'normwave': 3500.0, 'absmag': 'Mg', 'color': 'gr',
            'absmag_band': 'G', 'color_band1': 'G', 'color_band2': 'R',
            'absmag_label': 'M_{{0.0g}}', 'color_label': '^{{0.0}}(g-r)'},
    'bgs': {'zminmax': (0.05, 1.55), 'normwave': 5500.0, 'absmag': 'Mr', 'color': 'gr',
            'absmag_band': 'R', 'color_band1': 'G', 'color_band2': 'R',
            'absmag_label': 'M_{{0.0r}}', 'color_label': '^{{0.0}}(g-r)'},
    }
def select_tiles(targetclass, remove_vi=False, min_efftime=None,
                 specprod='denali', outfile=None, png=None):
    """Select the SV1 tiles to use for *targetclass*.

    Reads tiles-<specprod>.csv from the spectroscopic reduction directory,
    keeps SV1 tiles, and optionally removes (1) tiles reserved for visual
    inspection (see /global/cfs/cdirs/desi/sv/vi/TruthTables/Blanc/) and
    (2) tiles with low effective exposure time.

    Args:
        targetclass (str): 'lrg', 'elg', or 'bgs'; matched against FAPRGRM.
        remove_vi (bool): drop the VI tiles listed in VITILES_TARGETCLASS.
        min_efftime (float or None): drop tiles with EFFTIME_SPEC <= this
            many minutes; None disables the cut.
        specprod (str): spectroscopic production name (e.g., 'denali').
        outfile (str or None): optional filename for the selected tiles table.
        png (str or None): optional filename for a QA histogram of efftime.

    Returns:
        astropy.table.Table: the selected tiles.
    """
    reduxdir = os.path.join(os.getenv('DESI_ROOT'), 'spectro', 'redux', specprod)
    tilestable = Table.read(os.path.join(reduxdir, 'tiles-{}.csv'.format(specprod)))
    tilestable = tilestable[tilestable['SURVEY'] == 'sv1']
    tilestable = tilestable[np.argsort(tilestable['TILEID'])]

    # Tiles whose program matches the target class; selected *before* the VI
    # and efftime cuts below and used only for the QA figure.
    itargtiles = [targetclass in program for program in tilestable['FAPRGRM']]
    targtiles = tilestable[itargtiles]

    efftime = tilestable['EFFTIME_SPEC'] / 60  # [minutes]

    if targetclass and remove_vi:
        ivitiles = np.isin(tilestable['TILEID'], VITILES_TARGETCLASS[targetclass])
        vitiles = tilestable[ivitiles]
        log.info('Removing {} {} VI tiles: {}'.format(np.sum(ivitiles), targetclass.upper(),
                                                      ', '.join(vitiles['TILEID'].astype(str))))
        tilestable = tilestable[np.logical_not(ivitiles)]
    else:
        vitiles = None

    # Explicit None test (was `if min_efftime:`) so min_efftime=0 still
    # applies the cut instead of being silently ignored.
    if min_efftime is not None:
        ishallowtiles = tilestable['EFFTIME_SPEC'] / 60 <= min_efftime
        shallowtiles = tilestable[ishallowtiles]
        log.info('Removing {} tiles with efftime < {:.1f} min.'.format(
            np.sum(ishallowtiles), min_efftime))
        tilestable = tilestable[np.logical_not(ishallowtiles)]
    else:
        shallowtiles = None

    if outfile:
        log.info('Writing {} tiles to {}'.format(len(tilestable), outfile))
        tilestable.write(outfile, overwrite=True)

    if png:
        import matplotlib.pyplot as plt
        from fastspecfit.templates.qa import plot_style
        sns, _ = plot_style()

        xlim = (efftime.min(), efftime.max())
        fig, ax = plt.subplots(figsize=(9, 6))
        _ = ax.hist(tilestable['EFFTIME_SPEC'] / 60, bins=50, range=xlim,
                    label='All Tiles (N={})'.format(len(tilestable)))
        _ = ax.hist(targtiles['EFFTIME_SPEC'] / 60, bins=50, range=xlim, alpha=0.9,
                    label='{} Tiles (N={})'.format(targetclass.upper(), len(targtiles)))
        if vitiles:
            _ = ax.hist(vitiles['EFFTIME_SPEC'] / 60, bins=50, range=xlim,
                        label='VI Tiles (N={})'.format(len(vitiles)))
        if shallowtiles:
            _ = ax.hist(shallowtiles['EFFTIME_SPEC'] / 60, bins=50, range=xlim,
                        label='Shallow (<{:.0f} min) Tiles (N={})'.format(
                            min_efftime, len(shallowtiles)))
        ax.set_xlabel('Effective Time (spec, min)')
        ax.set_ylabel('Number of Tiles')
        ax.legend(loc='upper right', fontsize=16)
        plt.subplots_adjust(right=0.95, top=0.95, bottom=0.17)

        log.info('Writing {}'.format(png))
        fig.savefig(png)
        plt.close()

    return tilestable
def read_parent_sample(samplefile):
    """Read the output of select_parent_sample.

    Returns the (fastphot, fastspec, metadata) tables from *samplefile*,
    in that order.
    """
    log.info('Reading {}'.format(samplefile))
    phot, spec, meta = (Table(fitsio.read(samplefile, extname))
                        for extname in ('FASTPHOT', 'FASTSPEC', 'METADATA'))
    return phot, spec, meta
def read_tilestable(tilefile):
    """Read the tiles table written by select_tiles."""
    tiles = Table.read(tilefile)
    log.info('Read {} tiles from {}'.format(len(tiles), tilefile))
    return tiles
def read_fastspecfit(tilestable, fastspecfit_dir=None, specprod='denali',
                     targetclass='lrg'):
    """Read the fastspecfit output for this production.

    Args:
        tilestable: table with a ``TILEID`` column; only objects observed on
            these tiles are kept.
        fastspecfit_dir: root of the fastspecfit outputs; defaults to
            ``$DESI_ROOT/spectro/fastspecfit``.
        specprod: spectroscopic production name (e.g. 'denali').
        targetclass: target class to keep ('lrg', 'elg', 'bgs', or 'qso').

    Returns:
        (phot, spec, meta) tables restricted to ``targetclass`` targets on the
        requested tiles, with extinction-corrected observed-frame magnitudes
        and boolean targeting columns added.
    """
    from desitarget.targets import main_cmx_or_sv
    if fastspecfit_dir is None:
        fastspecfit_dir = os.path.join(os.getenv('DESI_ROOT'), 'spectro', 'fastspecfit')
    fastspecdir = os.path.join(fastspecfit_dir, specprod, 'tiles')
    specfile = os.path.join(fastspecdir, 'merged', 'fastspec-{}-cumulative.fits'.format(specprod))
    photfile = os.path.join(fastspecdir, 'merged', 'fastphot-{}-cumulative.fits'.format(specprod))
    spec = Table(fitsio.read(specfile, 'FASTSPEC'))
    meta = Table(fitsio.read(specfile, 'METADATA'))
    phot = Table(fitsio.read(photfile, 'FASTPHOT'))
    # spec and phot catalogs must be row-matched
    assert(np.all(spec['TARGETID'] == phot['TARGETID']))
    log.info('Read {} objects from {}'.format(len(spec), specfile))
    log.info('Read {} objects from {}'.format(len(phot), photfile))
    ontiles = np.where(np.isin(meta['TILEID'], tilestable['TILEID']))[0]
    spec = spec[ontiles]
    meta = meta[ontiles]
    phot = phot[ontiles]
    log.info('Keeping {} objects on {}/{} unique tiles.'.format(
        len(ontiles), len(np.unique(meta['TILEID'])), len(tilestable)))
    ngal = len(spec)
    # correct for extinction!
    # see https://github.com/desihub/fastspecfit/issues/23
    if specprod == 'denali':
        log.warning('Correcting for MW extinction in denali production!')
        from desiutil.dust import SFDMap, ext_odonnell
        from speclite import filters
        RV = 3.1
        SFD = SFDMap(scaling=0.86) # SF11 recalibration of the SFD maps
        ebv = SFD.ebv(meta['RA'], meta['DEC'])
        # effective filter wavelengths differ between the northern (BASS/MzLS)
        # and southern (DECam) imaging surveys
        effwave_north, effwave_south = {}, {}
        for band, nfilt, sfilt in zip(['G', 'R', 'Z', 'W1', 'W2'],
                                      ['BASS-g', 'BASS-r', 'MzLS-z', 'wise2010-W1', 'wise2010-W2'],
                                      ['decam2014-g', 'decam2014-r', 'decam2014-z', 'wise2010-W1', 'wise2010-W2']):
            effwave_north[band] = filters.load_filters(nfilt).effective_wavelengths.value[0]
            effwave_south[band] = filters.load_filters(sfilt).effective_wavelengths.value[0]
    # correct for extinction!
    # see https://github.com/desihub/fastspecfit/issues/23
    # NOTE: ebv/RV/ext_odonnell are only defined in the specprod == 'denali'
    # branch above; this helper is only usable for that production.
    def _get_mw_transmission(good, band):
        mw_transmission = np.ones(len(good))
        isouth = np.where(meta['PHOTSYS'][good] == 'S')[0]
        inorth = np.where(meta['PHOTSYS'][good] == 'N')[0]
        if len(isouth) > 0:
            mw_transmission[isouth] = 10**(-0.4 * ebv[good][isouth] * RV * ext_odonnell(effwave_south[band], Rv=RV))
        if len(inorth) > 0:
            # Bug fix: northern photometry must use the northern (BASS/MzLS)
            # effective wavelengths, not the southern (DECam) ones.
            mw_transmission[inorth] = 10**(-0.4 * ebv[good][inorth] * RV * ext_odonnell(effwave_north[band], Rv=RV))
        return mw_transmission
    # for convenience, add observed-frame photometry, rest-frame colors, and
    # targeting variables
    for band in ('G', 'R', 'Z', 'W1'):
        phot['{}MAG'.format(band)] = np.zeros(ngal, 'f4')
        good = np.where(meta['FLUX_{}'.format(band)] > 0)[0]
        phot['{}MAG'.format(band)][good] = 22.5 - 2.5 * np.log10(meta['FLUX_{}'.format(band)][good] / _get_mw_transmission(good, band))
    for band in ('G', 'R', 'Z'):
        phot['{}FIBERMAG'.format(band)] = np.zeros(ngal, 'f4')
        good = np.where(meta['FIBERFLUX_{}'.format(band)] > 0)[0]
        phot['{}FIBERMAG'.format(band)][good] = 22.5 - 2.5 * np.log10(meta['FIBERFLUX_{}'.format(band)][good] / _get_mw_transmission(good, band))
    #phot['ABSMAG_GR'] = phot['ABSMAG_G'] - phot['ABSMAG_R']
    #phot['ABSMAG_RZ'] = phot['ABSMAG_R'] - phot['ABSMAG_Z']
    #phot['ABSMAG_RW1'] = phot['ABSMAG_R'] - phot['ABSMAG_W1']
    # targeting...
    targs = ['BGS_ANY', 'ELG', 'LRG', 'QSO']
    targcols = ['BGS', 'ELG', 'LRG', 'QSO']
    for targcol in targcols:
        spec[targcol] = np.zeros(ngal, bool)
        phot[targcol] = np.zeros(ngal, bool)
    # decode the targeting bitmask per tile (main/commissioning/SV differ)
    for tile in tilestable['TILEID']:
        I = np.where(meta['TILEID'] == tile)[0]
        if len(I) == 0:
            continue
        (desicol, bgscol, mwscol), (desimask, bgsmask, mwsmask), survey = main_cmx_or_sv(meta[I])
        for targcol, targ in zip(targcols, targs):
            phot[targcol][I] = meta[desicol][I] & desimask.mask(targ) != 0
            spec[targcol][I] = meta[desicol][I] & desimask.mask(targ) != 0
    #for targcol in targcols:
    #    log.info('  {}: {}'.format(targcol, np.sum(phot[targcol])))
    itarg = phot[targetclass.upper()]
    log.info('Keeping {} {} targets.'.format(np.sum(itarg), targetclass.upper()))
    phot = phot[itarg]
    spec = spec[itarg]
    meta = meta[itarg]
    return phot, spec, meta
def select_parent_sample(phot, spec, meta, targetclass='lrg', specprod='denali',
                         deltachi2_cut=40, fastphot_chi2cut=100.0,
                         fastspec_chi2cut=3.0, smoothcorr_cut=10,
                         zobj_minmax=None, absmag_minmax=None, color_minmax=None,
                         return_indices=False, verbose=False, png=None, samplefile=None):
    """High-level sample selection.

    Applies baseline quality cuts (redshift-fit DELTACHI2, positive grzW1
    fluxes, and continuum-fit chi2 limits) and, when ``zobj_minmax``,
    ``absmag_minmax`` and ``color_minmax`` are all given, the corresponding
    box cuts in redshift / absolute magnitude / color.

    Returns the selected (phot, spec, meta) tables, or the integer indices of
    the selected rows when ``return_indices=True``. Optionally writes the
    selected sample to ``samplefile`` as a multi-extension FITS file.
    """
    iparent = (
        (meta['DELTACHI2'] > deltachi2_cut) *
        (meta['FLUX_G'] > 0) *
        (meta['FLUX_R'] > 0) *
        (meta['FLUX_Z'] > 0) *
        (meta['FLUX_W1'] > 0) *
        (spec['CONTINUUM_CHI2'] < fastspec_chi2cut) *
        (phot['CONTINUUM_CHI2'] < fastphot_chi2cut)
        #(np.abs(spec['CONTINUUM_SMOOTHCORR_B']) < smoothcorr_cut) *
        #(np.abs(spec['CONTINUUM_SMOOTHCORR_R']) < smoothcorr_cut) *
        #(np.abs(spec['CONTINUUM_SMOOTHCORR_Z']) < smoothcorr_cut)
    )
    # Bug fix: zobj_minmax defaults to None but the redshift cut used to be
    # applied unconditionally, raising a TypeError with the default arguments.
    # Only cut on redshift when limits were actually provided.
    if zobj_minmax is not None:
        iparent = iparent * (meta['Z'] > zobj_minmax[0]) * (meta['Z'] < zobj_minmax[1])
    if zobj_minmax is not None and absmag_minmax is not None and color_minmax is not None:
        props = SAMPLE_PROPERTIES[targetclass]
        absmagcol = 'ABSMAG_{}'.format(props['absmag_band'])
        color = phot['ABSMAG_{}'.format(props['color_band1'])] - phot['ABSMAG_{}'.format(props['color_band2'])]
        iselect = iparent * (
            (meta['Z'] > zobj_minmax[0]) * (meta['Z'] < zobj_minmax[1]) *
            (phot[absmagcol] > absmag_minmax[0]) * (phot[absmagcol] < absmag_minmax[1]) *
            (color > color_minmax[0]) * (color < color_minmax[1]) )
    else:
        iselect = iparent
    if verbose:
        log.info('Selecting a parent sample of {}/{} {}s.'.format(
            np.sum(iselect), len(meta), targetclass.upper()))
    # optionally write out
    if samplefile:
        from astropy.io import fits
        log.info('Writing {} objects to {}'.format(np.sum(iselect), samplefile))
        hduprim = fits.PrimaryHDU()
        hduphot = fits.convenience.table_to_hdu(phot[iselect])
        hduphot.header['EXTNAME'] = 'FASTPHOT'
        hduspec = fits.convenience.table_to_hdu(spec[iselect])
        hduspec.header['EXTNAME'] = 'FASTSPEC'
        hdumeta = fits.convenience.table_to_hdu(meta[iselect])
        hdumeta.header['EXTNAME'] = 'METADATA'
        hdumeta.header['SPECPROD'] = (specprod, 'spectroscopic production name')
        hdumeta.header['TARGET'] = (targetclass, 'target class')
        hx = fits.HDUList([hduprim, hduphot, hduspec, hdumeta])
        hx.writeto(samplefile, overwrite=True, checksum=True)
    # return
    if return_indices:
        return np.where(iselect)[0]
    else:
        return phot[iselect], spec[iselect], meta[iselect]
def stacking_bins(targetclass='lrg', verbose=False):
    """Build the redshift / absolute-magnitude / color stacking-bin table.

    Returns an astropy Table with one row per 3D bin, holding the bin center
    (``ZOBJ``, ``ABSMAG``, ``COLOR``) and the left/right edges
    (``*MIN``/``*MAX``), plus bookkeeping columns filled in later.

    Raises:
        NotImplementedError: if ``targetclass`` has no defined binning.
    """
    # define the stacking limits and the number of bin *centers*
    if targetclass == 'lrg':
        zlim, nz = [0.1, 1.1], 10
        absmaglim, nabsmag = [-24.5, -20], 9 # Mr
        colorlim, ncolor = [-1.0, 1.25], 9 # r-W1
    elif targetclass == 'elg':
        zlim, nz = [0.6, 1.5], 9
        absmaglim, nabsmag = [-24, -19], 10 # Mg
        colorlim, ncolor = [-0.2, 0.6], 4 # g-r
    elif targetclass == 'bgs':
        zlim, nz = [0.05, 0.55], 10
        absmaglim, nabsmag = [-24.0, -17.0], 7 # Mr
        colorlim, ncolor = [0.0, 1.0], 5 # g-r
    else:
        # Bug fix: `raise NotImplemented` raises the NotImplemented singleton,
        # which is not an exception (it triggers a TypeError at raise time).
        raise NotImplementedError('unrecognized targetclass {}'.format(targetclass))
    dz = (zlim[1] - zlim[0]) / nz
    dabsmag = (absmaglim[1] - absmaglim[0]) / nabsmag
    dcolor = (colorlim[1] - colorlim[0]) / ncolor
    # build the array of (left) bin *edges*
    zgrid = np.arange(zlim[0], zlim[1], dz)
    absmaggrid = np.arange(absmaglim[0], absmaglim[1], dabsmag)
    colorgrid = np.arange(colorlim[0], colorlim[1], dcolor)
    nbins = len(zgrid) * len(absmaggrid) * len(colorgrid)
    # pack into a table
    bins = Table()
    bins.add_column(Column(name='TARGETCLASS', dtype='U3', length=nbins))
    bins.add_column(Column(name='IBIN', dtype=np.int32, length=nbins))
    bins.add_column(Column(name='ISUBBIN', dtype=np.int16, length=nbins))
    bins.add_column(Column(name='NOBJ', dtype=np.int32, length=nbins))
    bins.add_column(Column(name='SNR', dtype='f4', length=nbins))
    for col in ('ZOBJ', 'ABSMAG', 'COLOR'):
        bins.add_column(Column(name=col, dtype='f4', length=nbins)) # mean bin center
        bins.add_column(Column(name='{}MIN'.format(col), dtype='f4', length=nbins))
        bins.add_column(Column(name='{}MAX'.format(col), dtype='f4', length=nbins))
    bins['TARGETCLASS'] = targetclass
    bins['IBIN'] = np.arange(nbins, dtype=np.int32)
    ibin = 0
    for zmin in zgrid:
        for absmagmin in absmaggrid:
            for colormin in colorgrid:
                for col, mmin, delt in zip(('ZOBJ', 'ABSMAG', 'COLOR'),
                                           (zmin, absmagmin, colormin),
                                           (dz, dabsmag, dcolor)):
                    bins[col][ibin] = mmin + delt / 2 # bin center
                    bins['{}MIN'.format(col)][ibin] = mmin # left edge
                    bins['{}MAX'.format(col)][ibin] = mmin + delt # right edge
                ibin += 1
    if verbose:
        # Bug fix: log the number of bins, not the whole table repr.
        log.info('Number of {} bins = {}'.format(targetclass, nbins))
    return bins
| 41.13624 | 145 | 0.588726 |
ace786908194f293187dcc3037b39a9ba19dd461 | 3,316 | py | Python | main.py | AdamBromiley/enerctl | fad66fc5ba00aad6c9d2a72d8b72b0aa7a394047 | [
"MIT"
] | null | null | null | main.py | AdamBromiley/enerctl | fad66fc5ba00aad6c9d2a72d8b72b0aa7a394047 | [
"MIT"
] | null | null | null | main.py | AdamBromiley/enerctl | fad66fc5ba00aad6c9d2a72d8b72b0aa7a394047 | [
"MIT"
] | null | null | null | import sys
from enerctl import *
def usage():
    """Print the interactive console's help text to stdout."""
    help_text = """COMMAND [OPTION]...
Send COMMAND to a legacy Energenie 433 MHz radio controlled wall socket.
Commands:
  cmd SEQUENCE Send a custom 4-bit binary code to the socket
  off [SOCKET] Turn SOCKET off
  on [SOCKET] Turn SOCKET on
Miscellaneous:
  h, help Display this help message
  q, quit Exit the console
Omitting the socket number means on/off commands will be accepted by all
sockets within range of the transmitter.
Examples:
  on
  cmd 1001
  off 3"""
    print(help_text)
def cleanup():
    """Release the radio transmitter, then the GPIO pins, before exit.

    Teardown order matters here: the transmitter layer is shut down first,
    then the underlying GPIO state is released.
    """
    transmitter_cleanup()
    gpio_cleanup()
def main():
    """Interactive console: read commands and drive the Energenie sockets.

    Initializes the GPIO pins and the 433 MHz transmitter, then loops reading
    commands from stdin until "q"/"quit", end-of-input (Ctrl-D) or Ctrl-C,
    always releasing the hardware via cleanup() on exit.
    """
    gpio_init()
    transmitter_init()
    try:
        while True:
            cmd = input("> ")
            cmd = [item.lower() for item in cmd.split()]
            if not cmd:
                continue
            # NOTE: matches "help"/"h" anywhere in the argument list
            elif ("help" in cmd) or ("h" in cmd):
                usage()
                continue
            base_cmd = cmd.pop(0)
            if base_cmd == "cmd":
                if not cmd:
                    print("ERROR: Control sequence required")
                    continue
                elif len(cmd) > 1:
                    print("ERROR: Too many arguments")
                    continue
                code_str = cmd.pop(0)
                if len(code_str) != 4:
                    print("ERROR: Invalid control sequence")
                    continue
                try:
                    code = int(code_str, 2)
                except ValueError:
                    print("ERROR: Invalid control sequence")
                    continue
                # Unpack the 4-bit code into individual key bits k_3..k_0
                k_0 = (code >> 0) & 1
                k_1 = (code >> 1) & 1
                k_2 = (code >> 2) & 1
                k_3 = (code >> 3) & 1
                send_code(k_3, k_2, k_1, k_0)
                print(f"Control code {code_str} transmitted")
                continue
            # Bug fix: usage() documents "q" as an alias for "quit", but only
            # "quit" used to be handled ("q" fell through to "invalid command").
            elif base_cmd in ("q", "quit"):
                cleanup()
                sys.exit(0)
            # Default socket ID is 5 (for all)
            sock_id = ALL_SOCKETS
            if cmd:
                try:
                    sock_id = int(cmd.pop(0))
                except ValueError:
                    print("ERROR: Invalid socket ID")
                    continue
            if cmd:
                print("ERROR: Too many arguments")
                continue
            if sock_id != ALL_SOCKETS:
                if not MINIMUM_SOCKET_ID <= sock_id <= MAXIMUM_SOCKET_ID:
                    print(f"ERROR: Socket ID ({sock_id}) out of range. Must be {MINIMUM_SOCKET_ID}-{MAXIMUM_SOCKET_ID}")
                    continue
            if base_cmd == "off":
                socket_off(sock_id)
                if sock_id == ALL_SOCKETS:
                    print("All sockets powered off")
                else:
                    print(f"Socket {sock_id} powered off")
            elif base_cmd == "on":
                socket_on(sock_id)
                if sock_id == ALL_SOCKETS:
                    print("All sockets powered on")
                else:
                    print(f"Socket {sock_id} powered on")
            else:
                print(f"ERROR: {base_cmd} is an invalid command")
    except (KeyboardInterrupt, EOFError):
        # Robustness fix: Ctrl-D / exhausted piped input (EOFError) now exits
        # cleanly like Ctrl-C instead of crashing with a traceback.
        print("")
    cleanup()
if __name__ == "__main__":
    # Start the interactive console only when executed as a script.
    main()
| 26.31746 | 120 | 0.473462 |
ace786a1d21d9a822defcaa35a81eaf79ee5b420 | 4,310 | py | Python | track_tools/txt2video.py | noahcao/TransTrack | 93d6d711db64b06d2974f8a410c9630514bf8db3 | [
"MIT"
] | 466 | 2020-12-31T02:53:51.000Z | 2022-03-28T08:55:35.000Z | track_tools/txt2video.py | noahcao/TransTrack | 93d6d711db64b06d2974f8a410c9630514bf8db3 | [
"MIT"
] | 59 | 2021-01-01T06:27:55.000Z | 2022-03-30T10:28:23.000Z | track_tools/txt2video.py | noahcao/TransTrack | 93d6d711db64b06d2974f8a410c9630514bf8db3 | [
"MIT"
] | 90 | 2021-01-01T08:56:07.000Z | 2022-03-30T09:38:31.000Z | import os
import sys
import json
import cv2
import glob as gb
from track_tools.colormap import colormap
def txt2img(visual_path="visual_val_gt"):
    """Render MOT tracking boxes from txt result files onto the video frames.

    Depending on ``visual_path`` this draws either the filtered ground-truth
    boxes or the predicted tracks for each MOT17 sequence (one colored box and
    track ID per object) and writes the annotated frames as PNGs under
    ``visual_path``.
    """
    print("Starting txt2img")
    # GT-only filtering: keep label 1 and drop the listed distractor labels
    # (presumably the MOT class convention -- confirm against the dataset docs)
    valid_labels = {1}
    ignore_labels = {2, 7, 8, 12}
    if not os.path.exists(visual_path):
        os.makedirs(visual_path)
    color_list = colormap()
    gt_json_path = 'mot/annotations/val_half.json'
    img_path = 'mot/train/'
    show_video_names = ['MOT17-02-FRCNN',
                        'MOT17-04-FRCNN',
                        'MOT17-05-FRCNN',
                        'MOT17-09-FRCNN',
                        'MOT17-10-FRCNN',
                        'MOT17-11-FRCNN',
                        'MOT17-13-FRCNN']
    test_json_path = 'mot/annotations/test.json'
    test_img_path = 'mot/test/'
    test_show_video_names = ['MOT17-01-FRCNN',
                             'MOT17-03-FRCNN',
                             'MOT17-06-FRCNN',
                             'MOT17-07-FRCNN',
                             'MOT17-08-FRCNN',
                             'MOT17-12-FRCNN',
                             'MOT17-14-FRCNN']
    if visual_path == "visual_test_predict":
        show_video_names = test_show_video_names
        img_path = test_img_path
        gt_json_path = test_json_path
    # Perf fix: the annotation JSON is identical for every sequence, so parse
    # it once here instead of re-reading the file on each loop iteration.
    with open(gt_json_path, 'r') as f:
        gt_json = json.load(f)
    for show_video_name in show_video_names:
        img_dict = dict()
        if visual_path == "visual_val_gt":
            txt_path = 'mot/train/' + show_video_name + '/gt/gt_val_half.txt'
        elif visual_path == "visual_val_predict":
            txt_path = 'val/tracks/'+ show_video_name + '.txt'
        elif visual_path == "visual_test_predict":
            txt_path = 'test/tracks/'+ show_video_name + '.txt'
        else:
            raise NotImplementedError
        # map frame_id -> image file path for this sequence
        for ann in gt_json["images"]:
            file_name = ann['file_name']
            video_name = file_name.split('/')[0]
            if video_name == show_video_name:
                img_dict[ann['frame_id']] = img_path + file_name
        txt_dict = dict()
        with open(txt_path, 'r') as f:
            for line in f.readlines():
                linelist = line.split(',')
                mark = int(float(linelist[6]))
                label = int(float(linelist[7]))
                vis_ratio = float(linelist[8])
                if visual_path == "visual_val_gt":
                    if mark == 0 or label not in valid_labels or label in ignore_labels or vis_ratio <= 0:
                        continue
                img_id = linelist[0]
                obj_id = linelist[1]
                # convert (x, y, w, h) to [x1, y1, x2, y2, track_id]
                bbox = [float(linelist[2]), float(linelist[3]),
                        float(linelist[2]) + float(linelist[4]),
                        float(linelist[3]) + float(linelist[5]), int(obj_id)]
                if int(img_id) in txt_dict:
                    txt_dict[int(img_id)].append(bbox)
                else:
                    txt_dict[int(img_id)] = list()
                    txt_dict[int(img_id)].append(bbox)
        for img_id in sorted(txt_dict.keys()):
            img = cv2.imread(img_dict[img_id])
            for bbox in txt_dict[img_id]:
                # color is keyed on track id so a track keeps its color
                cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color_list[bbox[4]%79].tolist(), thickness=2)
                cv2.putText(img, "{}".format(int(bbox[4])), (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color_list[bbox[4]%79].tolist(), 2)
            cv2.imwrite(visual_path + "/" + show_video_name + "{:0>6d}.png".format(img_id), img)
        print(show_video_name, "Done")
    print("txt2img Done")
def img2video(visual_path="visual_val_gt", fps=16, size=(1920, 1080)):
    """Assemble the PNG frames under ``visual_path`` into one MJPG video.

    Generalized: frame rate and output resolution are now parameters, with
    defaults equal to the previously hard-coded values, so existing callers
    are unaffected.

    Args:
        visual_path: directory containing the rendered ``*.png`` frames;
            the video is written next to it as ``<visual_path>_video.avi``.
        fps: output frame rate.
        size: output (width, height); every frame is resized to it.
    """
    print("Starting img2video")
    img_paths = gb.glob(visual_path + "/*.png")
    videowriter = cv2.VideoWriter(visual_path + "_video.avi",cv2.VideoWriter_fourcc('M','J','P','G'), fps, size)
    # sorted() keeps frames in filename (i.e. frame-number) order
    for img_path in sorted(img_paths):
        img = cv2.imread(img_path)
        img = cv2.resize(img, size)
        videowriter.write(img)
    videowriter.release()
    print("img2video Done")
if __name__ == '__main__':
    # Default visualization directory; an optional CLI argument overrides it.
    visual_path="visual_val_predict"
    if len(sys.argv) > 1:
        visual_path =sys.argv[1]
    # First render per-frame PNGs, then stitch them into a video.
    txt2img(visual_path)
    img2video(visual_path)
| 35.327869 | 156 | 0.546172 |
ace788ea4cd7933da8860bb9d3229e1e796dda83 | 25,402 | py | Python | clearml_agent/backend_api/session/session.py | Honzys/clearml-agent | f58071fc74e2df9bdba8ca91569e0bfd70ad4f46 | [
"Apache-2.0"
] | 112 | 2019-10-29T10:36:20.000Z | 2020-12-19T08:08:27.000Z | clearml_agent/backend_api/session/session.py | Honzys/clearml-agent | f58071fc74e2df9bdba8ca91569e0bfd70ad4f46 | [
"Apache-2.0"
] | 37 | 2019-11-04T13:44:09.000Z | 2020-12-10T19:34:59.000Z | clearml_agent/backend_api/session/session.py | Honzys/clearml-agent | f58071fc74e2df9bdba8ca91569e0bfd70ad4f46 | [
"Apache-2.0"
] | 29 | 2019-10-29T13:06:29.000Z | 2020-12-19T08:09:24.000Z |
import json as json_lib
import os
import sys
import types
from socket import gethostname
from typing import Optional
import jwt
import requests
import six
from pyhocon import ConfigTree, ConfigFactory
from requests.auth import HTTPBasicAuth
from six.moves.urllib.parse import urlparse, urlunparse
from .callresult import CallResult
from .defs import ENV_VERBOSE, ENV_HOST, ENV_ACCESS_KEY, ENV_SECRET_KEY, ENV_WEB_HOST, ENV_FILES_HOST, ENV_AUTH_TOKEN, \
ENV_NO_DEFAULT_SERVER, ENV_DISABLE_VAULT_SUPPORT, ENV_INITIAL_CONNECT_RETRY_OVERRIDE, ENV_API_DEFAULT_REQ_METHOD
from .request import Request, BatchRequest
from .token_manager import TokenManager
from ..config import load
from ..utils import get_http_session_with_retry, urllib_log_warning_setup
from ...backend_config.environment import backward_compatibility_support
from ...version import __version__
class LoginError(Exception):
    """Raised when obtaining/refreshing a login token from the API server fails."""
class MaxRequestSizeError(Exception):
    """Raised when a batched request payload cannot fit the server's size limit."""
class Session(TokenManager):
    """ ClearML API Session class.

    Resolves credentials (explicit args / environment / configuration file),
    refreshes login tokens via the TokenManager base class, and dispatches
    raw and typed requests against the ClearML API server, with retry on
    transient HTTP failures and support for batched (json-lines) requests.
    """
    _AUTHORIZATION_HEADER = "Authorization"
    # custom headers are sent under both the ClearML and legacy Trains names
    _WORKER_HEADER = ("X-ClearML-Worker", "X-Trains-Worker", )
    _ASYNC_HEADER = ("X-ClearML-Async", "X-Trains-Async", )
    _CLIENT_HEADER = ("X-ClearML-Agent", "X-Trains-Agent", )
    _async_status_code = 202
    _session_requests = 0
    # (connect, read) timeout tuples, in seconds, passed to requests
    _session_initial_timeout = (3.0, 10.)
    _session_timeout = (10.0, 30.)
    _session_initial_retry_connect_override = 4
    # payloads larger than this use the longer write timeout below
    _write_session_data_size = 15000
    _write_session_timeout = (30.0, 30.)
    api_version = '2.1'
    feature_set = 'basic'
    # demo-server defaults, used when no host/credentials are configured
    default_host = "https://demoapi.demo.clear.ml"
    default_web = "https://demoapp.demo.clear.ml"
    default_files = "https://demofiles.demo.clear.ml"
    default_key = "EGRTCO8JMSIGI6S39GTP43NFWXDQOW"
    default_secret = "x!XTov_G-#vspE*Y(h$Anm&DIc5Ou-F)jsl$PdOyj5wG1&E!Z8"
    # TODO: add requests.codes.gateway_timeout once we support async commits
    _retry_codes = [
        requests.codes.bad_gateway,
        requests.codes.service_unavailable,
        requests.codes.bandwidth_limit_exceeded,
        requests.codes.too_many_requests,
    ]
    @property
    def access_key(self):
        return self.__access_key
    @property
    def secret_key(self):
        return self.__secret_key
    @property
    def host(self):
        return self.__host
    @property
    def worker(self):
        return self.__worker
    def __init__(
        self,
        worker=None,
        api_key=None,
        secret_key=None,
        host=None,
        logger=None,
        verbose=None,
        initialize_logging=True,
        client=None,
        config=None,
        http_retries_config=None,
        **kwargs
    ):
        # add backward compatibility support for old environment variables
        backward_compatibility_support()
        if config is not None:
            self.config = config
        else:
            self.config = load()
        if initialize_logging:
            self.config.initialize_logging(debug=kwargs.get('debug', False))
        super(Session, self).__init__(config=config, **kwargs)
        self._verbose = verbose if verbose is not None else ENV_VERBOSE.get()
        self._logger = logger
        # NOTE(review): only initialized here; actual token storage is handled
        # by the TokenManager base class (see set_auth_token/_set_token)
        self.__auth_token = None
        # a token supplied via the environment takes precedence over key/secret
        if ENV_AUTH_TOKEN.get(
            value_cb=lambda key, value: print("Using environment access token {}=********".format(key))
        ):
            self.set_auth_token(ENV_AUTH_TOKEN.get())
        else:
            # credential resolution order: explicit arg > environment > config > demo default
            self.__access_key = api_key or ENV_ACCESS_KEY.get(
                default=(self.config.get("api.credentials.access_key", None) or self.default_key),
                value_cb=lambda key, value: print("Using environment access key {}={}".format(key, value))
            )
            if not self.access_key:
                raise ValueError(
                    "Missing access_key. Please set in configuration file or pass in session init."
                )
            self.__secret_key = secret_key or ENV_SECRET_KEY.get(
                default=(self.config.get("api.credentials.secret_key", None) or self.default_secret),
                value_cb=lambda key, value: print("Using environment secret key {}=********".format(key))
            )
            if not self.secret_key:
                raise ValueError(
                    "Missing secret_key. Please set in configuration file or pass in session init."
                )
            if self.access_key == self.default_key and self.secret_key == self.default_secret:
                print("Using built-in ClearML default key/secret")
        host = host or self.get_api_server_host(config=self.config)
        if not host:
            raise ValueError(
                "Could not find host server definition "
                "(missing `~/clearml.conf` or Environment CLEARML_API_HOST)\n"
                "To get started with ClearML: setup your own `clearml-server`, "
                "or create a free account at https://app.clear.ml and run `clearml-agent init`"
            )
        self.__host = host.strip("/")
        self.__worker = worker or gethostname()
        self.__max_req_size = self.config.get("api.http.max_req_size", None)
        if not self.__max_req_size:
            raise ValueError("missing max request size")
        self.client = client or "api-{}".format(__version__)
        # limit the reconnect retries, so we get an error if we are starting the session
        _, self.__http_session = self._setup_session(
            http_retries_config,
            initial_session=True,
            default_initial_connect_override=(False if kwargs.get("command") == "execute" else None)
        )
        # try to connect with the server
        self.refresh_token()
        # create the default session with many retries
        http_retries_config, self.__http_session = self._setup_session(http_retries_config)
        # update api version from server response
        try:
            # the login token embeds the server's api version / feature set
            token_dict = TokenManager.get_decoded_token(self.token, verify=False)
            api_version = token_dict.get('api_version')
            if not api_version:
                api_version = '2.2' if token_dict.get('env', '') == 'prod' else Session.api_version
            Session.api_version = str(api_version)
            Session.feature_set = str(token_dict.get('feature_set', self.feature_set) or "basic")
        except (jwt.DecodeError, ValueError):
            pass
        # now setup the session reporting, so one consecutive retries will show warning
        # we do that here, so if we have problems authenticating, we see them immediately
        # notice: this is across the board warning omission
        urllib_log_warning_setup(total_retries=http_retries_config.get('total', 0), display_warning_after=3)
    def _setup_session(self, http_retries_config, initial_session=False, default_initial_connect_override=None):
        # type: (dict, bool, Optional[bool]) -> (dict, requests.Session)
        """Resolve the retry configuration and build a matching requests session.

        For the initial session the connect-retry count may be capped (or
        overridden via environment) so startup fails fast instead of retrying
        indefinitely. Returns (http_retries_config, session).
        """
        http_retries_config = http_retries_config or self.config.get(
            "api.http.retries", ConfigTree()
        ).as_plain_ordered_dict()
        http_retries_config["status_forcelist"] = self._retry_codes
        if initial_session:
            kwargs = {} if default_initial_connect_override is None else {
                "default": default_initial_connect_override
            }
            if ENV_INITIAL_CONNECT_RETRY_OVERRIDE.get(**kwargs):
                connect_retries = self._session_initial_retry_connect_override
                try:
                    # the env var may hold a boolean (use default cap) or a number
                    value = ENV_INITIAL_CONNECT_RETRY_OVERRIDE.get(converter=str)
                    if not isinstance(value, bool):
                        connect_retries = abs(int(value))
                except ValueError:
                    pass
                http_retries_config = dict(**http_retries_config)
                http_retries_config['connect'] = connect_retries
        return http_retries_config, get_http_session_with_retry(config=self.config or None, **http_retries_config)
    def load_vaults(self):
        """Fetch the enabled config-type vaults from the server and overlay
        them onto the local configuration.

        No-op for servers older than API 2.15, the 'basic' feature set, or
        when vault support is disabled via environment.
        """
        if not self.check_min_api_version("2.15") or self.feature_set == "basic":
            return
        if ENV_DISABLE_VAULT_SUPPORT.get():
            print("Vault support is disabled")
            return
        def parse(vault):
            # noinspection PyBroadException
            try:
                d = vault.get('data', None)
                if d:
                    r = ConfigFactory.parse_string(d)
                    if isinstance(r, (ConfigTree, dict)):
                        return r
            except Exception as e:
                print("Failed parsing vault {}: {}".format(vault.get("description", "<unknown>"), e))
        # noinspection PyBroadException
        try:
            res = self.send_request("users", "get_vaults", json={"enabled": True, "types": ["config"]})
            if res.ok:
                vaults = res.json().get("data", {}).get("vaults", [])
                data = list(filter(None, map(parse, vaults)))
                if data:
                    self.config.set_overrides(*data)
            elif res.status_code != 404:
                # 404 simply means the endpoint/feature is unavailable
                raise Exception(res.json().get("meta", {}).get("result_msg", res.text))
        except Exception as ex:
            # best effort: vault failures must not prevent the session from working
            print("Failed getting vaults: {}".format(ex))
    def verify_feature_set(self, feature_set):
        """Raise ValueError unless the server's feature set is in `feature_set`
        (a string or list of strings)."""
        if isinstance(feature_set, str):
            feature_set = [feature_set]
        if self.feature_set not in feature_set:
            raise ValueError('ClearML-server does not support requested feature set {}'.format(feature_set))
    def _send_request(
        self,
        service,
        action,
        version=None,
        method="get",
        headers=None,
        auth=None,
        data=None,
        json=None,
        refresh_token_if_unauthorized=True,
    ):
        """ Internal implementation for making a raw API request.
        - Constructs the api endpoint name
        - Injects the worker id into the headers
        - Allows custom authorization using a requests auth object
        - Intercepts `Unauthorized` responses and automatically attempts to refresh the session token once in this
          case (only once). This is done since permissions are embedded in the token, and addresses a case where
          server-side permissions have changed but are not reflected in the current token. Refreshing the token will
          generate a token with the updated permissions.
        """
        host = self.host
        headers = headers.copy() if headers else {}
        for h in self._WORKER_HEADER:
            headers[h] = self.worker
        for h in self._CLIENT_HEADER:
            headers[h] = self.client
        token_refreshed_on_error = False
        url = (
            "{host}/v{version}/{service}.{action}"
            if version
            else "{host}/{service}.{action}"
        ).format(**locals())
        while True:
            # pick a timeout: long for large writes, short for the very first
            # request of the session, default otherwise
            if data and len(data) > self._write_session_data_size:
                timeout = self._write_session_timeout
            elif self._session_requests < 1:
                timeout = self._session_initial_timeout
            else:
                timeout = self._session_timeout
            res = self.__http_session.request(
                method, url, headers=headers, auth=auth, data=data, json=json, timeout=timeout)
            if (
                refresh_token_if_unauthorized
                and res.status_code == requests.codes.unauthorized
                and not token_refreshed_on_error
            ):
                # it seems we're unauthorized, so we'll try to refresh our token once in case permissions changed since
                # the last time we got the token, and try again
                self.refresh_token()
                token_refreshed_on_error = True
                # try again
                continue
            if (
                res.status_code == requests.codes.service_unavailable
                and self.config.get("api.http.wait_on_maintenance_forever", True)
            ):
                self._logger.warning(
                    "Service unavailable: {} is undergoing maintenance, retrying...".format(
                        host
                    )
                )
                continue
            break
        self._session_requests += 1
        return res
    def add_auth_headers(self, headers):
        """Insert the Bearer authorization token into `headers` and return it."""
        headers[self._AUTHORIZATION_HEADER] = "Bearer {}".format(self.token)
        return headers
    def set_auth_token(self, auth_token):
        """Use a fixed authentication token instead of key/secret credentials."""
        self.__access_key = self.__secret_key = None
        self._set_token(auth_token)
    def send_request(
        self,
        service,
        action,
        version=None,
        method="get",
        headers=None,
        data=None,
        json=None,
        async_enable=False,
    ):
        """
        Send a raw API request.
        :param service: service name
        :param action: action name
        :param version: version number (default is the preconfigured api version)
        :param method: method type (default is 'get')
        :param headers: request headers (authorization and content type headers will be automatically added)
        :param json: json to send in the request body (jsonable object or builtin types construct. if used,
                     content type will be application/json)
        :param data: Dictionary, bytes, or file-like object to send in the request body
        :param async_enable: whether request is asynchronous
        :return: requests Response instance
        """
        headers = self.add_auth_headers(
            headers.copy() if headers else {}
        )
        if async_enable:
            for h in self._ASYNC_HEADER:
                headers[h] = "1"
        return self._send_request(
            service=service,
            action=action,
            version=version,
            method=method,
            headers=headers,
            data=data,
            json=json,
        )
    def send_request_batch(
        self,
        service,
        action,
        version=None,
        headers=None,
        data=None,
        json=None,
        method="get",
    ):
        """
        Send a raw batch API request. Batch requests always use application/json-lines content type.
        :param service: service name
        :param action: action name
        :param version: version number (default is the preconfigured api version)
        :param headers: request headers (authorization and content type headers will be automatically added)
        :param json: iterable of json items (batched items, jsonable objects or builtin types constructs). These will
                     be sent as a multi-line payload in the request body.
        :param data: iterable of bytes objects (batched items). These will be sent as a multi-line payload in the
                     request body.
        :param method: HTTP method
        :return: list of requests Response instances, one per sent chunk (stops early on a non-200 response)
        :raises MaxRequestSizeError: if a single item exceeds the maximum request size
        """
        if not all(
            isinstance(x, (list, tuple, type(None), types.GeneratorType))
            for x in (data, json)
        ):
            raise ValueError("Expecting list, tuple or generator in 'data' or 'json'")
        if not data and not json:
            raise ValueError(
                "Missing data (data or json), batch requests are meaningless without it."
            )
        headers = headers.copy() if headers else {}
        headers["Content-Type"] = "application/json-lines"
        if data:
            req_data = "\n".join(data)
        else:
            req_data = "\n".join(json_lib.dumps(x) for x in json)
        # split the json-lines payload into chunks of at most __max_req_size,
        # always cutting on a newline boundary so each chunk is self-contained
        cur = 0
        results = []
        while True:
            size = self.__max_req_size
            # NOTE: "slice" shadows the builtin within this loop
            slice = req_data[cur: cur + size]
            if not slice:
                break
            if len(slice) < size:
                # this is the remainder, no need to search for newline
                pass
            elif slice[-1] != "\n":
                # search for the last newline in order to send a coherent request
                size = slice.rfind("\n") + 1
                # readjust the slice
                slice = req_data[cur: cur + size]
                if not slice:
                    raise MaxRequestSizeError('Error: {}.{} request exceeds limit {} > {} bytes'.format(
                        service, action, len(req_data), self.__max_req_size))
            res = self.send_request(
                method=method,
                service=service,
                action=action,
                data=slice,
                headers=headers,
                version=version,
            )
            results.append(res)
            if res.status_code != requests.codes.ok:
                break
            cur += size
        return results
    def validate_request(self, req_obj):
        """ Validate an API request against the current version and the request's schema """
        try:
            # make sure we're using a compatible version for this request
            # validate the request (checks required fields and specific field version restrictions)
            validate = req_obj.validate
        except AttributeError:
            raise TypeError(
                '"req_obj" parameter must be an backend_api.session.Request object'
            )
        validate()
    def send_async(self, req_obj):
        """
        Asynchronously sends an API request using a request object.
        :param req_obj: The request object
        :type req_obj: Request
        :return: CallResult object containing the raw response, response metadata and parsed response object.
        """
        return self.send(req_obj=req_obj, async_enable=True)
    def send(self, req_obj, async_enable=False, headers=None):
        """
        Sends an API request using a request object.
        :param req_obj: The request object
        :type req_obj: Request
        :param async_enable: Request this method be executed in an asynchronous manner
        :param headers: Additional headers to send with request
        :return: CallResult object containing the raw response, response metadata and parsed response object.
        """
        self.validate_request(req_obj)
        if isinstance(req_obj, BatchRequest):
            # TODO: support async for batch requests as well
            if async_enable:
                raise NotImplementedError(
                    "Async behavior is currently not implemented for batch requests"
                )
            json_data = req_obj.get_json()
            res = self.send_request_batch(
                service=req_obj._service,
                action=req_obj._action,
                version=req_obj._version,
                json=json_data,
                method=req_obj._method,
                headers=headers,
            )
            # TODO: handle multiple results in this case
            # for now: surface the first failing response, or the first one if all succeeded
            try:
                res = next(r for r in res if r.status_code != 200)
            except StopIteration:
                # all are 200
                res = res[0]
        else:
            res = self.send_request(
                service=req_obj._service,
                action=req_obj._action,
                version=req_obj._version,
                json=req_obj.to_dict(),
                method=req_obj._method,
                async_enable=async_enable,
                headers=headers,
            )
        call_result = CallResult.from_result(
            res=res,
            request_cls=req_obj.__class__,
            logger=self._logger,
            service=req_obj._service,
            action=req_obj._action,
            session=self,
        )
        return call_result
    @classmethod
    def get_api_server_host(cls, config=None):
        """Return the API server URL from environment/config (or the demo
        default, unless disabled via ENV_NO_DEFAULT_SERVER)."""
        if not config:
            return None
        default = config.get("api.api_server", None) or config.get("api.host", None)
        if not ENV_NO_DEFAULT_SERVER.get():
            default = default or cls.default_host
        return ENV_HOST.get(default=default)
    @classmethod
    def get_app_server_host(cls, config=None):
        """Return the web application server URL: explicit config/environment
        if set, otherwise derived from the API server URL by naming/port
        convention."""
        if not config:
            return None
        # get from config/environment
        web_host = ENV_WEB_HOST.get(default=config.get("api.web_server", None))
        if web_host:
            return web_host
        # return default
        host = cls.get_api_server_host(config)
        if host == cls.default_host:
            return cls.default_web
        # compose ourselves
        if '://demoapi.' in host:
            return host.replace('://demoapi.', '://demoapp.', 1)
        if '://api.' in host:
            return host.replace('://api.', '://app.', 1)
        parsed = urlparse(host)
        if parsed.port == 8008:
            return host.replace(':8008', ':8080', 1)
        raise ValueError('Could not detect ClearML web application server')
    @classmethod
    def get_files_server_host(cls, config=None):
        """Return the files server URL: explicit config/environment if set,
        otherwise derived from the web application server URL by naming/port
        convention."""
        if not config:
            return None
        # get from config/environment
        files_host = ENV_FILES_HOST.get(default=(config.get("api.files_server", None)))
        if files_host:
            return files_host
        # return default
        host = cls.get_api_server_host(config)
        if host == cls.default_host:
            return cls.default_files
        # compose ourselves
        app_host = cls.get_app_server_host(config)
        parsed = urlparse(app_host)
        if parsed.port:
            parsed = parsed._replace(netloc=parsed.netloc.replace(':%d' % parsed.port, ':8081', 1))
        elif parsed.netloc.startswith('demoapp.'):
            parsed = parsed._replace(netloc=parsed.netloc.replace('demoapp.', 'demofiles.', 1))
        elif parsed.netloc.startswith('app.'):
            parsed = parsed._replace(netloc=parsed.netloc.replace('app.', 'files.', 1))
        else:
            parsed = parsed._replace(netloc=parsed.netloc + ':8081')
        return urlunparse(parsed)
    @classmethod
    def check_min_api_version(cls, min_api_version):
        """
        Return True if Session.api_version is greater or equal >= to min_api_version
        """
        def version_tuple(v):
            # pad to three components so e.g. "2.1" compares as (2, 1, 0)
            v = tuple(map(int, (v.split("."))))
            return v + (0,) * max(0, 3 - len(v))
        return version_tuple(cls.api_version) >= version_tuple(str(min_api_version))
    def _do_refresh_token(self, current_token, exp=None):
        """ TokenManager abstract method implementation.
        Here we ignore the old token and simply obtain a new token.
        :raises LoginError: on authentication failure or unexpected response
        :raises ValueError: when the configured api_server looks misconfigured
        """
        verbose = self._verbose and self._logger
        if verbose:
            self._logger.info(
                "Refreshing token from {} (access_key={}, exp={})".format(
                    self.host, self.access_key, exp
                )
            )
        # prefer key/secret basic auth; fall back to bearer auth with the
        # current token when no credentials are available
        auth = None
        headers = None
        if self.access_key and self.secret_key:
            auth = HTTPBasicAuth(self.access_key, self.secret_key)
        elif current_token:
            headers = dict(Authorization="Bearer {}".format(current_token))
        res = None
        try:
            data = {"expiration_sec": exp} if exp else {}
            res = self._send_request(
                method=ENV_API_DEFAULT_REQ_METHOD.get(default="get"),
                service="auth",
                action="login",
                auth=auth,
                json=data,
                headers=headers,
                refresh_token_if_unauthorized=False,
            )
            try:
                resp = res.json()
            except ValueError:
                resp = {}
            if res.status_code != 200:
                msg = resp.get("meta", {}).get("result_msg", res.reason)
                raise LoginError(
                    "Failed getting token (error {} from {}): {}".format(
                        res.status_code, self.host, msg
                    )
                )
            if verbose:
                self._logger.info("Received new token")
            token = resp["data"]["token"]
            if ENV_AUTH_TOKEN.get():
                # keep the environment variable in sync with the fresh token
                os.environ[ENV_AUTH_TOKEN.key] = token
            return token
        except LoginError:
            six.reraise(*sys.exc_info())
        except KeyError as ex:
            # check if this is a misconfigured api server (getting 200 without the data section)
            if res and res.status_code == 200:
                raise ValueError('It seems *api_server* is misconfigured. '
                                 'Is this the ClearML API server {} ?'.format(self.get_api_server_host()))
            else:
                raise LoginError("Response data mismatch: No 'token' in 'data' value from res, receive : {}, "
                                 "exception: {}".format(res, ex))
        except requests.ConnectionError as ex:
            raise ValueError('Connection Error: it seems *api_server* is misconfigured. '
                             'Is this the ClearML API server {} ?'.format('/'.join(ex.request.url.split('/')[:3])))
        except Exception as ex:
            raise LoginError('Unrecognized Authentication Error: {} {}'.format(type(ex), ex))
    def __str__(self):
        # mask all but the first five characters of the secret key
        return "{self.__class__.__name__}[{self.host}, {self.access_key}/{secret_key}]".format(
            self=self, secret_key=self.secret_key[:5] + "*" * (len(self.secret_key) - 5)
        )
| 38.313725 | 120 | 0.590465 |
ace78a9decf369c01c2c45cdcdf5e9c211f7d850 | 961 | py | Python | db.py | hornetmadness/pfsence-voucher-printer | 5dd2a36f7f44480875131ca1724072d4a4444d4a | [
"MIT"
] | 2 | 2020-07-04T23:44:55.000Z | 2021-06-03T20:05:12.000Z | db.py | hornetmadness/pfsence-voucher-printer | 5dd2a36f7f44480875131ca1724072d4a4444d4a | [
"MIT"
] | null | null | null | db.py | hornetmadness/pfsence-voucher-printer | 5dd2a36f7f44480875131ca1724072d4a4444d4a | [
"MIT"
] | null | null | null | import config
import arrow
from sqlalchemy_utils import create_database, database_exists
from sqlalchemy import create_engine, Column, Integer, String, DateTime, Text
from sqlalchemy import func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
def connect_db():
    """Create the configured database if it does not exist, return an engine."""
    if not database_exists(config.db_string):
        create_database(config.db_string)
    return create_engine(config.db_string, echo=config.db_debug)
# Declarative base shared by all ORM models in this module.
Base = declarative_base()
class Vouchers(Base):
    """ORM model for one pfSense captive-portal voucher row."""
    __tablename__ = "vouchers"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Voucher code as issued by pfSense -- presumably unique per voucher;
    # TODO confirm (no unique constraint is declared here).
    vid = Column(String, nullable=False)
    # Voucher duration -- units not shown here (assumed minutes); verify
    # against the caller that writes this column.
    time = Column(Integer,nullable=False)
    date_added = Column(DateTime, nullable = False, default=func.now())
    # NOTE(review): date_disabled defaults to creation time even while the
    # voucher is still enabled -- confirm this is intentional.
    date_disabled = Column(DateTime, nullable = False, default=func.now())
    # Integer used as a boolean flag: 0 = active, non-zero = disabled.
    disabled = Column(Integer, default = 0, nullable = False)
# Module import side effect: connect to the database and make sure the
# vouchers table exists (checkfirst avoids an error if it already does).
engine = connect_db()
Vouchers.__table__.create(bind=engine, checkfirst=True)
| 32.033333 | 77 | 0.780437 |
ace78bd7fac5f6bc37ef81d04b6b7e9dfa9493c5 | 749 | py | Python | components/google-cloud/tests/aiplatform/test_remote_runner.py | algs/pipelines | ab63956f3a61d4d11b27ac26f097e1784588fed9 | [
"Apache-2.0"
] | 102 | 2019-10-23T20:35:41.000Z | 2022-03-27T10:28:56.000Z | components/google-cloud/tests/aiplatform/test_remote_runner.py | algs/pipelines | ab63956f3a61d4d11b27ac26f097e1784588fed9 | [
"Apache-2.0"
] | 891 | 2019-10-24T04:08:17.000Z | 2022-03-31T22:45:40.000Z | components/google-cloud/tests/aiplatform/test_remote_runner.py | algs/pipelines | ab63956f3a61d4d11b27ac26f097e1784588fed9 | [
"Apache-2.0"
] | 85 | 2019-10-24T04:04:36.000Z | 2022-03-01T10:52:57.000Z | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test remote_runner module."""
import mock
import unittest
class RemoteRunnerTests(unittest.TestCase):
    """Placeholder suite for the remote_runner module; real tests to follow."""

    def test_placeholder(self):
        """No-op test that keeps the test module importable/discoverable."""
        pass
| 32.565217 | 74 | 0.757009 |
ace78c5ce5b30540d6f5cbcb7693907bb009882d | 4,679 | py | Python | automon/integrations/nmap/output.py | TheShellLand/automonisaur | b5f304a44449b8664c93d8a8a3c3cf2d73aa0ce9 | [
"MIT"
] | 2 | 2021-09-15T18:35:44.000Z | 2022-01-18T05:36:54.000Z | automon/integrations/nmap/output.py | TheShellLand/automonisaur | b5f304a44449b8664c93d8a8a3c3cf2d73aa0ce9 | [
"MIT"
] | 16 | 2021-08-29T22:51:53.000Z | 2022-03-09T16:08:19.000Z | automon/integrations/nmap/output.py | TheShellLand/automonisaur | b5f304a44449b8664c93d8a8a3c3cf2d73aa0ce9 | [
"MIT"
] | null | null | null | import mmap
import xmltodict
import pandas as pd
from automon.log import Logging
from pandas import DataFrame, Series
from automon.helpers.runner import Run
from automon.helpers.datascience import Pandas
class NmapResult(object):
    """Parse an nmap XML output file into pandas structures.

    Parsing happens eagerly in ``__init__``; afterwards ``self.host`` holds
    the per-host DataFrame (or None when the scan saw no hosts) and run
    summary values (command, timings, host counts) are plain attributes.
    """

    def __init__(self, file: str = None, run: Run = None, **kwargs):
        self._log = Logging(name=NmapResult.__name__, level=Logging.INFO)
        self.file = file
        self._run = run
        if file:
            # Memory-map the XML output and parse it into nested dicts.
            with open(self.file, 'r+') as f:
                mm = mmap.mmap(f.fileno(), 0)
                xml = xmltodict.parse(mm)
            # NOTE(review): the same data is materialized twice (self.df and
            # the local df) -- presumably so self.df keeps the raw parse
            # while df is consumed below; confirm before deduplicating.
            self.df = Pandas().DataFrame(data=xml, **kwargs)
            df = Pandas().DataFrame(data=xml, **kwargs)
            if 'host' in df.nmaprun:
                self.host = self._normalize_ports(df)
            else:
                # No hosts responded in this scan.
                self.host = None
            if 'hosthint' in df.nmaprun:
                self._hosthint = pd.json_normalize(df.loc['hosthint'])
            else:
                self._hosthint = None
            self._runstats = pd.json_normalize(df.loc['runstats'])
            self._scaninfo = pd.json_normalize(df.loc['scaninfo'])
            self._verbose = pd.json_normalize(df.loc['verbose'])
            # normalize output data
            self.command = df.loc['@args'][0]
            self.cmd = self.command
            self.time_start = df.loc['@start'][0]
            self.time_startstr = df.loc['@startstr'][0]
            self.hosts_up = self._runstats.loc[:, 'hosts.@up'][0]
            self.hosts_down = self._runstats.loc[:, 'hosts.@down'][0]
            self.hosts_total = self._runstats.loc[:, 'hosts.@total'][0]
            self.version = df.loc['@version'].iloc[0]
            self.elapsed = self._runstats.loc[:, 'finished.@elapsed'][0]
            self.summary = self._runstats.loc[:, 'finished.@summary'][0]
            self.time_finished = self._runstats.loc[:, 'finished.@time'][0]
            self._log.info(f'hosts up: {self.hosts_up}')
            self._log.info(f'hosts down: {self.hosts_down}')
            # self._log.info(f'hosts total: {self.hosts_total}')
            self._log.info(f'{self.summary}')
            self._log.info(f'finished processing output ({round(df.memory_usage().sum() / 1024, 2)} Kb)')

    def ports(self, df: DataFrame = None):
        # NOTE(review): `if df:` raises ValueError when df is a DataFrame
        # (DataFrame truth value is ambiguous) -- likely meant
        # `if df is not None:`. Left as-is; confirm before changing.
        if df:
            return self._get_ports(df)
        return self._get_ports()

    def _get_ports(self, df: DataFrame = None) -> DataFrame or False:
        # Returns the per-host Series of port DataFrames, or False when the
        # scan recorded no port data.
        if df is None:
            # NOTE(review): when df is None AND self.host is None, df_host
            # is never bound and the membership test below raises NameError.
            if self.host is not None:
                df_host = self.host
        else:
            df_host = pd.json_normalize(df.nmaprun.loc['host'])
        if 'ports.port' in df_host:
            s_ports = df_host.loc[:, 'ports.port']
            # Each cell holds a list of port dicts; flatten each into a frame.
            s_ports = s_ports.apply(lambda _list: pd.json_normalize(_list))
            return s_ports
        return False

    def _normalize_ports(self, df):
        # Flatten the 'host' section and add one column per scanned port
        # whose values are each host's state string for that port.
        df_host = pd.json_normalize(df.nmaprun.loc['host'])
        if 'ports.port' in df_host:
            s_ports = df_host.loc[:, 'ports.port']
            s_ports = s_ports.apply(lambda _list: pd.json_normalize(_list))
            # Port ids are taken from the first host's rows -- assumes every
            # host was scanned with the same port list; TODO confirm.
            scanned_ports = s_ports.apply(lambda x: x.loc[:, '@portid'])
            scanned_ports = scanned_ports.iloc[0].to_list()
            i = 0
            while i < s_ports.size:
                # get series index
                # use index to save port result to
                s_result = s_ports.iloc[i]
                for port in scanned_ports:
                    _row = s_result[s_result.loc[:, '@portid'] == port]
                    status = _row.loc[:, 'state.@state']
                    # Re-index so the state lines up with host row i.
                    status.index = [i]
                    if port not in df_host:
                        df_host[port] = status
                    else:
                        df_host[port].update(status)
                self._log.debug(f"{df_host.loc[:, ['address.@addr'] + [x for x in scanned_ports if x in df_host]]}")
                i += 1
        return df_host

    def hostnames(self):
        # Resolved hostnames column, or False when nmap recorded none.
        if 'hostnames.hostname.@name' in self.host:
            return self.host.loc[:, 'hostnames.hostname.@name']
        return False

    def ips(self):
        # Scanned host addresses column, or False when absent.
        if 'address.@addr' in self.host:
            return self.host.loc[:, 'address.@addr']
        return False

    def __repr__(self):
        # Summary line plus the parsed data's in-memory footprint
        # (Gb above 1 Gb, otherwise Kb).
        msg = f'{self.summary} '
        if self.df.memory_usage().sum() / 1024 / 1024 / 1024 > 1:
            msg += f'({round(self.df.memory_usage().sum() / 1024 / 1024 / 1024, 2)} Gb)'
        else:
            msg += f'({round(self.df.memory_usage().sum() / 1024, 2)} Kb)'
        return msg

    def __len__(self):
        # "Length" is the memory footprint in bytes, not a row count.
        return int(self.df.memory_usage().sum())
| 33.661871 | 120 | 0.546271 |
ace78d080f7d81684ba0f29abea5f0e5cfe18383 | 23,764 | py | Python | google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py | sararob/python-aiplatform | e64cd5588848a4dcd9117ff905e9569576541b69 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py | sararob/python-aiplatform | e64cd5588848a4dcd9117ff905e9569576541b69 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py | sararob/python-aiplatform | e64cd5588848a4dcd9117ff905e9569576541b69 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1.types import migration_service
from google.cloud.location import locations_pb2 # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 # type: ignore
from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
class MigrationServiceGrpcTransport(MigrationServiceTransport):
"""gRPC backend transport for MigrationService.
A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def search_migratable_resources(
self,
) -> Callable[
[migration_service.SearchMigratableResourcesRequest],
migration_service.SearchMigratableResourcesResponse,
]:
r"""Return a callable for the search migratable resources method over gRPC.
Searches all of the resources in
automl.googleapis.com, datalabeling.googleapis.com and
ml.googleapis.com that can be migrated to Vertex AI's
given location.
Returns:
Callable[[~.SearchMigratableResourcesRequest],
~.SearchMigratableResourcesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_migratable_resources" not in self._stubs:
self._stubs["search_migratable_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources",
request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
)
return self._stubs["search_migratable_resources"]
@property
def batch_migrate_resources(
self,
) -> Callable[
[migration_service.BatchMigrateResourcesRequest], operations_pb2.Operation
]:
r"""Return a callable for the batch migrate resources method over gRPC.
Batch migrates resources from ml.googleapis.com,
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
Returns:
Callable[[~.BatchMigrateResourcesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_migrate_resources" not in self._stubs:
self._stubs["batch_migrate_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources",
request_serializer=migration_service.BatchMigrateResourcesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_migrate_resources"]
def close(self):
self.grpc_channel.close()
@property
def delete_operation(
self,
) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
r"""Return a callable for the delete_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_operation" not in self._stubs:
self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/DeleteOperation",
request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["delete_operation"]
@property
def cancel_operation(
self,
) -> Callable[[operations_pb2.CancelOperationRequest], None]:
r"""Return a callable for the cancel_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_operation" not in self._stubs:
self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/CancelOperation",
request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["cancel_operation"]
@property
def wait_operation(
self,
) -> Callable[[operations_pb2.WaitOperationRequest], None]:
r"""Return a callable for the wait_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_operation" not in self._stubs:
self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/WaitOperation",
request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["wait_operation"]
@property
def get_operation(
self,
) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
r"""Return a callable for the get_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_operation" not in self._stubs:
self._stubs["get_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/GetOperation",
request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["get_operation"]
@property
def list_operations(
self,
) -> Callable[
[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
]:
r"""Return a callable for the list_operations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_operations" not in self._stubs:
self._stubs["list_operations"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/ListOperations",
request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
response_deserializer=operations_pb2.ListOperationsResponse.FromString,
)
return self._stubs["list_operations"]
@property
def list_locations(
self,
) -> Callable[
[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse
]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_locations" not in self._stubs:
self._stubs["list_locations"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/ListLocations",
request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
response_deserializer=locations_pb2.ListLocationsResponse.FromString,
)
return self._stubs["list_locations"]
@property
def get_location(
self,
) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_location" not in self._stubs:
self._stubs["get_location"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/GetLocation",
request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
response_deserializer=locations_pb2.Location.FromString,
)
return self._stubs["get_location"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the IAM access control policy on the specified
function. Replaces any existing policy.
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the IAM access control policy for a function.
Returns an empty policy if the function exists and does
not have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
iam_policy_pb2.TestIamPermissionsResponse,
]:
r"""Return a callable for the test iam permissions method over gRPC.
Tests the specified permissions against the IAM access control
policy for a function. If the function does not exist, this will
return an empty set of permissions, not a NOT_FOUND error.
Returns:
Callable[[~.TestIamPermissionsRequest],
~.TestIamPermissionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
@property
def kind(self) -> str:
return "grpc"
__all__ = ("MigrationServiceGrpcTransport",)
| 45.178707 | 102 | 0.649386 |
ace78d2ac1196fe1b78a7bb30ad2e567f2408f7f | 1,308 | py | Python | test/compiler/__init__.py | AyishaR/deepC | 1dc9707ef5ca9000fc13c3da7f1129685a83b494 | [
"Apache-2.0"
] | 223 | 2020-04-15T20:34:33.000Z | 2022-03-28T05:41:49.000Z | test/compiler/__init__.py | AyishaR/deepC | 1dc9707ef5ca9000fc13c3da7f1129685a83b494 | [
"Apache-2.0"
] | 42 | 2019-07-29T15:57:12.000Z | 2020-04-08T15:12:48.000Z | test/compiler/__init__.py | AyishaR/deepC | 1dc9707ef5ca9000fc13c3da7f1129685a83b494 | [
"Apache-2.0"
] | 58 | 2019-07-22T11:46:19.000Z | 2020-04-09T22:56:41.000Z | import os, sys, importlib
import unittest
# def load_tests(loader, tests, pattern):
def load_tests(loader, tests):
    """Append all test cases listed in compiler/passingTests.txt to *tests*.

    Each line of the manifest names a module; the module is imported from
    the ``compiler`` package and its ``<module>Test`` class is loaded via
    *loader*. Returns None; silently bails out (after printing a notice)
    when the manifest file does not exist.
    """
    test_file = "compiler/passingTests.txt"
    if not os.path.isfile(test_file):
        print("no test file in ", os.getcwd() + "/compiler")
        return
    # 'with' guarantees the handle is closed even if an import below raises
    # (the original leaked the file handle on exception).
    with open(test_file, "r") as test_fp:
        for test in test_fp.readlines():
            module_name = test.strip().split(".")[0]
            class_name = module_name + "Test"
            module = importlib.import_module("." + module_name, package="compiler")
            class_ = getattr(module, class_name)
            tests.append(loader.loadTestsFromTestCase(class_))
    return
def load_test(loader, test, tests):
    """Append one named test module's ``<module>Test`` class to *tests*.

    *test* is a file name such as ``"foo.py"`` expected under the
    ``compiler`` directory. Returns None; silently bails out (after
    printing a notice) when the file does not exist.
    """
    test_file = "compiler/" + test
    if not os.path.isfile(test_file):
        print("no test file in ", os.getcwd() + "/compiler")
        return
    print("running test", test.strip())
    module_name = test.strip().split(".")[0]
    class_name = module_name + "Test"
    module = importlib.import_module("." + module_name, package="compiler")
    class_ = getattr(module, class_name)
    tests.append(loader.loadTestsFromTestCase(class_))
    return
| 27.25 | 77 | 0.63685 |
ace78e54587eb831b5e61e7ebfaeec0ee8fe228f | 4,158 | py | Python | php_fpm/datadog_checks/php_fpm/config_models/defaults.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | php_fpm/datadog_checks/php_fpm/config_models/defaults.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | php_fpm/datadog_checks/php_fpm/config_models/defaults.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_allow_redirects(field, value):
return True
# Per-field default providers for an `instance` configuration section.
# Each function receives the field descriptor and the currently-configured
# value and returns the default to apply: either a hard-coded literal, or
# — for fields with no fixed default — whatever get_default_field_value()
# (defined elsewhere in this file) resolves.
# NOTE(review): this looks auto-generated (datadog-checks config-model
# style); prefer regenerating over hand-editing — confirm before changing.


def instance_auth_token(field, value):
    # No fixed default — defer to the shared resolver.
    return get_default_field_value(field, value)


def instance_auth_type(field, value):
    # Default HTTP authentication scheme.
    return 'basic'


def instance_aws_host(field, value):
    return get_default_field_value(field, value)


def instance_aws_region(field, value):
    return get_default_field_value(field, value)


def instance_aws_service(field, value):
    return get_default_field_value(field, value)


def instance_connect_timeout(field, value):
    return get_default_field_value(field, value)


def instance_disable_generic_tags(field, value):
    return False


def instance_empty_default_hostname(field, value):
    return False


def instance_extra_headers(field, value):
    return get_default_field_value(field, value)


def instance_headers(field, value):
    return get_default_field_value(field, value)


def instance_http_host(field, value):
    return get_default_field_value(field, value)


def instance_kerberos_auth(field, value):
    # Kerberos is off unless explicitly enabled.
    return 'disabled'


def instance_kerberos_cache(field, value):
    return get_default_field_value(field, value)


def instance_kerberos_delegate(field, value):
    return False


def instance_kerberos_force_initiate(field, value):
    return False


def instance_kerberos_hostname(field, value):
    return get_default_field_value(field, value)


def instance_kerberos_keytab(field, value):
    return get_default_field_value(field, value)


def instance_kerberos_principal(field, value):
    return get_default_field_value(field, value)


def instance_log_requests(field, value):
    return False


def instance_min_collection_interval(field, value):
    # Seconds between collections.
    return 15


def instance_ntlm_domain(field, value):
    return get_default_field_value(field, value)


def instance_password(field, value):
    return get_default_field_value(field, value)


def instance_persist_connections(field, value):
    return False


def instance_ping_reply(field, value):
    # Expected body of a successful ping response.
    return 'pong'


def instance_ping_url(field, value):
    return 'http://localhost/ping'


def instance_proxy(field, value):
    return get_default_field_value(field, value)


def instance_read_timeout(field, value):
    return get_default_field_value(field, value)


def instance_request_size(field, value):
    return 16


def instance_service(field, value):
    return get_default_field_value(field, value)


def instance_skip_proxy(field, value):
    return False


def instance_status_url(field, value):
    return 'http://localhost/status'


def instance_tags(field, value):
    return get_default_field_value(field, value)


def instance_timeout(field, value):
    return 10


def instance_tls_ca_cert(field, value):
    return get_default_field_value(field, value)


def instance_tls_cert(field, value):
    return get_default_field_value(field, value)


def instance_tls_ignore_warning(field, value):
    return False


def instance_tls_private_key(field, value):
    return get_default_field_value(field, value)


def instance_tls_protocols_allowed(field, value):
    return get_default_field_value(field, value)


def instance_tls_use_host_header(field, value):
    return False


def instance_tls_verify(field, value):
    # TLS certificate verification is on by default.
    return True


def instance_use_fastcgi(field, value):
    return False


def instance_use_legacy_auth_encoding(field, value):
    return True


def instance_username(field, value):
    return get_default_field_value(field, value)
| 20.482759 | 105 | 0.774651 |
ace78ecad9c936185b935ba79e783982813e3f19 | 2,585 | py | Python | util/create_soundscapes.py | mthaak/hownoisy | e346abd9594c7e27fce0a646b4776128991bb77f | [
"MIT"
] | 2 | 2018-04-16T13:40:54.000Z | 2018-04-16T13:41:00.000Z | util/create_soundscapes.py | mthaak/hownoisy | e346abd9594c7e27fce0a646b4776128991bb77f | [
"MIT"
] | null | null | null | util/create_soundscapes.py | mthaak/hownoisy | e346abd9594c7e27fce0a646b4776128991bb77f | [
"MIT"
] | null | null | null | """Script used to create artificial soundscapes using Scaper"""
import os
from datetime import datetime
import numpy as np
import scaper
# OUTPUT FOLDER
outfolder = '../data/Analysis_Dur'
# outfolder = sys.argv[1]

# SCAPER SETTINGS
fg_folder = '../data/ByClass'   # foreground event source clips
bg_folder = '../data/ByClass'   # background source clips
n_soundscapes = 100
# n_soundscapes = int(sys.argv[2])
ref_db = -50          # reference loudness (dB) for each soundscape
duration = 30.0       # soundscape length in seconds (increased below)
min_events = 1        # bounds on foreground events per soundscape
max_events = 60

for n in range(n_soundscapes):
    print('Generating soundscape: {:d}/{:d}'.format(n + 1, n_soundscapes))
    before = datetime.now()

    # create a scaper
    sc = scaper.Scaper(duration, fg_folder, bg_folder)
    # Labels Scaper treats as protected — see the Scaper docs for the
    # exact semantics (source durations preserved for these classes).
    sc.protected_labels = ['car_horn', 'dog_bark', 'gun_shot', 'siren']
    sc.ref_db = ref_db

    # add background
    # sc.add_background(label=('const', 'noise'),
    #                   source_file=('choose', []),
    #                   source_time=('const', 0))

    # add random number of foreground events
    n_events = np.random.randint(min_events, max_events + 1)
    # NOTE(review): time_interval is computed but never used below.
    time_interval = duration / n_events

    # sc.add_event(label=('const', outfolder.split('/')[-1]),
    #              source_file=('choose', []),
    #              source_time=('const', 0.0),
    #              event_time=('uniform', 0.0, duration - 4.0),
    #              event_duration=('const', 4.0),
    #              snr=('const', 3),
    #              pitch_shift=None,
    #              time_stretch=None)

    # Each event: random label/source, 1-4 s long, placed so it fits
    # before the end, with randomized SNR, pitch shift and time stretch.
    for idx in range(n_events):
        sc.add_event(label=('choose', []),
                     source_file=('choose', []),
                     source_time=('const', 0.0),
                     event_time=('uniform', 0.0, duration - 4.0),
                     event_duration=('uniform', 1.0, 4.0),
                     snr=('uniform', 0, 50),
                     pitch_shift=('uniform', -2.0, 2.0),
                     time_stretch=('uniform', 0.8, 1.2))

    # generate: audio (.wav), annotation (.jams) and plain event list (.txt)
    audiofile = os.path.join(outfolder, "soundscape_a{:d}.wav".format(n))
    jamsfile = os.path.join(outfolder, "soundscape_a{:d}.jams".format(n))
    txtfile = os.path.join(outfolder, "soundscape_a{:d}.txt".format(n))
    sc.generate(audiofile, jamsfile,
                allow_repeated_label=True,
                allow_repeated_source=True,
                reverb=0.0,
                disable_sox_warnings=True,
                no_audio=False,
                txt_path=txtfile)

    after = datetime.now()
    time_took = (after - before).total_seconds() * 1000
    print('Soundscape %d took %d ms to generate' % (n + 1, time_took))

    # Every 10th soundscape, lengthen subsequent ones by 30 s —
    # presumably a duration-analysis sweep (outfolder 'Analysis_Dur');
    # confirm this is intentional.
    if n % 10 == 9:
        duration += 30.0
| 30.77381 | 74 | 0.560155 |
ace78fb0f55afe8282f6f34886489c9e7530f6b8 | 2,169 | py | Python | streamlit_custom_components/crypto_account_stack/__init__.py | jasonjgarcia24/web3-ether-talent-pool | 104594336542e3028a4a1a186866e3509ba08996 | [
"MIT"
] | null | null | null | streamlit_custom_components/crypto_account_stack/__init__.py | jasonjgarcia24/web3-ether-talent-pool | 104594336542e3028a4a1a186866e3509ba08996 | [
"MIT"
] | null | null | null | streamlit_custom_components/crypto_account_stack/__init__.py | jasonjgarcia24/web3-ether-talent-pool | 104594336542e3028a4a1a186866e3509ba08996 | [
"MIT"
] | null | null | null | import os
import streamlit.components.v1 as components
import streamlit as st
_RELEASE = True  # flip to False to develop against the local frontend dev server

if _RELEASE:
    # Release mode: serve the component's prebuilt frontend bundle that
    # ships next to this file.
    root_dir = os.path.dirname(os.path.abspath(__file__))
    build_dir = os.path.join(root_dir, "frontend/build")
    _crypto_account_stack = components.declare_component(
        "crypto_account_stack",
        path=build_dir,
    )
else:
    # Development mode: proxy to the hot-reloading frontend dev server.
    # NOTE(review): the component name differs between the two branches
    # ("crypto_account_stack" vs "crypto-account-stack") — confirm this
    # is intentional.
    _crypto_account_stack = components.declare_component(
        "crypto-account-stack",
        url="http://localhost:3001",
    )
def crypto_account_stack(
    header_str: str,
    address_str_val: str,
    address_str_hdr: str,
    balance_str_val: str,
    balance_str_unit: str,
    balance_str_hdr: str,
    copy_str: str,
    href: str,
    key=None,
    font_size="12px",
    font_weight="none",
    width="100%",
    color="#122221",
    padding="0px",
    margin="0px",
    text_align="center",
    background="none",
):
    """Render the crypto-account-stack Streamlit component.

    Args:
        header_str: Label shown for the account card.
        address_str_val: Account address value.
        address_str_hdr: Caption shown next to the address.
        balance_str_val: Balance value.
        balance_str_unit: Balance unit (e.g. ``"BTC"``).
        balance_str_hdr: Caption shown next to the balance.
        copy_str: Text passed to the frontend for the copy action
            (presumably the post-copy confirmation message — see the
            ``_RELEASE`` demo in this module). Falsy values fall back
            to an empty string.
        href: Link target forwarded to the frontend.
        key: Optional Streamlit widget key.
        font_size, font_weight, width, color, padding, margin,
        text_align, background: CSS styling passed straight through
            to the frontend.

    Returns:
        Whatever the underlying declared component returns.
    """
    if not copy_str:
        # BUG FIX: the original did ``copy_str = str``, assigning the
        # built-in ``str`` *type* and sending the class object to the
        # frontend. Fall back to an empty string instead.
        copy_str = ""
    return _crypto_account_stack(
        header_str=header_str,
        address_str_val=address_str_val,
        address_str_hdr=address_str_hdr,
        balance_str_val=balance_str_val,
        balance_str_unit=balance_str_unit,
        balance_str_hdr=balance_str_hdr,
        copy_str=copy_str,
        href=href,
        key=key,
        font_size=font_size,
        font_weight=font_weight,
        width=width,
        color=color,
        padding=padding,
        margin=margin,
        text_align=text_align,
        background=background,
    )
if not _RELEASE:
    # Manual smoke test: run this module directly (with the frontend dev
    # server from the non-release branch above) to preview the component
    # in a Streamlit sidebar.
    st.title("Crypto Content Stack")

    # Shared CSS styling forwarded to the component.
    style_kwargs = dict(
        font_size="14px",
        width="100%",
        padding="0px",
        margin="0 auto",
        text_align="center",
        background="none"
    )

    with st.sidebar:
        # NOTE(review): this calls the raw declared component
        # (_crypto_account_stack) rather than the crypto_account_stack()
        # wrapper, so the wrapper's copy_str fallback is bypassed —
        # confirm that is intended.
        _crypto_account_stack(
            header_str="Client",
            address_str_val="0x123456789123456789",
            address_str_hdr="Address:",
            balance_str_val="99.99",
            balance_str_unit="BTC",
            balance_str_hdr="Balance: ",
            copy_str="You just successfully copied the string!",
            href="https://www.google.com/",
            **style_kwargs,
        )
| 23.074468 | 64 | 0.615952 |
ace78fcd290255202aa061061a28c109c281726e | 985 | py | Python | zerver/tests/test_onboarding.py | kaustubh-nair/zulip | fb96407607c1f42b350980ad13af20b884750606 | [
"Apache-2.0"
] | 6 | 2019-05-09T20:43:20.000Z | 2022-03-29T05:53:50.000Z | zerver/tests/test_onboarding.py | kaustubh-nair/zulip | fb96407607c1f42b350980ad13af20b884750606 | [
"Apache-2.0"
] | 2 | 2016-10-18T04:01:56.000Z | 2016-10-20T18:19:09.000Z | zerver/tests/test_onboarding.py | kaustubh-nair/zulip | fb96407607c1f42b350980ad13af20b884750606 | [
"Apache-2.0"
] | 7 | 2016-08-10T02:24:32.000Z | 2022-03-28T15:14:18.000Z | from zerver.lib.onboarding import create_if_missing_realm_internal_bots
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Realm, UserProfile
class TestRealmInternalBotCreation(ZulipTestCase):
    """Exercises create_if_missing_realm_internal_bots()."""

    def test_create_if_missing_realm_internal_bots(self) -> None:
        bot_settings = [
            {
                'var_name': 'TEST_BOT',
                'email_template': 'test-bot@%s',
                'name': 'Test Bot',
            },
        ]

        def bot_present_in_every_realm() -> bool:
            # The bot exists in every realm iff the number of matching
            # profiles equals the number of realms.
            realm_count = Realm.objects.count()
            bot_count = UserProfile.objects.filter(
                email='test-bot@zulip.com',
            ).count()
            return realm_count == bot_count

        # Before the helper runs, no realm should have the bot.
        self.assertFalse(bot_present_in_every_realm())
        with self.settings(REALM_INTERNAL_BOTS=bot_settings):
            create_if_missing_realm_internal_bots()
        # Afterwards, every realm should have exactly one.
        self.assertTrue(bot_present_in_every_realm())
ace78fdd5b41522b9b73abdd713844ef15b719ca | 2,588 | py | Python | examples/id_pools_ipv4_ranges.py | PragadeeswaranS/oneview-python | 3acc113b8dd30029beb7c228c3bc2bbe67d3485b | [
"Apache-2.0"
] | null | null | null | examples/id_pools_ipv4_ranges.py | PragadeeswaranS/oneview-python | 3acc113b8dd30029beb7c228c3bc2bbe67d3485b | [
"Apache-2.0"
] | null | null | null | examples/id_pools_ipv4_ranges.py | PragadeeswaranS/oneview-python | 3acc113b8dd30029beb7c228c3bc2bbe67d3485b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from config_loader import try_load_from_file
# Connection settings: fill in the appliance IP and credentials here, or
# supply them via the JSON file loaded below.
config = {
    "ip": "",
    "credentials": {
        "userName": "administrator",
        "password": ""
    }
}

# Try load config from a file (if there is a config file)
config = try_load_from_file(config)

oneview_client = OneViewClient(config)

# Payload describing the custom IPv4 range to create.
# NOTE(review): subnetUri is appliance-specific — point it at an existing
# IPv4 subnet on your appliance before running.
options = {
    "type": "Range",
    "name": "IPv4",
    "rangeCategory": "Custom",
    "startAddress": "10.10.2.2",
    "endAddress": "10.10.2.254",
    "subnetUri": "/rest/id-pools/ipv4/subnets/7e77926c-195c-4984-926d-c858fde63f9b"
}

print("\n Create an IPv4 Range for id pools")
ipv4_range = oneview_client.id_pools_ipv4_ranges.create(options)
pprint(ipv4_range)

# Rename the range and push the change back to the appliance.
print("\n Update the IPv4 Range")
ipv4_range['name'] = 'New Name'
ipv4_range = oneview_client.id_pools_ipv4_ranges.update(ipv4_range)
pprint(ipv4_range)

print("\n Get the IPv4 range by uri")
ipv4_range_byuri = oneview_client.id_pools_ipv4_ranges.get(ipv4_range['uri'])
pprint(ipv4_range_byuri)

print("\n Enable an IPv4 range")
ipv4_range = oneview_client.id_pools_ipv4_ranges.enable(
    {
        "type": "Range",
        "enabled": True
    },
    ipv4_range['uri'])
print(" IPv4 range enabled successfully.")

print("\n Get all allocated fragments in IPv4 range")
allocated_fragments = oneview_client.id_pools_ipv4_ranges.get_allocated_fragments(ipv4_range['uri'])
pprint(allocated_fragments)

# NOTE(review): the `allocated_fragments` name is reused here for the
# FREE-fragments result — kept as-is to preserve behavior.
print("\n Get all free fragments in IPv4 range")
allocated_fragments = oneview_client.id_pools_ipv4_ranges.get_free_fragments(ipv4_range['uri'])
pprint(allocated_fragments)

print("\n Disable an IPv4 range")
ipv4_range = oneview_client.id_pools_ipv4_ranges.enable({
    "type": "Range",
    "enabled": False
}, ipv4_range['uri'])
print(" IPv4 range disabled successfully.")

# Clean up the range created above.
print("\n Delete the IPv4_range")
oneview_client.id_pools_ipv4_ranges.delete(ipv4_range)
print(" Successfully deleted IPv4 range")
ace7907a1134e0c6cea973c31f2b5d68f9f90f96 | 36,861 | py | Python | tb/test_udp_mux_64_4.py | renaissanxe/verilog-ethernet | cf6a01fffeda33b0748f942532ad91e945d4903f | [
"MIT"
] | 1 | 2021-05-24T17:21:53.000Z | 2021-05-24T17:21:53.000Z | tb/test_udp_mux_64_4.py | renaissanxe/verilog-ethernet | cf6a01fffeda33b0748f942532ad91e945d4903f | [
"MIT"
] | null | null | null | tb/test_udp_mux_64_4.py | renaissanxe/verilog-ethernet | cf6a01fffeda33b0748f942532ad91e945d4903f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Copyright (c) 2014-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import udp_ep
# Device under test and the testbench module derived from it.
module = 'udp_mux_64_4'
testbench = 'test_%s' % module

# Verilog sources handed to the simulator: the DUT plus this testbench.
srcs = [
    "../rtl/%s.v" % module,
    "%s.v" % testbench,
]
src = ' '.join(srcs)

# Icarus Verilog compile command producing the .vvp simulation binary.
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_0_udp_hdr_valid = Signal(bool(0))
input_0_eth_dest_mac = Signal(intbv(0)[48:])
input_0_eth_src_mac = Signal(intbv(0)[48:])
input_0_eth_type = Signal(intbv(0)[16:])
input_0_ip_version = Signal(intbv(0)[4:])
input_0_ip_ihl = Signal(intbv(0)[4:])
input_0_ip_dscp = Signal(intbv(0)[6:])
input_0_ip_ecn = Signal(intbv(0)[2:])
input_0_ip_length = Signal(intbv(0)[16:])
input_0_ip_identification = Signal(intbv(0)[16:])
input_0_ip_flags = Signal(intbv(0)[3:])
input_0_ip_fragment_offset = Signal(intbv(0)[13:])
input_0_ip_ttl = Signal(intbv(0)[8:])
input_0_ip_protocol = Signal(intbv(0)[8:])
input_0_ip_header_checksum = Signal(intbv(0)[16:])
input_0_ip_source_ip = Signal(intbv(0)[32:])
input_0_ip_dest_ip = Signal(intbv(0)[32:])
input_0_udp_source_port = Signal(intbv(0)[16:])
input_0_udp_dest_port = Signal(intbv(0)[16:])
input_0_udp_length = Signal(intbv(0)[16:])
input_0_udp_checksum = Signal(intbv(0)[16:])
input_0_udp_payload_tdata = Signal(intbv(0)[64:])
input_0_udp_payload_tkeep = Signal(intbv(0)[8:])
input_0_udp_payload_tvalid = Signal(bool(0))
input_0_udp_payload_tlast = Signal(bool(0))
input_0_udp_payload_tuser = Signal(bool(0))
input_1_udp_hdr_valid = Signal(bool(0))
input_1_eth_dest_mac = Signal(intbv(0)[48:])
input_1_eth_src_mac = Signal(intbv(0)[48:])
input_1_eth_type = Signal(intbv(0)[16:])
input_1_ip_version = Signal(intbv(0)[4:])
input_1_ip_ihl = Signal(intbv(0)[4:])
input_1_ip_dscp = Signal(intbv(0)[6:])
input_1_ip_ecn = Signal(intbv(0)[2:])
input_1_ip_length = Signal(intbv(0)[16:])
input_1_ip_identification = Signal(intbv(0)[16:])
input_1_ip_flags = Signal(intbv(0)[3:])
input_1_ip_fragment_offset = Signal(intbv(0)[13:])
input_1_ip_ttl = Signal(intbv(0)[8:])
input_1_ip_protocol = Signal(intbv(0)[8:])
input_1_ip_header_checksum = Signal(intbv(0)[16:])
input_1_ip_source_ip = Signal(intbv(0)[32:])
input_1_ip_dest_ip = Signal(intbv(0)[32:])
input_1_udp_source_port = Signal(intbv(0)[16:])
input_1_udp_dest_port = Signal(intbv(0)[16:])
input_1_udp_length = Signal(intbv(0)[16:])
input_1_udp_checksum = Signal(intbv(0)[16:])
input_1_udp_payload_tdata = Signal(intbv(0)[64:])
input_1_udp_payload_tkeep = Signal(intbv(0)[8:])
input_1_udp_payload_tvalid = Signal(bool(0))
input_1_udp_payload_tlast = Signal(bool(0))
input_1_udp_payload_tuser = Signal(bool(0))
input_2_udp_hdr_valid = Signal(bool(0))
input_2_eth_dest_mac = Signal(intbv(0)[48:])
input_2_eth_src_mac = Signal(intbv(0)[48:])
input_2_eth_type = Signal(intbv(0)[16:])
input_2_ip_version = Signal(intbv(0)[4:])
input_2_ip_ihl = Signal(intbv(0)[4:])
input_2_ip_dscp = Signal(intbv(0)[6:])
input_2_ip_ecn = Signal(intbv(0)[2:])
input_2_ip_length = Signal(intbv(0)[16:])
input_2_ip_identification = Signal(intbv(0)[16:])
input_2_ip_flags = Signal(intbv(0)[3:])
input_2_ip_fragment_offset = Signal(intbv(0)[13:])
input_2_ip_ttl = Signal(intbv(0)[8:])
input_2_ip_protocol = Signal(intbv(0)[8:])
input_2_ip_header_checksum = Signal(intbv(0)[16:])
input_2_ip_source_ip = Signal(intbv(0)[32:])
input_2_ip_dest_ip = Signal(intbv(0)[32:])
input_2_udp_source_port = Signal(intbv(0)[16:])
input_2_udp_dest_port = Signal(intbv(0)[16:])
input_2_udp_length = Signal(intbv(0)[16:])
input_2_udp_checksum = Signal(intbv(0)[16:])
input_2_udp_payload_tdata = Signal(intbv(0)[64:])
input_2_udp_payload_tkeep = Signal(intbv(0)[8:])
input_2_udp_payload_tvalid = Signal(bool(0))
input_2_udp_payload_tlast = Signal(bool(0))
input_2_udp_payload_tuser = Signal(bool(0))
input_3_udp_hdr_valid = Signal(bool(0))
input_3_eth_dest_mac = Signal(intbv(0)[48:])
input_3_eth_src_mac = Signal(intbv(0)[48:])
input_3_eth_type = Signal(intbv(0)[16:])
input_3_ip_version = Signal(intbv(0)[4:])
input_3_ip_ihl = Signal(intbv(0)[4:])
input_3_ip_dscp = Signal(intbv(0)[6:])
input_3_ip_ecn = Signal(intbv(0)[2:])
input_3_ip_length = Signal(intbv(0)[16:])
input_3_ip_identification = Signal(intbv(0)[16:])
input_3_ip_flags = Signal(intbv(0)[3:])
input_3_ip_fragment_offset = Signal(intbv(0)[13:])
input_3_ip_ttl = Signal(intbv(0)[8:])
input_3_ip_protocol = Signal(intbv(0)[8:])
input_3_ip_header_checksum = Signal(intbv(0)[16:])
input_3_ip_source_ip = Signal(intbv(0)[32:])
input_3_ip_dest_ip = Signal(intbv(0)[32:])
input_3_udp_source_port = Signal(intbv(0)[16:])
input_3_udp_dest_port = Signal(intbv(0)[16:])
input_3_udp_length = Signal(intbv(0)[16:])
input_3_udp_checksum = Signal(intbv(0)[16:])
input_3_udp_payload_tdata = Signal(intbv(0)[64:])
input_3_udp_payload_tkeep = Signal(intbv(0)[8:])
input_3_udp_payload_tvalid = Signal(bool(0))
input_3_udp_payload_tlast = Signal(bool(0))
input_3_udp_payload_tuser = Signal(bool(0))
output_udp_payload_tready = Signal(bool(0))
output_udp_hdr_ready = Signal(bool(0))
enable = Signal(bool(0))
select = Signal(intbv(0)[2:])
# Outputs
input_0_udp_hdr_ready = Signal(bool(0))
input_0_udp_payload_tready = Signal(bool(0))
input_1_udp_hdr_ready = Signal(bool(0))
input_1_udp_payload_tready = Signal(bool(0))
input_2_udp_hdr_ready = Signal(bool(0))
input_2_udp_payload_tready = Signal(bool(0))
input_3_udp_hdr_ready = Signal(bool(0))
input_3_udp_payload_tready = Signal(bool(0))
output_udp_hdr_valid = Signal(bool(0))
output_eth_dest_mac = Signal(intbv(0)[48:])
output_eth_src_mac = Signal(intbv(0)[48:])
output_eth_type = Signal(intbv(0)[16:])
output_ip_version = Signal(intbv(0)[4:])
output_ip_ihl = Signal(intbv(0)[4:])
output_ip_dscp = Signal(intbv(0)[6:])
output_ip_ecn = Signal(intbv(0)[2:])
output_ip_length = Signal(intbv(0)[16:])
output_ip_identification = Signal(intbv(0)[16:])
output_ip_flags = Signal(intbv(0)[3:])
output_ip_fragment_offset = Signal(intbv(0)[13:])
output_ip_ttl = Signal(intbv(0)[8:])
output_ip_protocol = Signal(intbv(0)[8:])
output_ip_header_checksum = Signal(intbv(0)[16:])
output_ip_source_ip = Signal(intbv(0)[32:])
output_ip_dest_ip = Signal(intbv(0)[32:])
output_udp_source_port = Signal(intbv(0)[16:])
output_udp_dest_port = Signal(intbv(0)[16:])
output_udp_length = Signal(intbv(0)[16:])
output_udp_checksum = Signal(intbv(0)[16:])
output_udp_payload_tdata = Signal(intbv(0)[64:])
output_udp_payload_tkeep = Signal(intbv(0)[8:])
output_udp_payload_tvalid = Signal(bool(0))
output_udp_payload_tlast = Signal(bool(0))
output_udp_payload_tuser = Signal(bool(0))
# sources and sinks
source_0_pause = Signal(bool(0))
source_1_pause = Signal(bool(0))
source_2_pause = Signal(bool(0))
source_3_pause = Signal(bool(0))
sink_pause = Signal(bool(0))
source_0 = udp_ep.UDPFrameSource()
source_0_logic = source_0.create_logic(
clk,
rst,
udp_hdr_ready=input_0_udp_hdr_ready,
udp_hdr_valid=input_0_udp_hdr_valid,
eth_dest_mac=input_0_eth_dest_mac,
eth_src_mac=input_0_eth_src_mac,
eth_type=input_0_eth_type,
ip_version=input_0_ip_version,
ip_ihl=input_0_ip_ihl,
ip_dscp=input_0_ip_dscp,
ip_ecn=input_0_ip_ecn,
ip_length=input_0_ip_length,
ip_identification=input_0_ip_identification,
ip_flags=input_0_ip_flags,
ip_fragment_offset=input_0_ip_fragment_offset,
ip_ttl=input_0_ip_ttl,
ip_protocol=input_0_ip_protocol,
ip_header_checksum=input_0_ip_header_checksum,
ip_source_ip=input_0_ip_source_ip,
ip_dest_ip=input_0_ip_dest_ip,
udp_source_port=input_0_udp_source_port,
udp_dest_port=input_0_udp_dest_port,
udp_length=input_0_udp_length,
udp_checksum=input_0_udp_checksum,
udp_payload_tdata=input_0_udp_payload_tdata,
udp_payload_tkeep=input_0_udp_payload_tkeep,
udp_payload_tvalid=input_0_udp_payload_tvalid,
udp_payload_tready=input_0_udp_payload_tready,
udp_payload_tlast=input_0_udp_payload_tlast,
udp_payload_tuser=input_0_udp_payload_tuser,
pause=source_0_pause,
name='source_0'
)
source_1 = udp_ep.UDPFrameSource()
source_1_logic = source_1.create_logic(
clk,
rst,
udp_hdr_ready=input_1_udp_hdr_ready,
udp_hdr_valid=input_1_udp_hdr_valid,
eth_dest_mac=input_1_eth_dest_mac,
eth_src_mac=input_1_eth_src_mac,
eth_type=input_1_eth_type,
ip_version=input_1_ip_version,
ip_ihl=input_1_ip_ihl,
ip_dscp=input_1_ip_dscp,
ip_ecn=input_1_ip_ecn,
ip_length=input_1_ip_length,
ip_identification=input_1_ip_identification,
ip_flags=input_1_ip_flags,
ip_fragment_offset=input_1_ip_fragment_offset,
ip_ttl=input_1_ip_ttl,
ip_protocol=input_1_ip_protocol,
ip_header_checksum=input_1_ip_header_checksum,
ip_source_ip=input_1_ip_source_ip,
ip_dest_ip=input_1_ip_dest_ip,
udp_source_port=input_1_udp_source_port,
udp_dest_port=input_1_udp_dest_port,
udp_length=input_1_udp_length,
udp_checksum=input_1_udp_checksum,
udp_payload_tdata=input_1_udp_payload_tdata,
udp_payload_tkeep=input_1_udp_payload_tkeep,
udp_payload_tvalid=input_1_udp_payload_tvalid,
udp_payload_tready=input_1_udp_payload_tready,
udp_payload_tlast=input_1_udp_payload_tlast,
udp_payload_tuser=input_1_udp_payload_tuser,
pause=source_1_pause,
name='source_1'
)
source_2 = udp_ep.UDPFrameSource()
source_2_logic = source_2.create_logic(
clk,
rst,
udp_hdr_ready=input_2_udp_hdr_ready,
udp_hdr_valid=input_2_udp_hdr_valid,
eth_dest_mac=input_2_eth_dest_mac,
eth_src_mac=input_2_eth_src_mac,
eth_type=input_2_eth_type,
ip_version=input_2_ip_version,
ip_ihl=input_2_ip_ihl,
ip_dscp=input_2_ip_dscp,
ip_ecn=input_2_ip_ecn,
ip_length=input_2_ip_length,
ip_identification=input_2_ip_identification,
ip_flags=input_2_ip_flags,
ip_fragment_offset=input_2_ip_fragment_offset,
ip_ttl=input_2_ip_ttl,
ip_protocol=input_2_ip_protocol,
ip_header_checksum=input_2_ip_header_checksum,
ip_source_ip=input_2_ip_source_ip,
ip_dest_ip=input_2_ip_dest_ip,
udp_source_port=input_2_udp_source_port,
udp_dest_port=input_2_udp_dest_port,
udp_length=input_2_udp_length,
udp_checksum=input_2_udp_checksum,
udp_payload_tdata=input_2_udp_payload_tdata,
udp_payload_tkeep=input_2_udp_payload_tkeep,
udp_payload_tvalid=input_2_udp_payload_tvalid,
udp_payload_tready=input_2_udp_payload_tready,
udp_payload_tlast=input_2_udp_payload_tlast,
udp_payload_tuser=input_2_udp_payload_tuser,
pause=source_2_pause,
name='source_2'
)
source_3 = udp_ep.UDPFrameSource()
source_3_logic = source_3.create_logic(
clk,
rst,
udp_hdr_ready=input_3_udp_hdr_ready,
udp_hdr_valid=input_3_udp_hdr_valid,
eth_dest_mac=input_3_eth_dest_mac,
eth_src_mac=input_3_eth_src_mac,
eth_type=input_3_eth_type,
ip_version=input_3_ip_version,
ip_ihl=input_3_ip_ihl,
ip_dscp=input_3_ip_dscp,
ip_ecn=input_3_ip_ecn,
ip_length=input_3_ip_length,
ip_identification=input_3_ip_identification,
ip_flags=input_3_ip_flags,
ip_fragment_offset=input_3_ip_fragment_offset,
ip_ttl=input_3_ip_ttl,
ip_protocol=input_3_ip_protocol,
ip_header_checksum=input_3_ip_header_checksum,
ip_source_ip=input_3_ip_source_ip,
ip_dest_ip=input_3_ip_dest_ip,
udp_source_port=input_3_udp_source_port,
udp_dest_port=input_3_udp_dest_port,
udp_length=input_3_udp_length,
udp_checksum=input_3_udp_checksum,
udp_payload_tdata=input_3_udp_payload_tdata,
udp_payload_tkeep=input_3_udp_payload_tkeep,
udp_payload_tvalid=input_3_udp_payload_tvalid,
udp_payload_tready=input_3_udp_payload_tready,
udp_payload_tlast=input_3_udp_payload_tlast,
udp_payload_tuser=input_3_udp_payload_tuser,
pause=source_3_pause,
name='source_3'
)
sink = udp_ep.UDPFrameSink()
sink_logic = sink.create_logic(
clk,
rst,
udp_hdr_ready=output_udp_hdr_ready,
udp_hdr_valid=output_udp_hdr_valid,
eth_dest_mac=output_eth_dest_mac,
eth_src_mac=output_eth_src_mac,
eth_type=output_eth_type,
ip_version=output_ip_version,
ip_ihl=output_ip_ihl,
ip_dscp=output_ip_dscp,
ip_ecn=output_ip_ecn,
ip_length=output_ip_length,
ip_identification=output_ip_identification,
ip_flags=output_ip_flags,
ip_fragment_offset=output_ip_fragment_offset,
ip_ttl=output_ip_ttl,
ip_protocol=output_ip_protocol,
ip_header_checksum=output_ip_header_checksum,
ip_source_ip=output_ip_source_ip,
ip_dest_ip=output_ip_dest_ip,
udp_source_port=output_udp_source_port,
udp_dest_port=output_udp_dest_port,
udp_length=output_udp_length,
udp_checksum=output_udp_checksum,
udp_payload_tdata=output_udp_payload_tdata,
udp_payload_tkeep=output_udp_payload_tkeep,
udp_payload_tvalid=output_udp_payload_tvalid,
udp_payload_tready=output_udp_payload_tready,
udp_payload_tlast=output_udp_payload_tlast,
udp_payload_tuser=output_udp_payload_tuser,
pause=sink_pause,
name='sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
input_0_udp_hdr_valid=input_0_udp_hdr_valid,
input_0_udp_hdr_ready=input_0_udp_hdr_ready,
input_0_eth_dest_mac=input_0_eth_dest_mac,
input_0_eth_src_mac=input_0_eth_src_mac,
input_0_eth_type=input_0_eth_type,
input_0_ip_version=input_0_ip_version,
input_0_ip_ihl=input_0_ip_ihl,
input_0_ip_dscp=input_0_ip_dscp,
input_0_ip_ecn=input_0_ip_ecn,
input_0_ip_length=input_0_ip_length,
input_0_ip_identification=input_0_ip_identification,
input_0_ip_flags=input_0_ip_flags,
input_0_ip_fragment_offset=input_0_ip_fragment_offset,
input_0_ip_ttl=input_0_ip_ttl,
input_0_ip_protocol=input_0_ip_protocol,
input_0_ip_header_checksum=input_0_ip_header_checksum,
input_0_ip_source_ip=input_0_ip_source_ip,
input_0_ip_dest_ip=input_0_ip_dest_ip,
input_0_udp_source_port=input_0_udp_source_port,
input_0_udp_dest_port=input_0_udp_dest_port,
input_0_udp_length=input_0_udp_length,
input_0_udp_checksum=input_0_udp_checksum,
input_0_udp_payload_tdata=input_0_udp_payload_tdata,
input_0_udp_payload_tkeep=input_0_udp_payload_tkeep,
input_0_udp_payload_tvalid=input_0_udp_payload_tvalid,
input_0_udp_payload_tready=input_0_udp_payload_tready,
input_0_udp_payload_tlast=input_0_udp_payload_tlast,
input_0_udp_payload_tuser=input_0_udp_payload_tuser,
input_1_udp_hdr_valid=input_1_udp_hdr_valid,
input_1_udp_hdr_ready=input_1_udp_hdr_ready,
input_1_eth_dest_mac=input_1_eth_dest_mac,
input_1_eth_src_mac=input_1_eth_src_mac,
input_1_eth_type=input_1_eth_type,
input_1_ip_version=input_1_ip_version,
input_1_ip_ihl=input_1_ip_ihl,
input_1_ip_dscp=input_1_ip_dscp,
input_1_ip_ecn=input_1_ip_ecn,
input_1_ip_length=input_1_ip_length,
input_1_ip_identification=input_1_ip_identification,
input_1_ip_flags=input_1_ip_flags,
input_1_ip_fragment_offset=input_1_ip_fragment_offset,
input_1_ip_ttl=input_1_ip_ttl,
input_1_ip_protocol=input_1_ip_protocol,
input_1_ip_header_checksum=input_1_ip_header_checksum,
input_1_ip_source_ip=input_1_ip_source_ip,
input_1_ip_dest_ip=input_1_ip_dest_ip,
input_1_udp_source_port=input_1_udp_source_port,
input_1_udp_dest_port=input_1_udp_dest_port,
input_1_udp_length=input_1_udp_length,
input_1_udp_checksum=input_1_udp_checksum,
input_1_udp_payload_tdata=input_1_udp_payload_tdata,
input_1_udp_payload_tkeep=input_1_udp_payload_tkeep,
input_1_udp_payload_tvalid=input_1_udp_payload_tvalid,
input_1_udp_payload_tready=input_1_udp_payload_tready,
input_1_udp_payload_tlast=input_1_udp_payload_tlast,
input_1_udp_payload_tuser=input_1_udp_payload_tuser,
input_2_udp_hdr_valid=input_2_udp_hdr_valid,
input_2_udp_hdr_ready=input_2_udp_hdr_ready,
input_2_eth_dest_mac=input_2_eth_dest_mac,
input_2_eth_src_mac=input_2_eth_src_mac,
input_2_eth_type=input_2_eth_type,
input_2_ip_version=input_2_ip_version,
input_2_ip_ihl=input_2_ip_ihl,
input_2_ip_dscp=input_2_ip_dscp,
input_2_ip_ecn=input_2_ip_ecn,
input_2_ip_length=input_2_ip_length,
input_2_ip_identification=input_2_ip_identification,
input_2_ip_flags=input_2_ip_flags,
input_2_ip_fragment_offset=input_2_ip_fragment_offset,
input_2_ip_ttl=input_2_ip_ttl,
input_2_ip_protocol=input_2_ip_protocol,
input_2_ip_header_checksum=input_2_ip_header_checksum,
input_2_ip_source_ip=input_2_ip_source_ip,
input_2_ip_dest_ip=input_2_ip_dest_ip,
input_2_udp_source_port=input_2_udp_source_port,
input_2_udp_dest_port=input_2_udp_dest_port,
input_2_udp_length=input_2_udp_length,
input_2_udp_checksum=input_2_udp_checksum,
input_2_udp_payload_tdata=input_2_udp_payload_tdata,
input_2_udp_payload_tkeep=input_2_udp_payload_tkeep,
input_2_udp_payload_tvalid=input_2_udp_payload_tvalid,
input_2_udp_payload_tready=input_2_udp_payload_tready,
input_2_udp_payload_tlast=input_2_udp_payload_tlast,
input_2_udp_payload_tuser=input_2_udp_payload_tuser,
input_3_udp_hdr_valid=input_3_udp_hdr_valid,
input_3_udp_hdr_ready=input_3_udp_hdr_ready,
input_3_eth_dest_mac=input_3_eth_dest_mac,
input_3_eth_src_mac=input_3_eth_src_mac,
input_3_eth_type=input_3_eth_type,
input_3_ip_version=input_3_ip_version,
input_3_ip_ihl=input_3_ip_ihl,
input_3_ip_dscp=input_3_ip_dscp,
input_3_ip_ecn=input_3_ip_ecn,
input_3_ip_length=input_3_ip_length,
input_3_ip_identification=input_3_ip_identification,
input_3_ip_flags=input_3_ip_flags,
input_3_ip_fragment_offset=input_3_ip_fragment_offset,
input_3_ip_ttl=input_3_ip_ttl,
input_3_ip_protocol=input_3_ip_protocol,
input_3_ip_header_checksum=input_3_ip_header_checksum,
input_3_ip_source_ip=input_3_ip_source_ip,
input_3_ip_dest_ip=input_3_ip_dest_ip,
input_3_udp_source_port=input_3_udp_source_port,
input_3_udp_dest_port=input_3_udp_dest_port,
input_3_udp_length=input_3_udp_length,
input_3_udp_checksum=input_3_udp_checksum,
input_3_udp_payload_tdata=input_3_udp_payload_tdata,
input_3_udp_payload_tkeep=input_3_udp_payload_tkeep,
input_3_udp_payload_tvalid=input_3_udp_payload_tvalid,
input_3_udp_payload_tready=input_3_udp_payload_tready,
input_3_udp_payload_tlast=input_3_udp_payload_tlast,
input_3_udp_payload_tuser=input_3_udp_payload_tuser,
output_udp_hdr_valid=output_udp_hdr_valid,
output_udp_hdr_ready=output_udp_hdr_ready,
output_eth_dest_mac=output_eth_dest_mac,
output_eth_src_mac=output_eth_src_mac,
output_eth_type=output_eth_type,
output_ip_version=output_ip_version,
output_ip_ihl=output_ip_ihl,
output_ip_dscp=output_ip_dscp,
output_ip_ecn=output_ip_ecn,
output_ip_length=output_ip_length,
output_ip_identification=output_ip_identification,
output_ip_flags=output_ip_flags,
output_ip_fragment_offset=output_ip_fragment_offset,
output_ip_ttl=output_ip_ttl,
output_ip_protocol=output_ip_protocol,
output_ip_header_checksum=output_ip_header_checksum,
output_ip_source_ip=output_ip_source_ip,
output_ip_dest_ip=output_ip_dest_ip,
output_udp_source_port=output_udp_source_port,
output_udp_dest_port=output_udp_dest_port,
output_udp_length=output_udp_length,
output_udp_checksum=output_udp_checksum,
output_udp_payload_tdata=output_udp_payload_tdata,
output_udp_payload_tkeep=output_udp_payload_tkeep,
output_udp_payload_tvalid=output_udp_payload_tvalid,
output_udp_payload_tready=output_udp_payload_tready,
output_udp_payload_tlast=output_udp_payload_tlast,
output_udp_payload_tuser=output_udp_payload_tuser,
enable=enable,
select=select
)
@always(delay(4))
def clkgen():
clk.next = not clk
    @instance
    def check():
        """Stimulus/checker process for the UDP mux DUT.

        Applies reset, then runs six directed tests: single frames on
        different selected ports, back-to-back frames on the same and on
        different ports, and the same scenarios with the sources and the
        sink paused mid-transfer.
        """
        # --- reset sequence ---
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge
        yield clk.posedge
        enable.next = True
        yield clk.posedge
        print("test 1: select port 0")
        current_test.next = 1
        select.next = 0
        test_frame = udp_ep.UDPFrame()
        test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame.eth_src_mac = 0x5A5152535455
        test_frame.eth_type = 0x8000
        test_frame.ip_version = 4
        test_frame.ip_ihl = 5
        test_frame.ip_dscp = 0
        test_frame.ip_ecn = 0
        test_frame.ip_length = None
        test_frame.ip_identification = 0
        test_frame.ip_flags = 2
        test_frame.ip_fragment_offset = 0
        test_frame.ip_ttl = 64
        test_frame.ip_protocol = 0x11
        test_frame.ip_header_checksum = None
        test_frame.ip_source_ip = 0xc0a80165
        test_frame.ip_dest_ip = 0xc0a80164
        test_frame.udp_source_port = 1
        test_frame.udp_dest_port = 2
        test_frame.udp_length = None
        test_frame.udp_checksum = None
        test_frame.payload = bytearray(range(32))
        test_frame.build()
        source_0.send(test_frame)
        yield clk.posedge
        yield clk.posedge
        # wait until all four input streams are idle
        while input_0_udp_payload_tvalid or input_1_udp_payload_tvalid or input_2_udp_payload_tvalid or input_3_udp_payload_tvalid:
            yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = sink.recv()
        assert rx_frame == test_frame
        yield delay(100)
        yield clk.posedge
        print("test 2: select port 1")
        current_test.next = 2
        select.next = 1
        test_frame = udp_ep.UDPFrame()
        test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame.eth_src_mac = 0x5A5152535455
        test_frame.eth_type = 0x8000
        test_frame.ip_version = 4
        test_frame.ip_ihl = 5
        test_frame.ip_dscp = 0
        test_frame.ip_ecn = 0
        test_frame.ip_length = None
        test_frame.ip_identification = 0
        test_frame.ip_flags = 2
        test_frame.ip_fragment_offset = 0
        test_frame.ip_ttl = 64
        test_frame.ip_protocol = 0x11
        test_frame.ip_header_checksum = None
        test_frame.ip_source_ip = 0xc0a80165
        test_frame.ip_dest_ip = 0xc0a80164
        test_frame.udp_source_port = 1
        test_frame.udp_dest_port = 2
        test_frame.udp_length = None
        test_frame.udp_checksum = None
        test_frame.payload = bytearray(range(32))
        test_frame.build()
        source_1.send(test_frame)
        yield clk.posedge
        yield clk.posedge
        while input_0_udp_payload_tvalid or input_1_udp_payload_tvalid or input_2_udp_payload_tvalid or input_3_udp_payload_tvalid:
            yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = sink.recv()
        assert rx_frame == test_frame
        yield delay(100)
        yield clk.posedge
        print("test 3: back-to-back packets, same port")
        current_test.next = 3
        select.next = 0
        test_frame1 = udp_ep.UDPFrame()
        test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame1.eth_src_mac = 0x5A5152535455
        test_frame1.eth_type = 0x8000
        test_frame1.ip_version = 4
        test_frame1.ip_ihl = 5
        test_frame1.ip_dscp = 0
        test_frame1.ip_ecn = 0
        test_frame1.ip_length = None
        test_frame1.ip_identification = 0
        test_frame1.ip_flags = 2
        test_frame1.ip_fragment_offset = 0
        test_frame1.ip_ttl = 64
        test_frame1.ip_protocol = 0x11
        test_frame1.ip_header_checksum = None
        test_frame1.ip_source_ip = 0xc0a80165
        test_frame1.ip_dest_ip = 0xc0a80164
        test_frame1.udp_source_port = 1
        test_frame1.udp_dest_port = 2
        test_frame1.udp_length = None
        test_frame1.udp_checksum = None
        test_frame1.payload = bytearray(range(32))
        test_frame1.build()
        test_frame2 = udp_ep.UDPFrame()
        test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame2.eth_src_mac = 0x5A5152535455
        test_frame2.eth_type = 0x8000
        test_frame2.ip_version = 4
        test_frame2.ip_ihl = 5
        test_frame2.ip_dscp = 0
        test_frame2.ip_ecn = 0
        test_frame2.ip_length = None
        test_frame2.ip_identification = 0
        test_frame2.ip_flags = 2
        test_frame2.ip_fragment_offset = 0
        test_frame2.ip_ttl = 64
        test_frame2.ip_protocol = 0x11
        test_frame2.ip_header_checksum = None
        test_frame2.ip_source_ip = 0xc0a80165
        test_frame2.ip_dest_ip = 0xc0a80164
        test_frame2.udp_source_port = 1
        test_frame2.udp_dest_port = 2
        test_frame2.udp_length = None
        test_frame2.udp_checksum = None
        test_frame2.payload = bytearray(range(32))
        test_frame2.build()
        source_0.send(test_frame1)
        source_0.send(test_frame2)
        yield clk.posedge
        yield clk.posedge
        while input_0_udp_payload_tvalid or input_1_udp_payload_tvalid or input_2_udp_payload_tvalid or input_3_udp_payload_tvalid:
            yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = sink.recv()
        assert rx_frame == test_frame1
        rx_frame = sink.recv()
        assert rx_frame == test_frame2
        yield delay(100)
        yield clk.posedge
        print("test 4: back-to-back packets, different ports")
        current_test.next = 4
        select.next = 1
        test_frame1 = udp_ep.UDPFrame()
        test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame1.eth_src_mac = 0x5A5152535455
        test_frame1.eth_type = 0x8000
        test_frame1.ip_version = 4
        test_frame1.ip_ihl = 5
        test_frame1.ip_dscp = 0
        test_frame1.ip_ecn = 0
        test_frame1.ip_length = None
        test_frame1.ip_identification = 0
        test_frame1.ip_flags = 2
        test_frame1.ip_fragment_offset = 0
        test_frame1.ip_ttl = 64
        test_frame1.ip_protocol = 0x11
        test_frame1.ip_header_checksum = None
        test_frame1.ip_source_ip = 0xc0a80165
        test_frame1.ip_dest_ip = 0xc0a80164
        test_frame1.udp_source_port = 1
        test_frame1.udp_dest_port = 2
        test_frame1.udp_length = None
        test_frame1.udp_checksum = None
        test_frame1.payload = bytearray(range(32))
        test_frame1.build()
        test_frame2 = udp_ep.UDPFrame()
        test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame2.eth_src_mac = 0x5A5152535455
        test_frame2.eth_type = 0x8000
        test_frame2.ip_version = 4
        test_frame2.ip_ihl = 5
        test_frame2.ip_dscp = 0
        test_frame2.ip_ecn = 0
        test_frame2.ip_length = None
        test_frame2.ip_identification = 0
        test_frame2.ip_flags = 2
        test_frame2.ip_fragment_offset = 0
        test_frame2.ip_ttl = 64
        test_frame2.ip_protocol = 0x11
        test_frame2.ip_header_checksum = None
        test_frame2.ip_source_ip = 0xc0a80165
        test_frame2.ip_dest_ip = 0xc0a80164
        test_frame2.udp_source_port = 1
        test_frame2.udp_dest_port = 2
        test_frame2.udp_length = None
        test_frame2.udp_checksum = None
        test_frame2.payload = bytearray(range(32))
        test_frame2.build()
        source_1.send(test_frame1)
        source_2.send(test_frame2)
        yield clk.posedge
        yield clk.posedge
        # switch the select input while the first frame is still in flight
        while input_0_udp_payload_tvalid or input_1_udp_payload_tvalid or input_2_udp_payload_tvalid or input_3_udp_payload_tvalid:
            yield clk.posedge
            select.next = 2
        yield clk.posedge
        yield clk.posedge
        rx_frame = sink.recv()
        assert rx_frame == test_frame1
        rx_frame = sink.recv()
        assert rx_frame == test_frame2
        yield delay(100)
        yield clk.posedge
        print("test 5: alterate pause source")
        current_test.next = 5
        select.next = 1
        test_frame1 = udp_ep.UDPFrame()
        test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame1.eth_src_mac = 0x5A5152535455
        test_frame1.eth_type = 0x8000
        test_frame1.ip_version = 4
        test_frame1.ip_ihl = 5
        test_frame1.ip_dscp = 0
        test_frame1.ip_ecn = 0
        test_frame1.ip_length = None
        test_frame1.ip_identification = 0
        test_frame1.ip_flags = 2
        test_frame1.ip_fragment_offset = 0
        test_frame1.ip_ttl = 64
        test_frame1.ip_protocol = 0x11
        test_frame1.ip_header_checksum = None
        test_frame1.ip_source_ip = 0xc0a80165
        test_frame1.ip_dest_ip = 0xc0a80164
        test_frame1.udp_source_port = 1
        test_frame1.udp_dest_port = 2
        test_frame1.udp_length = None
        test_frame1.udp_checksum = None
        test_frame1.payload = bytearray(range(32))
        test_frame1.build()
        test_frame2 = udp_ep.UDPFrame()
        test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame2.eth_src_mac = 0x5A5152535455
        test_frame2.eth_type = 0x8000
        test_frame2.ip_version = 4
        test_frame2.ip_ihl = 5
        test_frame2.ip_dscp = 0
        test_frame2.ip_ecn = 0
        test_frame2.ip_length = None
        test_frame2.ip_identification = 0
        test_frame2.ip_flags = 2
        test_frame2.ip_fragment_offset = 0
        test_frame2.ip_ttl = 64
        test_frame2.ip_protocol = 0x11
        test_frame2.ip_header_checksum = None
        test_frame2.ip_source_ip = 0xc0a80165
        test_frame2.ip_dest_ip = 0xc0a80164
        test_frame2.udp_source_port = 1
        test_frame2.udp_dest_port = 2
        test_frame2.udp_length = None
        test_frame2.udp_checksum = None
        test_frame2.payload = bytearray(range(32))
        test_frame2.build()
        source_1.send(test_frame1)
        source_2.send(test_frame2)
        yield clk.posedge
        yield clk.posedge
        # toggle all source pause signals while frames drain
        while input_0_udp_payload_tvalid or input_1_udp_payload_tvalid or input_2_udp_payload_tvalid or input_3_udp_payload_tvalid:
            source_0_pause.next = True
            source_1_pause.next = True
            source_2_pause.next = True
            source_3_pause.next = True
            yield clk.posedge
            yield clk.posedge
            yield clk.posedge
            source_0_pause.next = False
            source_1_pause.next = False
            source_2_pause.next = False
            source_3_pause.next = False
            yield clk.posedge
            select.next = 2
        yield clk.posedge
        yield clk.posedge
        rx_frame = sink.recv()
        assert rx_frame == test_frame1
        rx_frame = sink.recv()
        assert rx_frame == test_frame2
        yield delay(100)
        yield clk.posedge
        print("test 6: alterate pause sink")
        current_test.next = 6
        select.next = 1
        test_frame1 = udp_ep.UDPFrame()
        test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame1.eth_src_mac = 0x5A5152535455
        test_frame1.eth_type = 0x8000
        test_frame1.ip_version = 4
        test_frame1.ip_ihl = 5
        test_frame1.ip_dscp = 0
        test_frame1.ip_ecn = 0
        test_frame1.ip_length = None
        test_frame1.ip_identification = 0
        test_frame1.ip_flags = 2
        test_frame1.ip_fragment_offset = 0
        test_frame1.ip_ttl = 64
        test_frame1.ip_protocol = 0x11
        test_frame1.ip_header_checksum = None
        test_frame1.ip_source_ip = 0xc0a80165
        test_frame1.ip_dest_ip = 0xc0a80164
        test_frame1.udp_source_port = 1
        test_frame1.udp_dest_port = 2
        test_frame1.udp_length = None
        test_frame1.udp_checksum = None
        test_frame1.payload = bytearray(range(32))
        test_frame1.build()
        test_frame2 = udp_ep.UDPFrame()
        test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
        test_frame2.eth_src_mac = 0x5A5152535455
        test_frame2.eth_type = 0x8000
        test_frame2.ip_version = 4
        test_frame2.ip_ihl = 5
        test_frame2.ip_dscp = 0
        test_frame2.ip_ecn = 0
        test_frame2.ip_length = None
        test_frame2.ip_identification = 0
        test_frame2.ip_flags = 2
        test_frame2.ip_fragment_offset = 0
        test_frame2.ip_ttl = 64
        test_frame2.ip_protocol = 0x11
        test_frame2.ip_header_checksum = None
        test_frame2.ip_source_ip = 0xc0a80165
        test_frame2.ip_dest_ip = 0xc0a80164
        test_frame2.udp_source_port = 1
        test_frame2.udp_dest_port = 2
        test_frame2.udp_length = None
        test_frame2.udp_checksum = None
        test_frame2.payload = bytearray(range(32))
        test_frame2.build()
        source_1.send(test_frame1)
        source_2.send(test_frame2)
        yield clk.posedge
        yield clk.posedge
        # toggle the sink pause signal while frames drain
        while input_0_udp_payload_tvalid or input_1_udp_payload_tvalid or input_2_udp_payload_tvalid or input_3_udp_payload_tvalid:
            sink_pause.next = True
            yield clk.posedge
            yield clk.posedge
            yield clk.posedge
            sink_pause.next = False
            yield clk.posedge
            select.next = 2
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = sink.recv()
        assert rx_frame == test_frame1
        rx_frame = sink.recv()
        assert rx_frame == test_frame2
        yield delay(100)
        raise StopSimulation
return dut, source_0_logic, source_1_logic, source_2_logic, source_3_logic, sink_logic, clkgen, check
def test_bench():
    """Run the MyHDL simulation from the directory containing this script so
    that relative file references resolve correctly."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(script_dir)
    simulation = Simulation(bench())
    simulation.run()
# Allow running the testbench directly as a script.
if __name__ == '__main__':
    print("Running test...")
    test_bench()
| 37.651685 | 131 | 0.700849 |
ace790811fe1836aa079e521c111f2b3fa66619f | 7,602 | py | Python | models/DCGCN-SIS/model.py | CNU-DLandCV-lab/DCGCN-SIS | 545386f06dd722b96372fbef3fa565009c72b855 | [
"Unlicense"
] | null | null | null | models/DCGCN-SIS/model.py | CNU-DLandCV-lab/DCGCN-SIS | 545386f06dd722b96372fbef3fa565009c72b855 | [
"Unlicense"
] | null | null | null | models/DCGCN-SIS/model.py | CNU-DLandCV-lab/DCGCN-SIS | 545386f06dd722b96372fbef3fa565009c72b855 | [
"Unlicense"
] | null | null | null | import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import numpy as np
import tf_util
from pointnet_util import pointnet_sa_module, chevb_model, pointnet_fp_module, pointnet_upsample
from loss import *
def placeholder_inputs(batch_size, num_point):
    """Create the graph input placeholders.

    Returns a tuple of:
        pointclouds_pl: float32, (batch, num_point, 9) point features.
        labels_pl:      int32,   (batch, num_point) per-point instance labels.
        sem_pl:         int32,   (batch, num_point) per-point semantic labels.
    """
    point_shape = (batch_size, num_point, 9)
    label_shape = (batch_size, num_point)
    pointclouds_pl = tf.placeholder(tf.float32, shape=point_shape)
    labels_pl = tf.placeholder(tf.int32, shape=label_shape)
    sem_pl = tf.placeholder(tf.int32, shape=label_shape)
    return pointclouds_pl, labels_pl, sem_pl
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic/instance segmentation network. Input is BxNx9 (XYZ + extra
    features, see placeholder_inputs), output is (BxNxnum_class semantic
    logits, BxNx5 instance embedding). """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud[:, :, :3]
    l0_points = point_cloud[:, :, 3:]
    end_points['l0_xyz'] = l0_xyz
    # Shared encoder: progressive downsampling 1024 -> 256 -> 64 -> 16 points.
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1',pooling='att_pooling')
    l2_xyz, l2_points, l2_indices = chevb_model(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, scope='layer2',mlp=[64,64,128], bn_decay=bn_decay, output_dim=128, bn=True, pooling='att_pooling', is_training=is_training)
    l3_xyz, l3_points, l3_indices = chevb_model(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, scope='layer3', mlp=[128,128,256], bn_decay=bn_decay, output_dim=256,bn=True, pooling='att_pooling',is_training=is_training)
    l4_xyz, l4_points, l4_indices = chevb_model(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, scope='layer4', mlp=[256,256,512], bn_decay=bn_decay, output_dim=512,bn=True, pooling='att_pooling', is_training=is_training)
    # Semantic decoder: feature propagation back to the full resolution.
    l3_points_sem = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [512,512], is_training, bn_decay, scope='sem_fa_layer1')
    l2_points_sem = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points_sem, [256,256], is_training, bn_decay, scope='sem_fa_layer2')
    l1_points_sem = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points_sem, [256,128], is_training, bn_decay, scope='sem_fa_layer3')
    l0_points_sem = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points_sem, [128,128,128], is_training, bn_decay, scope='sem_fa_layer4')
    # Sem channel aggregation: fuse multi-scale semantic features at full resolution.
    l2_points_sem_up = pointnet_upsample(l0_xyz, l2_xyz, l2_points_sem, scope='sem_up1')
    l1_points_sem_up = pointnet_upsample(l0_xyz, l1_xyz, l1_points_sem, scope='sem_up2')
    net_sem_0 = tf.add(tf.concat([l0_points_sem, l1_points_sem_up], axis=-1, name='sem_up_concat'), l2_points_sem_up,name='sem_up_add')
    net_sem_0 = tf_util.conv1d(net_sem_0, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='sem_fc1', bn_decay=bn_decay)
    # Instance decoder: parallel feature-propagation branch.
    l3_points_ins = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [512,512], is_training, bn_decay, scope='ins_fa_layer1')
    l2_points_ins = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points_ins, [256,256], is_training, bn_decay, scope='ins_fa_layer2')
    l1_points_ins = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points_ins, [256,128], is_training, bn_decay, scope='ins_fa_layer3')
    l0_points_ins = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points_ins, [128,128,128], is_training, bn_decay, scope='ins_fa_layer4')
    # Ins channel aggregation
    l2_points_ins_up = pointnet_upsample(l0_xyz, l2_xyz, l2_points_ins, scope='ins_up1')
    l1_points_ins_up = pointnet_upsample(l0_xyz, l1_xyz, l1_points_ins, scope='ins_up2')
    net_ins_0 = tf.add(tf.concat([l0_points_ins, l1_points_ins_up], axis=-1, name='ins_up_concat'), l2_points_ins_up, name='ins_up_add')
    net_ins_0 = tf_util.conv1d(net_ins_0, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='ins_fc1', bn_decay=bn_decay)
    # Semantic-fused Instance: cosine-similarity self-attention over semantic features.
    sem = tf.matmul(net_sem_0, tf.transpose(net_sem_0, perm=[0, 2, 1]))
    sim_sem = tf.multiply(net_sem_0, net_sem_0)
    sim_sem = tf.reduce_sum(sim_sem, 2, keep_dims=True)
    sim_sem = tf.sqrt(sim_sem)
    sim_sem = tf.matmul(sim_sem, tf.transpose(sim_sem, perm=[0, 2, 1]))
    sim_sem = tf.add(sim_sem, 1e-7)  # avoid division by zero
    sem = tf.div(sem, sim_sem)
    tf.add_to_collection('sem-f-ins', sem)
    sem = tf.matmul(sem, net_sem_0)
    sem = tf.layers.dense(inputs=sem, units=128, activation=None, use_bias=False)
    sem = tf_util.batch_norm_for_conv1d(sem, is_training, bn_decay, "sem_bn", is_dist=False)
    sem = tf.nn.relu(sem)
    gate_sem = tf.layers.dense(inputs=sem, units=128, activation=None, use_bias=False)
    gate_sem = tf.nn.sigmoid(gate_sem)
    # Gated fusion of attention-refined semantic features with raw instance
    # features. FIX: original referenced undefined `bi_sem` here (NameError);
    # `sem` is the feature the gate was computed from and is the only gated
    # semantic tensor in scope.
    net_ins = tf.add(tf.multiply(sem, gate_sem), tf.multiply(tf.subtract(tf.ones_like(gate_sem), gate_sem), net_ins_0))
    net_ins_2 = tf.concat([net_ins_0, sem], axis=-1, name='net_ins_2_concat')
    # Instance-fused Semantic: inject a global instance context vector.
    net_ins_cache_0 = tf_util.conv1d(net_ins_2, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='ins_cache_1', bn_decay=bn_decay)
    net_ins_cache_1 = tf.reduce_mean(net_ins_cache_0, axis=1, keep_dims=True, name='ins_cache_2')
    net_ins_cache_1 = tf.tile(net_ins_cache_1, [1, num_point, 1], name='ins_cache_tile')
    net_sem_1 = net_sem_0 + net_ins_cache_1
    net_sem_2 = tf.concat([net_sem_0, net_sem_1], axis=-1, name='net_sem_2_concat')
    # Output heads.
    net_sem_3 = tf_util.conv1d(net_sem_2, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='sem_cache_3', bn_decay=bn_decay)
    net_sem_4 = net_sem_3 + net_sem_1
    net_sem_5 = tf_util.conv1d(net_sem_4, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='sem_fc2', bn_decay=bn_decay)
    net_sem_6 = tf_util.dropout(net_sem_5, keep_prob=0.5, is_training=is_training, scope='sem_dp_4')
    net_sem_6 = tf_util.conv1d(net_sem_6, num_class, 1, padding='VALID', activation_fn=None, scope='sem_fc5')
    net_ins_3 = tf_util.conv1d(net_ins_2, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='ins_fc2', bn_decay=bn_decay)
    net_ins_4 = net_ins_3 + net_ins + net_sem_3
    net_ins_5 = tf_util.conv1d(net_ins_4, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='ins_cache_3', bn_decay=bn_decay)
    net_ins_6 = tf_util.dropout(net_ins_5, keep_prob=0.5, is_training=is_training, scope='ins_dp_5')
    net_ins_6 = tf_util.conv1d(net_ins_6, 5, 1, padding='VALID', activation_fn=None, scope='ins_fc5')
    return net_sem_6, net_ins_6
def get_loss(pred, ins_label, pred_sem_label, pred_sem, sem_label):
    """ pred: BxNxE,
        ins_label: BxN
        pred_sem_label: BxN  (NOTE(review): currently unused in this function)
        pred_sem: BxNx13
        sem_label: BxN
    """
    # Per-point semantic cross-entropy loss.
    classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=sem_label, logits=pred_sem)
    tf.summary.scalar('classify loss', classify_loss)
    # Discriminative loss on the instance embedding space (pull points of the
    # same instance together, push cluster centers apart).
    feature_dim = pred.get_shape()[-1]
    delta_v = 0.5      # margin for the intra-cluster (variance) term
    delta_d = 1.5      # margin for the inter-cluster (distance) term
    param_var = 1.     # weight of the variance term
    param_dist = 1.    # weight of the distance term
    param_reg = 0.001  # weight of the regularization term
    disc_loss, l_var, l_dist, l_reg = discriminative_loss(pred, ins_label, feature_dim, delta_v, delta_d, param_var, param_dist, param_reg)
    loss = classify_loss + disc_loss
    tf.add_to_collection('losses', loss)
    return loss, classify_loss, disc_loss, l_var, l_dist, l_reg
# Smoke test: build the graph with a dummy point cloud when run as a script.
if __name__=='__main__':
    with tf.Graph().as_default():
        inputs = tf.zeros((32,2048,3))
        net, _ = get_model(inputs, tf.constant(True), 10)
        print(net)
| 56.311111 | 236 | 0.735727 |
ace790dc7e4a8752a9406129576df2578bae9a48 | 7,324 | py | Python | excel_text/tokens.py | AutoActuary/excel-text | b25ec52d74cac54de480b67a4b9db5bac618606e | [
"MIT"
] | null | null | null | excel_text/tokens.py | AutoActuary/excel-text | b25ec52d74cac54de480b67a4b9db5bac618606e | [
"MIT"
] | 4 | 2021-10-20T09:58:41.000Z | 2021-11-30T12:12:10.000Z | excel_text/tokens.py | AutoActuary/excel-text | b25ec52d74cac54de480b67a4b9db5bac618606e | [
"MIT"
] | null | null | null | import re
from dataclasses import dataclass, field
from typing import Any, List
from excel_dates import ensure_python_date, ensure_python_time
from excel_text.condition import Condition
from excel_text.elapsed import elapsed_hours, elapsed_minutes, elapsed_seconds
from excel_text.numbers import render_left, render_right
@dataclass
class FormatStringToken:
    """Base class for a single token parsed out of an Excel format string."""
    # The raw text of this token as it appeared in the format string.
    text: str
    def render(self, value: Any):
        # Subclasses render `value` according to this token's semantics.
        raise NotImplementedError()
@dataclass
class MonthOrMinuteToken(FormatStringToken):
    """
    A placeholder token to use when we don't know yet whether it's minutes or months.
    """
    def render(self, value: Any):
        # The parser must resolve this token before rendering; reaching this
        # point means the disambiguation step failed.
        raise NotImplementedError(
            f"Failed to determine whether '{self.text}' refers to months or minutes."
        )
@dataclass
class DateToken(FormatStringToken):
    """Abstract base for tokens that render a date component."""
    def render(self, value: Any):
        raise NotImplementedError()
@dataclass
class YearToken(DateToken):
    """Renders year tokens: "yy" -> two-digit year, "yyy"+ -> four-digit year."""

    def __post_init__(self):
        # Excel's "e" format code is an alias for the four-digit year.
        if re.fullmatch("e+", self.text):
            self.text = "yyyy"

    def render(self, value: Any):
        date = ensure_python_date(value)
        width = len(self.text)
        if width > 2:
            return date.strftime("%Y")
        if width > 0:
            return date.strftime("%y")
        raise ValueError("TODO proper Excel error")
@dataclass
class MonthToken(DateToken):
    """Renders month tokens: "m" -> 1, "mm" -> 01, "mmm" -> Jan,
    "mmmm" (or 6+) -> January, "mmmmm" -> J."""

    def render(self, value: Any):
        date = ensure_python_date(value)
        count = len(self.text)
        if count == 5:
            # Single-letter month abbreviation.
            return date.strftime("%b")[0]
        if count >= 4:
            # Four or six-plus letters: full month name.
            return date.strftime("%B")
        if count == 3:
            return date.strftime("%b")
        if count == 2:
            return f"{date.month:02d}"
        if count == 1:
            return str(date.month)
@dataclass
class DayToken(DateToken):
    """Renders day tokens: "d" -> 1, "dd" -> 01, "ddd" -> Mon, "dddd"+ -> Monday."""

    def render(self, value: Any):
        date = ensure_python_date(value)
        count = len(self.text)
        if count >= 4:
            return date.strftime("%A")
        if count == 3:
            return date.strftime("%a")
        if count == 2:
            return f"{date.day:02d}"
        if count == 1:
            return str(date.day)
@dataclass
class HourToken(DateToken):
    """Renders hour tokens ("h"/"hh"), in 24-hour or 12-hour mode."""

    # When True, render in 12-hour mode (for use with an AM/PM token).
    twelve: bool = False

    def render(self, value: Any):
        time_value = ensure_python_time(value)
        width = len(self.text)
        if self.twelve:
            if width >= 2:
                return time_value.strftime("%I")
            if width == 1:
                # Single "h": drop the leading zero of the 12-hour value.
                return time_value.strftime("%I").lstrip("0")
        else:
            if width >= 2:
                return f"{time_value.hour:02d}"
            if width == 1:
                return str(time_value.hour)
        raise ValueError("TODO proper Excel error")
@dataclass
class MinuteToken(DateToken):
    """Renders minute tokens: "m" -> 5, "mm" -> 05."""

    def render(self, value: Any):
        time_value = ensure_python_time(value)
        width = len(self.text)
        if width == 2:
            return f"{time_value.minute:02d}"
        if width == 1:
            return str(time_value.minute)
@dataclass
class SecondToken(FormatStringToken):
    """Renders second tokens, optionally with a fractional part (e.g. "ss.00")."""

    # The character that separates whole seconds from decimals in the format.
    decimal_char: str

    def render(self, value: Any):
        time_value = ensure_python_time(value)
        seconds = time_value.second + time_value.microsecond / 1000000
        whole, *fraction = self.text.split(self.decimal_char)
        int_width = len(whole)
        if fraction:
            # Zero-pad across integer digits, the separator and the decimals.
            frac_width = len(fraction[0])
            total_width = int_width + 1 + frac_width
        else:
            # Seconds rendered as a zero-padded integer.
            frac_width = 0
            total_width = int_width
        return f"{seconds:0{total_width}.{frac_width}f}"
@dataclass
class AmPmToken(FormatStringToken):
    """Renders the AM/PM designator in the case style given by the token text."""

    def render(self, value: Any):
        marker = ensure_python_time(value).strftime("%p")
        lower = marker.lower()
        upper = marker.upper()
        # Map each recognized token spelling to its rendered form.
        variants = {
            "am/pm": lower,
            "AM/PM": upper,
            "a/p": lower[0],
            "A/P": upper[0],
            "A/p": "A" if lower == "am" else "p",
            "a/P": "a" if lower == "am" else "P",
        }
        if self.text in variants:
            return variants[self.text]
        raise ValueError(f"Failed to render token '{self.text}'.")
@dataclass
class ElapsedHoursToken(FormatStringToken):
    """Renders "[h]": total elapsed hours, which may exceed 24."""
    # Fixed token text; excluded from the generated __init__/__repr__/__eq__.
    text: str = field(default="[h]", init=False, repr=False, compare=False)
    def render(self, value: Any):
        # Truncate to whole hours.
        return str(int(elapsed_hours(value)))
@dataclass
class ElapsedMinutesToken(FormatStringToken):
    """Renders "[m]": total elapsed minutes, which may exceed 60."""
    # Fixed token text; excluded from the generated __init__/__repr__/__eq__.
    text: str = field(default="[m]", init=False, repr=False, compare=False)
    def render(self, value: Any):
        # Truncate to whole minutes.
        return str(int(elapsed_minutes(value)))
@dataclass
class ElapsedSecondsToken(FormatStringToken):
    """Renders "[s]": total elapsed seconds, which may exceed 60."""
    # Fixed token text; excluded from the generated __init__/__repr__/__eq__.
    text: str = field(default="[s]", init=False, repr=False, compare=False)
    def render(self, value: Any):
        # Truncate to whole seconds.
        return str(int(elapsed_seconds(value)))
@dataclass
class VerbatimToken(FormatStringToken):
    """
    Renders a part of the format string into the results. Ignores the value.
    """
    def render(self, value: Any):
        # Emit the token's literal text unchanged.
        return self.text
@dataclass
class NumberToken(FormatStringToken):
    """Renders a numeric value according to a number-format token.

    >>> NumberToken(text="0000", decimal_char=".", thousands_char=",").render(12)
    '0012'
    >>> NumberToken(text="$#,##0.00", decimal_char=".", thousands_char=",").render(1234.5678)
    '$1,234.57'
    """

    # Decimal separator and thousands separator used by the format string.
    decimal_char: str
    thousands_char: str

    def render(self, value: Any):
        if not isinstance(value, (float, int)):
            raise ValueError("Value is not numeric.")
        if "%" in self.text:
            # Percent formats scale the value by 100 before rendering.
            value *= 100
        int_format, *frac_formats = self.text.split(self.decimal_char)
        if not frac_formats:
            # Integer-only format: round to the nearest whole number.
            digits = str(int(round(value)))
            return render_left(int_format[::-1], self.thousands_char, digits[::-1])
        # Format with decimals: render the two halves independently.
        left = render_left(int_format[::-1], self.thousands_char, str(int(value))[::-1])
        right = render_right(frac_formats[0], str(abs(value) % 1)[2:])
        return f"{left}{self.decimal_char}{right}"
@dataclass
class StringToken(FormatStringToken):
    """
    Represents the "@" formatter, which means "format as a string". Its definition is quite vague.
    """
    # Fixed token text; excluded from the generated __init__/__repr__/__eq__.
    text: str = field(default="@", init=False, repr=False, compare=False)
    def render(self, value: Any):
        # Plain string conversion of the value.
        return str(value)
@dataclass
class BinaryConditionalToken(FormatStringToken):
    """
    Represents an conditional structure like `[condition]true_value;false_value`.
    """

    # The condition to evaluate against the value, and the token sequences
    # rendered for the true and false outcomes respectively.
    condition: Condition
    true_tokens: List[FormatStringToken]
    false_tokens: List[FormatStringToken]

    def render(self, value: Any):
        branch = self.true_tokens if self.condition.eval(value) else self.false_tokens
        rendered = [token.render(value) for token in branch]
        return "".join(rendered)
@dataclass
class TernaryConditionalToken(FormatStringToken):
    """
    Represents an conditional structure like `value_if_gt_zero;value_if_zero;value_if_lt_zero`
    """

    # Token sequences for positive, negative, and zero values respectively.
    gt_tokens: List[FormatStringToken]
    lt_tokens: List[FormatStringToken]
    eq_tokens: List[FormatStringToken]

    def render(self, value: Any):
        # Pick the branch by the sign of the value.
        if value > 0:
            branch = self.gt_tokens
        elif value < 0:
            branch = self.lt_tokens
        else:
            branch = self.eq_tokens
        rendered = [token.render(value) for token in branch]
        return "".join(rendered)
| 25.608392 | 98 | 0.578236 |
ace79165b19d506eae75992a365331f47da0e30e | 2,689 | py | Python | reader.py | toastytato/DAQ_Interface | c824f5f2b73b18fa409d4ba38375f05bd191651e | [
"CNRI-Python",
"Condor-1.1",
"Naumen",
"FTL",
"MS-PL"
] | 2 | 2022-02-14T13:10:24.000Z | 2022-03-29T03:14:08.000Z | reader.py | toastytato/DAQ_Interface | c824f5f2b73b18fa409d4ba38375f05bd191651e | [
"CNRI-Python",
"Condor-1.1",
"Naumen",
"FTL",
"MS-PL"
] | null | null | null | reader.py | toastytato/DAQ_Interface | c824f5f2b73b18fa409d4ba38375f05bd191651e | [
"CNRI-Python",
"Condor-1.1",
"Naumen",
"FTL",
"MS-PL"
] | 2 | 2022-02-28T10:20:45.000Z | 2022-03-29T03:14:01.000Z | import time
import nidaqmx
import numpy as np
from nidaqmx.constants import AcquisitionType
from nidaqmx.stream_readers import AnalogMultiChannelReader
from pyqtgraph.Qt import QtCore
from config import *
# Thread for capturing input signal through DAQ
class SignalReader(QtCore.QThread):
    """Background thread that continuously reads analog input samples from an
    NI-DAQmx device and publishes them via a Qt signal."""
    # Emitted with the (num_channels x sample_size) ndarray after each read.
    incoming_data = QtCore.pyqtSignal(object)
    def __init__(self, sample_rate, sample_size, channels, dev_name="Dev2"):
        super().__init__()
        self.reader = None
        self.is_running = False
        self.is_paused = False
        self.input_channels = channels
        self.daq_in_name = dev_name
        self.sample_rate = sample_rate
        self.sample_size = sample_size
        # actual data received from the DAQ
        self.input = np.empty(shape=(len(CHANNEL_NAMES_IN), self.sample_size))
    # called on start()
    def run(self):
        # NOTE(review): if create_task() fails (DAQ absent), self.reader stays
        # None and self.task may never be assigned, so the read below and the
        # final close() would raise -- confirm this is the intended behavior.
        self.is_running = True
        self.create_task()
        while self.is_running:
            if not self.is_paused:
                try:
                    self.reader.read_many_sample(
                        data=self.input, number_of_samples_per_channel=self.sample_size
                    )
                    self.incoming_data.emit(self.input)
                except Exception as e:
                    print("Error with read_many_sample")
                    print(e)
                    break
        self.task.close()
    def create_task(self):
        # Build and start an NI-DAQmx task reading every configured AI channel,
        # then wrap its input stream in a multi-channel reader.
        print("reader input channels:", self.input_channels)
        try:
            self.task = nidaqmx.Task("Reader Task")
        except OSError:
            print("DAQ is not connected, task could not be created")
            return
        try:
            for ch in self.input_channels:
                channel_name = self.daq_in_name + "/ai" + str(ch)
                self.task.ai_channels.add_ai_voltage_chan(channel_name)
                print(channel_name)
        except Exception:
            print("DAQ is not connected, channel could not be added")
            return
        self.task.timing.cfg_samp_clk_timing(
            rate=self.sample_rate, sample_mode=AcquisitionType.CONTINUOUS
        )
        self.task.start()
        self.reader = AnalogMultiChannelReader(self.task.in_stream)
    def restart(self):
        # Pause reads while the DAQ task is torn down and recreated
        # (e.g. after a channel configuration change).
        self.is_paused = True
        self.task.close()
        self.create_task()
        self.is_paused = False
if __name__ == "__main__":
print("\nRunning demo for SignalReader\n")
# reader_thread = SignalReader(sample_rate=1000, sample_size=1000, channels=[])
# reader_thread.start()
# input("Press return to stop")
# reader_thread.is_running = False
# reader_thread.wait()
# print("\nTask done")
| 30.556818 | 87 | 0.616214 |
ace792d8e5cf69b5cedf3fb402b75f03b6efa9e0 | 12,163 | py | Python | SpoTwillio/lib/python3.6/site-packages/tests/integration/chat/v1/service/channel/test_message.py | Natfan/funlittlethings | 80d5378b45b5c0ead725942ee50403bd057514a6 | [
"MIT"
] | 3 | 2019-11-12T07:55:51.000Z | 2020-04-01T11:19:18.000Z | SpoTwillio/lib/python3.6/site-packages/tests/integration/chat/v1/service/channel/test_message.py | Natfan/funlittlethings | 80d5378b45b5c0ead725942ee50403bd057514a6 | [
"MIT"
] | 7 | 2020-06-06T01:06:19.000Z | 2022-02-10T11:15:14.000Z | SpoTwillio/lib/python3.6/site-packages/tests/integration/chat/v1/service/channel/test_message.py | Natfan/funlittlethings | 80d5378b45b5c0ead725942ee50403bd057514a6 | [
"MIT"
] | 2 | 2019-10-20T14:54:47.000Z | 2020-06-11T07:29:37.000Z | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class MessageTestCase(IntegrationTestCase):
    def test_fetch_request(self):
        # Mock a 500 so the client raises; we only verify the request shape.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                               .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                               .messages(sid="IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages/IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
        ))
    def test_fetch_response(self):
        # Mock a successful fetch payload and verify the client parses it.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "to": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2016-03-24T20:37:57Z",
                "date_updated": "2016-03-24T20:37:57Z",
                "was_edited": false,
                "from": "system",
                "attributes": "{}",
                "body": "Hello",
                "index": 0,
                "url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages/IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))
        actual = self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                                    .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                                    .messages(sid="IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
        self.assertIsNotNone(actual)
    def test_create_request(self):
        # Mock a 500 so the client raises; we only verify the POST body.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                               .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                               .messages.create(body="body")
        values = {
            'Body': "body",
        }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages',
            data=values,
        ))
    def test_create_response(self):
        # Mock a 201 Created payload (attributes null) and verify parsing.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "sid": "IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "to": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "attributes": null,
                "date_created": "2016-03-24T20:37:57Z",
                "date_updated": "2016-03-24T20:37:57Z",
                "was_edited": false,
                "from": "system",
                "body": "Hello",
                "index": 0,
                "url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages/IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))
        actual = self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                                    .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                                    .messages.create(body="body")
        self.assertIsNotNone(actual)
    def test_create_with_attributes_response(self):
        # Same as test_create_response but with a non-null attributes payload.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "sid": "IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "to": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2016-03-24T20:37:57Z",
                "date_updated": "2016-03-24T20:37:57Z",
                "was_edited": false,
                "from": "system",
                "attributes": "{}",
                "body": "Hello",
                "index": 0,
                "url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages/IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))
        actual = self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                                    .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                                    .messages.create(body="body")
        self.assertIsNotNone(actual)
def test_list_request(self):
    # Force a 500 so the client raises, then verify list() issued a GET
    # against the expected Messages collection URL.
    self.holodeck.mock(Response(500, ''))
    with self.assertRaises(TwilioException):
        self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .messages.list()
    self.holodeck.assert_has_request(Request(
        'get',
        'https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages',
    ))
def test_read_full_response(self):
    # Mock a one-item paged listing; list() should deserialize the page
    # into a non-None result.
    self.holodeck.mock(Response(
        200,
        '''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages?PageSize=50&Page=0",
"next_page_url": null,
"key": "messages"
},
"messages": [
{
"sid": "IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"to": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2016-03-24T20:37:57Z",
"date_updated": "2016-03-24T20:37:57Z",
"was_edited": false,
"from": "system",
"attributes": "{}",
"body": "Hello",
"index": 0,
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages/IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
]
}
'''
    ))
    actual = self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
        .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
        .messages.list()
    self.assertIsNotNone(actual)
def test_read_empty_response(self):
    # An empty "messages" page should still deserialize to a non-None
    # (empty) listing rather than raising.
    self.holodeck.mock(Response(
        200,
        '''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages?PageSize=50&Page=0",
"next_page_url": null,
"key": "messages"
},
"messages": []
}
'''
    ))
    actual = self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
        .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
        .messages.list()
    self.assertIsNotNone(actual)
def test_delete_request(self):
    # Force a 500 so the client raises, then verify delete() issued a
    # DELETE against the specific message instance URL.
    self.holodeck.mock(Response(500, ''))
    with self.assertRaises(TwilioException):
        self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .messages(sid="IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
    self.holodeck.assert_has_request(Request(
        'delete',
        'https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages/IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
    ))
def test_delete_response(self):
    # A 204 No Content response should make delete() report success (truthy).
    self.holodeck.mock(Response(
        204,
        None,
    ))
    actual = self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
        .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
        .messages(sid="IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
    self.assertTrue(actual)
def test_update_request(self):
    # Force a 500 so the client raises, then verify update() issued a POST
    # against the specific message instance URL.
    self.holodeck.mock(Response(500, ''))
    with self.assertRaises(TwilioException):
        self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .messages(sid="IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
    self.holodeck.assert_has_request(Request(
        'post',
        'https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages/IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
    ))
def test_update_response(self):
    # Mock a 200 payload (note: "attributes" is an escaped JSON string);
    # update() should deserialize it into a non-None resource.
    self.holodeck.mock(Response(
        200,
        '''
{
"sid": "IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"to": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"attributes": "{\\"test\\": \\"test\\"}",
"date_created": "2016-03-24T20:37:57Z",
"date_updated": "2016-03-24T20:37:57Z",
"was_edited": false,
"from": "system",
"body": "Hello",
"index": 0,
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages/IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
    ))
    actual = self.client.chat.v1.services(sid="ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
        .channels(sid="CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
        .messages(sid="IMaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
    self.assertIsNotNone(actual)
| 44.068841 | 199 | 0.570336 |
ace792f5ec332d1d4d0b53476c7a0373be295963 | 12,311 | py | Python | examples/Nolan/AFRL/Hipersonica/hyper18.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | d14713d8211b64293c4427005cf02fbd58630598 | [
"MIT"
] | 1 | 2019-03-26T03:00:03.000Z | 2019-03-26T03:00:03.000Z | examples/Nolan/AFRL/Hipersonica/hyper18.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | d14713d8211b64293c4427005cf02fbd58630598 | [
"MIT"
] | null | null | null | examples/Nolan/AFRL/Hipersonica/hyper18.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | d14713d8211b64293c4427005cf02fbd58630598 | [
"MIT"
] | 1 | 2019-07-14T22:53:52.000Z | 2019-07-14T22:53:52.000Z | if __name__ == "__main__":
import numpy as np
import beluga.Beluga as Beluga
import beluga.bvpsol as bvpsol
import beluga.bvpsol.algorithms as algorithms
import beluga.optim.Problem
from beluga.optim.problem import *
from beluga.continuation import *
import logging
# Import Libraries for Matrix Calculations
from sympy import symbols, Matrix, Transpose, simplify, diff
from sympy import sin
from sympy import cos, acos
from sympy import sqrt
from sympy import exp
from sympy import atan
from numpy import pi
writeEqn = True
simpList = False
if writeEqn:
writeList = []
# Constants
rho0, h_ref, A_ref, r_e, mass, mu = symbols('rho0, h_ref, A_ref, r_e, mass, mu')
sig_h, sig_t, sig_v, sig_g, sig_a, sig_b, Dt = symbols('sig_h, sig_t, sig_v, sig_g, sig_a, sig_b Dt')
theta_b = symbols('theta_b')
# Primary States
h, theta, v, gam = symbols('h, theta, v, gam')
# Control
a_trig, a_max, u, u_max = symbols('a_trig, a_max, u, u_max')
# alpha = a_max*sin(a_trig)
alpha = symbols('alpha')
# Secondary States
rho = rho0 * exp(-h/h_ref)
Cl = 1.5658*alpha*180/pi + -0.00000
Cd = 1.6537*(alpha*180/pi)**2 + 0.0612
D = 0.5*rho*v**2*Cd*A_ref
L = 0.5*rho*v**2*Cl*A_ref
r = r_e + h
# Primary State Rates
h_dot = v*sin(gam)
theta_dot = v*cos(gam)/r
v_dot = -D/mass - mu*sin(gam)/r**2
gam_dot = L/(mass*v) + (v/r - mu/(v*r**2))*cos(gam)
alpha_dot = u_max*sin(u)
writeList = [h_dot, theta_dot, v_dot, gam_dot, alpha_dot]
# Covariance Calculations
p11, p12, p13, p14, \
p22, p23, p24, \
p33, p34, \
p44 \
= symbols('p11 p12 p13 p14 \
p22 p23 p24 \
p33 p34 \
p44')
P = Matrix([[p11, p12, p13, p14],
[p12, p22, p23, p24],
[p13, p23, p33, p34],
[p14, p24, p34, p44]])
F = Matrix([[diff(h_dot, h), diff(theta_dot, h), diff(v_dot, h), diff(gam_dot, h)],
[diff(h_dot, theta), diff(theta_dot, theta), diff(v_dot, theta), diff(gam_dot, theta)],
[diff(h_dot, v), diff(theta_dot, v), diff(v_dot, v), diff(gam_dot, v)],
[diff(h_dot, gam), diff(theta_dot, gam), diff(v_dot, gam), diff(gam_dot, gam)]]).T
G = Matrix([[0, 0],
[0, 0],
[1, 0],
[0, 1]])
theta_r = theta - theta_b
Rho = sqrt(r_e**2 + r**2 - 2*r*r_e*cos(theta - theta_b)) # sqrt(2*r_e*(r_e + h)*(1 - cos(theta_r)) + h**2)
Rho_dot = (2 * r_e * h_dot * cos(theta_b - theta) - 2 * r_e * r * theta_dot * sin(
theta_b - theta) + 2 * r * h_dot) / (2 * Rho)
Bear = alpha + gam + acos(r_e*sin(theta - theta_b)/Rho)
H = Matrix([[diff(v_dot, h), diff(v_dot, theta), diff(v_dot, v), diff(v_dot, gam)]])
Q = Dt*Matrix([[sig_v**2, 0],
[ 0, sig_g**2]])
R = Dt*Matrix([[sig_a**2]])
P_dot = (F*P + P*F.T - P*H.T*(R**-1)*H*P + G*Q*G.T)
Dim = P_dot.shape
for i in range(0, Dim[0]):
for j in range(i, Dim[1]):
# print(P_dot[i, j])
writeList.append(P_dot[i, j])
# h_new, theta_new, v_new, gam_new = symbols('h_new, theta_new, v_new, gam_new')
# h_scale, theta_scale, v_scale, gam_scale = symbols('h_scale, theta_scale, v_scale, gam_scale')
states = [h, theta, v, gam, a_trig,
p11, p12, p13, p14,
p22, p23, p24,
p33, p34,
p44]
h_s, theta_s, v_s, gam_s, \
p11_s, p12_s, p13_s, p14_s, \
p22_s, p23_s, p24_s, \
p33_s, p34_s, \
p44_s = \
symbols('h_s, theta_s, v_s, gam_s, \
p11_s, p12_s, p13_s, p14_s, \
p22_s, p23_s, p24_s, \
p33_s, p34_s, \
p44_s')
scales = [h_s, theta_s, v_s, gam_s, 1,
p11_s, p12_s, p13_s, p14_s,
p22_s, p23_s, p24_s,
p33_s, p34_s,
p44_s]
h_n, theta_n, v_n, gam_n, \
p11_n, p12_n, p13_n, p14_n, \
p22_n, p23_n, p24_n, \
p33_n, p34_n, \
p44_n = \
symbols('h_n, theta_n, v_n, gam_n, \
p11_n, p12_n, p13_n, p14_n, \
p22_n, p23_n, p24_n, \
p33_n, p34_n, \
p44_n')
states_new = [h_n, theta_n, v_n, gam_n, a_trig,
p11_n, p12_n, p13_n, p14_n,
p22_n, p23_n, p24_n,
p33_n, p34_n,
p44_n]
# print(writeList)
Z1 = zip(writeList, scales)
scaledList = []
for item, Scale in Z1:
# print(item)
item = item/Scale
Z2 = zip(states, states_new, scales)
# print(item)
# for state, new, scale in Z2:
# print(state)
# print(new)
# print(scale)
for state, new, scale in Z2:
# print(new)
item = item.subs(state, scale*new)
# print(item)
scaledList.append(item)
k = 1
with open("eqns.txt", "w") as my_file:
for item in scaledList:
if simpList:
# print('* ' + str(item))
item = simplify(item)
# print('# ' + str(item))
my_file.write(str(item) + "\n")
# print(" Wrote " + str(k) + "/" + str(len(scaledList)))
k += 1
k = 1
alfa = symbols('alpha')
with open("eqnsUnscaled.txt", "w") as my_file:
for item in writeList:
item = item.subs(a_max*sin(a_trig),alfa)
my_file.write(str(item) + "\n")
# print(" Wrote " + str(k) + "/" + str(len(writeList)))
k += 1
''' Start Optimal Control Calculations '''
# Read Covariance State Rates from File
with open("eqns.txt", "r") as f:
eqnsList = list(f)
# for item in P_dot_eqns:
# print(item)
# Rename this and/or move to optim package?
problem = beluga.optim.Problem('hyperKalman7')
# problem = beluga.optim.Problem()
# Define independent variables
problem.independent('t', 's')
# rho = 'rho0*exp(-h/H)'
# Cl = '(1.5658*alfa + -0.0000)'
# Cd = '(1.6537*alfa^2 + 0.0612)'
# Cl = 'CLfunctio0n(alfa)'
# Cd = 'CDfunction(alfa)'
# D = '(0.5*'+rho+'*v^2*'+Cd+'*Aref)'
# L = '(0.5*'+rho+'*v^2*'+Cl+'*Aref)'
# r = '(re+h)'
# Define equations of motion
problem.state('h_n', eqnsList[0]+'+ ep/h_s*cos(u)', 'm') \
.state('theta_n', eqnsList[1], 'rad') \
.state('v_n', eqnsList[2], 'm/s') \
.state('gam_n', eqnsList[3], 'rad') \
.state('alpha', eqnsList[4], 'rad') \
.state('p11_n', eqnsList[5], 'm**2') \
.state('p12_n', eqnsList[6], 'm') \
.state('p13_n', eqnsList[7], 'm**2/s') \
.state('p14_n', eqnsList[8], 'm') \
.state('p23_n', eqnsList[10], 'rad*m/s') \
.state('p24_n', eqnsList[11], 'rad**2') \
.state('p33_n', eqnsList[12], 'm**2/s**2') \
.state('p34_n', eqnsList[13], 'm/s') \
.state('p44_n', eqnsList[14], 'rad**2') \
# Define controls
problem.control('u', 'rad')
# Define costs
# problem.cost['path'] = Expression('p11', 'm^2/s^2')
# problem.cost['path'] = Expression('0.001', 's')
problem.cost['terminal'] = Expression('p22_n', '1')
# Define constraints
problem.constraints().initial('h_n-h_n_0', 'm') \
.initial('theta_n-theta_n_0', 'rad') \
.initial('v_n-v_n_0', 'm/s') \
.initial('alpha-alpha_0', 'rad') \
.initial('p11_n-p11_n_0', 'm**2') \
.initial('p12_n-p12_n_0', 'm') \
.initial('p13_n-p13_n_0', 'm**2/s') \
.initial('p14_n-p14_n_0', 'm') \
.initial('p23_n-p23_n_0', 'rad*m/s') \
.initial('p24_n-p24_n_0', 'rad**2') \
.initial('p33_n-p33_n_0', 'm**2/s**2') \
.initial('p34_n-p34_n_0', 'm/s') \
.initial('p44_n-p44_n_0', 'rad**2') \
.terminal('h_n-h_n_f', 'm') \
.terminal('theta_n-theta_n_f', 'rad')
# Define constants
problem.constant('mu', 3.986e5*1e9, 'm^3/s^2') # Gravitational parameter, m^3/s^2
problem.constant('rho0', 1.2, 'kg/m^3') # Sea-level atmospheric density, kg/m^3
problem.constant('h_ref', 7500, 'm') # Scale height for atmosphere of Earth, m
problem.constant('mass', 750/2.2046226, 'kg') # Mass of vehicle, kg
problem.constant('r_e', 6378000, 'm') # Radius of planet, m
problem.constant('A_ref', pi*(24*.0254/2)**2, 'm^2') # Reference area of vehicle, m^2
problem.constant('rn', 1/12*0.3048, 'm') # Nose radius, m
problem.constant('Dt', 0.1, 's') # time step
problem.constant('sig_v', 10.0, 'm/s**2') # var in v
problem.constant('sig_g', 0.1*pi/180, 'rad/s') # var in gam
problem.constant('sig_a', 10.0, 'm/s**2') # var in range
problem.constant('sig_b', 0.1*pi/180, 'rad/s') # var in range
problem.constant('theta_b', -2*pi/180, 'rad') # location of kalmanBeacon
problem.constant('a_max', 10.0*pi/180, 'rad')
problem.constant('u_max', 0.25*pi/180, 'rad/s')
problem.constant('h_s', 1000, 'rad')
problem.constant('theta_s', 1*(pi/180), 'rad')
problem.constant('v_s', 1000, 'rad')
problem.constant('gam_s', 1*(pi/180), 'rad')
problem.constant('p11_s', 1e5, 'rad')
problem.constant('p12_s', 1e-4, 'rad')
problem.constant('p13_s', 100, 'rad')
problem.constant('p14_s', 1e-1, 'rad')
problem.constant('p22_s', 1e-9, 'rad')
problem.constant('p23_s', 1e-4, 'rad')
problem.constant('p24_s', 1e-8, 'rad')
problem.constant('p33_s', 10, 'rad')
problem.constant('p34_s', 1e-3, 'rad')
problem.constant('p44_s', 1e-6, 'rad')
problem.constant('ep', 40, 'rad')
problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd', tolerance=1e-4, max_iterations=1000, verbose=True, cached=False, number_arcs=16)
# problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose=True, cached=False)
problem.scale.unit('m', 1) \
.unit('s', 1) \
.unit('kg', 1) \
.unit('rad', 1)
# Define quantity (not implemented at present)
# Is this actually an Expression rather than a Value?
# problem.quantity = [Value('tanAng','tan(theta)')]
problem.guess.setup('auto', start=[80, 0, 5, -89, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], costate_guess=[0, 0, 0, 0, 0.0001, 0, 0, 0, 0, 0, 0, 0, 0, 0], time_integrate=2.5) # costate_guess=[0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# problem.guess.setup('auto',start=[80000,3.38575809e-21,5000,7.98617365e-02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],direction='forward',time_integrate=229.865209,costate_guess =[-1.37514494e+01,3.80852584e+06,-3.26290152e+03,-2.31984720e-14,0.00,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01])
# Figure out nicer way of representing this. Done?
problem.steps.add_step().num_cases(3) \
.terminal('theta_n', 10) \
.terminal('h_n', 20)
problem.steps.add_step().num_cases(6) \
.terminal('h_n', 0)
# problem.steps.add_step().num_cases(15) \
# .terminal('theta_n', 5)
# problem.steps.add_step().num_cases(21) \
# .terminal('theta', 10*pi/180)
Beluga.run(problem, display_level=logging.DEBUG)
| 36.749254 | 288 | 0.502396 |
ace794e09d131156cb0b4de68dbaeb7940a1c742 | 2,691 | py | Python | RSA_single_test.py | QuantumLiu/pyRSA_demo | dea749f2be26d464c5a5572735a52a8dcef48612 | [
"Apache-2.0"
] | 11 | 2017-05-16T12:27:54.000Z | 2021-07-14T02:59:18.000Z | RSA_single_test.py | QuantumLiu/pyRSA_demo | dea749f2be26d464c5a5572735a52a8dcef48612 | [
"Apache-2.0"
] | null | null | null | RSA_single_test.py | QuantumLiu/pyRSA_demo | dea749f2be26d464c5a5572735a52a8dcef48612 | [
"Apache-2.0"
] | 5 | 2017-05-18T05:17:48.000Z | 2021-01-28T06:46:27.000Z | import os,sys
import traceback
import random
def RSA_decrypt(d,n,tep):
    """Unimplemented stub: would decrypt ``tep`` with private exponent ``d`` mod ``n``."""
    pass
def RSA_encrypt(plaintext, e, n):
    """RSA-encrypt *plaintext* byte-by-byte; returns the joined ciphertext bytes.

    Each input byte c becomes pow(c, e, n), serialized as an 8-byte
    big-endian unsigned block.
    """
    out = bytearray()
    for byte in plaintext:
        out += pow(byte, e, n).to_bytes(8, byteorder='big', signed=False)
    return bytes(out)
def AKS(a, n):
    """Fermat-style probable-prime test of *n* using base offset *a*.

    Checks whether (17-a)^n == 17^n - a (mod n, left side reduced); returns
    1 when the congruence-style identity holds, else 0. Note the right side
    is NOT reduced mod n, exactly as in the original formulation.
    """
    lhs = pow(17 - a, n, n)
    rhs = pow(17, n, n) - (a % n)
    return 1 if lhs == rhs else 0
def big_rand():
    """Draw random integers in [2**16, 2**32) until one passes the primality check.

    Candidates divisible by any of the small primes 2, 3, 5, 7, 13 are
    rejected cheaply before the more expensive AKS/Fermat test.
    """
    lower, upper = 2 ** 16, 2 ** 32
    while True:
        candidate = random.randrange(lower, upper)
        if all(candidate % p for p in (2, 3, 5, 7, 13)) and AKS(2, candidate):
            return candidate
def get_e(e_n):
    """Pick a random public exponent e in [0, e_n) with gcd(e, e_n) == 1.

    Relies on the module-level coprime() helper, which returns (1, 0)
    exactly when its arguments are coprime.
    """
    while True:
        e = random.randrange(e_n)
        if coprime(e, e_n) == (1, 0):
            return e
def euclid(a, b):
    """Return the modular inverse of *a* modulo *b*, or 0 if none exists.

    Uses the extended Euclidean algorithm: rows track (x, y, r) with
    x*b + y*a == r, so when r reaches 1, y is the inverse of a mod b.
    Returns 0 when gcd(a, b) != 1 (no inverse).
    """
    lx = [1, 0, b]
    ly = [0, 1, a]
    while ly[2] != 1:
        if ly[2] == 0:
            return 0
        # Must be floor division: the original `/` yields floats in
        # Python 3 and silently corrupts the coefficient bookkeeping.
        q = lx[2] // ly[2]
        lt = [lx[i] - ly[i] * q for i in range(3)]
        lx = ly
        ly = lt
    return ly[1] % b
def coprime(a, b):
    """Run Euclid's gcd loop on (a, b); returns the tuple (gcd(a, b), 0).

    Callers test coprimality by comparing the result against (1, 0).
    """
    if a < b:
        a, b = b, a
    while b:
        a, b = b, a % b
    return (a, b)
def get_key():
    """Generate an RSA key set as the list [e, n, phi, d, p, q].

    p and q are random probable primes, n = p*q, phi = (p-1)*(q-1)
    (written as n - p - q + 1), e is a random exponent coprime to phi,
    and d is e's modular inverse mod phi.
    """
    p, q = big_rand(), big_rand()
    n = p * q
    phi = n - p - q + 1
    e = get_e(phi)
    d = euclid(e, phi)
    return [e, n, phi, d, p, q]
def IterateFiles(directory, formlist=('txt', 'doc', 'png')):
    """Recursively collect file paths under *directory* with a matching extension.

    Parameters
    ----------
    directory : str
        Root directory to walk; must exist and be a directory.
    formlist : collections of str
        Extensions (without the dot) to keep. The default is now an
        immutable tuple instead of a shared mutable list; membership
        testing is unchanged, so callers passing lists still work.

    Returns
    -------
    list of str
        Full paths of matching files, in os.walk order.
    """
    assert os.path.isdir(directory), 'make sure directory argument should be a directory'
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(directory, topdown=True)
        for name in names
        if name.split('.')[-1] in formlist
    ]
def drives():
    """Return the existing drive roots between A:\\ and M:\\ (Windows-style).

    On systems without lettered drives (e.g. POSIX) the result is empty.
    """
    letters = (chr(code) for code in range(ord('A'), ord('N')))
    return [letter + ":\\" for letter in letters if os.path.exists(letter + ':')]
def walk_drivers(formlist=['txt','doc','png']):
driver_list=drives()
files=[]
for driver in driver_list:
files+=IterateFiles(driver,formlist=['txt','doc','png'])
if sys.argv[0] in files:
files.remove(sys.argv[0])
print('There are '+str(len(files))+'target files\n')
return files
def encrypt(filename,k):
print('encrypting :'+filename)
try:
with open(filename,'rb') as f:
t=f.read()
c=RSA_encrypt(t,k[0],k[1])#加密
except:
traceback.print_exc()
def attack(formlist=['txt','doc','png']):
files=walk_drivers(formlist)
k=get_key()
print('Got key!')
for filename in files[-100:]:
encrypt(filename,k)
return len(files)
if __name__ == '__main__':
l=attack()
print('Dangerous! '+str(l)+' files have ben encrypted!')
os.system("pause")
| 26.91 | 90 | 0.539205 |
ace795408e5bea48b84396a4a5b1187d4458a115 | 6,699 | py | Python | bindings/python/ensmallen_graph/datasets/string/hymenobacterspapr13.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/hymenobacterspapr13.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/hymenobacterspapr13.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Hymenobacter sp. APR13.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:08:26.440019
The undirected graph Hymenobacter sp. APR13 has 4173 nodes and 435703 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05005 and has 27 connected components, where the component with most
nodes has 4114 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 178, the mean node degree is 208.82, and
the node degree mode is 1. The top 5 most central nodes are 1356852.N008_06875
(degree 1312), 1356852.N008_20685 (degree 1039), 1356852.N008_09290 (degree
1006), 1356852.N008_01805 (degree 974) and 1356852.N008_06390 (degree 958).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import HymenobacterSpApr13
# Then load the graph
graph = HymenobacterSpApr13()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def HymenobacterSpApr13(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Hymenobacter sp. APR13 graph.

    The graph is automatically retrieved from the STRING repository
    (Szklarczyk et al., 2019, STRING v11, Nucleic Acids Research).

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed; undirected by default.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional keyword arguments forwarded to the graph constructor.

    Returns
    -----------------------
    Instance of the Hymenobacter sp. APR13 graph.
    """
    # Build the retriever first, then invoke it: the call performs the
    # actual download/build and yields the EnsmallenGraph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="HymenobacterSpApr13",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
ace7957539a109c9a4ef37629873af65fb42eeea | 938 | py | Python | project/urls.py | platypotomus/python-react-travel-app | fefbbfae3aae554e82ae41d1550f380287b06702 | [
"MIT"
] | null | null | null | project/urls.py | platypotomus/python-react-travel-app | fefbbfae3aae554e82ae41d1550f380287b06702 | [
"MIT"
] | null | null | null | project/urls.py | platypotomus/python-react-travel-app | fefbbfae3aae554e82ae41d1550f380287b06702 | [
"MIT"
] | null | null | null | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# from django.urls import include
from frontend import views
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # App URLconfs, both mounted at the site root; matched in order.
    url('', include ('cities.urls')),
    url('', include ('frontend.urls')),
    # Catch-all: render the frontend index for anything not matched above.
    url('', views.index)
]
| 33.5 | 79 | 0.692964 |
ace79675a1ecd3aec2e287df832429139bcc9a48 | 339 | py | Python | CONTENT/Resources/guides/__UNSORTED/120.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | CONTENT/Resources/guides/__UNSORTED/120.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 162 | 2021-03-09T01:52:11.000Z | 2022-03-12T01:09:07.000Z | CONTENT/Resources/guides/__UNSORTED/120.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | # down-top
class Solution:
    """LeetCode 120 (Triangle): bottom-up dynamic programming."""

    def minimumTotal(self, triangle):
        """
        :type triangle: List[List[int]]
        :rtype: int
        """
        # best[j]: cheapest path total from the row below, column j.
        # One extra slot keeps best[j + 1] valid on the last column.
        best = [0] * (len(triangle) + 1)
        for row in reversed(triangle):
            for j, value in enumerate(row):
                best[j] = value + min(best[j], best[j + 1])
        return best[0]
ace797047b1d45ced39579359ab4754ea20c4977 | 201 | py | Python | example.py | contribu/rollingrank | 33cdeadf5eda724f5d50438ae7b314b3670d3503 | [
"MIT"
] | 9 | 2020-04-03T17:22:59.000Z | 2021-11-19T01:09:54.000Z | example.py | contribu/rollingrank | 33cdeadf5eda724f5d50438ae7b314b3670d3503 | [
"MIT"
] | 3 | 2020-12-17T13:18:06.000Z | 2022-03-02T11:12:47.000Z | example.py | contribu/rollingrank | 33cdeadf5eda724f5d50438ae7b314b3670d3503 | [
"MIT"
] | null | null | null | import numpy as np
import rollingrank
# Sample series to rank over a sliding window.
x = np.array([0.1, 0.2, 0.3, 0.25, 0.1, 0.2, 0.3])
# Rank of each element within its trailing 3-element window
# (exact output semantics defined by the rollingrank package).
y = rollingrank.rollingrank(x, window=3)
print(y)
# Same computation with pct=True — presumably ranks expressed as
# percentiles; confirm against rollingrank's documentation.
y = rollingrank.rollingrank(x, window=3, pct=True)
print(y)
| 20.1 | 50 | 0.676617 |
ace797ae526e92a7a310dc6c2f46a33e1e21f860 | 23,848 | py | Python | armi/reactor/blueprints/__init__.py | keckler/armi | b5f95b4795aa21e00fd6786f6994862a4bdccb16 | [
"Apache-2.0"
] | 162 | 2019-11-01T17:35:58.000Z | 2022-03-18T04:22:39.000Z | armi/reactor/blueprints/__init__.py | keckler/armi | b5f95b4795aa21e00fd6786f6994862a4bdccb16 | [
"Apache-2.0"
] | 315 | 2019-11-01T17:32:05.000Z | 2022-03-30T03:51:42.000Z | armi/reactor/blueprints/__init__.py | keckler/armi | b5f95b4795aa21e00fd6786f6994862a4bdccb16 | [
"Apache-2.0"
] | 55 | 2019-11-01T16:59:59.000Z | 2022-03-25T18:19:06.000Z | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Blueprints describe the geometric and composition details of the objects in the reactor
(e.g. fuel assemblies, control rods, etc.).
Inputs captured within this blueprints module pertain to major design criteria like
custom material properties or basic structures like the assemblies in use.
This is essentially a wrapper for a yaml loader.
The given yaml file is expected to rigidly adhere to given key:value pairings.
See the :doc:`blueprints documentation </user/inputs/blueprints>` for more details.
The expected file structure is::
nuclide flags:
AM241: {burn: true, xs: true}
...
custom isotopics: {} # optional
blocks:
name:
component name:
component dimensions
...
assemblies:
name:
specifier: ABC
blocks: [...]
height: [...]
axial mesh points: [...]
xs types: [...]
# optional
myMaterialModification1: [...]
myMaterialModification2: [...]
# optionally extra settings (note this is probably going to be a removed feature)
# hotChannelFactors: TWRPclad
Examples
--------
>>> design = blueprints.Blueprints.load(self.yamlString)
>>> print(design.gridDesigns)
Notes
-----
The blueprints system was built to enable round trip translations between
text representations of input and objects in the code.
"""
from collections import OrderedDict
import collections
import copy
import os
import pathlib
import traceback
import typing
from ruamel.yaml import CLoader, RoundTripLoader
import ordered_set
import tabulate
import yamlize
import yamlize.objects
from armi import context
from armi import getPluginManager, getPluginManagerOrFail
from armi import plugins
from armi import runLog
from armi import settings
from armi.utils.customExceptions import InputError
from armi.nucDirectory import elements
from armi.nucDirectory import nuclideBases
from armi.reactor import assemblies
from armi.reactor import geometry
from armi.reactor import systemLayoutInput
from armi.scripts import migration
from armi.utils import textProcessors
# NOTE: using non-ARMI-standard imports because these are all a part of this package,
# and using the module imports would make the attribute definitions extremely long
# without adding detail
from armi.reactor.blueprints.reactorBlueprint import Systems, SystemBlueprint
from armi.reactor.blueprints.assemblyBlueprint import AssemblyKeyedList
from armi.reactor.blueprints.blockBlueprint import BlockKeyedList
from armi.reactor.blueprints.componentBlueprint import ComponentKeyedList
from armi.reactor.blueprints import isotopicOptions
from armi.reactor.blueprints.gridBlueprint import Grids, Triplet
# Record that (and from where) blueprints were imported, so the framework
# can report import-ordering problems involving plugin configuration.
context.BLUEPRINTS_IMPORTED = True
context.BLUEPRINTS_IMPORT_CONTEXT = "".join(traceback.format_stack())
def loadFromCs(cs, roundTrip=False):
    """
    Function to load Blueprints based on supplied ``CaseSettings``.

    Opens ``cs["loadingFile"]`` from within the case input directory,
    resolves markup inclusions relative to that file's parent directory,
    and yamlizes the result into a :class:`Blueprints` tree.
    """
    # pylint: disable=import-outside-toplevel; circular import protection
    from armi.utils import directoryChangers

    # Work from the case input directory so relative paths in the
    # loading file resolve correctly.
    with directoryChangers.DirectoryChanger(cs.inputDirectory, dumpOnException=False):
        with open(cs["loadingFile"], "r") as bpYaml:
            root = pathlib.Path(cs["loadingFile"]).parent.absolute()
            # Replaces the open stream with an in-memory stream that has
            # inclusions expanded.
            bpYaml = textProcessors.resolveMarkupInclusions(bpYaml, root)
            try:
                bp = Blueprints.load(bpYaml, roundTrip=roundTrip)
            except yamlize.yamlizing_error.YamlizingError as err:
                # Give a targeted hint for the legacy `cross sections`
                # schema before re-raising the original error.
                if "cross sections" in err.args[0]:
                    runLog.error(
                        "The loading file {} contains invalid `cross sections` input. "
                        "Please run the `modify` entry point on this case to automatically convert."
                        "".format(cs["loadingFile"])
                    )
                raise
        return bp
class _BlueprintsPluginCollector(yamlize.objects.ObjectType):
    """
    Simple metaclass for adding yamlize.Attributes from plugins to Blueprints.

    This calls the defineBlueprintsSections() plugin hook to discover new class
    attributes to add before the yamlize code fires off to make the root yamlize.Object.
    Since yamlize.Object itself uses a metaclass to define the attributes to turn into
    yamlize.Attributes, these need to be folded in early.
    """
    def __new__(mcs, name, bases, attrs):
        # pylint: disable=no-member
        pm = getPluginManager()
        if pm is None:
            # No plugin manager yet: class is still created, but without any
            # plugin-contributed sections. Warn so the mis-ordering is visible.
            runLog.warning(
                "Blueprints were instantiated before the framework was "
                "configured with plugins. Blueprints cannot be imported before "
                "ARMI has been configured."
            )
        else:
            # Each hook implementation yields (attrName, yamlize.Attribute,
            # resolver) triples; fold them into the class namespace before
            # yamlize's own metaclass processes the attributes.
            pluginSections = pm.hook.defineBlueprintsSections()
            for plug in pluginSections:
                for (attrName, section, resolver) in plug:
                    assert isinstance(section, yamlize.Attribute)
                    if attrName in attrs:
                        raise plugins.PluginError(
                            "There is already a section called '{}' in the reactor "
                            "blueprints".format(attrName)
                        )
                    attrs[attrName] = section
                    # Resolvers run later during _prepConstruction; collected on
                    # the class-level _resolveFunctions list defined in the body.
                    attrs["_resolveFunctions"].append(resolver)
        newType = yamlize.objects.ObjectType.__new__(mcs, name, bases, attrs)
        return newType
class Blueprints(yamlize.Object, metaclass=_BlueprintsPluginCollector):
    """Base Blueprints object representing all the subsections in the input file."""
    # Each yamlize.Attribute maps a top-level YAML section (``key``) onto a typed
    # design container; ``default=None`` makes every section optional in the file.
    nuclideFlags = yamlize.Attribute(
        key="nuclide flags", type=isotopicOptions.NuclideFlags, default=None
    )
    customIsotopics = yamlize.Attribute(
        key="custom isotopics", type=isotopicOptions.CustomIsotopics, default=None
    )
    blockDesigns = yamlize.Attribute(key="blocks", type=BlockKeyedList, default=None)
    assemDesigns = yamlize.Attribute(
        key="assemblies", type=AssemblyKeyedList, default=None
    )
    systemDesigns = yamlize.Attribute(key="systems", type=Systems, default=None)
    gridDesigns = yamlize.Attribute(key="grids", type=Grids, default=None)
    componentDesigns = yamlize.Attribute(
        key="components", type=ComponentKeyedList, default=None
    )
    # These are used to set up new attributes that come from plugins. Defining its
    # initial state here to make pylint happy
    _resolveFunctions = []
    def __new__(cls):
        # yamlizable does not call __init__, so attributes that are not defined above
        # need to be initialized here
        self = yamlize.Object.__new__(cls)
        self.assemblies = {}
        self._prepped = False
        self._assembliesBySpecifier = {}
        self.allNuclidesInProblem = (
            ordered_set.OrderedSet()
        )  # Better for performance since these are used for lookups
        self.activeNuclides = ordered_set.OrderedSet()
        self.inertNuclides = ordered_set.OrderedSet()
        self.elementsToExpand = []
        return self
    def __init__(self):
        # again, yamlize does not call __init__, instead we use Blueprints.load which
        # creates an instance of a Blueprints object and initializes it with values
        # using setattr. Since the method is never called, it serves the purpose of
        # preventing pylint from issuing warnings about attributes not existing.
        self._assembliesBySpecifier = {}
        self._prepped = False
        self.systemDesigns = Systems()
        self.assemDesigns = AssemblyKeyedList()
        self.blockDesigns = BlockKeyedList()
        self.assemblies = {}
        self.grids = Grids()
        self.elementsToExpand = []
    def __repr__(self):
        # Summarize by design counts; helpful when inspecting loaded blueprints.
        return "<{} Assemblies:{} Blocks:{}>".format(
            self.__class__.__name__, len(self.assemDesigns), len(self.blockDesigns)
        )
    def constructAssem(self, cs, name=None, specifier=None):
        """
        Construct a new assembly instance from the assembly designs in this Blueprints object.

        Parameters
        ----------
        cs : CaseSettings object
            Used to apply various modeling options when constructing an assembly.
        name : str (optional, and should be exclusive with specifier)
            Name of the assembly to construct. This should match the key that was used
            to define the assembly in the Blueprints YAML file.
        specifier : str (optional, and should be exclusive with name)
            Identifier of the assembly to construct. This should match the identifier
            that was used to define the assembly in the Blueprints YAML file.

        Raises
        ------
        ValueError
            If neither name nor specifier are passed

        Notes
        -----
        There is some possibility for "compiling" the logic with closures to make
        constructing an assembly / block / component faster. At this point it is pretty
        much irrelevant because we are currently just deepcopying already constructed
        assemblies.

        Currently, this method is backward compatible with other code in ARMI and
        generates the `.assemblies` attribute (the BOL assemblies). Eventually, this
        should be removed.
        """
        self._prepConstruction(cs)
        # TODO: this should be migrated assembly designs instead of assemblies
        if name is not None:
            assem = self.assemblies[name]
        elif specifier is not None:
            assem = self._assembliesBySpecifier[specifier]
        else:
            raise ValueError("Must supply assembly name or specifier to construct")
        a = copy.deepcopy(assem)
        # since a deepcopy has the same assembly numbers and block id's, we need to make it unique
        a.makeUnique()
        return a
    def _prepConstruction(self, cs):
        """
        This method initializes a bunch of information within a Blueprints object such
        as assigning assembly and block type numbers, resolving the nuclides in the
        problem, and pre-populating assemblies.

        Ideally, it would not be necessary at all, but the ``cs`` currently contains a
        bunch of information necessary to create the applicable model. If it were
        possible, it would be terrific to override the Yamlizable.from_yaml method to
        run this code after the instance has been created, but we need additional
        information in order to build the assemblies that is not within the YAML file.

        This method should not be called directly, but it is used in testing.
        """
        if not self._prepped:
            self._assignTypeNums()
            # Plugin-contributed resolvers (gathered by the metaclass) run first.
            for func in self._resolveFunctions:
                func(self, cs)
            self._resolveNuclides(cs)
            self._assembliesBySpecifier.clear()
            self.assemblies.clear()
            for aDesign in self.assemDesigns:
                a = aDesign.construct(cs, self)
                self._assembliesBySpecifier[aDesign.specifier] = a
                self.assemblies[aDesign.name] = a
            self._checkAssemblyAreaConsistency(cs)
            runLog.header("=========== Verifying Assembly Configurations ===========")
            # pylint: disable=no-member
            getPluginManagerOrFail().hook.afterConstructionOfAssemblies(
                assemblies=self.assemblies.values(), cs=cs
            )
        self._prepped = True
    def _assignTypeNums(self):
        # Gather any block designs defined inline on assemblies into the
        # top-level blockDesigns collection.
        if self.blockDesigns is None:
            # this happens when directly defining assemblies.
            self.blockDesigns = BlockKeyedList()
            for aDesign in self.assemDesigns:
                for bDesign in aDesign.blocks:
                    if bDesign not in self.blockDesigns:
                        self.blockDesigns.add(bDesign)
    def _resolveNuclides(self, cs):
        """
        Process elements and determine how to expand them to natural isotopics.

        Also builds meta-data about which nuclides are in the problem.

        This system works by building a dictionary in the
        ``elementsToExpand`` attribute with ``Element`` keys
        and list of ``NuclideBase`` values.

        The actual expansion of elementals to isotopics occurs during
        :py:meth:`Component construction <armi.reactor.blueprints.componentBlueprint.
        ComponentBlueprint._constructMaterial>`.
        """
        from armi import utils
        actives = set()
        inerts = set()
        undefBurnChainActiveNuclides = set()
        if self.nuclideFlags is None:
            self.nuclideFlags = isotopicOptions.genDefaultNucFlags()
        self.elementsToExpand = []
        for nucFlag in self.nuclideFlags:
            # this returns any nuclides that are flagged specifically for expansion by input
            expandedElements = nucFlag.fileAsActiveOrInert(
                actives, inerts, undefBurnChainActiveNuclides
            )
            self.elementsToExpand.extend(expandedElements)
        inerts -= actives
        self.customIsotopics = self.customIsotopics or isotopicOptions.CustomIsotopics()
        (
            elementalsToKeep,
            expansions,
        ) = isotopicOptions.autoSelectElementsToKeepFromSettings(cs)
        # Flag all elementals for expansion unless they've been flagged otherwise by
        # user input or automatic lattice/datalib rules.
        for elemental in nuclideBases.instances:
            if not isinstance(elemental, nuclideBases.NaturalNuclideBase):
                # `elemental` may be a NaturalNuclideBase or a NuclideBase
                # skip all NuclideBases
                continue
            if elemental in elementalsToKeep:
                continue
            if elemental.name in actives:
                currentSet = actives
                actives.remove(elemental.name)
            elif elemental.name in inerts:
                currentSet = inerts
                inerts.remove(elemental.name)
            else:
                # This was not specified in the nuclide flags at all.
                # If a material with this in its composition is brought in
                # it's nice from a user perspective to allow it.
                # But current behavior is that all nuclides in problem
                # must be declared up front.
                continue
            self.elementsToExpand.append(elemental.element)
            if (
                elemental.name in self.nuclideFlags
                and self.nuclideFlags[elemental.name].expandTo
            ):
                # user-input has precedence
                newNuclides = [
                    nuclideBases.byName[nn]
                    for nn in self.nuclideFlags[elemental.element.symbol].expandTo
                ]
            elif (
                elemental in expansions
                and elemental.element.symbol in self.nuclideFlags
            ):
                # code-specific expansion required
                newNuclides = expansions[elemental]
                # overlay code details onto nuclideFlags for other parts of the code
                # that will use them.
                # TODO: would be better if nuclideFlags did this upon reading s.t.
                # order didn't matter. On the other hand, this is the only place in
                # the code where NuclideFlags get built and have user settings around
                # (hence "resolve").
                # This must be updated because the operative expansion code just uses the flags
                #
                # Also, if this element is not in nuclideFlags at all, we just don't add it
                self.nuclideFlags[elemental.element.symbol].expandTo = [
                    nb.name for nb in newNuclides
                ]
            else:
                # expand to all possible natural isotopics
                newNuclides = elemental.element.getNaturalIsotopics()
            # The expansion products inherit the active/inert status of the
            # elemental they replace.
            for nb in newNuclides:
                currentSet.add(nb.name)
        if self.elementsToExpand:
            runLog.info(
                "Will expand {} elementals to have natural isotopics".format(
                    ", ".join(element.symbol for element in self.elementsToExpand)
                )
            )
        self.activeNuclides = ordered_set.OrderedSet(sorted(actives))
        self.inertNuclides = ordered_set.OrderedSet(sorted(inerts))
        self.allNuclidesInProblem = ordered_set.OrderedSet(
            sorted(actives.union(inerts))
        )
        # Inform user which nuclides are truncating the burn chain.
        if undefBurnChainActiveNuclides:
            runLog.info(
                tabulate.tabulate(
                    [
                        [
                            "Nuclides truncating the burn-chain:",
                            utils.createFormattedStrWithDelimiter(
                                list(undefBurnChainActiveNuclides)
                            ),
                        ]
                    ],
                    tablefmt="plain",
                ),
                single=True,
            )
    def _checkAssemblyAreaConsistency(self, cs):
        """Verify all assemblies share one cross-sectional area, and all blocks
        within each assembly agree within ``cs["acceptableBlockAreaError"]``."""
        references = None
        for a in self.assemblies.values():
            if references is None:
                # First assembly encountered becomes the reference.
                references = (a, a.getArea())
                continue
            assemblyArea = a.getArea()
            if isinstance(a, assemblies.RZAssembly):
                # R-Z assemblies by definition have different areas, so skip the check
                continue
            if abs(references[1] - assemblyArea) > 1e-9:
                runLog.error("REFERENCE COMPARISON ASSEMBLY:")
                references[0][0].printContents()
                runLog.error("CURRENT COMPARISON ASSEMBLY:")
                a[0].printContents()
                raise InputError(
                    "Assembly {} has a different area {} than assembly {} {}. Check inputs for accuracy".format(
                        a, assemblyArea, references[0], references[1]
                    )
                )
            # Compare every block's area against the first block's (relative error).
            blockArea = a[0].getArea()
            for b in a[1:]:
                if (
                    abs(b.getArea() - blockArea) / blockArea
                    > cs["acceptableBlockAreaError"]
                ):
                    runLog.error("REFERENCE COMPARISON BLOCK:")
                    a[0].printContents(includeNuclides=False)
                    runLog.error("CURRENT COMPARISON BLOCK:")
                    b.printContents(includeNuclides=False)
                    for c in b.getChildren():
                        runLog.error(
                            "{0} area {1} effective area {2}"
                            "".format(c, c.getArea(), c.getVolume() / b.getHeight())
                        )
                    raise InputError(
                        "Block {} has a different area {} than block {} {}. Check inputs for accuracy".format(
                            b, b.getArea(), a[0], blockArea
                        )
                    )
    @classmethod
    def migrate(cls, inp: typing.TextIO):
        """Given a stream representation of a blueprints file, migrate it.

        Parameters
        ----------
        inp : typing.TextIO
            Input stream to migrate.
        """
        # Apply each registered blueprints migration in sequence; the output
        # stream of one migration feeds the next.
        for migI in migration.ACTIVE_MIGRATIONS:
            if issubclass(migI, migration.base.BlueprintsMigration):
                mig = migI(stream=inp)
                inp = mig.apply()
        return inp
    @classmethod
    def load(cls, stream, roundTrip=False):
        """This class method is a wrapper around the `yamlize.Object.load()` method.

        The reason for the wrapper is to allow us to default to `CLoader`. Essentially,
        the `CLoader` class is 10x faster, but doesn't allow for "round trip" (read-
        write) access to YAMLs; for that we have the `RoundTripLoader`.
        """
        loader = RoundTripLoader if roundTrip else CLoader
        return super().load(stream, Loader=loader)
def migrate(bp: Blueprints, cs):
    """
    Apply migrations to the input structure.

    This is a good place to perform migrations that address changes to the system design
    description (settings, blueprints, geom file). We have access to all three here, so
    we can even move stuff between files. Namely, this:

    * creates a grid blueprint to represent the core layout from the old ``geomFile``
      setting, and applies that grid to a ``core`` system.
    * moves the radial and azimuthal submesh values from the ``geomFile`` to the
      assembly designs, but only if they are uniform (this is limiting, but could be
      made more sophisticated in the future, if there is need)

    This allows settings-driven core map to still be used for backwards compatibility.
    At some point once the input stabilizes, we may wish to move this out to the
    dedicated migration portion of the code, and not perform the migration so
    implicitly.
    """
    from armi.reactor.blueprints import gridBlueprint
    # Ensure the target containers exist before we write into them.
    if bp.systemDesigns is None:
        bp.systemDesigns = Systems()
    if bp.gridDesigns is None:
        bp.gridDesigns = gridBlueprint.Grids()
    if "core" in [rd.name for rd in bp.gridDesigns]:
        raise ValueError("Cannot auto-create a 2nd `core` grid. Adjust input.")
    # Read the legacy geometry file and convert it into grid blueprints.
    geom = systemLayoutInput.SystemLayoutInput()
    geom.readGeomFromFile(os.path.join(cs.inputDirectory, cs["geomFile"]))
    gridDesigns = geom.toGridBlueprints("core")
    for design in gridDesigns:
        bp.gridDesigns[design.name] = design
    if "core" in [rd.name for rd in bp.systemDesigns]:
        raise ValueError(
            "Core map is defined in both the ``geometry`` setting and in "
            "the blueprints file. Only one definition may exist. "
            "Update inputs."
        )
    bp.systemDesigns["core"] = SystemBlueprint("core", "core", Triplet())
    # For RZ/RZT geometries, migrate (uniform-only) submesh values from the
    # geometry file onto every assembly design.
    if geom.geomType in (geometry.GeomType.RZT, geometry.GeomType.RZ):
        # indices[4]/indices[5] hold the azimuthal/radial submesh in the legacy format.
        aziMeshes = {indices[4] for indices, _ in geom.assemTypeByIndices.items()}
        radMeshes = {indices[5] for indices, _ in geom.assemTypeByIndices.items()}
        if len(aziMeshes) > 1 or len(radMeshes) > 1:
            raise ValueError(
                "The system layout described in {} has non-uniform "
                "azimuthal and/or radial submeshing. This migration is currently "
                "only smart enough to handle a single radial and single azimuthal "
                "submesh for all assemblies.".format(cs["geomFile"])
            )
        radMesh = next(iter(radMeshes))
        aziMesh = next(iter(aziMeshes))
        for _, aDesign in bp.assemDesigns.items():
            aDesign.radialMeshPoints = radMesh
            aDesign.azimuthalMeshPoints = aziMesh
    # Someday: write out the migrated file. At the moment this messes up the case
    # title and doesn't yet have the other systems in place so this isn't the right place.
    # cs.writeToXMLFile(cs.caseTitle + '.migrated.xml')
    # with open(os.path.split(cs['loadingFile'])[0] + '.migrated.' + '.yaml', 'w') as loadingFile:
    #     blueprints.Blueprints.dump(bp, loadingFile)
| 40.013423 | 113 | 0.627893 |
ace797fa2f89c802324d4c2371cfc9d7f30f1c65 | 1,431 | py | Python | globaldat.py | aldenq/Yodel | 52870781c82cf9af63b8dbaf61cca8240d1b7928 | [
"MIT"
] | 2 | 2021-03-11T22:56:51.000Z | 2021-04-16T21:12:28.000Z | globaldat.py | aldenq/Yodel | 52870781c82cf9af63b8dbaf61cca8240d1b7928 | [
"MIT"
] | null | null | null | globaldat.py | aldenq/Yodel | 52870781c82cf9af63b8dbaf61cca8240d1b7928 | [
"MIT"
] | null | null | null | import multiprocessing
from multiprocessing import Pipe
from multiprocessing.queues import Queue
from socket import socket
from typing import *
# --- Shared mutable state and protocol constants for the Yodel mesh radio. ---
lastMessages: List[int] = [] #holds the list of past message id's received, used to avoid receiving the same message twice
iface: str = "" # interface name, set during runtime
robotName: str = "" # robot name, set by setName during runtime
groups: List[str] = [""] # list of groups bot is a part of
delay: int = 0  # presumably a send delay in some unit — TODO confirm against sender code
ETH_P_ALL: int = 3  # linux/if_ether.h: capture every ethernet protocol
ETH_FRAME_LEN: int = 1514 # bytes in standard 80211 frame
relay: bool = False  # whether this node re-broadcasts messages for others
maxRelay: int = 5  # cap on relay hops — TODO confirm semantics against relay code
totalsends: int = 10  # how many times each message is (re)transmitted — TODO confirm
# Prebuilt radiotap header prepended to injected frames (bytes, not bytearray).
RADIO_TAP: bytes = b"\x00\x00\x22\x00\xae\x40\x00\xa0\x20\x08\x00\xa0\x20\x08\x00\x00\x00\x10\x10\x02\x6c\x09\xa0\x00\xb0\x00\x64\x00\x00\x00\x00\x00\x00\x01"
lastMid: int = 0 # message ID of last message received
yodelSocket: Optional[socket] = None  # raw send socket, opened at runtime
yodelSocketRecv: Optional[socket] = None  # raw receive socket, opened at runtime
debug: bool = True
sender_thread: Optional[multiprocessing.Process] = None
receiver_thread: Optional[multiprocessing.Process] = None
# NOTE(review): ``Pipe`` is a factory function, not a type, so these annotations
# are nominal only (the real objects are Connection endpoints) — left as-is.
receiver_pipe: Pipe = None
sender_pipe: Pipe = None
outgoing: Optional[Queue] = None  # queue of frames awaiting transmission
messages_types: List[int] = [0] * 256  # handler/type table indexed by message-type byte
# how much time in seconds the threads have to finish what they are doing
# once the program ends
SOCKET_CLOSE_TIME: float = .1
def getInt(bytea: bytearray) -> int:
    """Decode *bytea* as a little-endian unsigned integer (empty input -> 0)."""
    return int.from_bytes(bytea, byteorder='little')
def bytesPrint(x: bytearray) -> None:
    """Print *x* as a run of ``\\x``-prefixed lowercase hex bytes (debug helper).

    Note: ``hex()`` does not zero-pad, so values below 16 print as a single
    digit (``\\x1`` rather than ``\\x01``); output format is unchanged.
    """
    # Annotation fixed: the old ``-> NoReturn`` incorrectly claimed the function
    # never returns; it simply prints and returns None.
    print(''.join(r'\x' + hex(letter)[2:] for letter in x))
| 34.071429 | 162 | 0.738644 |
ace7985647bbbdba059b67401aab2723a89c6c31 | 1,630 | py | Python | graphs/bellman_ford.py | jenia90/Python | 696fb4a681ad9e4d84e0d2b894daf449a3e30b24 | [
"MIT"
] | 21 | 2020-11-29T11:34:44.000Z | 2021-06-04T05:50:33.000Z | graphs/bellman_ford.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 41 | 2020-10-03T07:31:42.000Z | 2020-11-10T09:36:38.000Z | graphs/bellman_ford.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 12 | 2020-10-03T05:44:19.000Z | 2022-01-16T05:37:54.000Z | from __future__ import annotations
def printDist(dist, V):
print("Vertex Distance")
distances = ("INF" if d == float("inf") else d for d in dist)
print("\t".join(f"{i}\t{d}" for i, d in enumerate(distances)))
def BellmanFord(graph: list[dict[str, int]], V: int, E: int, src: int) -> int:
"""
Returns shortest paths from a vertex src to all
other vertices.
"""
mdist = [float("inf") for i in range(V)]
mdist[src] = 0.0
for i in range(V - 1):
for j in range(E):
u = graph[j]["src"]
v = graph[j]["dst"]
w = graph[j]["weight"]
if mdist[u] != float("inf") and mdist[u] + w < mdist[v]:
mdist[v] = mdist[u] + w
for j in range(E):
u = graph[j]["src"]
v = graph[j]["dst"]
w = graph[j]["weight"]
if mdist[u] != float("inf") and mdist[u] + w < mdist[v]:
print("Negative cycle found. Solution not possible.")
return
printDist(mdist, V)
return src
if __name__ == "__main__":
V = int(input("Enter number of vertices: ").strip())
E = int(input("Enter number of edges: ").strip())
graph = [dict() for j in range(E)]
for i in range(E):
graph[i][i] = 0.0
for i in range(E):
print("\nEdge ", i + 1)
src = int(input("Enter source:").strip())
dst = int(input("Enter destination:").strip())
weight = float(input("Enter weight:").strip())
graph[i] = {"src": src, "dst": dst, "weight": weight}
gsrc = int(input("\nEnter shortest path source:").strip())
BellmanFord(graph, V, E, gsrc)
| 28.596491 | 78 | 0.532515 |
ace79972eaf95a789372b54d70c0b64c8d560bc5 | 7,325 | py | Python | flask_eureka/httpclient.py | exaV/flask-eureka | fb96349eeddf420c614c7718d2422a1ed3b6dccd | [
"MIT"
] | null | null | null | flask_eureka/httpclient.py | exaV/flask-eureka | fb96349eeddf420c614c7718d2422a1ed3b6dccd | [
"MIT"
] | null | null | null | flask_eureka/httpclient.py | exaV/flask-eureka | fb96349eeddf420c614c7718d2422a1ed3b6dccd | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import sys
import io
import json
import logging
import re
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
try:
# for python3
from urllib.parse import urlencode
except ImportError:
# for python2
from urllib import urlencode
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
    """Thin adapter around a urllib3 response exposing status/reason/data
    attributes plus header accessors."""

    def __init__(self, resp):
        """Wrap *resp* and copy its commonly-used fields onto this object."""
        self.urllib3_response = resp
        self.data = resp.data
        self.reason = resp.reason
        self.status = resp.status

    def getheaders(self):
        """Return a dictionary of the response headers."""
        return self.urllib3_response.getheaders()

    def getheader(self, name, default=None):
        """Return the header *name*, or *default* when absent."""
        return self.urllib3_response.getheader(name, default)
class HttpClientObject(object):
    """Small HTTP client built on a urllib3 PoolManager, with one convenience
    method per HTTP verb and uniform error handling via ``ApiException``."""

    def __init__(self, pool_manager=None):
        # https pool manager
        # Accepts an injected PoolManager (useful for tests/custom TLS config);
        # otherwise builds a default one with a small pool count.
        if pool_manager is not None:
            self.pool_manager = pool_manager
        else:
            self.pool_manager = urllib3.PoolManager(num_pools=4)
    def request(self, method, url, query_params=None, headers=None,
                body=None, post_params=None):
        """
        Perform an HTTP request and return a :class:`RESTResponse`.

        :param method: http request method
        :param url: http request url
        :param query_params: query parameters in the url
        :param headers: http request headers
        :param body: request json body, for `application/json`
        :param post_params: request post parameters,
                            `application/x-www-form-urlencode`
                            and `multipart/form-data`

        Raises :class:`ApiException` on SSL failure (status 0) or any
        non-2xx response status.
        """
        method = method.upper()
        assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS']
        if post_params and body:
            raise ValueError(
                "body parameter cannot be used with post_params parameter."
            )
        post_params = post_params or {}
        headers = headers or {}
        # Default content type when the caller did not specify one.
        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/json'
        try:
            # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
            if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
                if query_params:
                    url += '?' + urlencode(query_params)
                # JSON-ish content types (matched case-insensitively) send the
                # serialized body directly.
                if re.search('json', headers['Content-Type'], re.IGNORECASE):
                    request_body = None
                    if body:
                        request_body = json.dumps(body)
                    r = self.pool_manager.request(method, url,
                                                  body=request_body,
                                                  headers=headers)
                # NOTE(review): these branches are not chained with elif; if the
                # Content-Type matches none of them (e.g. text/plain), `r` is
                # never assigned and the code below raises UnboundLocalError —
                # confirm intended supported content types before relying on it.
                if headers['Content-Type'] == 'application/x-www-form-urlencoded':
                    r = self.pool_manager.request(method, url,
                                                  fields=post_params,
                                                  encode_multipart=False,
                                                  headers=headers)
                if headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct Content-Type
                    # which generated by urllib3 will be overwritten.
                    del headers['Content-Type']
                    r = self.pool_manager.request(method, url,
                                                  fields=post_params,
                                                  encode_multipart=True,
                                                  headers=headers)
            # For `GET`, `HEAD`
            else:
                r = self.pool_manager.request(method, url,
                                              fields=query_params,
                                              headers=headers)
        except urllib3.exceptions.SSLError as e:
            # Surface TLS problems uniformly as an ApiException with status 0.
            msg = "{0}\n{1}".format(type(e).__name__, str(e))
            raise ApiException(status=0, reason=msg)
        r = RESTResponse(r)
        # In the python 3, the response.data is bytes.
        # we need to decode it to string.
        if sys.version_info > (3,):
            r.data = r.data.decode('utf8')
        # log response body
        logger.debug("response body: %s" % r.data)
        # Treat anything outside 200-205 as an error.
        if r.status not in range(200, 206):
            raise ApiException(http_resp=r)
        return r
    def GET(self, url, headers=None, query_params=None):
        """Issue a GET request."""
        return self.request("GET", url,
                            headers=headers,
                            query_params=query_params)
    def HEAD(self, url, headers=None, query_params=None):
        """Issue a HEAD request."""
        return self.request("HEAD", url,
                            headers=headers,
                            query_params=query_params)
    def OPTIONS(self, url, headers=None, query_params=None, post_params=None, body=None):
        """Issue an OPTIONS request."""
        return self.request("OPTIONS", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            body=body)
    def DELETE(self, url, headers=None, query_params=None, body=None):
        """Issue a DELETE request (optionally with a JSON body)."""
        return self.request("DELETE", url,
                            headers=headers,
                            query_params=query_params,
                            body=body)
    def POST(self, url, headers=None, query_params=None, post_params=None, body=None):
        """Issue a POST request."""
        return self.request("POST", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            body=body)
    def PUT(self, url, headers=None, query_params=None, post_params=None, body=None):
        """Issue a PUT request."""
        return self.request("PUT", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            body=body)
    def PATCH(self, url, headers=None, query_params=None, post_params=None, body=None):
        """Issue a PATCH request."""
        return self.request("PATCH", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            body=body)
class ApiException(Exception):
    """Raised for failed HTTP exchanges: non-2xx responses or SSL errors.

    Constructed either from an explicit (status, reason) pair or from an
    HTTP response object carrying status/reason/data/headers.
    """

    def __init__(self, status=None, reason=None, http_resp=None):
        if http_resp:
            # Pull everything off the response object.
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            # Manually supplied status/reason; no response payload available.
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Build a multi-line description of the failure."""
        parts = ["({0})\nReason: {1}\n".format(self.status, self.reason)]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)
| 36.809045 | 89 | 0.518225 |
ace79af78024a140ca9296dca8eed5d460f6bb9c | 1,386 | py | Python | memefly-ml/memefly/networks/cnn_rnn_merge.py | Nburkhal/memefly-ds | 359853ba79f2ce8e8145fc04d2a33ab58a7faf00 | [
"MIT"
] | 6 | 2019-11-06T21:56:26.000Z | 2020-01-17T06:26:47.000Z | memefly-ml/memefly/networks/cnn_rnn_merge.py | Nburkhal/memefly-ds | 359853ba79f2ce8e8145fc04d2a33ab58a7faf00 | [
"MIT"
] | 12 | 2020-03-24T18:29:09.000Z | 2021-09-08T01:34:54.000Z | memefly-ml/memefly/networks/cnn_rnn_merge.py | BloomTech-Labs/memefly-ds | 82c7b556627b992b6fcbc5f4b8ccfafb171d0aac | [
"MIT"
] | 3 | 2019-11-06T21:54:25.000Z | 2020-03-06T04:17:31.000Z | """Keras network code for the cnn -> rnn encoder decoder for image caption."""
from typing import List, Tuple
from tensorflow.keras.models import Input, Model
from tensorflow.keras.layers import Dense, GRU, Embedding, Droupout, add, Add, RepeatVector, Attention
from tensorflow.keras import backend as K
def cnn_merge_rnn_network(*, vocab_size: int, max_length: int) -> Model:
    """
    Build a CNN-RNN encoder-decoder caption model using the "merge" (method 4)
    image-injection scheme from
    [Where to put the Image in an Image Caption Generator](https://arxiv.org/abs/1703.09137):
    the image embedding and the text encoding are combined only after the RNN.

    NOTE(review): the module import line spells ``Droupout``; importing this
    module will fail until that typo is corrected to ``Dropout`` — confirm.
    """
    # Image branch: pre-extracted 2048-d CNN features -> dropout -> dense.
    image_features = Input(shape=(2048,), name="image_input")
    image_branch = Dropout(0.5, name='image_dropout')(image_features)
    image_branch = Dense(256, activation='relu', name='image_dense')(image_branch)
    # Text branch: token ids -> embedding -> dropout -> GRU encoding.
    caption_tokens = Input(shape=(max_length,), name='text_input')
    text_branch = Embedding(vocab_size, 256, mask_zero=True, name='text_embedding')(caption_tokens)
    text_branch = Dropout(0.5, name='text_dropout')(text_branch)
    text_branch = GRU(256, name='GRU_text')(text_branch)
    # Merge the two branches and decode to a next-word distribution.
    merged = add([image_branch, text_branch], name='add')
    merged = Dense(256, activation='relu', name='combined_dense')(merged)
    next_word_probs = Dense(vocab_size, activation='softmax', name='output')(merged)
    return Model(inputs=[image_features, caption_tokens], outputs=next_word_probs)
ace79b6da23beed4d0934b4f6bcac4476004e58c | 117 | py | Python | models/__init__.py | bartwojcik/Continual-Learning-Benchmark | 8f8ce236918f2755de0bc58e6d920849c312b5b1 | [
"MIT"
] | null | null | null | models/__init__.py | bartwojcik/Continual-Learning-Benchmark | 8f8ce236918f2755de0bc58e6d920849c312b5b1 | [
"MIT"
] | null | null | null | models/__init__.py | bartwojcik/Continual-Learning-Benchmark | 8f8ce236918f2755de0bc58e6d920849c312b5b1 | [
"MIT"
] | 2 | 2021-04-11T13:53:18.000Z | 2021-04-26T11:27:46.000Z | from . import mlp
from . import lenet
from . import resnet
from . import senet
from . import cnn
from . import large
| 16.714286 | 20 | 0.74359 |
ace79bf36b3f5909059a08d4f6280abbd628f8f1 | 2,892 | py | Python | src/dispenv/conda.py | pmbaumgartner/dispenv | 11053e7e3e6f50e7ae71c0550d6647ec02ce9778 | [
"MIT"
] | 3 | 2022-01-26T20:52:44.000Z | 2022-03-14T18:47:01.000Z | src/dispenv/conda.py | pmbaumgartner/dispenv | 11053e7e3e6f50e7ae71c0550d6647ec02ce9778 | [
"MIT"
] | 1 | 2022-01-28T13:18:52.000Z | 2022-01-28T13:18:52.000Z | src/dispenv/conda.py | pmbaumgartner/dispenv | 11053e7e3e6f50e7ae71c0550d6647ec02ce9778 | [
"MIT"
] | null | null | null | import re
from pathlib import Path
from subprocess import run
import srsly
from wasabi import msg
from .checks import (
run_conda_checks,
run_gh_cli_checks,
)
from .github import get_requirements_from_gist
from ._types import EnvData
from typing import Dict, Any
def env_exists(environment_name) -> bool:
    """Return True when a conda environment named *environment_name* exists.

    Scans the output of ``conda info --envs`` for the name as a whole word.
    """
    listing = run(["conda", "info", "--envs"], capture_output=True).stdout.decode(
        "utf-8"
    )
    pattern = r"\b" + re.escape(environment_name) + r"\b"
    return re.search(pattern, listing) is not None
def create(env_data: EnvData) -> None:
    """Create the project folder and conda environment described by *env_data*.

    Steps, in order: validate conda is available; refuse to clobber an existing
    environment or folder; make the folder; create the environment; optionally
    install a requirements.txt fetched from a GitHub gist; finally record the
    configuration in ``.dispenv.yaml`` inside the new folder.
    """
    run_conda_checks()
    # Fail fast before creating anything on disk.
    if env_exists(env_data.environment_name):
        raise ValueError(f"Environment '{env_data.environment_name}' already exists")
    if Path(env_data.folder_name).exists():
        raise ValueError(f"Folder '{env_data.folder_name}' already exists.")
    folder_path = Path(env_data.folder_name).resolve()
    msg.info(f"Creating Folder: {folder_path}")
    folder_path.mkdir(parents=True, exist_ok=False)
    msg.info(f"Creating Environment: {env_data.environment_name}")
    with msg.loading("Creating..."):
        # `conda create -y -n <name> python=<version>` (output captured/quiet).
        run(
            [
                "conda",
                "create",
                "-y",
                "-n",
                env_data.environment_name,
                f"python={env_data.python_version}",
            ],
            capture_output=True,
        )
    msg.good("\nEnvironment Created")
    if env_data.requirements_txt_gist:
        # Optional dependency install from a gist-hosted requirements.txt;
        # requires the GitHub CLI to be installed and authenticated.
        run_gh_cli_checks()
        msg.info(
            f"Downloading & Installing requirements.txt"
            f" from {env_data.requirements_txt_gist}"
        )
        requirements_txt_path = get_requirements_from_gist(
            env_data.requirements_txt_gist, folder_path
        )
        with msg.loading("Installing..."):
            # pip install inside the new environment via `conda run`.
            run(
                [
                    "conda",
                    "run",
                    "-n",
                    env_data.environment_name,
                    "python",
                    "-m",
                    "pip",
                    "install",
                    "-r",
                    str(requirements_txt_path),
                ],
                capture_output=True,
            )
        msg.good("Packages Installed in Environment")
    # Persist the environment description so `cleanup` can undo everything later.
    srsly.write_yaml(folder_path / ".dispenv.yaml", env_data.dict())
    msg.good(
        f"Created Environment {env_data.environment_name} in {env_data.folder_name}"
    )
def cleanup(dispenv_data: Dict[str, Any]) -> None:
    """Remove the project folder and conda environment named in *dispenv_data*.

    Expects the keys ``folder_name`` and ``environment_name`` (as written to
    ``.dispenv.yaml`` by :func:`create`).
    """
    import shutil  # local import keeps the module's import surface unchanged

    msg.info("Removing Folder")
    folder_path = Path(dispenv_data["folder_name"]).resolve()
    # shutil.rmtree is portable (works on Windows) unlike shelling out to
    # `rm -rf`; ignore_errors=True mirrors `-f` (no failure on a missing tree).
    shutil.rmtree(folder_path, ignore_errors=True)
    msg.info("Removing Environment")
    run(
        ["conda", "env", "remove", "-n", dispenv_data["environment_name"]],
        capture_output=True,
    )
    msg.good("Cleanup Complete.")
ace79c3f40a9d924600f4018fb56d10b5e8c630c | 1,963 | py | Python | mars/services/tests/test_svcs/test_svc1/supervisor.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 1 | 2022-02-02T03:03:48.000Z | 2022-02-02T03:03:48.000Z | mars/services/tests/test_svcs/test_svc1/supervisor.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | mars/services/tests/test_svcs/test_svc1/supervisor.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..... import oscar as mo
from ....core import AbstractService
class SvcActor1(mo.Actor):
    """Test actor that remembers a single constructor argument."""

    def __init__(self, arg):
        """Store *arg* for later retrieval via :meth:`get_arg`."""
        super().__init__()
        self._arg = arg

    def get_arg(self):
        """Return the value passed at construction time."""
        return self._arg
class SvcSessionActor1(mo.Actor):
    """Per-session companion actor for test service 1."""

    @classmethod
    def gen_uid(cls, session_id: str):
        """Derive the deterministic actor uid for *session_id*."""
        return f"{session_id}_svc_session_actor1"
class TestService1(AbstractService):
    """Minimal service used by the service-framework tests.

    ``start``/``stop`` manage one service-level ``SvcActor1`` configured from
    the ``test_svc1`` section of the service config; ``create_session`` /
    ``destroy_session`` manage one ``SvcSessionActor1`` per session.

    The original ``__init__`` only delegated to ``AbstractService.__init__``
    with identical arguments, so it was redundant and has been removed.
    """

    async def start(self):
        """Create the service-level actor described by the ``test_svc1`` config."""
        svc_config = self._config["test_svc1"]
        await mo.create_actor(
            SvcActor1,
            uid=svc_config["uid"],
            arg=svc_config["arg1"],
            address=self._address,
        )

    async def stop(self):
        """Destroy the service-level actor created in :meth:`start`."""
        svc_config = self._config["test_svc1"]
        await mo.destroy_actor(
            mo.create_actor_ref(uid=svc_config["uid"], address=self._address)
        )

    async def create_session(self, session_id: str):
        """Spawn the per-session actor for *session_id*."""
        await mo.create_actor(
            SvcSessionActor1,
            uid=SvcSessionActor1.gen_uid(session_id),
            address=self._address,
        )

    async def destroy_session(self, session_id: str):
        """Destroy the per-session actor for *session_id*."""
        await mo.destroy_actor(
            mo.create_actor_ref(
                uid=SvcSessionActor1.gen_uid(session_id), address=self._address
            )
        )
ace79c6e6a31de341b5f7229ca52b9eb414844c0 | 3,816 | py | Python | tests/test_data_stats.py | murraycutforth/MONAI | ad06dff7f85711048690b2e85c99d51001612708 | [
"Apache-2.0"
] | 1 | 2021-06-18T00:53:06.000Z | 2021-06-18T00:53:06.000Z | tests/test_data_stats.py | Transconnectome/MONAI | dc7cd0ec25d4b27f321a31f13e707769922c66b3 | [
"Apache-2.0"
] | null | null | null | tests/test_data_stats.py | Transconnectome/MONAI | dc7cd0ec25d4b27f321a31f13e707769922c66b3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import logging
import tempfile
import numpy as np
from parameterized import parameterized
from monai.transforms import DataStats
# Each case is (DataStats kwargs, input array, expected logged text).
# Case 1: all reporting flags off -> only the prefix line is produced.
TEST_CASE_1 = [
    {
        "prefix": "test data",
        "data_shape": False,
        "intensity_range": False,
        "data_value": False,
        "additional_info": None,
        "logger_handler": None,
    },
    np.array([[0, 1], [1, 2]]),
    "test data statistics:",
]
# Case 2: shape reporting only.
TEST_CASE_2 = [
    {
        "prefix": "test data",
        "data_shape": True,
        "intensity_range": False,
        "data_value": False,
        "additional_info": None,
        "logger_handler": None,
    },
    np.array([[0, 1], [1, 2]]),
    "test data statistics:\nShape: (2, 2)",
]
# Case 3: shape + intensity range.
TEST_CASE_3 = [
    {
        "prefix": "test data",
        "data_shape": True,
        "intensity_range": True,
        "data_value": False,
        "additional_info": None,
        "logger_handler": None,
    },
    np.array([[0, 1], [1, 2]]),
    "test data statistics:\nShape: (2, 2)\nIntensity range: (0, 2)",
]
# Case 4: shape + intensity range + raw value dump.
TEST_CASE_4 = [
    {
        "prefix": "test data",
        "data_shape": True,
        "intensity_range": True,
        "data_value": True,
        "additional_info": None,
        "logger_handler": None,
    },
    np.array([[0, 1], [1, 2]]),
    "test data statistics:\nShape: (2, 2)\nIntensity range: (0, 2)\nValue: [[0 1]\n [1 2]]",
]
# Case 5: everything plus a user callback (mean) as additional info.
TEST_CASE_5 = [
    {
        "prefix": "test data",
        "data_shape": True,
        "intensity_range": True,
        "data_value": True,
        "additional_info": lambda x: np.mean(x),
        "logger_handler": None,
    },
    np.array([[0, 1], [1, 2]]),
    "test data statistics:\nShape: (2, 2)\nIntensity range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0",
]
# Case 6: (input array, expected file content) for the file-handler test;
# note the trailing newline written by the logging handler.
TEST_CASE_6 = [
    np.array([[0, 1], [1, 2]]),
    "test data statistics:\nShape: (2, 2)\nIntensity range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0\n",
]
class TestDataStats(unittest.TestCase):
    """Tests for the DataStats transform: in-memory output and file logging."""
    @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5])
    def test_value(self, input_param, input_data, expected_print):
        # The transform exposes the text it produced on `transform.output`.
        transform = DataStats(**input_param)
        _ = transform(input_data)
        self.assertEqual(transform.output, expected_print)
    @parameterized.expand([TEST_CASE_6])
    def test_file(self, input_data, expected_print):
        # Route the stats through a logging.FileHandler and compare file content.
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_stats.log")
            handler = logging.FileHandler(filename, mode="w")
            input_param = {
                "prefix": "test data",
                "data_shape": True,
                "intensity_range": True,
                "data_value": True,
                "additional_info": lambda x: np.mean(x),
                "logger_handler": handler,
            }
            transform = DataStats(**input_param)
            _ = transform(input_data)
            # Close the stream so the log is flushed, and detach the handler so
            # later tests do not keep writing to this file.
            handler.stream.close()
            transform._logger.removeHandler(handler)
            with open(filename, "r") as f:
                content = f.read()
            self.assertEqual(content, expected_print)
            os.remove(filename)
# Allow running this test module directly (python test_data_stats.py).
if __name__ == "__main__":
    unittest.main()
| 31.02439 | 116 | 0.596698 |
ace79c802fb3d061015e6604fca5e5fb4a76f77f | 1,185 | py | Python | utils.py | watstock/rnns | bc6bd1d0200acc3a901dbb7bb937d702b07dbe83 | [
"MIT"
] | null | null | null | utils.py | watstock/rnns | bc6bd1d0200acc3a901dbb7bb937d702b07dbe83 | [
"MIT"
] | null | null | null | utils.py | watstock/rnns | bc6bd1d0200acc3a901dbb7bb937d702b07dbe83 | [
"MIT"
] | null | null | null | """
Helper methods.
"""
from dateutil.relativedelta import relativedelta
from pymongo import MongoClient
import os
import datetime
def next_work_day(date, distance=1):
    """Return the ``distance``-th week day (Mon-Fri) strictly after ``date``.

    Args:
        date: a ``datetime.date`` or ``datetime.datetime`` to start from.
        distance: how many working days to move forward; must be >= 1.

    Returns:
        The date of the ``distance``-th working day after ``date`` (same type
        as ``date``).

    Raises:
        ValueError: if ``distance`` is less than 1 (previously this surfaced
            as an obscure ``IndexError``).
    """
    if distance < 1:
        raise ValueError("distance must be >= 1")
    weekend = {5, 6}  # date.weekday(): Monday == 0, so 5/6 are Sat/Sun
    one_day = datetime.timedelta(days=1)  # stdlib; relativedelta not needed for whole days
    current = date
    remaining = distance
    # Walk forward one calendar day at a time, counting only weekdays.
    while remaining > 0:
        current += one_day
        if current.weekday() not in weekend:
            remaining -= 1
    return current
def build_model_params(architectures, timesteps, steps_ahead):
    """Build every (architecture, timestep, steps-ahead) combination.

    Each result row is the architecture list with the timestep and the
    steps-ahead value appended.
    """
    return [
        arch + [tstep, ahead]
        for arch in architectures
        for tstep in timesteps
        for ahead in steps_ahead
    ]
def plot_data(df):
    """Render a quick default plot of ``df`` and block until the window closes.

    ``df`` is expected to support a pandas-style ``.plot()`` method.
    """
    # Imported lazily so batch/headless runs that never plot avoid matplotlib.
    import matplotlib.pyplot as plt
    df.plot()
    plt.show()
def save_prediction_to_db(data):
    """Insert a prediction document into the ``watstock.prediction_models`` collection.

    Adds ``timestamp`` (UTC datetime) and ``date`` (YYYY-MM-DD string) keys
    before inserting, then prints the new document id.

    NOTE(review): ``data`` is aliased, not copied, so the caller's dict is
    mutated in place (extra keys added here, plus pymongo's ``_id`` on insert)
    -- confirm callers expect that.
    """
    # Fails fast with KeyError if the connection string is not configured.
    MONGODB_CONNECTION = os.environ['MONGODB_CONNECTION']
    client = MongoClient(MONGODB_CONNECTION)
    db = client.watstock
    collection = db.prediction_models
    prediction = data  # alias, not a copy -- see NOTE above
    now = datetime.datetime.utcnow()
    prediction['timestamp'] = now
    prediction['date'] = now.strftime('%Y-%m-%d')
    prediction_id = collection.insert_one(prediction).inserted_id
    print('Prediction saved to the db:', prediction_id)
| 22.788462 | 63 | 0.718987 |
ace79dbc7a8b01a484d2d26a23d4e604d176bdba | 777 | py | Python | setup.py | billypoke/Preston_new | 6f949e7cddf4d7310a28ae2da9df648141911f4c | [
"MIT"
] | null | null | null | setup.py | billypoke/Preston_new | 6f949e7cddf4d7310a28ae2da9df648141911f4c | [
"MIT"
] | null | null | null | setup.py | billypoke/Preston_new | 6f949e7cddf4d7310a28ae2da9df648141911f4c | [
"MIT"
] | null | null | null | from setuptools import setup
import re
# Single-source the version: parse it out of the package's __init__.py so it
# is only declared in one place.
with open('preston_new/__init__.py') as f:
    version = re.search(r'(\d+\.\d+\.\d+)', f.read()).group(1)
setup(
    name='Preston_new',
    author='billypoke',
    author_email='billypoke_dev@gmail.com',
    version=version,
    license='MIT',
    description='EVE ESI API access tool',
    url='https://github.com/billypoke/Preston',
    platforms='any',
    packages=['preston_new'],
    keywords=['eve online', 'api', 'esi'],
    # Runtime dependency: only requests is needed.
    install_requires=[
        'requests>=2.18.4'
    ],
    classifiers=[
        'Environment :: Console',
        'Environment :: Web Environment',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries'
    ]
)
| 25.9 | 62 | 0.608752 |
ace79e599dbe14b87c117e422c3353501704f786 | 908 | py | Python | impl/dlsgs/transformer/lr_schedules.py | ju-kreber/Transformers-and-GANs-for-LTL-sat | 45fe14815562dd3e0d3705573ce9358bfbdc22b3 | [
"MIT"
] | null | null | null | impl/dlsgs/transformer/lr_schedules.py | ju-kreber/Transformers-and-GANs-for-LTL-sat | 45fe14815562dd3e0d3705573ce9358bfbdc22b3 | [
"MIT"
] | null | null | null | impl/dlsgs/transformer/lr_schedules.py | ju-kreber/Transformers-and-GANs-for-LTL-sat | 45fe14815562dd3e0d3705573ce9358bfbdc22b3 | [
"MIT"
] | null | null | null | # implementation from DeepLTL https://github.com/reactive-systems/deepltl
import tensorflow as tf
class TransformerSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Warmup-then-decay learning-rate schedule from Vaswani et al. (2017).

    The rate increases linearly for ``warmup_steps`` steps and afterwards
    decreases proportionally to the inverse square root of the step number.
    """

    def __init__(self, d_embedding, warmup_steps=4000):
        super().__init__()
        # Stored as float32 tensors so the schedule math runs inside TF.
        self.d_embedding = tf.cast(d_embedding, tf.float32)
        self.warmup_steps = tf.cast(warmup_steps, tf.float32)

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        # Linear ramp during warmup vs. inverse-sqrt decay afterwards; the
        # minimum of the two selects whichever regime applies at this step.
        warmup_term = step * (self.warmup_steps ** -1.5)
        decay_term = tf.math.rsqrt(step)
        return tf.math.rsqrt(self.d_embedding) * tf.math.minimum(warmup_term, decay_term)
| 41.272727 | 101 | 0.725771 |
ace7a133c3a0cb143a4f511071066d8c65555131 | 187 | py | Python | SpotiRy/frontend/urls.py | DarishkaAMS/Dj_Projects-SpotiRy | 7b1b2b9835029b63f392dd2cdbcb277dc020b60f | [
"MIT"
] | null | null | null | SpotiRy/frontend/urls.py | DarishkaAMS/Dj_Projects-SpotiRy | 7b1b2b9835029b63f392dd2cdbcb277dc020b60f | [
"MIT"
] | null | null | null | SpotiRy/frontend/urls.py | DarishkaAMS/Dj_Projects-SpotiRy | 7b1b2b9835029b63f392dd2cdbcb277dc020b60f | [
"MIT"
] | null | null | null | from django.urls import path
from .views import index
# All frontend routes serve the same `index` view; the single-page app's
# client-side router distinguishes between these paths.
urlpatterns = [
    path('', index),
    path('join', index),
    path('create', index),
    path('room/<str:roomCode>', index),
]
| 17 | 39 | 0.620321 |
ace7a25e751c9f4778bf11336285735417a08860 | 8,019 | py | Python | python/hail/tests/test_typecheck.py | konradjk/hail | 10a75bb57a6ff5d8c90475ba04299a571c494357 | [
"MIT"
] | 1 | 2020-03-09T21:25:00.000Z | 2020-03-09T21:25:00.000Z | python/hail/tests/test_typecheck.py | konradjk/hail | 10a75bb57a6ff5d8c90475ba04299a571c494357 | [
"MIT"
] | null | null | null | python/hail/tests/test_typecheck.py | konradjk/hail | 10a75bb57a6ff5d8c90475ba04299a571c494357 | [
"MIT"
] | null | null | null | import unittest
from hail.typecheck.check import *
class TypeCheckTests(unittest.TestCase):
    """Tests for the @typecheck/@typecheck_method runtime argument checkers."""
    def test_varargs(self):
        # Declared parameters are checked; wrong types raise TypeError.
        @typecheck(x=int, y=int)
        def f1(x, y):
            return x + y
        # f1 runs with correct arguments, rejects a str where int is expected
        f1(1, 2)
        self.assertRaises(TypeError, lambda: f1('2', 3))
        # *args / **kwargs present in the signature but absent from the
        # @typecheck spec is a declaration error -> RuntimeError at call time.
        @typecheck(x=int, y=int)
        def bad_signature_1(x, y, *args):
            pass
        @typecheck(x=int, y=int)
        def bad_signature_2(x, y, **kwargs):
            pass
        @typecheck(x=int)
        def bad_signature_3(x, *args, **kwargs):
            pass
        for f in [bad_signature_1, bad_signature_2, bad_signature_3]:
            self.assertRaises(RuntimeError, lambda: f(1, 2))
        # Empty spec on a zero-argument function is fine.
        @typecheck()
        def f():
            pass
        f()
        # `args=int` checks every positional vararg.
        @typecheck(x=int, y=int, args=int)
        def good_signature_1(x, y, *args):
            pass
        good_signature_1(1, 2)
        good_signature_1(1, 2, 3)
        good_signature_1(1, 2, 3, 4, 5)
        self.assertRaises(TypeError, lambda: good_signature_1(1, 2, 3, '4'))
        self.assertRaises(TypeError, lambda: good_signature_1(1, 2, '4'))
        # `kwargs=int` checks every keyword vararg value.
        @typecheck(x=int, y=int, kwargs=int)
        def good_signature_2(x, y, **kwargs):
            pass
        good_signature_2(1, 2, a=5, z=2)
        good_signature_2(1, 2)
        good_signature_2(1, 2, a=5)
        self.assertRaises(TypeError, lambda: good_signature_2(1, 2, a='2'))
        self.assertRaises(TypeError, lambda: good_signature_2(1, 2, a='2', b=5, c=10))
        # Both *args and **kwargs checked together.
        @typecheck(x=int, y=int, args=int, kwargs=int)
        def good_signature_3(x, y, *args, **kwargs):
            pass
        good_signature_3(1, 2)
        good_signature_3(1, 2, 3)
        good_signature_3(1, 2, a=3)
        good_signature_3(1, 2, 3, a=4)
        self.assertRaises(TypeError, lambda: good_signature_3(1, 2, a='2'))
        self.assertRaises(TypeError, lambda: good_signature_3(1, 2, '3', b=5, c=10))
        self.assertRaises(TypeError, lambda: good_signature_3(1, 2, '3', b='5', c=10))
        # Composite checker (oneof/sequenceof) applied to **kwargs values.
        @typecheck(x=int, y=int, args=int, kwargs=oneof(sequenceof(int), str))
        def good_signature_4(x, y, *args, **kwargs):
            pass
        good_signature_4(1, 2)
        good_signature_4(1, 2, 3)
        good_signature_4(1, 2, a='1')
        good_signature_4(1, 2, 3, a=[1, 2, 3])
        good_signature_4(1, 2, 3, a=[1, 2, 3], b='5')
        good_signature_4(1, 2, a=[1, 2, 3], b='5')
        self.assertRaises(TypeError, lambda: good_signature_4(1, 2, a=2))
        self.assertRaises(TypeError, lambda: good_signature_4(1, 2, '3', b='5', c=10))
        # sized_tupleof checks tuple arity and per-element types.
        @typecheck(x=sized_tupleof(str, int, int))
        def good_signature_5(x):
            pass
        good_signature_5(("1", 5, 10))
        self.assertRaises(TypeError, lambda: good_signature_5("1", 2, 2))
        self.assertRaises(TypeError, lambda: good_signature_5(("1", 5, 10), ("2", 10, 20)))
        # Mix of fixed params, nested composite checker, and varargs.
        @typecheck(x=int, y=str, z=sequenceof(sized_tupleof(str, int, int)),
                   args=int)
        def good_signature_6(x, y, z, *args):
            pass
        good_signature_6(7, "hello", [("1", 5, 10), ("3", 10, 1)], 1, 2, 3)
        good_signature_6(7, "hello", [("1", 5, 10), ("3", 10, 1)])
        good_signature_6(7, "hello", [], 1, 2)
        good_signature_6(7, "hello", [])
        self.assertRaises(TypeError, lambda: good_signature_6(1, "2", ("3", 4, 5)))
        self.assertRaises(TypeError, lambda: good_signature_6(7, "hello", [(9, 5.6, 10), (4, "hello", 1)], 1, 2, 3))
    def test_helpers(self):
        # check nullable: accepts the declared type or None
        @typecheck(x=nullable(int))
        def f(x):
            pass
        f(5)
        f(None)
        self.assertRaises(TypeError, lambda: f('2'))
        # check integral: float is not accepted as int
        @typecheck(x=int)
        def f(x):
            pass
        f(1)
        self.assertRaises(TypeError, lambda: f(1.1))
        # check numeric: int and float both accepted, str rejected
        @typecheck(x=numeric)
        def f(x):
            pass
        f(1)
        f(1.0)
        self.assertRaises(TypeError, lambda: f('1.1'))
        # check strlike: str and unicode accepted, list rejected
        @typecheck(x=str)
        def f(x):
            pass
        f('str')
        f(u'unicode')
        self.assertRaises(TypeError, lambda: f(['abc']))
    def test_nested(self):
        # Deeply nested composite checkers can be combined arbitrarily.
        @typecheck(
            x=int,
            y=oneof(nullable(str), sequenceof(sequenceof(dictof(oneof(str, int), anytype))))
        )
        def f(x, y):
            pass
        f(5, None)
        f(5, u'7')
        f(5, [])
        f(5, [[]])
        f(5, [[{}]])
        f(5, [[{'6': None}]])
        f(5, [[{'6': None}]])
        f(5, [[{'6': None, 5: {1, 2, 3, 4}}]])
        self.assertRaises(TypeError, lambda: f(2, 2))
    def test_class_methods(self):
        # typecheck_method is the instance-method variant (skips `self`).
        class Foo:
            @typecheck_method(a=int, b=str)
            def __init__(self, a, b):
                pass
            @typecheck_method(x=int, y=int)
            def a(self, x, y):
                pass
            @staticmethod
            @typecheck(x=int, y=int)
            def b(x, y):
                pass
            # error because it should be typecheck_method (plain typecheck
            # does not account for `self`) -> RuntimeError when called
            @typecheck(x=int, y=int)
            def c(self, x, y):
                pass
            @typecheck_method(x=int, y=int, args=str, kwargs=int)
            def d(self, x, y, *args, **kwargs):
                pass
        Foo(2, '2')
        self.assertRaises(TypeError, lambda: Foo('2', '2'))
        f = Foo(2, '2')
        f.a(2, 2)
        f.b(2, 2)
        Foo.b(2, 2)
        f.d(1, 2)
        f.d(1, 2, '3')
        f.d(1, 2, '3', z=5)
        self.assertRaises(TypeError, lambda: f.a('2', '2'))
        self.assertRaises(TypeError, lambda: f.b('2', '2'))
        self.assertRaises(TypeError, lambda: Foo.b('2', '2'))
        self.assertRaises(RuntimeError, lambda: f.c(2, 2))
        self.assertRaises(TypeError, lambda: f.d(2, 2, 3))
        self.assertRaises(TypeError, lambda: f.d(2, 2, z='2'))
    def test_lazy(self):
        # lazy() lets a checker reference a class before it is defined
        # (e.g. a method taking an instance of its own class).
        foo_type = lazy()
        class Foo:
            def __init__(self):
                pass
            @typecheck_method(other=foo_type)
            def bar(self, other):
                pass
        foo_type.set(Foo)
        foo = Foo()
        foo2 = Foo()
        foo.bar(foo)
        foo.bar(foo2)
        self.assertRaises(TypeError, lambda: foo.bar(2))
    def test_coercion(self):
        # transformed() both checks the type and maps the value before the
        # wrapped function sees it.
        @typecheck(a=transformed((int, lambda x: 'int'),
                                 (str, lambda x: 'str')),
                   b=sequenceof(dictof(str, transformed((int, lambda x: 'int'),
                                                        (str, lambda x: 'str')))))
        def foo(a, b):
            return a, b
        self.assertRaises(TypeError, lambda: foo(5.5, [{'5': 5}]))
        self.assertRaises(TypeError, lambda: foo(5, [{'5': 5.5}]))
        a, b = foo(5, [])
        self.assertEqual(a, 'int')
        a, b = foo('5', [])
        self.assertEqual(a, 'str')
        a, b = foo(5, [{'5': 5, '6': '6'}, {'10': 10}])
        self.assertEqual(a, 'int')
        self.assertEqual(b, [{'5': 'int', '6': 'str'}, {'10': 'int'}])
    def test_function_checker(self):
        # func_spec(n, t) requires a callable of arity n; non-callables and
        # wrong-arity callables are rejected.
        @typecheck(f=func_spec(3, int))
        def foo(f):
            return f(1, 2, 3)
        l1 = lambda: 5
        l2 = 5
        l3 = lambda x, y, z: x + y + z
        self.assertRaises(TypeError, lambda: foo(l1))
        self.assertRaises(TypeError, lambda: foo(l2))
        foo(l3)
    def test_complex_signature(self):
        # Defaults, *args and **kwargs all interact with the checker.
        @typecheck(a=int, b=str, c=sequenceof(int), d=tupleof(str), e=dict)
        def f(a, b='5', c=[10], *d, **e):
            pass
        f(1, 'a', )
        f(1, foo={})
        f(1, 'a', foo={})
        f(1, c=[25, 2])
        with self.assertRaises(TypeError):
            f(1, '2', a=2)
    def test_extra_args(self):
        # Passing more positionals than the signature declares raises.
        @typecheck(x=int)
        def f(x):
            pass
        f(1)
        with self.assertRaises(TypeError):
            f(1,2)
| 28.235915 | 116 | 0.501309 |
ace7a2cef756946faaa09aa916e6942adb89e527 | 111 | py | Python | gym_ur5/__init__.py | pnnayyeri/gym-ur5 | e515a405d6619456a8cd77f8e5108e7f0bc08246 | [
"MIT"
] | 1 | 2021-02-01T18:17:48.000Z | 2021-02-01T18:17:48.000Z | gym_ur5/__init__.py | pnnayyeri/gym-ur5 | e515a405d6619456a8cd77f8e5108e7f0bc08246 | [
"MIT"
] | null | null | null | gym_ur5/__init__.py | pnnayyeri/gym-ur5 | e515a405d6619456a8cd77f8e5108e7f0bc08246 | [
"MIT"
] | 1 | 2021-07-15T07:06:35.000Z | 2021-07-15T07:06:35.000Z | from gym.envs.registration import register
# Register the environment with Gym so `gym.make('ur5-v0')` resolves to
# gym_ur5.envs.UR5Env.
register(
    id='ur5-v0',
    entry_point='gym_ur5.envs:UR5Env'
)
| 15.857143 | 42 | 0.720721 |
ace7a43786ee9a25bf9448128898633aff6168bc | 13,981 | py | Python | sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/aio/operations/_virtual_machines_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/aio/operations/_virtual_machines_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/aio/operations/_virtual_machines_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: generated by AutoRest (see the header of this file); regenerate rather
# than hand-editing the request-building logic below.
class VirtualMachinesOperations:
    """VirtualMachinesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.hdinsight.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def list_hosts(
        self,
        resource_group_name: str,
        cluster_name: str,
        **kwargs: Any
    ) -> List["_models.HostInfo"]:
        """Lists the HDInsight clusters hosts.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of HostInfo, or the result of cls(response)
        :rtype: list[~azure.mgmt.hdinsight.models.HostInfo]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.HostInfo"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"
        # Construct URL
        url = self.list_hosts.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[HostInfo]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_hosts.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/listHosts'}  # type: ignore
    # Internal helper: issues the initial restart request for the LRO started
    # by begin_restart_hosts below.
    async def _restart_hosts_initial(
        self,
        resource_group_name: str,
        cluster_name: str,
        hosts: List[str],
        **kwargs: Any
    ) -> None:
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._restart_hosts_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(hosts, '[str]')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _restart_hosts_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/restartHosts'}  # type: ignore
    async def begin_restart_hosts(
        self,
        resource_group_name: str,
        cluster_name: str,
        hosts: List[str],
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Restarts the specified HDInsight cluster hosts.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :param hosts: The list of hosts to restart.
        :type hosts: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a token.
        if cont_token is None:
            raw_result = await self._restart_hosts_initial(
                resource_group_name=resource_group_name,
                cluster_name=cluster_name,
                hosts=hosts,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_restart_hosts.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/restartHosts'}  # type: ignore
    async def get_async_operation_status(
        self,
        resource_group_name: str,
        cluster_name: str,
        operation_id: str,
        **kwargs: Any
    ) -> "_models.AsyncOperationResult":
        """Gets the async operation status.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster.
        :type cluster_name: str
        :param operation_id: The long running operation id.
        :type operation_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AsyncOperationResult, or the result of cls(response)
        :rtype: ~azure.mgmt.hdinsight.models.AsyncOperationResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AsyncOperationResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get_async_operation_status.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'operationId': self._serialize.url("operation_id", operation_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('AsyncOperationResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_async_operation_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/restartHosts/azureasyncoperations/{operationId}'}  # type: ignore
| 49.05614 | 236 | 0.67606 |
ace7a4ac7d4bf0b674f91a1706b621de2abcd1d7 | 1,008 | py | Python | imdbscraper/imdbscraper/pipelines.py | sparshk/movie_recommender_system | 473409f4d569291ab1badccd16c92e575ca487a5 | [
"MIT"
] | null | null | null | imdbscraper/imdbscraper/pipelines.py | sparshk/movie_recommender_system | 473409f4d569291ab1badccd16c92e575ca487a5 | [
"MIT"
] | null | null | null | imdbscraper/imdbscraper/pipelines.py | sparshk/movie_recommender_system | 473409f4d569291ab1badccd16c92e575ca487a5 | [
"MIT"
] | null | null | null | import logging
import pymongo
from scrapy.conf import settings
class MongoDBPipeline(object):
    """Scrapy item pipeline that stores scraped movies in MongoDB."""
    # Target collection within the configured database.
    collection_name = 'movies'
    def __init__(self, mongo_uri,mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook: pull connection settings from settings.py
        # (MONGO_DATABASE falls back to 'items' when unset).
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE','items')
        )
    def open_spider(self, spider):
        # Open the db connection once when the spider starts.
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
    def close_spider(self, spider):
        # Release the connection when the spider finishes.
        self.client.close()
    def process_item(self, item, spider):
        # Persist each scraped item as a plain dict.
        # NOTE(review): Collection.insert() is deprecated in modern pymongo;
        # prefer insert_one -- confirm the pinned pymongo version.
        self.db[self.collection_name].insert(dict(item))
        logging.debug("movie added to MongoDB")
return item | 29.647059 | 67 | 0.649802 |
ace7a5c746478cdfcc183aa8f8f647111fddbfe3 | 2,118 | py | Python | .buildkite/get_changed_code_files.py | Infi-zc/horovod | 94cd8561a21d449fc8c80c8fef422025b84dfc22 | [
"Apache-2.0"
] | 7,676 | 2019-02-12T02:57:22.000Z | 2022-03-31T21:05:40.000Z | .buildkite/get_changed_code_files.py | Infi-zc/horovod | 94cd8561a21d449fc8c80c8fef422025b84dfc22 | [
"Apache-2.0"
] | 2,431 | 2019-02-12T01:34:21.000Z | 2022-03-31T21:43:38.000Z | .buildkite/get_changed_code_files.py | Infi-zc/horovod | 94cd8561a21d449fc8c80c8fef422025b84dfc22 | [
"Apache-2.0"
] | 1,557 | 2019-02-12T07:52:15.000Z | 2022-03-31T21:05:43.000Z | import json
import logging
import os
import sys
import re
import requests
# this script outputs all code files that have changed between commit and master
# environment variable BUILDKITE_COMMIT provides the commit SHA
# environment variable BUILDKITE_PIPELINE_DEFAULT_BRANCH provides the default branch (master)
# files that match any of these regexps are considered non-code files
# even though those files have changed, they will not be in the output of this script
non_code_file_patterns = [
r'^.buildkite/get_changed_code_files.py$',
r'^.github/',
r'^docs/',
r'^.*\.md',
r'^.*\.rst'
]
def get_changed_files(base, head):
response = requests.get(
'https://api.github.com/repos/horovod/horovod/compare/{base}...{head}'.format(
base=base, head=head
)
)
if response.status_code != 200:
logging.error('Request failed: {}'.format(json.loads(response.text).get('message')))
return []
compare_json = response.text
compare = json.loads(compare_json)
return [file.get('filename') for file in compare.get('files')]
def is_code_file(file):
    """Return True if *file* is a code file, i.e. not matched by any
    non-code pattern."""
    return not is_non_code_file(file)


def is_non_code_file(file):
    """Return True if *file* matches any regex in non_code_file_patterns."""
    # Generator form short-circuits on the first matching pattern; the
    # original built a full list of matches inside any(), doing needless
    # work and allocation.
    return any(re.match(pattern, file)
               for pattern in non_code_file_patterns)
if __name__ == "__main__":
    # Emit debug-level records from the root logger for this run.
    logging.getLogger().level = logging.DEBUG

    commit = os.environ.get('BUILDKITE_COMMIT')
    default = os.environ.get('BUILDKITE_PIPELINE_DEFAULT_BRANCH')

    # Both environment variables are required; abort otherwise.
    if commit is None or default is None:
        logging.warning('no commit ({}) or default branch ({}) given'.format(commit, default))
        sys.exit(1)

    logging.debug('commit = {}'.format(commit))
    logging.debug('default = {}'.format(default))

    commit_files = get_changed_files(default, commit)
    if not commit_files:
        logging.warning('could not find any commit files')
        sys.exit(1)

    # Print only the changed files that count as code.
    for changed in filter(is_code_file, commit_files):
        print(changed)
| 29.830986 | 94 | 0.668083 |
ace7a5fa30bf8e284bb0fe363bb57f9e3e3b7bee | 799 | py | Python | webapp/webapp/urls.py | troymoench/naccbis | 87fd79a79e9ae189236781fa4682811c1da6480f | [
"MIT"
] | 1 | 2019-02-28T18:46:40.000Z | 2019-02-28T18:46:40.000Z | webapp/webapp/urls.py | troymoench/naccbis | 87fd79a79e9ae189236781fa4682811c1da6480f | [
"MIT"
] | 19 | 2018-04-17T04:47:40.000Z | 2022-02-10T10:40:07.000Z | webapp/webapp/urls.py | troymoench/naccbis | 87fd79a79e9ae189236781fa4682811c1da6480f | [
"MIT"
] | null | null | null | """webapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# Route the site root to the naccbisapp URLconf and expose the Django admin.
urlpatterns = [
    path('', include('naccbisapp.urls')),
    path('admin/', admin.site.urls),
]
| 34.73913 | 77 | 0.703379 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.