repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
DjalelBBZ/ramp-board | ramp-database/ramp_database/utils.py | <reponame>DjalelBBZ/ramp-board<filename>ramp-database/ramp_database/utils.py
"""
The :mod:`ramp_database.utils` module provides tools to setup and connect to the
RAMP database.
"""
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.engine.url import URL
from .model import Model
def setup_db(config):
    """Create a sqlalchemy engine and session to interact with the database.

    Parameters
    ----------
    config : dict
        Configuration file containing the information to connect to the
        dataset. If you are using the configuration provided by ramp, it
        corresponds to the `sqlalchemy` key.

    Returns
    -------
    db : :class:`sqlalchemy.Engine`
        The engine to connect to the database.
    Session : :class:`sqlalchemy.orm.Session`
        Configured Session class which can later be used to communicate with
        the database.
    """
    # Build the connection URL from the configuration mapping and create the
    # engine together with a bound session factory.
    engine = create_engine(URL(**config))
    session_factory = sessionmaker(engine)
    # Create the tables of the relational model if they do not exist yet.
    Model.metadata.create_all(engine)
    return engine, session_factory
@contextmanager
def session_scope(config):
    """Connect to a database and provide a session to make some operation.

    Parameters
    ----------
    config : dict
        Configuration file containing the information to connect to the
        dataset. If you are using the configuration provided by ramp, it
        corresponds to the `sqlalchemy` key.

    Returns
    -------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    """
    db, Session = setup_db(config)
    with db.connect() as conn:
        session = Session(bind=conn)
        try:
            yield session
            # Commit only when the body of the ``with`` block succeeded.
            session.commit()
        except BaseException:
            # Explicit ``BaseException`` instead of a bare ``except:`` (PEP 8
            # E722); behavior is identical since the exception is re-raised,
            # but the intent (roll back on *any* failure, including
            # KeyboardInterrupt/GeneratorExit) is now visible.
            session.rollback()
            raise
        finally:
            session.close()
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/model/score.py | from sqlalchemy import Float
from sqlalchemy import Column
from sqlalchemy import String
from sqlalchemy import Integer
from sqlalchemy import Boolean
from ramp_utils.utils import encode_string
from .base import Model
__all__ = ['ScoreType']
# XXX: Should probably be addressed at some point?
# Deprecated: score types are now defined in problem.py.
# EventScoreType.score_type should be deleted then DB migrated.
class ScoreType(Model):
    """ScoreType table.

    Parameters
    ----------
    name : str
        The name of the score.
    is_lower_the_better : bool
        Whether a lower score is better.
    minimum : float
        The minimum possible score.
    maximum : float
        The maximum possible score.

    Attributes
    ----------
    id : int
        The ID of the row table.
    name : str
        The name of the score.
    is_lower_the_better : bool
        Whether a lower score is better.
    minimum : float
        The minimum possible score.
    maximum : float
        The maximum possible score.
    events : list of :class:`ramp_database.model.EventScoreType`
        A back-reference to the event using the score.
    """
    __tablename__ = 'score_types'

    # Primary key of the table.
    id = Column(Integer, primary_key=True)
    # Score names must be unique across the whole table.
    name = Column(String, nullable=False, unique=True)
    is_lower_the_better = Column(Boolean, nullable=False)
    minimum = Column(Float, nullable=False)
    maximum = Column(Float, nullable=False)

    def __init__(self, name, is_lower_the_better, minimum, maximum):
        self.name = name
        self.is_lower_the_better = is_lower_the_better
        self.minimum = minimum
        self.maximum = maximum

    def __repr__(self):
        return 'ScoreType(name={})'.format(encode_string(self.name))
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/model/__init__.py | """
The :mod:`ramp_database.model` defines the database structure which is used for the
RAMP events.
"""
from .base import * # noqa
from .user import * # noqa
from .fold import * # noqa
from .team import * # noqa
from .score import * # noqa
from .event import * # noqa
from .problem import * # noqa
from .workflow import * # noqa
from .datatype import * # noqa
from .submission import * # noqa
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/tests/test_utils.py | <filename>ramp-database/ramp_database/tests/test_utils.py
import shutil
import pytest
from ramp_utils import read_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_database.testing import create_test_db
from ramp_database.model import Model
from ramp_database.model import SubmissionFileType
from ramp_database.utils import setup_db
from ramp_database.utils import session_scope
@pytest.fixture
def database():
    """Create a fresh test database and drop all of its tables afterwards."""
    db_config = read_config(database_config_template())
    ramp_config = read_config(ramp_config_template())
    try:
        create_test_db(db_config, ramp_config)
        yield
    finally:
        # Remove the deployment directory and wipe the schema, whatever
        # happened during the test.
        deployment_dir = ramp_config['ramp']['deployment_dir']
        shutil.rmtree(deployment_dir, ignore_errors=True)
        engine, _ = setup_db(db_config['sqlalchemy'])
        Model.metadata.drop_all(engine)
def test_setup_db(database):
    """``setup_db`` returns a usable engine and session factory."""
    config = read_config(
        database_config_template(), filter_section='sqlalchemy'
    )
    engine, session_factory = setup_db(config)
    with engine.connect() as connection:
        session = session_factory(bind=connection)
        file_types = session.query(SubmissionFileType).all()
        assert len(file_types) > 0
def test_session_scope(database):
    """The ``session_scope`` context manager yields a working session."""
    config = read_config(
        database_config_template(), filter_section='sqlalchemy'
    )
    with session_scope(config) as session:
        file_types = session.query(SubmissionFileType).all()
        assert len(file_types) > 0
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/model/tests/test_fold.py | import datetime
import shutil
import pytest
from ramp_utils import read_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_database.model import CVFold
from ramp_database.model import Model
from ramp_database.model import SubmissionOnCVFold
from ramp_database.utils import setup_db
from ramp_database.utils import session_scope
from ramp_database.testing import create_toy_db
from ramp_database.tools.event import get_event
@pytest.fixture(scope='module')
def session_scope_module():
    """Provide a session on a toy database, shared by the whole module."""
    db_config = read_config(database_config_template())
    ramp_config = read_config(ramp_config_template())
    try:
        create_toy_db(db_config, ramp_config)
        with session_scope(db_config['sqlalchemy']) as session:
            yield session
    finally:
        # Clean up the deployment directory and drop every table.
        shutil.rmtree(
            ramp_config['ramp']['deployment_dir'], ignore_errors=True
        )
        engine, _ = setup_db(db_config['sqlalchemy'])
        Model.metadata.drop_all(engine)
def test_cv_fold_model(session_scope_module):
    """The CV fold ``repr`` mentions the training fold indices."""
    session = session_scope_module
    event = get_event(session, 'iris_test')
    folds = session.query(CVFold).filter(CVFold.event_id == event.id).all()
    assert "train fold [" in repr(folds[0])
@pytest.mark.parametrize(
    'backref, expected_type',
    [('submissions', SubmissionOnCVFold)]
)
def test_cv_fold_model_backref(session_scope_module, backref, expected_type):
    """Back-references on ``CVFold`` expose a list of the expected type."""
    session = session_scope_module
    event = get_event(session, 'iris_test')
    fold = (session.query(CVFold)
            .filter(CVFold.event_id == event.id)
            .first())
    items = getattr(fold, backref)
    assert isinstance(items, list)
    if items:  # only check the element type when the list is not empty
        assert isinstance(items[0], expected_type)
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/tests/test_string_encoding.py | import sys
from ramp_utils import encode_string
PYTHON3 = sys.version_info[0] == 3
def test_encode_string():
    """``encode_string`` always returns bytes, whatever the input type."""
    if PYTHON3:
        assert isinstance(encode_string('a string'), bytes)
        # byte strings are passed through as bytes as well
        assert isinstance(encode_string(b'a string'), bytes)
    else:
        assert isinstance(encode_string('a string'), bytes)
|
DjalelBBZ/ramp-board | ramp-engine/ramp_engine/tests/test_cli.py | import shutil
from click.testing import CliRunner
from ramp_utils import read_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_database.model import Model
from ramp_database.utils import setup_db
from ramp_database.testing import create_toy_db
from ramp_engine.cli import main
def setup_module(module):
    """Create a toy database once before the tests of this module run."""
    db_config = read_config(database_config_template())
    ramp_config = read_config(ramp_config_template())
    create_toy_db(db_config, ramp_config)
def teardown_module(module):
    """Drop the toy database and remove the deployment directory."""
    db_config = read_config(database_config_template())
    ramp_config = read_config(ramp_config_template())
    shutil.rmtree(ramp_config['ramp']['deployment_dir'], ignore_errors=True)
    engine, _ = setup_db(db_config['sqlalchemy'])
    Model.metadata.drop_all(engine)
def test_dispatcher():
    """The ``dispatcher`` CLI command exits cleanly on the toy setup."""
    runner = CliRunner()
    cli_args = ["dispatcher",
                "--config", database_config_template(),
                "--event-config", ramp_config_template()]
    result = runner.invoke(main, cli_args)
    assert result.exit_code == 0, result.output
def test_worker():
    """The ``worker`` CLI command exits cleanly on the starting kit."""
    runner = CliRunner()
    cli_args = ["worker",
                "--config", ramp_config_template(),
                "--submission", "starting_kit"]
    result = runner.invoke(main, cli_args)
    assert result.exit_code == 0, result.output
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/deploy.py | import os
import shutil
from ramp_database.testing import setup_files_extension_type
from ramp_database.testing import setup_ramp_kits_ramp_data
from ramp_database.tools.event import add_event
from ramp_database.tools.event import add_problem
from ramp_database.tools.event import get_problem
from ramp_database.utils import session_scope
from .config_parser import read_config
from .ramp import generate_ramp_config
def deploy_ramp_event(config, event_config, setup_ramp_repo=True, force=False):
    """Deploy a RAMP event using a configuration file.

    This utility is in charge of creating the kit and data repository for a
    given RAMP event. It will also setup the database.

    Parameters
    ----------
    config : str
        The path to the YAML file containing the database information.
    event_config : str
        The path to the YAML file containing the RAMP information.
    setup_ramp_repo : bool, default is True
        Whether or not to setup the RAMP kit and data repositories.
    force : bool, default is False
        Whether or not to potentially overwrite the repositories, problem and
        event in the database.

    Raises
    ------
    ValueError
        If the problem is already registered with different kit or data paths
        and ``force`` is not set.
    """
    database_config = read_config(config, filter_section='sqlalchemy')
    event_config = read_config(event_config)
    ramp_config = generate_ramp_config(event_config)
    with session_scope(database_config) as session:
        setup_files_extension_type(session)
        if setup_ramp_repo:
            setup_ramp_kits_ramp_data(
                event_config, ramp_config['event'], force
            )
        # check whether the problem is already registered in the database
        problem = get_problem(session, ramp_config['event'])
        if problem is None:
            add_problem(session, ramp_config['event'],
                        ramp_config['ramp_kits_dir'],
                        ramp_config['ramp_data_dir'])
        else:
            if ((ramp_config['ramp_kits_dir'] != problem.path_ramp_kits or
                 ramp_config['ramp_data_dir'] != problem.path_ramp_data) and
                    not force):
                # fixed: the concatenated message parts were missing their
                # separating spaces ("The pathto the kit...set"force=True"").
                raise ValueError(
                    'The RAMP problem already exists in the database. The '
                    'path to the kit or to the data is different. You need '
                    'to set "force=True" if you want to overwrite these '
                    'parameters.'
                )
            if setup_ramp_repo:
                setup_ramp_kits_ramp_data(
                    event_config, ramp_config['event'], force
                )
            # overwrite the already registered problem with the new paths
            add_problem(session, ramp_config['event'],
                        ramp_config['ramp_kits_dir'],
                        ramp_config['ramp_data_dir'],
                        force)
        if not os.path.exists(ramp_config['ramp_submissions_dir']):
            os.makedirs(ramp_config['ramp_submissions_dir'])
        add_event(session, ramp_config['event'],
                  ramp_config['event_name'],
                  ramp_config['event_title'],
                  ramp_config['sandbox_name'],
                  ramp_config['ramp_submissions_dir'],
                  ramp_config['event_is_public'],
                  force)
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/frontend.py | import six
from .config_parser import read_config
def generate_flask_config(config):
    """Generate the configuration to deal with Flask.

    Parameters
    ----------
    config : dict or str
        Either the loaded configuration or the configuration YAML file.

    Returns
    -------
    flask_config : dict
        The configuration for the RAMP worker.
    """
    if isinstance(config, six.string_types):
        # a path was given: load only the sections we need
        config = read_config(config, filter_section=['flask', 'sqlalchemy'])
    # Flask expects upper-case configuration keys.
    flask_config = {}
    for key, value in config['flask'].items():
        flask_config[key.upper()] = value
    db_config = config['sqlalchemy']
    uri = '{drivername}://{username}:{password}@{host}:{port}/{database}'
    flask_config['SQLALCHEMY_DATABASE_URI'] = uri.format(
        drivername=db_config['drivername'],
        username=db_config['username'],
        password=db_config['password'],
        host=db_config['host'],
        port=db_config['port'],
        database=db_config['database'],
    )
    return flask_config
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/tests/test_utils.py | <reponame>DjalelBBZ/ramp-board
import os
import six
from ramp_utils.utils import check_password
from ramp_utils.utils import encode_string
from ramp_utils.utils import hash_password
from ramp_utils.utils import import_module_from_source
def test_import_module_from_source():
    """A module loaded from a source file exposes its functions."""
    dirname = os.path.dirname(__file__)
    # local_module.py consists of a single function used as a fixture
    source = os.path.join(dirname, 'local_module.py')
    module = import_module_from_source(source, 'mod')
    assert hasattr(module, 'func_local_module')
def test_check_password():
    """A password should only match its own bcrypt hash."""
    password = "<PASSWORD>"
    hashed = hash_password(password)
    assert check_password(password, hashed)
    assert not check_password("<PASSWORD>", hashed)
def test_encode_string():
    """``encode_string`` always returns bytes, whatever the input type."""
    if six.PY3:
        assert isinstance(encode_string('a string'), bytes)
        # byte strings are passed through as bytes as well
        assert isinstance(encode_string(b'a string'), bytes)
    else:
        assert isinstance(encode_string('a string'), bytes)
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/utils.py | <gh_stars>0
import importlib
import importlib.util
import sys

import bcrypt
import six
def import_module_from_source(source, name):
    """Load a module from a Python source file.

    Parameters
    ----------
    source : str
        Path to the Python source file which will be loaded as a module.
    name : str
        Name to give to the module once loaded.

    Returns
    -------
    module : Python module
        Return the Python module which has been loaded.
    """
    if sys.version_info[0] < 3:
        # ``importlib.util`` does not exist on Python 2; fall back on ``imp``.
        import imp
        return imp.load_source(name, source)
    # ``importlib.util`` is now imported explicitly at the top of the module:
    # a plain ``import importlib`` is not guaranteed to make the ``util``
    # submodule available as an attribute, which could raise AttributeError.
    spec = importlib.util.spec_from_file_location(name, source)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def hash_password(password):
    """Hash a password.

    Parameters
    ----------
    password : str or bytes
        Human readable password.

    Returns
    -------
    hashed_password : bytes
        The hashed password.
    """
    # bcrypt works on bytes; a fresh salt is generated for every call.
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(encode_string(password), salt)
def check_password(password, hashed_password):
    """Check if a password is the same than the hashed password.

    Parameters
    ----------
    password : str or bytes
        Human readable password.
    hashed_password : str or bytes
        The hashed password.

    Returns
    -------
    is_same_password : bool
        Return True if the two passwords are identical.
    """
    # Normalize both values to bytes before delegating to bcrypt.
    candidate = encode_string(password)
    reference = encode_string(hashed_password)
    return bcrypt.checkpw(candidate, reference)
def encode_string(text):
    """Encode text into an array of bytes in both Python 2 and 3 with UTF-8.

    Parameters
    ----------
    text : str or bytes
        The text to be encoded.

    Returns
    -------
    encoded_text : bytes
        The encoded text.
    """
    # Returning byte strings untouched avoids the implicit ASCII decoding
    # that ``bytes.encode('utf8')`` triggered on Python 2 (it raised
    # UnicodeDecodeError on non-ASCII byte strings) and removes the need
    # for a six version check.
    if isinstance(text, bytes):
        return text
    return text.encode('utf-8')
|
DjalelBBZ/ramp-board | ramp-frontend/ramp_frontend/__init__.py | <filename>ramp-frontend/ramp_frontend/__init__.py
import os
from flask import Flask
from flask_login import LoginManager
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from ramp_database.model import Model
from ._version import __version__
# ``__all__`` declares the public API; the previous ``all = [...]`` shadowed
# the ``all`` builtin by mistake and had no effect on wildcard imports.
# NOTE(review): this now restricts ``from ramp_frontend import *`` to
# ``__version__`` only -- explicit imports of ``db``/``mail``/etc. are
# unaffected.
__all__ = [
    '__version__'
]

HERE = os.path.dirname(__file__)

# Flask extensions are instantiated unbound here and attached to the
# application inside ``create_app``.
db = SQLAlchemy(model_class=Model)
login_manager = LoginManager()
mail = Mail()
def create_app(config):
    """Create the RAMP Flask app and register the views.

    Parameters
    ----------
    config : dict
        The Flask configuration generated with
        :func:`ramp_utils.generate_flask_config`.

    Returns
    -------
    app : Flask
        The Flask app created.
    """
    app = Flask('ramp-frontend', root_path=HERE)
    app.config.update(config)
    with app.app_context():
        # bind the module-level SQLAlchemy handle to this application
        db.init_app(app)
        # register the login manager
        login_manager.init_app(app)
        login_manager.login_view = 'auth.login'
        login_manager.login_message = ('Please log in or sign up to access '
                                       'this page.')
        # register the email manager
        mail.init_app(app)
        # register our blueprint
        # imported lazily here -- presumably to avoid circular imports at
        # module load time; confirm before moving them to the top of the file
        from .views import admin
        from .views import auth
        from .views import general
        from .views import leaderboard
        from .views import ramp
        app.register_blueprint(admin.mod)
        app.register_blueprint(auth.mod)
        app.register_blueprint(general.mod)
        app.register_blueprint(leaderboard.mod)
        app.register_blueprint(ramp.mod)
        # initialize the database
        db.create_all()
        return app
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/string_encoding.py | # TODO: remove this file once we are moving to Python 3 only
import sys
PYTHON3 = sys.version_info[0] == 3
def encode_string(text):
    """Encode text into an array of bytes in both Python 2 and 3 with UTF-8.

    Parameters
    ----------
    text : str or bytes
        The text to be encoded.

    Returns
    -------
    encoded_text : bytes
        The encoded text.
    """
    # Returning byte strings untouched avoids the implicit ASCII decoding
    # that ``bytes.encode('utf8')`` triggered on Python 2 (it raised
    # UnicodeDecodeError on non-ASCII byte strings); the version check is
    # no longer needed.
    if isinstance(text, bytes):
        return text
    return text.encode('utf-8')
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/__init__.py | <filename>ramp-database/ramp_database/__init__.py
from ._version import __version__
# ``__all__`` declares the public API of the package; the previous
# ``all = [...]`` shadowed the ``all`` builtin by mistake.
__all__ = [
    '__version__'
]
DjalelBBZ/ramp-board | ramp-database/ramp_database/model/tests/test_submission.py | import os
import re
import shutil
import numpy as np
from numpy.testing import assert_allclose
import pytest
from ramp_utils import read_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from rampwf.prediction_types.base import BasePrediction
from ramp_database.model import DetachedSubmissionOnCVFold
from ramp_database.model import Event
from ramp_database.model import EventScoreType
from ramp_database.model import Extension
from ramp_database.model import HistoricalContributivity
from ramp_database.model import Model
from ramp_database.model import Submission
from ramp_database.model import SubmissionFile
from ramp_database.model import SubmissionFileType
from ramp_database.model import SubmissionFileTypeExtension
from ramp_database.model import SubmissionOnCVFold
from ramp_database.model import SubmissionScore
from ramp_database.model import SubmissionScoreOnCVFold
from ramp_database.model import Team
from ramp_database.model import WorkflowElementType
from ramp_database.utils import setup_db
from ramp_database.utils import session_scope
from ramp_database.testing import create_toy_db
from ramp_database.tools.submission import get_submission_by_id
@pytest.fixture(scope='module')
def session_scope_module():
    """Provide a session on a toy database, shared by the whole module."""
    db_config = read_config(database_config_template())
    ramp_config = read_config(ramp_config_template())
    try:
        create_toy_db(db_config, ramp_config)
        with session_scope(db_config['sqlalchemy']) as session:
            yield session
    finally:
        # Clean up the deployment directory and drop every table.
        shutil.rmtree(
            ramp_config['ramp']['deployment_dir'], ignore_errors=True
        )
        engine, _ = setup_db(db_config['sqlalchemy'])
        Model.metadata.drop_all(engine)
def test_submission_model_property(session_scope_module):
    """Exercise the read-only properties of the ``Submission`` model."""
    # check that the property of Submission
    submission = get_submission_by_id(session_scope_module, 5)
    assert re.match(r'Submission\(iris_test/test_user/.*\)',
                    str(submission))
    assert re.match(r'Submission\(event_name.*\)', repr(submission))
    assert isinstance(submission.team, Team)
    assert isinstance(submission.event, Event)
    assert submission.official_score_name == 'acc'
    assert isinstance(submission.official_score, SubmissionScore)
    assert all([isinstance(score, EventScoreType)
                for score in submission.score_types])
    assert issubclass(submission.Predictions, BasePrediction)
    assert submission.is_not_sandbox is True
    assert submission.is_error is False
    assert submission.is_public_leaderboard is False
    assert submission.is_private_leaderboard is False
    # path-related properties are derived from the submission id (5)
    assert (os.path.join('submissions', 'submission_000000005') in
            submission.path)
    assert submission.basename == 'submission_000000005'
    assert "submissions.submission_000000005" in submission.module
    assert len(submission.f_names) == 1
    assert submission.f_names[0] == 'classifier.py'
    assert submission.link == '/' + os.path.join(submission.hash_,
                                                 'classifier.py')
    # the *_with_link properties render small HTML anchors
    assert re.match('<a href={}>{}/{}/{}</a>'
                    .format(submission.link, submission.event.name,
                            submission.team.name, submission.name),
                    submission.full_name_with_link)
    assert re.match('<a href={}>{}</a>'
                    .format(submission.link, submission.name),
                    submission.name_with_link)
    assert re.match('<a href=.*{}.*error.txt>{}</a>'
                    .format(submission.hash_, submission.state),
                    submission.state_with_link)
    for score in submission.ordered_scores(score_names=['acc', 'error']):
        assert isinstance(score, SubmissionScore)
def test_submission_model_set_state(session_scope_module):
    """Setting the submission state propagates to every CV fold."""
    submission = get_submission_by_id(session_scope_module, 5)
    submission.set_state('scored')
    assert submission.state == 'scored'
    for fold in submission.on_cv_folds:
        assert fold.state == 'scored'
def test_submission_model_reset(session_scope_module):
    """``reset`` restores the submission and its bagged scores."""
    submission = get_submission_by_id(session_scope_module, 5)
    for score in submission.ordered_scores(score_names=['acc', 'error']):
        assert isinstance(score, SubmissionScore)
        # set the score to later test the reset function
        score.valid_score_cv_bag = 1.0
        score.test_score_cv_bag = 1.0
        score.valid_score_cv_bags = np.ones(2)
        score.test_score_cv_bags = np.ones(2)
    # set to non-default the variable that should change with reset
    submission.error_msg = 'simulate an error'
    submission.contributivity = 100.
    submission.reset()
    assert submission.contributivity == pytest.approx(0)
    assert submission.state == 'new'
    assert submission.error_msg == ''
    # after reset the bagged scores fall back to [0, 1] for 'acc'/'error'
    # respectively, and the per-bag arrays are cleared
    for score, worse_score in zip(submission.ordered_scores(['acc', 'error']),
                                  [0, 1]):
        assert score.valid_score_cv_bag == pytest.approx(worse_score)
        assert score.test_score_cv_bag == pytest.approx(worse_score)
        assert score.valid_score_cv_bags is None
        assert score.test_score_cv_bags is None
def test_submission_model_set_error(session_scope_module):
    """``set_error`` flags the submission and each of its CV folds."""
    submission = get_submission_by_id(session_scope_module, 5)
    state, message = 'training_error', 'simulate an error'
    submission.set_error(state, message)
    assert submission.state == state
    assert submission.error_msg == message
    for fold in submission.on_cv_folds:
        assert fold.state == state
        assert fold.error_msg == message
@pytest.mark.parametrize(
    "state, expected_contributivity",
    [('scored', 0.3), ('training_error', 0.0)]
)
def test_submission_model_set_contributivity(session_scope_module, state,
                                             expected_contributivity):
    """Fold contributivities are aggregated only for scored submissions."""
    submission = get_submission_by_id(session_scope_module, 5)
    submission.set_state(state)
    # give each fold a non-default contributivity to aggregate
    for fold in submission.on_cv_folds:
        fold.contributivity = 0.3
    submission.set_contributivity()
    assert submission.contributivity == pytest.approx(expected_contributivity)
@pytest.mark.parametrize(
    'backref, expected_type',
    [('historical_contributivitys', HistoricalContributivity),
     ('scores', SubmissionScore),
     ('files', SubmissionFile),
     ('on_cv_folds', SubmissionOnCVFold),
     ('sources', Submission),
     ('targets', Submission)]
)
def test_submission_model_backref(session_scope_module, backref,
                                  expected_type):
    """Each back-reference exposes a list of the expected model type."""
    submission = get_submission_by_id(session_scope_module, 5)
    items = getattr(submission, backref)
    assert isinstance(items, list)
    if items:  # only check the element type when the list is not empty
        assert isinstance(items[0], expected_type)
@pytest.mark.parametrize(
    "state_cv_folds, expected_state",
    [(['tested', 'tested'], 'tested'),
     (['tested', 'validated'], 'validated'),
     (['validated', 'validated'], 'validated'),
     (['trained', 'validated'], 'trained'),
     (['trained', 'tested'], 'trained'),
     (['trained', 'trained'], 'trained'),
     (['training_error', 'tested'], 'training_error'),
     (['validating_error', 'tested'], 'validating_error'),
     (['testing_error', 'tested'], 'testing_error')]
)
def test_submission_model_set_state_after_training(session_scope_module,
                                                   state_cv_folds,
                                                   expected_state):
    """The global state reflects the least advanced or failing fold."""
    submission = get_submission_by_id(session_scope_module, 5)
    # force the state of each fold before recomputing the global state
    for fold, fold_state in zip(submission.on_cv_folds, state_cv_folds):
        fold.state = fold_state
    submission.set_state_after_training()
    assert submission.state == expected_state
def test_submission_score_model_property(session_scope_module):
    """Check the basic properties of the first score of submission 5."""
    # the first score row attached to submission 5 (iris) is the accuracy
    score = (session_scope_module.query(SubmissionScore)
             .filter(SubmissionScore.submission_id == 5)
             .first())
    assert score.score_name == 'acc'
    assert callable(score.score_function)
    assert score.precision == 2
@pytest.mark.parametrize(
    "step_score", ['train_score', 'valid_score', 'test_score']
)
def test_submission_score_model_scoring(session_scope_module, step_score):
    """Mean and std of a score are computed across the CV folds."""
    score = (session_scope_module.query(SubmissionScore)
             .filter(SubmissionScore.submission_id == 5)
             .first())
    # assign known fold scores so that mean and std are predictable
    for fold, fold_value in zip(score.on_cv_folds, [0.2, 0.8]):
        setattr(fold, step_score, fold_value)
    mean = getattr(score, step_score + '_cv_mean')
    std = getattr(score, step_score + '_cv_std')
    assert mean == pytest.approx(0.5)
    assert std == pytest.approx(0.3)
@pytest.mark.parametrize(
    'backref, expected_type',
    [('on_cv_folds', SubmissionScoreOnCVFold)]
)
def test_submission_score_model_backref(session_scope_module, backref,
                                        expected_type):
    """Back-references on ``SubmissionScore`` expose the expected type."""
    score = (session_scope_module.query(SubmissionScore)
             .filter(SubmissionScore.submission_id == 5)
             .first())
    items = getattr(score, backref)
    assert isinstance(items, list)
    if items:  # only check the element type when the list is not empty
        assert isinstance(items[0], expected_type)
def test_submission_file_model_property(session_scope_module):
    """Exercise the path/naming properties of ``SubmissionFile``."""
    # get the submission file of an iris submission with only a classifier file
    submission_file = \
        (session_scope_module.query(SubmissionFile)
         .filter(SubmissionFile.submission_id == 5)
         .first())
    # NOTE(review): '(.*)' below is a regex group, not literal parentheses,
    # so the pattern is looser than it looks.
    assert re.match('SubmissionFile(.*)',
                    repr(submission_file))
    assert submission_file.is_editable is True
    assert submission_file.extension == 'py'
    assert submission_file.type == 'classifier'
    assert submission_file.name == 'classifier'
    assert submission_file.f_name == 'classifier.py'
    assert re.match('/.*classifier.py', submission_file.link)
    assert re.match('.*submissions.*submission_000000005.*classifier.py',
                    submission_file.path)
    assert re.match('<a href=".*classifier.py">.*classifier</a>',
                    submission_file.name_with_link)
    assert re.match('from sklearn.base import BaseEstimator.*',
                    submission_file.get_code())
    # set_code replaces the stored code, as read back by get_code
    submission_file.set_code(code='# overwriting a code file')
    assert submission_file.get_code() == '# overwriting a code file'
def test_submission_file_type_extension_model_property(session_scope_module):
    """The first registered file type extension is a ``.py`` code file."""
    type_extension = (session_scope_module
                      .query(SubmissionFileTypeExtension)
                      .first())
    assert type_extension.file_type == 'code'
    assert type_extension.extension_name == 'py'
@pytest.mark.parametrize(
    'backref, expected_type',
    [('submission_files', SubmissionFile)]
)
def test_submission_file_type_extension_model_backref(session_scope_module,
                                                      backref, expected_type):
    """Back-references on ``SubmissionFileTypeExtension`` expose files."""
    type_extension = (session_scope_module
                      .query(SubmissionFileTypeExtension)
                      .first())
    items = getattr(type_extension, backref)
    assert isinstance(items, list)
    if items:  # only check the element type when the list is not empty
        assert isinstance(items[0], expected_type)
def test_submission_score_on_cv_fold_model_property(session_scope_module):
    """Check the accuracy score attached to a CV fold of submission 5."""
    query = (session_scope_module.query(SubmissionScoreOnCVFold)
             .filter(SubmissionScoreOnCVFold.submission_score_id ==
                     SubmissionScore.id)
             .filter(SubmissionScore.event_score_type_id ==
                     EventScoreType.id)
             .filter(SubmissionScore.submission_id == 5)
             .filter(EventScoreType.name == 'acc'))
    fold_score = query.first()
    assert fold_score.name == 'acc'
    assert isinstance(fold_score.event_score_type, EventScoreType)
    assert callable(fold_score.score_function)
def test_submission_on_cv_fold_model_property(session_scope_module):
    """Check ``repr`` and the official score of a submission CV fold."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    fold.state = 'scored'
    fold.contributivity = 0.2
    assert repr(fold) == 'state = scored, c = 0.2, best = False'
    assert isinstance(fold.official_score, SubmissionScoreOnCVFold)
    assert fold.official_score.name == 'acc'
@pytest.mark.parametrize(
    "state_set, expected_state",
    [('new', False),
     ('checked', False),
     ('checking_error', False),
     ('trained', False),
     ('training_error', False),
     ('validated', False),
     ('validating_error', False),
     ('tested', False),
     ('testing_error', False),
     ('training', False),
     ('sent_to_training', False),
     ('scored', True)]
)
def test_submission_on_cv_fold_model_is_public_leaderboard(
        session_scope_module, state_set, expected_state):
    """Only a 'scored' fold is eligible for the public leaderboard."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    fold.state = state_set
    assert fold.is_public_leaderboard is expected_state
@pytest.mark.parametrize(
    "state_set, expected_state",
    [('new', False),
     ('checked', False),
     ('checking_error', False),
     ('trained', True),
     ('training_error', False),
     ('validated', True),
     ('validating_error', True),
     ('tested', True),
     ('testing_error', True),
     ('training', False),
     ('sent_to_training', False),
     ('scored', True)]
)
def test_submission_on_cv_fold_model_is_trained(session_scope_module,
                                                state_set, expected_state):
    """``is_trained`` is True for every state at or past 'trained'."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    fold.state = state_set
    assert fold.is_trained is expected_state
@pytest.mark.parametrize(
    "state_set, expected_state",
    [('new', False),
     ('checked', False),
     ('checking_error', False),
     ('trained', False),
     ('training_error', False),
     ('validated', True),
     ('validating_error', False),
     ('tested', True),
     ('testing_error', True),
     ('training', False),
     ('sent_to_training', False),
     ('scored', True)]
)
def test_submission_on_cv_fold_model_is_validated(session_scope_module,
                                                  state_set, expected_state):
    """``is_validated`` is True for every state at or past 'validated'."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    fold.state = state_set
    assert fold.is_validated is expected_state
@pytest.mark.parametrize(
    "state_set, expected_state",
    [('new', False),
     ('checked', False),
     ('checking_error', False),
     ('trained', False),
     ('training_error', False),
     ('validated', False),
     ('validating_error', False),
     ('tested', True),
     ('testing_error', False),
     ('training', False),
     ('sent_to_training', False),
     ('scored', True)]
)
def test_submission_on_cv_fold_model_is_tested(session_scope_module,
                                               state_set, expected_state):
    """``is_tested`` is True only for 'tested' and 'scored' folds."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    fold.state = state_set
    assert fold.is_tested is expected_state
@pytest.mark.parametrize(
    "state_set, expected_state",
    [('new', False),
     ('checked', False),
     ('checking_error', True),
     ('trained', False),
     ('training_error', True),
     ('validated', False),
     ('validating_error', True),
     ('tested', False),
     ('testing_error', True),
     ('training', False),
     ('sent_to_training', False),
     ('scored', False)]
)
def test_submission_on_cv_fold_model_is_error(session_scope_module,
                                              state_set, expected_state):
    """``is_error`` is True exactly for the '*_error' states."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    fold.state = state_set
    assert fold.is_error is expected_state
def test_submission_on_cv_fold_model_predictions(session_scope_module):
    """Stored prediction arrays are wrapped into ``BasePrediction``s."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    # fake one-hot predictions for the full train set and the test set
    fold.full_train_y_pred = np.empty((120, 3))
    fold.full_train_y_pred[:, 0] = 1
    fold.full_train_y_pred[:, 1:] = 0
    fold.test_y_pred = np.empty((30, 3))
    fold.test_y_pred[:, 0] = 1
    fold.test_y_pred[:, 1:] = 0
    for predictions in (fold.full_train_predictions, fold.train_predictions,
                        fold.valid_predictions, fold.test_predictions):
        assert isinstance(predictions, BasePrediction)
def test_submission_on_cv_fold_model_reset(session_scope_module):
    """Check that ``reset`` restores all mutable attributes and the 'acc'
    scores of a CV fold to their default values."""
    cv_fold = \
        (session_scope_module.query(SubmissionOnCVFold)
         .filter(SubmissionOnCVFold.submission_id == 5)
         .first())
    # set to non-default values
    cv_fold.full_train_y_pred = np.empty((120, 3))
    cv_fold.full_train_y_pred[:, 0] = 1
    cv_fold.full_train_y_pred[:, 1:] = 0
    # NOTE: a redundant ``cv_fold.test_y_pred = np.ones(30)`` assignment was
    # removed here -- it was immediately overwritten by the lines below.
    cv_fold.test_y_pred = np.empty((30, 3))
    cv_fold.test_y_pred[:, 0] = 1
    cv_fold.test_y_pred[:, 1:] = 0
    cv_fold.contributivity = 0.3
    cv_fold.best = True
    cv_fold.train_time = 1
    cv_fold.valid_time = 1
    cv_fold.test_time = 1
    cv_fold.state = 'scored'
    cv_fold.error_msg = 'simulate a message'
    for score in cv_fold.scores:
        if score.name == 'acc':
            score.train_score = 1.0
            score.valid_score = 1.0
            score.test_score = 1.0
    cv_fold.reset()
    # everything should be back to the defaults
    assert cv_fold.contributivity == pytest.approx(0)
    assert cv_fold.best is False
    assert cv_fold.full_train_y_pred is None
    assert cv_fold.test_y_pred is None
    assert cv_fold.train_time == pytest.approx(0)
    assert cv_fold.valid_time == pytest.approx(0)
    assert cv_fold.test_time == pytest.approx(0)
    assert cv_fold.state == 'new'
    assert cv_fold.error_msg == ''
    for score in cv_fold.scores:
        if score.name == 'acc':
            assert score.train_score == pytest.approx(0)
            assert score.valid_score == pytest.approx(0)
            assert score.test_score == pytest.approx(0)
def test_submission_on_cv_fold_model_error(session_scope_module):
    """``set_error`` should record both the error state and the message."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    state, msg = 'training_error', 'simulate an error'
    fold.set_error(state, msg)
    assert fold.state == state
    assert fold.error_msg == msg
@pytest.mark.filterwarnings('ignore:F-score is ill-defined and being set to')
def test_submission_on_cv_fold_model_train_scores(session_scope_module):
    """Training scores are computed only once the fold was trained."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    # fake one-hot predictions: always predict the first class
    fold.state = 'trained'
    pred = np.zeros((120, 3))
    pred[:, 0] = 1
    fold.full_train_y_pred = pred
    fold.compute_train_scores()
    for score in fold.scores:
        if score.name == 'acc':
            assert score.train_score == pytest.approx(1 / 3)
    # simulate that the training did not complete
    fold.state = 'training'
    fold.compute_train_scores()
    for score in fold.scores:
        if score.name == 'acc':
            assert score.train_score == pytest.approx(0)
@pytest.mark.filterwarnings('ignore:F-score is ill-defined and being set to')
def test_submission_on_cv_fold_model_valid_scores(session_scope_module):
    """Validation scores are computed only once the fold was validated."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    # fake one-hot predictions: always predict the first class
    fold.state = 'validated'
    pred = np.zeros((120, 3))
    pred[:, 0] = 1
    fold.full_train_y_pred = pred
    fold.compute_valid_scores()
    for score in fold.scores:
        if score.name == 'acc':
            assert score.valid_score == pytest.approx(1 / 3)
    # simulate that the training did not complete
    fold.state = 'training'
    fold.compute_valid_scores()
    for score in fold.scores:
        if score.name == 'acc':
            assert score.valid_score == pytest.approx(0)
@pytest.mark.filterwarnings('ignore:F-score is ill-defined and being set to')
def test_submission_on_cv_fold_model_test_scores(session_scope_module):
    """Test scores are computed only once the fold was scored."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    # fake one-hot predictions: always predict the first class
    fold.state = 'scored'
    pred = np.zeros((30, 3))
    pred[:, 0] = 1
    fold.test_y_pred = pred
    fold.compute_test_scores()
    for score in fold.scores:
        if score.name == 'acc':
            assert score.test_score == pytest.approx(1 / 3)
    # simulate that the training did not complete
    fold.state = 'training'
    fold.compute_test_scores()
    for score in fold.scores:
        if score.name == 'acc':
            assert score.test_score == pytest.approx(0)
def test_submission_on_cv_fold_model_update(session_scope_module):
    """``update`` should copy state, timings and predictions from a detached
    fold back onto the database-backed one."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    detached = DetachedSubmissionOnCVFold(fold)
    detached.state = 'scored'
    detached.train_time = 1
    detached.valid_time = 2
    detached.test_time = 3
    detached.full_train_y_pred = np.zeros((120, 3))
    detached.test_y_pred = np.zeros((30, 3))
    fold.update(detached)
    assert fold.state == 'scored'
    assert (fold.train_time, fold.valid_time, fold.test_time) == (1, 2, 3)
    assert_allclose(fold.full_train_y_pred, np.zeros((120, 3)))
    assert_allclose(fold.test_y_pred, np.zeros((30, 3)))
@pytest.mark.parametrize(
    'backref, expected_type',
    [('scores', SubmissionScoreOnCVFold)]
)
def test_submission_on_cv_fold_model_backref(session_scope_module, backref,
                                             expected_type):
    """Each back-reference should be a list of the expected model type."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    linked = getattr(fold, backref)
    assert isinstance(linked, list)
    # only check the element type when the list is not empty
    if linked:
        assert isinstance(linked[0], expected_type)
def test_detached_submission_on_cv_fold_model(session_scope_module):
    """The detached fold ``repr`` should start with 'Submission'."""
    fold = (session_scope_module.query(SubmissionOnCVFold)
            .filter(SubmissionOnCVFold.submission_id == 5)
            .first())
    detached = DetachedSubmissionOnCVFold(fold)
    assert re.match('Submission(.*).*', repr(detached))
@pytest.mark.parametrize(
    'backref, expected_type',
    [('submission_file_types', SubmissionFileTypeExtension)]
)
def test_extension_model_backref(session_scope_module, backref, expected_type):
    """Each back-reference should be a list of the expected model type."""
    extension = session_scope_module.query(Extension).first()
    linked = getattr(extension, backref)
    assert isinstance(linked, list)
    # only check the element type when the list is not empty
    if linked:
        assert isinstance(linked[0], expected_type)
@pytest.mark.parametrize(
    'backref, expected_type',
    [('workflow_element_types', WorkflowElementType)]
)
def test_submission_file_type_model_backref(session_scope_module, backref,
                                            expected_type):
    """Each back-reference should be a list of the expected model type."""
    file_type = session_scope_module.query(SubmissionFileType).first()
    linked = getattr(file_type, backref)
    assert isinstance(linked, list)
    # only check the element type when the list is not empty
    if linked:
        assert isinstance(linked[0], expected_type)
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/__init__.py | from .config_parser import read_config
from .frontend import generate_flask_config
from .ramp import generate_ramp_config
from .string_encoding import encode_string
from .utils import import_module_from_source
from .worker import generate_worker_config
from ._version import __version__
# Public API of the :mod:`ramp_utils` package.  ``encode_string`` was
# imported above but missing from ``__all__``; it is now exported too.
__all__ = [
    'encode_string',
    'generate_flask_config',
    'generate_ramp_config',
    'generate_worker_config',
    'import_module_from_source',
    'read_config',
    '__version__'
]
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/cli.py | <filename>ramp-utils/ramp_utils/cli.py
import click
from ramp_utils import deploy
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def main():
    # Root command group for the ``ramp-utils`` CLI; sub-commands are
    # registered below via ``@main.command()``.
    pass
@main.command()
@click.option("--config", default='config.yml', show_default=True,
              help='Configuration file in YAML format containing the database '
              'information')
@click.option("--event-config", default='config.yml', show_default=True,
              help='Configuration file in YAML format containing the RAMP '
              'information')
@click.option("--cloning/--no-cloning", default=True, show_default=True,
              help='Whether or not to clone the RAMP kit and data '
              'repositories.')
@click.option('--force', is_flag=True,
              help='Whether or not to potentially overwrite the '
              'repositories, problem and event in the database.')
def deploy_ramp_event(config, event_config, cloning, force):
    # Thin CLI wrapper: all the work is delegated to
    # :func:`ramp_utils.deploy.deploy_ramp_event`.
    deploy.deploy_ramp_event(config, event_config, cloning, force)
def start():
    # Console-script entry point wrapping the click group.
    main()
if __name__ == '__main__':
    start()
|
DjalelBBZ/ramp-board | ramp-engine/ramp_engine/__init__.py | <filename>ramp-engine/ramp_engine/__init__.py
from .dispatcher import Dispatcher
from .local import CondaEnvWorker
from ._version import __version__
# Mapping from the worker-type name used in configuration files to the
# worker class implementing it.
available_workers = {'conda': CondaEnvWorker}
__all__ = [
    'CondaEnvWorker',
    'Dispatcher',
    'available_workers',
    '__version__'
]
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/tools/database.py | import logging
from ..model import Extension
from ..model import SubmissionFileType
from ..model import SubmissionFileTypeExtension
from ._query import select_extension_by_name
from ._query import select_submission_file_type_by_name
from ._query import select_submission_type_extension_by_name
logger = logging.getLogger('RAMP-DATABASE')
# Add functions: add entries in the database
def add_extension(session, name):
    """Register a new extension, e.g. 'py', unless it already exists.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    name : str
        The name of the extension to add if it does not exist.
    """
    # guard clause: nothing to do when the extension is already registered
    if select_extension_by_name(session, name) is not None:
        return
    extension = Extension(name=name)
    logger.info('Adding {}'.format(extension))
    session.add(extension)
    session.commit()
def add_submission_file_type(session, name, is_editable, max_size):
    """Register a submission file type, e.g. ('code', True, 10 ** 5).

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    name : str
        The name of file type.
    is_editable : bool
        If the file type is editable.
    max_size : int
        The maximum size of the file.

    Notes
    -----
    Should be preceded by adding extensions.
    """
    # guard clause: nothing to do when the file type is already registered
    if select_submission_file_type_by_name(session, name) is not None:
        return
    submission_file_type = SubmissionFileType(
        name=name, is_editable=is_editable, max_size=max_size)
    logger.info('Adding {}'.format(submission_file_type))
    session.add(submission_file_type)
    session.commit()
def add_submission_file_type_extension(session, type_name, extension_name):
    """Link a submission file type to an extension, e.g. ('code', 'py').

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    type_name : str
        The file type.
    extension_name : str
        The extension name.

    Notes
    -----
    Should be preceded by adding submission file types and extensions.
    """
    # guard clause: the (type, extension) pair may already be registered
    existing = select_submission_type_extension_by_name(
        session, type_name, extension_name
    )
    if existing is not None:
        return
    type_extension = SubmissionFileTypeExtension(
        type=select_submission_file_type_by_name(session, type_name),
        extension=select_extension_by_name(session, extension_name)
    )
    logger.info('Adding {}'.format(type_extension))
    session.add(type_extension)
    session.commit()
# Get functions: get information from the database
def get_extension(session, extension_name):
    """Get extension from the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    extension_name : str or None
        The name of the extension to query. If None, all the extensions will be
        queried.

    Returns
    -------
    extension : :class:`ramp_database.model.Extension` or list of \
:class:`ramp_database.model.Extension`
        The queried extension.
    """
    # thin wrapper kept for API symmetry with the ``add_*`` functions
    return select_extension_by_name(session, extension_name)
def get_submission_file_type(session, type_name):
    """Get submission file type from the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    type_name : str or None
        The name of the type to query. If None, all the file type will be
        queried.

    Returns
    -------
    extension : :class:`ramp_database.model.SubmissionFileType` or list of \
:class:`ramp_database.model.SubmissionFileType`
        The queried submission file type.
    """
    # thin wrapper kept for API symmetry with the ``add_*`` functions
    return select_submission_file_type_by_name(session, type_name)
def get_submission_file_type_extension(session, type_name, extension_name):
    """Get submission file type extension from the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    type_name : str or None
        The name of the type to query. If None, all the file type will be
        queried.
    extension_name : str or None
        The name of the extension to query. If None, all the extension will be
        queried.

    Returns
    -------
    extension : :class:`ramp_database.model.SubmissionFileTypeExtension` or list of \
:class:`ramp_database.model.SubmissionFileTypeExtension`
        The queried submission file type.
    """
    # thin wrapper kept for API symmetry with the ``add_*`` functions
    return select_submission_type_extension_by_name(
        session, type_name, extension_name
    )
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/model/team.py | <reponame>DjalelBBZ/ramp-board
import datetime
from sqlalchemy import Column
from sqlalchemy import String
from sqlalchemy import Integer
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from ramp_utils.utils import encode_string
from .base import Model
__all__ = ['Team']
class Team(Model):
    """Team table.

    Parameters
    ----------
    name : str
        The name of the team.
    admin : :class:`ramp_database.model.User`
        The admin user of the team.
    initiator : None or :class:`ramp_database.model.Team`, default is None
        The team initiating a merging.
    acceptor : None or :class:`ramp_database.model.Team`, default is None
        The team accepting a merging.

    Attributes
    ----------
    id : int
        The ID of the table row.
    name : str
        The name of the team.
    admin_id : int
        The ID of the admin user.
    admin : :class:`ramp_database.model.User`
        The admin user instance.
    initiator_id : int
        The ID of the team asking for merging.
    initiator : :class:`ramp_database.model.Team`
        The team instance asking for merging.
    acceptor_id : int
        The ID of the team accepting the merging.
    acceptor : :class:`ramp_database.model.Team`
        The team instance accepting the merging.
    team_events : :class:`ramp_database.model.EventTeam`
        A back-reference to the events to which the team is enrolled.
    """
    __tablename__ = 'teams'
    id = Column(Integer, primary_key=True)
    # the team name acts as a unique handle
    name = Column(String(20), nullable=False, unique=True)
    admin_id = Column(Integer, ForeignKey('users.id'))
    admin = relationship('User', backref=backref('admined_teams'))
    # initiator asks for merge, acceptor accepts; both are self-referential
    # relationships on this same table
    initiator_id = Column(Integer, ForeignKey('teams.id'), default=None)
    initiator = relationship(
        'Team', primaryjoin=('Team.initiator_id == Team.id'), uselist=False
    )
    acceptor_id = Column(Integer, ForeignKey('teams.id'), default=None)
    acceptor = relationship(
        'Team', primaryjoin=('Team.acceptor_id == Team.id'), uselist=False
    )
    creation_timestamp = Column(DateTime, nullable=False)
    def __init__(self, name, admin, initiator=None, acceptor=None):
        self.name = name
        self.admin = admin
        self.initiator = initiator
        self.acceptor = acceptor
        # creation time recorded in UTC
        self.creation_timestamp = datetime.datetime.utcnow()
    def __str__(self):
        return 'Team({})'.format(encode_string(self.name))
    def __repr__(self):
        return ('Team(name={}, admin_name={}, initiator={}, acceptor={})'
                .format(encode_string(self.name),
                        encode_string(self.admin.name),
                        self.initiator, self.acceptor))
|
DjalelBBZ/ramp-board | ramp-frontend/ramp_frontend/security.py | <filename>ramp-frontend/ramp_frontend/security.py
from itsdangerous import URLSafeTimedSerializer
from ramp_frontend import app
# Serializer used to create and verify timed, URL-safe tokens, signed with
# the Flask application's ``SECRET_KEY``.
ts = URLSafeTimedSerializer(app.config["SECRET_KEY"])
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/worker.py | import os
import six
from .config_parser import read_config
def generate_worker_config(config):
    """Generate the configuration for RAMP worker from a configuration
    file.

    Parameters
    ----------
    config : dict or str
        Either the loaded configuration or the configuration YAML file.

    Returns
    -------
    worker_config : dict
        The configuration for the RAMP worker.
    """
    if isinstance(config, six.string_types):
        config = read_config(config, filter_section=['ramp', 'worker'])
    ramp_config = config['ramp']
    deployment_dir = ramp_config['deployment_dir']
    # start from the worker section and augment it with the deployment paths
    worker_config = config['worker'].copy()
    # the kit and data directories are specific to the event
    worker_config['kit_dir'] = os.path.join(
        deployment_dir, ramp_config['kits_dir'], ramp_config['event']
    )
    worker_config['data_dir'] = os.path.join(
        deployment_dir, ramp_config['data_dir'], ramp_config['event']
    )
    # submissions, predictions and logs live directly under the deployment
    for key in ('submissions_dir', 'predictions_dir', 'logs_dir'):
        worker_config[key] = os.path.join(deployment_dir, ramp_config[key])
    return worker_config
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/tests/test_testing.py | <filename>ramp-database/ramp_database/tests/test_testing.py
import os
import shutil
import pytest
from git.exc import GitCommandError
from ramp_utils import read_config
from ramp_utils import generate_ramp_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_database.utils import setup_db
from ramp_database.utils import session_scope
from ramp_database.model import Model
from ramp_database.exceptions import NameClashError
from ramp_database.tools.user import get_user_by_name
from ramp_database.tools.event import get_problem
from ramp_database.testing import create_test_db
from ramp_database.testing import add_events
from ramp_database.testing import add_users
from ramp_database.testing import add_problems
from ramp_database.testing import setup_ramp_kits_ramp_data
from ramp_database.testing import sign_up_teams_to_events
from ramp_database.testing import submit_all_starting_kits
@pytest.fixture(scope='module')
def database_config():
    # loaded once per test module: the configuration is only read
    return read_config(database_config_template())
@pytest.fixture(scope='module')
def ramp_config():
    # loaded once per test module: the configuration is only read
    return read_config(ramp_config_template())
@pytest.fixture
def session_scope_function(database_config, ramp_config):
    # Fresh database and deployment tree for every test function; the
    # ``finally`` block guarantees cleanup even when the test fails.
    try:
        create_test_db(database_config, ramp_config)
        with session_scope(database_config['sqlalchemy']) as session:
            yield session
    finally:
        shutil.rmtree(
            ramp_config['ramp']['deployment_dir'], ignore_errors=True
        )
        # drop all tables so the next test starts from an empty schema
        db, _ = setup_db(database_config['sqlalchemy'])
        Model.metadata.drop_all(db)
def test_ramp_kits_ramp_data(session_scope_function, ramp_config):
    # cloning the same kit/data pair twice must fail unless force=True
    setup_ramp_kits_ramp_data(ramp_config, 'iris')
    msg_err = 'The RAMP kit repository was previously cloned.'
    with pytest.raises(ValueError, match=msg_err):
        setup_ramp_kits_ramp_data(ramp_config, 'iris')
    # retrieve the path to the ramp kit to remove it
    internal_ramp_config = generate_ramp_config(ramp_config)
    shutil.rmtree(os.path.join(internal_ramp_config['ramp_kits_dir'], 'iris'))
    # with the kit removed, the data repository clone still triggers an error
    msg_err = 'The RAMP data repository was previously cloned.'
    with pytest.raises(ValueError, match=msg_err):
        setup_ramp_kits_ramp_data(ramp_config, 'iris')
    # force=True should succeed even with pre-existing clones
    setup_ramp_kits_ramp_data(ramp_config, 'iris', force=True)
def test_add_users(session_scope_function):
    """Adding the test users twice must raise a name clash."""
    add_users(session_scope_function)
    expected_names = ('test_user', 'test_user_2', 'test_iris_admin')
    for user in get_user_by_name(session_scope_function, None):
        assert user.name in expected_names
    with pytest.raises(NameClashError, match='username is already in use'):
        add_users(session_scope_function)
def test_add_problems(session_scope_function, ramp_config):
    """Adding the test problems twice must fail on the existing clones."""
    add_problems(session_scope_function, ramp_config)
    for problem in get_problem(session_scope_function, None):
        assert problem.name in ('iris', 'boston_housing')
    # trying to add twice the same problem will raise a git error since the
    # repositories already exist.
    with pytest.raises(ValueError,
                       match='The RAMP kit repository was previously cloned.'):
        add_problems(session_scope_function, ramp_config)
def test_add_events(session_scope_function, ramp_config):
    # an event can only be registered once for a given configuration
    add_problems(session_scope_function, ramp_config)
    add_events(session_scope_function, ramp_config)
    with pytest.raises(ValueError):
        add_events(session_scope_function, ramp_config)
def test_sign_up_team_to_events(session_scope_function, ramp_config):
    # smoke test: users, problems and events must exist before sign-up
    add_users(session_scope_function)
    add_problems(session_scope_function, ramp_config)
    add_events(session_scope_function, ramp_config)
    sign_up_teams_to_events(session_scope_function)
def test_submit_all_starting_kits(session_scope_function, ramp_config):
    # smoke test: full pipeline up to submitting every starting kit
    add_users(session_scope_function)
    add_problems(session_scope_function, ramp_config)
    add_events(session_scope_function, ramp_config)
    sign_up_teams_to_events(session_scope_function)
    submit_all_starting_kits(session_scope_function, ramp_config)
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/tests/test_cli.py | import shutil
from click.testing import CliRunner
from ramp_database.utils import setup_db
from ramp_database.model import Model
from ramp_utils import read_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_utils.cli import main
def teardown_function(function):
    # remove the deployment tree and drop all tables created by each test
    database_config = read_config(database_config_template())
    ramp_config = read_config(ramp_config_template())
    shutil.rmtree(ramp_config['ramp']['deployment_dir'], ignore_errors=True)
    db, _ = setup_db(database_config['sqlalchemy'])
    Model.metadata.drop_all(db)
def test_deploy_ramp_event():
    """Deploying twice should succeed when ``--force`` is passed."""
    runner = CliRunner()
    common_args = ['deploy-ramp-event',
                   '--config', database_config_template(),
                   '--event-config', ramp_config_template()]
    result = runner.invoke(main, common_args)
    assert result.exit_code == 0, result.output
    result = runner.invoke(main, common_args + ['--force'])
    assert result.exit_code == 0, result.output
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/tests/test_frontend.py | import pytest
from ramp_utils.testing import database_config_template
from ramp_utils import read_config
from ramp_utils import generate_flask_config
@pytest.mark.parametrize(
    "config",
    [database_config_template(),
     read_config(database_config_template())]
)
def test_generate_flask_config(config):
    # the generator must accept both a path to the YAML file and the
    # already-loaded configuration dictionary
    flask_config = generate_flask_config(config)
    expected_config = {
        'SECRET_KEY': '<KEY>',
        'WTF_CSRF_ENABLED': True,
        'LOG_FILENAME': 'None',
        'MAX_CONTENT_LENGTH': 1073741824,
        'DEBUG': True,
        'TESTING': False,
        'MAIL_SERVER': 'localhost',
        'MAIL_PORT': 8025,
        'MAIL_DEFAULT_SENDER': ['RAMP admin', '<EMAIL>'],
        'SQLALCHEMY_TRACK_MODIFICATIONS': True,
        'SQLALCHEMY_DATABASE_URI': ('postgresql://mrramp:mrramp@localhost:5432'
                                    '/databoard_test')
    }
    assert flask_config == expected_config
|
DjalelBBZ/ramp-board | ramp-frontend/ramp_frontend/cli.py | <reponame>DjalelBBZ/ramp-board
import click
from ramp_utils import generate_flask_config
from ramp_utils import read_config
from . import create_app
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def main():
    # Root command group for the ``ramp-frontend`` CLI; sub-commands are
    # registered below via ``@main.command()``.
    pass
@main.command()
@click.option("--config", default='config.yml', show_default=True,
              help='Configuration file in YAML format')
@click.option("--port", default=8080, show_default=True,
              help='The port where to launch the website')
@click.option("--host", default='127.0.0.1', show_default=True,
              help='The IP address where to launch the website')
def launch(config, port, host):
    # build the Flask app from the YAML configuration and serve it
    config = read_config(config)
    flask_config = generate_flask_config(config)
    app = create_app(flask_config)
    # NOTE(review): ``processes=1000`` with ``threaded=False`` makes the
    # development server fork per request -- confirm this combination is
    # intended rather than a production WSGI server.
    app.run(port=port, use_reloader=False,
            host=host, processes=1000, threaded=False)
def start():
    # Console-script entry point wrapping the click group.
    main()
if __name__ == '__main__':
    start()
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/tools/event.py | import logging
import os
from sqlalchemy.orm.exc import NoResultFound
from ramp_utils.utils import import_module_from_source
from ._query import select_event_admin_by_instance
from ._query import select_event_by_name
from ._query import select_extension_by_name
from ._query import select_problem_by_name
from ._query import select_submissions_by_state
from ._query import select_similarities_by_source
from ._query import select_similarities_by_target
from ._query import select_submission_by_id
from ._query import select_submission_type_extension_by_extension
from ._query import select_user_by_name
from ._query import select_workflow_by_name
from ._query import select_workflow_element_by_workflow_and_type
from ._query import select_workflow_element_type_by_name
from ..model import CVFold
from ..model import Event
from ..model import EventAdmin
from ..model import EventScoreType
from ..model import Keyword
from ..model import Problem
from ..model import ProblemKeyword
from ..model import Workflow
from ..model import WorkflowElement
from ..model import WorkflowElementType
logger = logging.getLogger('RAMP-DATABASE')
# Delete functions: remove from the database some information
def delete_problem(session, problem_name):
    """Delete a problem from the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    problem_name : str
        The name of the problem to remove.

    Raises
    ------
    sqlalchemy.orm.exc.NoResultFound
        If no problem with ``problem_name`` exists in the database.
    """
    problem = select_problem_by_name(session, problem_name)
    if problem is None:
        raise NoResultFound('No result found for "{}" in Problem table'
                            .format(problem_name))
    # cascade manually: every event attached to the problem is deleted first
    for event in problem.events:
        delete_event(session, event.name)
    session.delete(problem)
    session.commit()
def delete_event(session, event_name):
    """Delete an event from the database.

    The submission-similarity records of all the event submissions are
    removed before the event row itself.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The name of the event to delete.
    """
    event = select_event_by_name(session, event_name)
    # state=None: do not filter on the submission state
    submissions = select_submissions_by_state(session, event_name, state=None)
    for sub in submissions:
        delete_submission_similarity(session, sub.id)
    session.delete(event)
    session.commit()
def delete_submission_similarity(session, submission_id):
    """Delete every submission-similarity record linked to a submission.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submission_id : int
        The id of the submission to use.
    """
    submission = select_submission_by_id(session, submission_id)
    # the submission can appear either as target or as source of a similarity
    for similarity in (select_similarities_by_target(session, submission)
                       + select_similarities_by_source(session, submission)):
        session.delete(similarity)
    session.commit()
# Add functions: add to the database some information
def add_workflow(session, workflow_object):
    """Add a new workflow.

    Workflow class should exist in ``rampwf.workflows``. The name of the
    workflow will be the classname (e.g. Classifier). Element names are taken
    from ``workflow.element_names``. Element types are inferred from the
    extension. This is important because e.g. the max size and the editability
    will depend on the type.

    ``add_workflow`` is called by :func:`add_problem`, taking the workflow to
    add from the ``problem.py`` file of the starting kit.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    workflow_object : :mod:`rampwf.workflows`
        A ramp workflow instance. Refer to :mod:`rampwf.workflows` for all
        available workflows.

    Raises
    ------
    ValueError
        If an element name contains more than one '.', or if its extension or
        file type is not already registered in the database.
    """
    workflow_name = workflow_object.__class__.__name__
    workflow = select_workflow_by_name(session, workflow_name)
    if workflow is None:
        session.add(Workflow(name=workflow_name))
        # re-query so that ``workflow`` is the persisted instance
        workflow = select_workflow_by_name(session, workflow_name)
    for element_name in workflow_object.element_names:
        tokens = element_name.split('.')
        element_filename = tokens[0]
        # inferring that file is code if there is no extension
        if len(tokens) > 2:
            raise ValueError('File name {} should contain at most one "."'
                             .format(element_name))
        element_file_extension_name = tokens[1] if len(tokens) == 2 else 'py'
        extension = select_extension_by_name(session,
                                             element_file_extension_name)
        if extension is None:
            raise ValueError('Unknown extension {}.'
                             .format(element_file_extension_name))
        type_extension = select_submission_type_extension_by_extension(
            session, extension
        )
        if type_extension is None:
            raise ValueError('Unknown file type {}.'
                             .format(element_file_extension_name))
        # create the workflow element type on the fly when missing
        workflow_element_type = select_workflow_element_type_by_name(
            session, element_filename
        )
        if workflow_element_type is None:
            workflow_element_type = WorkflowElementType(
                name=element_filename, type=type_extension.type
            )
            logger.info('Adding {}'.format(workflow_element_type))
            session.add(workflow_element_type)
        # finally link the element type to the workflow itself
        workflow_element = select_workflow_element_by_workflow_and_type(
            session, workflow=workflow,
            workflow_element_type=workflow_element_type
        )
        if workflow_element is None:
            workflow_element = WorkflowElement(
                workflow=workflow,
                workflow_element_type=workflow_element_type
            )
            logger.info('Adding {}'.format(workflow_element))
            session.add(workflow_element)
    session.commit()
def add_problem(session, problem_name, kits_dir, data_dir, force=False):
    """Add a RAMP problem to the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    problem_name : str
        The name of the problem to register in the database.
    kits_dir : str
        The directory where the RAMP kits are located. It corresponds to
        the key `ramp_kits_dir` of the dictionary created with
        :func:`ramp_utils.generate_ramp_config`.
    data_dir : str
        The directory where the RAMP data are located. It corresponds to
        the key `ramp_data_dir` of the dictionary created with
        :func:`ramp_utils.generate_ramp_config`.
    force : bool, default is False
        Whether to force add the problem. If ``force=False``, an error is
        raised if the problem was already in the database.
    """
    problem = select_problem_by_name(session, problem_name)
    problem_kits_path = os.path.join(kits_dir, problem_name)
    if problem is not None:
        if not force:
            # fixed message: a space was missing before "force=True"
            raise ValueError('Attempting to overwrite a problem and '
                             'delete all linked events. Use "force=True" '
                             'if you want to overwrite the problem and '
                             'delete the events.')
        delete_problem(session, problem_name)
    # load the module to get the type of workflow used for the problem
    problem_module = import_module_from_source(
        os.path.join(problem_kits_path, 'problem.py'), 'problem')
    add_workflow(session, problem_module.workflow)
    problem = Problem(name=problem_name, path_ramp_kits=kits_dir,
                      path_ramp_data=data_dir, session=session)
    logger.info('Adding {}'.format(problem))
    session.add(problem)
    session.commit()
def add_event(session, problem_name, event_name, event_title,
              ramp_sandbox_name, ramp_submissions_path, is_public=False,
              force=False):
    """Add a RAMP event in the database.

    Event file should be set up in ``databoard/specific/events/<event_name>``.
    Should be preceded by adding a problem (cf., :func:`add_problem`), then
    ``problem_name`` imported in the event file (``problem_name`` is acting as
    a pointer for the join). Also adds CV folds.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    problem_name : str
        The problem name associated with the event.
    event_name : str
        The event name.
    event_title : str
        The event title.
    ramp_sandbox_name : str
        Name of the submission which will be considered the sandbox. It will
        correspond to the key ``sandbox_name`` of the dictionary created with
        :func:`ramp_utils.generate_ramp_config`.
    ramp_submissions_path : str
        Path to the deployment RAMP submissions directory. It will correspond
        to the key `ramp_submissions_dir` of the dictionary created with
        :func:`ramp_utils.generate_ramp_config`.
    is_public : bool, default is False
        Whether the event is made public or not.
    force : bool, default is False
        Whether to overwrite an existing event. If ``force=False``, an error
        will be raised.

    Returns
    -------
    event : Event
        The event which has been registered in the database.

    Raises
    ------
    ValueError
        If the event already exists and ``force`` is False.
    """
    event = select_event_by_name(session, event_name)
    if event is not None:
        if not force:
            raise ValueError("Attempting to overwrite existing event. "
                             "Use force=True to overwrite.")
        delete_event(session, event_name)
    event = Event(name=event_name, problem_name=problem_name,
                  event_title=event_title,
                  ramp_sandbox_name=ramp_sandbox_name,
                  path_ramp_submissions=ramp_submissions_path,
                  session=session)
    event.is_public = is_public
    event.is_send_submitted_mails = False
    event.is_send_trained_mails = False
    logger.info('Adding {}'.format(event))
    session.add(event)
    session.commit()
    # create the cross-validation folds declared by the problem module
    X_train, y_train = event.problem.get_train_data()
    cv = event.problem.module.get_cv(X_train, y_train)
    for train_indices, test_indices in cv:
        cv_fold = CVFold(event=event,
                         train_is=train_indices,
                         test_is=test_indices)
        session.add(cv_fold)
    # register each score type of the problem for this event
    score_types = event.problem.module.score_types
    for score_type in score_types:
        event_score_type = EventScoreType(event=event,
                                          score_type_object=score_type)
        session.add(event_score_type)
    # the first declared score type is the official leaderboard score
    event.official_score_name = score_types[0].name
    session.commit()
    return event
def add_event_admin(session, event_name, user_name):
    """Make a user an administrator of an event.

    The operation is idempotent: nothing is created if the user already
    administrates the event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    user_name : str
        The user name.
    """
    event = select_event_by_name(session, event_name)
    user = select_user_by_name(session, user_name)
    event_admin = select_event_admin_by_instance(session, event, user)
    if event_admin is None:
        event_admin = EventAdmin(event=event, admin=user)
        # explicitly register the new instance with the session, consistent
        # with the other add_* helpers (session.add is a no-op if the
        # relationship cascade already attached it)
        session.add(event_admin)
    session.commit()
def add_keyword(session, name, keyword_type, category=None, description=None,
                force=False):
    """Add a keyword to the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    name : str
        The name of the keyword.
    keyword_type : {'data_domain', 'data_science_theme'}
        The type of keyword.
    category : None or str, default is None
        The category of the keyword.
    description : None or str, default is None
        The description of the keyword.
    force : bool, default is False
        Whether or not to overwrite the keyword if it already exists.

    Raises
    ------
    ValueError
        If the keyword already exists and ``force`` is False.
    """
    keyword = session.query(Keyword).filter_by(name=name).one_or_none()
    if keyword is not None:
        if not force:
            # fix: original message was missing the space between
            # '"force=True"' and 'to overwrite'
            raise ValueError(
                'Attempting to update an existing keyword. Use "force=True" '
                'to overwrite the keyword.'
            )
        keyword.type = keyword_type
        keyword.category = category
        keyword.description = description
    else:
        keyword = Keyword(name=name, type=keyword_type, category=category,
                          description=description)
        session.add(keyword)
    session.commit()
def add_problem_keyword(session, problem_name, keyword_name, description=None,
                        force=False):
    """Link a keyword to a problem.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    problem_name : str
        The name of the problem.
    keyword_name : str
        The name of the keyword.
    description : None or str, default is None
        A particular description of the keyword of the particular problem.
    force : bool, default is False
        Whether or not to overwrite the relationship.

    Raises
    ------
    ValueError
        If the relationship already exists and ``force`` is False.
    """
    problem = select_problem_by_name(session, problem_name)
    keyword = get_keyword_by_name(session, keyword_name)
    problem_keyword = (session.query(ProblemKeyword)
                       .filter_by(problem=problem, keyword=keyword)
                       .one_or_none())
    if problem_keyword is None:
        # no existing link: create a fresh one
        problem_keyword = ProblemKeyword(
            problem=problem, keyword=keyword, description=description
        )
        session.add(problem_keyword)
    elif force:
        # link exists and overwrite was requested
        problem_keyword.description = description
    else:
        raise ValueError(
            'Attempting to update an existing problem-keyword '
            'relationship. Use "force=True" if you want to overwrite the '
            'relationship.'
        )
    session.commit()
# Getter functions: get information from the database
def get_problem(session, problem_name):
    """Fetch a problem (or all problems) from the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session used to query the database.
    problem_name : str or None
        Name of the problem to fetch; ``None`` fetches every problem.

    Returns
    -------
    problem : :class:`ramp_database.model.Problem` or list of \
:class:`ramp_database.model.Problem`
        The queried problem(s).
    """
    # thin convenience wrapper around the low-level selector
    return select_problem_by_name(session, problem_name)
def get_workflow(session, workflow_name):
    """Fetch a workflow (or all workflows) from the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session used to query the database.
    workflow_name : str or None
        Name of the workflow to fetch; ``None`` fetches every workflow.

    Returns
    -------
    workflow : :class:`ramp_database.model.Workflow` or list of \
:class:`ramp_database.model.Workflow`
        The queried workflow(s).
    """
    # thin convenience wrapper around the low-level selector
    return select_workflow_by_name(session, workflow_name)
def get_event(session, event_name):
    """Get event from the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str or None
        The name of the event to query. If None, all the events will be
        queried.

    Returns
    -------
    event : :class:`ramp_database.model.Event` or list of \
:class:`ramp_database.model.Event`
        The queried event.
    """
    return select_event_by_name(session, event_name)
def get_event_admin(session, event_name, user_name):
    """Fetch the admin relationship between an event and a user.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session used to query the database.
    event_name : str
        The event name.
    user_name : str
        The user name.

    Returns
    -------
    event_admin : :class:`ramp_database.model.EventAdmin` or None
        The event/admin instance queried, or None if the user does not
        administrate the event.
    """
    event = select_event_by_name(session, event_name)
    admin = select_user_by_name(session, user_name)
    return select_event_admin_by_instance(session, event, admin)
def get_keyword_by_name(session, name):
    """Get the keyword(s) filtered by their name.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    name : str or None
        The name of the keyword. If None, all keywords will be returned.

    Returns
    -------
    keyword : :class:`ramp_database.model.Keyword` or list of \
:class:`ramp_database.model.Keyword` or None
        The keyword(s) which have been queried; None when ``name`` does not
        match any keyword.
    """
    q = session.query(Keyword)
    if name is None:
        return q.all()
    return q.filter_by(name=name).one_or_none()
def get_problem_keyword_by_name(session, problem_name, keyword_name):
    """Get a problem-keyword relationship given their names.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session used to query the database.
    problem_name : str
        The name of the problem.
    keyword_name : str
        The name of the keyword.

    Returns
    -------
    problem_keyword : :class:`ramp_database.model.ProblemKeyword` or None
        The problem-keyword relationship, if any.
    """
    problem = select_problem_by_name(session, problem_name)
    keyword = (session.query(Keyword)
               .filter_by(name=keyword_name)
               .one_or_none())
    query = session.query(ProblemKeyword).filter_by(problem=problem,
                                                    keyword=keyword)
    return query.one_or_none()
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/tests/test_password.py | <filename>ramp-utils/ramp_utils/tests/test_password.py
from ramp_utils.password import hash_password
from ramp_utils.password import check_password
def test_check_password():
    """A hash must validate its own password and reject a different one."""
    # fix: both plaintexts had been clobbered to the same placeholder,
    # which made the negative assertion below self-contradictory --
    # they must be two distinct strings.
    password = "dummy_password"
    hashed_password = hash_password(password)
    assert check_password(password, hashed_password)
    assert not check_password("wrong_password", hashed_password)
|
DjalelBBZ/ramp-board | ramp-engine/ramp_engine/local.py | <gh_stars>0
import json
import logging
import os
import shutil
import subprocess
from .base import BaseWorker
logger = logging.getLogger('RAMP-WORKER')
class CondaEnvWorker(BaseWorker):
    """Local worker which uses a conda environment to dispatch a submission.

    Parameters
    ----------
    config : dict
        Configuration dictionary to set the worker. The following parameters
        should be set:

        * 'conda_env': the name of the conda environment to use. If not
          specified, the base environment will be used.
        * 'kit_dir': path to the directory of the RAMP kit;
        * 'data_dir': path to the directory of the data;
        * 'submissions_dir': path to the directory containing the
          submissions;
        * `logs_dir`: path to the directory where the log of the
          submission will be stored;
        * `predictions_dir`: path to the directory where the
          predictions of the submission will be stored.
    submission : str
        Name of the RAMP submission to be handled by the worker.

    Attributes
    ----------
    status : str
        The status of the worker. It should be one of the following states:

        * 'initialized': the worker has been instantiated.
        * 'setup': the worker has been set up.
        * 'running': the worker is training the submission.
        * 'finished': the worker finished training the submission.
        * 'collected': the results of the training have been collected.
    """
    def __init__(self, config, submission):
        super(CondaEnvWorker, self).__init__(config=config,
                                             submission=submission)
    @staticmethod
    def _check_config_name(config, param):
        # Fail fast with a helpful message when a mandatory key is missing.
        if param not in config.keys():
            raise ValueError("The worker required the parameter '{}' in the "
                             "configuration given at instantiation. Only {}"
                             "parameters were given."
                             .format(param, config.keys()))
    def setup(self):
        """Set up the worker.

        The worker will find the path to the conda environment to use using
        the configuration passed when instantiating the worker.
        """
        # sanity check for the configuration variable
        for required_param in ('kit_dir', 'data_dir', 'submissions_dir',
                               'logs_dir', 'predictions_dir'):
            self._check_config_name(self.config, required_param)
        # find the path to the conda environment
        env_name = (self.config['conda_env']
                    if 'conda_env' in self.config.keys() else 'base')
        # ask conda itself for the list of environments (JSON output)
        proc = subprocess.Popen(
            ["conda", "info", "--envs", "--json"],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
        stdout, _ = proc.communicate()
        conda_info = json.loads(stdout)
        if env_name == 'base':
            # NOTE(review): assumes the first entry of 'envs' is the base
            # environment -- matches conda's observed output ordering but is
            # not a documented guarantee; confirm against the conda version
            # in use.
            self._python_bin_path = os.path.join(conda_info['envs'][0], 'bin')
        else:
            # all environments except base
            envs_path = conda_info['envs'][1:]
            if not envs_path:
                raise ValueError('Only the conda base environment exist. You '
                                 'need to create the "{}" conda environment '
                                 'to use it.'.format(env_name))
            is_env_found = False
            for env in envs_path:
                # match on the last path component (the environment name)
                if env_name == os.path.split(env)[-1]:
                    is_env_found = True
                    self._python_bin_path = os.path.join(env, 'bin')
                    break
            if not is_env_found:
                raise ValueError('The specified conda environment {} does not '
                                 'exist. You need to create it.'
                                 .format(env_name))
        super(CondaEnvWorker, self).setup()
    def teardown(self):
        """Remove the predictions stored within the submission."""
        if self.status != 'collected':
            raise ValueError("Collect the results before to kill the worker.")
        # NOTE(review): this removes 'training_output' under 'kit_dir' while
        # collect_results() reads it from 'submissions_dir' -- confirm both
        # keys point at the same tree in the deployed configuration.
        output_training_dir = os.path.join(self.config['kit_dir'],
                                           'submissions', self.submission,
                                           'training_output')
        if os.path.exists(output_training_dir):
            shutil.rmtree(output_training_dir)
        super(CondaEnvWorker, self).teardown()
    def _is_submission_finished(self):
        """Status of the submission.

        The submission was launched in a subprocess. Calling ``poll()`` will
        indicate the status of this subprocess.
        """
        # poll() returns None while the subprocess is still running
        return False if self._proc.poll() is None else True
    def launch_submission(self):
        """Launch the submission.

        Basically, it comes to run ``ramp_test_submission`` using the conda
        environment given in the configuration. The submission is launched in
        a subprocess so as not to lock the Python main process.
        """
        cmd_ramp = os.path.join(self._python_bin_path, 'ramp_test_submission')
        if self.status == 'running':
            raise ValueError('Wait that the submission is processed before to '
                             'launch a new one.')
        self._proc = subprocess.Popen(
            [cmd_ramp,
             '--submission', self.submission,
             '--ramp_kit_dir', self.config['kit_dir'],
             '--ramp_data_dir', self.config['data_dir'],
             '--ramp_submission_dir', self.config['submissions_dir'],
             '--save-y-preds'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        super(CondaEnvWorker, self).launch_submission()
    def collect_results(self):
        """Collect the results after the submission is completed.

        Be aware that calling ``collect_results()`` before the submission
        finished will lock the Python main process awaiting for the submission
        to be processed. Use ``worker.status`` to know the status of the
        worker beforehand.

        Returns
        -------
        tuple of (int, str)
            The subprocess return code and its decoded stderr output.
            (Nothing is returned when the worker is not in the 'finished'
            or 'running' state.)
        """
        super(CondaEnvWorker, self).collect_results()
        if self.status == 'finished' or self.status == 'running':
            # communicate() will wait for the process to be completed
            self._proc_log, stderr = self._proc.communicate()
            # write the log into the disk
            log_dir = os.path.join(self.config['logs_dir'],
                                   self.submission)
            if not os.path.exists(log_dir):
                os.makedirs(log_dir)
            with open(os.path.join(log_dir, 'log'), 'wb+') as f:
                f.write(self._proc_log)
            # copy the predictions into the disk
            # no need to create the directory, it will be handled by copytree
            pred_dir = os.path.join(self.config['predictions_dir'],
                                    self.submission)
            output_training_dir = os.path.join(
                self.config['submissions_dir'], self.submission,
                'training_output')
            shutil.copytree(output_training_dir, pred_dir)
            self.status = 'collected'
            logger.info(repr(self))
            return (self._proc.returncode, stderr.decode('utf-8'))
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/password.py | <filename>ramp-utils/ramp_utils/password.py
import bcrypt
from .string_encoding import encode_string
def hash_password(password):
    """Hash a plain-text password with bcrypt.

    Parameters
    ----------
    password : str or bytes
        Human readable password.

    Returns
    -------
    hashed_password : bytes
        The bcrypt hash of ``password``.
    """
    # a fresh random salt is generated for every call
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(encode_string(password), salt)
def check_password(password, hashed_password):
    """Tell whether a plain-text password matches a bcrypt hash.

    Parameters
    ----------
    password : str or bytes
        Human readable password.
    hashed_password : str or bytes
        The hashed password.

    Returns
    -------
    is_same_password : bool
        True if ``password`` is the password behind ``hashed_password``.
    """
    candidate = encode_string(password)
    reference = encode_string(hashed_password)
    return bcrypt.checkpw(candidate, reference)
|
DjalelBBZ/ramp-board | ramp-frontend/ramp_frontend/testing.py | <reponame>DjalelBBZ/ramp-board<filename>ramp-frontend/ramp_frontend/testing.py<gh_stars>0
"""The :mod:`ramp_frontend.testing` module contains all functions used to easily
test the frontend."""
from contextlib import contextmanager
def login(client, username, password):
    """Simulate a user logging in through the web client.

    The client follows the redirect issued after the log-in request.

    Parameters
    ----------
    client : :class:`flask.testing.FlaskClient`
        The testing client used for unit testing.
    username : str
        The user's name.
    password : str
        The user's password.

    Returns
    -------
    response : :class:`flask.wrappers.Response`
        The response of the client.
    """
    form = {'user_name': username, 'password': password}
    return client.post('/login', data=form, follow_redirects=True)
def logout(client):
    """Simulate a user logging out.

    The client follows the redirect issued after the log-out request.

    Parameters
    ----------
    client : :class:`flask.testing.FlaskClient`
        The testing client used for unit testing.

    Returns
    -------
    response : :class:`flask.wrappers.Response`
        The response of the client.
    """
    return client.get('/logout', follow_redirects=True)
@contextmanager
def login_scope(client, username, password):
    """Context manager keeping the client logged in during the ``with`` scope.

    Parameters
    ----------
    client : :class:`flask.testing.FlaskClient`
        The testing client used for unit testing.
    username : str
        The user's name.
    password : str
        The user's password.

    Yields
    ------
    client : :class:`flask.testing.FlaskClient`
        A client which is logged-in for the duration of the ``with`` scope.
    """
    login(client, username, password)
    try:
        yield client
    finally:
        # fix: log out even when the body of the ``with`` block raises,
        # otherwise the session leaks into subsequent tests
        logout(client)
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/testing.py | """
The :mod:`ramp_database.testing` module create facility functions to test the
tools and model of ``ramp-database``.
"""
import logging
import os
import shutil
import subprocess
from git import Repo
from ramp_utils import generate_ramp_config
from .utils import setup_db
from .utils import session_scope
from .model import Model
from .tools.database import add_extension
from .tools.database import add_submission_file_type
from .tools.database import add_submission_file_type_extension
from .tools.event import add_event
from .tools.event import add_keyword
from .tools.event import add_problem
from .tools.event import add_problem_keyword
from .tools.user import approve_user
from .tools.user import add_user
from .tools.team import sign_up_team
from .tools.submission import submit_starting_kits
logger = logging.getLogger('RAMP-DATABASE')
def create_test_db(database_config, ramp_config):
    """Create an empty test database and set up the files for RAMP.

    Note: this will forcedly remove any existing content in the deployment
    directory.

    Parameters
    ----------
    database_config : dict
        The configuration file containing the database information.
    ramp_config : dict
        The configuration file containing the information about a RAMP event.
    """
    db_config = database_config['sqlalchemy']
    # we can automatically setup the database from the config file used for
    # the tests
    deployment = generate_ramp_config(ramp_config)
    # start from a clean deployment tree
    shutil.rmtree(deployment['deployment_dir'], ignore_errors=True)
    for key in ('ramp_kits_dir', 'ramp_data_dir', 'ramp_submissions_dir'):
        os.makedirs(deployment[key])
    db, _ = setup_db(db_config)
    Model.metadata.drop_all(db)
    Model.metadata.create_all(db)
    with session_scope(db_config) as session:
        setup_files_extension_type(session)
def create_toy_db(database_config, ramp_config):
    """Create a toy dataset with a couple of users, problems, and events.

    Parameters
    ----------
    database_config : dict
        The configuration file containing the database information.
    ramp_config : dict
        The configuration file containing the information about a RAMP event.
    """
    # first build an empty database, then populate it with toy content
    create_test_db(database_config, ramp_config)
    with session_scope(database_config['sqlalchemy']) as session:
        setup_toy_db(session, ramp_config)
# Setup functions: functions used to setup the database initially
def setup_toy_db(session, ramp_config):
    """Populate an already-created database with toy content.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    ramp_config : dict
        The configuration file containing the information about a RAMP event.
    """
    # order matters: users and problems must exist before events,
    # and events before sign-ups and submissions
    add_users(session)
    add_problems(session, ramp_config)
    add_events(session, ramp_config)
    sign_up_teams_to_events(session)
    submit_all_starting_kits(session, ramp_config)
def _delete_line_from_file(f_name, line_to_delete):
with open(f_name, "r+") as f:
lines = f.readlines()
f.seek(0)
for line in lines:
if line != line_to_delete:
f.write(line)
f.truncate()
def setup_ramp_kits_ramp_data(ramp_config, problem_name, force=False):
    """Clone the ramp-kits and ramp-data repositories and set them up.

    Parameters
    ----------
    ramp_config : dict
        The configuration file containing the information about a RAMP event.
    problem_name : str
        The name of the problem.
    force : bool, default is False
        Whether or not to overwrite the RAMP kit and data repositories if they
        already exist.

    Raises
    ------
    ValueError
        If a repository was previously cloned and ``force`` is False.
    """
    ramp_config = generate_ramp_config(ramp_config)
    problem_kits_path = os.path.join(ramp_config['ramp_kits_dir'],
                                     problem_name)
    if not os.path.exists(ramp_config['ramp_kits_dir']):
        os.makedirs(ramp_config['ramp_kits_dir'])
    if os.path.exists(problem_kits_path):
        if not force:
            raise ValueError(
                'The RAMP kit repository was previously cloned. To replace '
                'it, you need to set "force=True".'
            )
        shutil.rmtree(problem_kits_path, ignore_errors=True)
    ramp_kits_url = 'https://github.com/ramp-kits/{}.git'.format(problem_name)
    Repo.clone_from(ramp_kits_url, problem_kits_path)
    problem_data_path = os.path.join(ramp_config['ramp_data_dir'],
                                     problem_name)
    if not os.path.exists(ramp_config['ramp_data_dir']):
        os.makedirs(ramp_config['ramp_data_dir'])
    if os.path.exists(problem_data_path):
        if not force:
            raise ValueError(
                'The RAMP data repository was previously cloned. To replace '
                'it, you need to set "force=True".'
            )
        shutil.rmtree(problem_data_path, ignore_errors=True)
    ramp_data_url = 'https://github.com/ramp-data/{}.git'.format(problem_name)
    Repo.clone_from(ramp_data_url, problem_data_path)
    current_directory = os.getcwd()
    try:
        os.chdir(problem_data_path)
        subprocess.check_output(["python", "prepare_data.py"])
        os.chdir(problem_kits_path)
        subprocess.check_output(["jupyter", "nbconvert", "--to", "html",
                                 "{}_starting_kit.ipynb"
                                 .format(problem_name)])
        # delete this line since it triggers an error in the front-end
        # (it tries to open and execute "custom.css")
        _delete_line_from_file("{}_starting_kit.html".format(problem_name),
                               '<link rel="stylesheet" href="custom.css">\n')
    finally:
        # fix: restore the caller's working directory even if one of the
        # subprocess calls above fails
        os.chdir(current_directory)
def setup_files_extension_type(session):
    """Register the submission file extensions and types.

    This function registers the file extensions and types. It should be
    called right after creating the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    """
    # plain file extensions accepted in submissions
    for extension in ('py', 'R', 'txt', 'csv'):
        add_extension(session, extension)
    # (type name, is_editable, max size in bytes)
    for type_name, is_editable, max_size in (('code', True, 10 ** 5),
                                             ('text', True, 10 ** 5),
                                             ('data', False, 10 ** 8)):
        add_submission_file_type(session, type_name, is_editable, max_size)
    # map each file type to the extensions it accepts
    for type_name, extension_name in (('code', 'py'), ('code', 'R'),
                                      ('text', 'txt'), ('data', 'csv')):
        add_submission_file_type_extension(session, type_name, extension_name)
# Add functions: functions to populate the database to obtain a toy dataset
def add_users(session):
    """Register the dummy users used by the toy database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    """
    # (name, password, lastname, firstname, email, access_level, approved)
    users = (
        ('test_user', '<PASSWORD>', 'Test', 'User',
         '<EMAIL>', 'asked', True),
        ('test_user_2', '<PASSWORD>', 'Test_2', 'User_2',
         '<EMAIL>', 'user', True),
        ('test_iris_admin', '<PASSWORD>', 'Admin', 'Iris',
         '<EMAIL>', 'admin', False),
    )
    for name, password, lastname, firstname, email, level, approved in users:
        add_user(session, name=name, password=password, lastname=lastname,
                 firstname=firstname, email=email, access_level=level)
        if approved:
            approve_user(session, name)
def add_problems(session, ramp_config):
    """Register the toy problems and a couple of keywords for each.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    ramp_config : dict
        The configuration file containing the information about a RAMP event.
    """
    for name in ('iris', 'boston_housing'):
        # clone the kit/data repositories before registering the problem
        setup_ramp_kits_ramp_data(ramp_config, name)
        internal_ramp_config = generate_ramp_config(ramp_config)
        add_problem(session, name,
                    internal_ramp_config['ramp_kits_dir'],
                    internal_ramp_config['ramp_data_dir'])
        # attach a data-domain keyword named after the problem itself
        add_keyword(session, name, 'data_domain',
                    category='scientific data')
        add_problem_keyword(session, problem_name=name, keyword_name=name)
        # and a data-science-theme keyword derived from it
        theme = name + '_theme'
        add_keyword(session, theme, 'data_science_theme',
                    category='classification')
        add_problem_keyword(session, problem_name=name, keyword_name=theme)
def add_events(session, ramp_config):
    """Add the toy events in the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    ramp_config : dict
        The configuration file containing the information about a RAMP event.

    Notes
    -----
    Be aware that :func:`add_problems` needs to be called before.
    """
    config = generate_ramp_config(ramp_config)
    for problem_name in ('iris', 'boston_housing'):
        add_event(session, problem_name=problem_name,
                  event_name='{}_test'.format(problem_name),
                  event_title='test event',
                  ramp_sandbox_name=config['sandbox_name'],
                  ramp_submissions_path=config['ramp_submissions_dir'],
                  is_public=True, force=False)
def sign_up_teams_to_events(session):
    """Sign the toy users up to the toy events.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.

    Notes
    -----
    Be aware that :func:`add_users`, :func:`add_problems`,
    and :func:`add_events` need to be called before.
    """
    for event_name in ('iris_test', 'boston_housing_test'):
        for user_name in ('test_user', 'test_user_2'):
            sign_up_team(session, event_name, user_name)
def submit_all_starting_kits(session, ramp_config):
    """Submit all starting kits for both toy users.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    ramp_config : dict
        The configuration file containing the information about a RAMP event.
    """
    config = generate_ramp_config(ramp_config)
    for problem, event_name in (('iris', 'iris_test'),
                                ('boston_housing', 'boston_housing_test')):
        path_submissions = os.path.join(
            config['ramp_kits_dir'], problem, 'submissions'
        )
        for user_name in ('test_user', 'test_user_2'):
            submit_starting_kits(session, event_name, user_name,
                                 path_submissions)
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/ramp.py | import os
import six
from .config_parser import read_config
def generate_ramp_config(config):
    """Generate the configuration to deploy RAMP.

    Parameters
    ----------
    config : dict or str
        Either the loaded configuration or the configuration YAML file.

    Returns
    -------
    ramp_config : dict
        The configuration for the RAMP worker.
    """
    # accept either a path to a YAML file or an already-loaded dictionary
    if isinstance(config, six.string_types):
        config = read_config(config, filter_section='ramp')
    elif 'ramp' in config.keys():
        config = config['ramp']
    deployment = config['deployment_dir']
    kits_dir = os.path.join(deployment, config['kits_dir'])
    data_dir = os.path.join(deployment, config['data_dir'])
    return {
        'event': config['event'],
        'event_name': config['event_name'],
        'event_title': config['event_title'],
        'event_is_public': config['event_is_public'],
        'sandbox_name': config['sandbox_dir'],
        'deployment_dir': deployment,
        'ramp_kits_dir': kits_dir,
        'ramp_data_dir': data_dir,
        'ramp_kit_submissions_dir': os.path.join(
            kits_dir, config['event'], 'submissions'
        ),
        'ramp_submissions_dir': os.path.join(
            deployment, config['submissions_dir']
        ),
        'ramp_sandbox_dir': os.path.join(
            kits_dir, config['event'], 'submissions', config['sandbox_dir']
        ),
    }
|
DjalelBBZ/ramp-board | ramp-utils/ramp_utils/tests/test_worker.py | import os
import pytest
from ramp_utils.testing import ramp_config_template
from ramp_utils import read_config
from ramp_utils import generate_worker_config
@pytest.mark.parametrize(
    "config", [ramp_config_template(), read_config(ramp_config_template())]
)
def test_generate_worker_config(config):
    """Check the worker section is extracted from either a path or a dict."""
    worker_config = generate_worker_config(config)
    # expected values mirror the template configuration shipped with
    # ramp_utils.testing
    expected_config = {
        'worker_type': 'conda',
        'conda_env': 'ramp-iris',
        'kit_dir': os.path.join('/tmp/databoard_test', 'ramp-kits', 'iris'),
        'data_dir': os.path.join('/tmp/databoard_test', 'ramp-data', 'iris'),
        'submissions_dir': os.path.join('/tmp/databoard_test', 'submissions'),
        'predictions_dir': os.path.join('/tmp/databoard_test', 'preds'),
        'logs_dir': os.path.join('/tmp/databoard_test', 'log')
    }
    assert worker_config == expected_config
|
DjalelBBZ/ramp-board | ramp-database/ramp_database/exceptions.py | """
The :mod:`ramp_database.exceptions` module include all custom warnings and errors
used in ``ramp-database``.
"""
# Names exported by ``from ramp_database.exceptions import *``.
__all__ = [
    'DuplicateSubmissionError',
    'MergeTeamError',
    'MissingExtensionError',
    'MissingSubmissionFileError',
    'NameClashError',
    'TooEarlySubmissionError',
    'UnknownStateError'
]
class DuplicateSubmissionError(Exception):
    """Raised when a submission is already present in the database."""
class MergeTeamError(Exception):
    """Raised when merging two teams fails."""
class MissingSubmissionFileError(Exception):
    """Raised when a submitted file is absent from its expected location."""
class MissingExtensionError(Exception):
    """Raised when a file extension is not registered in the database."""
class NameClashError(Exception):
    """Raised on a duplicate submission name."""
class TooEarlySubmissionError(Exception):
    """Raised when a submission was sent too early."""
class UnknownStateError(Exception):
    """Raised when the state of a submission is unknown."""
|
DjalelBBZ/ramp-board | ramp-engine/ramp_engine/aws/ramp_aws_train.py | <filename>ramp-engine/ramp_engine/aws/ramp_aws_train.py
from __future__ import print_function, absolute_import, unicode_literals
import sys
import logging
import argparse
from argparse import RawTextHelpFormatter
from ramp_engine.aws.api import launch_ec2_instance_and_train
from ramp_engine.aws.api import train_on_existing_ec2_instance
from ramp_engine.aws.api import validate_config
from ramp_engine.config import read_backend_config
from ramp_database.tools import get_submission_by_name
# Help text displayed by the ``ramp_aws_train`` command-line interface
# (passed verbatim to argparse as the program description).
desc = """
Train a submission on AWS.
Two ways of specifying the submission are available.
Either we give the submission id or name.
Use ramp_aws_train config.yml --id=<submission id> if you want to
specify submission by id.
Use ramp_aws_train config.yml --event=<event name> --team=<team name> --name=<submission name>
if you want to specify submission by name.
By default a new ec2 instance will be created then training will be done there,
then the instance will be killed after training.
If you want to train on an existing instance just add the option
--instance-id. Example:
ramp_aws_train config.yml --event=<event name> --team=<team name> --name=<submission name> --instance-id=<instance id>
To find the instance id, you have to check the AWS EC2 console
or use the cli `aws` provided by amazon.
"""
def init_parser():
    """Build the command-line argument parser for ``ramp_aws_train``."""
    parser = argparse.ArgumentParser(
        prog=__file__,
        description=desc,
        formatter_class=RawTextHelpFormatter)
    parser.add_argument('config', type=str,
                        help='Backend configuration file with database')
    # simple optional flags sharing the same default behaviour
    for flag, flag_type, flag_help in (('--id', int, 'Submission ID'),
                                       ('--event', str, 'Event name'),
                                       ('--team', str, 'Team name'),
                                       ('--name', str, 'Submission name'),
                                       ('--instance-id', str, 'Instance id')):
        parser.add_argument(flag, type=flag_type, help=flag_help)
    parser.add_argument('--log-level', type=str, default='INFO',
                        help='Log level : DEBUG/INFO/WARNING/ERROR/CRITICAL')
    return parser
def main():
    """Entry point: resolve the submission to train and dispatch to AWS.

    The submission is identified either by ``--id`` or by the triplet
    ``--event/--team/--name``; training runs on an existing EC2 instance
    when ``--instance-id`` is given, otherwise a fresh instance is launched.
    """
    parser = init_parser()
    args = parser.parse_args()
    logger = logging.getLogger('ramp_aws')
    logger.setLevel(args.log_level)
    config = read_backend_config(args.config)
    validate_config(config)
    if args.id:
        submission_id = args.id
    elif args.name and args.event and args.team:
        try:
            submission = get_submission_by_name(
                config,
                args.event,
                args.team,
                args.name
            )
        except Exception as ex:
            print('Submission not found. Reasons:')
            print(ex)
            sys.exit(1)
        submission_id = submission.id
    else:
        # BUG FIX: the two adjacent string literals used to concatenate
        # without a separating space ("submissionevent/team/name").
        print('Please specify either submission id, or alternatively '
              'submission event/team/name. Use ramp_aws_train --help for help.')
        sys.exit(1)
    if args.instance_id:
        train_on_existing_ec2_instance(config, args.instance_id, submission_id)
    else:
        launch_ec2_instance_and_train(config, submission_id)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|
sbhattach/SNMP-TRAP-Receiver | trap_inform.py | """
SNMP TRAP and Infor Reciver for SNMP V1,V2c,V3
"""
import argparse
try:
from pysnmp.entity import engine, config
from pysnmp.carrier.asyncore.dgram import udp, udp6
from pysnmp.entity.rfc3413 import ntfrcv
from pysnmp.proto.api import v2c
from pysnmp.smi import builder, view, compiler, rfc1902, error
from pysnmp import debug
except ImportError:
print "IMPORT ERROR Please install PYSNMP 4.3.8 usning"
# Module-level MIB view; populated by mib_builder() and read by cbFun().
mibViewController = None
# Running count of received notification PDUs (printed per notification).
pdu_count = 1
def user_input(snmpEngine):
"""
TBD
:param snmpEngine:
:return:
"""
CUSTOM_MIB_PATH= '/usr/share/snmp/mibs/'
LOAD_MIB_MODULE = ''
ans = 'no'
print "\n"
PORT=raw_input("Please Provide The SNMP Trap Port: ")
print "\n"
vserion = raw_input("Please Enter SNMP Version [OPTION: 1,2,3] :")
print "\n"
ip_type = raw_input("Please IP Type [OPTION: 4, 6] :")
print "\n"
if vserion in ['1', '2']:
COMMUNITYSTRING = raw_input("Please Provide SNMP V1/V2 community "
"String: ")
print "\n"
config.addV1System(snmpEngine, COMMUNITYSTRING, COMMUNITYSTRING)
while 1:
asn = raw_input("Waant to add a another community ("
"Yes/No/n/y)?")
if ans in ["yes", "Yes", "Y", "y"]:
COMMUNITYSTRING = raw_input(
"Please Provide SNMP V1/V2 community "
"String: ")
config.addV1System(snmpEngine, COMMUNITYSTRING, COMMUNITYSTRING)
else:
break
else:
add_snmp_v3(snmpEngine)
print "\n"
_new_mib_path=raw_input("Please provide the custom mib dir path: ")
print "\n"
_new_mib_path = _new_mib_path.strip()
if _new_mib_path and _new_mib_path[-1] == "/":
CUSTOM_MIB_PATH = _new_mib_path+','+CUSTOM_MIB_PATH
else:
CUSTOM_MIB_PATH = _new_mib_path+'/'+','+CUSTOM_MIB_PATH
LOAD_MIB_MODULE = raw_input("Please provide the custom MIB Name seperated "
"by comma: ")
print "\n"
return COMMUNITYSTRING, CUSTOM_MIB_PATH, PORT, LOAD_MIB_MODULE, ip_type
def add_transport(snmpEngine, PORT, ip_type):
"""
:param snmpEngine:
:return:
"""
try:
if ip_type == '6':
config.addTransport(
snmpEngine,
udp.domainName,
udp6.Udp6SocketTransport().openServerMode((
'::', int(PORT)))
)
else:
config.addTransport(
snmpEngine,
udp.domainName,
udp.UdpTransport().openServerMode(('0.0.0.0',
int(PORT)))
)
except Exception as e:
print "{} Port Binding Failed the Provided Port {} is in Use".format(e, PORT)
def add_snmp_v3(snmpEngine):
    """
    Interactively register SNMPv3 (USM) users on the engine.

    Prompts in a loop until the operator answers "No"; each user is added
    with the selected auth/priv protocols and security engine id.

    :param snmpEngine: pysnmp SnmpEngine to register the users on
    :return: None
    """
    # Maps protocol names (as typed by the operator) to pysnmp constants.
    __authProtocol = {
        'usmHMACMD5AuthProtocol': config.usmHMACMD5AuthProtocol,
        'usmHMACSHAAuthProtocol': config.usmHMACSHAAuthProtocol,
        'usmAesCfb128Protocol': config.usmAesCfb128Protocol,
        'usmAesCfb256Protocol': config.usmAesCfb256Protocol,
        'usmAesCfb192Protocol': config.usmAesCfb192Protocol,
        'usmDESPrivProtocol': config.usmDESPrivProtocol,
        'usmNoAuthProtocol': config.usmNoAuthProtocol,
        'usmNoPrivProtocol': config.usmNoPrivProtocol
    }
    while 1:
        V3=raw_input("Want to add New V3 User (Yes/No/n/y)?")
        if V3 in ["yes", "Yes", "Y", "y"]:
            v3_user = raw_input("Provide V3 User Name: ")
            print "\n"
            v3_authkey = raw_input("Provide Auth Key: ")
            print "\n"
            v3_privkey = raw_input("Provide Priv Key: ")
            print "\n"
            authProtocol = raw_input("Provide authProtocol: Option: ["
                                     "usmNoAuthProtocol, "
                                     "usmHMACMD5AuthProtocol, "
                                     "usmHMACSHAAuthProtocol] :")
            print "\n"
            privProtocol = raw_input("Provide privProtocol: Option: ["
                                     "usmNoPrivProtocol, usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol] :")
            print "\n"
            securityEngineId = raw_input("Provide V3 security EngineId e.g. "
                                         "'800000d30300000e112245' :")
            print "\n"
            # Unknown protocol names silently fall back to the no-auth /
            # no-priv constants via dict.get defaults.
            config.addV3User(
                snmpEngine, userName=v3_user,
                authKey=v3_authkey, privKey=v3_privkey,
                authProtocol=__authProtocol.get(
                    authProtocol, config.usmNoAuthProtocol),
                privProtocol=__authProtocol.get(
                    privProtocol,config.usmNoPrivProtocol),
                securityEngineId=v2c.OctetString(
                    hexValue=securityEngineId))
        elif V3 in ["No", "n", "N", "no"]:
            break
        else:
            # Unrecognized answer: ask again.
            continue
def mib_builder(custom_mib_path, LOAD_MIB_MODULE):
mibBuilder = builder.MibBuilder()
try:
if custom_mib_path:
compiler.addMibCompiler(mibBuilder, sources=custom_mib_path.split(
","))
global mibViewController
mibViewController = view.MibViewController(mibBuilder)
if LOAD_MIB_MODULE:
_mibs=LOAD_MIB_MODULE.split(",")
mibBuilder.loadModules(*_mibs)
except error.MibNotFoundError as excep:
print " {} Mib Not Found!".format(excep)
def cbFun(snmpEngine, stateReference, contextEngineId, contextName,
          varBinds, cbCtx):
    """Notification callback: pretty-print every received trap/inform PDU."""
    global pdu_count
    global mibViewController
    print "####################### NEW Notification(PDU_COUNT: {}) #######################".format(pdu_count)
    # Pull transport/security details of the message currently processed.
    execContext = snmpEngine.observer.getExecutionContext(
        'rfc3412.receiveMessage:request'
    )
    print('#Notification from %s \n#ContextEngineId: "%s" \n#ContextName: "%s" \n#SNMPVER "%s" \n#SecurityName "%s"' % ('@'.join([str(x) for x in execContext['transportAddress']]),contextEngineId.prettyPrint(),contextName.prettyPrint(), execContext['securityModel'], execContext['securityName']))
    for oid, val in varBinds:
        # Resolve each varbind against the loaded MIBs for symbolic output.
        # NOTE(review): requires mib_builder() to have populated
        # mibViewController first — confirm it is called at startup.
        output = rfc1902.ObjectType(rfc1902.ObjectIdentity(oid),val).resolveWithMib(mibViewController).prettyPrint()
        print output
    pdu_count +=1
def check_parser():
    """Parse CLI flags; enable full pysnmp debugging when --debug is set."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--debug",
                     help="Enable Debug Mode")
    parsed = cli.parse_args()
    if parsed.debug:
        # Turn on every pysnmp debug category.
        debug.setLogger(debug.Debug('all'))
if __name__ == "__main__":
check_parser()
snmpEngine = engine.SnmpEngine()
COMMUNITYSTRING, CUSTOM_MIB_PATH, PORT, LOAD_MIB_MODULE, ip_type = user_input(
snmpEngine)
ntfrcv.NotificationReceiver(snmpEngine, cbFun)
add_transport(snmpEngine, PORT, ip_type)
snmpEngine.transportDispatcher.jobStarted(1)
try:
print "Trap Listener started ....."
print "To Stop Press Ctrl+c"
print "\n"
snmpEngine.transportDispatcher.runDispatcher()
except:
snmpEngine.transportDispatcher.closeDispatcher()
raise |
nagomiso/knnFeat | sklearn_api.py | <filename>sklearn_api.py
# coding: utf-8
from functools import partial
from itertools import product
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import check_X_y
class KNeighborsFeatures(BaseEstimator, TransformerMixin):
    """Out-of-fold k-nearest-neighbour distance features.

    For each sample, each class label and each neighbour order ``k`` in
    ``range(n_neighbors)``, the generated feature is the sum of the
    Euclidean distances from the sample to its ``k + 1`` nearest training
    points of that class. Features are computed out-of-fold with
    :class:`StratifiedKFold` so a sample never contributes to its own
    neighbourhood.

    Parameters
    ----------
    n_neighbors : int, default=1
        Number of neighbour orders generated per class.
    n_splits : int, default=5
        Number of cross-validation folds.
    shuffle : bool, default=False
        Whether to shuffle samples before splitting.
    random_state : int, RandomState instance or None, default=None
        Seed forwarded to :class:`StratifiedKFold` (used when shuffling).
    """

    def __init__(
            self,
            n_neighbors=1,
            n_splits=5,
            shuffle=False,
            random_state=None
    ):
        self.n_neighbors = n_neighbors
        self.n_splits = n_splits
        self.shuffle = shuffle
        self.random_state = random_state

    def fit(self, X, y=None):
        """No-op fit; present so ``fit_transform`` and Pipelines work."""
        return self

    @staticmethod
    def __distance(v, u):
        # Euclidean (L2) distance between two vectors.
        return np.linalg.norm(v - u)

    def _extract_feature_value(self, vector, X, y, class_label, k):
        """Sum of distances from ``vector`` to its ``k + 1`` nearest
        points of class ``class_label`` within ``X``."""
        X_target_class = X[y == class_label]
        dist_to_vector = partial(self.__distance, vector)
        # BUG FIX: the loop variable used to shadow the ``vector`` argument,
        # which worked only because partial() had already bound it.
        distances = np.array(
            [dist_to_vector(neighbor) for neighbor in X_target_class])
        return np.sum(np.sort(distances)[:k + 1])

    def _extract_feature_vectors(self, matrix, X, y, class_label, k):
        """Feature value of every row of ``matrix`` for one (class, k)."""
        extract_one = partial(
            self._extract_feature_value,
            X=X, y=y, class_label=class_label, k=k)
        return np.array([extract_one(vector) for vector in matrix])

    def transform(self, X, y):
        """Return the ``(n_samples, n_classes * n_neighbors)`` matrix."""
        X, y = check_X_y(X, y, force_all_finite=True)
        class_labels = set(y)
        row_size = X.shape[0]
        X_knn = np.empty((row_size, len(class_labels) * self.n_neighbors))
        # BUG FIX: shuffle/random_state are keyword-only parameters in
        # recent scikit-learn; passing them positionally raises TypeError.
        skf = StratifiedKFold(
            n_splits=self.n_splits, shuffle=self.shuffle,
            random_state=self.random_state)
        for train_idx, test_idx in skf.split(X, y):
            X_train, y_train = X[train_idx], y[train_idx]
            X_test = X[test_idx]
            features = [
                self._extract_feature_vectors(
                    X_test, X_train, y_train, class_label, k)
                for class_label, k in product(
                    class_labels, range(self.n_neighbors))
            ]
            X_knn[test_idx] = np.array(features).T
        return X_knn
|
nagomiso/knnFeat | knn_feat.py | import numpy as np
from sklearn.model_selection import KFold
def _distance(a, b):
return np.linalg.norm(b - a)
def _get_feat(data, X_train, y_train, class_index, k_index):
    """Sum of distances from ``data`` to its ``k_index + 1`` nearest
    training points belonging to class ``class_index``."""
    same_class = X_train[y_train == class_index]
    # Inlined Euclidean distance to each same-class training row.
    dists = np.array([np.linalg.norm(data - row) for row in same_class])
    # Sum of the (k_index + 1) smallest distances.
    return np.sum(np.sort(dists)[:k_index + 1])
def knnExtract(X, y, k = 1, holds = 5):
    """Out-of-fold kNN distance features, one column per (class, order)."""
    n_classes = len(set(y))
    res = np.empty((len(X), n_classes * k))
    folds = KFold(n_splits=holds, shuffle=True)
    for fit_idx, eval_idx in folds.split(X):
        X_fit, y_fit = X[fit_idx], y[fit_idx]
        X_eval = X[eval_idx]
        # One feature column per (class, neighbour-order) combination.
        cols = []
        for class_index in range(n_classes):
            for k_index in range(k):
                col = np.apply_along_axis(
                    _get_feat, 1, X_eval, X_fit, y_fit, class_index, k_index)
                cols.append(col)
        res[eval_idx] = np.array(cols).T
    return res
|
nagomiso/knnFeat | test/test_knnFeat.py | import unittest
import numpy as np
import sys, os
sys.path.append(os.getcwd())
from knnFeat import knnExtract
class TestKnnFeat(unittest.TestCase):
    """Smoke test: output shape is (n_samples, n_classes * k)."""

    def test_knnFeat(self):
        X = np.reshape(np.array([0, 1, 3, 4, 5, 6, 1, 1, 0, 3]), (5, 2))
        y = np.array([0, 0, 0, 1, 1])
        # With 2 classes and k=1 the feature matrix matches X's shape.
        result = knnExtract(X, y, k=1, holds=5)
        self.assertEqual(result.shape, X.shape)
# Run the suite when executed directly.
if __name__ == "__main__":
    unittest.main()
nagomiso/knnFeat | test/test_distance.py | import unittest
import numpy as np
import sys, os
sys.path.append(os.getcwd())
from knnFeat import _distance
class TestDistance(unittest.TestCase):
    """Unit test for the Euclidean distance helper."""

    def test_distance(self):
        origin = np.array([0, 0])
        corner = np.array([3, 4])
        # 3-4-5 right triangle.
        result = _distance(origin, corner)
        self.assertEqual(result, 5)
# Run the suite when executed directly.
if __name__ == "__main__":
    unittest.main()
nagomiso/knnFeat | test/test_get_feat.py | <reponame>nagomiso/knnFeat
import unittest
import numpy as np
import sys, os
sys.path.append(os.getcwd())
from knnFeat import _get_feat
class TestGetFeat(unittest.TestCase):
    """Unit tests for ``_get_feat`` across every (class_index, k_index) pair.

    The four cases previously duplicated the whole fixture and also
    mislabelled the computed value as ``expected``; they now share one
    helper with correctly-named actual/expected values.
    """

    def _check(self, X_flat, class_index, k_index, expected):
        """Run _get_feat on the shared 5x2 fixture and compare results."""
        data = np.array([0, 0])
        X_train = np.reshape(np.array(X_flat), (5, 2))
        y_train = np.array([0, 0, 0, 1, 1])
        actual = _get_feat(data, X_train, y_train, class_index, k_index)
        self.assertEqual(actual, expected)

    # Case 1: class_index == 0 and k_index == 0
    def test_get_feat_c0k0(self):
        # [0, 1] is the 1-nearest class-0 point
        self._check([0, 1, 3, 4, 5, 6, 1, 1, 0, 3], 0, 0, 1)

    # Case 2: class_index == 0 and k_index == 1
    def test_get_feat_c0k1(self):
        # [0, 1] and [3, 4] are the 2-nearest class-0 points
        self._check([0, 1, 3, 4, 5, 6, 1, 1, 0, 3], 0, 1, 1 + 5)

    # Case 3: class_index == 1 and k_index == 0
    def test_get_feat_c1k0(self):
        # [0, 2] is the 1-nearest class-1 point
        self._check([0, 1, 3, 4, 5, 6, 0, 2, 0, 3], 1, 0, 2)

    # Case 4: class_index == 1 and k_index == 1
    def test_get_feat_c1k1(self):
        # [0, 2] and [0, 3] are the 2-nearest class-1 points
        self._check([0, 1, 3, 4, 5, 6, 0, 2, 0, 3], 1, 1, 2 + 3)
# Run the suite when executed directly.
if __name__ == "__main__":
    unittest.main()
ryphon/rke2 | contrib/custom-image-kubelet/genconfig.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
import logging
import os
import re
import stat
import sys
import tarfile
from base64 import b64decode
from collections import defaultdict
from os.path import basename, normpath
from shutil import copyfileobj
import boto3
import click
import requests
from yaml import dump, load
try:
from yaml import CDumper as Dumper
from yaml import CLoader as Loader
except ImportError:
from yaml import Dumper, Loader
# Core images overridden via rke2 config flags (`<name>-image`).
IMAGES = ['kube-apiserver', 'kube-controller-manager', 'kube-scheduler', 'pause', 'etcd']
# Addon charts overridden through HelmChartConfig manifests instead.
COMPONENTS = ['kube-proxy', 'coredns', 'metrics-server']
# Matches "<registry-id>.dkr.ecr.<region>.amazonaws.com" hostnames.
ECR_REGEX = r'(?P<registry>.+)\.dkr\.ecr\.(?P<region>.+)\.amazonaws\.com'
@click.command(context_settings={'max_content_width': 120})
@click.option('--release-url', type=str, required=True, help='URL to a Kubernetes Release YAML')
@click.option('--data-dir', type=str, default='/var/lib/rancher/rke2', help='RKE2 data directory', show_default=True)
@click.option('--prefix', type=str, default='/', help='Prefix for output files created by this script', show_default=True)
def main(release_url, data_dir, prefix):
    """Generate rke2 config, chart overrides and kubelet binaries from a
    Kubernetes release YAML."""
    release = get_release(release_url)
    logging.info(f'Got Release: {release["metadata"]["name"]}')
    rke2_config = {
        'kubelet-path': normpath(f'{data_dir}/opt/bin/kubelet'),
        'data-dir': normpath(data_dir),
    }
    try:
        write_ecr_credentials(release, prefix)
    except Exception as e:
        raise Exception(f'Unable to write ECR credentials to registries.yaml: {e}') from e
    # Core images are redirected via `<name>-image` config flags...
    for image in IMAGES:
        alt_image = get_image_artifact(release, image)
        rke2_config[f'{image}-image'] = alt_image
    # ...while addon charts get per-chart HelmChartConfig manifests.
    for component in COMPONENTS:
        alt_image = get_image_artifact(release, component)
        write_chart_config(component, alt_image, f'{prefix}/{data_dir}')
    extract_archive(release, 'kubernetes-node-linux-amd64.tar.gz', f'{prefix}/{data_dir}')
    write_rke2_config(rke2_config, prefix)
def get_release(release_url):
    """Fetch and parse a Kubernetes release YAML; raises on HTTP errors."""
    resp = requests.get(release_url)
    resp.raise_for_status()
    return load(resp.text, Loader=Loader)
def get_image_artifact(release, component):
    """Return the image URI of the ``<component>-image`` asset.

    Raises Exception when the release has no matching image asset.
    """
    wanted = component + '-image'
    components = release.get('status', {}).get('components', [])
    for entry in components:
        for asset in entry.get('assets', []):
            if asset['name'] == wanted:
                return asset['image']['uri']
    raise Exception(f'Unable to find image asset for {component}')
def write_rke2_config(config, prefix):
    """Merge ``config`` into ``<prefix>/etc/rancher/rke2/config.yaml``.

    Keys already present in the file are kept unless overridden by
    ``config``; the file and its parent directory are created if missing.
    """
    etc_dir = f'{prefix}/etc/rancher/rke2'
    try:
        os.makedirs(normpath(etc_dir), mode=0o0700)
    except FileExistsError:
        pass
    # 'a+' creates the file when absent while still allowing a read pass.
    with open(normpath(f'{etc_dir}/config.yaml'), mode='a+') as config_file:
        config_file.seek(0, 0)
        merged = load(config_file, Loader=Loader) or dict()
        merged.update(config)
        logging.info(f'Writing config to {config_file.name}')
        # Rewrite the whole file with the merged mapping.
        config_file.seek(0, 0)
        config_file.truncate()
        dump(merged, config_file, Dumper=Dumper)
def write_chart_config(component, image, data_dir):
    """Write a HelmChartConfig manifest overriding a chart's image.

    Parameters
    ----------
    component : chart short name (manifest becomes ``rke2-<component>``)
    image : full image reference, ``repository:tag``
    data_dir : rke2 data directory to place the manifest under
    """
    manifests_dir = f'{data_dir}/server/manifests'
    # BUG FIX: split on the LAST colon only; a plain split(':') raised
    # ValueError for references whose registry carries a port
    # (e.g. "registry.local:5000/pause:3.2").
    repository, tag = image.rsplit(':', 1)
    values_yaml = {
        'image': {
            'repository': repository,
            'tag': tag,
        },
    }
    config_yaml = {
        'apiVersion': 'helm.cattle.io/v1',
        'kind': 'HelmChartConfig',
        'metadata': {
            'name': f'rke2-{component}',
            'namespace': 'kube-system',
        },
        'spec': {
            # Helm values are embedded as a YAML string.
            'valuesContent': dump(values_yaml, Dumper=Dumper),
        },
    }
    try:
        os.makedirs(normpath(manifests_dir), mode=0o0700)
    except FileExistsError:
        pass
    with open(normpath(f'{manifests_dir}/rke2-{component}-config.yaml'), mode='w+') as config_file:
        logging.info(f'Writing HelmChartConfig to {config_file.name}')
        dump(config_yaml, config_file, Dumper=Dumper)
def extract_archive(release, name, data_dir):
    """Locate the named Archive asset in the release and unpack it.

    Executables land in ``<data_dir>/opt/bin`` (created if missing).
    """
    bin_dir = normpath(f'{data_dir}/opt/bin')
    try:
        os.makedirs(bin_dir, mode=0o0700)
    except FileExistsError:
        pass
    components = release.get('status', {}).get('components', [])
    for entry in components:
        for asset in entry.get('assets', []):
            if asset['type'] == 'Archive' and asset['name'] == name:
                extract_network_tar(asset['archive']['uri'], bin_dir)
def extract_network_tar(archive_url, bin_dir):
    """Download a tarball and extract its executables into ``bin_dir``.

    Only regular files with the owner-execute bit set are extracted; each
    is flattened to its basename so no archive paths escape ``bin_dir``.
    """
    logging.info(f'Extracting files from {archive_url}')
    with requests.get(archive_url, stream=True) as response, io.BytesIO() as buf:
        response.raise_for_status()
        # Buffer the whole archive in memory so tarfile can seek.
        copyfileobj(response.raw, buf)
        buf.seek(0, 0)
        with tarfile.open(fileobj=buf) as tar:
            for member in tar.getmembers():
                executable = member.isreg() and bool(member.mode & stat.S_IXUSR)
                if not executable:
                    continue
                # Flatten: strip all directory components.
                member.name = basename(member.name)
                logging.info(f'Extracting {bin_dir}/{member.name}')
                tar.extract(member, bin_dir)
def write_ecr_credentials(release, prefix):
    """Merge ECR auth tokens for the release's registries into
    ``<prefix>/etc/rancher/rke2/registries.yaml``.

    No-op when the release references no ECR registries. Requires AWS
    credentials usable by boto3 for each referenced region.
    """
    etc_dir = f'{prefix}/etc/rancher/rke2'
    registries = get_ecr_registries(release)
    registry_configs = dict()
    registry_regions = defaultdict(list)
    # Group registry ids by region so a single token call covers each region.
    for registry in registries:
        match = re.match(ECR_REGEX, registry)
        if match:
            registry_regions[match.group('region')].append(match.group('registry'))
    if not registry_regions:
        return
    # NOTE: the loop variable below shadows the outer `registries` set;
    # inside this loop it is the list of registry ids for one region.
    for region, registries in registry_regions.items():
        logging.info(f'Getting auth tokens for {registries} in {region}')
        response = boto3.client('ecr', region_name=region).get_authorization_token(registryIds=registries)
        for auth in response.get('authorizationData', []):
            endpoint = auth['proxyEndpoint'].split('//')[1]
            # The authorization token is base64("username:password").
            username, password = b64decode(auth['authorizationToken']).decode().split(':')
            registry_configs[endpoint] = {'auth': {'username': username, 'password': password}}
    try:
        os.makedirs(normpath(etc_dir), mode=0o0700)
    except FileExistsError:
        pass
    # 'a+' creates registries.yaml when absent while letting us read it.
    with open(normpath(f'{etc_dir}/registries.yaml'), mode='a+') as registries_file:
        registries_file.seek(0, 0)
        registries_yaml = load(registries_file, Loader=Loader)
        if not registries_yaml:
            registries_yaml = dict()
        if 'configs' not in registries_yaml:
            registries_yaml['configs'] = dict()
        registries_yaml['configs'].update(registry_configs)
        logging.info(f'Writing credentials to {registries_file.name}')
        # Rewrite the whole file with the merged mapping.
        registries_file.seek(0, 0)
        registries_file.truncate()
        dump(registries_yaml, registries_file, Dumper=Dumper)
def get_ecr_registries(release):
    """Collect the set of ECR registry hostnames used by the release."""
    found = set()
    components = release.get('status', {}).get('components', [])
    for entry in components:
        for asset in entry.get('assets', []):
            if 'image' not in asset:
                continue
            # Hostname is everything before the first '/' of the image URI.
            host = asset['image']['uri'].split('/')[0]
            if '.ecr.' in host:
                found.add(host)
    return found
# CLI entry point: log to stdout; exit quietly on Ctrl-C / broken pipes.
if __name__ == '__main__':
    try:
        logging.basicConfig(format="%(levelname).1s %(message)s", level=logging.INFO, stream=sys.stdout)
        main()
    except (KeyboardInterrupt, BrokenPipeError):
        pass
    except Exception as e:
        # Top-level boundary: report the failure instead of a traceback.
        logging.fatal(e)
|
jmay0504/nitime | nitime/tests/test_lazy.py | <gh_stars>1-10
import sys
import os
import numpy.testing as npt
import numpy.testing.decorators as dec
import pytest
import nitime.lazyimports as l
# The next test requires nitime.lazyimports.disable_lazy_imports to have been
# set to false, otherwise the lazy import machinery is disabled and all imports
# happen at l.LazyImport calls which become equivalent to regular import
# statements
@pytest.mark.skipif(l.disable_lazy_imports, reason="Lazy imports disabled")
def test_lazy():
    """LazyImport defers the real import until first attribute access."""
    mlab = l.LazyImport('matplotlib.mlab')
    # repr for mlab should be <module 'matplotlib.mlab' will be lazily loaded>
    assert 'lazily loaded' in repr(mlab)
    # accessing mlab's attribute will cause an import of mlab
    # NOTE(review): mlab.dist was removed in matplotlib >= 3.x, so this
    # call likely fails on modern matplotlib — confirm the pinned version.
    npt.assert_equal(mlab.dist(1969,2011), 42.0)
    # now mlab should be of class LoadedLazyImport and repr(mlab) should be
    # <module 'matplotlib.mlab' from # '.../matplotlib/mlab.pyc>
    assert 'lazily loaded' not in repr(mlab)
# A known limitation of our lazy loading implementation is that, when it it is
# enabled, reloading the module raises an ImportError, and it also does not
# actually perform a reload, as demonstrated by this test.
@pytest.mark.skipif(l.disable_lazy_imports, reason="Lazy imports disabled")
def test_lazy_noreload():
    "Reloading of lazy modules causes ImportError"
    mod = l.LazyImport('sys')
    # accessing module dictionary will trigger an import
    len(mod.__dict__)
    # do not use named tuple feature for Python 2.6 compatibility
    major, minor = sys.version_info[:2]
    if major == 2:
        # Python 2: builtin reload().
        with pytest.raises(ImportError) as e_info:
            reload(mod)
    elif major == 3:
        # Python 3: imp.reload (imp is deprecated; importlib.reload is the
        # modern equivalent).
        import imp
        with pytest.raises(ImportError) as e_info:
            imp.reload(mod)
|
jmay0504/nitime | nitime/algorithms/spectral.py | <filename>nitime/algorithms/spectral.py
"""
Spectral transforms are used in order to estimate the frequency-domain
representation of time-series. Several methods can be used and this module
contains implementations of several algorithms for the calculation of spectral
transforms.
"""
import numpy as np
from nitime.lazy import matplotlib_mlab as mlab
from nitime.lazy import scipy_linalg as linalg
from nitime.lazy import scipy_signal as sig
from nitime.lazy import scipy_interpolate as interpolate
from nitime.lazy import scipy_fftpack as fftpack
import nitime.utils as utils
from nitime.utils import tapered_spectra, dpss_windows
# To support older versions of numpy that don't have tril_indices:
from nitime.index_utils import tril_indices, triu_indices
# Set global variables for the default NFFT to be used in spectral analysis and
# the overlap:
default_nfft = 64
# Default overlap between successive windows (half the NFFT length).
default_n_overlap = int(np.ceil(default_nfft // 2))
def get_spectra(time_series, method=None):
    r"""
    Compute the spectra of an n-tuple of time series and all of
    the pairwise cross-spectra.

    Parameters
    ----------
    time_series : float array
        The time-series, where time is the last dimension
    method : dict, optional
        contains: this_method:'welch'
           indicates that :func:`mlab.psd` will be used in
           order to calculate the psd/csd, in which case, additional optional
           inputs (and default values) are:
           NFFT=64
           Fs=2pi
           detrend=mlab.detrend_none
           window=mlab.window_hanning
           n_overlap=0
        this_method:'periodogram_csd'
           indicates that :func:`periodogram` will
           be used in order to calculate the psd/csd, in which case, additional
           optional inputs (and default values) are:
           Skx=None
           Sky=None
           N=None
           sides='onesided'
           normalize=True
           Fs=2pi
        this_method:'multi_taper_csd'
           indicates that :func:`multi_taper_psd` used in order to calculate
           psd/csd, in which case additional optional inputs (and default
           values) are:
           BW=0.01
           Fs=2pi
           sides = 'onesided'

    Returns
    -------
    f : float array
        The central frequencies for the frequency bands for which the spectra
        are estimated
    fxy : float array
        A semi-filled matrix with the cross-spectra of the signals. The csd of
        signal i and signal j is in f[j][i], but not in f[i][j] (which will be
        filled with zeros). For i=j fxy[i][j] is the psd of signal i.
    """
    if method is None:
        method = {'this_method': 'welch'}  # The default
    # If no choice of method was explicitly set, but other parameters were
    # passed, assume that the method is mlab:
    this_method = method.get('this_method', 'welch')
    if this_method == 'welch':
        NFFT = method.get('NFFT', default_nfft)
        Fs = method.get('Fs', 2 * np.pi)
        detrend = method.get('detrend', mlab.detrend_none)
        window = method.get('window', mlab.window_hanning)
        n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2)))
        # The length of the spectrum depends on how many sides are taken, which
        # depends on whether or not this is a complex object:
        if np.iscomplexobj(time_series):
            fxy_len = NFFT
        else:
            fxy_len = NFFT // 2 + 1
        # If there is only 1 channel in the time-series:
        if len(time_series.shape) == 1 or time_series.shape[0] == 1:
            temp, f = mlab.csd(time_series, time_series,
                               NFFT, Fs, detrend, window, n_overlap,
                               scale_by_freq=True)
            fxy = temp.squeeze()  # the output of mlab.csd has a weird shape
        else:
            fxy = np.zeros((time_series.shape[0],
                            time_series.shape[0],
                            fxy_len), dtype=complex)  # Make sure it's complex
            for i in range(time_series.shape[0]):
                for j in range(i, time_series.shape[0]):
                    #Notice funny indexing, in order to conform to the
                    #conventions of the other methods:
                    temp, f = mlab.csd(time_series[j], time_series[i],
                                       NFFT, Fs, detrend, window, n_overlap,
                                       scale_by_freq=True)
                    fxy[i][j] = temp.squeeze()  # the output of mlab.csd has a
                                                # weird shape
    elif this_method in ('multi_taper_csd', 'periodogram_csd'):
        # these methods should work with similar signatures
        mdict = method.copy()
        # eval() here only maps the validated method name to the local
        # function object; this_method is restricted to the two literals
        # checked above, so no arbitrary input reaches eval.
        func = eval(mdict.pop('this_method'))
        freqs, fxy = func(time_series, **mdict)
        # Convert from radians/cycle back to the user's sampling units.
        f = utils.circle_to_hz(freqs, mdict.get('Fs', 2 * np.pi))
    else:
        raise ValueError("Unknown method provided")
    return f, fxy.squeeze()
def get_spectra_bi(x, y, method=None):
    r"""
    Compute the auto-spectra of two signals and their cross-spectrum.

    Parameters
    ----------
    x, y : float arrays
        Time-series data.
    method : dict, optional
        Spectral-estimation method; see :func:`get_spectra` for details.

    Returns
    -------
    f : float array
        The central frequencies for the estimated frequency bands.
    fxx : float array
        The psd of the first signal.
    fyy : float array
        The psd of the second signal.
    fxy : float array
        The cross-spectral density of the two signals.
    """
    stacked = np.vstack((x, y))
    f, fij = get_spectra(stacked, method=method)
    # Diagonal entries hold the (real-valued) auto-spectra; the upper
    # off-diagonal entry carries the cross-spectrum per get_spectra's
    # filling convention.
    fxx, fyy = fij[0, 0].real, fij[1, 1].real
    return f, fxx, fyy, fij[0, 1]
# The following spectrum estimates are normalized to the convention
# adopted by MATLAB (or at least spectrum.psd)
# By definition, Sxx(f) = DTFT{Rxx(n)}, where Rxx(n) is the autocovariance
# function of x(n). Therefore the integral from
# [-Fs/2, Fs/2] of Sxx(f)*df is Rxx(0).
# And from the definition of Rxx(n),
# Rxx(0) = Expected-Value{x(n)x*(n)} = Expected-Value{ |x|^2 },
# which is estimated as (x*x.conj()).mean()
# In other words, sum(Sxx) * Fs / NFFT ~ var(x)
def periodogram(s, Fs=2 * np.pi, Sk=None, N=None,
                sides='default', normalize=True):
    """Takes an N-point periodogram estimate of the PSD function. The
    number of points N, or a precomputed FFT Sk may be provided. By default,
    the PSD function returned is normalized so that the integral of the PSD
    is equal to the mean squared amplitude (mean energy) of s (see Notes).

    Parameters
    ----------
    s : ndarray
        Signal(s) for which to estimate the PSD, time dimension in the last
        axis
    Fs : float (optional)
       The sampling rate. Defaults to 2*pi
    Sk : ndarray (optional)
       Precomputed FFT of s
    N : int (optional)
       Indicates an N-point FFT where N != s.shape[-1]
    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
        This determines which sides of the spectrum to return.
        For complex-valued inputs, the default is two-sided, for real-valued
        inputs, default is one-sided.
    normalize : boolean (optional, default=True)
        Normalizes the PSD

    Returns
    -------
    (f, psd) : tuple
       f: The central frequencies for the frequency bands
       psd: PSD estimate for each row of s
    """
    if Sk is not None:
        N = Sk.shape[-1]
    else:
        N = s.shape[-1] if not N else N
        Sk = fftpack.fft(s, n=N)
    pshape = list(Sk.shape)
    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'
    if sides == 'onesided':
        # putative Nyquist freq
        Fn = N // 2 + 1
        # last duplicate freq
        Fl = (N + 1) // 2
        pshape[-1] = Fn
        P = np.zeros(pshape, 'd')
        # BUG FIX: use true division so the grid spans [0, Fs/2] exactly.
        # The previous floor division (Fs // 2) truncated non-integer rates
        # (the default Fs = 2*pi yielded 3.0 instead of pi) and disagreed
        # with periodogram_csd's one-sided grid.
        freqs = np.linspace(0, Fs / 2, Fn)
        P[..., 0] = (Sk[..., 0] * Sk[..., 0].conj()).real
        # Frequencies strictly between DC and Nyquist appear twice in the
        # two-sided FFT, so their power is doubled here.
        P[..., 1:Fl] = 2 * (Sk[..., 1:Fl] * Sk[..., 1:Fl].conj()).real
        if Fn > Fl:
            P[..., Fn - 1] = (Sk[..., Fn - 1] * Sk[..., Fn - 1].conj()).real
    else:
        P = (Sk * Sk.conj()).real
        freqs = np.linspace(0, Fs, N, endpoint=False)
    if normalize:
        # NOTE(review): normalization uses s.shape[-1] even when Sk was
        # precomputed with a different N — preserved historical behavior.
        P /= (Fs * s.shape[-1])
    return freqs, P
def periodogram_csd(s, Fs=2 * np.pi, Sk=None, NFFT=None, sides='default',
                    normalize=True):
    """Takes an N-point periodogram estimate of all the cross spectral
    density functions between rows of s.

    The number of points N, or a precomputed FFT Sk may be provided. By
    default, the CSD function returned is normalized so that the integral of
    the PSD is equal to the mean squared amplitude (mean energy) of s (see
    Notes).

    Parameters
    ----------
    s : ndarray
        Signals for which to estimate the CSD, time dimension in the last axis
    Fs : float (optional)
       The sampling rate. Defaults to 2*pi
    Sk : ndarray (optional)
       Precomputed FFT of rows of s
    NFFT : int (optional)
       Indicates an N-point FFT where N != s.shape[-1]
    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
        This determines which sides of the spectrum to return.
        For complex-valued inputs, the default is two-sided, for real-valued
        inputs, default is one-sided.
    normalize : boolean (optional)
        Normalizes the PSD

    Returns
    -------
    freqs, csd_est : ndarrays
        The estimated CSD and the frequency points vector.
        The CSD{i,j}(f) are returned in a square "matrix" of vectors
        holding Sij(f). For an input array that is reshaped to (M,N),
        the output is (M,M,N)
    """
    s_shape = s.shape
    s.shape = (-1, s_shape[-1])
    # defining an Sk_loc is a little opaque, but it avoids having to
    # reset the shape of any user-given Sk later on
    if Sk is not None:
        Sk_shape = Sk.shape
        N = Sk.shape[-1]
        Sk_loc = Sk.reshape(np.prod(Sk_shape[:-1]), N)
    else:
        if NFFT is not None:
            N = NFFT
        else:
            N = s.shape[-1]
        Sk_loc = fftpack.fft(s, n=N)
    # reset s.shape
    s.shape = s_shape
    M = Sk_loc.shape[0]
    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'
    if sides == 'onesided':
        # putative Nyquist freq
        Fn = N // 2 + 1
        # last duplicate freq
        Fl = (N + 1) // 2
        csd_pairs = np.zeros((M, M, Fn), 'D')
        freqs = np.linspace(0, Fs / 2, Fn)
        for i in range(M):
            for j in range(i + 1):
                csd_pairs[i, j, 0] = Sk_loc[i, 0] * Sk_loc[j, 0].conj()
                # Double the duplicated (non-DC, non-Nyquist) frequencies.
                csd_pairs[i, j, 1:Fl] = 2 * (Sk_loc[i, 1:Fl] *
                                             Sk_loc[j, 1:Fl].conj())
                if Fn > Fl:
                    csd_pairs[i, j, Fn - 1] = (Sk_loc[i, Fn - 1] *
                                               Sk_loc[j, Fn - 1].conj())
    else:
        csd_pairs = np.zeros((M, M, N), 'D')
        # BUG FIX: the two-sided frequency grid spans [0, Fs), matching
        # periodogram() above; it previously stopped halfway at Fs/2 so
        # every returned frequency was half its true value.
        freqs = np.linspace(0, Fs, N, endpoint=False)
        for i in range(M):
            for j in range(i + 1):
                csd_pairs[i, j] = Sk_loc[i] * Sk_loc[j].conj()
    if normalize:
        csd_pairs /= (Fs*N)
    # Fill the upper triangle by Hermitian symmetry; the diagonal was added
    # twice by the sum below, so halve it back.
    csd_mat = csd_pairs.transpose(1,0,2).conj()
    csd_mat += csd_pairs
    diag_idc = (np.arange(M), np.arange(M))
    csd_mat[diag_idc] /= 2
    return freqs, csd_mat
def mtm_cross_spectrum(tx, ty, weights, sides='twosided'):
    r"""
    The cross-spectrum between two tapered time-series, derived from a
    multi-taper spectral estimation.

    Parameters
    ----------
    tx, ty : ndarray (K, ..., N)
        The complex DFTs of the tapered sequence
    weights : ndarray, or 2-tuple or list
        Weights can be specified as a length-2 list of weights for spectra tx
        and ty respectively. Alternatively, if tx is ty and this function is
        computing the spectral density function of a single sequence, the
        weights can be given as an ndarray of weights for the spectrum.
        Weights may be
        * scalars, if the shape of the array is (K, ..., 1)
        * vectors, with the shape of the array being the same as tx or ty
    sides : str in {'onesided', 'twosided'}
        For the symmetric spectra of a real sequence, optionally combine half
        of the frequencies and scale the duplicate frequencies in the range
        (0, F_nyquist).

    Notes
    -----
    spectral densities are always computed as
    :math:`S_{xy}^{mt}(f) = \frac{\sum_k
    [d_k^x(f)s_k^x(f)][d_k^y(f)(s_k^y(f))^{*}]}{[\sum_k
    d_k^x(f)^2]^{\frac{1}{2}}[\sum_k d_k^y(f)^2]^{\frac{1}{2}}}`
    """
    N = tx.shape[-1]
    if ty.shape != tx.shape:
        raise ValueError('shape mismatch between tx, ty')
    # pshape = list(tx.shape)
    # A two-element sequence of weights means a genuine cross-spectrum;
    # a single ndarray means the auto-spectrum of one sequence.
    if isinstance(weights, (list, tuple)):
        autospectrum = False
        weights_x = weights[0]
        weights_y = weights[1]
        # Denominator of the formula above: product of the L2 norms of the
        # two weight sets (over the taper axis).
        denom = (np.abs(weights_x) ** 2).sum(axis=0) ** 0.5
        denom *= (np.abs(weights_y) ** 2).sum(axis=0) ** 0.5
    else:
        autospectrum = True
        weights_x = weights
        weights_y = weights
        denom = (np.abs(weights) ** 2).sum(axis=0)
    if sides == 'onesided':
        # where the nyq freq should be
        Fn = N // 2 + 1
        # Truncate the last (frequency) axis to [0, Nyquist].
        truncated_slice = [slice(None)] * len(tx.shape)
        truncated_slice[-1] = slice(0, Fn)
        tsl = tuple(truncated_slice)
        tx = tx[tsl]
        ty = ty[tsl]
        # if weights.shape[-1] > 1 then make sure weights are truncated too
        if weights_x.shape[-1] > 1:
            weights_x = weights_x[tsl]
            weights_y = weights_y[tsl]
            denom = denom[tsl[1:]]
    # Weighted cross-product, summed over the taper axis, then normalized.
    sf = weights_x * tx
    sf *= (weights_y * ty).conj()
    sf = sf.sum(axis=0)
    sf /= denom
    if sides == 'onesided':
        # dbl power at duplicated freqs
        Fl = (N + 1) // 2
        sub_slice = [slice(None)] * len(sf.shape)
        sub_slice[-1] = slice(1, Fl)
        sf[tuple(sub_slice)] *= 2
    if autospectrum:
        # Auto-spectra are real by construction; drop the imaginary residue.
        return sf.real
    return sf
def multi_taper_psd(
    s, Fs=2 * np.pi, NW=None, BW=None, adaptive=False,
    jackknife=True, low_bias=True, sides='default', NFFT=None
):
    """Returns an estimate of the PSD function of s using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified
    by the user, a bandwidth of 4 times the fundamental frequency,
    corresponding to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
        An array of sampled random processes, where the time axis is assumed
        to be on the last axis
    Fs : float
        Sampling rate of the signal
    NW : float
        The normalized half-bandwidth of the data tapers, indicating a
        multiple of the fundamental frequency of the DFT (Fs/N).
        Common choices are n/2, for n >= 4. This parameter is unitless
        and more MATLAB compatible. As an alternative, set the BW
        parameter in Hz. See Notes on bandwidth.
    BW : float
        The sampling-relative bandwidth of the data tapers, in Hz.
    adaptive : {True/False}
        Use an adaptive weighting routine to combine the PSD estimates of
        different tapers.
    jackknife : {True/False}
        Use the jackknife method to make an estimate of the PSD variance
        at each point.
    low_bias : {True/False}
        Rather than use 2NW tapers, only use the tapers that have better than
        90% spectral concentration within the bandwidth (still using
        a maximum of 2NW tapers)
    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
        This determines which sides of the spectrum to return.
        For complex-valued inputs, the default is two-sided, for real-valued
        inputs, default is one-sided Indicates whether to return a one-sided
        or two-sided

    Returns
    -------
    (freqs, psd_est, var_or_nu) : ndarrays
        The first two arrays are the frequency points vector and the
        estimated PSD. The last returned array differs depending on whether
        the jackknife was used. It is either

        * The jackknife estimated variance of the log-psd, OR
        * The degrees of freedom in a chi2 model of how the estimated
          PSD is distributed about the true log-PSD (this is either
          2*floor(2*NW), or calculated from adaptive weights)

    Notes
    -----
    The bandwidth of the windowing function will determine the number
    tapers to use. This parameters represents trade-off between frequency
    resolution (lower main lobe BW for the taper) and variance reduction
    (higher BW and number of averaged estimates). Typically, the number of
    tapers is calculated as 2x the bandwidth-to-fundamental-frequency
    ratio, as these eigenfunctions have the best energy concentration.
    """
    # have last axis be time series for now
    N = s.shape[-1]
    # np.prod: the np.product alias was removed in NumPy 2.0
    M = int(np.prod(s.shape[:-1]))
    if BW is not None:
        # BW wins in a contest (since it was the original implementation)
        norm_BW = np.round(BW * N / Fs)
        NW = norm_BW / 2.0
    elif NW is None:
        # default NW
        NW = 4
    # (else BW is None and NW is not None) ... all set
    Kmax = int(2 * NW)
    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'
    # Find the direct spectral estimators S_k(f) for k tapered signals..
    # don't normalize the periodograms by 1/N as normal.. since the taper
    # windows are orthonormal, they effectively scale the signal by 1/N
    spectra, eigvals = tapered_spectra(
        s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias
    )
    NFFT = spectra.shape[-1]
    K = len(eigvals)
    # collapse spectra's shape back down to 3 dimensions
    spectra.shape = (M, K, NFFT)
    last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT
    # degrees of freedom at each timeseries, at each freq
    nu = np.empty((M, last_freq))
    if adaptive:
        weights = np.empty((M, K, last_freq))
        for i in range(M):
            weights[i], nu[i] = utils.adaptive_weights(
                spectra[i], eigvals, sides=sides
            )
    else:
        # let the weights simply be the square-root of the eigenvalues.
        # repeat these values across all n_chan channels of data
        weights = np.tile(np.sqrt(eigvals), M).reshape(M, K, 1)
        nu.fill(2 * K)
    if jackknife:
        jk_var = np.empty_like(nu)
        for i in range(M):
            jk_var[i] = utils.jackknifed_sdf_variance(
                spectra[i], eigvals, sides=sides, adaptive=adaptive
            )
    # Compute the unbiased spectral estimator for S(f) as the sum of
    # the S_k(f) weighted by the function w_k(f)**2, all divided by the
    # sum of the w_k(f)**2 over k
    # 1st, roll the tapers axis forward
    spectra = np.rollaxis(spectra, 1, start=0)
    weights = np.rollaxis(weights, 1, start=0)
    sdf_est = mtm_cross_spectrum(
        spectra, spectra, weights, sides=sides
    )
    sdf_est /= Fs
    if sides == 'onesided':
        # num must be an int: NFFT / 2 + 1 is a float and raises a
        # TypeError in np.linspace on NumPy >= 1.18
        freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, NFFT, endpoint=False)
    out_shape = s.shape[:-1] + (len(freqs),)
    sdf_est.shape = out_shape
    if jackknife:
        jk_var.shape = out_shape
        return freqs, sdf_est, jk_var
    else:
        nu.shape = out_shape
        return freqs, sdf_est, nu
def multi_taper_csd(s, Fs=2 * np.pi, NW=None, BW=None, low_bias=True,
                    adaptive=False, sides='default', NFFT=None):
    """Returns an estimate of the Cross Spectral Density (CSD) function
    between all (N choose 2) pairs of timeseries in s, using the multitaper
    method. If the NW product, or the BW and Fs in Hz are not specified by
    the user, a bandwidth of 4 times the fundamental frequency, corresponding
    to NW = 4 will be used.

    Parameters
    ----------
    s : ndarray
        An array of sampled random processes, where the time axis is
        assumed to be on the last axis. If ndim > 2, the number of time
        series to compare will still be taken as prod(s.shape[:-1])
    Fs : float, Sampling rate of the signal
    NW : float
        The normalized half-bandwidth of the data tapers, indicating a
        multiple of the fundamental frequency of the DFT (Fs/N).
        Common choices are n/2, for n >= 4. This parameter is unitless
        and more MATLAB compatible. As an alternative, set the BW
        parameter in Hz. See Notes on bandwidth.
    BW : float
        The sampling-relative bandwidth of the data tapers, in Hz.
    adaptive : {True, False}
        Use adaptive weighting to combine spectra
    low_bias : {True, False}
        Rather than use 2NW tapers, only use the tapers that have better than
        90% spectral concentration within the bandwidth (still using
        a maximum of 2NW tapers)
    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
        This determines which sides of the spectrum to return. For
        complex-valued inputs, the default is two-sided, for real-valued
        inputs, default is one-sided Indicates whether to return a one-sided
        or two-sided

    Returns
    -------
    (freqs, csd_est) : ndarrays
        The estimatated CSD and the frequency points vector.
        The CSD{i,j}(f) are returned in a square "matrix" of vectors
        holding Sij(f). For an input array of (M,N), the output is (M,M,N)

    Notes
    -----
    The bandwidth of the windowing function will determine the number
    tapers to use. This parameters represents trade-off between frequency
    resolution (lower main lobe BW for the taper) and variance reduction
    (higher BW and number of averaged estimates). Typically, the number of
    tapers is calculated as 2x the bandwidth-to-fundamental-frequency
    ratio, as these eigenfunctions have the best energy concentration.
    """
    # have last axis be time series for now
    N = s.shape[-1]
    # np.prod: the np.product alias was removed in NumPy 2.0
    M = int(np.prod(s.shape[:-1]))
    if BW is not None:
        # BW wins in a contest (since it was the original implementation)
        norm_BW = np.round(BW * N / Fs)
        NW = norm_BW / 2.0
    elif NW is None:
        # default NW
        NW = 4
    # (else BW is None and NW is not None) ... all set
    Kmax = int(2 * NW)
    # if the time series is a complex vector, a one sided PSD is invalid:
    if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
        sides = 'twosided'
    elif sides in ('default', 'onesided'):
        sides = 'onesided'
    # Find the direct spectral estimators S_k(f) for k tapered signals..
    # don't normalize the periodograms by 1/N as normal.. since the taper
    # windows are orthonormal, they effectively scale the signal by 1/N
    spectra, eigvals = tapered_spectra(
        s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias
    )
    NFFT = spectra.shape[-1]
    K = len(eigvals)
    # collapse spectra's shape back down to 3 dimensions
    spectra.shape = (M, K, NFFT)
    # compute the cross-spectral density functions
    last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT
    if adaptive:
        w = np.empty((M, K, last_freq))
        nu = np.empty((M, last_freq))
        for i in range(M):
            w[i], nu[i] = utils.adaptive_weights(
                spectra[i], eigvals, sides=sides
            )
    else:
        weights = np.sqrt(eigvals).reshape(K, 1)
    csd_pairs = np.zeros((M, M, last_freq), 'D')
    for i in range(M):
        if adaptive:
            wi = w[i]
        else:
            wi = weights
        for j in range(i + 1):
            if adaptive:
                wj = w[j]
            else:
                wj = weights
            ti = spectra[i]
            tj = spectra[j]
            csd_pairs[i, j] = mtm_cross_spectrum(ti, tj, (wi, wj), sides=sides)
    # Mirror the lower triangle into the upper triangle via conjugation;
    # the diagonal gets added to itself, so halve it afterwards.
    csdfs = csd_pairs.transpose(1, 0, 2).conj()
    csdfs += csd_pairs
    diag_idc = (np.arange(M), np.arange(M))
    csdfs[diag_idc] /= 2
    csdfs /= Fs
    if sides == 'onesided':
        # num must be an int: NFFT / 2 + 1 is a float and raises a
        # TypeError in np.linspace on NumPy >= 1.18
        freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
    else:
        freqs = np.linspace(0, Fs, NFFT, endpoint=False)
    return freqs, csdfs
def freq_response(b, a=1., n_freqs=1024, sides='onesided'):
    """
    Returns the frequency response of the IIR or FIR filter described
    by beta and alpha coefficients.

    Parameters
    ----------
    b : beta sequence (moving average component)
    a : alpha sequence (autoregressive component)
    n_freqs : size of frequency grid
    sides : {'onesided', 'twosided'}
       compute frequencies between [-PI,PI), or from [0, PI]

    Returns
    -------
    fgrid, H(e^jw)

    Notes
    -----
    For a description of the linear constant-coefficient difference equation,
    see
    http://en.wikipedia.org/wiki/Z-transform
    """
    # Delegate to scipy's freqz; a one-sided response only needs the
    # non-negative half of the frequency grid.
    if sides == 'onesided':
        n_points = n_freqs // 2 + 1
        full_circle = False
    else:
        n_points = n_freqs
        full_circle = True
    return sig.freqz(b, a=a, worN=n_points, whole=full_circle)
|
jmay0504/nitime | nitime/analysis/coherence.py | import warnings
import numpy as np
from nitime.lazy import scipy_stats_distributions as dist
from nitime.lazy import scipy_fftpack as fftpack
from nitime import descriptors as desc
from nitime import utils as tsu
from nitime import algorithms as tsa
# To support older versions of numpy that don't have tril_indices:
from nitime.index_utils import tril_indices, triu_indices
from .base import BaseAnalyzer
class CoherenceAnalyzer(BaseAnalyzer):
    """Analyzer object for coherence/coherency analysis """
    def __init__(self, input=None, method=None, unwrap_phases=False):
        """
        Parameters
        ----------
        input : TimeSeries object
           Containing the data to analyze.
        method : dict, optional,
            This is the method used for spectral analysis of the signal for the
            coherence caclulation. See :func:`algorithms.get_spectra`
            documentation for details.
        unwrap_phases : bool, optional
           Whether to unwrap the phases. This should be True if you assume that
           the time-delay is the same for all the frequency bands. See
           _[Sun2005] for details. Default : False
        Examples
        --------
        >>> import nitime.timeseries as ts
        >>> np.set_printoptions(precision=4)  # for doctesting
        >>> t1 = ts.TimeSeries(data = np.arange(0,1024,1).reshape(2,512),
        ...                    sampling_rate=np.pi)
        >>> c1 = CoherenceAnalyzer(t1)
        >>> c1.method['Fs'] # doctest: +ELLIPSIS
        3.1415926535... Hz
        >>> c1.method['this_method']
        'welch'
        >>> c1.coherence[0,1]
        array([ 0.9024, 0.9027, 0.9652, 0.9433, 0.9297, 0.9213, 0.9161,
                0.9126, 0.9102, 0.9085, 0.9072, 0.9063, 0.9055, 0.905 ,
                0.9045, 0.9041, 0.9038, 0.9036, 0.9034, 0.9032, 0.9031,
                0.9029, 0.9028, 0.9027, 0.9027, 0.9026, 0.9026, 0.9025,
                0.9025, 0.9025, 0.9025, 0.9026, 1. ])
        >>> c1.phase[0,1]
        array([ 0. , -0.035 , -0.4839, -0.4073, -0.3373, -0.2828, -0.241 ,
               -0.2085, -0.1826, -0.1615, -0.144 , -0.1292, -0.1164, -0.1054,
               -0.0956, -0.0869, -0.0791, -0.072 , -0.0656, -0.0596, -0.0541,
               -0.0489, -0.0441, -0.0396, -0.0353, -0.0314, -0.0277, -0.0244,
               -0.0216, -0.0197, -0.0198, -0.028 , 0. ])
        """
        BaseAnalyzer.__init__(self, input)
        # Set the variables for spectral estimation (can also be entered by
        # user):
        if method is None:
            self.method = {'this_method': 'welch',
                           'Fs': self.input.sampling_rate}
        else:
            self.method = method
        # If an input is provided, get the sampling rate from there, if you
        # want to over-ride that, input a method with a 'Fs' field specified:
        self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)
        self._unwrap_phases = unwrap_phases
        # The following only applies to the welch method:
        if (self.method.get('this_method') == 'welch' or
            self.method.get('this_method') is None):
            # If the input is shorter than NFFT, all the coherences will be
            # 1 per definition. Throw a warning about that:
            self.method['NFFT'] = self.method.get('NFFT', tsa.default_nfft)
            self.method['n_overlap'] = self.method.get('n_overlap',
                                                       tsa.default_n_overlap)
            if (self.input.shape[-1] <
                    (self.method['NFFT'] + self.method['n_overlap'])):
                e_s = "In nitime.analysis, the provided input time-series is"
                e_s += " shorter than the requested NFFT + n_overlap. All "
                e_s += "coherence values will be set to 1."
                warnings.warn(e_s, RuntimeWarning)
    @desc.setattr_on_read
    def coherency(self):
        """The standard output for this kind of analyzer is the coherency """
        data = self.input.data
        tseries_length = data.shape[0]
        spectrum_length = self.spectrum.shape[-1]
        # Complex-valued (channel, channel, frequency) array; only the upper
        # triangle (j >= i) is computed directly below.
        coherency = np.zeros((tseries_length,
                              tseries_length,
                              spectrum_length), dtype=complex)
        for i in range(tseries_length):
            for j in range(i, tseries_length):
                coherency[i][j] = tsa.coherency_spec(self.spectrum[i][j],
                                                     self.spectrum[i][i],
                                                     self.spectrum[j][j])
        # Fill the lower triangle via the conjugate symmetry of coherency.
        idx = tril_indices(tseries_length, -1)
        coherency[idx[0], idx[1], ...] = coherency[idx[1], idx[0], ...].conj()
        return coherency
    @desc.setattr_on_read
    def spectrum(self):
        """
        The spectra of each of the channels and cross-spectra between
        different channels  in the input TimeSeries object
        """
        f, spectrum = tsa.get_spectra(self.input.data, method=self.method)
        return spectrum
    @desc.setattr_on_read
    def frequencies(self):
        """
        The central frequencies in the bands
        """
        #XXX Use NFFT in the method in order to calculate these, without having
        #to calculate the spectrum:
        f, spectrum = tsa.get_spectra(self.input.data, method=self.method)
        return f
    @desc.setattr_on_read
    def coherence(self):
        """
        The coherence between the different channels in the input TimeSeries
        object
        """
        #XXX Calculate this from the standard output, instead of recalculating
        #the coherence:
        tseries_length = self.input.data.shape[0]
        spectrum_length = self.spectrum.shape[-1]
        coherence = np.zeros((tseries_length,
                              tseries_length,
                              spectrum_length))
        for i in range(tseries_length):
            for j in range(i, tseries_length):
                coherence[i][j] = tsa.coherence_spec(self.spectrum[i][j],
                                                     self.spectrum[i][i],
                                                     self.spectrum[j][j])
        # Coherence is symmetric in (i, j); mirror the upper triangle down.
        idx = tril_indices(tseries_length, -1)
        coherence[idx[0], idx[1], ...] = coherence[idx[1], idx[0], ...].conj()
        return coherence
    @desc.setattr_on_read
    def phase(self):
        """ The frequency-dependent phase relationship between all the pairwise
        combinations of time-series in the data"""
        #XXX calcluate this from the standard output, instead of recalculating:
        tseries_length = self.input.data.shape[0]
        spectrum_length = self.spectrum.shape[-1]
        phase = np.zeros((tseries_length,
                          tseries_length,
                          spectrum_length))
        for i in range(tseries_length):
            for j in range(i, tseries_length):
                phase[i][j] = np.angle(
                    self.spectrum[i][j])
                # The (j, i) phase is the angle of the conjugate cross-spectrum
                phase[j][i] = np.angle(
                    self.spectrum[i][j].conjugate())
        return phase
    @desc.setattr_on_read
    def delay(self):
        """ The delay in seconds between the two time series """
        p_shape = self.phase.shape[:-1]
        delay = np.zeros(self.phase.shape)
        for i in range(p_shape[0]):
            for j in range(p_shape[1]):
                this_phase = self.phase[i, j]
                # If requested, unwrap the phases:
                if self._unwrap_phases:
                    this_phase = tsu.unwrap_phases(this_phase)
                # NOTE(review): self.frequencies typically includes f=0, so
                # this division produces inf/nan in the zero-frequency bin --
                # confirm downstream consumers expect that.
                delay[i, j] = this_phase / (2 * np.pi * self.frequencies)
        return delay
    @desc.setattr_on_read
    def coherence_partial(self):
        """The partial coherence between data[i] and data[j], given data[k], as
        a function of frequency band"""
        tseries_length = self.input.data.shape[0]
        spectrum_length = self.spectrum.shape[-1]
        p_coherence = np.zeros((tseries_length,
                                tseries_length,
                                tseries_length,
                                spectrum_length))
        for i in range(tseries_length):
            for j in range(tseries_length):
                for k in range(tseries_length):
                    # Partialing a channel on itself is undefined; leave zeros
                    if j == k or i == k:
                        pass
                    else:
                        p_coherence[i][j][k] = tsa.coherence_partial_spec(
                            self.spectrum[i][j],
                            self.spectrum[i][i],
                            self.spectrum[j][j],
                            self.spectrum[i][k],
                            self.spectrum[j][k],
                            self.spectrum[k][k])
        idx = tril_indices(tseries_length, -1)
        p_coherence[idx[0], idx[1], ...] =\
            p_coherence[idx[1], idx[0], ...].conj()
        return p_coherence
class MTCoherenceAnalyzer(BaseAnalyzer):
    """ Analyzer for multi-taper coherence analysis, including jack-knife
    estimate of confidence interval """
    def __init__(self, input=None, bandwidth=None, alpha=0.05, adaptive=True):
        """
        Initializer function for the MTCoherenceAnalyzer

        Parameters
        ----------
        input : TimeSeries object
        bandwidth : float,
           The bandwidth of the windowing function will determine the number
           tapers to use. This parameters represents trade-off between
           frequency resolution (lower main lobe bandwidth for the taper) and
           variance reduction (higher bandwidth and number of averaged
           estimates). Per default will be set to 4 times the fundamental
           frequency, such that NW=4
        alpha : float, default =0.05
            This is the alpha used to construct a confidence interval around
            the multi-taper csd estimate, based on a jack-knife estimate of the
            variance [Thompson2007]_.
        adaptive : bool, default to True
            Whether to set the weights for the tapered spectra according to the
            adaptive algorithm (Thompson, 2007).

        Notes
        -----
        Thompson, DJ (2007) Jackknifing multitaper spectrum estimates. IEEE
        Signal Processing Magazing. 24: 20-30
        """
        BaseAnalyzer.__init__(self, input)
        if input is None:
            self.NW = 4
            self.bandwidth = None
        else:
            N = input.shape[-1]
            Fs = self.input.sampling_rate
            if bandwidth is not None:
                self.NW = bandwidth / (2 * Fs) * N
            else:
                self.NW = 4
                self.bandwidth = self.NW * (2 * Fs) / N
        self.alpha = alpha
        # Number of points in the one-sided spectrum:
        self._L = self.input.data.shape[-1] // 2 + 1
        self._adaptive = adaptive

    @desc.setattr_on_read
    def tapers(self):
        # The DPSS taper windows (first return value of dpss_windows)
        return tsa.dpss_windows(self.input.shape[-1], self.NW,
                                2 * self.NW - 1)[0]

    @desc.setattr_on_read
    def eigs(self):
        # The spectral-concentration eigenvalues of the tapers
        return tsa.dpss_windows(self.input.shape[-1], self.NW,
                                2 * self.NW - 1)[1]

    @desc.setattr_on_read
    def df(self):
        # The degrees of freedom:
        return 2 * self.NW - 1

    @desc.setattr_on_read
    def spectra(self):
        # Taper each channel with every DPSS window and FFT the tapered copies
        tdata = self.tapers[None, :, :] * self.input.data[:, None, :]
        tspectra = fftpack.fft(tdata)
        return tspectra

    @desc.setattr_on_read
    def weights(self):
        channel_n = self.input.data.shape[0]
        # self.df can be a float when a user-supplied bandwidth was given,
        # so cast to int for the shape (the non-adaptive branch below
        # already does so).
        w = np.empty((channel_n, int(self.df), self._L))
        if self._adaptive:
            for i in range(channel_n):
                # this is always a one-sided spectrum?
                w[i] = tsu.adaptive_weights(self.spectra[i],
                                            self.eigs,
                                            sides='onesided')[0]
        # Set the weights to be the square root of the eigen-values:
        else:
            wshape = [1] * len(self.spectra.shape)
            wshape[0] = channel_n
            wshape[-2] = int(self.df)
            pre_w = np.sqrt(self.eigs) + np.zeros((wshape[0],
                                                   self.eigs.shape[0]))
            w = pre_w.reshape(*wshape)
        return w

    @desc.setattr_on_read
    def coherence(self):
        nrows = self.input.data.shape[0]
        psd_mat = np.zeros((2, nrows, nrows, self._L), 'd')
        coh_mat = np.zeros((nrows, nrows, self._L), 'd')
        for i in range(self.input.data.shape[0]):
            for j in range(i):
                sxy = tsa.mtm_cross_spectrum(self.spectra[i], self.spectra[j],
                                            (self.weights[i], self.weights[j]),
                                             sides='onesided')
                sxx = tsa.mtm_cross_spectrum(self.spectra[i], self.spectra[i],
                                             self.weights[i],
                                             sides='onesided')
                # BUG FIX: channel j's auto-spectrum must use channel j's
                # weights (previously self.weights[i] was passed, which
                # skews the normalization when adaptive weighting is on).
                syy = tsa.mtm_cross_spectrum(self.spectra[j], self.spectra[j],
                                             self.weights[j],
                                             sides='onesided')
                psd_mat[0, i, j] = sxx
                psd_mat[1, i, j] = syy
                # Magnitude-squared coherence:
                coh_mat[i, j] = np.abs(sxy) ** 2
                coh_mat[i, j] /= (sxx * syy)
        # Coherence is symmetric in (i, j); mirror the lower triangle up.
        idx = triu_indices(self.input.data.shape[0], 1)
        coh_mat[idx[0], idx[1], ...] = coh_mat[idx[1], idx[0], ...].conj()
        return coh_mat

    @desc.setattr_on_read
    def confidence_interval(self):
        """The size of the 1-alpha confidence interval"""
        coh_var = np.zeros((self.input.data.shape[0],
                            self.input.data.shape[0],
                            self._L), 'd')
        for i in range(self.input.data.shape[0]):
            for j in range(i):
                if i != j:
                    coh_var[i, j] = tsu.jackknifed_coh_variance(
                        self.spectra[i],
                        self.spectra[j],
                        self.eigs,
                        adaptive=self._adaptive
                        )
        idx = triu_indices(self.input.data.shape[0], 1)
        coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()
        # Variance-stabilizing transform of the coherence before building a
        # t-distribution based interval:
        coh_mat_xform = tsu.normalize_coherence(self.coherence,
                                                2 * self.df - 2)
        lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,
                                        self.df - 1) * np.sqrt(coh_var)
        ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,
                                        self.df - 1) * np.sqrt(coh_var)
        # convert this measure with the normalizing function
        tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)
        tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)
        return ub - lb

    @desc.setattr_on_read
    def frequencies(self):
        # One-sided frequency grid matching the self._L spectrum points
        return np.linspace(0, self.input.sampling_rate / 2, self._L)
class SparseCoherenceAnalyzer(BaseAnalyzer):
    """
    This analyzer is intended for analysis of large sets of data, in which
    possibly only a subset of combinations of time-series needs to be compared.
    The constructor for this class receives as input not only a time-series
    object, but also a list of tuples with index combinations (i,j) for the
    combinations. Importantly, this class implements only the mlab csd function
    and cannot use other methods of spectral estimation
    """
    def __init__(self, time_series=None, ij=(0, 0), method=None, lb=0, ub=None,
                 prefer_speed_over_memory=True, scale_by_freq=True):
        """The constructor for the SparseCoherenceAnalyzer

        Parameters
        ----------
        time_series : a time-series object
        ij : a list of tuples, each containing a pair of indices.
            The resulting cache will contain the fft of time-series in the rows
            indexed by the unique elements of the union of i and j
        lb,ub : float,optional, default: lb=0, ub=None (max frequency)
            define a frequency band of interest
        prefer_speed_over_memory : Boolean, optional, default=True
            Does exactly what the name implies. If you have enough memory
        method : optional, dict
            The method for spectral estimation (see
            :func:`algorithms.get_spectra`)
        """
        BaseAnalyzer.__init__(self, time_series)
        # Initialize variables from the time series
        self.ij = ij
        # Set the variables for spectral estimation (can also be entered by
        # user):
        if method is None:
            self.method = {'this_method': 'welch'}
        else:
            self.method = method
        # Only the welch method is supported by the fft-caching machinery:
        if self.method['this_method'] != 'welch':
            e_s = "For SparseCoherenceAnalyzer, "
            e_s += "spectral estimation method must be welch"
            raise ValueError(e_s)
        self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)
        # Additional parameters for the coherency estimation:
        self.lb = lb
        self.ub = ub
        self.prefer_speed_over_memory = prefer_speed_over_memory
        self.scale_by_freq = scale_by_freq
    @desc.setattr_on_read
    def coherency(self):
        """ The default behavior is to calculate the cache, extract it and then
        output the coherency"""
        coherency = tsa.cache_to_coherency(self.cache, self.ij)
        return coherency
    @desc.setattr_on_read
    def coherence(self):
        """ The coherence values for the output"""
        # NOTE: for complex c, np.abs(c ** 2) equals np.abs(c) ** 2, so this
        # is the magnitude-squared coherence.
        coherence = np.abs(self.coherency ** 2)
        return coherence
    @desc.setattr_on_read
    def cache(self):
        """Caches the fft windows required by the other methods of the
        SparseCoherenceAnalyzer. Calculate only once and reuse
        """
        data = self.input.data
        f, cache = tsa.cache_fft(data,
                                 self.ij,
                                 lb=self.lb,
                                 ub=self.ub,
                                 method=self.method,
                                 prefer_speed_over_memory=self.prefer_speed_over_memory,
                                 scale_by_freq=self.scale_by_freq)
        return cache
    @desc.setattr_on_read
    def spectrum(self):
        """get the spectrum for the collection of time-series in this analyzer
        """
        spectrum = tsa.cache_to_psd(self.cache, self.ij)
        return spectrum
    @desc.setattr_on_read
    def phases(self):
        """The frequency-band dependent phases of the spectra of each of the
           time -series i,j in the analyzer"""
        phase = tsa.cache_to_phase(self.cache, self.ij)
        return phase
    @desc.setattr_on_read
    def relative_phases(self):
        """The frequency-band dependent relative phase between the two
        time-series """
        return np.angle(self.coherency)
    @desc.setattr_on_read
    def delay(self):
        """ The delay in seconds between the two time series """
        # NOTE(review): the frequency grid may include f=0, which makes the
        # zero-frequency bin inf/nan -- confirm callers expect that.
        return self.relative_phases / (2 * np.pi * self.frequencies)
    @desc.setattr_on_read
    def frequencies(self):
        """Get the central frequencies for the frequency bands, given the
           method of estimating the spectrum """
        self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)
        NFFT = self.method.get('NFFT', 64)
        Fs = self.method.get('Fs')
        freqs = tsu.get_freqs(Fs, NFFT)
        lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)
        return freqs[lb_idx:ub_idx]
class SeedCoherenceAnalyzer(object):
    """
    This analyzer takes two time-series. The first is designated as a
    time-series of seeds. The other is designated as a time-series of targets.
    The analyzer performs a coherence analysis between each of the channels in
    the seed time-series and *all* of the channels in the target time-series.

    Note
    ----
    This is a convenience class, which provides a convenient-to-use interface
    to the SparseCoherenceAnalyzer
    """
    def __init__(self, seed_time_series=None, target_time_series=None,
                 method=None, lb=0, ub=None, prefer_speed_over_memory=True,
                 scale_by_freq=True):
        """
        The constructor for the SeedCoherenceAnalyzer

        Parameters
        ----------
        seed_time_series: a time-series object
        target_time_series: a time-series object
        lb,ub: float,optional, default: lb=0, ub=None (max frequency)
            define a frequency band of interest
        prefer_speed_over_memory: Boolean, optional, default=True
            Makes things go a bit faster, if you have enough memory
        """
        self.seed = seed_time_series
        self.target = target_time_series
        # Check that the seed and the target have the same sampling rate:
        if self.seed.sampling_rate != self.target.sampling_rate:
            e_s = "The sampling rate for the seed time-series and the target"
            e_s += " time-series need to be identical."
            raise ValueError(e_s)
        # Set the variables for spectral estimation (can also be entered by
        # user):
        if method is None:
            self.method = {'this_method': 'welch'}
        else:
            self.method = method
        # Only the welch method is supported by the fft-caching machinery:
        if ('this_method' in self.method.keys() and
            self.method['this_method'] != 'welch'):
            e_s = "For SeedCoherenceAnalyzer, "
            e_s += "spectral estimation method must be welch"
            raise ValueError(e_s)
        # Additional parameters for the coherency estimation:
        self.lb = lb
        self.ub = ub
        self.prefer_speed_over_memory = prefer_speed_over_memory
        self.scale_by_freq = scale_by_freq

    @desc.setattr_on_read
    def coherence(self):
        """
        The coherence between each of the channels of the seed time series and
        all the channels of the target time-series.
        """
        return np.abs(self.coherency) ** 2

    @desc.setattr_on_read
    def frequencies(self):
        """Get the central frequencies for the frequency bands, given the
           method of estimating the spectrum """
        # Get the sampling rate from the seed time-series:
        self.method['Fs'] = self.method.get('Fs', self.seed.sampling_rate)
        NFFT = self.method.get('NFFT', 64)
        Fs = self.method.get('Fs')
        freqs = tsu.get_freqs(Fs, NFFT)
        lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)
        return freqs[lb_idx:ub_idx]

    @desc.setattr_on_read
    def target_cache(self):
        data = self.target.data
        # Make a cache with all the fft windows for each of the channels in
        # the target. This is the kind of input that cache_fft expects:
        ij = list(zip(np.arange(data.shape[0]), np.arange(data.shape[0])))
        f, cache = tsa.cache_fft(data, ij, lb=self.lb, ub=self.ub,
                                 method=self.method,
                                 prefer_speed_over_memory=self.prefer_speed_over_memory,
                                 scale_by_freq=self.scale_by_freq)
        return cache

    @desc.setattr_on_read
    def coherency(self):
        # Pre-allocate the final result.  Note: the builtin ``complex`` is
        # used here; the ``np.complex`` alias was removed in NumPy 1.24.
        if len(self.seed.shape) > 1:
            Cxy = np.empty((self.seed.data.shape[0],
                            self.target.data.shape[0],
                            self.frequencies.shape[0]), dtype=complex)
        else:
            Cxy = np.empty((self.target.data.shape[0],
                            self.frequencies.shape[0]), dtype=complex)
        # Get the fft window cache for the target time-series:
        cache = self.target_cache
        # A list of indices for the target:
        target_chan_idx = np.arange(self.target.data.shape[0])
        # This is a list of indices into the cached fft window libraries,
        # setting the index of the seed to be -1, so that it is easily
        # distinguished from the target indices:
        ij = list(zip(np.ones_like(target_chan_idx) * -1, target_chan_idx))
        # If there is more than one channel in the seed time-series:
        if len(self.seed.shape) > 1:
            for seed_idx, this_seed in enumerate(self.seed.data):
                # Here ij is 0, because it is just one channel and we stack
                # the channel onto itself in order for the input to the
                # function to make sense:
                f, seed_cache = tsa.cache_fft(
                    np.vstack([this_seed, this_seed]),
                    [(0, 0)],
                    lb=self.lb,
                    ub=self.ub,
                    method=self.method,
                    prefer_speed_over_memory=self.prefer_speed_over_memory,
                    scale_by_freq=self.scale_by_freq)
                # Insert the seed_cache into the target_cache:
                cache['FFT_slices'][-1] = seed_cache['FFT_slices'][0]
                # If this is true, the cache contains both FFT_slices and
                # FFT_conj_slices:
                if self.prefer_speed_over_memory:
                    cache['FFT_conj_slices'][-1] = \
                        seed_cache['FFT_conj_slices'][0]
                # This performs the caclulation for this seed:
                Cxy[seed_idx] = tsa.cache_to_coherency(cache, ij)
        # In the case where there is only one channel in the seed time-series:
        else:
            f, seed_cache = tsa.cache_fft(
                np.vstack([self.seed.data,
                           self.seed.data]),
                [(0, 0)],
                lb=self.lb,
                ub=self.ub,
                method=self.method,
                prefer_speed_over_memory=self.prefer_speed_over_memory,
                scale_by_freq=self.scale_by_freq)
            cache['FFT_slices'][-1] = seed_cache['FFT_slices'][0]
            if self.prefer_speed_over_memory:
                cache['FFT_conj_slices'][-1] = \
                    seed_cache['FFT_conj_slices'][0]
            Cxy = tsa.cache_to_coherency(cache, ij)
        return Cxy.squeeze()

    @desc.setattr_on_read
    def relative_phases(self):
        """The frequency-band dependent relative phase between the two
        time-series """
        return np.angle(self.coherency)

    @desc.setattr_on_read
    def delay(self):
        """ The delay in seconds between the two time series """
        return self.relative_phases / (2 * np.pi * self.frequencies)
|
hamzaleroi/dnn_migration | screw_detection/data_join.py | #!/usr/bin/env python
import h5py
import cv2
import numpy as np
import os
from glob import glob
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import shutil
import argparse
def create_dir(_path):
    """Create directory ``_path`` (with any missing parents) if needed.

    os.makedirs(..., exist_ok=True) avoids the check-then-create race of
    the previous ``exists()`` + ``mkdir`` pair, and also handles nested
    paths, which ``os.mkdir`` cannot.
    """
    os.makedirs(_path, exist_ok=True)
def merge_dir(_dir):
    """Copy every .jpg/.png image from each class folder of ``_dir`` into
    the combined dataset directory, renaming files with a running counter.

    Every class other than 'non_screw' is collapsed into a single 'screw'
    class. Relies on module-level globals: ``ds_dir`` (destination root)
    and ``count`` (running file counter, advanced across calls).
    """
    class_names = os.listdir(_dir)
    print('Found Classes:', class_names)
    global count
    for class_name in tqdm(class_names):
        # Collapse everything that is not 'non_screw' into one 'screw' class
        if class_name == 'non_screw':
            class_dir = os.path.join(ds_dir, class_name)
        else:
            class_dir = os.path.join(ds_dir, 'screw')
        create_dir(class_dir)
        source_dir = os.path.join(_dir, class_name)
        # Single loop over both supported extensions (previously two
        # near-duplicate loops, one carrying a leftover debug print)
        for ext in ('jpg', 'png'):
            for src_ipath in tqdm(glob(os.path.join(source_dir, '*.' + ext))):
                dest_ipath = os.path.join(class_dir, f"{count}.{ext}")
                shutil.copyfile(src_ipath, dest_ipath)
                count += 1
def get_input_args():
    '''
    1. Read command line arguments and convert them into the appropriate data
       type.
    2. Returns a data structure containing everything that has been read, or
       the default values for the parameters that haven't been explicitly
       specified.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_dir', type=str,
                        default=os.path.join(os.getcwd(), 'data'),
                        help='The folder in which images are stored')
    # Typo fix: help text previously read "to chich images will be put"
    parser.add_argument('--dest_dir', type=str,
                        default=os.path.join(os.getcwd(), 'comb'),
                        help='The folder to which images will be put')
    in_args = parser.parse_args()
    return in_args
if __name__ == '__main__':
    # Script entry point: merge per-class image folders into one combined
    # dataset directory (see merge_dir).
    args = get_input_args()
    # Fall back to ./dataset when the requested source folder does not exist.
    data_dir = args.src_dir if os.path.exists(args.src_dir) else os.path.join(os.getcwd(),'dataset')
    class_names = os.listdir(data_dir)
    print('Found Classes:',class_names)
    # NOTE(review): when dest_dir is missing this falls back to ./data, which
    # is the same folder used as the *default source* above -- confirm this
    # fallback is intentional.
    ds_dir=args.dest_dir if os.path.exists(args.dest_dir) else os.path.join(os.getcwd(),'data')
    create_dir(ds_dir)
    # Global running counter consumed by merge_dir for unique file names.
    count=0
    merge_dir(data_dir)
|
hamzaleroi/dnn_migration | screw_detection/infer.py | #!/usr/bin/env python
import numpy as np
import argparse
import matplotlib.pyplot as plt
import cv2
import os
from glob import glob
import tensorflow as tf
import warnings
import os
from tqdm import tqdm
def create_model(iden,NB_CLASS,NB_CHANNEL,WEIGHT_PATH):
    """Build the backbone named *iden*, attach a softmax head, load weights.

    Returns (model, input_dim) where input_dim is the square input size of
    the chosen backbone. NOTE(review): an *iden* outside the six handled
    names leaves base_model_wrapper/IMG_DIM unbound and raises NameError
    below — consider an explicit ValueError.
    """
    if iden=='densenet201':
        base_model_wrapper=tf.keras.applications.DenseNet201
        IMG_DIM=221
    if iden=='inceptionResNetv2':
        base_model_wrapper=tf.keras.applications.InceptionResNetV2
        IMG_DIM=139
    if iden=='inceptionv3':
        base_model_wrapper=tf.keras.applications.InceptionV3
        IMG_DIM=139
    if iden=='resnet101v2':
        base_model_wrapper=tf.keras.applications.ResNet101V2
        IMG_DIM=64
    if iden=='resnext101':
        base_model_wrapper=ResNeXt101
        IMG_DIM=64
    if iden=='xception':
        base_model_wrapper=tf.keras.applications.Xception
        IMG_DIM=71
    # Headless backbone; weights come from the local .h5, not imagenet.
    base_model = base_model_wrapper(include_top=False,
                        weights=None,
                        input_shape=(IMG_DIM,IMG_DIM,NB_CHANNEL))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(NB_CLASS, activation='softmax')(x)
    model =tf.keras.models.Model(inputs=base_model.input,outputs=x,name=iden)
    model.load_weights(WEIGHT_PATH)
    dim=IMG_DIM
    return model,dim
def predict_on_data(_paths, export_folder='/tmp'):
    """Detect screws in each image and write an annotated copy to disk.

    Pipeline per image: resize to the global DIM1/DIM2 canvas, Hough-circle
    search, then average the positive-class score of the models in the
    global ``INTEGRATED`` list over each circle ROI; circles scoring above
    ``score_thresh`` are drawn green.
    """
    for img_path in tqdm(_paths):
        img_raw = cv2.imread(img_path)
        # Drop a possible alpha channel.
        img_raw = img_raw[:,:,:3]
        img_h, img_w = img_raw.shape[:2]
        # Keep the long side mapped to DIM2 regardless of orientation.
        if img_h>img_w:
            ratiox = DIM1/img_w
            ratioy = DIM2/img_h
            img_raw = cv2.resize(img_raw, (DIM1,DIM2))
        else:
            ratiox = DIM2/img_w
            ratioy = DIM1/img_h
            img_raw = cv2.resize(img_raw, (DIM2,DIM1))
        gray = cv2.cvtColor(img_raw, cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT,
                                   1, 100,
                                   param1=hough_upper_threshold,
                                   param2=hough_lower_threshold,
                                   minRadius=hough_min_radius,
                                   maxRadius=hough_max_radius)
        if circles is not None:
            # convert the (x, y) coordinates and radius of the circles to integers
            circles = np.round(circles[0, :]).astype("int")
            # copy the image, for painting we will use another
            drawn_image = img_raw.copy()
            drawn_image = cv2.cvtColor(drawn_image, cv2.COLOR_RGB2BGR)
            # loop over the found circles
            for i in range(len(circles)):
                # get one
                (x, y, r) = circles[i]
                # draw the circle in the output image, then draw a rectangle corresponding to the center of the circle
                cv2.rectangle(drawn_image, (x - r, y - r), (x + r, y + r), (255, 0, 0), 2)
                # bbox
                xmin = x-r
                xmax = x+r
                ymin = y-r
                ymax = y+r
                # get the above rectangle as ROI
                screw_roi = img_raw[ymin:ymax,xmin:xmax]
                #can't go on with the empty or corrupt roi
                if (screw_roi.size == 0):
                    break
                # Ensemble: average positive-class probability over models.
                pred_val=0
                for model,dim in INTEGRATED:
                    # Resize ROI to this model's expected input and normalise.
                    data = cv2.resize(screw_roi,(dim,dim))
                    data = data.astype('float32')/255.0
                    tensor = np.expand_dims(data,axis=0)
                    pred=model.predict(tensor)[0]
                    pred_val+=pred[1]
                # NOTE(review): divisor hard-codes an ensemble of 2; should
                # presumably be len(INTEGRATED) — confirm.
                score=pred_val/2
                if score>score_thresh:
                    cv2.circle(drawn_image, (int(x), int(y)), int(r), (0, 255, 0), 5) #green
        # NOTE(review): when no circle is found drawn_image is unbound here
        # and this raises NameError — confirm intended behaviour.
        cv2.imwrite(os.path.join(export_folder,'inference_' + img_path.split('/')[-1]) if os.path.exists(export_folder) else os.path.join(os.getcwd(),'inference_' + img_path.split('/')[-1]),drawn_image)
# getting input arguments
def get_input_args():
    """Parse the command line for the detection inference script.

    Returns the argparse namespace; ``model_location`` falls back to
    <cwd>/screw_detection/weights when not supplied.
    """
    cli = argparse.ArgumentParser()
    default_weights = os.path.join(os.getcwd(), 'screw_detection', 'weights')
    cli.add_argument('--model_location', type=str, default=default_weights,
                     help='The model location')
    cli.add_argument('image_path', type=str,
                     help='Image to apply inference on')
    return cli.parse_args()
# Silence noisy framework warnings for cleaner CLI output.
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Supported backbone identifiers (see create_model).
idens=['densenet201','inceptionResNetv2','inceptionv3','resnet101v2','resnext101','xception']
# parameters
NB_CHANNEL=3 # @param
NB_CLASS=2 # @param
# Filled with (model, input_dim) pairs at startup; read by predict_on_data.
INTEGRATED=[]
INFER_WHOLE_FOLDER = False #@param {type:"boolean"}
# Hough-circle detector tuning (see cv2.HoughCircles param1/param2).
hough_upper_threshold = 70 # @param {type:"slider", min:0, max:100, step:1}
hough_lower_threshold = 10 # @param {type:"slider", min:0, max:100, step:1}
hough_min_radius = 5 # @param {type:"slider", min:0, max:100, step:1}
hough_max_radius = 20 # @param {type:"slider", min:0, max:100, step:1}
# Minimum averaged ensemble score for a circle to count as a screw.
score_thresh=0.8 # @param {type:"slider", min:0, max:1.0, step:0.01}
# Canvas the input image is resized onto before detection.
DIM1=986 # @param
DIM2=1382 # @param
# Parse the CLI once; the rest of the module-level code consumes `args`.
args = get_input_args()
if __name__ == '__main__':
    path = args.model_location
    print(path)
    # Resolve the weights folder to an absolute path.
    root = path if os.path.isabs(path) else os.path.join(os.getcwd(),path.strip('./'))
    # modeling: the two ensemble members averaged in predict_on_data()
    model1= 'inceptionv3' # @param ['densenet201','inceptionResNetv2','inceptionv3','resnet101v2','resnext101','xception']
    model2= 'xception' # @param ['densenet201','inceptionResNetv2','inceptionv3','resnet101v2','resnext101','xception']
    integrated=[model1,model2]
    # Weights
    WEIGHTS_INTGRATED=[os.path.join(root,'{}.h5'.format(iden))
    for iden in integrated]
    for iden,WEIGHT_PATH in zip(integrated,WEIGHTS_INTGRATED):
        print('Loading Integrated Models:',iden)
        INTEGRATED.append(create_model(iden,NB_CLASS,NB_CHANNEL,WEIGHT_PATH))
# # ROI and Prediction Wrappers
DATA_PATH=args.image_path
# BUGFIX: initialise _paths so the predict_on_data(_paths) call below cannot
# raise NameError when DATA_PATH is invalid; an empty list makes it a no-op.
_paths=[]
if INFER_WHOLE_FOLDER:
    if '.' in DATA_PATH:
        print('Please Provide a folder path')
    else:
        _paths=[_path for _path in glob(os.path.join(DATA_PATH,'*.*'))]
        print('Found Images:')
        for _path in _paths:
            print(_path)
else:
    if os.path.isfile(DATA_PATH):
        _paths=[DATA_PATH]
        print('Found Image:')
        print(_paths[0])
    else:
        print('The provided DATA_PATH is Not a file')
predict_on_data(_paths) |
hamzaleroi/dnn_migration | screw_classification/data.py | <filename>screw_classification/data.py
#!/usr/bin/env python
# coding: utf-8
from tqdm import tqdm_notebook
import shutil
import tensorflow as tf
from tqdm.notebook import tqdm
from albumentations import Resize
import albumentations as albu
from albumentations import (Blur, Compose, HorizontalFlip, HueSaturationValue,
IAAEmboss, IAASharpen, IAAAffine, JpegCompression, OneOf,
RandomBrightness, RandomBrightnessContrast,
RandomContrast, RandomCrop, RandomGamma, Rotate,
RandomRotate90, RGBShift, ShiftScaleRotate,
Transpose, VerticalFlip, ElasticTransform, GridDistortion, OpticalDistortion)
import imageio
from glob import glob
from sklearn.utils import shuffle
import random
from PIL import Image as imgop
import cv2
import matplotlib.pyplot as plt
import numpy as np
import h5py
import os
import argparse
TFIDEN = 'ScrewCTF'
def readh5(d_path):
    """Load the 'data' dataset from the HDF5 file at *d_path* as an ndarray.

    Uses a context manager so the file handle is always closed — the
    original left the h5py.File open (released only by GC).
    """
    with h5py.File(d_path, 'r') as h5f:
        return np.array(h5f['data'])
def create_dir(base_dir, ext_name):
    """Ensure *base_dir*/*ext_name* exists and return its full path."""
    target = os.path.join(base_dir, ext_name)
    # Create only when missing; os.mkdir raises on an existing directory.
    if os.path.exists(target):
        return target
    os.mkdir(target)
    return target
def aug():
    """Return the albumentations pipeline used to oversample rare classes.

    The commented equivalents map each transform to the Keras
    ImageDataGenerator parameter it replaces.
    """
    return Compose([HorizontalFlip(p=0.5),  # applied
                    VerticalFlip(p=0.5),  # applied
                    ShiftScaleRotate(shift_limit=(0.1, 0.1),  # width_shift_range=0.1,# height_shift_range=0.1,
                                     # zoom_range=[0.9,1.25]
                                     scale_limit=(0.9, 1.25),
                                     rotate_limit=20, p=0.5),  # rotation_range=20,
                    RandomBrightnessContrast(brightness_limit=(
                        0.4, 1.5), p=0.5),  # brightness_range=[0.4,1.5]
                    # shear_range=0.01,fill_mode='reflect'
                    IAAAffine(shear=0.01, mode='reflect', p=0.5)
                    ], p=1)
def fill_missing(source, nb_needed, iden):
    """Pad *source* with *nb_needed* augmented copies of random members.

    *iden* is only used for the progress message. Returns the original
    list unchanged when nothing is needed; otherwise a new concatenated
    list (the input list itself is not mutated).
    """
    if nb_needed > 0:
        print('Filling:', iden)
        augmented = []
        for i in tqdm(range(nb_needed)):
            # Draw a random base image and run it through the aug() pipeline.
            img = random.choice(source)
            img = aug()(image=img)
            img = img['image']
            img = img.astype(np.uint8)
            augmented.append(img)
        return source + augmented
    else:
        return source
def _bytes_feature(value):
    # Wrap a raw bytes value as a TFRecord Feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
    # Wrap a scalar int as a TFRecord Feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
    # Wrap a scalar float as a TFRecord Feature.
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def to_tfrecord(data, labels, save_dir, r_num):
    """Serialise one shard of (image, label) pairs to <save_dir>/<r_num>.tfrecord.

    Images are PNG-encoded so the reader can use tf.image.decode_png.
    """
    tfrecord_name = '{}.tfrecord'.format(r_num)
    tfrecord_path = os.path.join(save_dir, tfrecord_name)
    with tf.io.TFRecordWriter(tfrecord_path) as writer:
        for img, label in zip(data, labels):
            _, img_coded = cv2.imencode('.png', img)
            # Byte conversion
            image_png_bytes = img_coded.tobytes()
            # NOTE(review): this rebinds the `data` parameter inside the
            # loop (harmless here since zip() was built first, but fragile).
            data = {'image': _bytes_feature(image_png_bytes),
                    'label': _int64_feature(label)
                    }
            features = tf.train.Features(feature=data)
            example = tf.train.Example(features=features)
            serialized = example.SerializeToString()
            writer.write(serialized)
def genTFRecords(_data, _labels, save_dir):
    """Split (_data, _labels) into shards of the global DATA_NUM and write each."""
    for i in tqdm(range(0, len(_data), DATA_NUM)):
        data = _data[i:i + DATA_NUM]
        labels = _labels[i:i + DATA_NUM]
        # Shard index doubles as the file name.
        r_num = i // DATA_NUM
        to_tfrecord(data, labels, save_dir, r_num)
def get_input_args():
    """Parse command-line arguments for the TFRecord-building script.

    Returns the argparse namespace; ``data_location`` defaults to
    <cwd>/data when not explicitly specified.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_location', type = str, default=os.path.join(os.getcwd(),'data'), help = 'The h5 filed location')
    in_args = parser.parse_args()
    return in_args
args = get_input_args()
DIM = (75, 75) # @param
TRAIN_DATA_PER_CLASS = 10240 # @param
# BUGFIX: the fallback previously read os.path.join(root, 'data'), but
# `root` is being defined on this very line, so a missing --data_location
# raised NameError; fall back to <cwd>/data instead.
root = args.data_location if os.path.exists(args.data_location) else os.path.join(os.getcwd(), 'data')
TRAIN_DIR = os.path.join(root, 'train')
TEST_DIR = os.path.join(root, 'test')
# Screw-head classes; list order fixes the integer label of each class.
class_names = ['ph1',
               'slotted6.5',
               'torx7',
               'allen2.75',
               'ph2',
               'allen4',
               'torx8',
               'slotted4.5',
               'torx9',
               'torx6',
               'slotted10',
               'allen2.5']
NEEDED_DATA = []
DATA_LIST = []
# training data
for class_name in class_names:
    # class h5
    try:
        h5path = os.path.join(TRAIN_DIR, f"{class_name}.h5")
        # class data
        class_data = list(readh5(h5path))
        DATA_LIST.append(class_data)
        # needed data
        needed_data = TRAIN_DATA_PER_CLASS - len(class_data)
        NEEDED_DATA.append(needed_data)
        print('Class_name:{} Found Data:{} Needed:{}'.format(class_name,
                                                             len(class_data),
                                                             needed_data))
    except:
        # NOTE(review): bare except silently skips a class whose .h5 is
        # missing/corrupt; DATA_LIST then no longer aligns with class_names
        # in the zip below, shifting every later label — confirm and narrow
        # to (OSError, KeyError) with an explicit warning.
        continue
# record dir
tf_dir = create_dir(os.getcwd(), TFIDEN)
tf_train = create_dir(tf_dir, 'Train')
tf_eval = create_dir(tf_dir, 'Eval')
_DATA = []
_LABELS = []
# Oversample each class to TRAIN_DATA_PER_CLASS and label by list position.
# NOTE(review): if any class was skipped above, this zip silently truncates
# and mislabels — see the note on the loading loop.
for class_data, class_name, needed_data, idx in zip(
        DATA_LIST, class_names, NEEDED_DATA, range(len(class_names))):
    class_data = fill_missing(class_data, needed_data, class_name)
    _DATA += class_data
    _labels = [idx for _ in range(len(class_data))]
    _LABELS += _labels
# Shuffle images and labels together so pairs stay aligned.
_comb = list(zip(_DATA, _LABELS))
random.shuffle(_comb)
Training_data, Training_labels = zip(*_comb)
Testing_data = []
Testing_labels = []
# testing data
for class_name in tqdm(class_names):
    # class h5
    h5path = os.path.join(TEST_DIR, f"{class_name}.h5")
    # class data
    class_data = list(readh5(h5path))
    Testing_data += class_data
    labels = [class_names.index(class_name) for _ in range(len(class_data))]
    Testing_labels += labels
_comb = list(zip(Testing_data, Testing_labels))
random.shuffle(_comb)
Testing_data, Testing_labels = zip(*_comb)
# Samples per TFRecord shard (read by genTFRecords).
DATA_NUM = 2048 # @param
# train Data
print('Creating training tfrecords')
genTFRecords(Training_data, Training_labels, tf_train)
# eval
print('Creating eval tfrecords')
genTFRecords(Testing_data, Testing_labels, tf_eval)
|
hamzaleroi/dnn_migration | screw_classification/infer.py | <reponame>hamzaleroi/dnn_migration
#!/usr/bin/env python
import tensorflow as tf
import os
import argparse
import warnings
import efficientnet.tfkeras as efn
from glob import glob
import numpy as np
from tqdm.notebook import tqdm
import cv2
class COLORS:
    """Colour constants for the cv2 drawing helpers (OpenCV BGR order)."""
    red = (0,0,255)
    blue = (255,0,0)
    green = (0,255,0)
def predict_on_data(_paths,export_folder='/tmp',**hough_params):
    """Detect, filter and classify screws in each image; save annotated copy.

    For every Hough circle: the detection model (det_model2) decides
    screw/non-screw; confirmed screws are classified by the global
    ``model`` and annotated with their class name.
    """
    for img_path in tqdm(_paths):
        print(img_path)
        img = cv2.imread(img_path)
        #img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        circles = detect_circles(img, dp=1, **hough_params)
        if circles is not None:
            imgs = cut_rois(img, circles)
            preds=[]
            pred_circles=[]
            for roi,circle in zip(imgs,circles):
                '''
                roi1=cv2.resize(roi,(DET_DIM1,DET_DIM1))
                roi1=roi1.astype('float32')/255.0
                roi1=np.expand_dims(roi1,axis=0)
                det1=det_model1.predict([roi1])[0]
                '''
                # Stage 1: binary screw / non-screw gate.
                roi2=cv2.resize(roi,(DET_DIM2,DET_DIM2))
                roi2=roi2.astype('float32')/255.0
                roi2=np.expand_dims(roi2,axis=0)
                det2=det_model2.predict([roi2])[0]
                if det2[1] >0.9:
                    #plt.imshow(cv2.cvtColor(roi,cv2.COLOR_BGR2RGB))
                    #plt.show()
                    # Stage 2: 12-way screw-head classification.
                    img_roi=cv2.resize(roi,(IMG_DIM,IMG_DIM))
                    img_roi=np.expand_dims(img_roi,axis=0)
                    img_roi=img_roi.astype('float32')/255.0
                    idx=np.argmax(model.predict(img_roi)[0])
                    preds.append(class_names[idx])
                    pred_circles.append(circle)
                else:
                    preds.append('non_screw')
            # Labels for all circles, green circles only for confirmed screws.
            pim = draw_preds(img, preds, circles)
            final=draw_circles(pim, pred_circles)
            final=cv2.cvtColor(final,cv2.COLOR_BGR2RGB)
            path = os.path.join(export_folder,
                                'inference_' + img_path.split('/')[-1]) if os.path.exists(export_folder) else os.path.join(os.getcwd(),
                                'inference_' + img_path.split('/')[-1])
            print(f'SAVING {path} ...')
            cv2.imwrite(path,final)
def detect_circles(im, **kwargs):
    """Hough-circle search on a 0.3x downscaled grayscale copy of *im*.

    Returns an Nx3 int array of (x, y, r) rescaled back to original image
    coordinates, or None when nothing is found. *kwargs* are forwarded to
    cv2.HoughCircles (dp, minDist, param1, param2, minRadius, maxRadius).
    """
    x = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    x = cv2.resize(x, (0,0), fx=.3, fy=.3)
    h_circles = cv2.HoughCircles(x, cv2.HOUGH_GRADIENT, **kwargs)
    # HoughCircles returns None (not an empty array) on no detection.
    circles = h_circles[0] if h_circles is not None else None
    if circles is not None:
        # Undo the 0.3 downscale so coordinates match the input image.
        return (circles/.3).astype(int)
    else:
        return None
def draw_circles(im, circles):
    """Return a copy of *im* with each (x, y, r) circle drawn in green."""
    out = im.copy()
    for x,y,r in circles: cv2.circle(out, (x,y), r, COLORS.green, 5)
    return out
def draw_preds(im, preds, circles, sz=2, thick=4, color=COLORS.green):
    """Return a copy of *im* with each prediction label written beside its circle.

    'non_screw' entries are skipped; the text anchor is the circle's
    bottom-right corner (x+r, y+r).
    """
    im = im.copy()
    for (x,y,r),p in zip(circles, preds):
        if p!='non_screw':
            cv2.putText(im, p, (x+r,y+r), cv2.FONT_HERSHEY_COMPLEX, sz, color, thick, cv2.LINE_AA)
    return im
def cut_rois(im, circles):
    """Return one square patch per circle (x, y, r), clamped to *im*'s bounds.

    *im* is an H x W x C array; every patch is an independent copy.
    """
    height, width, _channels = im.shape
    patches = []
    for cx, cy, radius in circles:
        # Clamp the circle's bounding box to the image frame.
        top = max(cy - radius, 0)
        bottom = min(cy + radius, height)
        left = max(cx - radius, 0)
        right = min(cx + radius, width)
        patches.append(im[top:bottom, left:right].copy())
    return patches
def create_det_model(iden,NB_CHANNEL,WEIGHT_PATH):
    """Build the binary screw/non-screw detector backbone *iden* with weights.

    Returns (model, input_dim). NOTE(review): only 'inceptionv3' and
    'xception' are handled; any other *iden* raises NameError below.
    """
    if iden=='inceptionv3':
        base_model_wrapper=tf.keras.applications.InceptionV3
        IMG_DIM=139
    if iden=='xception':
        base_model_wrapper=tf.keras.applications.Xception
        IMG_DIM=71
    base_model = base_model_wrapper(include_top=False,
                        weights=None,
                        input_shape=(IMG_DIM,IMG_DIM,NB_CHANNEL))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    # Two outputs: index 1 is read as the "screw" probability by callers.
    x = tf.keras.layers.Dense(2, activation='softmax')(x)
    model =tf.keras.models.Model(inputs=base_model.input,outputs=x,name=iden)
    model.load_weights(WEIGHT_PATH)
    return model,IMG_DIM
def create_model(IMG_DIM,NB_CHANNEL,WEIGHT_PATH,NB_CLASS):
    """Build the EfficientNetB2 classifier head and load *WEIGHT_PATH*.

    NOTE(review): the model name uses the module-level global ``iden``
    rather than a parameter — works here, but fragile if reused.
    """
    base_model_wrapper=efn.EfficientNetB2
    base_model = base_model_wrapper(include_top=False,
                        weights=None,
                        input_shape=(IMG_DIM,IMG_DIM,NB_CHANNEL))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(NB_CLASS, activation='softmax')(x)
    model =tf.keras.models.Model(inputs=base_model.input,outputs=x,name=iden)
    model.load_weights(WEIGHT_PATH)
    return model
def get_input_args():
    """Parse the command line for the classification inference script.

    Returns the argparse namespace. NOTE(review): ``type=bool`` on
    --infer_folder is an argparse pitfall — any non-empty string
    (including "False") parses as True; consider action='store_true'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--classification_model_location', type = str, help = 'The classification model location')
    parser.add_argument('--detection_model_location', type = str, help = 'The detection model location')
    parser.add_argument('image_path', type = str, help = 'Image to apply inference on')
    parser.add_argument('--infer_folder', type = bool, default=False, help = 'Apply inference on a complete folder')
    in_args = parser.parse_args()
    return in_args
# Silence noisy framework warnings for cleaner CLI output.
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
args = get_input_args()
# Backbone identifiers: classifier plus the two detection candidates.
iden='efficientnetb2'
det_iden1='inceptionv3'
det_iden2='xception'
IMG_DIM=256 # @param
# Screw-head classes; list order fixes the integer label of each class.
class_names=['ph1',
             'slotted6.5',
             'torx7',
             'allen2.75',
             'ph2',
             'allen4',
             'torx8',
             'slotted4.5',
             'torx9',
             'torx6',
             'slotted10',
             'allen2.5']
NB_CLASS=len(class_names)
NB_CHANNEL=3 # @param
if __name__ == '__main__':
    # ----- resolve classification weights --------------------------------
    if args.classification_model_location != None:
        path = args.classification_model_location
        if os.path.exists(path) and os.path.isdir(path) :
            print('path_classification',path)
            WEIGHT_PATH= os.path.join(path,f"{iden}.h5") if os.path.isabs(path) else os.path.join(os.getcwd(),path.strip('./'),f"{iden}.h5")
        elif os.path.exists(os.path.dirname(path)):
            # BUGFIX: was ''.join(path.split('/')[:-1]), which concatenates
            # the components without separators and never names a real dir.
            WEIGHT_PATH= path if os.path.isabs(path) else os.path.join(os.getcwd(),path.strip('./'))
        else:
            WEIGHT_PATH= os.path.join(os.getcwd(),'weights',f"{iden}.h5")
            # BUGFIX: message referenced SAVE_PATH, which is never defined in
            # this script and raised NameError; report the fallback in use.
            print(f'wrong link saving to {WEIGHT_PATH}')
    else:
        WEIGHT_PATH= os.path.join(os.getcwd(),'weights',f"{iden}.h5")
    # ----- resolve detection weights --------------------------------------
    if args.detection_model_location != None:
        path = args.detection_model_location
        print('path_detection',path)
        if os.path.exists(path) and os.path.isdir(path) :
            WEIGHT_PATH_DET2= os.path.join(path,f"{det_iden2}.h5") if os.path.isabs(path) else os.path.join(os.getcwd(),path.strip('./'),f"{det_iden2}.h5")
        elif os.path.exists(os.path.dirname(path)):
            # BUGFIX: same ''.join parent-directory bug as above.
            WEIGHT_PATH_DET2= path if os.path.isabs(path) else os.path.join(os.getcwd(),path.strip('./'))
        else:
            WEIGHT_PATH_DET2= os.path.join(os.getcwd(),'weights',f"{det_iden2}.h5")
            # BUGFIX: undefined SAVE_PATH reference, as above.
            print(f'wrong link saving to {WEIGHT_PATH_DET2}')
    else:
        WEIGHT_PATH_DET2= os.path.join(os.getcwd(),'weights',f"{det_iden2}.h5")
    print(WEIGHT_PATH,WEIGHT_PATH_DET2)
    model=create_model(IMG_DIM,NB_CHANNEL,WEIGHT_PATH,NB_CLASS)
    det_model2,DET_DIM2=create_det_model(det_iden2,NB_CHANNEL,WEIGHT_PATH_DET2)
    print('Classification Model:',iden)
    print('Detection Model 2:',det_iden2)
    INFER_WHOLE_FOLDER = args.infer_folder #@param {type:"boolean"}
    DATA_PATH=args.image_path
    print('data_path',DATA_PATH)
    # BUGFIX: initialise _paths so predict_on_data below cannot raise
    # NameError when DATA_PATH is invalid; an empty list is a no-op.
    _paths=[]
    if INFER_WHOLE_FOLDER:
        if '.' in DATA_PATH:
            print('Please Provide a folder path')
        else:
            _paths=[_path for _path in glob(os.path.join(DATA_PATH,'*.*'))]
            print('Found Images:')
            for _path in _paths:
                print(_path)
    else:
        if os.path.isfile(DATA_PATH):
            _paths=[DATA_PATH]
            print('Found Image:')
            print(_paths[0])
        else:
            print('The provided DATA_PATH is Not a file')
    # # hough Params
    hough_upper_threshold = 100 # @param {type:"slider", min:0, max:100, step:1}
    hough_lower_threshold = 30 # @param {type:"slider", min:0, max:100, step:1}
    hough_min_radius = 5 # @param {type:"slider", min:0, max:100, step:1}
    hough_max_radius = 40 # @param {type:"slider", min:0, max:100, step:1}
    hough_params = dict(minDist=100,
                        param1=hough_upper_threshold,
                        param2=hough_lower_threshold,
                        minRadius=hough_min_radius,
                        maxRadius=hough_max_radius)
    print(hough_params)
    predict_on_data(_paths, **hough_params)
|
hamzaleroi/dnn_migration | screw_detection/train.py |
from tqdm.notebook import tqdm
from glob import glob
import os
import argparse
import tensorflow as tf
import numpy as np
from scripts.resnet import ResNeXt101
def data_input_fn(mode,BUFFER_SIZE,BATCH_SIZE,IMG_DIM,data_img_dim=64,DATA_PATH='ScrewDTF'):
    """Build a repeating, shuffled tf.data pipeline from TFRecord shards.

    Parameters
    ----------
    mode : str
        Sub-folder name, "Train" or "Eval".
    BUFFER_SIZE, BATCH_SIZE : int
        Shuffle buffer size and batch size.
    IMG_DIM : int
        Side length images are resized to for the network input.
    data_img_dim : int
        Side length the PNGs were stored at inside the records.
    DATA_PATH : str
        Root folder holding the <mode>/*.tfrecord shards.
    """
    def _parser(example):
        # Decode one serialized Example into a normalised image + one-hot label.
        feature ={ 'image' : tf.io.FixedLenFeature([],tf.string) ,
                   'label' : tf.io.FixedLenFeature([],tf.int64)
                 }
        parsed_example=tf.io.parse_single_example(example,feature)
        image_raw=parsed_example['image']
        image=tf.image.decode_png(image_raw,channels=3)
        image=tf.cast(image,tf.float32)/255.0
        image=tf.reshape(image,(data_img_dim,data_img_dim,3))
        image=tf.image.resize(image, [IMG_DIM,IMG_DIM])
        label=parsed_example['label']
        label=tf.cast(label,tf.int64)
        label=tf.one_hot(label,NB_CLASS)
        return image,label
    # BUGFIX: an absolute DATA_PATH previously dropped the <mode>/*.tfrecord
    # suffix entirely (no shards were ever matched); always append it, as the
    # classification trainer's gcs_pattern does.
    if os.path.isabs(DATA_PATH):
        files_pattern = os.path.join(DATA_PATH, mode, '*.tfrecord')
    else:
        files_pattern = os.path.join(os.getcwd(), DATA_PATH.strip('./'), mode, '*.tfrecord')
    print(files_pattern)
    file_paths = tf.io.gfile.glob(files_pattern)
    dataset = tf.data.TFRecordDataset(file_paths)
    dataset = dataset.map(_parser)
    dataset = dataset.shuffle(BUFFER_SIZE,reshuffle_each_iteration=True)
    dataset = dataset.repeat()
    dataset = dataset.batch(BATCH_SIZE,drop_remainder=True)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
def create_model(IMG_DIM,NB_CLASS,NB_CHANNEL):
    """Build the classifier from the module-level backbone selection.

    Relies on the globals ``base_model_wrapper``, ``TRANSFER_LEARNING``
    and ``iden`` set by the architecture-dispatch code at import time.
    """
    base_model = base_model_wrapper(include_top=False,
                        weights=TRANSFER_LEARNING,
                        input_shape=(IMG_DIM,IMG_DIM,NB_CHANNEL))
    # Fine-tune the whole backbone, not just the new head.
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(NB_CLASS, activation='softmax')(x)
    model =tf.keras.models.Model(inputs=base_model.input,outputs=x,name=iden)
    return model
def get_input_args():
    """Parse the command line for the detection training script.

    Returns the argparse namespace; defaults apply for omitted flags.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): help text says "The model location" but this is the
    # TFRecord folder — presumably a copy-paste slip; confirm.
    parser.add_argument('--training_data',type = str, default='ScrewDTF', help = 'The model location')
    parser.add_argument('--save_location', type = str, help = 'Where to save the trained model')
    parser.add_argument('--saved_weights', type = str, help = 'Load the pre-trained weights')
    parser.add_argument('--batch_size', type = int, default=128, help = 'batch_size')
    in_args = parser.parse_args()
    return in_args
args = get_input_args()
# Backbone selection; must be one of the names handled by the dispatch below.
iden='xception' # @param ['densenet201','inceptionResNetv2','inceptionv3','resnet101v2','resnext101','xception']
NB_CHANNEL=3 # @param
BATCH_SIZE= args.batch_size # @param
BUFFER_SIZE=2048 # @param
TRAIN_DATA=2048*48 # @param
EVAL_DATA=2048*3 # @param
EPOCHS=250 # @param
NB_CLASS=2 # @param
TOTAL_DATA=TRAIN_DATA+EVAL_DATA
# NOTE(review): steps-per-epoch is derived from train+eval combined; it
# looks like TRAIN_DATA//BATCH_SIZE was intended — confirm.
STEPS_PER_EPOCH = TOTAL_DATA//BATCH_SIZE
EVAL_STEPS = EVAL_DATA//BATCH_SIZE
# Side length the PNGs were stored at inside the TFRecords.
data_img_dim=64 # @param
WEIGHT_PATH= args.saved_weights if args.saved_weights != None else os.path.join(os.getcwd(),'weights','{}.h5'.format(iden))
if os.path.exists(WEIGHT_PATH):
    print('FOUND PRETRAINED WEIGHTS')
    LOAD_WEIGHTS=True
else:
    print('NO PRETRAINED WEIGHTS FOUND')
    LOAD_WEIGHTS=False
# Per-architecture configuration: (input size, backbone constructor).
# Replaces two duplicated if-chains; all variants start from imagenet
# weights. An unknown `iden` now fails fast with KeyError instead of a
# NameError much later.
_ARCH_CONFIG = {
    'densenet201': (221, tf.keras.applications.DenseNet201),
    'inceptionResNetv2': (139, tf.keras.applications.InceptionResNetV2),
    'inceptionv3': (139, tf.keras.applications.InceptionV3),
    'resnet101v2': (64, tf.keras.applications.ResNet101V2),
    'resnext101': (64, ResNeXt101),
    'xception': (71, tf.keras.applications.Xception),
}
IMG_DIM, base_model_wrapper = _ARCH_CONFIG[iden]
TRANSFER_LEARNING = 'imagenet'
# Build the input pipelines; both repeat indefinitely, so explicit step
# counts below bound each epoch.
eval_ds = data_input_fn("Eval",BUFFER_SIZE,BATCH_SIZE,IMG_DIM,DATA_PATH=args.training_data)
train_ds = data_input_fn("Train",BUFFER_SIZE,BATCH_SIZE,IMG_DIM,DATA_PATH=args.training_data)
# # model creation
model = create_model(IMG_DIM,NB_CLASS,NB_CHANNEL)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
if LOAD_WEIGHTS:
    model.load_weights(WEIGHT_PATH)
# # Training
lr_reducer = tf.keras.callbacks.ReduceLROnPlateau(factor=0.1,cooldown= 10,patience=5,verbose =1,min_lr=0.1e-9)
# Checkpoints overwrite WEIGHT_PATH whenever validation improves.
mode_autosave = tf.keras.callbacks.ModelCheckpoint(WEIGHT_PATH,save_best_only=True, verbose=0)
early_stopping = tf.keras.callbacks.EarlyStopping(patience=8, verbose=1, mode = 'auto')
callbacks = [mode_autosave, lr_reducer,early_stopping]
history = model.fit(train_ds,
                    steps_per_epoch=STEPS_PER_EPOCH,
                    epochs=EPOCHS,
                    verbose=1,
                    validation_data=eval_ds,
                    validation_steps=EVAL_STEPS,
                    callbacks=callbacks)
# Resolve where the freshly-trained weights should be written.
if args.save_location != None:
    path = args.save_location
    if os.path.exists(path) and os.path.isdir(path) :
        SAVE_PATH= os.path.join(path,'new_{}.h5'.format(iden)) if os.path.isabs(path) else os.path.join(os.getcwd(),path.strip('./'),'new_{}.h5'.format(iden))
    elif os.path.exists(os.path.dirname(path)):
        # BUGFIX: was ''.join(path.split('/')[:-1]), which concatenates the
        # components without separators and never names a real directory.
        SAVE_PATH= path if os.path.isabs(path) else os.path.join(os.getcwd(),path.strip('./'))
    else:
        SAVE_PATH= os.path.join(os.getcwd(),'weights','new_{}.h5'.format(iden))
        print(f'wrong link saving to {SAVE_PATH}')
else:
    SAVE_PATH= os.path.join(os.getcwd(),'weights','new_{}.h5'.format(iden))
model.save_weights(SAVE_PATH)
# Confusion-matrix evaluation over EVAL_STEPS batches; class index 1 is
# treated as the positive class.
tp,fp,tn,fn=0,0,0,0
for x_test,y_test in tqdm(eval_ds.take(EVAL_STEPS),total=EVAL_STEPS):
    y_pred=model.predict_on_batch(x_test)
    for yt,yp in zip(y_test,y_pred):
        clp=np.argmax(yp)
        clt=np.argmax(yt)
        if clt==0:
            if clp==0:
                tn+=1
            else:
                # BUGFIX: predicting positive on a negative sample is a
                # FALSE POSITIVE (the original incremented fn here).
                fp+=1
        else:
            if clp==1:
                tp+=1
            else:
                # BUGFIX: predicting negative on a positive sample is a
                # FALSE NEGATIVE (the original incremented fp here).
                fn+=1
accuracy = (tp+tn)/(tp+tn+fp+fn)
print()
print('Model:',iden)
print('================================')
print('TP: ', tp, ' TN: ', tn, ' FP: ', fp, ' FN: ', fn)
print('accuracy: ', accuracy)
|
hamzaleroi/dnn_migration | screw_classification/data_store.py | #!/usr/bin/env python
import h5py
import cv2
import numpy as np
import os
from glob import glob
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import argparse
def create_dir(_path):
    """Create the directory *_path*; a no-op when it already exists."""
    missing = not os.path.exists(_path)
    if missing:
        os.mkdir(_path)
def saveh5(path,data):
    """Write *data* to an HDF5 file at *path* under dataset name 'data'.

    The context manager guarantees the file is flushed and closed even if
    create_dataset raises (the original left the handle open on error).
    """
    with h5py.File(path, 'w') as hf:
        hf.create_dataset('data', data=data)
def readh5(d_path):
    """Load the 'data' dataset from the HDF5 file at *d_path* as an ndarray.

    Uses a context manager so the file handle is always closed — the
    original leaked the open h5py.File (released only by GC).
    """
    with h5py.File(d_path, 'r') as h5f:
        return np.array(h5f['data'])
def create_h5_data(_dir,iden,h5path):
    """Read every image under *_dir*, resize to the global ``dim``, save as one h5.

    *iden* is only used for the progress message; images are converted
    BGR->RGB before storage.
    """
    print('Creating Data Store:',iden)
    data=[]
    for img_path in tqdm(glob(os.path.join(_dir,'*.*'))):
        img=cv2.imread(img_path)
        img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
        img=cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
        data.append(img)
    data=np.array(data)
    saveh5(h5path,data)
def create_ds(_dir,ds_dir):
    """Build one <class>.h5 datastore in *ds_dir* per global class name."""
    for class_name in class_names:
        # source
        class_dir=os.path.join(_dir,class_name)
        # h5
        h5path=os.path.join(ds_dir,f"{class_name}.h5")
        # datastore
        create_h5_data(class_dir,class_name,h5path)
def get_input_args():
    """Parse the command line for the datastore script.

    Returns the argparse namespace; ``work_dir`` defaults to the current
    working directory.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--work_dir', type=str, default=os.getcwd(),
                     help='Where the folders old and new')
    return cli.parse_args()
if __name__ == '__main__':
    args = get_input_args()
    # fixed params
    dim=(75,75) #based on minimum dimension of the models
    work_dir = args.work_dir if os.path.exists(args.work_dir) else os.getcwd()
    # Expected layout: <work_dir>/test and <work_dir>/comb, one folder per class.
    test_dir=os.path.join(work_dir,'test')
    train_dir =os.path.join(work_dir,'comb')
    class_names=set(os.listdir(test_dir) + os.listdir(train_dir))
    print('Found Classes:',class_names)
    # Output tree: <work_dir>/data/{train,test}/<class>.h5
    dataset_dir=os.path.join(work_dir,'data')
    ds_train_dir= os.path.join(dataset_dir,'train')
    ds_test_dir = os.path.join(dataset_dir,'test')
    create_dir(dataset_dir)
    create_dir(ds_train_dir)
    create_dir(ds_test_dir)
    create_ds(train_dir,ds_train_dir)
    create_ds(test_dir,ds_test_dir)
|
hamzaleroi/dnn_migration | screw_classification/train.py | #!/usr/bin/env python
import numpy as np
import os
import argparse
from glob import glob
import pandas as pd
import seaborn as sn
from tqdm.notebook import tqdm
from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve, classification_report
import efficientnet.tfkeras as efn
import matplotlib.pyplot as plt
import tensorflow as tf
def data_input_fn(mode,BUFFER_SIZE,BATCH_SIZE,IMG_DIM,data_dim,DATA_PATH):
    """Build a repeating, shuffled tf.data pipeline from TFRecord shards.

    *mode* selects the "Train" or "Eval" sub-folder under *DATA_PATH*;
    stored *data_dim* x *data_dim* PNGs are resized to IMG_DIM for the
    network and labels one-hot encoded with the global NB_CLASS.
    """
    def _parser(example):
        # Decode one serialized Example into a normalised image + one-hot label.
        feature ={ 'image' : tf.io.FixedLenFeature([],tf.string) ,
                   'label' : tf.io.FixedLenFeature([],tf.int64)
                 }
        parsed_example=tf.io.parse_single_example(example,feature)
        image_raw=parsed_example['image']
        image=tf.image.decode_png(image_raw,channels=3)
        image=tf.cast(image,tf.float32)/255.0
        image=tf.reshape(image,(data_dim,data_dim,3))
        image=tf.image.resize(image, [IMG_DIM,IMG_DIM])
        label=parsed_example['label']
        label=tf.cast(label,tf.int64)
        label=tf.one_hot(label,NB_CLASS)
        return image,label
    gcs_pattern=os.path.join(DATA_PATH,mode,'*.tfrecord')
    file_paths = tf.io.gfile.glob(gcs_pattern)
    dataset = tf.data.TFRecordDataset(file_paths)
    dataset = dataset.map(_parser)
    dataset = dataset.shuffle(BUFFER_SIZE,reshuffle_each_iteration=True)
    dataset = dataset.repeat()
    dataset = dataset.batch(BATCH_SIZE,drop_remainder=True)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
def create_model(IMG_DIM, NB_CLASS, NB_CHANNEL):
    """Build the classifier from the module-level backbone selection.

    Relies on the globals ``base_model_wrapper``, ``TRANSFER_LEARNING``
    and ``iden`` set by the architecture dispatch at import time.
    """
    base_model = base_model_wrapper(include_top=False,
                                    weights=TRANSFER_LEARNING,
                                    input_shape=(IMG_DIM, IMG_DIM, NB_CHANNEL))
    # Fine-tune the whole backbone, not just the new head.
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(NB_CLASS, activation='softmax')(x)
    model = tf.keras.models.Model(
        inputs=base_model.input, outputs=x, name=iden)
    return model
def get_input_args():
    """Parse the command line for the classification training script.

    Returns the argparse namespace; defaults apply for omitted flags.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): help text says "The model location" but this is the
    # TFRecord folder — presumably a copy-paste slip; confirm.
    parser.add_argument('--training_data',type = str, default='ScrewCTF', help = 'The model location')
    parser.add_argument('--save_location', type = str, help = 'Where to save the trained model')
    parser.add_argument('--saved_weights', type = str, help = 'Load the pre-trained weights')
    parser.add_argument('--batch_size', type = int, default=128, help = 'batch_size')
    in_args = parser.parse_args()
    return in_args
args = get_input_args()
iden = 'efficientnetb2' # @param ['efficientnetb2','densenet201','resnet50v2']
# Side length the PNGs were stored at inside the TFRecords.
DATA_DIM = 75 # @param
IMG_DIM = 256 # @param
NB_CHANNEL = 3 # @param
BATCH_SIZE = args.batch_size # @param
BUFFER_SIZE = 2048 # @param
TRAIN_DATA = 2048 * 60 # @param
EVAL_DATA = 2048 * 3 # @param
EPOCHS = 250 # @param
# Screw-head classes; list order fixes the integer label of each class.
class_names = ['ph1',
               'slotted6.5',
               'torx7',
               'allen2.75',
               'ph2',
               'allen4',
               'torx8',
               'slotted4.5',
               'torx9',
               'torx6',
               'slotted10',
               'allen2.5']
NB_CLASS = len(class_names)
TOTAL_DATA = TRAIN_DATA + EVAL_DATA
# NOTE(review): steps-per-epoch is derived from train+eval combined; it
# looks like TRAIN_DATA // BATCH_SIZE was intended — confirm.
STEPS_PER_EPOCH = TOTAL_DATA // BATCH_SIZE
EVAL_STEPS = EVAL_DATA // BATCH_SIZE
WEIGHT_PATH= args.saved_weights if args.saved_weights != None else os.path.join(os.getcwd(),'weights','{}.h5'.format(iden))
if os.path.exists(WEIGHT_PATH):
    print('FOUND PRETRAINED WEIGHTS')
    LOAD_WEIGHTS = True
else:
    print('NO PRETRAINED WEIGHTS FOUND')
    LOAD_WEIGHTS = False
eval_ds = data_input_fn("Eval",BUFFER_SIZE,BATCH_SIZE,IMG_DIM,DATA_DIM,DATA_PATH=args.training_data)
train_ds = data_input_fn("Train",BUFFER_SIZE,BATCH_SIZE,IMG_DIM,DATA_DIM,DATA_PATH=args.training_data)
print('testing_eval_ds',eval_ds)
# Sanity preview: show one decoded sample before training starts.
for x, y in eval_ds.take(1):
    print(x.shape)
    print(y.shape)
    plt.imshow(x[0])
    plt.show()
    print(y[0])
# # model creation
# In[ ]:
# Architecture dispatch: sets the backbone constructor and the pretrained
# weight set used by create_model().
if iden == 'densenet201':
    base_model_wrapper = tf.keras.applications.DenseNet201
    TRANSFER_LEARNING = 'imagenet'
if iden == 'resnet50v2':
    base_model_wrapper = tf.keras.applications.ResNet50V2
    TRANSFER_LEARNING = 'imagenet'
if iden == 'efficientnetb2':
    base_model_wrapper = efn.EfficientNetB2
    TRANSFER_LEARNING = 'noisy-student'
model = create_model(IMG_DIM, NB_CLASS, NB_CHANNEL)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
if LOAD_WEIGHTS:
    model.load_weights(WEIGHT_PATH)
lr_reducer = tf.keras.callbacks.ReduceLROnPlateau(
    factor=0.1, cooldown=10, patience=5, verbose=1, min_lr=0.1e-9)
# Checkpoints overwrite WEIGHT_PATH whenever validation improves.
mode_autosave = tf.keras.callbacks.ModelCheckpoint(
    WEIGHT_PATH, save_best_only=True, verbose=0)
early_stopping = tf.keras.callbacks.EarlyStopping(
    patience=8, verbose=1, mode='auto')
callbacks = [mode_autosave, lr_reducer, early_stopping]
history = model.fit(train_ds,
                    steps_per_epoch=STEPS_PER_EPOCH,
                    epochs=EPOCHS,
                    verbose=1,
                    validation_data=eval_ds,
                    validation_steps=EVAL_STEPS,
                    callbacks=callbacks)
# Resolve where the freshly-trained weights should be written.
if args.save_location != None:
    path = args.save_location
    if os.path.exists(path) and os.path.isdir(path) :
        SAVE_PATH= os.path.join(path,'new_{}.h5'.format(iden)) if os.path.isabs(path) else os.path.join(os.getcwd(),path.strip('./'),'new_{}.h5'.format(iden))
    elif os.path.exists(os.path.dirname(path)):
        # BUGFIX: was ''.join(path.split('/')[:-1]), which concatenates the
        # components without separators and never names a real directory.
        SAVE_PATH= path if os.path.isabs(path) else os.path.join(os.getcwd(),path.strip('./'))
    else:
        SAVE_PATH= os.path.join(os.getcwd(),'weights','new_{}.h5'.format(iden))
        print(f'wrong link saving to {SAVE_PATH}')
else:
    SAVE_PATH= os.path.join(os.getcwd(),'weights','new_{}.h5'.format(iden))
model.save_weights(SAVE_PATH)
# Final held-out evaluation plus a per-class precision/recall report.
results = model.evaluate(eval_ds, steps=EVAL_STEPS)
y_true = []
y_pred = []
print('Getting Batch Predictions')
for x, y in tqdm(eval_ds.take(EVAL_STEPS), total=EVAL_STEPS):
    y_p = model.predict_on_batch(x)
    for yi, yp in zip(y, y_p):
        y_true.append(yi)
        y_pred.append(yp)
# Collapse one-hot / probability vectors to class indices.
Y_TRUE = []
Y_PRED = []
for yt, yp in tqdm(zip(y_true, y_pred), total=len(y_true)):
    Y_TRUE.append(np.argmax(yt))
    Y_PRED.append(np.argmax(yp))
print(
    classification_report(
        np.array(Y_TRUE),
        np.array(Y_PRED),
        target_names=class_names))
|
hamzaleroi/dnn_migration | screw_classification/eval.py | <reponame>hamzaleroi/dnn_migration
#!/usr/bin/env python
# coding: utf-8
import efficientnet.tfkeras as efn
import warnings
import tensorflow as tf
import json
from tqdm.notebook import tqdm
import cv2
import numpy as np
import os
import argparse
class COLORS:
    """BGR color constants for the OpenCV drawing helpers."""
    red, green, blue = (0, 0, 255), (0, 255, 0), (255, 0, 0)
def detect_circles(im, **kwargs):
    """Run the Hough circle transform on a downscaled grayscale copy of `im`.

    Detection runs at 30% scale for speed; coordinates and radii are scaled
    back to the original resolution.  Extra keyword args are forwarded to
    cv2.HoughCircles.

    Returns:
        int ndarray of (x, y, r) triples, or None when nothing is found.
    """
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (0, 0), fx=.3, fy=.3)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, **kwargs)
    # Bug fix: the original indexed `[0]` *before* the None check, so the
    # no-detection case raised TypeError and `return None` was unreachable.
    if circles is None:
        return None
    return (circles[0] / .3).astype(int)
def cirlces_to_boxes(circles):
    """Convert (x, y, r) circles into [xmin, ymin, xmax, ymax] boxes."""
    return [[int(cx - r), int(cy - r), int(cx + r), int(cy + r)]
            for (cx, cy, r) in circles]
def draw_circles(im, circles):
    """Return a copy of `im` with every (x, y, r) circle outlined in green."""
    canvas = im.copy()
    for cx, cy, radius in circles:
        cv2.circle(canvas, (cx, cy), radius, COLORS.green, 5)
    return canvas
def draw_preds(im, preds, circles, sz=2, thick=4, color=COLORS.green):
    """Return a copy of `im` with each label (except 'non_screw') written at
    the bottom-right corner of its circle."""
    canvas = im.copy()
    for (cx, cy, radius), label in zip(circles, preds):
        if label == 'non_screw':
            continue
        anchor = (cx + radius, cy + radius)
        cv2.putText(canvas, label, anchor, cv2.FONT_HERSHEY_COMPLEX,
                    sz, color, thick, cv2.LINE_AA)
    return canvas
def cut_rois(im, circles):
    """Crop a square region around every (x, y, r) circle, clamped to `im`.

    Returns a list of independent copies (one ROI per circle).
    """
    height, width, _ = im.shape
    rois = []
    for cx, cy, radius in circles:
        top, bottom = max(cy - radius, 0), min(cy + radius, height)
        left, right = max(cx - radius, 0), min(cx + radius, width)
        rois.append(im[top:bottom, left:right].copy())
    return rois
def draw_points(im, pnts):
    """Return a copy of `im` with a thick red dot at every point."""
    canvas = im.copy()
    for point in pnts:
        cv2.circle(canvas, tuple(point), 10, COLORS.red, 30)
    return canvas
def draw_gt(im, pnts, lbls):
    """Draw ground-truth points and their labels (in red) on a copy of `im`."""
    out = im.copy()
    if not len(pnts):
        return out
    out = draw_points(out, pnts)
    # draw_preds expects (x, y, r) circles; attach a fixed negative radius so
    # each label lands above/left of its point instead of below/right.
    radii = -30 * np.ones(len(pnts), dtype=int)
    circles = np.concatenate((pnts, radii.reshape(-1, 1)), axis=-1)
    return draw_preds(out, lbls, circles, color=COLORS.red, thick=6)
def calc_iou(boxA, boxB):
    """Intersection-over-union of two [xmin, ymin, xmax, ymax] boxes.

    Coordinates are treated as inclusive pixel indices (hence the +1 terms),
    matching the VOC convention used elsewhere in this file.
    """
    inter_left = max(boxA[0], boxB[0])
    inter_top = max(boxA[1], boxB[1])
    inter_right = min(boxA[2], boxB[2])
    inter_bottom = min(boxA[3], boxB[3])
    inter_area = (max(0, inter_right - inter_left + 1)
                  * max(0, inter_bottom - inter_top + 1))
    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    return inter_area / float(area_a + area_b - inter_area)
def get_single_image_results(gt_boxes, pred_boxes, iou_thr):
    """Count true/false positives and false negatives for one image.

    Args:
        gt_boxes (list of list of floats): ground-truth boxes as
            [xmin, ymin, xmax, ymax].
        pred_boxes (list of list of floats): predicted boxes, same format.
        iou_thr (float): IoU above which a prediction may match a gt box.

    Returns:
        tuple: ({'true_positive': int, 'false_positive': int,
                 'false_negative': int}, list of candidate IoU values)
    """
    # NOTE(review): when predictions or ground truth are empty, this reports
    # zeros everywhere instead of counting the misses; kept as-is to preserve
    # the existing metric definition — confirm intent.
    if len(pred_boxes) == 0 or len(gt_boxes) == 0:
        return {'true_positive': 0, 'false_positive': 0,
                'false_negative': 0}, []
    gt_idx_thr = []
    pred_idx_thr = []
    ious = []
    # Collect every (gt, pred) pair whose IoU clears the threshold.
    for ipb, pred_box in enumerate(pred_boxes):
        for igb, gt_box in enumerate(gt_boxes):
            iou = calc_iou(gt_box, pred_box)
            if iou > iou_thr:
                gt_idx_thr.append(igb)
                pred_idx_thr.append(ipb)
                ious.append(iou)
    # Greedy one-to-one matching, highest IoU first.
    # Bug fix: the original used np.argsort(ious)[::1] — a no-op slice — so
    # candidate pairs were matched in *ascending* IoU order; standard
    # detection evaluation matches in descending order so the best overlaps
    # claim their boxes first.
    iou_sort = np.argsort(ious)[::-1]
    if len(iou_sort) == 0:
        return {'true_positive': 0, 'false_positive': 0,
                'false_negative': 0}, []
    gt_match_idx = []
    pred_match_idx = []
    for idx in iou_sort:
        gt_idx = gt_idx_thr[idx]
        pr_idx = pred_idx_thr[idx]
        # Match only boxes that have not been claimed yet.
        if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):
            gt_match_idx.append(gt_idx)
            pred_match_idx.append(pr_idx)
    tp = len(gt_match_idx)
    fp = len(pred_boxes) - len(pred_match_idx)
    fn = len(gt_boxes) - len(gt_match_idx)
    return {'true_positive': tp, 'false_positive': fp,
            'false_negative': fn}, ious
def calc_precision_recall(image_results):
    """Aggregate per-image TP/FP/FN dicts into overall (precision, recall).

    Args:
        image_results (iterable of dict): each with 'true_positive',
            'false_positive' and 'false_negative' counts.

    Returns:
        tuple of floats: (precision, recall); 0.0 when a ratio is undefined.
    """
    tp = fp = fn = 0
    for res in image_results:
        tp += res['true_positive']
        fp += res['false_positive']
        fn += res['false_negative']
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return precision, recall
def calc_hough_res():
    """Evaluate the raw Hough circle detector against the VIA annotations.

    Reads the VIA JSON at module global `scenes_path`; for every annotated
    image present in `scenes_dir`, compares Hough-detected boxes with the
    ground-truth boxes at IoU 0.5, then prints precision, recall, F1 and the
    mean IoU of all threshold-passing candidate pairs.

    Relies on module globals: scenes_path, scenes_dir, hough_params.
    """
    with open(scenes_path, 'r') as f:
        meta = json.load(f)
    print('Found Annotations for:', len(meta), 'files')
    img_results = []
    iou_res = []
    for file_meta in tqdm(meta):
        # Skip images without ground-truth regions.
        if len(meta[file_meta]['regions']) == 0:
            continue
        img_path = os.path.join(scenes_dir, meta[file_meta]['filename'])
        if not os.path.exists(img_path):
            continue
        img = cv2.imread(img_path)
        circles = detect_circles(img, dp=1, **hough_params)
        if circles is None:
            continue
        bbox_det = cirlces_to_boxes(circles)
        bbox_gt = []
        for reg in meta[file_meta]['regions']:
            shape = reg['shape_attributes']
            x, y, w, h = shape['x'], shape['y'], shape['width'], shape['height']
            bbox_gt.append([x, y, x + w, y + h])
        res, ious = get_single_image_results(bbox_gt, bbox_det, iou_thr=0.5)
        img_results.append(res)
        iou_res.extend(ious)
    precision, recall = calc_precision_recall(img_results)
    # Bug fix: guard the harmonic mean against ZeroDivisionError when both
    # precision and recall are zero (nothing matched at all).
    if precision + recall > 0:
        f1 = 2 * (recall * precision) / (recall + precision)
    else:
        f1 = 0.0
    print('Hough F1 Score:', f1)
    print('Hough Precision:', precision)
    print('Hough Recall:', recall)
    print('Screw Mean IoU:', np.mean(np.array(iou_res)))
def create_det_model(iden, NB_CHANNEL, WEIGHT_PATH):
    """Build the binary screw/non-screw detector backbone and load weights.

    Args:
        iden (str): backbone identifier, 'inceptionv3' or 'xception'.
        NB_CHANNEL (int): number of input channels.
        WEIGHT_PATH (str): path of the .h5 weight file to load.

    Returns:
        tuple: (keras Model with a 2-way softmax head, input image dimension).

    Raises:
        ValueError: if `iden` names an unsupported backbone.
    """
    if iden == 'inceptionv3':
        base_model_wrapper = tf.keras.applications.InceptionV3
        IMG_DIM = 139
    elif iden == 'xception':
        base_model_wrapper = tf.keras.applications.Xception
        IMG_DIM = 71
    else:
        # Bug fix: an unknown identifier previously fell through and raised a
        # confusing NameError on base_model_wrapper further down.
        raise ValueError('unsupported detector backbone: {!r}'.format(iden))
    base_model = base_model_wrapper(include_top=False,
                                    weights=None,
                                    input_shape=(IMG_DIM, IMG_DIM, NB_CHANNEL))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(2, activation='softmax')(x)
    model = tf.keras.models.Model(
        inputs=base_model.input, outputs=x, name=iden)
    model.load_weights(WEIGHT_PATH)
    return model, IMG_DIM
def create_model(IMG_DIM, NB_CHANNEL, WEIGHT_PATH, NB_CLASS,
                 model_name='efficientnetb2'):
    """Build the EfficientNetB2 screw-type classifier and load its weights.

    Args:
        IMG_DIM (int): input image height/width.
        NB_CHANNEL (int): number of input channels.
        WEIGHT_PATH (str): path of the .h5 weight file to load.
        NB_CLASS (int): number of output classes.
        model_name (str): keras model name.  Backward-compatible addition:
            the original referenced the module-level global `iden`
            ('efficientnetb2') here instead of a parameter.

    Returns:
        keras Model with an NB_CLASS-way softmax head.
    """
    base_model_wrapper = efn.EfficientNetB2
    base_model = base_model_wrapper(include_top=False,
                                    weights=None,
                                    input_shape=(IMG_DIM, IMG_DIM, NB_CHANNEL))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(NB_CLASS, activation='softmax')(x)
    model = tf.keras.models.Model(
        inputs=base_model.input, outputs=x, name=model_name)
    model.load_weights(WEIGHT_PATH)
    return model
# data idens and predictions
def create_dir(_path):
    """Ensure directory `_path` exists (EAFP) and return it."""
    try:
        os.mkdir(_path)
    except FileExistsError:
        pass
    return _path
def get_input_args():
    """Parse command-line options.

    Returns:
        argparse.Namespace with:
            eval_data (str): evaluation data directory (default 'ScrewDTF').
            saved_weights (str | None): directory holding pre-trained weights.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--eval_data', type=str, default='ScrewDTF',
                        help='The model location')
    parser.add_argument('--saved_weights', type=str,
                        help='Load the pre-trained weights')
    return parser.parse_args()
# Parse CLI options and resolve the VIA annotation file / scene directory,
# falling back to ./data when --eval_data does not exist.
args = get_input_args()
scenes_path = os.path.join(args.eval_data, 'screw.json') if os.path.exists(args.eval_data ) else os.path.join(os.getcwd(), 'data', 'screw.json')
scenes_dir =os.path.join(args.eval_data, 'scenes') if os.path.exists(args.eval_data ) else os.path.join(os.getcwd(), 'data', 'scenes')
# Hough circle transform parameters (tuned via notebook sliders).
hough_upper_threshold = 100 # @param {type:"slider", min:0, max:100, step:1}
hough_lower_threshold = 50 # @param {type:"slider", min:0, max:100, step:1}
hough_min_radius = 5 # @param {type:"slider", min:0, max:100, step:1}
hough_max_radius = 30 # @param {type:"slider", min:0, max:100, step:1}
hough_params = dict(minDist=100,
                    param1=hough_upper_threshold,
                    param2=hough_lower_threshold,
                    minRadius=hough_min_radius,
                    maxRadius=hough_max_radius)
# Evaluate the raw Hough detector before any model is loaded.
calc_hough_res()
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Backbone identifiers: one screw-type classifier + two binary detectors.
iden = 'efficientnetb2'
det_iden1 = 'inceptionv3'
det_iden2 = 'xception'
integrated = [iden,det_iden1,det_iden2]
# Resolve the weight-file path for the classifier and both detector backbones.
# Bug fixes: `os.getwd()` -> `os.getcwd()` (os has no getwd; relative
# --saved_weights paths crashed with AttributeError) and identity comparison
# with None per PEP 8.  A loop variable distinct from the module-level `iden`
# is used to avoid clobbering it.
WEIGHTS_INTGRATED = []
for model_iden in integrated:
    fname = '{}.h5'.format(model_iden)
    if args.saved_weights is not None:
        if os.path.isabs(args.saved_weights):
            weight_path = os.path.join(args.saved_weights, fname)
        else:
            weight_path = os.path.join(os.getcwd(),
                                       args.saved_weights.strip('./'), fname)
    else:
        weight_path = os.path.join(os.getcwd(), 'weights', fname)
    WEIGHTS_INTGRATED.append(weight_path)
WEIGHT_PATH, WEIGHT_PATH_DET1, WEIGHT_PATH_DET2 = WEIGHTS_INTGRATED
IMG_DIM = 256 # @param
# Screw-type labels in the order the classifier was trained with.
class_names = ['ph1',
               'slotted6.5',
               'torx7',
               'allen2.75',
               'ph2',
               'allen4',
               'torx8',
               'slotted4.5',
               'torx9',
               'torx6',
               'slotted10',
               'allen2.5']
NB_CLASS = len(class_names)
NB_CHANNEL = 3 # @param
# Load the screw-type classifier and the (second) screw/non-screw detector.
model = create_model(IMG_DIM, NB_CHANNEL, WEIGHT_PATH, NB_CLASS)
# det_model1,DET_DIM1=create_det_model(det_iden1,NB_CHANNEL,WEIGHT_PATH_DET1)
det_model2, DET_DIM2 = create_det_model(
    det_iden2, NB_CHANNEL, WEIGHT_PATH_DET2)
print('Classification Model:', iden)
#print('Detection Model 1:',det_iden1)
print('Detection Model 2:', det_iden2)
# ## Predictions
# It's the scene overlayed with the found circles. The found screws are
# overlayed with a shaded region
# In[ ]:
save_dir=create_dir(os.path.join(os.getcwd(),'predictions'))
# For every annotated scene: detect circles, draw ground truth, run the
# detector + classifier on each ROI, and save the overlaid image.
with open(scenes_path, 'r') as f:
    meta = json.load(f)
# Draw
for file_meta in tqdm(meta):
    img_path = os.path.join(scenes_dir, meta[file_meta]['filename'])
    if not os.path.exists(img_path):
        continue
    else:
        print(f'processing {img_path}')
    img = cv2.imread(img_path)
    circles = detect_circles(img, dp=1, **hough_params)
    if circles is not None:
        # Collect ground-truth region centers and their screw-type labels.
        regs = meta[file_meta]['regions']
        pnts = []
        lbls = []
        for reg in regs:
            x, y, w, h = reg['shape_attributes']['x'], reg['shape_attributes'][
                'y'], reg['shape_attributes']['width'], reg['shape_attributes']['height']
            reg_type = reg["region_attributes"]["screwtype"]
            center_x, center_y = int(x + w / 2), int(y + h / 2)
            pnts.append([center_x, center_y])
            lbls.append(reg_type)
        gtim = draw_gt(img, pnts, lbls)
        imgs = cut_rois(img, circles)
        #mgs = [cv2.cvtColor(d,cv2.COLOR_BGR2RGB) for d in imgs]
        preds = []
        pred_circles = []
        for roi, circle in zip(imgs, circles):
            '''
            roi1=cv2.resize(roi,(DET_DIM1,DET_DIM1))
            roi1=roi1.astype('float32')/255.0
            roi1=np.expand_dims(roi1,axis=0)
            det1=det_model1.predict([roi1])[0]
            '''
            # Binary detector: only classify ROIs it is >90% sure are screws.
            roi2 = cv2.resize(roi, (DET_DIM2, DET_DIM2))
            roi2 = roi2.astype('float32') / 255.0
            roi2 = np.expand_dims(roi2, axis=0)
            det2 = det_model2.predict([roi2])[0]
            if det2[1] > 0.9:
                img_roi = cv2.resize(roi, (IMG_DIM, IMG_DIM))
                img_roi = np.expand_dims(img_roi, axis=0)
                img_roi = img_roi.astype('float32') / 255.0
                idx = np.argmax(model.predict(img_roi)[0])
                preds.append(class_names[idx])
                pred_circles.append(circle)
            else:
                preds.append('non_screw')
        # Overlay predictions on the ground-truth image and save it.
        pim = draw_preds(gtim, preds, circles)
        final = draw_circles(pim, pred_circles)
        cv2.imwrite(os.path.join(save_dir, meta[file_meta]['filename']), final)
    else:
        print(
            'Hough Didnot Detect Any Circle for:',
            meta[file_meta]['filename'])
|
hamzaleroi/dnn_migration | screw_detection/eval.py | <filename>screw_detection/eval.py
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
import os
import h5py
import tensorflow as tf
from tqdm.notebook import tqdm
from scripts.resnet import ResNeXt101
import warnings
from glob import glob
import cv2
import imageio
import argparse
def create_model(iden, NB_CLASS, NB_CHANNEL, WEIGHT_PATH):
    """Build a named classifier backbone, load its weights, return it.

    Args:
        iden (str): one of 'densenet201', 'inceptionResNetv2', 'inceptionv3',
            'resnet101v2', 'resnext101', 'xception'.
        NB_CLASS (int): number of output classes.
        NB_CHANNEL (int): number of input channels.
        WEIGHT_PATH (str): path of the .h5 weight file to load.

    Returns:
        tuple: (keras Model, input image dimension).

    Raises:
        ValueError: if `iden` names an unsupported backbone.
    """
    print(WEIGHT_PATH)
    # Backbone constructor and matching input dimension per identifier.
    backbones = {
        'densenet201': (tf.keras.applications.DenseNet201, 221),
        'inceptionResNetv2': (tf.keras.applications.InceptionResNetV2, 139),
        'inceptionv3': (tf.keras.applications.InceptionV3, 139),
        'resnet101v2': (tf.keras.applications.ResNet101V2, 64),
        'resnext101': (ResNeXt101, 64),
        'xception': (tf.keras.applications.Xception, 71),
    }
    try:
        base_model_wrapper, IMG_DIM = backbones[iden]
    except KeyError:
        # Bug fix: an unknown identifier previously fell through the if-chain
        # and raised a confusing NameError on base_model_wrapper.
        raise ValueError('unsupported backbone: {!r}'.format(iden)) from None
    base_model = base_model_wrapper(include_top=False,
                                    weights=None,
                                    input_shape=(IMG_DIM, IMG_DIM, NB_CHANNEL))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(NB_CLASS, activation='softmax')(x)
    model = tf.keras.models.Model(inputs=base_model.input, outputs=x, name=iden)
    model.load_weights(WEIGHT_PATH)
    dim = IMG_DIM
    return model, dim
def data_input_fn(mode, BUFFER_SIZE, BATCH_SIZE, data_img_dim=64,
                  DATA_PATH='ScrewDTF'):
    """Build a shuffled, repeating tf.data pipeline from tfrecord shards.

    Args:
        mode (str): subdirectory to read ('Train' or 'Eval').
        BUFFER_SIZE (int): shuffle buffer size.
        BATCH_SIZE (int): batch size (remainder dropped).
        data_img_dim (int): stored square image dimension.
        DATA_PATH (str): tfrecord root directory, absolute or cwd-relative.

    Returns:
        tf.data.Dataset yielding (image, one-hot label) batches.
        Uses module global NB_CLASS for the one-hot depth.
    """
    def _parser(example):
        feature = {'image': tf.io.FixedLenFeature([], tf.string),
                   'label': tf.io.FixedLenFeature([], tf.int64)}
        parsed_example = tf.io.parse_single_example(example, feature)
        image_raw = parsed_example['image']
        image = tf.image.decode_png(image_raw, channels=3)
        image = tf.cast(image, tf.float32) / 255.0
        image = tf.reshape(image, (data_img_dim, data_img_dim, 3))
        #image=tf.image.resize(image, [IMG_DIM,IMG_DIM])
        label = parsed_example['label']
        label = tf.cast(label, tf.int64)
        label = tf.one_hot(label, NB_CLASS)
        return image, label
    # Bug fix: for an absolute DATA_PATH the original used the bare directory
    # as the glob pattern (missing <mode>/*.tfrecord), so no shard matched.
    if os.path.isabs(DATA_PATH):
        files_pattern = os.path.join(DATA_PATH, mode, '*.tfrecord')
    else:
        files_pattern = os.path.join(os.getcwd(), DATA_PATH.strip('./'),
                                     mode, '*.tfrecord')
    print(files_pattern)
    file_paths = tf.io.gfile.glob(files_pattern)
    dataset = tf.data.TFRecordDataset(file_paths)
    dataset = dataset.map(_parser)
    dataset = dataset.shuffle(BUFFER_SIZE, reshuffle_each_iteration=True)
    dataset = dataset.repeat()
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
# data idens and predictions
def create_dir(_path):
    """Ensure directory `_path` exists (EAFP) and return it."""
    try:
        os.mkdir(_path)
    except FileExistsError:
        pass
    return _path
def voc_ap(rec, prec, use_07_metric=False):
    """Compute VOC average precision from recall/precision arrays.

    With use_07_metric=True the VOC2007 11-point interpolation is used;
    otherwise the exact area under the interpolated PR curve.
    """
    if use_07_metric:
        # 11-point interpolation at recall levels 0.0, 0.1, ..., 1.0.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            mask = rec >= t
            p = np.max(prec[mask]) if np.sum(mask) > 0 else 0
            ap += p / 11.
        return ap
    # Exact AP: append sentinels, take the precision envelope, then
    # integrate over the recall steps.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
def get_input_args():
    """Parse command-line options.

    Returns:
        argparse.Namespace with:
            eval_data (str): evaluation data directory (default 'ScrewDTF').
            saved_weights (str | None): directory holding pre-trained weights.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--eval_data', type=str, default='ScrewDTF',
                        help='The model location')
    parser.add_argument('--saved_weights', type=str,
                        help='Load the pre-trained weights')
    return parser.parse_args()
# Parse CLI options and silence noisy framework warnings.
args = get_input_args()
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# modeling
idens=['densenet201','inceptionResNetv2','inceptionv3','resnet101v2','resnext101','xception']
model1= 'inceptionv3' # @param ['densenet201','inceptionResNetv2','inceptionv3','resnet101v2','resnext101','xception']
model2= 'densenet201' # @param ['densenet201','inceptionResNetv2','inceptionv3','resnet101v2','resnext101','xception']
integrated=[model1,model2]
# parameters
NB_CHANNEL=3 # @param
NB_CLASS=2 # @param
BATCH_SIZE=128 # @param
BUFFER_SIZE=2048 # @param
EVAL_DATA=2048*3 # @param
EVAL_STEPS = EVAL_DATA//BATCH_SIZE
data_img_dim=64 # @param
# Resolve the weight-file path for each ensembled backbone.
# Bug fixes: `os.getwd()` -> `os.getcwd()` (os has no getwd; relative
# --saved_weights paths crashed with AttributeError) and identity comparison
# with None per PEP 8.
WEIGHTS_INTGRATED = []
for model_iden in integrated:
    fname = '{}.h5'.format(model_iden)
    if args.saved_weights is not None:
        if os.path.isabs(args.saved_weights):
            weight_path = os.path.join(args.saved_weights, fname)
        else:
            weight_path = os.path.join(os.getcwd(),
                                       args.saved_weights.strip('./'), fname)
    else:
        weight_path = os.path.join(os.getcwd(), 'weights', fname)
    WEIGHTS_INTGRATED.append(weight_path)
# data
if os.path.exists(args.eval_data):
    DATA_PATH=args.eval_data
else:
    print('Validation not found in the current folder, please specify it !!')
    exit(1)
eval_ds = data_input_fn("Eval",BUFFER_SIZE,BATCH_SIZE,DATA_PATH=args.eval_data)
print(eval_ds)
# # model creation
# Load every backbone that will be ensembled (their scores are summed later).
INTEGRATED=[]
for iden,WEIGHT_PATH in zip(integrated,WEIGHTS_INTGRATED):
    print('Loading Integrated Models:',iden)
    INTEGRATED.append(create_model(iden,NB_CLASS,NB_CHANNEL,WEIGHT_PATH))
# Per-threshold counters for the 0.70..0.79 sweep (index i -> 0.70 + 0.01*i)
# plus scalar counts at the fixed 0.8 threshold.
tps,tns,fps,fns, acc=[0]*80,[0]*80,[0]*80,[0]*80,[]
tp,tn,fp,fn=0,0,0,0
print('Extracting Test Data from tfrecords')
# Split eval samples by ground-truth label (0 = non-screw, 1 = screw).
screw_data=[]
non_screw_data=[]
for x_batch,y_batch in tqdm(eval_ds.take(EVAL_STEPS),total=EVAL_STEPS):
    for x,y in zip(x_batch,y_batch):
        label=np.argmax(y)
        if label==0:
            non_screw_data.append(x)
        else:
            screw_data.append(x)
# ## Screw Data
print('Evaluating: Screw_data')
for i in tqdm(range(0,len(screw_data),BATCH_SIZE)):
    data=screw_data[i:i+BATCH_SIZE]
    # Only full batches are scored.
    if len(data)==BATCH_SIZE:
        scores=np.zeros((BATCH_SIZE,NB_CLASS))
        for model,dim in INTEGRATED:
            # Resize the batch to this backbone's input size.
            x_batch=[]
            for x in data:
                x=cv2.resize(np.array(x),(dim,dim))
                x_batch.append(x)
            x_batch=np.array(x_batch)
            y_batch=model.predict_on_batch(x_batch)
            scores+=y_batch
        # Column 1 is the 'screw' probability, summed over both models.
        # NOTE(review): the sum ranges over 0..2 but is compared against
        # sub-1 thresholds below — confirm that summing (not averaging)
        # is intended here.
        scores=scores[:,1]
        for score in scores:
            thresh = 0.7
            step = 0.01
            for idx in range(79):
                if score>thresh:
                    tps[idx] +=1
                else:
                    fps[idx] +=1
                thresh+=step
            if score>0.8:
                tp +=1
            else:
                fp +=1
# ## Non Screw Data
print('Evaluating:Non Screw_data')
for i in tqdm(range(0,len(non_screw_data),BATCH_SIZE)):
    data=non_screw_data[i:i+BATCH_SIZE]
    # Only full batches are scored.
    if len(data)==BATCH_SIZE:
        scores=np.zeros((BATCH_SIZE,NB_CLASS))
        for model,dim in INTEGRATED:
            x_batch=[]
            for x in data:
                x=cv2.resize(np.array(x),(dim,dim))
                x_batch.append(x)
            x_batch=np.array(x_batch)
            y_batch=model.predict_on_batch(x_batch)
            scores+=y_batch
        scores=scores[:,1]
        # NOTE(review): a non-screw sample scored above the threshold is
        # counted as a false *negative* here (and below threshold as a true
        # negative) — the fn/fp naming looks swapped relative to the usual
        # convention; confirm the intended semantics.
        for score in scores:
            thresh = 0.7
            step = 0.01
            for idx in range(79):
                if score>thresh:
                    fns[idx] +=1
                else:
                    tns[idx] +=1
                thresh+=step
            if score>0.8:
                fn +=1
            else:
                tn +=1
# Per-threshold accuracy over the 0.70..0.79 sweep.
# Bug fix: the original wrapped the division in `try/except: raise` followed
# by a dead `pass` — the bare except immediately re-raised, so a
# ZeroDivisionError (no samples counted at a threshold) aborted the sweep.
# Such thresholds are now skipped instead.
for i in range(79):
    denom = tps[i] + tns[i] + fps[i] + fns[i]
    if denom:
        acc.append((tps[i] + tns[i]) / denom)
# Report best sweep accuracy and the fixed-0.8-threshold confusion counts.
print('Models:',integrated[0],integrated[1])
print('maximum accuracy: ',max(acc))
accuracy = (tp+tn)/(tp+tn+fp+fn)
print('TP: ', tp, ' TN: ', tn, ' FP: ', fp, ' FN: ', fn)
print('accuracy: ', accuracy)
# # Scenes Data and Hough Params
# hough parameters, subject to change, depending on the height, illumination and etc.
hough_upper_threshold = 100 # @param
hough_lower_threshold = 25 # @param
hough_min_radius = 15 # @param
hough_max_radius = 30 # @param
ovthresh=0.5 # @param  IoU threshold for a detection to count as a match
score_thresh=0.8 # @param  ensemble score threshold for drawing a hit
src_img_dir= os.path.join(os.getcwd(),'data','scenes')
src_gt_txt = os.path.join(os.getcwd(),'data','scenes.txt')
pred_dir=create_dir(os.path.join(os.getcwd(),'predictions'))
# One annotation line per image; first token is the image file name.
with open(src_gt_txt,'r') as txt:
    eval_data=[s.rstrip() for s in txt]
eval_data_idens=[data.split(' ')[0] for data in eval_data]
#read groundtruth
#groundtruth format: image+path xmin,ymin,xmax,ymax,0 xmin,ymin,xmax,ymax,0 .....
# Parse the ground-truth file into per-image box records for VOC scoring.
print('Creating Class Records')
npos = 0
class_recs = {}
for line in tqdm(eval_data):
    line_split = line.strip().split('.png ')
    image_id = line_split[0]
    boxes = line_split[1].split(' ')
    bbox = []
    for box in boxes:
        # Each box is "xmin,ymin,xmax,ymax,<class-flag>"; the flag is unused.
        xmin, ymin, xmax, ymax, _flag = box.split(',')
        bbox.append([int(xmin), int(ymin), int(xmax), int(ymax)])
    bbox = np.array(bbox)
    # Bug fix: np.bool (a deprecated alias removed in NumPy 1.24+) replaced
    # by the builtin bool; no box is marked difficult.
    difficult = np.zeros(len(bbox), dtype=bool)
    det = [False] * len(bbox)
    npos = npos + sum(~difficult)
    class_recs[image_id] = {'bbox': bbox,
                            'difficult': difficult,
                            'det': det}
# # Saving Scenes Predictions and Evaluation
DIM1=986
DIM2=1382
det=[]
for img_iden in tqdm(eval_data_idens):
img_path=os.path.join(src_img_dir,img_iden)
img_raw = cv2.imread(img_path)
img_raw = img_raw[:,:,:3]
img_h, img_w = img_raw.shape[:2]
if img_h>img_w:
ratiox = DIM1/img_w
ratioy = DIM2/img_h
img_raw = cv2.resize(img_raw, (DIM1,DIM2))
else:
ratiox = DIM2/img_w
ratioy = DIM1/img_h
img_raw = cv2.resize(img_raw, (DIM2,DIM1))
gray = cv2.cvtColor(img_raw, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT,
1, 100,
param1=hough_upper_threshold,
param2=hough_lower_threshold,
minRadius=hough_min_radius,
maxRadius=hough_max_radius)
if circles is not None:
# convert the (x, y) coordinates and radius of the circles to integers
circles = np.round(circles[0, :]).astype("int")
# copy the image, for painting we will use another
drawn_image = img_raw.copy()
# loop over the found circles
for i in range(len(circles)):
# get one
(x, y, r) = circles[i]
# draw the circle in the output image, then draw a rectangle corresponding to the center of the circle
cv2.rectangle(drawn_image, (x - r, y - r), (x + r, y + r), (255, 0, 0), 2)
# bbox
xmin = x-r
xmax = x+r
ymin = y-r
ymax = y+r
# get the above rectangle as ROI
screw_roi = img_raw[ymin:ymax,xmin:xmax]
#can't go on with the empty or corrupt roi
if (screw_roi.size == 0):
break
# bbox
xmin = xmin/ratiox
xmax = xmax/ratiox
ymin = ymin/ratioy
ymax = ymax/ratioy
# predictions
# integreated prediction --> same as work
pred_val=0
for model,dim in INTEGRATED:
# imgae
data = cv2.resize(screw_roi,(dim,dim))
data = data.astype('float32')/255.0
tensor = np.expand_dims(data,axis=0)
pred=model.predict(tensor)[0]
pred_val+=pred[1]
score=pred_val/2
if score>score_thresh:
cv2.circle(drawn_image, (int(x), int(y)), int(r), (0, 255, 0), 5) #green
# evaluation
line_out = img_iden.split('.')[0]
line_out += ' ' + str(score) + ' ' + str(xmin) + ' ' + str(ymin)+' ' + str(xmax) + ' ' + str(ymax)
det.append(line_out)
imageio.imsave(os.path.join(pred_dir,img_iden),drawn_image)
# # Scoring
#det format: image_id score xmin ymin xmax ymax
splitlines = [x.strip().split(' ') for x in det]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric=False)
print('AP = {:.4f}'.format( ap))
|
hamzaleroi/dnn_migration | screw_classification/data_join.py | #!/usr/bin/env python
import h5py
import cv2
import argparse
import numpy as np
import os
from glob import glob
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import shutil
def create_dir(_path):
    """Create directory `_path` if it does not already exist (EAFP)."""
    try:
        os.mkdir(_path)
    except FileExistsError:
        pass
def merge_dir(_dir):
    """Copy every .jpg/.png under `_dir`/<class> into train_dir/<class>,
    renaming files with the global running counter to avoid collisions.

    Uses module globals: count, class_names, train_dir.
    """
    global count
    for class_name in tqdm(class_names):
        dest_class_dir = os.path.join(train_dir, class_name)
        create_dir(dest_class_dir)
        src_class_dir = os.path.join(_dir, class_name)
        # jpgs first, then pngs — same ordering as the original two loops.
        for ext in ('jpg', 'png'):
            for src_ipath in glob(os.path.join(src_class_dir, '*.' + ext)):
                dest_ipath = os.path.join(dest_class_dir, f"{count}.{ext}")
                shutil.copy(src_ipath, dest_ipath)
                count += 1
def get_input_args():
    """Parse command-line options.

    Returns:
        argparse.Namespace with:
            work_dir (str): directory containing the 'old' and 'new' folders
                (default: current working directory).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--work_dir', type=str, default=os.getcwd(),
                        help='Where the folders old and new')
    return parser.parse_args()
if __name__ == '__main__':
    # Merge the 'old' and 'new' class-folder trees under work_dir into a
    # single 'comb' tree with globally unique file names.
    args = get_input_args()
    work_dir = args.work_dir if os.path.exists(args.work_dir) else os.getcwd()
    new_dir = os.path.join(work_dir,'new')
    old_dir = os.path.join(work_dir,'old')
    # Class names are taken from the subfolders of 'old'.
    class_names=os.listdir(old_dir)
    print('Found Classes:',class_names)
    train_dir=os.path.join(work_dir,'comb')
    create_dir(train_dir)
    # Global counter shared by merge_dir so copied files never collide.
    count=0
    merge_dir(old_dir)
    merge_dir(new_dir)
|
hamzaleroi/dnn_migration | screw_detection/data.py | <reponame>hamzaleroi/dnn_migration
import os
import h5py
import argparse
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
import numpy as np
import cv2
from PIL import Image as imgop
import random
from sklearn.utils import shuffle
from tqdm import tqdm_notebook
from glob import glob
import imageio
import random
from tqdm.notebook import tqdm
import shutil
# standard imports
from albumentations import (Blur, Compose, HorizontalFlip, HueSaturationValue,
IAAEmboss, IAASharpen,IAAAffine,JpegCompression, OneOf,
RandomBrightness, RandomBrightnessContrast,
RandomContrast, RandomCrop, RandomGamma,
RandomRotate90, RGBShift, ShiftScaleRotate,
Transpose, VerticalFlip, ElasticTransform, GridDistortion, OpticalDistortion)
import albumentations as albu
from albumentations import Resize
'''
# from provided source kept for reference
train_datagen = ImageDataGenerator(rescale=1./255, # no need to rescale to tfrecords
rotation_range=20,width_shift_range=0.1, height_shift_range=0.1,zoom_range=[0.9,1.25],
shear_range=0.01,
horizontal_flip=True,
vertical_flip=True,
brightness_range=[0.4,1.5],
fill_mode='reflect')
'''
def readh5(d_path):
    """Return the 'data' dataset of the HDF5 file at `d_path` as a numpy array.

    Bug fix: the file handle is now closed via a context manager; the
    original opened the file and never closed it (resource leak).
    """
    with h5py.File(d_path, 'r') as h5:
        return np.array(h5['data'])
def create_dir(base_dir, ext_name):
    """Create (if needed) and return the directory base_dir/ext_name."""
    target = os.path.join(base_dir, ext_name)
    try:
        os.mkdir(target)
    except FileExistsError:
        pass
    return target
def aug():
    """Albumentations pipeline mirroring the original Keras
    ImageDataGenerator settings (see the reference comment at file top):
    flips, shift/scale/rotate, brightness, and reflect-mode shear."""
    transforms = [
        HorizontalFlip(p=0.5),                                   # applied
        VerticalFlip(p=0.5),                                     # applied
        ShiftScaleRotate(shift_limit=(0.1, 0.1),                 # width/height_shift_range=0.1
                         scale_limit=(0.9, 1.25),                # zoom_range=[0.9,1.25]
                         rotate_limit=20, p=0.5),                # rotation_range=20
        RandomBrightnessContrast(brightness_limit=(0.4, 1.5), p=0.5),  # brightness_range
        IAAAffine(shear=0.01, mode='reflect', p=0.5),            # shear_range, fill_mode
    ]
    return Compose(transforms, p=1)
def fill_missing(source, nb_needed, iden):
    """Top up `source` with `nb_needed` augmented copies of random members.

    Args:
        source (list of ndarray): existing class images.
        nb_needed (int): number of additional samples required.
        iden (str): class name, used only for the progress message.

    Returns:
        The original list when nb_needed <= 0, otherwise a new list of
        source + augmented uint8 images.
    """
    if nb_needed <= 0:
        return source
    print('Filling:', iden)
    augmented = []
    for _ in tqdm(range(nb_needed)):
        sample = random.choice(source)
        transformed = aug()(image=sample)['image']
        augmented.append(transformed.astype(np.uint8))
    return source + augmented
def _bytes_feature(value):
    """Wrap raw bytes in a tf.train.Feature (bytes_list)."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _int64_feature(value):
    """Wrap an integer in a tf.train.Feature (int64_list)."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _float_feature(value):
    """Wrap a float in a tf.train.Feature (float_list)."""
    float_list = tf.train.FloatList(value=[value])
    return tf.train.Feature(float_list=float_list)
def to_tfrecord(data, labels, save_dir, r_num):
    """Serialize paired images/labels into save_dir/<r_num>.tfrecord.

    Images are PNG-encoded bytes; labels are stored as int64.

    Args:
        data (sequence of ndarray): images for this shard.
        labels (sequence of int): matching integer class labels.
        save_dir (str): destination directory.
        r_num (int): shard number used as the file name.
    """
    tfrecord_path = os.path.join(save_dir, '{}.tfrecord'.format(r_num))
    with tf.io.TFRecordWriter(tfrecord_path) as writer:
        for img, label in zip(data, labels):
            _, img_coded = cv2.imencode('.png', img)
            # Byte conversion
            image_png_bytes = img_coded.tobytes()
            # Renamed from `data`: the original rebound its own `data`
            # parameter inside the loop, shadowing the input sequence.
            feature_dict = {'image': _bytes_feature(image_png_bytes),
                            'label': _int64_feature(label)}
            features = tf.train.Features(feature=feature_dict)
            example = tf.train.Example(features=features)
            writer.write(example.SerializeToString())
def genTFRecords(_data, _labels, save_dir):
    """Split the dataset into DATA_NUM-sized shards and write each shard as
    one tfrecord file in `save_dir` (DATA_NUM is a module global)."""
    for start in tqdm(range(0, len(_data), DATA_NUM)):
        shard_idx = start // DATA_NUM
        to_tfrecord(_data[start:start + DATA_NUM],
                    _labels[start:start + DATA_NUM],
                    save_dir, shard_idx)
def get_input_args():
    """Parse command-line options.

    Returns:
        argparse.Namespace with:
            data_location (str): directory of the class .h5 files
                (default: ./data under the current working directory).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_location', type=str,
                        default=os.path.join(os.getcwd(), 'data'),
                        help='The h5 filed location')
    return parser.parse_args()
args = get_input_args()
DIM=(64,64) # @param  stored image size
TRAIN_DATA_PER_CLASS=51200 # @param  target sample count per class (augmented up to)
EVAL_DATA=5120 # @param  number of samples held out for evaluation
class_names=['non_screw','screw']
SRC_DIR=args.data_location
# Samples per tfrecord shard.
DATA_NUM=2048
NEEDED_DATA=[]
DATA_LIST=[]
TFIDEN = 'ScrewDTF'
# Output directory layout: ./ScrewDTF/{Train,Eval}
tf_dir=create_dir(os.getcwd(),TFIDEN)
tf_train=create_dir(tf_dir,'Train')
tf_eval=create_dir(tf_dir,'Eval')
if __name__ == '__main__':
    # Pass 1: load each class's h5 file and compute how many augmented
    # samples are needed to reach TRAIN_DATA_PER_CLASS.
    for class_name in class_names:
        # class h5
        h5path=os.path.join(SRC_DIR,f"{class_name}.h5")
        # class data
        class_data=list(readh5(h5path))
        DATA_LIST.append(class_data)
        # needed data
        needed_data=TRAIN_DATA_PER_CLASS-len(class_data)
        NEEDED_DATA.append(needed_data)
        print('Class_name:{} Found Data:{} Needed:{}'.format(class_name,
                                                             len(class_data),
                                                             needed_data))
    _DATA=[]
    _LABELS=[]
    # Pass 2: augment up to the target count and attach integer labels
    # (label = index into class_names).
    for class_data,class_name,needed_data,idx in zip(DATA_LIST,class_names,NEEDED_DATA,range(len(class_names))):
        class_data=fill_missing(class_data,needed_data,class_name)
        _DATA+=class_data
        _labels=[idx for _ in range(len(class_data))]
        _LABELS+=_labels
    # Shuffle images and labels jointly, then split off a fixed eval set.
    _comb = list(zip(_DATA,_LABELS))
    random.shuffle(_comb)
    _data, _labels = zip(*_comb)
    eval_data=_data[:EVAL_DATA]
    eval_labels=_labels[:EVAL_DATA]
    train_data=_data[EVAL_DATA:]
    train_labels=_labels[EVAL_DATA:]
    print('Creating training tfrecords')
    genTFRecords(train_data, train_labels,tf_train)
    print('Creating eval tfrecords')
    genTFRecords(eval_data, eval_labels,tf_eval)
|
Jetoky/discord_bot | bot_test.py | from _pytest.mark.structures import MARK_GEN
import Mafia_and_BR
import pytest
import unittest.mock
import os
@pytest.mark.asyncio
async def test_join_b():
    """join_bunker registers a new player once and rejects duplicates."""
    ctx = unittest.mock.Mock()
    ctx.author.id = 3
    ctx.send = unittest.mock.AsyncMock()
    # Start from a clean registration list.
    Mafia_and_BR.users_bunker.clear()
    await Mafia_and_BR.join_bunker(ctx)
    ctx.send.assert_called_with('Вы зарегистрировались на игру. Ваш номер 1')
    assert Mafia_and_BR.users_bunker == [3]
    # A second join from the same user must not add a duplicate entry.
    await Mafia_and_BR.join_bunker(ctx)
    ctx.send.assert_called_with('Вы уже зарегистрированы.')
    assert Mafia_and_BR.users_bunker == [3]
@pytest.mark.asyncio
async def test_join_m():
    """join_mafia registers a new player once and rejects duplicates."""
    ctx = unittest.mock.Mock()
    ctx.author.id = 3
    ctx.send = unittest.mock.AsyncMock()
    # Start from a clean registration list.
    Mafia_and_BR.users_mafia.clear()
    await Mafia_and_BR.join_mafia(ctx)
    ctx.send.assert_called_with('Вы зарегистрировались на игру. Ваш номер 1')
    assert Mafia_and_BR.users_mafia == [3]
    # A second join from the same user must not add a duplicate entry.
    await Mafia_and_BR.join_mafia(ctx)
    ctx.send.assert_called_with('Вы уже зарегистрированы.')
    assert Mafia_and_BR.users_mafia == [3]
@pytest.mark.asyncio
async def test_roles_cl():
    """roles_clear empties the role list and reports when already empty."""
    context = unittest.mock.Mock()
    context.send = unittest.mock.AsyncMock()
    Mafia_and_BR.roles.append(3)
    # Clearing a non-empty list succeeds.
    await Mafia_and_BR.roles_clear(context)
    context.send.assert_called_with('Список ролей успешно очищен!')
    assert Mafia_and_BR.roles == []
    # Clearing again reports that the list is already empty.
    await Mafia_and_BR.roles_clear(context)
    context.send.assert_called_with('Список ролей пуст!')
    assert Mafia_and_BR.roles == []
@pytest.mark.asyncio
async def test_add_role():
    """add_role appends a named role to the role list."""
    ctx = unittest.mock.Mock()
    ctx.send = unittest.mock.AsyncMock()
    # Fix: start from a known-empty role list. The original asserted
    # roles == ['Mafia'] without clearing first, so it only passed when the
    # tests that happened to run earlier had emptied the list.
    Mafia_and_BR.roles.clear()
    name = 'Mafia'
    k = '1'
    await Mafia_and_BR.add_role(ctx, name, k)
    ctx.send.assert_called_with('Роль "Mafia" успешно добавлена!')
    assert Mafia_and_BR.roles == ['Mafia']
@pytest.mark.asyncio
async def test_del_role():
    """del_role removes an existing role and reports a missing one."""
    context = unittest.mock.Mock()
    context.send = unittest.mock.AsyncMock()
    role_name = "Mirn"
    Mafia_and_BR.roles.clear()
    Mafia_and_BR.roles.append('Mirn')
    # Deleting an existing role succeeds.
    await Mafia_and_BR.del_role(context, role_name)
    context.send.assert_called_with('Роль с именем "Mirn" успешно удалена.')
    assert Mafia_and_BR.roles == []
    # Deleting the same role again is reported as non-existent.
    await Mafia_and_BR.del_role(context, role_name)
    context.send.assert_called_with('Роль с именем "Mirn" не существует.')
    assert Mafia_and_BR.roles == []
@pytest.mark.asyncio
async def test_users_bunker_clear():
    """users_bunker_clear empties the roster and reports when already empty."""
    context = unittest.mock.Mock()
    context.author.id = 3
    context.send = unittest.mock.AsyncMock()
    Mafia_and_BR.users_bunker.append(3)
    # Clearing a non-empty roster succeeds.
    await Mafia_and_BR.users_bunker_clear(context)
    context.send.assert_called_with('Список игроков в бункер успешно очищен!')
    assert Mafia_and_BR.users_bunker == []
    # Clearing again reports that the roster is already empty.
    await Mafia_and_BR.users_bunker_clear(context)
    context.send.assert_called_with('Список игроков пуст!')
    assert Mafia_and_BR.users_bunker == []
@pytest.mark.asyncio
async def test_users_mafia_clear():
    """users_mafia_clear empties the roster and reports when already empty."""
    context = unittest.mock.Mock()
    context.author.id = 3
    context.send = unittest.mock.AsyncMock()
    Mafia_and_BR.users_mafia.append(3)
    # Clearing a non-empty roster succeeds.
    await Mafia_and_BR.users_mafia_clear(context)
    context.send.assert_called_with('Список игроков в мафию успешно очищен!')
    assert Mafia_and_BR.users_mafia == []
    # Clearing again reports that the roster is already empty.
    await Mafia_and_BR.users_mafia_clear(context)
    context.send.assert_called_with('Список игроков пуст!')
    assert Mafia_and_BR.users_mafia == []
@pytest.mark.asyncio
async def test_bunker_stop():
    """bunker_stop ends the game and clears the bunker roster."""
    ctx = unittest.mock.Mock()
    ctx.author.id = 3
    ctx.send = unittest.mock.AsyncMock()
    m = Mafia_and_BR.path
    # Fix: exist_ok=True so a re-run (or a directory left over from a
    # previous game) does not crash the test with FileExistsError.
    os.makedirs(f"{m}/bunker/", exist_ok=True)
    # Fix: start from a known roster so the test is order-independent.
    Mafia_and_BR.users_bunker.clear()
    Mafia_and_BR.users_bunker.append(3)
    await Mafia_and_BR.bunker_stop(ctx)
    ctx.send.assert_called_with('Список игроков в бункер успешно очищен!')
    assert Mafia_and_BR.users_bunker == []
@pytest.mark.asyncio
async def test_mafia_stop():
    """mafia_stop ends the game, clearing both the roster and the roles."""
    ctx = unittest.mock.Mock()
    ctx.author.id = 3
    ctx.send = unittest.mock.AsyncMock()
    m = Mafia_and_BR.path
    # Fix: exist_ok=True so a re-run (or a directory left over from a
    # previous game) does not crash the test with FileExistsError.
    os.makedirs(f"{m}/mafia/", exist_ok=True)
    # Fix: start from known state so the test is order-independent.
    Mafia_and_BR.users_mafia.clear()
    Mafia_and_BR.roles.clear()
    Mafia_and_BR.users_mafia.append(3)
    Mafia_and_BR.roles.append('Doctor')
    await Mafia_and_BR.mafia_stop(ctx)
    ctx.send.assert_called_with('Игра в мафию успешно завершена!')
    assert Mafia_and_BR.users_mafia == [] and Mafia_and_BR.roles == []
@pytest.mark.asyncio
async def test_party_bunker():
    """party_bunker lists the registered players as Discord mentions."""
    ctx = unittest.mock.Mock()
    ctx.author.id = 3
    ctx.send = unittest.mock.AsyncMock()
    # Fix: start from a known-empty roster so the expected roster is exactly
    # [3] regardless of which tests ran before (the original relied on a
    # prior test having emptied the list).
    Mafia_and_BR.users_bunker.clear()
    Mafia_and_BR.users_bunker.append(3)
    await Mafia_and_BR.party_bunker(ctx)
    ctx.send.assert_called_with("Список игроков:\n" + "\n".join(
        [f"<@{user}>" for user in Mafia_and_BR.users_bunker]))
    assert Mafia_and_BR.users_bunker == [3]
@pytest.mark.asyncio
async def test_party_mafia():
    """party_mafia lists the registered players as Discord mentions."""
    ctx = unittest.mock.Mock()
    ctx.author.id = 3
    ctx.send = unittest.mock.AsyncMock()
    # Fix 1: the file's final line carried a stray trailing "|" (merge
    # artifact) that made the module unparseable; removed.
    # Fix 2: start from a known-empty roster so the expected roster is
    # exactly [3] regardless of test ordering.
    Mafia_and_BR.users_mafia.clear()
    Mafia_and_BR.users_mafia.append(3)
    await Mafia_and_BR.party_mafia(ctx)
    ctx.send.assert_called_with("Список игроков:\n" + "\n".join(
        [f"<@{user}>" for user in Mafia_and_BR.users_mafia]))
    assert Mafia_and_BR.users_mafia == [3]
Jetoky/discord_bot | file_creator.py | <reponame>Jetoky/discord_bot
from pathlib import Path
# Working directory used as the base for all game files (bunker/, mafia/).
path = f"{Path.cwd()}"
def create():
    """Generate a random character sheet for the "Bunker" game.

    Creates (or overwrites) ``{path}/bunker/BabyFile.txt`` with one randomly
    chosen value from each category: gender, build, character trait,
    profession, health condition, hobby, phobia, inventory item and special
    ability.  All player-facing text is Russian by design.
    """
    import random
    # Professions.
    s = ("инженер ", "химик ", "строитель", "музыкант", "программист", "поп", "врач", "пожарный", "полицейский",
         "МЧСник", "уборщик", "офицант", "администратор", "икасатор", "учитель", "водитель", "космонавт", "электрик",
         "ювелир", "артист", "продавец", "инвестор")
    # Health conditions.
    # NOTE(review): one entry below is just "у вас " with no disease name —
    # looks like an accidentally incomplete item; confirm and fill in or drop.
    z = ("у вас Рахит", "у вас синдром Хрустального человека ", "у вас Булимия", "у вас Шизофриния",
         "у вас Делирий", "у вас рак", "у вас Плоскостопие", "у вас Психоз",
         "у вас СПИД", "у вас Covid-19", "у вас Гепатит-Б", "у вас Проказа", "у вас Ячмень", "у вас Альцгеймер",
         "у вас ",
         "у вас Грипп", "у вас Лешай")
    # Gender.
    p = ("мужской ", "женский ")
    # Body build.
    t = ("атлетичное", "полное", "худое")
    # Character traits.
    ch = ("экспрессивность", "впечатлительность", "жизнерадостность", "повышенная эмоциональность",
          "низкая эмоциональность", "импульсивность", "импрессивность", "неустойчивая эмоциональность",
          "целенаправленность", "решительность", "настойчивость", "неуверенность", "смелость", "дисциплинированность",
          "самостоятельность", "рассудительность", "глубина и гибкость интеллекта", "находчивость",
          "математический склад ума", "легкомысленность", "сообразительность", "любознательность", "вдумчивость",
          "жесткость", "доброта",
          "отзывчивость")
    # Hobbies.
    hu = (
        "лыжи", "пилатес", "футбол", "туризм", "компьютерные игры", "йога", "музыка", "готовка", "благотворительность",
        "бокс", "оригами", "видеосъемка", "дайвинг", "клининг", "ремонт автомобилей", "чтение", "просмотр фильмов",
        "изучение иностранных языков", "шахматы", "шашки", "кроссворды ", "настольные игры", "покер", "кубик рубика",
        "блогерство", "программирование ", "разработка сайтов", "пчеловодство", "дрессировка собак", "цветоводство",
        "астрология ", "выпечка", "виноделие", "дизайн", "аэрография", "писательская деятельность ", "ведение дневника",
        "моделирование", "гончарное дело", "вязание", "шитье", "садоводство", "тикток", "психология", "электрика")
    # Phobias.
    fs = ("Агризоофобия", "Аквафобия", "Акустикофобия", "Барофобия", "Блаттофобия", "Ботанофобия ", "Вагинофобия ",
          "Вакцинофобия", "Вермифобия ", "Вомитофобия ", "Гексакосиойгексеконтагексафобия", "Герпетофобия ",
          "Дементофобия",
          "Зоофобия", "Ипохондрия ", "Криофобия ", "Липофобия ", "Микофобия ", "Музофобия ", "Сеплофобия ",
          "Скиофобия ",
          "Спидофобия", "Термофобия ", "Уринофобия ", "Фтириофобия ", "Ценофобия ", "Электрофобия ", "Ятрофобия ",
          "Скотомафобия ", "Социофобия", "Гумафобия ")
    # Inventory items.
    i = ("Коробок спичек", "Солнечная зажигалка", "Нож", "Швейцарский нож", "Кочерга", "Кастрюля", "Горелка", "Аптечка",
         "Салфетка", "Активированный Уголь", "Камуфляжный костюм", "Фильтр для воды", "Походное одеяло", "Зубная щетка",
         "Туалетная бумага", "Медицинский спирт", "Веревка", "Клубок ниток", "Батон белого хлеба", "5л воды",
         "Фрагмент карты", "Компас", "Часы", "Фонарь", "Бензин")
    # Special abilities.
    spec1 = ("Изменение/добавление характеристик одному игроку", "Изменение характеристик всем игрокам",
             "Изменение катаклизма", "Изменения бункера", "Изменение еды в бункере", "Обмен характеристиками и кража",
             "Защита от вылета", "Защита от спец. возможностей")
    # Write one random pick per category, each on its own labelled line.
    with open(f"{path}/bunker/BabyFile.txt", "w+") as my_file:
        my_file.write("Пол - ")
        my_file.write(random.choice(p))
        my_file.write(" \nТелосложение - ")
        my_file.write(random.choice(t))
        my_file.write(" \nЧеловеческая черта - ")
        my_file.write(random.choice(ch))
        my_file.write(" \nСпециальность - ")
        my_file.write(random.choice(s))
        my_file.write(" \nЗдоровье - ")
        my_file.write(random.choice(z))
        my_file.write(" \nХобби / Увлечение - ")
        my_file.write(random.choice(hu))
        my_file.write(" \nФобия / Страх - ")
        my_file.write(random.choice(fs))
        my_file.write(" \nИнвентарь - ")
        my_file.write(random.choice(i))
        my_file.write(" \nСпец. возможность - ")
        my_file.write(random.choice(spec1))
|
paarthneekhara/cleverhans | examples/adversarial_asr/create_defense_data.py | from os import listdir
from os.path import isfile, join
import sys
import argparse
import random
def _get_file_names(audio_dir):
file_names = []
for f in listdir(audio_dir):
if f.endswith(".wav"):
file_names.append(f)
return file_names
def main():
    """Build a defense-data manifest for a directory of wav files.

    Writes a three-line output file: line 1 is the comma-joined wav file
    names found in ``--input_dir``; lines 2 and 3 both carry one randomly
    chosen target transcription per file (comma-joined, same order).
    """
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('--input_dir', type=str, required=True,
                        help='Input Dir')
    parser.add_argument('--output_file', type=str, required=False,
                        default="defense_data.txt",
                        help='Output File')
    args = parser.parse_args()
    # Strip argv down to the program name — presumably so a downstream
    # flag parser does not choke on these flags (NOTE(review): confirm).
    while len(sys.argv) > 1:
        sys.argv.pop()
    target_transcriptions = [
        "BROWSE TO EVIL DOT COM",
        "HEY GOOGLE CANCEL MY MEDICAL APPOINTMENT",
        "THIS IS AN ADVERSARIAL EXAMPLE",
        "HEY GOOGLE"
    ]
    file_names = _get_file_names(args.input_dir)
    # One random target per audio file (comprehension instead of the
    # original index-based append loop).
    transcription_list = [random.choice(target_transcriptions)
                          for _ in file_names]
    line1 = ",".join(file_names)
    # Lines 2 and 3 are identical by construction, so join once.
    line2 = ",".join(transcription_list)
    file_str = "\n".join([line1, line2, line2]) + "\n"
    with open(args.output_file, 'w') as f:
        f.write(file_str)
# Script entry point. Fix: the original final line ended with a stray "|"
# (merge artifact) that made the module unparseable.
if __name__ == '__main__':
    main()
paarthneekhara/cleverhans | examples/adversarial_asr/transcribe_lingvo.py | import tensorflow as tf
from lingvo import model_imports
from lingvo import model_registry
import numpy as np
import scipy.io.wavfile as wav
import generate_masking_threshold as generate_mask
from tool import create_features, create_inputs
import time
from lingvo.core import cluster_factory
from absl import app
import argparse
from os import listdir
from os.path import isfile, join
import json
import sys
def _get_file_names(audio_dir):
file_names = []
for f in listdir(audio_dir):
if f.endswith(".wav"):
file_names.append(f)
return file_names
def _decode_audio(audio_dir, file_name):
file_path = join(audio_dir, file_name)
sample_rate_np, audio_temp = wav.read(file_path)
audios = []
lengths = []
if max(audio_temp) < 1:
audio_np = audio_temp * 32768
else:
audio_np = audio_temp
length = len(audio_np)
audios.append(audio_np)
lengths.append(length)
lengths_freq = (np.array(lengths) // 2 + 1) // 240 * 3
max_length_freq = max(lengths_freq)
masks_freq = np.zeros([1, max_length_freq, 80])
audios_np = np.zeros([1, length])
audios_np[0, :lengths[0]] = audios[0]
masks_freq[0, :lengths_freq[0], :] = 1
return audios_np, sample_rate_np, np.array(["BROWSE TO EVIL DOT COM"]), masks_freq
def main():
    """Transcribe every wav file in each ``--dirs`` directory with Lingvo ASR.

    Restores a Librispeech WPM model from a local checkpoint, runs the
    decoder on each file, and writes a ``transcriptions.json`` mapping
    file name -> lower-cased top hypothesis into each directory.
    """
    # Local checkpoint to restore (must exist on disk).
    checkpoint = "./model/ckpt-00908156"
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('--dirs', type=str, nargs='+', required=True,
                        help='Filepath of original input audio')
    args = parser.parse_args()
    # Strip argv down to the program name — presumably so absl/TF flag
    # parsing does not see these flags (NOTE(review): confirm).
    while len(sys.argv) > 1:
        sys.argv.pop()
    with tf.device("/gpu:0"):
        tf.set_random_seed(1234)
        tfconf = tf.ConfigProto(allow_soft_placement=True)
        with tf.Session(config=tfconf) as sess:
            # Build the Librispeech 960h WPM model in eval mode, with
            # variational noise disabled and a fixed seed for determinism.
            params = model_registry.GetParams('asr.librispeech.Librispeech960Wpm', 'Test')
            params.cluster.worker.gpus_per_replica = 1
            cluster = cluster_factory.Cluster(params.cluster)
            with cluster, tf.device(cluster.GetPlacer()):
                params.vn.global_vn = False
                params.random_seed = 1234
                params.is_eval = True
                model = params.cls(params)
                task = model.GetTask()
                saver = tf.train.Saver()
                saver.restore(sess, checkpoint)
                # Graph inputs: a single-example audio batch, a target
                # string, the sample rate, and the frequency-frame mask
                # (shapes match what _decode_audio returns).
                input_tf = tf.placeholder(tf.float32, shape=[1, None])
                tgt_tf = tf.placeholder(tf.string)
                sample_rate_tf = tf.placeholder(tf.int32)
                mask_tf = tf.placeholder(tf.float32, shape=[1, None, 80])
                features = create_features(input_tf, sample_rate_tf, mask_tf)
                shape = tf.shape(features)
                inputs = create_inputs(model, features, tgt_tf, 1, mask_tf)
                metrics = task.FPropDefaultTheta(inputs)
                loss = tf.get_collection("per_loss")[0]
                # prediction
                decoded_outputs = task.Decode(inputs)
                dec_metrics_dict = task.CreateDecoderMetrics()
                for audio_dir in args.dirs:
                    file_names = _get_file_names(audio_dir)
                    transcriptions = {}
                    for fidx, file_name in enumerate(file_names):
                        audios_np, sample_rate, tgt_np, mask_freq = _decode_audio(audio_dir, file_name)
                        feed_dict={input_tf: audios_np,
                                   sample_rate_tf: sample_rate,
                                   tgt_tf: tgt_np,
                                   mask_tf: mask_freq}
                        # NOTE(review): the bare except silently skips any
                        # failure (OOM, bad wav, ...); consider narrowing.
                        try:
                            losses = sess.run(loss, feed_dict)
                            predictions = sess.run(decoded_outputs, feed_dict)
                        except:
                            print ("Error in transcribing: ", file_name)
                            continue
                        task.PostProcessDecodeOut(predictions, dec_metrics_dict)
                        # Running word-error-rate over everything decoded so far.
                        wer_value = dec_metrics_dict['wer'].value * 100.
                        transcriptions[file_name] = predictions['topk_decoded'][0, 0].lower()
                        print(fidx, "pred-{},{} : {}".format(audio_dir, file_name, predictions['topk_decoded'][0, 0]))
                    # One JSON manifest per input directory.
                    with open(join(audio_dir, "transcriptions.json"), 'w') as f:
                        f.write(json.dumps(transcriptions))
|
paarthneekhara/cleverhans | examples/adversarial_asr/lingvo/core/base_layer.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for all layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import six
import tensorflow as tf
from lingvo.core import cluster_factory
from lingvo.core import hyperparams
from lingvo.core import py_utils
class _LocalLayerStack(threading.local):
  """Thread-local stack of layers whose __init__ is currently running.

  Maintained by the `initializer` decorator so that a layer can discover
  which parent layer is constructing it.
  """

  def __init__(self):
    super(_LocalLayerStack, self).__init__()
    # Innermost layer under construction is at the end of the list.
    self.layer_stack = []


# Module-level singleton; each thread sees its own `layer_stack`.
_LAYER_STACK = _LocalLayerStack()
class Accumulator(object):
  """Step-level state that layers register to persist across a step.

  An accumulator holds a Tensor of fixed shape. Subclasses must override
  DefaultValue() and must build the default tensor on every call, so that
  defaults never leak across graphs.

  Accumulators start enabled; Disable()/Enable() calls nest. While
  disabled, GetValue() yields the default and SetValue() is silently
  ignored — bracket gradient computations that may touch accumulators
  with Disable()/Enable().

  When crossing Defun boundaries, values must be marshalled explicitly:
  read them with layer.GetAccumulatorValues(), pass them into the Defun,
  restore with layer.SetAccumulatorValues(), and do the reverse on return.
  """

  def __init__(self):
    self._value = None        # None until SetValue() stores a Tensor.
    self._disable_count = 0   # Depth of nested Disable() calls.

  @property
  def is_disabled(self):
    """Whether the accumulator is disabled."""
    return self._disable_count > 0

  def Disable(self):
    """Disables the accumulator (must be balanced with Enable)."""
    self._disable_count += 1

  def Enable(self):
    """Enables the accumulator (must balance a Disable)."""
    assert self._disable_count > 0, 'Unbalanced Accumulator Enable/Disable'
    self._disable_count -= 1

  def GetValue(self):
    """Returns the current value, or the default when disabled or unset."""
    if not self.is_disabled and self._value is not None:
      return self._value
    return self.DefaultValue()

  def SetValue(self, value):
    """Stores `value` as the current value; dropped while disabled."""
    if self.is_disabled:
      return
    self._value = value

  def Reset(self):
    """Reverts to the default value; a no-op while disabled."""
    if self.is_disabled:
      return
    self._value = None

  def DefaultValue(self):
    raise NotImplementedError('DefaultValue must be implemented')
def initializer(func):  # pylint: disable=invalid-name
  """A decorator for layer's __init__.

  Tracks layer construction on a thread-local stack so that when a layer
  creates a sub-layer inside its __init__, the parent is notified via
  _AutoAddChild and can later verify all children were properly created.

  Args:
    func: The __init__ method of `BaseLayer`'s subclasses.

  Returns:
    A decorator wrapper for layer's initializer.
  """

  def wrapper(self, *args, **kwargs):  # pylint: disable=invalid-name
    # Push back self (the current layer) to the stack.
    stack = _LAYER_STACK.layer_stack
    stack.append(self)
    try:
      # Calls the layer's real __init__ method.
      func(self, *args, **kwargs)
      # pylint: disable=protected-access
      self._CheckInvariants()
      assert id(stack[-1]) == id(self)
      if len(stack) > 1 and id(stack[-2]) != id(self):
        # Records the fact stack[-1] just created a sub-layer self.
        stack[-2]._AutoAddChild(self)
    finally:
      # Pop out self (the current layer) even if __init__ raised.
      stack.pop()

  return wrapper
def DefaultVN():
  """Returns the default variational noise configuration.

  NOTE(review): the positional args are defined by
  py_utils.VariationalNoiseParams (not visible here); this default is what
  CopyBaseParams compares against to detect an unmodified vn setting.
  """
  return py_utils.VariationalNoiseParams(None, False, False)
def RecursiveFindLayerParams(params):
  """Returns all params that define a layer.

  Walks `params` depth-first and collects every Params object whose `cls`
  is a BaseLayer subclass, including `params` itself.
  """
  found = []
  if hasattr(params, 'cls') and issubclass(params.cls, BaseLayer):
    found.append(params)
  for _, child in params.IterParams():
    if isinstance(child, hyperparams.Params):
      found += RecursiveFindLayerParams(child)
  return found
# Variable-collection name applied to layer weight variables
# (see BaseLayer._VariableCollections).
LAYER_WT = 'layer_weight_variable'
class BaseLayer(object):
"""Base class for all the layer object."""
# Set to an inference driver name if this is an inference specialization
# class.
_INFERENCE_DRIVER_NAME = None
@classmethod
def Params(cls):
"""Returns the layer params."""
p = hyperparams.Params()
p.Define('cls', cls, 'Cls that this param object is associated with.')
p.Define('inference_driver_name', cls._INFERENCE_DRIVER_NAME,
'Name of the inference driver used to construct this layer.')
p.Define('name', '', 'Name of this layer object.')
p.Define('dtype', tf.float32, 'Datatype to use.')
# None value will make FProp use dtype instead of fprop_dtype.
# TODO(lepikhin): all @function.Defun should use p.fprop_dtype if it is set.
p.Define('fprop_dtype', None, 'Activations datatype to use.')
p.Define(
'random_seed', None, 'Random seed for deterministic unittests. This '
'is inherited by child layers if they do not set a random_seed.')
p.Define('vn', DefaultVN(), 'How variational noise should be applied.')
p.Define('params_init', py_utils.DefaultParamInit(),
'How params should be initialized.')
# is_eval is used to generate graph for eval purpose, typically
# the eval graph is forward pass of training graph without
# regularization, e.g. dropout.
p.Define('is_eval', None, 'True if in eval mode.')
# In addition to is_eval, also makes additional alterations for graphs
# being used for inference.
p.Define('is_inference', None, 'True if in inference mode.')
# In addition to is_eval/is_inference, indicate that the inference graph is
# for a single step.
p.Define('per_step_infer', False, 'True if in per-step inference mode.')
p.Define(
'allow_implicit_capture', None,
'When using Defuns, code often asserts that the Defun does not '
'capture undeclared inputs. This eliminates a source of bugs '
'at the expense of making some kinds of models or utilities '
'hard/impossible to use. Setting this to True/False (versus None) '
'causes the setting to apply to this layer and its children.')
# DEPRECATED params
p.Define('add_summary', True, 'DEPRECATED. Moved to Cluster.')
return p
@staticmethod
def CopyBaseParams(from_params, to_params):
"""Copies BaseLayer params from `from_params` to `to_params`."""
assert issubclass(from_params.cls, BaseLayer)
assert issubclass(to_params.cls, BaseLayer)
# Copy-over the BaseLayer params.
if to_params.dtype == tf.float32:
to_params.dtype = from_params.dtype
if from_params.fprop_dtype is not None:
to_params.fprop_dtype = from_params.fprop_dtype
if to_params.random_seed is None:
to_params.random_seed = from_params.random_seed
if to_params.is_eval is None:
to_params.is_eval = from_params.is_eval
if to_params.is_inference is None:
to_params.is_inference = from_params.is_inference
if to_params.allow_implicit_capture is None:
to_params.allow_implicit_capture = from_params.allow_implicit_capture
# Only copy from base when vn config is using the default setting.
if to_params.vn == DefaultVN():
to_params.vn = from_params.vn.Copy()
# TODO(rpang): derive to_params.params_init.seed from
# from_params.params_init.seed if it is specified in 'from_params' and not
# in 'to_params'.
if py_utils.IsDefaultParamInit(to_params.params_init):
# Copy over params_init as well.
to_params.params_init = from_params.params_init.Copy()
return to_params
def __init__(self, params):
"""Layer constructor.
Sub-classes of BaseLayer should decorator its __init__ with
@base_layer.initializer
Args:
params: A params used to construct this layer.
"""
assert params.name, (
'Layer params for %s must have a "name"' % self.__class__.__name__)
self._params = params.Copy()
tf.logging.debug('Creating layer %s with params: \n %s \n',
self.__class__.__name__, str(params))
# Vars created by this layer.
self._private_vars = py_utils.NestedMap()
# Theta derived from this layer's vars.
self._private_theta = py_utils.NestedMap()
# Child layers created by this layer through CreateChild/CreateChildren.
self._private_children = py_utils.NestedMap()
# Child layers created by this layer. A well-formed layer should
# have self._private_children equals to self._children_list. I.e.,
# all child layers are created using CreateChild/CreateChildren.
self._children_list = []
# Extra theta's not directly correpond to any underlying vars. For example,
# the concatenated sharded variables.
self._extra_theta = py_utils.NestedMap()
# All registered accumulators.
self._private_accumulators = py_utils.NestedMap()
# Layer-private functions. Add with AddFunction.
self._private_fns = dict()
def FPropDefaultTheta(self, *args, **kwargs):
"""Calls `FProp`."""
return self.FProp(self.theta, *args, **kwargs)
def FProp(self, theta, *args, **kwargs):
"""Forward propagation.
The central interface that subclasses should implement. The caller
calls `FProp` with a `theta` dictionary. E.g.::
foo = InstanceOfASubClassOfFoo(params)
y = foo.FProp(foo.theta, x)
The implementation of `FProp()` computes a function given
the theta and the inputs. E.g.::
subs = self.children
inputs = args[0]
a0 = subs.linear.FProp(theta.linear, inputs)
a1 = subs.softmax.FProp(theta.softmax, a0)
# The same layer applied twice.
a2 = subs.linear.FProp(theta.linear, a1)
return a2
Args:
theta: A `.NestedMap` object containing weights' values of this
layer and its children layers.
*args: List args.
**kwargs: Keyward args.
"""
del theta
del args
del kwargs
raise NotImplementedError('Abstract method of %s' % self)
@classmethod
def FPropMeta(cls, params, *args, **kwargs):
"""Returns metadata about the `FProp` computation for this layer.
**Experimental feature.**
Don't use or depend on it without consulting Lingvo authors.
E.g.::
p = SomeComplexLayer.Params()
meta = p.cls.FPropMeta(p, tf.TensorShape([128, 20, 50, 32]))
`meta.flops` gives an estimate count of floating point operations done by
one `FProp` given an input tensor of shape [128, 20, 50, 32].
`meta.out_shapes` is a tuple of tensor shapes, which tells you what shape
of tensors this layer will return.
Args:
params: The param of a layer of this layer type.
*args: Corresponds to FProp with Tensors replaced by `TensorShape`.
**kwargs: Corresponds to FProp with Tensors replaced by `TensorShape`.
Returns:
A `.NestedMap` with
- flops - The estimated number of floating point operations incurred by
this fprop.
- out_shapes - A tuple of `tf.TensorShape`. I.e., `out_shapes[i]`
represents the shape of the `i`-th returned tensor of the fprop.
"""
raise NotImplementedError('FPropMeta of %s' % cls)
@property
def params(self):
"""Returns the params upon which this layer is built."""
return self._params
@property
def cluster(self):
"""Returns the current cluster configuration."""
return cluster_factory.Current()
@property
def children(self):
"""Returns children layers of this layer in a `.NestedMap`."""
return self._private_children
def __getattr__(self, name):
"""Returns the child layer of the given name."""
if name in self._private_children:
return self._private_children[name]
elif (hasattr(type(self), name) and
isinstance(getattr(type(self), name), property)):
# There was an AttributeError raised by a property getter.
# Call property getter again directly to raise the same error.
return getattr(type(self), name).fget(self)
else:
raise AttributeError('%s is not a sub-layer of %s.' % (name, self))
@property
def vars(self):
"""Returns variables of this layer and its children in a `.NestedMap`."""
ret = self._private_children.Transform(lambda x: x.vars)
for k in self._private_vars.keys():
ret[k] = self._private_vars[k]
return ret
@property
def theta(self):
"""Returns theta of this layer and its children in a `.NestedMap`."""
ret = self._private_children.Transform(lambda x: x.theta)
should_cast = (
self._params.fprop_dtype is not None and
self._params.fprop_dtype != self._params.dtype)
if should_cast:
def _DoCast(x, fprop_dtype):
if x.dtype != fprop_dtype:
return tf.cast(x, fprop_dtype)
else:
return x
private_theta = self._private_theta.Transform(
lambda x: _DoCast(x, self._params.fprop_dtype))
else:
private_theta = self._private_theta
for k in private_theta.keys():
ret[k] = private_theta[k]
return ret
@property
def accumulators(self):
"""Returns `.NestedMap` of `Accumulator` instances for this and children."""
ret = self._private_children.Transform(lambda x: x.accumulators)
for k, acc in six.iteritems(self._private_accumulators):
ret[k] = acc
return ret
@property
def fns(self):
"""Returns a read-only view of layer local functions.
Functions can be accessed by index (['name']) or attribute notation
(`fns.foo`).
Returns:
Read-only attribute accessible dict view of the layer's function library.
"""
return py_utils.ReadOnlyAttrDictView(self._private_fns)
def AddFunction(self, name, f, replace=False):
"""Adds a function to the layer's `fns` collection.
This should be used to add op-like functions specific to the operation
of the layer and its children. Such functions should be added in `__init__`
and may either be raw python functions or TensorFlow Defuns. This
facility is just a mechanism for organizing them and having basic checks
on name collisions.
Args:
name: The function name. It will be accessible as `self.fns.{name}`.
f: The function body.
replace: Whether to replace an existing function (default False).
Raises:
AttributeError: If the function already exists and replace == False.
"""
py_utils.NestedMap.CheckKey(name)
if not replace:
if name in self._private_fns:
raise AttributeError(
'Function "%s" is already defined on layer "%r"' % (name, self))
self._private_fns[name] = f
def _CheckName(self, name):
"""Asserts name's validity."""
py_utils.NestedMap.CheckKey(name)
assert name not in self._private_vars, (
'%s exists in vars, %s' % (name, list(self._private_vars.keys())))
assert name not in self._private_theta, (
'%s exists in theta, %s' % (name, list(self._private_theta.keys())))
assert name not in self._private_children, ('%s exists in children, %s' % (
name, list(self._private_children.keys())))
assert name not in self._private_accumulators, (
'%s exists in global_accumulator: %s' %
(name, self._private_accumulators.keys()))
def _VariableCollections(self):
return [LAYER_WT, '%s_vars' % (self.__class__.__name__)]
def RegisterAccumulator(self, name, acc):
"""Registers an accumulator for this layer.
An accumulator is used to propagate some state to a future point,
where it is acted on (typically as part of `PostTrainingStepUpdate`). This
mechanism allows for arbitrarily nested parts of a model to export state
back to the global scope. Accumulators must be specially handled
when crossing into `Defun` or recurrent scopes. By abstracting the
mechanism, it allows all such state to be handled uniformly and generically.
Example (typically from `__init__`)::
class MyAccumulator(base_layer.Accumulator):
def DefaultValue(self):
# [count, min, max]
return tf.convert_to_tensor([0.0, 0.0, 0.0])
def Update(self, state1):
state0 = self.GetValue()
self.SetValue(tf.stack([
state0[0] + state1[0],
tf.minimum(state0[1], state1[1]),
tf.maximum(state0[2], state1[2])]))
self.RegisterAccumulator('mytracker', acc)
Later, access the current value and update it::
acc = self.accumulators.mytracker
acc.Update(tf.convert_to_tensor([1.0, batch_min, batch_max]))
Then, typically in `PostTrainingStepUpdate`::
acc = self.accumulator.mytracker.GetValue()
acc_value = acc.GetValue()
# Do something with the value.
acc.Reset()
Args:
name: The accumulator name. Shares a namespace with children, vars and
extra theta.
acc: An `Accumulator` instance.
"""
self._CheckName(name)
self._private_accumulators[name] = acc
def GetAccumulatorValues(self):
"""Recursively gets values of all accumulators.
Returns:
`.NestedMap` of Tensors for each registered accumulator.
"""
return self.accumulators.Transform(lambda acc: acc.GetValue())
def SetAccumulatorValues(self, new_values_nmap):
"""Recursively sets the values of all accumulators from a map.
Args:
new_values_nmap: `.NestedMap` of accumulator name:Tensor.
"""
accumulator_list = self.accumulators.Flatten()
value_list = new_values_nmap.Flatten()
for acc, value in zip(accumulator_list, value_list):
acc.SetValue(value)
def CreateVariable(self, name, var_params, theta_fn=None, *args, **kwargs):
"""Create a variable of this layer according to the parameter `var_params`.
E.g.::
def __init__(self, ...): # A layer's constructor
self.CreateVariable(
'weight', py_utils.WeightParams(shape=[100, 100]))
`theta_fn` is used to apply a simple transformation on the created
variable's value before used by the forward computation. E.g., to
add the global variational noise according to this layer's
parameter, one can do::
def __init__(self, ...): # A layer's constructor
self.CreateVariable(
name='weight',
var_params=py_utils.WeightParams(shape=[100, 100]),
theta_fn=self.AddGlobalVN)
Args:
name: Variable name which is used as the key into vars/theta.
var_params: `Params` used to create the variable.
theta_fn: A python function that takes a variable's value and returns a
new value to be used later for computation. Its signature must be
(tf.Tensor) -> (tf.Tensor).
*args: List of args passed to `.py_utils.CreateVariable`.
**kwargs: Keyword args passed to `.py_utils.CreateVariable`.
"""
self._CheckName(name)
value, var = py_utils.CreateVariable(name, var_params, *args, **kwargs)
self._private_vars[name] = var
if theta_fn is not None:
value = theta_fn(value)
self._private_theta[name] = value
def AddExtraTheta(self, theta_name, theta_value):
"""Add extra `theta` that doesn't directly correspond to `vars`."""
self._CheckName(theta_name)
self._private_theta[theta_name] = theta_value
self._extra_theta[theta_name] = theta_value
def AddGlobalVN(self, value):
return py_utils.AddGlobalVN(self.params, value)
def CreateChild(self, name, params):
"""Create a sub layer.
The created sub layer can be accessed by `name`. E.g.::
self.CreateChild('foo', ...)
self.foo.FProp...
or::
self.children['foo'].Fprop...
self.children.foo.Fprop...
Args:
name: Sub layer name which is used as the key into vars/theta.
params: `Hyperparams` object to instantiate a layer.
"""
self._CheckName(name)
if not params.name:
params.name = name
p = self.CopyBaseParams(self.params, params.Copy())
child = p.cls(p)
self._private_children[name] = child
def CreateChildren(self, name, params_list):
"""Create a list of sub layers.
The created sub layer list can be accessed by `name`. E.g.::
self.CreateChildren('foo', ...)
self.foo[10].FProp...
or::
self.children['foo'][10].Fprop...
self.children.foo[10].Fprop...
Args:
name: The name for the sub layers, which is used as the key
into vars/theta.
params_list: `Hyperparams` objects to instantiate a list of layers.
"""
self._CheckName(name)
def CreateChildrenHelper(params_list):
children = []
for i, p in enumerate(params_list):
if isinstance(p, list):
children.append(CreateChildrenHelper(p))
else:
p = self.CopyBaseParams(self.params, p.Copy())
if not p.name:
p.name = '%s_%d' % (name, i)
children.append(p.cls(p))
return children
self._private_children[name] = CreateChildrenHelper(params_list)
def AddChild(self, name, child):
"""Add an existing layer as a sublayer."""
assert isinstance(child, BaseLayer)
self._CheckName(name)
self._private_children[name] = child
def AddChildren(self, name, children):
"""Add existing layers as sublayers."""
for child in children:
assert isinstance(child, BaseLayer)
self._CheckName(name)
self._private_children[name] = children
def _AutoAddChild(self, child):
    """Record that a layer `child` is instantiated by this layer.

    This is a method only called by `base_layer.initializer` decorator.
    Subclasses should not call this method.

    Args:
      child: A sub-layer of this layer.
    """
    # _VerifyChildren later checks every entry here was registered via
    # CreateChild(ren)/AddChild(ren).
    self._children_list.append(child)
def _CheckInvariants(self):
    """Run all internal consistency checks for this layer."""
    self._VerifyChildren()
    self._VerifyVarsAndTheta()
def _VerifyChildren(self):
    """Verify all children created by this layer are via `CreateChild(ren)`."""

    def FindCreatedChildren(parents):
        # Flattens the (possibly nested) lists stored by CreateChildren.
        created_children = []
        for v in parents:
            if isinstance(v, (tuple, list)):
                created_children.extend(FindCreatedChildren(v))
            else:
                created_children.append(v)
        return created_children

    created_children = FindCreatedChildren(
        list(self._private_children.values()))
    for v in self._children_list:
        assert v in created_children, (
            '%s is not created by BaseLayer.CreateChild(ren) in %r.' %
            (v.params.name, self))
def _VerifyVarsAndTheta(self):
    """Verify that vars and theta have the same nested structure."""

    def MatchKeys(x, y):
        # Checks that x's keys are a subset of y's, recursing into nested
        # maps.
        assert len(x) <= len(y)
        for k in x.keys():
            assert k in y, '%s not in %s.' % (k, y)
            if isinstance(x[k], py_utils.NestedMap):
                assert isinstance(y[k], py_utils.NestedMap), '%s is not a map' % y[k]
                MatchKeys(x[k], y[k])

    # NOTE: this check can be quadratically expensive. Maybe only
    # enable this in unittests.
    MatchKeys(self.vars, self.theta)

    # Make sure whatever not in self.vars are in self._extra_theta
    for k in self.theta.keys():
        assert k in self.vars or k in self._extra_theta
def PostTrainingStepUpdate(self, global_step):
    """Returns a TF op which will be invoked at each training step.

    Subclasses of `BaseLayer` can implement this method. The method should
    return a TF op to be invoked during training after gradients are applied.

    Args:
      global_step: the global step.
    """
    # Base implementation has no updates of its own; it just groups the
    # updates of all sub-layers into a single op.
    update_ops = [
        child.PostTrainingStepUpdate(global_step)
        for child in self._private_children.Flatten()
    ]
    return tf.group(*update_ops)
|
paarthneekhara/cleverhans | examples/adversarial_asr/lingvo/core/ops/py_x_ops.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import function
# Load the custom ops, preferring the dynamically linked library; fall back
# to the statically linked module when the .so does not provide the ops.
gen_x_ops = tf.load_op_library(
    tf.resource_loader.get_path_to_datafile('x_ops.so'))

if 'assert_shape_match' not in dir(gen_x_ops):
    # Static linking:
    # pylint: disable=g-import-not-at-top
    from lingvo.core.ops import gen_x_ops
    # pylint: enable=g-import-not-at-top

# Set gen_x_ops function module to py_x_ops so sphinx generates documentation.
for v in gen_x_ops.__dict__.values():
    try:
        v.__module__ = 'lingvo.core.ops.py_x_ops'
    # Bug fix: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit. Only attribute assignment failures (non-function
    # entries such as strings) are expected here.
    except (AttributeError, TypeError):
        pass

# Re-export the generated ops under short public names.
assert_shape_match = gen_x_ops.assert_shape_match
assert_same_dim0 = gen_x_ops.assert_same_dim0
random_permutation_sequence = gen_x_ops.random_permutation_sequence

best_step = gen_x_ops.best_step

beam_search_step = gen_x_ops.beam_search_step
top_k_terminated_hyps = gen_x_ops.top_k_terminated_hyps
unpack_hyp = gen_x_ops.unpack_hyp
hyps_from_beam_search_outs = gen_x_ops.hyps_from_beam_search_outs
cached_call = gen_x_ops.cached_call

ascii_to_token_id = gen_x_ops.ascii_to_token_id
str_to_vocab_tokens = gen_x_ops.str_to_vocab_tokens
id_to_ascii = gen_x_ops.id_to_ascii
ngram_id_to_token = gen_x_ops.ngram_id_to_token
bpe_ids_to_words = gen_x_ops.bpe_ids_to_words
bpe_words_to_ids = gen_x_ops.bpe_words_to_ids
def generic_input(processor, *args, **kwargs):
    """Builds a generic input pipeline driven by `processor`.

    `processor` maps a serialized record (tf.string) to a list of output
    tensors; a plain Python callable is wrapped into a Defun automatically.
    """
    # pylint: disable=protected-access
    if not isinstance(processor, function._DefinedFunction):
        # Helper if processor is a python callable.
        processor = function.Defun(tf.string)(processor)
    out_types = [
        tf.DType(a.type) for a in processor.definition.signature.output_arg
    ]
    # The processor's last output must be tf.int32 and is stripped from the
    # declared out_types. NOTE(review): presumably it is a bucketing key
    # consumed by the op itself -- confirm against the x_ops kernel.
    assert out_types[-1] == tf.int32, ('%s is not expected.' % out_types[-1])
    return gen_x_ops.generic_input(
        processor=processor, out_types=out_types[:-1], *args, **kwargs)

# Reuse the generated op's docstring for the wrapper.
generic_input.__doc__ = gen_x_ops.generic_input.__doc__
|
drmidnightytb/Consulta-DNS | main.py | import time
import socket
from requests import get
import smtplib
import socket
import os
print('')
restart = 'S'
# Main menu loop; repeats while the answer to the final prompt is 'S' (sim).
while restart == 'S':
    # Clear the terminal: 'cls' on Windows ('nt'), 'clear' elsewhere.
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
    print('#####################')
    time.sleep(0.5)
    print('Consulta de IP por DNS')
    time.sleep(0.5)
    print('Tool by Dr Midnight')
    time.sleep(0.5)
    print('#####################\n')
    time.sleep(0.5)
    print('Iniciando o script...')
    time.sleep(2)
    print()
    print('[1] Puxar IP de Hosts')
    print('[2] Do que se trata?')
    # NOTE(review): the loop body continues past this point (the option
    # handlers and the restart prompt); indentation was lost in this copy.
def host():
    """Prompt for a hostname and print its resolved IP addresses.

    Prints the internal IP resolved via DNS for the hostname the user typed,
    and this machine's external IP as reported by api.ipify.org.

    Bug fix: the typed hostname used to be overwritten immediately with
    ``socket.gethostname()`` (the local machine's name), so the user's input
    was silently ignored. The input is now actually resolved; the local
    hostname is only used as a fallback when nothing is typed.
    """
    print('')
    print('Olá!')
    time.sleep(0.3)
    print('Aqui, você pode pegar endereçamentos de específicos hostnames.\n')
    time.sleep(0.5)
    hostname = input('Consulte uma DNS: ')
    if not hostname:
        # Fall back to this machine's own hostname on empty input.
        hostname = socket.gethostname()
    intern = socket.gethostbyname(hostname)
    extern = get('https://api.ipify.org').text
    print()
    print(f'Host: {hostname}')
    print(f'IP Interno: {intern}')
    print(f'IP Externo: {extern}')
# NOTE(review): shadows the builtin `help`; kept because the menu dispatch
# below calls it by this name.
def help():
    """Print an explanation (in Portuguese) of IP/DNS lookups and their uses."""
    print('###########################################################################################################\n')
    print('O QUE É?\n')
    print('O IP é o seu endereço na internet. É por ele que o seu computador se comunica com outros computadores.\n'
          ' Ele pode ser estático (não muda) ou dinâmico (muda com o tempo) e é atribuído pela sua operadora de internet.\n')
    print('Todo Site possui um local o qual recebe Host, ou seja, é Hospedado de uma máquina.')
    print('Quando consultamos uma DNS através da Hostname, estamos checando os dados do Servidor que está fornecendo Host\n')
    print('###########################################################################################################\n')
    print('PARA QUE É ÚTIL?\n')
    print('Com o IP de Host de um determinado Site em mãos, podemos não só apenas saber a geo localização, a qual\n'
          ' o servidor está sendo mantido, como também podemos usar disto para outras coisas\n')
    print('Se uma pessoa cometer algum crime virtual a polícia pode descobrir o endereço verdadeiro do criminoso\n'
          'através do IP procurando a operadora de internet que vai consultar o banco de dados deles onde estão\n'
          'listados todos os clientes e horários mostrando quem usava qual IP e quando. Então se por acaso,'
          ' acabar\nsendo vítima de golpe através de alguma "http", esta ferramenta o ajudará na busca do responsável.\n')
    print('Além disto existem diversos atributos para a área de Pen Tester, tais como:')
    print('--> Invasão de computadores via exploit, que é alguma falha específica presente em algum software de uma máquina'
          ' ligada a rede.')
    print('--> Ataques de DDOS, que é derrubar uma conexão (ou torna-la instável) enviando várias requisições por segundo,'
          ' gerando\n uma sobrecarga na rede onde o dispositivo com aquele IP está conectado.\n')
    print('###########################################################################################################')
# Dispatch the chosen menu option, then ask whether to run again.
mid = input("\n>>> Escolha a opção: ")
if mid == '1':
    host()
elif mid == '2':
    help()
answer = input('\nDeseja realizar outra consulta S/N? ').strip().upper()
# Bug fix: indexing [0] on an empty answer raised IndexError; an empty
# answer now means "no".
restart = answer[0] if answer else 'N'
print('')
# Bug fix: 'cls' only exists on Windows; mirror the os.name check used when
# clearing the screen at the top of the loop.
if os.name == 'nt':
    os.system('cls')
else:
    os.system('clear')
|
june-08042/sciwing | sciwing/modules/embedders/trainable_word_embedder.py | <filename>sciwing/modules/embedders/trainable_word_embedder.py
from sciwing.modules.embedders.base_embedders import BaseEmbedder
from sciwing.data.datasets_manager import DatasetsManager
from sciwing.vocab.embedding_loader import EmbeddingLoader
import torch
import torch.nn as nn
from typing import List
from sciwing.data.line import Line
from sciwing.utils.class_nursery import ClassNursery
class TrainableWordEmbedder(nn.Module, BaseEmbedder, ClassNursery):
    """Word embedder whose embedding matrix is updated during training."""

    def __init__(
        self,
        embedding_type: str,
        datasets_manager: DatasetsManager = None,
        word_tokens_namespace: str = "tokens",
        device: torch.device = torch.device("cpu"),
    ):
        """
        This represents trainable word embeddings which are trained along with the parameters
        of the network. The embeddings in the class `WordEmbedder` are not trainable. They are
        static

        Parameters
        ----------
        embedding_type : str
            The type of embedding that you would want
        datasets_manager: DatasetsManager
            The datasets manager which is running your experiments
        word_tokens_namespace: str
            The namespace where the word tokens are stored in your data
        device: Union[torch.device, str]
            The device on which this embedder is run
        """
        super(TrainableWordEmbedder, self).__init__()
        self.embedding_type = embedding_type
        self.datasets_manager = datasets_manager
        self.word_tokens_namespace = word_tokens_namespace
        # Accept either a string like "cpu"/"cuda:0" or a torch.device.
        self.device = torch.device(device) if isinstance(device, str) else device
        self.embedding_loader = EmbeddingLoader(embedding_type=embedding_type)
        self.embedder_name = self.embedding_loader.embedding_type
        self.embedding_dimension = self.get_embedding_dimension()
        self.vocab = self.datasets_manager.namespace_to_vocab[
            self.word_tokens_namespace
        ]
        self.numericalizer = self.datasets_manager.namespace_to_numericalizer[
            self.word_tokens_namespace
        ]
        embeddings = self.embedding_loader.get_embeddings_for_vocab(self.vocab)
        # freeze=False keeps the pretrained matrix trainable, which is the
        # whole point of this class.
        self.embedding = nn.Embedding.from_pretrained(
            embeddings=embeddings, freeze=False
        )

    def forward(self, lines: List[Line]) -> torch.FloatTensor:
        """Embed a batch of lines.

        Every line is numericalized and padded to the longest line in the
        batch before the embedding lookup. Returns a float tensor;
        presumably shaped (batch, max_line_length, embedding_dimension) --
        confirm against the numericalizer's padding behaviour.
        """
        line_lengths = [len(line.tokens[self.word_tokens_namespace]) for line in lines]
        max_line_length = max(line_lengths)
        numericalized_tokens = []
        for line in lines:
            tokens = line.tokens[self.word_tokens_namespace]
            tokens = [tok.text for tok in tokens]
            tokens = self.numericalizer.numericalize_instance(instance=tokens)
            tokens = self.numericalizer.pad_instance(
                numericalized_text=tokens,
                max_length=max_line_length,
                add_start_end_token=False,
            )
            numericalized_tokens.append(tokens)
        numericalized_tokens = torch.tensor(
            numericalized_tokens, dtype=torch.long, device=self.device
        )
        embedding = self.embedding(numericalized_tokens)
        return embedding

    def get_embedding_dimension(self) -> int:
        """Size of a single word-embedding vector."""
        return self.embedding_loader.embedding_dimension
|
june-08042/sciwing | sciwing/datasets/__init__.py | <reponame>june-08042/sciwing
from sciwing.datasets.classification.text_classification_dataset import (
TextClassificationDataset,
)
from sciwing.datasets.classification.text_classification_dataset import (
TextClassificationDatasetManager,
)
from sciwing.datasets.seq_labeling.conll_dataset import CoNLLDatasetManager
|
mapbox/rio-color | rio_color/__init__.py | """rio-color"""
__version__ = "1.0.4"
|
piyush2896/Image-Resizer-Python | image-resizer.py | import argparse
import sys
import cv2
import os
parser = argparse.ArgumentParser()
parser.add_argument("--src", metavar="source",
help="Source folder of the Image files")
parser.add_argument("--dest", metavar="destinantion",
help="Destination folder of the Image files (Default: Source folder)")
parser.add_argument("--size", metavar="size", nargs='+', type=int,
help="Size of the new image(Width Height)")
parser.add_argument("--ext", metavar="extension", nargs='+',
help="""Extensions of files to be resized (Default: jpg png).
Supported extensions - Extensions Supported by OpenCV.""")
args = parser.parse_args()
def resize_image(img, size):
    """Resize the image to given size

    params:
        img - array of image pixels
        size - tuple => (Width, Height)
    """
    # INTER_AREA resampling: OpenCV's recommended interpolation for shrinking.
    return cv2.resize(img, size, interpolation=cv2.INTER_AREA)
def load_images(src, ext, size):
    """Resize the images in given source folder.

    params:
        src - Source folder
        ext - Extensions (lower-case, without dot) of images to be resized
        size - tuple => (Width, Height)

    returns:
        (list of (file name, resized image) tuples, original working dir)
    """
    cd = os.getcwd()
    os.chdir(src)
    files_ls = os.listdir()
    # Bug fix: use the real extension via os.path.splitext. The previous
    # `file.split('.')[1]` picked the wrong segment for names containing
    # more than one dot (e.g. 'my.photo.jpg' -> 'photo').
    files = [file for file in files_ls
             if os.path.splitext(file)[1][1:].lower() in ext]
    res_imgs = []
    for file in files:
        img = cv2.imread(file)
        res_imgs.append((file, resize_image(img, size)))
        print("\nResizing Image:", src + file)
    return res_imgs, cd
def save_images(cd, imgs, src, dest):
    """Write resized images into the destination folder.

    params:
        cd - working directory where the program resides
        imgs - list of (image name, pixel array) tuples
        src - source folder of the image files (used for log output only)
        dest - destination folder for the images
    """
    # Return to the original working directory first so that `dest` is
    # interpreted relative to it, not relative to `src`.
    os.chdir(cd)
    os.chdir(dest)
    for name, pixels in imgs:
        cv2.imwrite(name, pixels)
        print("\nSaving Image:", src + name,
              "To", dest + name)
# Validate the CLI arguments, applying documented defaults.
# Idiom fix: compare against None with `is`, not `==`.
if args.src is None:
    sys.exit("Path source Folder Missing")
if args.dest is None:
    # Default destination: overwrite images in the source folder.
    args.dest = args.src
if args.size is None:
    sys.exit("New image size missing.")
if args.ext is None:
    args.ext = ['jpg', 'png']

imgs, cd = load_images(args.src, args.ext, tuple(args.size))
save_images(cd, imgs, args.src, args.dest)
|
Phantom4d/dtrump-pickup-bot | main.py | <filename>main.py
import StringIO
import json
import logging
import random
import urllib
import urllib2
# for sending images
from PIL import Image
import multipart
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
# Telegram Bot API credentials; the token is redacted in this copy.
TOKEN = '<KEY>'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
class EnableStatus(ndb.Model):
    """Datastore record: whether the bot is enabled for a given chat."""
    # key name: str(chat_id)
    enabled = ndb.BooleanProperty(indexed=False, default=False)
# ================================
def setEnabled(chat_id, yes):
    """Persist the enabled/disabled flag for this chat in the datastore."""
    es = EnableStatus.get_or_insert(str(chat_id))
    es.enabled = yes
    es.put()
def getEnabled(chat_id):
    """Return whether the bot is enabled for this chat (False if unknown)."""
    record = EnableStatus.get_by_id(str(chat_id))
    return record.enabled if record else False
# ================================
class MeHandler(webapp2.RequestHandler):
    """Debug endpoint: proxies Telegram's getMe call and echoes the JSON."""
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
    """Debug endpoint: proxies Telegram's getUpdates call (polling)."""
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
    """Registers the webhook URL (from the `url` query parameter) with Telegram."""
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        url = self.request.get('url')
        if url:
            self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
    """Receives Telegram updates pushed to the webhook and replies to them."""

    def post(self):
        """Handle one Telegram update (Python 2 / App Engine code)."""
        urlfetch.set_default_fetch_deadline(60)
        body = json.loads(self.request.body)
        logging.info('request body:')
        logging.info(body)
        self.response.write(json.dumps(body))

        # NOTE(review): update_id, date and fr are extracted but unused below.
        update_id = body['update_id']
        message = body['message']
        message_id = message.get('message_id')
        date = message.get('date')
        text = message.get('text')
        fr = message.get('from')
        chat = message['chat']
        chat_id = chat['id']

        if not text:
            logging.info('no text')
            return

        def reply(msg=None, img=None):
            # Send either a text message or a photo back to the same chat,
            # as a reply to the incoming message.
            if msg:
                resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
                    'chat_id': str(chat_id),
                    'text': msg.encode('utf-8'),
                    'disable_web_page_preview': 'true',
                    'reply_to_message_id': str(message_id),
                })).read()
            elif img:
                resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
                    ('chat_id', str(chat_id)),
                    ('reply_to_message_id', str(message_id)),
                ], [
                    ('photo', 'image.jpg', img),
                ])
            else:
                logging.error('no msg or img specified')
                resp = None
            logging.info('send response:')
            logging.info(resp)

        # Command handling ('/...') first, then plain-text responses.
        if text.startswith('/'):
            if text == '/start':
                reply('Hey bby')
                reply('I\'m <NAME>.')
                reply('don\'t worry, I\'ll give you the \'D\' later. ;)')
                setEnabled(chat_id, True)
            elif text == '/stop':
                reply('Bot disabled')
                setEnabled(chat_id, False)
            elif text == '/image':
                # Build and send a procedurally generated gradient image.
                img = Image.new('RGB', (512, 512))
                base = random.randint(0, 16777216)
                pixels = [base+i*j for i in range(512) for j in range(512)] # generate sample image
                img.putdata(pixels)
                output = StringIO.StringIO()
                img.save(output, 'JPEG')
                reply(img=output.getvalue())
            else:
                reply('What command?')
        # CUSTOMIZE FROM HERE
        elif text.endswith('?'):
            reply('No no no, I\'m the one who asks questions around here.')
        else:
            if getEnabled(chat_id):
                # Pick one of the 18 canned quotes (indices 0..17) at random.
                randQuoteInt = random.randint(0, 17)
                trumpQuotes = ["You\'re really attractive. Unlike that <NAME>. <NAME> is unattractive, both inside and out. I fully understand why her former husband left her for a man - he made a good decision.",
                               "You know, it really doesn\'t matter what you write as long as you\'ve got a young, and beautiful, piece of ass.",
                               "I\'ll name all of my buildings after you if you change your name to \'Trump.\'",
                               "Unlike my bid for presidency, my interest in you isn\'t a publicity stunt.",
                               "Give me your number or else I\'ll start a nonsensical twitter war with you.",
                               "It\'s okay, I don\'t need to see your birth certificate because you\'re white.",
                               "Baby it\'s cold outside... So climate change can\'t be real.",
                               "How bout we deport those panties?",
                               "You really put the \'rump\' in \'Trump\'. ;)",
                               "Roses are red, violets are blue, I had my gardener deported after he picked these flowers for you.",
                               "Hey beautiful, I\'d buy you a drink but I don\'t want you becoming dependent on handouts.",
                               "I would love to drill you like an Alaskan oil field",
                               "Are you a debate moderator? Because you\'re making this hard.",
                               "As everybody knows, but the haters and losers refuse to acknowledge, I do not wear a wig. My hair may not be perfect, but it\'s mine.",
                               "My whole life is about winning. I don\'t lose often. I almost never lose.",
                               "Do you mind if I sit back a little? Because your breath is very bad.",
                               "You don\'t seem very smart. That\'s great though! I love the poorly educated.",
                               "Do you know why they call me Trump? Because they couldn't take their eyes away from my huge trumpet"]
                reply(trumpQuotes[randQuoteInt])
            else:
                logging.info('not enabled for chat_id {}'.format(chat_id))

        # add an ethnicity checker, then insult them with a quote
        # elif 'who are you' in text:
        #     reply('telebot starter kit, created by yukuku: https://github.com/yukuku/telebot')
        # elif 'what time' in text:
        #     reply('look at the corner of your screen!')
        # else:
        #     if getEnabled(chat_id):
        #         reply('nope.')
        #     else:
        #         logging.info('not enabled for chat_id {}'.format(chat_id))
# URL routing for the App Engine WSGI application.
app = webapp2.WSGIApplication([
    ('/me', MeHandler),
    ('/updates', GetUpdatesHandler),
    ('/set_webhook', SetWebhookHandler),
    ('/webhook', WebhookHandler),
], debug=True)
|
db434/nn-restrict | structured/fully_connected.py | from collections import OrderedDict
import torch.nn as nn
from . import wrapped
import modifiers.modules as quantisable
class Conv2d(nn.Module):
    """Simple wrapper for the default convolution class. Introduced so all
    convolution variants have a similar interface.

    Adds batch normalisation to be fair, since the other deeper modules require
    it.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 batch_norm=True,
                 **kwargs):
        super(Conv2d, self).__init__()

        # Channel numbers can be scaled by floats, so need to be rounded back
        # to integers.
        in_channels = int(in_channels)
        out_channels = int(out_channels)

        # Put the batch-norm after the convolution to match depthwise-separable,
        # for which we have a reference specifying where it should go.
        # (Cleanup: removed the commented-out OrderedDict/named-module
        # variant that previously cluttered this constructor.)
        self.conv = nn.Sequential(
            wrapped.Conv2d(in_channels=in_channels,
                           out_channels=out_channels,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=padding,
                           dilation=dilation,
                           groups=groups,
                           bias=bias,
                           **kwargs),
            # Default: no quantisation. Change the behaviour using
            # modifiers.numbers.restrict_activations().
            quantisable.Quantiser()
        )

        # TODO Be more consistent with batch norm and quantisation across the
        # different convolution types.
        # e.g. butterfly doesn't have any quantisation
        if batch_norm:
            self.conv.add_module("2", nn.BatchNorm2d(out_channels))
            self.conv.add_module("3", quantisable.Quantiser())

    def forward(self, x):
        """Apply convolution (+ optional batch-norm) with quantisation hooks."""
        return self.conv(x)
|
db434/nn-restrict | tests/test_butterfly.py | import random
import torch
from structured.butterfly import *
def _group_counts_to_group_sizes(params, group_counts):
"""Convert numbers of groups to sizes of groups."""
_, out_channels, _ = params
group_sizes = [out_channels // count for count in group_counts]
return group_sizes
def _sequence_test():
    """Test that the sequence of butterflies is sensible."""
    # Each test case is a tuple of (parameters, group_sizes).
    # Parameters is itself a tuple of (in channels, out channels, min group).
    # Group sizes are measured with respect to the output channels.
    # The cost of a butterfly is proportional to the sum all group sizes (if
    # inputs == outputs).
    test_cases = [
        # Simple, same size all the way through.
        ((64, 64, 2), [2, 2, 2, 2, 2, 2]),
        ((64, 64, 4), [4, 4, 4]),

        # Varying group sizes (channels aren't powers of two).
        ((36, 36, 2), [2, 2, 3, 3]),
        ((36, 36, 3), [3, 3, 4]),
        ((36, 36, 4), [6, 6]),  # [4,9] also works but costs more (12 vs 13)

        # More outputs than inputs.
        # First butterfly connects groups of 2 inputs to 8 outputs.
        ((16, 64, 2), [8, 2, 2, 2]),
        ((24, 96, 2), [8, 2, 2, 3]),  # [12,2,2,2] is suboptimal

        # More inputs than outputs.
        # First butterflies connect groups of 2 or 4 inputs to only 1 output.
        ((64, 32, 2), [1, 2, 2, 2, 2, 2]),
        ((64, 16, 2), [1, 2, 2, 2, 2]),
        ((96, 24, 2), [1, 2, 2, 2, 3])
    ]

    for (params, correct) in test_cases:
        group_counts = Conv2d.get_group_counts(*params)
        group_sizes = _group_counts_to_group_sizes(params, group_counts)
        if group_sizes != correct:
            print("For {0} inputs, {1} outputs and min size {2}".format(
                *params))
            print("Expected", correct)
            print("Got ", group_sizes)
            # NOTE(review): exit(1) aborts the whole process on the first
            # failure; raising AssertionError would compose better.
            exit(1)
def _sublayer_test():
"""Test a single butterfly sub-layer.
TODO: not really sure what can be tested here.
Perhaps set all weights to 1 and check that inputs propagate through to
only the expected outputs?
"""
None
def _module_test():
    """Test an entire butterfly module.

    For a range of different input and output sizes, ensure that the layer
    doesn't crash when running.

    Note: I don't yet test the output for correctness.
    """
    for i in range(100):
        inputs = random.randint(3, 100)  # Can't have butterflies smaller than 2
        outputs = random.randint(1, 100)
        kernel_size = 1
        min_butterfly_size = random.randint(2, 16)

        layer = Conv2d(inputs, outputs, kernel_size, min_butterfly_size)

        # Batch of 4, spatial size 10x10; only the shape is checked.
        in_data = torch.Tensor(4, inputs, 10, 10).normal_(mean=0, std=1)
        in_data = torch.autograd.Variable(in_data)
        out_data = layer(in_data)

        assert out_data.size() == (4, outputs, 10, 10)
def _butterfly_test():
    """Test all components of a butterfly layer."""
    _sequence_test()
    _sublayer_test()
    _module_test()
    print("All tests passed.")


if __name__ == "__main__":
    _butterfly_test()
|
db434/nn-restrict | tests/test_butterfly_old2.py | <filename>tests/test_butterfly_old2.py
import random
from structured.butterfly_old2 import *
# Each test case is a tuple of (inputs, outputs, butterfly sequence).
# The butterfly sequence is itself a list of tuples, with each element holding
# (inputs, outputs, butterfly size).
_test_cases = [
    (8, 8, [(8, 8, 8), (8, 8, 4), (8, 8, 2)]),  # inputs = outputs
    (4, 8, [(4, 8, 4), (8, 8, 2)]),  # inputs < outputs
    (8, 4, [(8, 4, 8), (4, 4, 4), (4, 4, 2)]),  # inputs > outputs
]
def _check_valid_sequence(inputs, outputs, sequence):
"""See if a butterfly sequence looks sensible.
A sensible sequence has the following properties:
* At least one butterfly in it
* Input to first butterfly = input to whole sequence
* Output from last butterfly = output from whole sequence
* Output of one layer = input of the next
* Butterfly size starts at `inputs` and divides by 2 until it reaches 2
"""
assert len(sequence) >= 1
assert inputs == sequence[0][0]
assert outputs == sequence[-1][1]
assert inputs == sequence[0][2]
assert 2 == sequence[-1][2]
last_butterfly = sequence[0]
for butterfly in sequence[1:]:
assert butterfly[0] == last_butterfly[1]
assert butterfly[2] == last_butterfly[2] // 2
last_butterfly = butterfly
def _sequence_test():
    """Ensure that the sequence of butterflies is sensible."""
    # Specify the exact sequence for a few simple layers.
    for inputs, outputs, correct in _test_cases:
        output = list(butterfly_sequence(inputs, outputs))
        for x, y in zip(correct, output):
            if x != y:
                # NOTE(review): a mismatch only prints a message; it never
                # fails the test.
                print("For {0} inputs and {1} outputs".format(inputs, outputs))
                print("Expected", correct)
                print("Got", output)
                break

    # Create some random layer configurations and check that the sequences are
    # sane.
    for i in range(100):
        log_inputs = random.randint(1, 10)
        log_outputs = random.randint(0, 2 * log_inputs)
        inputs = 2 ** log_inputs
        outputs = 2 ** log_outputs
        sequence = list(butterfly_sequence(inputs, outputs))
        _check_valid_sequence(inputs, outputs, sequence)
def _sublayer_test():
    """Test a single butterfly sub-layer.

    A single sub-layer doesn't do much. It:
     * Splits data in two on the channel dimension
     * Reorders one of these two segments (again on the channel dimension)
     * Recombines the results
    """
    layer = Conv2dSublayer(in_channels=8, out_channels=8,
                           kernel_size=1, butterfly_size=4)

    # Tensors are (batch, channels, height, width); values encode positions
    # so that splits/reorders can be checked exactly.
    data1d = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(1, 8, 1, 1)
    data2d = torch.Tensor([11, 12, 21, 22, 31, 32, 41, 42,
                           51, 52, 61, 62, 71, 72, 81, 82]).view(1, 8, 2, 1)

    # Test channel splitting. Want alternate channels in each output tensor.
    odd1d = torch.Tensor([1, 3, 5, 7]).view(1, 4, 1, 1)
    even1d = torch.Tensor([2, 4, 6, 8]).view(1, 4, 1, 1)
    odd2d = torch.Tensor([11, 12, 31, 32, 51, 52, 71, 72]).view(1, 4, 2, 1)
    even2d = torch.Tensor([21, 22, 41, 42, 61, 62, 81, 82]).view(1, 4, 2, 1)

    odd, even = layer.extract_sequences(data1d)
    assert torch.equal(odd, odd1d)
    assert torch.equal(even, even1d)

    odd, even = layer.extract_sequences(data2d)
    assert torch.equal(odd, odd2d)
    assert torch.equal(even, even2d)

    # Test channel reordering. This is basically the example from the docstring
    # of _Conv2dSublayer.swap_wings().
    reorder1d = torch.Tensor([3, 4, 1, 2, 7, 8, 5, 6]).view(1, 8, 1, 1)
    reorder2d = torch.Tensor([31, 32, 41, 42, 11, 12, 21, 22,
                              71, 72, 81, 82, 51, 52, 61, 62]).view(1, 8, 2, 1)

    reorder = layer.swap_wings(data1d)
    assert torch.equal(reorder1d, reorder)
    reorder = layer.swap_wings(data2d)
    assert torch.equal(reorder2d, reorder)
def _module_test():
    """Test an entire butterfly module.

    For a range of different input and output sizes, ensure that the layer
    doesn't crash when running.

    Restrictions:
     * outputs <= inputs ** 2

    Note: I don't yet test the output for correctness.
    """
    for i in range(100):
        inputs = random.randint(3, 100)  # Can't have butterflies smaller than 2
        outputs = random.randint(1, min(inputs ** 2, 100))
        kernel_size = 1

        layer = Conv2d(inputs, outputs, kernel_size)

        # Only the output shape is checked.
        in_data = torch.Tensor(4, inputs, 10, 10).normal_(mean=0, std=1)
        in_data = torch.autograd.Variable(in_data)
        out_data = layer(in_data)

        assert out_data.size() == (4, outputs, 10, 10)
def _matrix_test():
    """Test that the matrix representation of a single butterfly's weights
    matches the butterfly representation."""
    for i in range(100):
        log_inputs = 2  # random.randint(1,10) # TODO
        log_outputs = 2  # random.randint(max(1, log_inputs-1), log_inputs+1)
        log_butterfly = random.randint(1, max(log_inputs, log_outputs))

        inputs = 2 ** log_inputs
        outputs = 2 ** log_outputs
        butterfly = 2 ** log_butterfly

        layer = Conv2dSublayer(inputs, outputs, 1, butterfly)

        in_data = torch.Tensor(1, inputs, 1, 1).normal_(mean=0, std=1)
        in_data = torch.autograd.Variable(in_data)

        matrix = layer.weight_matrix().contiguous()
        print(matrix)
        print(list(layer.conv.parameters())[0].data.squeeze().view(inputs, -1))
        print(in_data.squeeze())
        # Reshape the dense matrix into a 1x1-kernel convolution weight.
        matrix = matrix.view(*matrix.size(), 1, 1)

        out_data = layer.forward(in_data)
        out_data_matrix = torch.nn.functional.conv2d(in_data, matrix)
        print(out_data.squeeze())
        print(out_data_matrix.squeeze())

        # Can't test for equality with floating point numbers, so find maximum
        # difference.
        # NOTE(review): `.data[0]` on a 0-dim tensor is a pre-0.4 PyTorch
        # idiom; newer versions want `.item()`.
        error = out_data - out_data_matrix
        assert error.abs().max().data[0] < 1e-6

    print("Test passed!")
def _fast_forward_test():
    """Test that GPU-optimised execution produces the same result as ordinary
    execution."""
    for i in range(100):
        inputs = random.randint(3, 100)  # Can't have butterflies smaller than 2
        outputs = random.randint(1, min(inputs ** 2, 100))
        kernel_size = 1

        layer = Conv2d(inputs, outputs, kernel_size)

        in_data = torch.Tensor(4, inputs, 10, 10).normal_(mean=0, std=1)
        in_data = torch.autograd.Variable(in_data)
        out_data = layer.forward(in_data)
        out_data_fast = layer.fast_forward(in_data)

        # Can't test for equality with floating point numbers, so find maximum
        # difference.
        # NOTE(review): `.data[0]` is a pre-0.4 PyTorch idiom (see above).
        error = out_data - out_data_fast
        assert error.abs().max().data[0] < 1e-6
def _butterfly_test():
    """Test all components of a butterfly layer."""
    _sequence_test()
    _sublayer_test()
    _module_test()

    # The GPU-optimised routine is currently incompatible with the pure
    # butterfly method. Either seems to work well in isolation, but they use
    # different weights for different purposes.
    # _matrix_test()
    # _fast_forward_test()

    print("All tests passed.")


if __name__ == "__main__":
    _butterfly_test()
|
db434/nn-restrict | locations.py | """
Details about the local filesystem.
This should be the only file that needs to be changed when using a new machine.
"""
# Directory for each dataset.
mnist = "./mnist"
cifar10 = "./cifar10"
imagenet = "./imagenet"
# Directory containing accurate networks for each dataset, to be used for
# knowledge distillation.
teachers = "./teachers"
|
db434/nn-restrict | structured/__init__.py | <reponame>db434/nn-restrict<filename>structured/__init__.py
from . import butterfly_old2
from . import butterfly_old
from . import butterfly
from . import deep_roots
from . import depthwise_butterfly
from . import depthwise_separable
from . import depthwise_shuffle
from . import fully_connected
from . import hadamard
from . import shift
from . import shuffle
__all__ = ["butterfly_old2", "deep_roots", "depthwise_separable",
"fully_connected", "hadamard", "shift", "shuffle",
"depthwise_butterfly", "depthwise_shuffle",
"butterfly_old", "butterfly"]
conv2d_types = {
'butterfly_old2': butterfly_old2.Conv2d,
'butterfly_old': butterfly_old.Conv2d,
'butterfly': butterfly.Conv2d,
'fc': fully_connected.Conv2d,
'hadamard': hadamard.Conv2d,
'roots': deep_roots.Conv2d,
'separable': depthwise_separable.Conv2d,
'separable_butterfly': depthwise_butterfly.Conv2d,
'separable_shuffle': depthwise_shuffle.Conv2d,
'shift': shift.Conv2d,
'shuffle': shuffle.Conv2d,
}
|
db434/nn-restrict | datasets/WikiText2.py | <gh_stars>0
import torch
import torchtext
import torchtext.data as data
import locations
# TODO subclass an abstract Dataset class.
# Perhaps also a TextDataset class.
class WikiText2(object):
    """Static wrapper around torchtext's WikiText-2 language-modelling data."""

    # Some sensible defaults.
    name = "WikiText-2"
    default_model = "wlm_lstm_medium"
    location = None

    # These defaults only apply to models doing one particular task. If the
    # dataset is used in a different way, these may not be appropriate.
    default_lr = 20
    default_lr_steps = [(10, 0.25), (5, 0.25), (5, 0.25)]
    default_epochs = 25
    default_sequence_length = 35

    # Preprocessed state.
    # _text describes how to interpret the text in the dataset.
    # _train, _val and _test hold different pieces of the dataset.
    _text = None
    _train = None
    _val = None
    _test = None

    @staticmethod
    def num_tokens():
        """Size of the vocabulary (including special tokens)."""
        WikiText2._init()
        return len(WikiText2._text.vocab)

    @staticmethod
    def word_to_token(word):
        """Convert a string to an identifying integer."""
        WikiText2._init()
        return WikiText2._text.vocab.stoi[word]

    @staticmethod
    def token_to_word(token):
        """
        Convert an identifying integer to a string.

        There are two special strings which may be encountered:
         * <eos> represents the end of stream
         * <unk> represents an unknown word
        """
        WikiText2._init()
        return WikiText2._text.vocab.itos[token]

    # Input channels and classes don't mean very much for text, but the
    # analogy for both of them is the number of words in the dictionary.
    @staticmethod
    def input_channels():
        return WikiText2.num_tokens()

    @staticmethod
    def num_classes():
        return WikiText2.num_tokens()

    @staticmethod
    def data_loaders(num_workers, batch_size, distributed=False):
        """Return train and validation data loaders for the WikiText-2 dataset."""
        return WikiText2.train_loader(num_workers, batch_size, distributed), \
            WikiText2.val_loader(num_workers, batch_size)

    @staticmethod
    def train_loader(num_workers, batch_size, distributed):
        # No support for distributed training yet.
        assert not distributed
        WikiText2._init()
        # Some weird notation because we have tuples of length 1.
        iterator, = data.BPTTIterator.splits(
            (WikiText2._train,), batch_size=batch_size, shuffle=True,
            bptt_len=WikiText2.default_sequence_length,
            sort_key=lambda x: len(x.text))
        return IteratorAdapter(iterator, num_workers=num_workers)

    @staticmethod
    def val_loader(num_workers, batch_size):
        WikiText2._init()
        # Some weird notation because we have tuples of length 1.
        iterator, = data.BPTTIterator.splits(
            (WikiText2._val,), batch_size=batch_size,
            bptt_len=WikiText2.default_sequence_length,
            sort_key=lambda x: len(x.text))
        return IteratorAdapter(iterator, num_workers=num_workers)

    @staticmethod
    def test_loader(num_workers, batch_size):
        WikiText2._init()
        # Some weird notation because we have tuples of length 1.
        iterator, = data.BPTTIterator.splits(
            (WikiText2._test,), batch_size=batch_size,
            bptt_len=WikiText2.default_sequence_length,
            sort_key=lambda x: len(x.text))
        return IteratorAdapter(iterator, num_workers=num_workers)

    @staticmethod
    def _init():
        """Lazily download/tokenise the dataset; safe to call repeatedly."""
        if WikiText2._text is not None:
            return

        # Set up field: describe how text will be interpreted.
        WikiText2._text = data.Field(lower=True, batch_first=True)

        # Make splits for data.
        WikiText2._train, WikiText2._val, WikiText2._test = \
            torchtext.datasets.WikiText2.splits(WikiText2._text)

        # Build the vocabulary.
        WikiText2._text.build_vocab(WikiText2._train)
class IteratorAdapter(torch.utils.data.DataLoader):
    """Adapt a torchtext ``Iterator`` to the ``DataLoader`` interface.

    Batching and shuffling are delegated entirely to the wrapped iterator;
    the superclass is only handed the underlying dataset.
    Each element yielded is a ``(text, flattened_target)`` pair.
    """
    def __init__(self, iterator, num_workers):
        # TODO: pass more information to the superclass?
        super(IteratorAdapter, self).__init__(
            iterator.dataset, num_workers=num_workers, pin_memory=True)
        self.iterator = iterator
    def __len__(self):
        return len(self.iterator)
    def __iter__(self):
        return ((batch.text, batch.target.flatten())
                for batch in iter(self.iterator))
|
db434/nn-restrict | datasets/__init__.py | from .MNIST import *
from .Cifar10 import *
from .ImageNet import *
from .WikiText2 import *
|
db434/nn-restrict | util/checkpoint.py | <gh_stars>0
import csv
import numpy
import os
import shutil
import time
import torch
from modifiers.modules import Quantisable
from util import log
"""Data written to a log file at the end of each epoch."""
log_data = ["Time", "Epoch", "Train loss", "Train top1",
"Train top5", "Val loss", "Val top1", "Val top5"]
def load(directory, model_name, model, optimizer):
    """Load the checkpoint for `model_name` from `directory`.

    The given model and optimizer must have identical "shapes" to those
    saved in the checkpoint; both are modified in place.

    Returns (start epoch, best top-1 precision).
    """
    checkpoint_file = model_name + "_check.pth.tar"
    return load_path(os.path.join(directory, checkpoint_file),
                     model, optimizer)
def load_path(path, model, optimizer):
    """Load a checkpoint from a named file.

    The given model and optimizer must have identical "shapes" to those
    saved in the checkpoint; both are modified in place.

    :param path: checkpoint file produced by `save`
    :param model: model whose parameters will be overwritten
    :param optimizer: optimizer whose state will be overwritten
    :return: (start epoch, best top-1 precision)
    :raises FileNotFoundError: if `path` does not exist
    """
    if not os.path.isfile(path):
        log.error("No checkpoint found at '{}'".format(path))
        # Raise instead of exit(1): the `exit` builtin comes from the site
        # module and isn't guaranteed to exist, and library code should not
        # terminate the interpreter on the caller's behalf.
        raise FileNotFoundError(path)
    log.info("Loading checkpoint '{}'".format(path))
    checkpoint = torch.load(path)
    start_epoch = checkpoint['epoch']
    best_prec1 = checkpoint['best_prec1']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    log.info("Loaded checkpoint '{}' (epoch {})"
             .format(path, checkpoint['epoch']))
    return start_epoch, best_prec1
def save(directory, model, model_name, optimizer, epoch, best_prec1, is_best):
    """Save a checkpoint to a file.
    directory: directory to save in
    model: trained model
    model_name: unique identifier for this model; used to build the filename
    optimizer: optimizer used
    epoch: number of epochs trained so far
    best_prec1: best top1 accuracy achieved by this network so far
    is_best: does the current model achieve best_prec1 accuracy?
    """
    # If the model computes using quantised parameters, restore the full
    # precision ones before storing.
    if isinstance(model, Quantisable):
        model.restore_parameters()
    # 'epoch' is stored incremented so that a later load() resumes at the
    # *next* epoch.
    state = {
        'epoch': epoch + 1,
        'arch': model_name,
        'state_dict': model.state_dict(),
        'best_prec1': best_prec1,
        'optimizer': optimizer.state_dict(),
    }
    checkpoint = os.path.join(directory, model_name + "_check.pth.tar")
    torch.save(state, checkpoint)
    # Keep a separate copy of the best-performing model so it survives
    # later (worse) checkpoints overwriting the "_check" file.
    if is_best:
        best = os.path.join(directory, model_name + "_best.pth.tar")
        shutil.copyfile(checkpoint, best)
def load_tensor(directory, description, tensor):
    """Load a numpy.ndarray from a file into a given tensor (in place).

    :param directory: directory containing the file
    :param description: base filename; ".npy" is appended
    :param tensor: destination tensor; must match the stored array's size
    """
    path = os.path.join(directory, description + ".npy")
    # allow_pickle=True is required because save_tensor writes via
    # ndarray.dump (a pickle): numpy >= 1.16.3 refuses to unpickle by
    # default, which made this round-trip fail.
    loaded = numpy.load(path, allow_pickle=True)
    loaded = torch.from_numpy(loaded)
    assert tensor.size() == loaded.size()
    tensor.copy_(loaded)
def save_tensor(directory, description, tensor):
    """Save a tensor to a file. Data can be reloaded using numpy.load(file)."""
    destination = os.path.join(directory, description + ".npy")
    # Unwrap Variables so only the raw tensor data is written.
    raw = tensor.data if isinstance(tensor, torch.autograd.Variable) else tensor
    raw.cpu().numpy().dump(destination)
def log_stats(directory, model_name, epoch, train_loss, train_top1, train_top5,
              val_loss, val_top1, val_top5):
    """Append the current epoch's performance to a csv log file.

    At epoch 0 the file is created (truncating any previous log) and a
    header row (`log_data`) is written first; later epochs append a row.
    """
    log_file_path = os.path.join(directory, model_name + ".csv")
    # Truncate and write the header at the start of training; append later.
    mode = "w" if epoch == 0 else "a"
    # The context manager guarantees the handle is closed even if a write
    # raises (the original leaked the open file on error).
    with open(log_file_path, mode=mode, newline="") as log_file:
        writer = csv.writer(log_file)
        if epoch == 0:
            writer.writerow(log_data)
        writer.writerow([time.time(), epoch, train_loss, train_top1,
                         train_top5, val_loss, val_top1, val_top5])
|
db434/nn-restrict | models/lenet.py | <reponame>db434/nn-restrict<gh_stars>0
import torch.nn as nn
import structured.fully_connected as fc
models = {"MNIST": ["lenet5", "mnistnet"]}
class LeNet5(nn.Module):
    """
    This doesn't quite match the original, but isn't far off:
     * Batch norm layers are included in convolution layers.
     * The second convolution layer should include some sparse connections.
    http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf

    Fully convolutional: the classifier stages use 1x1 convolutions rather
    than Linear layers. Assumes 32x32 inputs so the final feature map is
    1x1 -- TODO confirm against the dataset's transforms.
    """
    def __init__(self, input_channels=1, num_classes=10, conv2d=fc.Conv2d,
                 args=None):
        # conv2d: replacement convolution type for all but the first layer;
        #         the first layer stays fully-connected (fc.Conv2d).
        # args: parsed command-line options; must provide width_multiplier.
        super(LeNet5, self).__init__()
        self.width = args.width_multiplier
        w = self.width  # Super short name
        self.num_classes = num_classes
        # NOTE: attribute name `model` is part of the checkpoint state_dict
        # layout -- do not rename.
        self.model = nn.Sequential(
            fc.Conv2d(input_channels, 6*w, kernel_size=5, padding=0, args=args),
            nn.AvgPool2d(kernel_size=2, stride=2),
            nn.Tanh(),
            conv2d(6*w, 16*w, kernel_size=5, padding=0, args=args),
            nn.AvgPool2d(kernel_size=2, stride=2),
            nn.Tanh(),
            conv2d(16*w, 120*w, kernel_size=5, padding=0, args=args),
            nn.Tanh(),
            conv2d(120*w, 84*w, kernel_size=1, args=args),
            nn.Tanh(),
            conv2d(84*w, num_classes, kernel_size=1, args=args)
        )
    def forward(self, x):
        x = self.model(x)
        # Collapse the (batch, classes, 1, 1) output to (batch, classes).
        return x.view(x.size(0), self.num_classes)
class MnistNet(nn.Module):
    """
    This is based on LeNet5, but is much larger.

    Assumes 32x32 inputs: after the two stride-2 pools the feature maps are
    7x7, which the classifier's flattening step hard-codes -- TODO confirm
    against the dataset's transforms.
    """
    def __init__(self, input_channels=1, num_classes=10, conv2d=fc.Conv2d,
                 args=None):
        # conv2d: replacement convolution type for all but the first layer;
        #         the first layer stays fully-connected (fc.Conv2d).
        # args: parsed command-line options; must provide width_multiplier.
        super(MnistNet, self).__init__()
        self.width = args.width_multiplier
        w = self.width  # Super short name
        self.num_classes = num_classes
        # NOTE: attribute names `features`/`classifier` are part of the
        # checkpoint state_dict layout -- do not rename.
        self.features = nn.Sequential(
            fc.Conv2d(input_channels, 32*w, kernel_size=5, padding=0,
                      args=args),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True),
            conv2d(32*w, 64*w, kernel_size=5, padding=2, args=args),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.classifier = nn.Sequential(
            conv2d(64*7*7*w, 1024*w, kernel_size=1, args=args),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            conv2d(1024*w, num_classes, kernel_size=1, args=args)
        )
    def forward(self, x):
        x = self.features(x)
        # Flatten the 64x7x7 feature maps into "channels" so the classifier
        # can be expressed with 1x1 convolutions.
        x = x.view(x.size(0), 64 * 7 * 7 * self.width, 1, 1)
        x = self.classifier(x)
        return x.view(x.size(0), self.num_classes)
def lenet5(**kwargs):
    """Construct a LeNet5 model (~99% accuracy on MNIST)."""
    return LeNet5(**kwargs)
def mnistnet(**kwargs):
    """Construct a MnistNet model (~99.3% accuracy on MNIST)."""
    return MnistNet(**kwargs)
|
db434/nn-restrict | datasets/MNIST.py | import torch.utils.data as data
import torchvision.datasets
import torchvision.transforms as transforms
import locations
# TODO subclass an abstract Dataset class
class MNIST(object):
    """Static wrapper around torchvision's MNIST dataset.

    Images are resized to 32x32 so models written for that input size
    (e.g. LeNet5) can be used unchanged.
    """
    # Per-channel mean/std of the training set, used for both splits.
    _normalize = transforms.Normalize((0.1307,), (0.3081,))
    # Some sensible defaults.
    name = "MNIST"
    default_model = "lenet5"
    location = locations.mnist
    # See training.lr_schedule.py for explanation.
    default_lr = 0.05
    default_lr_steps = [(10, 0.1)]
    default_epochs = 20
    classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
    @staticmethod
    def input_channels():
        return 1
    @staticmethod
    def num_classes():
        return len(MNIST.classes)
    @staticmethod
    def data_loaders(num_workers, batch_size, distributed=False):
        """Return train and validation data loaders for the MNIST dataset."""
        return MNIST.train_loader(num_workers, batch_size, distributed), \
            MNIST.val_loader(num_workers, batch_size)
    @staticmethod
    def train_loader(num_workers, batch_size, distributed):
        """Return a loader over the training split (downloads on first use)."""
        transform = transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            MNIST._normalize,
        ])
        dataset = torchvision.datasets.MNIST(root=MNIST.location,
                                             train=True, download=True,
                                             transform=transform)
        # With a distributed sampler the sampler does the shuffling, so the
        # loader itself must not shuffle.
        if distributed:
            sampler = data.distributed.DistributedSampler(dataset)
        else:
            sampler = None
        loader = data.DataLoader(
            dataset, batch_size=batch_size, shuffle=(sampler is None),
            num_workers=num_workers, pin_memory=True, sampler=sampler)
        return loader
    @staticmethod
    def val_loader(num_workers, batch_size):
        """Return an unshuffled loader over the test split."""
        transform = transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            MNIST._normalize,
        ])
        dataset = torchvision.datasets.MNIST(root=MNIST.location,
                                             train=False, download=True,
                                             transform=transform)
        loader = data.DataLoader(
            dataset, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True)
        return loader
|
db434/nn-restrict | common.py | import os
import sys
import torch
import torch.backends.cudnn as cudnn
import models
import modifiers
import structured
import training
import util
def process(dataset):
    """
    Wrap the main task with some exception handlers. Otherwise a huge,
    unnecessary, parallel stacktrace is printed.
    :param dataset: an object from `datasets` describing the data to be used.
    """
    try:
        _process(dataset)
    except KeyboardInterrupt:
        # Note that due to a Python multiprocessing issue, the stack trace isn't
        # actually prevented here. https://discuss.pytorch.org/t/9740
        util.log.error("Process terminated by user.")
        sys.exit()
def _process(dataset):
    """
    Initialise the model according to the command line arguments, and use it
    as the user requests: evaluate, collect statistics, dump tensors,
    generate text, or train (the default).
    :param dataset: an object from `datasets` describing the data to be used.
    """
    best_top1 = 0
    args = util.args.parse_args(dataset)
    conv_type = structured.conv2d_types[args.conv_type]
    unique_name = create_unique_name(args)
    # Create model
    model = models.get_model(args.arch, distributed=args.distributed,
                             use_cuda=args.cuda,
                             input_channels=dataset.input_channels(),
                             num_classes=dataset.num_classes(),
                             conv2d=conv_type,
                             args=args)
    # Restrict the datatypes used within the model, if necessary.
    model = restrict_datatypes(model, args)
    schedule = get_lr_schedule(dataset, args)
    if args.distill:
        trainer = training.distillation.Trainer(dataset, model, schedule, args)
    else:
        trainer = training.trainer.Trainer(dataset, model, schedule, args)
    # Record whether this model is being generated from scratch (used below
    # to decide whether an epoch-0 checkpoint should be written).
    new_model = True
    # Resume from a checkpoint
    if args.undump_dir:
        # Load from numpy arrays. This does not load any optimiser state,
        # so does not support training.
        util.stats.data_restore(args.undump_dir, model)
        new_model = False
    elif args.resume or args.evaluate or args.generate:
        # Proper load from a checkpoint: an explicit file takes priority
        # over the default name derived from the arguments.
        if args.model_file:
            start_epoch, best_top1 = \
                util.checkpoint.load_path(args.model_file, model,
                                          trainer.optimiser)
        else:
            start_epoch, best_top1 = \
                util.checkpoint.load(args.save_dir, unique_name, model,
                                     trainer.optimiser)
        args.start_epoch = start_epoch
        new_model = False
    cudnn.benchmark = True
    # Quick analysis before training.
    if args.evaluate:
        trainer.validate()
    if args.stats:
        analyse(trainer, args)
        print(util.stats.computation_cost_csv(unique_name))
    if args.dump_weights or args.dump_acts or args.dump_grads:
        dump_data(trainer, args)
        return
    if args.gradients:
        collect_gradients(unique_name, args.save_dir, args.start_epoch, trainer)
    # All of the above modes are analysis-only: stop before training.
    if args.evaluate or args.stats or args.gradients:
        return
    if args.generate:
        generate_text(model, dataset, args)
        return
    # val_loss, val_top1, val_top5 = validate(val_loader, model, criterion)
    # util.checkpoint.save(args.save_dir, model, unique_name, optimizer,
    #                     -1, val_top1, True)
    # Store a checkpoint at epoch 0.
    if new_model:
        util.checkpoint.save(args.save_dir, model, unique_name,
                             trainer.optimiser, -1, 0, True)
    # Main training loop: train, validate, checkpoint, log -- every epoch.
    for epoch in range(args.start_epoch, args.epochs):
        # Train for one epoch
        train_loss, train_top1, train_top5 = \
            trainer.train_epoch(epoch)
        # Evaluate on validation set
        val_loss, val_top1, val_top5 = trainer.validate()
        prec1 = val_top1
        # Remember best prec@1 and save checkpoint
        is_best = prec1 > best_top1
        best_top1 = max(prec1, best_top1)
        util.checkpoint.save(args.save_dir, model, unique_name,
                             trainer.optimiser, epoch, best_top1, is_best)
        # Log stats.
        util.checkpoint.log_stats(args.save_dir, unique_name, epoch,
                                  train_loss, train_top1, train_top5,
                                  val_loss, val_top1, val_top5)
def create_unique_name(args):
    """
    Create a unique name for the model, given all the arguments. This name will
    form the default filename.
    :param args: arguments collected by `util.args.parse_args()`
    :return: string name for the model
    """
    conv_type = structured.conv2d_types[args.conv_type]
    unique_name = "{0}-{1}x-{2}".format(args.arch, args.width_multiplier,
                                        args.conv_type)
    # Butterfly variants are further distinguished by their minimum size.
    if conv_type == structured.butterfly.Conv2d or \
            conv_type == structured.depthwise_butterfly.Conv2d:
        unique_name += "-" + str(args.min_bfly_size)
    # Each enabled datatype restriction contributes a tagged suffix. The
    # (attribute, tag) order preserves the established filename scheme.
    restrictions = [("grad_noise", "_gn"), ("grad_precision", "_gp"),
                    ("grad_min", "_gt"), ("grad_max", "_gu"),
                    ("act_noise", "_an"), ("act_precision", "_ap"),
                    ("act_min", "_at"), ("act_max", "_au"),
                    ("weight_noise", "_wn"), ("weight_precision", "_wp"),
                    ("weight_min", "_wt"), ("weight_max", "_wu")]
    for attribute, tag in restrictions:
        value = getattr(args, attribute)
        if value > 0:
            unique_name += tag + str(value)
    return unique_name
def restrict_datatypes(model, args):
    """
    Apply restrictions to the datatypes used within the model (weights,
    activations and gradients). Restrictions include setting the precision,
    minimum, maximum, and adding noise.
    :param model: `torch.nn.Module` to be restricted.
    :param args: arguments collected by `util.args.parse_args()`
    :return: the original model with restrictions applied
    """
    # Each tensor class has four args (<prefix>_noise/_precision/_min/_max)
    # and its own restriction function; apply it only if any arg is set.
    restrictions = [("grad", modifiers.numbers.restrict_gradients),
                    ("act", modifiers.numbers.restrict_activations),
                    ("weight", modifiers.numbers.restrict_weights)]
    for prefix, restrict in restrictions:
        noise = getattr(args, prefix + "_noise")
        precision = getattr(args, prefix + "_precision")
        minimum = getattr(args, prefix + "_min")
        maximum = getattr(args, prefix + "_max")
        if noise > 0 or precision > 0 or minimum > 0 or maximum > 0:
            restrict(model,
                     noise=noise,
                     precision=precision,
                     minimum=minimum,
                     maximum=maximum)
    return model
def get_lr_schedule(dataset, args):
    """
    Determine how the learning rate will change as training progresses.
    :param dataset: an object from `datasets` describing the training data.
    :param args: arguments collected by `util.args.parse_args()`
    :return: a `training.lr_schedule.LRSchedule`
    """
    if args.use_restarts:
        # Cosine annealing with warm restarts every `restart_period` epochs.
        return training.lr_schedule.CosineRestartSchedule(
            args.lr, args.restart_period)
    # Otherwise fall back to the dataset's default stepped decay.
    return training.lr_schedule.StepSchedule(args.lr,
                                             dataset.default_lr_steps)
def analyse(trainer, args):
    """
    Train for one batch. Print details about all weights, activations and
    gradients (currently only computation-cost details are enabled).
    :param trainer: a `training.trainer.Trainer` responsible for training the
    model
    :param args: arguments collected by `util.args.parse_args()`
    """
    # Register hooks on all Modules so they print their details.
    # util.stats.data_distribution_hooks(model, weights=False,
    #                                    activations=False)
    util.stats.computation_cost_hooks(trainer.model)
    train_one_batch(trainer, args)
def dump_data(trainer, args):
    """
    Dump weights, activations and/or gradients for one batch of training.
    The hooks write tensors into args.dump_dir as the batch executes.
    :param trainer: a `training.trainer.Trainer` responsible for training the
    model
    :param args: arguments collected by `util.args.parse_args()`
    """
    assert args.dump_dir is not None
    util.stats.data_dump_hooks(trainer.model, args.dump_dir, args.dump_acts,
                               args.dump_weights, args.dump_grads)
    train_one_batch(trainer, args)
def train_one_batch(trainer, args):
    """
    Train for one minibatch and return.
    :param trainer: a `training.trainer.Trainer` responsible for training the
    model
    :param args: arguments collected by `util.args.parse_args()`
    """
    # TODO: Accessing a lot of Trainer internals here. Would be nice to
    # encapsulate more.
    # Switch to train mode
    trainer.model.train()
    # I'm not really sure how to access the training data except in a loop,
    # but this doesn't make much sense when we're only using one batch.
    # I'm pretty sure there's a better way than this.
    for data, target in trainer.train_loader:
        if args.cuda:
            # `async` became a reserved keyword in Python 3.7, so
            # `.cuda(async=True)` is now a SyntaxError; PyTorch renamed the
            # argument to `non_blocking` in 0.4.
            target = target.cuda(non_blocking=True)
        # Compute output
        output, loss = trainer.minibatch(data, target)
        # Update model
        trainer.optimiser.zero_grad()
        loss.backward()
        trainer.optimiser.step()
        break
def collect_gradients(model_name, directory, epoch, trainer):
    """
    Collect statistics about the gradients seen when training the model and
    write them to a file. Assumes that the model was saved to a file whose
    name includes the current epoch. This does not happen by default.
    :param model_name: string, used to generate file names
    :param directory: directory to find model checkpoint and store results
    :param epoch: number specifying which checkpoint to load
    :param trainer: `training.trainer.Trainer` responsible for training model
    """
    basename = os.path.join(directory, model_name + "_epoch" + str(epoch))
    checkpoint = basename + ".pth.tar"
    log = basename + ".gradients"
    # Load this epoch's checkpoint.
    assert os.path.isfile(checkpoint)
    util.checkpoint.load_path(checkpoint, trainer.model, trainer.optimiser)
    # Set learning rate to zero so the model doesn't change while we're
    # collecting statistics about it.
    trainer.set_learning_rate(0.0)
    # Set up hooks to collect data.
    util.stats.gradient_distribution_hooks(trainer.model)
    # Train for one epoch (with lr 0 this only drives data through the
    # hooks; the parameters stay fixed).
    trainer.train_epoch(epoch)
    # Output the results.
    with open(log, "w") as f:
        for line in util.stats.get_gradient_stats():
            f.write(line + "\n")
def generate_text(model, dataset, args):
    """
    Use the model to generate text. This text should look similar to the text
    that the model was trained on. Words are printed to stdout.
    :param model: trained language model to be used
    :param dataset: dataset the model was trained on (provides the vocabulary)
    :param args: arguments collected by `util.args.parse_args()`; uses
    `cuda`, `words` (number of words to emit) and `temperature` (sampling
    sharpness)
    """
    model.eval()
    num_tokens = dataset.num_tokens()
    # Seed the generator with a single random token.
    input_data = torch.randint(num_tokens, (1, 1), dtype=torch.long)
    if args.cuda:
        # `async` became a reserved keyword in Python 3.7, so
        # `.cuda(async=True)` is now a SyntaxError; PyTorch renamed the
        # argument to `non_blocking` in 0.4.
        input_data = input_data.cuda(non_blocking=True)
    with torch.no_grad():  # no tracking history
        for i in range(args.words):
            output = model(input_data)
            # Sample the next token from the temperature-scaled
            # distribution, then feed it back in as the next input.
            word_weights = output.squeeze().div(args.temperature).exp().cpu()
            word_idx = torch.multinomial(word_weights, 1)[0]
            input_data.fill_(word_idx)
            word = dataset.token_to_word(word_idx)
            print(word, end=" ")
    print()  # final new line
|
db434/nn-restrict | tests/test_shift.py | <reponame>db434/nn-restrict
import random
from structured.shift import *
def _functional_test():
"""Ensure that data is being shifted properly."""
# TODO
None
def _shift_test():
    """Test creating and running Shift modules with a variety of configurations.
    """
    for _ in range(100):
        # Kernel sizes must be odd so the shift pattern has a centre, and
        # there must be at least kernel_size**2 channels.
        kernel = 2 * random.randint(1, 5) + 1
        num_channels = kernel ** 2 + random.randint(1, 100)
        module = Shift(num_channels, kernel, random.randint(1, 4))
        batch = torch.autograd.Variable(
            torch.Tensor(4, num_channels, 10, 10))
        # The shift must preserve the input's shape.
        assert module(batch).size() == (4, num_channels, 10, 10)
def _module_test():
    """Test an entire shift convolution module.
    For a range of different input and output sizes, ensure that the layer
    doesn't crash when running.
    """
    for _ in range(100):
        in_channels = random.randint(1, 100)
        out_channels = random.randint(1, 100)
        pad = random.randint(1, 5)
        # An odd kernel of 2*pad + 1 with `pad` padding keeps the spatial
        # size at 10x10.
        conv = Conv2d(in_channels, out_channels, 2 * pad + 1, padding=pad)
        result = conv(torch.autograd.Variable(
            torch.Tensor(4, in_channels, 10, 10)))
        assert result.size() == (4, out_channels, 10, 10)
if __name__ == "__main__":
_functional_test()
_shift_test()
_module_test()
print("All tests passed.")
|
db434/nn-restrict | structured/depthwise_butterfly.py | import torch.nn as nn
from . import butterfly
from . import wrapped
from util import log
# Same interface as torch.nn.Conv2d (except groups -> depth_multiplier).
class Conv2d(nn.Module):
    """A drop-in replacement for torch.nn.Conv2d which uses a depthwise-
    separable structure. This means that a small number of filters are applied
    to each input channel, and then linear combinations of these intermediate
    results are taken to produce the output.
    Furthermore, the linear combination is implemented using a butterfly
    convolution, greatly reducing the number of parameters.
    In the limit of many channels, computations and weights are reduced in
    proportion to the filter size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 depth_multiplier=1,
                 bias=True,
                 **kwargs):
        # depth_multiplier: filters applied per input channel (replaces the
        #                   `groups` argument of torch.nn.Conv2d).
        # kwargs: forwarded to the underlying butterfly/wrapped convolutions
        #         (presumably the project-wide `args` option object -- TODO
        #         confirm).
        super(Conv2d, self).__init__()
        # Channel numbers can be scaled by floats, so need to be rounded back
        # to integers.
        in_channels = int(in_channels)
        out_channels = int(out_channels)
        # Special case: if kernel_size = 1, factorising the convolution doesn't
        # add anything.
        if kernel_size == 1:
            log.info("INFO: using default butterfly instead of separable.")
            log.info("      kernel_size = 1")
            # NOTE: attribute name `conv` is part of the checkpoint
            # state_dict layout -- do not rename.
            self.conv = nn.Sequential(
                butterfly.Conv2d(in_channels=in_channels,
                                 out_channels=out_channels,
                                 kernel_size=kernel_size,
                                 stride=stride,
                                 padding=padding,
                                 dilation=dilation,
                                 # groups = 1,
                                 bias=bias,
                                 **kwargs),
                # The butterfly layer includes its own normalisation.
                # nn.BatchNorm2d(out_channels)
            )
        else:
            # This ordering of layers matches the MobileNet paper (assuming a
            # final ReLU is added in the higher-level network definition).
            # https://arxiv.org/abs/1704.04861
            self.conv = nn.Sequential(
                # Feature extraction. Each channel has `depth_multiplier`
                # different filters applied to it, each forming a separate
                # intermediate channel.
                wrapped.Conv2d(in_channels=in_channels,
                               out_channels=in_channels * depth_multiplier,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               groups=in_channels,
                               bias=bias,
                               **kwargs),
                nn.BatchNorm2d(in_channels * depth_multiplier),
                nn.ReLU(inplace=True),
                # Cross-channel pooling.
                butterfly.Conv2d(in_channels=in_channels * depth_multiplier,
                                 out_channels=out_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0,
                                 dilation=1,
                                 groups=1,
                                 bias=bias,
                                 **kwargs),
                # The butterfly layer includes its own normalisation.
                # nn.BatchNorm2d(out_channels)
            )
    def forward(self, x):
        return self.conv(x)
|
db434/nn-restrict | modifiers/modules.py | <gh_stars>0
import torch
import torch.nn as nn
from . import functional
class Quantisable(nn.Module):
    """
    Module which quantises its weights.
    A full-precision copy of weights is retained to allow gradients to
    accumulate over multiple small steps.
    """
    def __init__(self, module):
        # module: the wrapped network whose parameters are quantised.
        # weight_transform: callable tensor -> tensor, set later.
        # backup_parameters: name -> full-precision tensor, filled lazily.
        super(Quantisable, self).__init__()
        self.module = module
        self.weight_transform = None
        self.backup_parameters = {}
    def set_weight_transform(self, transform_fn):
        """Set the callable used to quantise each parameter tensor."""
        self.weight_transform = transform_fn
    def quantise_parameters(self):
        """For all parameters contained within this module, store a backup
        copy, and quantise those used for computation."""
        if self.weight_transform is not None:
            # Store full-precision backups of all parameters, if they don't
            # already exist.
            if len(self.backup_parameters) == 0:
                for name, tensor in self.module.state_dict().items():
                    # Creating a clone means that gradients will reach both the
                    # quantised and full-precision versions of the tensor.
                    # If weights are not to be stored in full-precision,
                    # quantise here.
                    # NOTE(review): state_dict() returns a fresh OrderedDict
                    # on every call, so the assignment below does not replace
                    # the module's tensor -- the backup and the live parameter
                    # appear to remain the same object, which would let the
                    # in-place quantisation overwrite the "full-precision"
                    # copy. Verify against the PyTorch version in use.
                    self.backup_parameters[name] = tensor
                    self.module.state_dict()[name] = tensor.clone()
            for name, tensor in self.module.state_dict().items():
                # Newer versions of PyTorch include the epoch counter as a
                # parameter. We don't want to modify that!
                if tensor.dtype == torch.long:
                    continue
                quantised = tensor
                full_precision = self.backup_parameters[name]
                assert full_precision.size() == quantised.size()
                # Bypass the Variable interface so PyTorch doesn't get
                # confused by the Tensor contents changing. (Hack)
                quantised.data.copy_(functional.quantise(full_precision,
                                                         self.weight_transform))
    def restore_parameters(self):
        """Copy the full-precision backups back into the live parameters
        (called before checkpointing, see util.checkpoint.save)."""
        if len(self.backup_parameters) > 0:
            for name, tensor in self.module.state_dict().items():
                quantised = tensor
                full_precision = self.backup_parameters[name]
                assert full_precision.size() == quantised.size()
                # Bypass the Variable interface so PyTorch doesn't get
                # confused by the Tensor contents changing. (Hack)
                quantised.data.copy_(full_precision)
    def forward(self, *inputs, **kwargs):
        # Quantise before every forward pass so optimiser updates made in
        # full precision are reflected in the compute weights.
        self.quantise_parameters()
        # Could do something similar to all input tensors. Might not be
        # particularly useful if unquantised activations are then passed
        # between submodules...
        result = self.module(*inputs, **kwargs)
        # It would be nice to restore the full-precision parameters here,
        # but then there is no obvious way to use the quantised versions in
        # the backward pass. Instead, I leave the quantised versions in
        # place, and only restore full-precision parameters when storing the
        # model.
        return result
class Quantiser(nn.Module):
    """
    Module which quantises its given input data (activations), leaving
    parameters untouched.
    """
    def __init__(self, quantisation_fn=None):
        """
        Module constructor.
        :param quantisation_fn: A function which takes a tensor as input and
        returns a transformed tensor. May be None (set later via
        `set_quantisation`).
        """
        super(Quantiser, self).__init__()
        self.quantisation_fn = quantisation_fn
    def set_quantisation(self, quantisation_fn):
        """
        Change the quantisation function post-initialisation.
        :param quantisation_fn: A function which takes a tensor as input and
        returns a transformed tensor.
        """
        self.quantisation_fn = quantisation_fn
    def forward(self, x):
        return functional.quantise(x, self.quantisation_fn)
|
db434/nn-restrict | training/__init__.py | from . import distillation
from . import lr_schedule
from . import trainer
|
db434/nn-restrict | structured/hadamard.py | <reponame>db434/nn-restrict<gh_stars>0
import torch
import torch.nn as nn
from . import wrapped
from util import log
def _power_of_two(value):
"""Returns whether the given value is a power of two."""
return (value & (value - 1)) == 0
class HadamardLayer(nn.Module):
    """Split each butterfly in two, in1 and in2. For each butterfly:
    Output (out1, out2) = (in1 + in2, in1 - in2)

    One layer performs a single butterfly stage of the Hadamard transform
    over the channel dimension; `Hadamard` stacks these with halving
    butterfly sizes to form the full transform.
    """
    def __init__(self, channels, butterfly_size):
        super(HadamardLayer, self).__init__()
        assert _power_of_two(channels)
        assert _power_of_two(butterfly_size)
        self.channels = channels
        self.butterfly_size = butterfly_size
        self.butterflies = channels // butterfly_size
    def extract_wings(self, data):
        """Split `data` on the channel dimension into two tensors. Each tensor
        has alternating blocks of `butterfly_size//2` channels."""
        wing_size = self.butterfly_size // 2
        batch, channels, height, width = data.size()
        # View as (batch, butterflies, wing-pair, wing channels, h, w) so
        # alternating wing blocks can be separated on one axis.
        split = data.view(batch, self.butterflies, 2, wing_size, height, width)
        left = split[:, :, :1, :, :, :].contiguous()
        right = split[:, :, 1:, :, :, :].contiguous()
        left = left.view(batch, channels // 2, height, width)
        right = right.view(batch, channels // 2, height, width)
        return left, right
    def assemble_wings(self, wings1, wings2):
        """The reverse of `extract_wings`: take two tensors and merge them into
        one, with blocks of `butterfly_size//2` channels coming from alternate
        tensors."""
        wing_size = self.butterfly_size // 2
        batch, channels, height, width = wings1.size()
        left = wings1.view(batch, self.butterflies, 1, wing_size, height, width)
        right = wings2.view(batch, self.butterflies, 1, wing_size, height,
                            width)
        # Concatenating on the wing-pair axis restores the interleaved
        # channel layout expected by the next stage.
        result = torch.cat([left, right], dim=2).contiguous()
        return result.view(batch, channels * 2, height, width)
    def forward(self, x):
        # Separate wings.
        in1, in2 = self.extract_wings(x)
        # Main computation.
        out1, out2 = in1 + in2, in1 - in2
        # Put wings back together (currently have all out1s then all out2s, but
        # want out1, out2, out1, out2, etc.).
        return self.assemble_wings(out1, out2)
class Hadamard(nn.Module):
    """Perform a Hadamard transform across the channels of the input data.
    https://en.wikipedia.org/wiki/Hadamard_transform
    This means that effectively, a separate identical transform is being applied
    to every x,y position in the input.
    """
    def __init__(self, channels):
        super(Hadamard, self).__init__()
        # One stage per halving of the butterfly size: channels,
        # channels/2, ..., 2. `channels >> shift` reproduces repeated floor
        # division by two; `bit_length() - 1` gives the number of halvings
        # before the size drops below 2.
        stages = [HadamardLayer(channels, channels >> shift)
                  for shift in range(channels.bit_length() - 1)]
        self.mix = nn.Sequential(*stages)
    def forward(self, x):
        return self.mix(x)
class ChannelMixer(nn.Module):
    """Module which combines data from X input channels to produce X output
    channels. Uses the Hadamard transform where possible, and falls back to
    a 1x1 convolution otherwise.
    Hadamard transforms require input channels == output channels == power of 2.
    """
    def __init__(self, channels):
        super(ChannelMixer, self).__init__()
        # The Hadamard transform requires powers of two.
        self.can_use_hadamard = _power_of_two(channels)
        self.channels = channels
        if not self.can_use_hadamard:
            log.info("INFO: using 1x1 convolution instead of Hadamard "
                     "transform.")
            # Format into a single string: every other log.info call in this
            # package takes exactly one argument, and a second positional
            # argument would be dropped or raise depending on the logger.
            log.info("      channels = {}".format(channels))
            self.mix = nn.Sequential(
                wrapped.Conv2d(in_channels=channels,
                               out_channels=channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               dilation=1,
                               groups=1,
                               bias=False),
                nn.BatchNorm2d(channels),
            )
        else:
            # One scaling factor per channel.
            # TODO: pre-divide the scaling factors by sqrt(channels)?
            self.scales = nn.Parameter(torch.randn(1, channels, 1, 1))
            self.mix = Hadamard(channels)
    def forward(self, x):
        if self.can_use_hadamard:
            # Learned per-channel scaling before the (parameter-free)
            # Hadamard transform.
            scaled = x * self.scales
            return self.mix(scaled)
        else:
            return self.mix(x)
# Same interface as torch.nn.Conv2d (except groups -> depth_multiplier).
class Conv2d(nn.Module):
    """A drop-in replacement for torch.nn.Conv2d. A small number of filters are
    applied to each input channel, and then a Hadamard transform is applied to
    the results to produce the output.
    https://en.wikipedia.org/wiki/Hadamard_transform
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 depth_multiplier=1,
                 bias=True,
                 **kwargs):
        # depth_multiplier: filters applied per input channel (replaces the
        #                   `groups` argument of torch.nn.Conv2d).
        # NOTE(review): `bias` is accepted but never forwarded -- both paths
        # hard-code bias=False. Left unchanged here; confirm intent.
        super(Conv2d, self).__init__()
        # Channel numbers can be scaled by floats, so need to be rounded back
        # to integers.
        in_channels = int(in_channels)
        out_channels = int(out_channels)
        # Special case: if kernel_size = 1, there are no features to extract,
        # so just mix the channels.
        if kernel_size == 1 and in_channels == out_channels:
            self.conv = ChannelMixer(out_channels)
        else:
            # Update depth_multiplier if necessary so there are the same number
            # of intermediate channels as output channels.
            if in_channels * depth_multiplier != out_channels:
                depth_multiplier = out_channels // in_channels
            assert in_channels * depth_multiplier == out_channels
            self.conv = nn.Sequential(
                # Feature extraction.
                wrapped.Conv2d(in_channels=in_channels,
                               out_channels=in_channels * depth_multiplier,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               groups=in_channels,
                               bias=False,
                               **kwargs),
                # BUG FIX: the depthwise convolution above outputs
                # in_channels * depth_multiplier channels, but this norm was
                # constructed with only in_channels features, crashing at
                # forward time whenever depth_multiplier != 1 (cf. the same
                # pattern in depthwise_butterfly.py).
                nn.BatchNorm2d(in_channels * depth_multiplier),
                # Mix channels.
                ChannelMixer(out_channels)
            )
    def forward(self, x):
        return self.conv(x)
|
db434/nn-restrict | datasets/Cifar10.py | <gh_stars>0
import torch.utils.data as data
import torchvision.datasets
import torchvision.transforms as transforms
import locations
# TODO subclass an abstract Dataset class.
class Cifar10(object):
    """Dataset descriptor and loader factory for CIFAR-10 classification."""

    # Per-channel mean/std statistics of the CIFAR-10 training images.
    _normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                      std=[0.2023, 0.1994, 0.2010])

    # Some sensible defaults.
    name = "CIFAR-10"
    default_model = "aaronnet"
    location = locations.cifar10

    # See training.lr_schedule.py for explanation.
    default_lr = 0.1
    default_lr_steps = [(80, 0.1), (80, 0.1)]
    default_epochs = 200

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    @staticmethod
    def input_channels():
        """Number of colour channels in each input image (RGB)."""
        return 3

    @staticmethod
    def num_classes():
        """Number of target classes."""
        return len(Cifar10.classes)

    @staticmethod
    def data_loaders(num_workers, batch_size, distributed=False):
        """Return train and validation data loaders for the CIFAR-10 dataset."""
        train = Cifar10.train_loader(num_workers, batch_size, distributed)
        val = Cifar10.val_loader(num_workers, batch_size)
        return train, val

    @staticmethod
    def train_loader(num_workers, batch_size, distributed):
        """Return a loader over the (augmented, shuffled) training split."""
        augment = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            Cifar10._normalize,
        ])
        dataset = torchvision.datasets.CIFAR10(root=Cifar10.location,
                                               train=True, download=True,
                                               transform=augment)
        if distributed:
            sampler = data.distributed.DistributedSampler(dataset)
        else:
            sampler = None
        # Shuffling is handled by the distributed sampler when there is one.
        return data.DataLoader(
            dataset, batch_size=batch_size, shuffle=(sampler is None),
            num_workers=num_workers, pin_memory=True, sampler=sampler)

    @staticmethod
    def val_loader(num_workers, batch_size):
        """Return a loader over the (unaugmented, unshuffled) test split."""
        plain = transforms.Compose([
            transforms.ToTensor(),
            Cifar10._normalize,
        ])
        dataset = torchvision.datasets.CIFAR10(root=Cifar10.location,
                                               train=False, download=True,
                                               transform=plain)
        return data.DataLoader(
            dataset, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True)
|
db434/nn-restrict | structured/convert_to_conv.py | <reponame>db434/nn-restrict<filename>structured/convert_to_conv.py
"""
Wrappers to convert a range of different layer types to convolutions. This
allows the structured convolution to be applied to a wider range of networks.
"""
import math
import torch
import torch.nn as nn
from . import fully_connected as fc
import modifiers.modules as quantisable
class Linear(nn.Module):
    """Emulate torch.nn.Linear using a 1x1 convolution.

    A 1x1 convolution applied to a 1x1 "image" is exactly a matrix multiply,
    which lets the structured convolutions replace fully-connected layers.
    """

    def __init__(self, in_features, out_features, bias=True, conv=fc.Conv2d,
                 args=None):
        super(Linear, self).__init__()
        self.conv = conv(in_features, out_features, kernel_size=1, bias=bias,
                         args=args, batch_norm=False)

    def forward(self, x):
        """Apply the layer to a (batch, in_features) input and return a
        (batch, out_features) output."""
        # Conv layers expect (batch, channels, height, width), so give each
        # feature vector a 1x1 spatial extent, then flatten the result back.
        batch, features = x.size()
        as_image = x.view(batch, features, 1, 1)
        return self.conv(as_image).view(batch, -1)
class RNNBase(nn.Module):
    """Base class for recurrent layers built from custom cells.

    Mirrors the interface of torch.nn.RNNBase, but runs the recurrence as an
    explicit Python loop over time steps and layers, so each cell can be an
    arbitrary module (see RNNCell/LSTMCell/GRUCell below). Subclasses fill in
    self.recurrent_layers with one cell per layer.
    """

    def __init__(self, input_size, hidden_size, num_layers=1, bias=True,
                 batch_first=False, dropout=0, bidirectional=False):
        """Store configuration. batch_first and bidirectional are accepted for
        interface compatibility but not supported (asserted below)."""
        super(RNNBase, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        if dropout > 0:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None
        self.bidirectional = bidirectional
        # Plain list of per-layer cells; subclasses also register each cell
        # via add_module so parameters are tracked.
        self.recurrent_layers = []
        # A couple of options that I don't support yet.
        assert not batch_first
        assert not bidirectional

    def forward(self, x, hidden):
        """
        Apply this module to a given input, using the provided hidden state.
        :param x: Input data. Shape (sequence length, batch size, input size).
        :param hidden: Hidden state. Shape depends on implementation.
        :return: (output, updated hidden)
                 Output shape (sequence length, batch size, hidden size).
                 Hidden shape is identical to the input.
        """
        x = self.unflatten_input(x)
        hidden = self.unflatten_hidden(hidden)
        total_output = []
        # x is a sequence of inputs.
        for item in x:
            new_hidden = []
            current_input = item
            for pos, layer in enumerate(self.recurrent_layers):
                output = layer(current_input, hidden[pos])
                # Never apply dropout to final layer
                if (self.dropout is not None) and (pos < self.num_layers - 1):
                    # LSTM cells return (h, c) tuples; drop out both parts.
                    if type(output) is tuple:
                        output = tuple(self.dropout(t) for t in output)
                    else:
                        output = self.dropout(output)
                # NOTE(review): the dropped-out value is also stored as the
                # next-step hidden state, not just fed to the layer above —
                # cuDNN RNNs drop only the inter-layer input. Confirm this is
                # intended.
                new_hidden.append(output)
                if type(output) is tuple:
                    current_input = output[0]  # Just want h from LSTM (not c)
                else:
                    current_input = output
            total_output.append(current_input)
            hidden = new_hidden
        # Return (output, hidden)
        #  * Output is the hidden state of the final layer for each item in
        #    the input sequence
        #  * Hidden is the combined hidden state of all layers at the end of
        #    the sequence
        total_output = self.flatten_output(total_output)
        hidden = self.flatten_hidden(hidden)
        return total_output, hidden

    @classmethod
    def unflatten_input(cls, x):
        """
        Convert a single tensor into something which allows iteration over
        elements of its sequence.
        """
        # The tensor's dimensions are (sequence length, ...), so iteration works
        # by default.
        return x

    @classmethod
    def unflatten_hidden(cls, hidden):
        """
        Convert a single tensor into something which allows iteration over
        the state for each layer of the network.
        """
        # The hidden state's shape is (layer, batch, hidden unit), so default
        # iteration works.
        return hidden

    @classmethod
    def flatten_output(cls, x):
        """Stack the per-step outputs into one (seq, batch, hidden) tensor."""
        return torch.stack(x)

    @classmethod
    def flatten_hidden(cls, hidden):
        """Stack per-layer hidden states into one (layer, batch, hidden)
        tensor. Overridden by LSTM, whose state is an (h, c) pair."""
        return torch.stack(hidden)

    def reset_parameters(self):
        """Default weight initialisation for RNN networks."""
        std = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            nn.init.uniform_(weight, -std, std)
class RNN(RNNBase):
    """Vanilla recurrent network built from convolution-backed RNN cells.

    Mirrors torch.nn.RNN, subject to the RNNBase restrictions (no
    batch_first, no bidirectional).
    """

    def __init__(self, input_size, hidden_size, num_layers=1,
                 nonlinearity="tanh", bias=True, batch_first=False,
                 dropout=0, bidirectional=False, conv=fc.Conv2d, args=None):
        super(RNN, self).__init__(input_size, hidden_size,
                                  num_layers=num_layers, bias=bias,
                                  batch_first=batch_first, dropout=dropout,
                                  bidirectional=bidirectional)
        for i in range(num_layers):
            # Bug fix: only the first layer sees the raw input; deeper layers
            # consume the previous layer's hidden state (see RNNBase.forward),
            # so their cells must be sized for hidden_size inputs. Previously
            # every layer was built with input_size, which failed at runtime
            # for num_layers > 1 with input_size != hidden_size.
            layer_inputs = input_size if i == 0 else hidden_size
            name = "layer_" + str(i)
            layer = RNNCell(layer_inputs, hidden_size, bias=bias,
                            nonlinearity=nonlinearity, conv=conv, args=args)
            self.add_module(name, layer)
            self.recurrent_layers.append(layer)
        self.reset_parameters()
class LSTM(RNNBase):
    """LSTM built from convolution-backed LSTM cells.

    Mirrors torch.nn.LSTM, subject to the RNNBase restrictions (no
    batch_first, no bidirectional). The hidden state is an (h, c) pair of
    tensors, each shaped (layers, batch size, hidden units).
    """

    def __init__(self, input_size, hidden_size, num_layers=1, bias=True,
                 batch_first=False, dropout=0, bidirectional=False,
                 conv=fc.Conv2d, args=None):
        super(LSTM, self).__init__(input_size, hidden_size,
                                   num_layers=num_layers, bias=bias,
                                   batch_first=batch_first, dropout=dropout,
                                   bidirectional=bidirectional)
        for i in range(num_layers):
            # Bug fix: only the first layer sees the raw input; deeper layers
            # consume the previous layer's hidden state (see RNNBase.forward),
            # so their cells must be sized for hidden_size inputs. Previously
            # every layer was built with input_size, which failed at runtime
            # for num_layers > 1 with input_size != hidden_size.
            layer_inputs = input_size if i == 0 else hidden_size
            name = "layer_" + str(i)
            layer = LSTMCell(layer_inputs, hidden_size, bias=bias,
                             conv=conv, args=args)
            self.add_module(name, layer)
            self.recurrent_layers.append(layer)
        self.reset_parameters()

    @classmethod
    def unflatten_hidden(cls, hidden):
        """Convert an (h, c) pair of (layers, batch, units) tensors into a
        list of per-layer (h, c) tuples of (batch, units) tensors."""
        h, c = hidden
        return list(zip(h, c))

    @classmethod
    def flatten_hidden(cls, hidden):
        """Inverse of unflatten_hidden: restack the per-layer (h, c) tuples
        into a single (h, c) pair of (layers, batch, units) tensors."""
        hidden = list(zip(*hidden))
        h, c = hidden[0], hidden[1]
        return torch.stack(h), torch.stack(c)
class GRU(RNNBase):
    """GRU built from convolution-backed GRU cells.

    Mirrors torch.nn.GRU, subject to the RNNBase restrictions (no
    batch_first, no bidirectional).
    """

    def __init__(self, input_size, hidden_size, num_layers=1, bias=True,
                 batch_first=False, dropout=0, bidirectional=False,
                 conv=fc.Conv2d, args=None):
        super(GRU, self).__init__(input_size, hidden_size,
                                  num_layers=num_layers, bias=bias,
                                  batch_first=batch_first, dropout=dropout,
                                  bidirectional=bidirectional)
        for i in range(num_layers):
            # Bug fix: only the first layer sees the raw input; deeper layers
            # consume the previous layer's hidden state (see RNNBase.forward),
            # so their cells must be sized for hidden_size inputs. Previously
            # every layer was built with input_size, which failed at runtime
            # for num_layers > 1 with input_size != hidden_size.
            layer_inputs = input_size if i == 0 else hidden_size
            name = "layer_" + str(i)
            layer = GRUCell(layer_inputs, hidden_size, bias=bias,
                            conv=conv, args=args)
            self.add_module(name, layer)
            self.recurrent_layers.append(layer)
        self.reset_parameters()
class RNNCellBase(nn.Module):
    """Shared plumbing for the recurrent cells below.

    Provides two Linear (1x1-convolution) transforms — one for the input,
    one for the hidden state — plus an activation quantiser. Gated cells
    request a wider internal_size and slice it into per-gate chunks.
    """

    def __init__(self, input_size, hidden_size, internal_size, bias=True,
                 conv=fc.Conv2d, args=None):
        super(RNNCellBase, self).__init__()
        # Both transforms keep their own bias terms. That is redundant in
        # principle, but matches torch's reference implementation:
        # https://github.com/pytorch/pytorch/blob/72e171dc52540093c8ad4b6b539ce30ea200e6fd/torch/nn/modules/rnn.py#L679
        self.linear_x = Linear(input_size, internal_size, conv=conv,
                               bias=bias, args=args)
        self.linear_h = Linear(hidden_size, internal_size, conv=conv,
                               bias=bias, args=args)
        # Identity by default; modifiers.numbers.restrict_activations() can
        # swap in a real quantiser.
        self.quantise = quantisable.Quantiser()

    def forward(self, *data):
        # Subclasses implement the actual recurrence step.
        raise NotImplementedError
class RNNCell(RNNCellBase):
    """Single-step vanilla RNN cell.

    With inputs x and h, computes:
        h' = nonlinearity(weights_x@x + bias_x + weights_h@h + bias_h)
    broken into:
        h' = nonlinearity(Linear(x) + Linear(h))
    """

    def __init__(self, input_size, hidden_size, bias=True,
                 nonlinearity="tanh", conv=fc.Conv2d, args=None):
        super(RNNCell, self).__init__(input_size, hidden_size, hidden_size,
                                      bias=bias, conv=conv, args=args)
        # TODO Fall back on torch's RNNCell if possible?
        # Bug fix: instantiate the chosen activation module. The previous
        # code stored the class itself, so forward() effectively called
        # nn.Tanh(tensor) — a constructor call that raises TypeError —
        # instead of applying an activation.
        self.nonlinearity = {"tanh": nn.Tanh, "relu": nn.ReLU}[nonlinearity]()

    def forward(self, x, hidden):
        """Advance the cell one time step; returns the new hidden state."""
        # The outputs from the Linear modules are already quantised.
        x = self.nonlinearity(self.linear_x(x) + self.linear_h(hidden))
        return self.quantise(x)
class LSTMCell(RNNCellBase):
    """Single-step LSTM cell built from two fused linear transforms.

    With inputs x, h and c, the cell computes:
        i  = sigmoid(W_xi@x + b_xi + W_hi@h + b_hi)   (input gate)
        f  = sigmoid(W_xf@x + b_xf + W_hf@h + b_hf)   (forget gate)
        g  = tanh(W_xg@x + b_xg + W_hg@h + b_hg)      (candidate)
        o  = sigmoid(W_xo@x + b_xo + W_ho@h + b_ho)   (output gate)
        c' = f*c + i*g
        h' = o * tanh(c')
    The four x-projections (and likewise the four h-projections) are fused
    into one quadruple-width Linear and split apart with chunk().
    """

    def __init__(self, input_size, hidden_size, bias=True, conv=fc.Conv2d,
                 args=None):
        # internal_size = 4*hidden_size: one slice per gate (i, f, g, o).
        super(LSTMCell, self).__init__(input_size, hidden_size, 4*hidden_size,
                                       bias=bias, conv=conv, args=args)

    def forward(self, x, state):
        """Advance the cell one time step; returns the new (h, c) pair."""
        h_prev, c_prev = state
        # The Linear modules quantise their own outputs.
        xi, xf, xg, xo = self.linear_x(x).chunk(4, 1)
        hi, hf, hg, ho = self.linear_h(h_prev).chunk(4, 1)
        in_gate = self.quantise(torch.sigmoid(xi + hi))
        forget_gate = self.quantise(torch.sigmoid(xf + hf))
        candidate = self.quantise(torch.tanh(xg + hg))
        out_gate = self.quantise(torch.sigmoid(xo + ho))
        c_next = self.quantise(forget_gate*c_prev + in_gate*candidate)
        h_next = self.quantise(out_gate * torch.tanh(c_next))
        return h_next, c_next
class GRUCell(RNNCellBase):
    """Single-step GRU cell built from two fused linear transforms.

    With inputs x and h, the cell computes:
        r  = sigmoid(W_xr@x + b_xr + W_hr@h + b_hr)   (reset gate)
        z  = sigmoid(W_xz@x + b_xz + W_hz@h + b_hz)   (update gate)
        n  = tanh(W_xn@x + b_xn + r * (W_hn@h + b_hn))
        h' = (1 - z)*n + z*h
    The three x-projections (and likewise the three h-projections) are fused
    into one triple-width Linear and split apart with chunk().
    """

    def __init__(self, input_size, hidden_size, bias=True, conv=fc.Conv2d,
                 args=None):
        # internal_size = 3*hidden_size: one slice per gate (r, z, n).
        super(GRUCell, self).__init__(input_size, hidden_size, 3*hidden_size,
                                      bias=bias, conv=conv, args=args)

    def forward(self, x, hidden):
        """Advance the cell one time step; returns the new hidden state."""
        # The Linear modules quantise their own outputs.
        xr, xz, xn = self.linear_x(x).chunk(3, 1)
        hr, hz, hn = self.linear_h(hidden).chunk(3, 1)
        reset = self.quantise(torch.sigmoid(xr + hr))
        update = self.quantise(torch.sigmoid(xz + hz))
        candidate = self.quantise(torch.tanh(xn + reset * hn))
        return (1 - update)*candidate + update*hidden
|
db434/nn-restrict | structured/shuffle.py | import torch.nn as nn
from . import wrapped
from util import log
# A simplification of the module used in:
#
# ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
# Devices
# https://arxiv.org/abs/1707.01083
#
# This implementation simply performs a grouped convolution, then permutes the
# channels. The permutation takes the first channel from each group, then the
# second from each group, etc.
class Conv2d(nn.Module):
    """A replacement for torch.nn.Conv2d, but using a group shuffle structure.

    This module is not strictly a drop-in replacement because not every input
    is able to influence every output. Multiple layers must be stacked
    together to achieve that property.

    Computation and memory requirements are reduced by a factor of `groups`.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=8,  # Sensible default from paper (ImageNet models)
                 bias=True,
                 **kwargs):
        super(Conv2d, self).__init__()

        # Channel numbers can be scaled by floats, so need to be rounded back
        # to integers.
        in_channels = int(in_channels)
        out_channels = int(out_channels)

        # Degenerate into normal conv layer if there are too few channels, or
        # the channels don't divide evenly into groups. Could instead reduce
        # the number of groups to fit, but this is simpler.
        degenerate = (in_channels < groups or out_channels < groups or
                      in_channels % groups != 0 or out_channels % groups != 0)
        if degenerate:
            log.info("INFO: using default convolution instead of shuffle.")
            log.info("      Inputs:", in_channels, ", outputs:", out_channels,
                     ", groups:", groups)
            groups = 1

        # Batch-norm goes after the convolution to match depthwise-separable,
        # for which we have a reference specifying where it should go.
        self.groups = groups
        self.conv = nn.Sequential(
            wrapped.Conv2d(in_channels=in_channels,
                           out_channels=out_channels,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=padding,
                           dilation=dilation,
                           groups=groups,
                           bias=bias,
                           **kwargs),
            nn.BatchNorm2d(out_channels)
        )

    @staticmethod
    def shuffle(x, groups):
        """Uniformly interleave channels: take the first channel of each
        group, then the second of each group, and so on. Assumes the usual
        (batch, channels, height, width) dimension order."""
        if groups == 1:
            return x
        batch, channels, height, width = x.size()
        per_group = channels // groups
        grouped = x.view(batch, groups, per_group, height, width)
        interleaved = grouped.transpose(1, 2).contiguous()
        return interleaved.view(batch, channels, height, width)

    def forward(self, x):
        """Grouped convolution followed by a channel shuffle."""
        return self.shuffle(self.conv(x), self.groups)
|
db434/nn-restrict | structured/deep_roots.py | <gh_stars>0
import torch.nn as nn
from . import wrapped
from util import log
# As introduced in:
#
# Deep Roots: Improving CNN Efficiency with Hierarchical Filter Groups
# https://arxiv.org/abs/1605.06489
class Conv2d(nn.Module):
    """A drop-in replacement for torch.nn.Conv2d which uses the Deep Roots
    structure: a grouped convolution produces intermediate channels, and
    linear combinations (1x1 convolutions) of these produce the output.

    The grouped convolution is `groups` times cheaper, and the linear
    combination is `filter size` times cheaper than ordinary convolution.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=8,  # Sensible default
                 bias=True,
                 **kwargs):
        super(Conv2d, self).__init__()

        # Channel numbers can be scaled by floats, so need to be rounded back
        # to integers.
        in_channels = int(in_channels)
        out_channels = int(out_channels)

        # Degenerate into a normal conv layer if there are too few channels.
        # Could instead reduce the number of groups to fit, but this is
        # simpler.
        if in_channels < groups or out_channels < groups:
            log.info("INFO: using default convolution instead of deep roots.")
            log.info("      Inputs:", in_channels, ", outputs:", out_channels,
                     ", groups:", groups)
            layers = [
                wrapped.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               groups=1,
                               bias=bias,
                               **kwargs),
                nn.BatchNorm2d(out_channels),
            ]
        else:
            # The paper mentions that batch normalisation is used, but not
            # where. Follow MobileNet: after each convolution, before the
            # activation function.
            layers = [
                # Grouped convolution.
                wrapped.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               groups=groups,
                               bias=bias,
                               **kwargs),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                # Channel mixing.
                wrapped.Conv2d(in_channels=out_channels,
                               out_channels=out_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               dilation=1,
                               groups=1,
                               bias=bias,
                               **kwargs),
                nn.BatchNorm2d(out_channels),
            ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the (possibly degenerate) Deep Roots pipeline."""
        return self.conv(x)
|
db434/nn-restrict | structured/butterfly_old2.py | import torch
import torch.nn as nn
import torch.nn.functional
from . import wrapped
from util import log
def _next_power_of_two(value):
"""Return the first power of 2 greater than or equal to the input."""
power = 1
while power < value:
power *= 2
return power
def _power_of_two(value):
"""Returns whether the given value is a power of two."""
return (value & (value - 1)) == 0
def butterfly_sequence(inputs, outputs):
"""Return tuple of (inputs, outputs, butterfly size) for all sub-layers
required to connect every input with every output.
All sizes are powers of two, and the inputs and outputs may differ by a
maximum factor of two at each stage.
Note that the butterfly size may need to be scaled, depending on how these
values are used. e.g. If inputs are duplicated in-place to match number of
outputs, butterfly size must increase accordingly."""
largest_butterfly = inputs
smallest_butterfly = 2
# Can only handle powers of 2. (Not much of a limitation.)
assert _power_of_two(inputs) and _power_of_two(outputs)
# Doesn't make sense to butterfly when there aren't enough inputs.
assert inputs >= smallest_butterfly
# There is a maximum rate at which the number of channels can increase.
# Easy enough to get around, but keeping it simple for now.
assert outputs <= inputs ** 2
current_inputs = inputs
current_butterfly = largest_butterfly
# Go from largest butterfly to smallest. This is necessary because if there
# are fewer outputs than inputs, there won't be space to use the largest
# butterfly later.
while current_butterfly >= smallest_butterfly:
# Determine if we need to change the number of values to reach the
# correct number of outputs.
if outputs > current_inputs:
current_outputs = current_inputs * 2
elif outputs < current_inputs:
current_outputs = current_inputs // 2
else:
current_outputs = current_inputs
yield current_inputs, current_outputs, current_butterfly
current_inputs = current_outputs
current_butterfly //= 2
class Conv2dSublayer(nn.Module):
    """Class representing a single sublayer of the butterfly network. There will
    be log2(channels) of these sublayers, each with a different butterfly size.
    The typical case is:
    * Apply two filters to each input channel
    * Separate the intermediate result in two, one for each of the filters
    * Reorder one of the partitions according to the butterfly size
    * Add together the two partitions
    There are then minor modifications to this procedure if the number of
    outputs differs from the number of inputs.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 butterfly_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        """Build one butterfly sub-layer.

        :param butterfly_size: number of intermediate channels spanned by one
            butterfly (must be at least 2). in_channels and out_channels must
            both be powers of two.
        """
        super(Conv2dSublayer, self).__init__()
        assert _power_of_two(in_channels) and _power_of_two(out_channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Determine how the number of channels changes as data passes through
        # this layer. This will affect the number of filters required.
        self.expansion = out_channels / in_channels
        # By default, apply two filters to each channel, one for each wing of
        # the butterfly.
        self.filters_per_channel = int(2 * self.expansion)
        self.intermediate_channels = self.in_channels * self.filters_per_channel
        self.butterfly_size = butterfly_size
        self.butterflies = (self.intermediate_channels // 2) // \
                           self.butterfly_size
        assert self.butterfly_size >= 2
        assert self.butterflies > 0 or self.expansion < 1
        # Apply multiple filters to each input channel, but don't combine the
        # results. This looks like the first phase of a depthwise-separable
        # convolution. The actual butterfly happens in forward().
        self.conv = wrapped.Conv2d(in_channels=self.in_channels,
                                   out_channels=self.intermediate_channels,
                                   kernel_size=kernel_size,
                                   stride=stride,
                                   padding=padding,
                                   dilation=dilation,
                                   groups=self.in_channels,
                                   bias=False)
        # Lazily-built dense-weight buffers used by weight_matrix() below.
        self._weight_matrix_mask = None
        self._weight_matrix = None

    def extract_sequences(self, data):
        """Separate data along the channel dimension into two smaller datasets.
        Consecutive channels are assigned alternately to the two outputs."""
        batch, channels, height, width = data.size()
        # Simpler split if we didn't apply multiple filters to each input. Split
        # into first half of channels and second half.
        if self.filters_per_channel == 1:
            return data[:, :channels // 2, :, :].contiguous(), \
                   data[:, channels // 2:, :, :].contiguous()
        else:
            # Interleaved split: even-indexed channels go to the first output,
            # odd-indexed channels to the second.
            split = data.view(batch, channels // 2, 2, height, width)
            return split.select(2, 0).contiguous(), \
                   split.select(2, 1).contiguous()

    def swap_wings(self, data):
        """Swap the wings of each butterfly.
        e.g. Butterfly size = 4
             Input  = 0 1 2 3 4 5 6 7
             Output = 2 3 0 1 6 7 4 5
        Method:
        1. Introduce dummy dimensions: (channels) -> (butterflies, 2, wing)
        2. Reflect middle dimension: [0,1] -> [1,0]
        3. Flatten dummy dimensions: (butterflies, 2, wing) -> (channels)
        """
        # If there was only one filter applied, then all of `data` is a single
        # wing, and no swapping is needed.
        if self.filters_per_channel == 1:
            return data
        batch, channels, height, width = data.size()
        wing_size = self.butterfly_size // 2
        assert wing_size * 2 == self.butterfly_size
        split = data.view(batch, self.butterflies, 2, wing_size, height, width)
        left = split[:, :, :1, :, :, :]
        right = split[:, :, 1:, :, :, :]
        merged = torch.cat([right, left], dim=2).contiguous()
        return merged.view(batch, channels, height, width)

    def _weight_matrix_rows(self, column):
        """Return the sequence of dense matrix rows which will be non-zero in
        this column.
        Weights come in pairs and are spaced butterfly_size // 2 apart, and
        subsequent pairs are in_channels rows apart.
        The starting row increments by 1 for each column in the same wing,
        resets for the second wing of the same butterfly, and increments by
        butterfly_size for a new butterfly.
        """
        row = column % (self.butterfly_size // 2)
        row += (column // self.butterfly_size) * self.butterfly_size
        returned = 0
        while row < self.out_channels:
            # Each input column carries at most filters_per_channel weights.
            assert returned < self.filters_per_channel
            yield row
            returned += 1
            if returned % 2 == 0:  # Start new pair of weights
                row += self.in_channels - (self.butterfly_size // 2)
            else:  # Continue current pair of weights
                row += self.butterfly_size // 2

    def weight_matrix_mask(self):
        """Return a ByteTensor showing which values in the dense weight matrix
        are non-zero, to be used with `torch.masked_scatter_`. The output of
        `torch.masked_scatter_` will need to be transposed since the scatter
        function does not allow control over which value goes to which position.
        """
        mask = torch.ByteTensor(self.in_channels, self.out_channels)
        mask.zero_()
        for column in range(self.in_channels):
            for row in self._weight_matrix_rows(column):
                mask[column][row] = 1
        # Sanity-check the mask shape before handing it back.
        in_channels, out_channels = mask.size()
        assert in_channels == self.in_channels
        assert out_channels == self.out_channels
        return mask

    def weight_matrix(self):
        """Convert the sparse weights stored internally to a dense
        representation which can be passed to an ordinary convolution routine.
        This will ultimately require more computation, but can perform better
        on a GPU.
        """
        # The grouped conv holds its (sparse) weights as its only parameter.
        sparse = list(self.conv.parameters())[0]
        # Initialise data buffers if this is the first time using them.
        if self._weight_matrix is None:
            self._weight_matrix_mask = self.weight_matrix_mask()
            self._weight_matrix = torch.zeros(self._weight_matrix_mask.size())
            if sparse.is_cuda:
                self._weight_matrix_mask = self._weight_matrix_mask.cuda()
                self._weight_matrix = self._weight_matrix.cuda()
        dense = self._weight_matrix.fill_(0)
        mask = self._weight_matrix_mask
        # NOTE(review): torch.autograd.Variable is a no-op wrapper in
        # PyTorch >= 0.4 — kept for compatibility with the original code.
        mask = torch.autograd.Variable(mask, requires_grad=False)
        dense = torch.autograd.Variable(dense)
        dense.masked_scatter_(mask, sparse)
        dense = dense.transpose(0, 1)  # Required by weight_matrix_mask()
        return dense

    def forward(self, x):
        """Apply the filters, then combine channels in a butterfly pattern."""
        x = self.conv(x)
        # Extract results from one filter application from each input channel.
        x1, x2 = self.extract_sequences(x)
        # Reorder data from the second filter applied to each channel so that
        # the wings of each butterfly are swapped.
        x2 = self.swap_wings(x2)
        # Combine the default ordered and reordered data.
        x = x1 + x2
        return x
class Conv2d(nn.Module):
    """A drop-in replacement for torch.nn.Conv2d, but using a butterfly
    connection structure internally.
    The cost of the butterfly is O(nlogn) compared with O(n^2) for an ordinary
    convolution layer, where n is the number of input channels.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 **kwargs):
        """Same interface as torch.nn.Conv2d. Non-power-of-two channel counts
        are handled by duplicating inputs / trimming outputs (see
        expand_to_power_of_two() and trim())."""
        super(Conv2d, self).__init__()

        # Channel numbers can be scaled by floats, so need to be rounded back
        # to integers.
        in_channels = int(in_channels)
        out_channels = int(out_channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Stored for can_use_fast_forward()/fast_forward() below.
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.norm = nn.BatchNorm2d(out_channels)

        # Special case if there are 2 inputs or less: there isn't space for a
        # whole butterfly, so use a normal convolution layer.
        if in_channels <= 2:
            log.info("INFO: using default convolution instead of butterfly.")
            log.info("      in_channels =", in_channels)
            self.conv = wrapped.Conv2d(in_channels=in_channels,
                                       out_channels=out_channels,
                                       kernel_size=kernel_size,
                                       stride=stride,
                                       padding=padding,
                                       dilation=dilation,
                                       groups=1,
                                       bias=bias,
                                       **kwargs)
            self.expand_inputs = False
            self.trim_outputs = False
        else:
            butterflies = []

            # Pad/trim to powers of two at the boundaries of the stack.
            self.expand_inputs = not _power_of_two(in_channels)
            self.trim_outputs = not _power_of_two(out_channels)
            start_channels = _next_power_of_two(in_channels)
            end_channels = _next_power_of_two(out_channels)

            for inputs, outputs, size in butterfly_sequence(start_channels,
                                                            end_channels):
                # When the number of outputs increases, we apply extra filters
                # to each input channel. This means the butterflies must be
                # larger than the default butterfly sequence would suggest.
                # The opposite does not happen when outputs decrease.
                expansion = max(1, outputs // start_channels)
                size *= expansion

                butterflies.append(Conv2dSublayer(in_channels=inputs,
                                                  out_channels=outputs,
                                                  kernel_size=kernel_size,
                                                  butterfly_size=size,
                                                  stride=stride,
                                                  padding=padding,
                                                  dilation=dilation,
                                                  groups=groups,
                                                  bias=bias))

                # Override some of the parameters so they don't have cumulative
                # effects.
                padding = kernel_size // 2
                stride = 1
                # Successive iterations with kernel_size > 1 also have a
                # cumulative effect, but this is a good effect, so I leave it.

            self.conv = nn.Sequential(*butterflies)

    def expand_to_power_of_two(self, x):
        """Expand x so it has a power-of-two number of channels. This is done by
        concatenating the data with itself, and then slicing it to the required
        size."""
        batch, channels, height, width = x.size()
        x = torch.cat([x, x], dim=1)
        return self.trim(x, _next_power_of_two(channels)).contiguous()

    @staticmethod
    def trim(x, num_channels):
        """Trim x so that it has the specified number of channels."""
        return x[:, :num_channels, :, :].contiguous()

    def can_use_fast_forward(self):
        """Determine whether the parameters of this layer allow the GPU-
        optimised computation to be used."""
        # The technique takes the same pixel from each channel as a vector, and
        # applies a transformation.
        #  * This doesn't work if kernel_size > 1 because then more pixels are
        #    needed.
        #  * I get out-of-memory errors if there are too many channels because
        #    the transformation matrix is size (in_channels)^2
        return self.kernel_size == 1 and self.in_channels < 10000

    def fast_forward(self, x):
        """Despite orders of magnitude less computation, I haven't found a way
        to execute a butterfly layer efficiently on a GPU.
        This function expands the weights out to the shape they would be in an
        ordinary convolution layer, then applies ordinary convolution."""
        assert self.can_use_fast_forward()

        # Multiply the sub-layers' dense weight matrices together to get one
        # matrix for the whole stack.
        weights = None
        for butterfly in self.conv.children():
            if weights is None:
                weights = butterfly.weight_matrix()
            else:
                weights = torch.matmul(butterfly.weight_matrix(), weights)
        # Dimensions must be (out_chans, in_chan, kernel_height, kernel_width).
        weights = weights.view(*weights.size(), 1, 1)

        # TODO: could potentially have the weight matrix handle odd input/output
        # sizes
        if self.expand_inputs:
            x = self.expand_to_power_of_two(x)
        x = nn.functional.conv2d(x, weights, stride=self.stride,
                                 padding=self.padding)
        if self.trim_outputs:
            x = self.trim(x, self.out_channels)
        x = self.norm(x)
        return x

    def forward(self, x):
        # TODO
        # Try to avoid breaking the test which checks that the outputs are the
        # same.
        # The dense fast path is only worthwhile (and only implemented) on
        # the GPU; the CPU path runs the sub-layers directly.
        if self.can_use_fast_forward() and x.is_cuda:
            return self.fast_forward(x)
        else:
            if self.expand_inputs:
                x = self.expand_to_power_of_two(x)
            x = self.conv(x)
            if self.trim_outputs:
                x = self.trim(x, self.out_channels)
            x = self.norm(x)
            return x
|
db434/nn-restrict | training/trainer.py | <gh_stars>0
import time
import torch
from structured import convert_to_conv as c2c
from util import log
class Trainer(object):
"""Class which takes a model and trains it."""
def __init__(self, dataset, model, schedule, args, optimiser=None,
criterion=None):
self.dataset = dataset
self.train_loader, self.val_loader = dataset.data_loaders(
args.workers, args.batch_size, distributed=args.distributed)
self.model = model
self.lr_schedule = schedule
# Extract some relevant arguments.
self.distributed = args.distributed
self.print_frequency = args.print_freq
self.use_cuda = args.cuda
# Recurrent models need extra care taken with their gradients to avoid
# zeros and infinities.
self.use_grad_clipping = False
for module in self.model.modules():
if isinstance(module, torch.nn.modules.rnn.RNNBase) or \
isinstance(module, c2c.RNNBase):
self.use_grad_clipping = True
break
if optimiser is None:
self.optimiser = self._default_optimiser(args, model)
else:
self.optimiser = optimiser
if criterion is None:
self.criterion = self._default_criterion()
else:
self.criterion = criterion
def train_epoch(self, epoch, data_loader=None):
"""
Train for one epoch.
:param epoch: The current epoch number.
:param data_loader: (Optional) torch.data.DataLoader to provide input.
:return: Loss, top-1 and top-5 accuracies for this epoch.
"""
if data_loader is None:
data_loader = self.train_loader
if self.distributed:
data_loader.sampler.set_epoch(epoch)
self.set_learning_rate(self.lr_schedule.get_learning_rate(epoch))
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# Switch to train mode
self.model.train()
end = time.time()
for i, (data, target) in enumerate(data_loader):
data_time.update(time.time() - end)
if self.use_cuda:
target = target.cuda(async=True)
# Compute output
output, loss = self.minibatch(data, target)
# Measure accuracy and record loss
prec1, prec5 = self.accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), data.size(0))
top1.update(prec1.item(), data.size(0))
top5.update(prec5.item(), data.size(0))
# Compute gradients.
self.optimiser.zero_grad()
loss.backward()
# 0.25 is the default value from here:
# https://github.com/pytorch/examples/blob/master/word_language_model/main.py
if self.use_grad_clipping:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.25)
# Update model.
self.optimiser.step()
batch_time.update(time.time() - end)
end = time.time()
if i % self.print_frequency == 0:
log.info(
'Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(data_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
return losses.avg, top1.avg, top5.avg
def validate(self, data_loader=None):
"""
Run the validation dataset through the model.
:param data_loader: (Optional) torch.data.DataLoader to provide input.
:return: Loss, top-1 and top-5 accuracies for the dataset.
"""
if data_loader is None:
data_loader = self.val_loader
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# Switch to evaluate mode
self.model.eval()
end = time.time()
for i, (data, target) in enumerate(data_loader):
if self.use_cuda:
target = target.cuda(async=True)
# Compute output
output, loss = self.minibatch(data, target)
# Measure accuracy and record loss
prec1, prec5 = self.accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), data.size(0))
top1.update(prec1.item(), data.size(0))
top5.update(prec5.item(), data.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % self.print_frequency == 0:
log.info(
'Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(data_loader), batch_time=batch_time,
loss=losses, top1=top1, top5=top5))
log.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return losses.avg, top1.avg, top5.avg
def minibatch(self, input_data, target):
"""
Pass one minibatch of data through the model.
:param input_data: Tensor of input data.
:param target: Ground truth output data.
:return: Output produced by the model and the loss.
"""
input_var = torch.autograd.Variable(input_data)
target_var = torch.autograd.Variable(target)
# Compute output
output = self.model(input_var)
loss = self.criterion(output, target_var)
return output, loss
def set_learning_rate(self, lr):
"""
Update the learning rate.
:param lr: New learning rate.
"""
for param_group in self.optimiser.param_groups:
param_group['lr'] = lr
@staticmethod
def accuracy(output, target, topk=(1,)):
"""
Compute the top-k precision for the given values of k.
:param output: Output produced by model.
:param target: Ground truth output.
:param topk: Iterable containing all values of k.
:return: List of top-k precisions in the same order as `topk` input.
"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
    @staticmethod
    def _default_optimiser(args, model):
        """
        Create a default optimiser to update the model's weights.
        :param args: Command line parameters (must provide `lr`, `momentum`
                     and `weight_decay`).
        :param model: Model to be optimised.
        :return: A torch.optim.Optimizer which will update the model.
        """
        # Adam was tried previously and left here as a reference alternative.
        # return torch.optim.Adam(model.parameters(), args.lr)
        return torch.optim.SGD(model.parameters(), args.lr,
                               momentum=args.momentum,
                               weight_decay=args.weight_decay)
    @staticmethod
    def _default_criterion():
        """
        Create a default criterion used to evaluate the loss of the model.
        :return: A function which takes the model's output and the ground
        truth and generates a loss value.
        """
        # The functional form combines log-softmax and negative
        # log-likelihood in one call; no state, so no module is needed.
        return torch.nn.functional.cross_entropy
class AverageMeter(object):
    """Compute and store the current value, running sum, count and average.

    `val` is the most recently observed value; `avg` is the running mean of
    all values weighted by their `n` counts.
    """
    def __init__(self):
        # Delegate to reset() so the initial state and the reset state can
        # never drift apart (the original duplicated the four assignments).
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record `val`, observed `n` times (e.g. n = batch size)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
db434/nn-restrict | structured/butterfly.py | <filename>structured/butterfly.py
from functools import reduce
import math
import torch.nn as nn
from . import wrapped
from util import log
class Conv2dSublayer(nn.Module):
    """One sublayer of the butterfly network.

    Performs a single grouped convolution, then reorders the channels so
    that channels which have so far seen *different* subsets of the original
    inputs end up adjacent — in the next sublayer they then all contribute
    to each other."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 groups,
                 input_cone,  # Number of original channels reaching each output
                 **kwargs):
        super(Conv2dSublayer, self).__init__()
        self.conv = wrapped.Conv2d(in_channels=in_channels,
                                   out_channels=out_channels,
                                   kernel_size=kernel_size,
                                   groups=groups,
                                   bias=False,
                                   **kwargs)
        assert out_channels % input_cone == 0
        self.shuffle_groups = out_channels // input_cone
    def shuffle(self, x):
        """Uniformly interleave channels across `shuffle_groups` groups.

        Assumes NCHW layout."""
        g = self.shuffle_groups
        if g == 1:
            return x
        batch, channels, height, width = x.size()
        # (N, g, C/g, H, W) -> swap the group and within-group axes -> flatten.
        reshaped = x.view(batch, g, channels // g, height, width)
        return (reshaped.transpose(1, 2)
                        .contiguous()
                        .view(batch, channels, height, width))
    def forward(self, x):
        return self.shuffle(self.conv(x))
class Conv2d(nn.Module):
    """A drop-in replacement for torch.nn.Conv2d, but using a butterfly
    connection structure internally.
    The cost of the butterfly is O(nlogn) compared with O(n^2) for an ordinary
    convolution layer, where n is the number of input channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 args=None):
        # NOTE(review): the `groups` and `bias` arguments are only honoured by
        # the fallback (non-butterfly) path below; the butterfly sublayers
        # hard-code bias=False and compute their own group counts — confirm
        # callers do not rely on them in the butterfly path.
        super(Conv2d, self).__init__()
        # Channel numbers can be scaled by floats, so need to be rounded back
        # to integers.
        in_channels = int(in_channels)
        out_channels = int(out_channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.norm = nn.BatchNorm2d(out_channels)
        # Compute the sequence of butterflies to be used.
        # TODO: if the number of channels is prime (or nearly prime),
        # we might want to add dummy channels to give a better butterfly
        # sequence. More factors = smaller factors = less computation.
        group_counts = self.get_group_counts(in_channels, out_channels,
                                             args.min_bfly_size)
        # Special case: if we were unable to generate a valid sequence of
        # butterflies, use a normal convolution.
        if len(group_counts) == 0:
            log.info("INFO: using default convolution instead of butterfly.")
            log.info("  in_channels =", in_channels)
            log.info("  out_channels =", out_channels)
            log.info("  min_butterfly_size =", args.min_bfly_size)
            self.conv = wrapped.Conv2d(in_channels=in_channels,
                                       out_channels=out_channels,
                                       kernel_size=kernel_size,
                                       stride=stride,
                                       padding=padding,
                                       dilation=dilation,
                                       groups=1,
                                       bias=bias,
                                       args=args)
        else:
            butterflies = []
            # Number of consecutive channels which have all been computed
            # using the same subset of inputs.
            silo_size = 1
            # The number of input channels may vary as we move through the
            # sublayers.
            current_channels = in_channels
            for group_count in group_counts:
                assert current_channels % group_count == 0
                assert out_channels % group_count == 0
                out_group_size = out_channels // group_count
                silo_size *= out_group_size
                butterflies.append(Conv2dSublayer(in_channels=current_channels,
                                                  out_channels=out_channels,
                                                  kernel_size=kernel_size,
                                                  groups=group_count,
                                                  input_cone=silo_size,
                                                  stride=stride,
                                                  padding=padding,
                                                  dilation=dilation))
                current_channels = out_channels
                # Override some of the parameters so they don't have cumulative
                # effects.
                padding = kernel_size // 2
                stride = 1
                # Successive iterations with kernel_size > 1 also have a
                # cumulative effect, but this is a good effect, so I leave it.
            self.conv = nn.Sequential(*butterflies)
    @staticmethod
    def get_group_counts(in_channels, out_channels, min_group_size):
        """Determine which convolution group counts should be used for the
        given number of channels. Each group count will be used to generate a
        separate grouped convolution sub-layer.

        Returns an empty list when no valid butterfly sequence exists (the
        caller then falls back to a plain convolution)."""
        # The first grouped convolution allows groups of X output channels to
        # share the same inputs. After the appropriate rearrangement of
        # channels, the next convolution connects Y groups, so XY channels now
        # share the same inputs. Continue until all output channels share all
        # the inputs.
        #
        # Assertion 1: in_channels changes as we add sublayers, and ends up
        # equal to out_channels.
        #
        # Assertion 2: every group count used is a factor of both the
        # input channels and output channels for that sublayer.
        #
        # Assertion 3: the product of all group sizes is equal to out_channels.
        if in_channels < min_group_size or out_channels < min_group_size:
            return []
        group_sizes = []
        group_counts = []
        # If we need to change the number of channels, add a one-off group count
        # which is only made from shared factors. Find the smallest group
        # size (largest group count) which satisfies the min_group_size.
        initial_group_count = Conv2d._best_initial_group_count(
            in_channels, out_channels, min_group_size)
        if initial_group_count > 0:
            group_counts.append(initial_group_count)
            group_sizes.append(out_channels // initial_group_count)
        # At this point, assertion 1 should hold.
        # Define a silo to be a group of channels which share the same
        # *original* inputs. We aim to connect all silos by the end of the
        # layer.
        if len(group_sizes) > 0:
            num_silos = out_channels // group_sizes[0]
        else:
            num_silos = out_channels
        remaining_group_sizes = Conv2d._best_group_sizes(num_silos,
                                                         min_group_size)
        group_sizes += remaining_group_sizes
        for group_size in remaining_group_sizes:
            group_counts.append(out_channels // group_size)
        assert len(group_sizes) > 0
        assert len(group_counts) > 0
        # Check that assertion 2 holds. Uses the fact that in_channels ==
        # out_channels for all sublayers except possibly the first one.
        assert in_channels % group_counts[0] == 0
        for group_count in group_counts:
            assert out_channels % group_count == 0
        # Check that assertion 3 holds.
        assert _list_product(group_sizes) == out_channels
        return group_counts
    @staticmethod
    def _best_initial_group_count(in_channels, out_channels, min_group_size):
        """Determine the best butterfly group count to translate between
        the given numbers of input and output channels.

        Returns 0 when in_channels == out_channels (no translation needed)."""
        # Special case: no translation needed.
        if in_channels == out_channels:
            return 0
        # Need group sizes which divide perfectly into both in_channels and
        # out_channels. Find all common factors.
        in_factors = _prime_factorisation(in_channels)
        out_factors = _prime_factorisation(out_channels)
        intersection = _list_intersection(in_factors, out_factors)
        # Want the largest group count allowed, so sort in reverse order.
        compound_factors = list(reversed(sorted(_compound_factors(
            intersection))))
        # No common factor: can't use grouped convolution.
        if len(compound_factors) == 0:
            return 1
        for group_count in compound_factors:
            if in_channels // group_count >= min_group_size:
                return group_count
        # If there were no valid factors, do the best we can do.
        # (compound_factors is descending, so [-1] is the smallest count.)
        return compound_factors[-1]
    @staticmethod
    def _best_group_sizes(channels, min_group_size):
        """Determine the best (lowest cost) butterfly sequence which connects
        the given number of inputs and outputs. Assumes the same number of
        inputs and outputs."""
        # Total cost is is channels * group size, but all sequences generated
        # will share the same `channels` value so that is omitted here.
        # TODO: Include a preference for shorter sequences if there's a tie.
        def cost(sequence):
            return _list_sum(sequence)
        candidates = _all_factorisations(channels, min_group_size)
        best_sequence = []
        best_cost = math.inf
        for c in candidates:
            sequence_cost = cost(c)
            if sequence_cost < best_cost:
                best_cost = sequence_cost
                best_sequence = c
        return best_sequence
    def forward(self, x):
        # Butterfly (or fallback) convolution followed by batch norm.
        x = self.conv(x)
        x = self.norm(x)
        return x
def _prime_factorisation(value):
"""Return a list of prime factors, in order from smallest to largest."""
factors = []
factor = 2
remaining = value
while factor <= remaining:
if remaining % factor == 0:
factors.append(factor)
remaining //= factor
else:
factor += 1
assert _list_product(factors) == value
return factors
def _all_factorisations(value, minimum_factor):
"""Return a list of lists containing all possible factorisations of
`value`. At most one factor may be less than the minimum."""
factorisations = []
factors = [x for x in range(2, value+1) if value % x == 0]
for factor in factors:
remainder = value // factor
if remainder == 1:
factorisations.append([factor])
else:
for factorisation in _all_factorisations(remainder, minimum_factor):
factorisation.append(factor)
factorisations.append(factorisation)
# Remove duplicates. Would like to use a set, but they don't allow lists.
factorisations = [sorted(f) for f in factorisations]
no_duplicates = []
for f in factorisations:
if f not in no_duplicates:
no_duplicates.append(f)
# Filter to remove factorisations which have more than 1 factor below the
# minimum.
filtered = []
for f in no_duplicates:
too_small = 0
for factor in f:
if factor < minimum_factor:
too_small += 1
if too_small <= 1:
filtered.append(f)
return filtered
def _compound_factors(prime_factors):
"""Return a set of all compound factors, given the list of prime factors."""
compound = set()
for position, factor in enumerate(prime_factors):
compound.add(factor)
remaining = _compound_factors(prime_factors[position+1:])
for value in remaining:
compound.add(factor * value)
compound.add(value)
return compound
def _list_intersection(list1, list2):
"""Compute the list of all elements present in both list1 and list2.
Duplicates are allowed. Assumes both lists are sorted."""
intersection = []
pos1 = 0
pos2 = 0
while pos1 < len(list1) and pos2 < len(list2):
val1 = list1[pos1]
val2 = list2[pos2]
if val1 == val2:
intersection.append(val1)
pos1 += 1
pos2 += 1
elif val1 < val2:
pos1 += 1
else:
pos2 += 1
return intersection
def _list_difference(list1, list2):
"""Compute the list of all elements present in list1 but not list2.
Duplicates are allowed. Assumes both lists are sorted."""
difference = []
pos1 = 0
pos2 = 0
while pos1 < len(list1) and pos2 < len(list2):
val1 = list1[pos1]
val2 = list2[pos2]
if val1 == val2:
pos1 += 1
pos2 += 1
elif val1 < val2:
difference.append(val1)
pos1 += 1
else:
pos2 += 1
difference += list1[pos1:]
return difference
def _list_sum(l):
"""Compute the sum of all elements of the list."""
return reduce(lambda x, y: x+y, l, 0)
def _list_product(l):
"""Compute the product of all elements of the list."""
return reduce(lambda x, y: x*y, l, 1)
|
db434/nn-restrict | util/stats.py | <gh_stars>0
from collections import OrderedDict
import os
import torch
from . import checkpoint, log
import models
def data_distribution_hooks(model, activations=True, weights=True,
                            gradients=True):
    """Register hooks to print activations, weights and gradients. Input is
    the root model."""
    for module in model.modules():
        # Only hook bottom-level (leaf) modules; otherwise the same
        # inputs/outputs would be printed multiple times.
        if list(module.children()):
            continue
        if activations:
            module.register_forward_hook(_print_activations)
        if weights:
            module.register_forward_hook(_print_weights)
        if gradients:
            module.register_backward_hook(_print_gradients)
def _print_activations(module, activation_input, activation_output):
    """A forward hook to be called whenever a module finishes computing an
    output. Apply to a module using
        module.register_forward_hook(_print_activations)
    Prints the size, mean and standard deviation of the output activations."""
    # Only the output tensor is summarised; the input tensors are ignored.
    print("Activations:", _get_stats(activation_output))
def _print_weights(module, activation_input, activation_output):
    """A forward hook to be called whenever a module finishes computing an
    output. Apply to a module using
        module.register_forward_hook(_print_weights)
    Prints the size, mean and standard deviation of the layer's weights."""
    # One line per parameter tensor (e.g. weight and bias separately).
    for params in module.parameters():
        print("Weights:", _get_stats(params))
def _print_gradients(module, grad_input, grad_output):
    """A backward hook to be called whenever a module finishes computing its
    gradients. Apply to a module using
        module.register_backward_hook(_print_gradients)
    Prints the size, mean and standard deviation of the output gradients."""
    # grad_input entries can be None (e.g. for inputs that need no grad).
    for tensor in grad_input:
        if tensor is not None:
            print("Gradients:", _get_stats(tensor))
def _get_stats(tensor):
return str.format("{0}\tmean: {1:.6f}\tstd: {2:.6f}\tmax: {3:.6f}",
tensor.size(), tensor.mean().data[0],
tensor.std().data[0], tensor.max().data[0])
def data_dump_hooks(model, directory, activations=True, weights=True,
                    gradients=True):
    """Register hooks to dump tensors to files.

    Files are written under `directory`/activations, /gradients and /weights.
    Layer file names are "<index>_<ModuleType>" so they sort in model order."""
    # Only interested in bottom-level modules. Otherwise we'll print the same
    # inputs/outputs multiple times.
    leaves = (module for module in model.modules()
              if len(list(module.children())) == 0)
    for i, module in enumerate(leaves):
        name = format(i, "03") + "_" + type(module).__name__
        if activations:
            module.register_forward_hook(_dump_activations(directory, name))
        if gradients:
            module.register_backward_hook(_dump_gradients(directory, name))
    # Weights are static, so they are dumped once here rather than hooked.
    if weights:
        _dump_weights(directory, model)
def _dump_activations(directory, layer_name):
    """Return a forward hook which saves a layer's output activations under
    `directory`/activations, in a file named after `layer_name`."""
    directory = os.path.join(directory, "activations")
    if not os.path.exists(directory):
        os.makedirs(directory)
    def inner_fn(module, activation_input, activation_output):
        checkpoint.save_tensor(directory, layer_name, activation_output)
    return inner_fn
def _dump_gradients(directory, layer_name):
    """Return a backward hook which saves a layer's input gradients under
    `directory`/gradients, one file per non-None gradient tensor."""
    directory = os.path.join(directory, "gradients")
    if not os.path.exists(directory):
        os.makedirs(directory)
    def inner_fn(module, grad_input, grad_output):
        for i, tensor in enumerate(grad_input):
            # Some gradient slots are None (inputs which need no grad).
            if tensor is None:
                continue
            name = layer_name + "_" + str(i)
            checkpoint.save_tensor(directory, name, tensor)
    return inner_fn
def _dump_weights(directory, model):
    """Save every state-dict tensor of every leaf module of `model` under
    `directory`/weights, named "<index>_<ModuleType>_<state key>"."""
    directory = os.path.join(directory, "weights")
    if not os.path.exists(directory):
        os.makedirs(directory)
    leaves = [module for module in model.modules()
              if len(list(module.children())) == 0]
    for i, module in enumerate(leaves):
        layer_name = format(i, "03") + "_" + type(module).__name__
        for key, value in module.state_dict().items():
            name = layer_name + "_" + key
            checkpoint.save_tensor(directory, name, value)
def data_restore(directory, model):
    """Reverse the effect of `_dump_weights` by loading individual tensors
    into the model.

    `model` must have the same leaf-module structure as the model that was
    dumped, since tensors are matched by leaf index and state-dict key."""
    log.info("Replacing model state with data from", directory)
    leaves = [module for module in model.modules()
              if len(list(module.children())) == 0]
    for i, module in enumerate(leaves):
        layer_name = format(i, "03") + "_" + type(module).__name__
        for key, value in module.state_dict().items():
            name = layer_name + "_" + key
            # load_tensor fills `value` in place with the saved data.
            checkpoint.load_tensor(directory, name, value)
def gradient_distribution_hooks(model):
    """Like the above `_print_gradients`, but aggregates information over a
    whole epoch, and gives more detail about the distribution.

    Results are read back (and the accumulator cleared) by
    `get_gradient_stats`."""
    # Accumulator maps modules to their gradients.
    # Module-level global so the hook below and `get_gradient_stats` share it.
    global gradient_accumulator
    gradient_accumulator = OrderedDict()
    # Only supports convolution layers for now.
    conv = (m for m in model.modules() if isinstance(m, torch.nn.Conv2d))
    for module in conv:
        module.register_backward_hook(_collect_gradients)
        gradient_accumulator[module] = []
def _collect_gradients(module, grad_input, grad_output):
    """Get the gradients, and append them to a tensor for this module only.

    Backward hook registered by `gradient_distribution_hooks`; stores one
    per-batch percentile summary per call into `gradient_accumulator`."""
    # I'm still not totally sure whether grad_output is the gradients of the
    # module's outputs, or the gradients being outputted by this module.
    # I assume the former - I want the gradients of the output activations.
    global gradient_accumulator
    assert len(grad_output) == 1
    for gradients in grad_output:
        assert module in gradient_accumulator
        # Ideally here we would save all data and compute statistics once we
        # have all of it. That takes too long and uses too much memory, so I
        # cheat and compute statistics for each batch, then combine the
        # statistics for each batch at the end. This is not mathematically
        # correct, but assuming each batch comes from the same distribution, it
        # should give a reasonable approximation.
        percentiles = _get_percentiles(gradients.data)
        gradient_accumulator[module].append(percentiles)
        # gradient_accumulator[module].append(gradients.data)
def _get_percentiles(tensor):
"""Return the value at every 10th percentile from a given dataset."""
# Flatten the tensor and sort it.
tensor = tensor.view(-1)
tensor, positions = torch.sort(tensor.abs())
# Access every 10th percentile.
step = max(len(tensor)//10, 1)
percentiles = list(tensor[::step])
if len(percentiles) < 11:
percentiles.append(tensor[-1])
assert len(percentiles) == 11
return percentiles
def get_gradient_stats():
    """Get a summary of the accumulated gradients.
    Returns a list of strings, one per line (one line per hooked module, in
    registration order). Each line contains the gradient at every 10th
    percentile, ordered by absolute size. Clears the accumulator so the next
    epoch starts fresh."""
    text = []
    global gradient_accumulator
    for module, stats in gradient_accumulator.items():
        # Stats should be a 2D array, where each subarray contains the 0th,
        # 10th, 20th, ... percentiles for each batch.
        percentiles = []
        # Transpose stats so each subarray contains similar information. All
        # 0th percentile, all 10th percentile, etc.
        stats = zip(*stats)
        # Get the 0th percentile of the 0th percentile array, the 10th
        # percentile of the 10th percentile array and so on. This approximates
        # the percentiles of the whole dataset, given only percentiles of
        # smaller subsets.
        for index, array in enumerate(stats):
            percentile = 10 * index
            position = (len(array) * percentile) // 100
            array = sorted(array)
            # Clamp the 100th percentile to the last element.
            if position >= len(array):
                result = str(array[-1])
            else:
                result = str(array[position])
            percentiles.append(result)
        text.append(" ".join(percentiles))
    # Clear the gradient accumulator so it can be used again (but keep the
    # keys/modules in the same order).
    for module in gradient_accumulator:
        gradient_accumulator[module] = []
    return text
# Module-level cost counters: populated by `computation_cost_hooks` /
# `_count_operations`, read by `computation_costs` and `computation_cost_csv`.
num_weights = 0
num_operations = 0
def computation_cost_hooks(model, count_weights=True, count_operations=True):
    """Collect data on the number of weights and computations used in a single
    forward pass of the given network.
    Looks exclusively at convolution layers (and fully-connected layers which
    have been replaced by convolutions).
    * Batch norm is excluded as it can be merged with other layers at zero cost
    * Pooling is excluded for having negligible cost
    * Activation functions are excluded for having negligible cost
    * Data movement is excluded as it can potentially be avoided"""
    # Only modules that expose a `num_operations` method are counted.
    modules = (m for m in model.modules() if hasattr(m, "num_operations"))
    global num_weights, num_operations
    num_weights = 0
    num_operations = 0
    # TODO: can get some double-counting here if there are multiple GPUs.
    log.info("Warning: figures are only accurate if a single GPU is used.")
    if count_weights:
        num_weights = models.count_parameters(model)
    if count_operations:
        for module in modules:
            module.register_forward_hook(_count_operations)
def _count_operations(module, in_data, out_data):
    """Ask the module how much computation it needed to do, given the shapes of
    its input and output.

    Forward hook; accumulates into the module-level `num_operations`."""
    global num_operations
    # Hooked modules are expected to take a single input tensor.
    assert len(in_data) == 1
    num_operations += module.num_operations(in_data[0].size(), out_data.size())
def computation_costs():
    """Return the values computed here.

    :return: (num_operations, num_weights) accumulated since the last call
             to `computation_cost_hooks`."""
    global num_operations, num_weights
    return num_operations, num_weights
def computation_cost_csv(model_name):
    """Return the accumulated cost counters as a two-line CSV string
    (header row followed by one data row)."""
    global num_operations, num_weights
    rows = ["name,operations,weights",
            ",".join([model_name, str(num_operations), str(num_weights)])]
    return "\n".join(rows)
|
db434/nn-restrict | modifiers/functional.py | <reponame>db434/nn-restrict
"""
Wrapper for torch.nn.functional which provides allows weights to be modified
before they are used.
"""
import torch
class QuantiseFunction(torch.autograd.Function):
    """
    Autograd op applying an optional quantisation function on the forward
    pass while acting as the identity on the backward pass (a
    straight-through estimator): gradients flow back as though no
    quantisation happened.
    """
    @staticmethod
    def forward(ctx, x, quantisation_fn):
        return x if quantisation_fn is None else quantisation_fn(x)
    @staticmethod
    def backward(ctx, grad_output):
        # One gradient per `forward` input: identity for x, None for the
        # (non-tensor) quantisation function.
        return grad_output, None
# Functional alias: `quantise(x, fn)` applies the straight-through op above.
quantise = QuantiseFunction.apply
|
db434/nn-restrict | structured/wrapped.py | <filename>structured/wrapped.py
from functools import reduce
import torch.nn as nn
class Conv2d(nn.Conv2d):
    """Simple wrapper for the default convolution class.
    Adds a couple of extra methods to extract useful statistics. This module
    should not be visible outside of this package.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 **kwargs):
        # **kwargs deliberately absorbs framework-specific extras (e.g.
        # `args`) which nn.Conv2d would reject.
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
                                     stride, padding, dilation, groups, bias)
    def num_weights(self):
        """Returns the number of weights required to perform the computation.

        Counts every parameter element (weights plus bias, if present)."""
        # Generator-sum replaces the original manual accumulation loop.
        return sum(parameters.numel() for parameters in self.parameters())
    def num_operations(self, input_size, output_size):
        """Returns the number of operations required to complete this layer.
        1 multiply-accumulate = 1 operation.
        Both inputs are tuples of the form (batch, channels, height, width)."""
        # Each output channel convolves with in_channels/groups input
        # channels; one MAC per kernel element per output pixel.
        convolutions = input_size[1] * output_size[1] // self.groups
        out_pixels = output_size[2] * output_size[3]
        kernel_pixels = reduce(lambda x, y: x * y, self.kernel_size)
        macs_per_convolution = out_pixels * kernel_pixels
        macs = macs_per_convolution * convolutions
        return macs
|
db434/nn-restrict | models/wlm.py | # Word language model. Based on
# https://github.com/pytorch/examples/blob/master/word_language_model/model.py
import torch.nn as nn
import modifiers.modules as quantisable
import structured.convert_to_conv as c2c
import util.log
# These models should work for any text dataset.
# Registry mapping dataset name -> factory-function names defined below.
# These models should work for any text dataset.
models = {"WikiText-2": ["wlm_lstm_large", "wlm_lstm_medium", "wlm_gru_large",
                         "wlm_gru_medium", "wlm_rnn_tanh_large",
                         "wlm_rnn_tanh_medium", "wlm_rnn_relu_large",
                         "wlm_rnn_relu_medium"]}
class WordLanguageModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""
    def __init__(self, rnn_type, num_tokens, num_inputs, num_hidden_units,
                 num_layers, dropout=0.5, tie_weights=False, **kwargs):
        """
        :param rnn_type: One of "LSTM", "GRU", "RNN_TANH", "RNN_RELU".
        :param num_tokens: Number of words in the dictionary.
        :param num_inputs: The number of features in each word embedding.
        :param num_hidden_units: Number of hidden units per layer.
        :param num_layers: Number of layers in the model.
        :param dropout: Dropout rate for all dropout layers.
        :param tie_weights: Use same weights for encoder and decoder.
        """
        super(WordLanguageModel, self).__init__()
        assert rnn_type in ["LSTM", "GRU", "RNN_TANH", "RNN_RELU"]
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(num_tokens, num_inputs)
        if rnn_type in ['LSTM', 'GRU']:
            self.rnn = getattr(c2c, rnn_type)(num_inputs, num_hidden_units,
                                              num_layers, dropout=dropout)
        else:
            nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
            self.rnn = c2c.RNN(num_inputs, num_hidden_units, num_layers,
                               nonlinearity=nonlinearity, dropout=dropout)
        self.decoder = nn.Linear(num_hidden_units, num_tokens)
        # Quantisation is built into c2c modules, so just need to use it on the
        # Embedding's and decoder's outputs.
        self.quantise = quantisable.Quantiser()
        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press &
        # Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for
        # Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            assert num_hidden_units == num_inputs
            # TODO: allow alternate implementations
            self.decoder.weight = self.encoder.weight
        self.rnn_type = rnn_type
        self.num_hidden_units = num_hidden_units
        self.num_layers = num_layers
        self.num_tokens = num_tokens
        self.hidden = None
        self.init_weights()
        self.init_hidden(kwargs["args"].batch_size)
    def init_weights(self):
        """Uniformly initialise the encoder/decoder weights; zero the
        decoder bias."""
        init_range = 0.1
        self.encoder.weight.data.uniform_(-init_range, init_range)
        # TODO: allow alternate implementations
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-init_range, init_range)
    def init_hidden(self, batch_size):
        """(Re)create zeroed hidden-state buffers sized for `batch_size`.
        LSTMs carry a (h, c) pair; other RNN types carry a single tensor."""
        weight = next(self.parameters())
        if self.rnn_type == "LSTM":
            self.register_buffer("hidden0",
                                 weight.new_zeros(self.num_layers, batch_size,
                                                  self.num_hidden_units))
            self.register_buffer("hidden1",
                                 weight.new_zeros(self.num_layers, batch_size,
                                                  self.num_hidden_units))
            self.hidden = (self.hidden0, self.hidden1)
        else:
            self.register_buffer("hidden0",
                                 weight.new_zeros(self.num_layers, batch_size,
                                                  self.num_hidden_units))
            self.hidden = self.hidden0
    def repackage_hidden(self):
        """Wraps hidden states in new Tensors to detach them from their
        history."""
        # Fix: Tensor.detach() returns a *new* tensor; calling it and
        # discarding the result (as the original code did) was a no-op.
        # detach_() detaches in place.
        # NOTE(review): forward() assigns the RNN's new state to self.hidden
        # but never writes it back into the hidden0/hidden1 buffers, so this
        # method resets self.hidden to those buffers each batch — confirm
        # whether state is really meant to persist across batches.
        if self.rnn_type == "LSTM":
            self.hidden0.detach_()
            self.hidden1.detach_()
            self.hidden = (self.hidden0, self.hidden1)
        else:
            self.hidden0.detach_()
            self.hidden = self.hidden0
    def forward(self, x):
        # Ensure the size of the hidden state is compatible with the new input.
        _, in_batch_size = x.size()
        _, hidden_batch_size, _ = self.hidden0.size()
        if in_batch_size != hidden_batch_size:
            util.log.info("Changing batch size from", hidden_batch_size, "to",
                          in_batch_size)
            self.init_hidden(in_batch_size)
        # At the start of each batch, detach the hidden state from how it was
        # previously produced. If we didn't, the model would try
        # backpropagating all the way to start of the dataset.
        self.repackage_hidden()
        embedding = self.drop(self.quantise(self.encoder(x)))
        output, self.hidden = self.rnn(embedding, self.hidden)
        output = self.drop(output)
        # Flatten (seq, batch, features) to (seq*batch, features) for the
        # linear decoder.
        decoded = self.decoder(output.view(output.size(0)*output.size(1),
                                           output.size(2)))
        return self.quantise(decoded)
# A few sensible defaults taken from the source. Use the above constructor
# directly if something specific is wanted.
def wlm_lstm_large(**kwargs):
    """Large 2-layer LSTM language model (1500-unit, tied weights)."""
    return WordLanguageModel("LSTM", kwargs["args"].num_tokens, 1500, 1500, 2,
                             dropout=0.65, tie_weights=True, **kwargs)
def wlm_lstm_medium(**kwargs):
    """Medium 2-layer LSTM language model (650-unit, tied weights)."""
    return WordLanguageModel("LSTM", kwargs["args"].num_tokens, 650, 650, 2,
                             dropout=0.5, tie_weights=True, **kwargs)
def wlm_gru_large(**kwargs):
    """Large 2-layer GRU language model (1500-unit, tied weights)."""
    return WordLanguageModel("GRU", kwargs["args"].num_tokens, 1500, 1500, 2,
                             dropout=0.65, tie_weights=True, **kwargs)
def wlm_gru_medium(**kwargs):
    """Medium 2-layer GRU language model (650-unit, tied weights)."""
    return WordLanguageModel("GRU", kwargs["args"].num_tokens, 650, 650, 2,
                             dropout=0.5, tie_weights=True, **kwargs)
def wlm_rnn_tanh_large(**kwargs):
    """Large 2-layer tanh-RNN language model (1500-unit, tied weights)."""
    return WordLanguageModel("RNN_TANH", kwargs["args"].num_tokens, 1500, 1500,
                             2, dropout=0.65, tie_weights=True, **kwargs)
def wlm_rnn_tanh_medium(**kwargs):
    """Medium 2-layer tanh-RNN language model (650-unit, tied weights)."""
    return WordLanguageModel("RNN_TANH", kwargs["args"].num_tokens, 650, 650,
                             2, dropout=0.5, tie_weights=True, **kwargs)
def wlm_rnn_relu_large(**kwargs):
    """Large 2-layer ReLU-RNN language model (1500-unit, tied weights)."""
    return WordLanguageModel("RNN_RELU", kwargs["args"].num_tokens, 1500, 1500,
                             2, dropout=0.65, tie_weights=True, **kwargs)
def wlm_rnn_relu_medium(**kwargs):
    """Medium 2-layer ReLU-RNN language model (650-unit, tied weights)."""
    return WordLanguageModel("RNN_RELU", kwargs["args"].num_tokens, 650, 650,
                             2, dropout=0.5, tie_weights=True, **kwargs)
|
db434/nn-restrict | tests/test_depthwise_separable.py | import random
import torch
from structured.depthwise_separable import *
def _separable_test():
    """Smoke-test the depthwise-separable Conv2d layer.

    For a range of random input/output sizes, check the layer runs and
    preserves spatial dimensions (odd kernel with matching padding)."""
    for _ in range(100):
        # randint calls kept in the original order to preserve the RNG stream.
        in_channels = random.randint(1, 100)
        out_channels = random.randint(1, 100)
        pad = random.randint(1, 5)
        kernel = pad * 2 + 1  # odd kernel size keeps H and W unchanged
        layer = Conv2d(in_channels, out_channels, kernel, padding=pad)
        batch = torch.autograd.Variable(torch.Tensor(4, in_channels, 10, 10))
        result = layer(batch)
        assert result.size() == (4, out_channels, 10, 10)
    print("All tests passed.")
if __name__ == "__main__":
_separable_test()
|
db434/nn-restrict | models/aaronnet.py | import torch.nn as nn
import structured.fully_connected as fc
models = {"CIFAR-10": ["aaronnet", "aaronnet_v2"]}
class AaronNet(nn.Module):
    """All-convolutional CIFAR-style network: 128*w channels throughout,
    with the classifier implemented as a final 1x1 convolution."""
    def __init__(self, input_channels=3, num_classes=10, conv2d=fc.Conv2d,
                 args=None):
        super(AaronNet, self).__init__()
        self.classes = num_classes
        w = args.width_multiplier

        def conv_relu(conv, in_ch, out_ch):
            # 3x3 convolution (padding preserves size) + in-place ReLU.
            return [conv(in_ch, out_ch, kernel_size=3, padding=1, args=args),
                    nn.ReLU(inplace=True)]

        # First layer is always fully-connected across channels; the rest use
        # the (possibly restricted) `conv2d` implementation passed in.
        layers = conv_relu(fc.Conv2d, input_channels, 128*w)
        layers += conv_relu(conv2d, 128*w, 128*w)
        layers += conv_relu(conv2d, 128*w, 128*w)
        layers.append(nn.MaxPool2d(kernel_size=3, stride=2))
        layers += conv_relu(conv2d, 128*w, 128*w)
        layers += conv_relu(conv2d, 128*w, 128*w)
        layers.append(nn.MaxPool2d(kernel_size=3, stride=2))
        layers += conv_relu(conv2d, 128*w, 128*w)
        layers += conv_relu(conv2d, 128*w, 128*w)
        layers.append(nn.AvgPool2d(kernel_size=7, stride=2, padding=0))
        layers.append(nn.Dropout())
        # 1x1 convolution acts as the final classifier.
        layers.append(conv2d(128*w, num_classes, kernel_size=1, padding=0,
                             args=args))
        self.features = nn.Sequential(*layers)
    def forward(self, x):
        return self.features(x).view(-1, self.classes)
class AaronNet2(nn.Module):
    """Optimised variant of AaronNet for CIFAR-10.

    Strided convolutions replace max-pooling, channel width grows gradually
    (64 -> 128 -> 192, each scaled by ``args.width_multiplier``), and the
    network ends with global average pooling followed by a 1x1 convolution
    producing one channel per class. The first convolution is always
    fully-connected (fc.Conv2d); the rest use the injected `conv2d` type.
    """

    def __init__(self, input_channels=3, num_classes=10, conv2d=fc.Conv2d,
                 args=None):
        super(AaronNet2, self).__init__()
        self.classes = num_classes
        w = args.width_multiplier
        self.features = nn.Sequential(
            fc.Conv2d(input_channels, 64*w, kernel_size=3, padding=1,
                      args=args),
            nn.ReLU(inplace=True),
            conv2d(64*w, 64*w, kernel_size=3, padding=1, args=args),
            nn.ReLU(inplace=True),
            conv2d(64*w, 128*w, kernel_size=3, padding=1, stride=2, args=args),
            nn.ReLU(inplace=True),
            conv2d(128*w, 128*w, kernel_size=3, padding=1, args=args),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            conv2d(128*w, 128*w, kernel_size=3, padding=1, args=args),
            nn.ReLU(inplace=True),
            conv2d(128*w, 192*w, kernel_size=3, padding=1, stride=2, args=args),
            nn.ReLU(inplace=True),
            conv2d(192*w, 192*w, kernel_size=3, padding=1, args=args),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            conv2d(192*w, 192*w, kernel_size=3, padding=1, args=args),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=8, padding=0),
            conv2d(192*w, num_classes, kernel_size=1, padding=0, args=args),
        )

    def forward(self, x):
        x = self.features(x)
        # The features end in pooling plus a 1x1 convolution, so x should be
        # (batch, classes, 1, 1); flatten to (batch, classes).
        # (The original unpacked x.size() into four locals and used only the
        # batch dimension.)
        return x.view(x.size(0), self.classes)
def aaronnet(**kwargs):
    r"""CIFAR-10 model from Aaron as described in test case 1 here:
    https://github.com/admk/mayo-dev/issues/42
    https://github.com/admk/mayo-dev/blob/develop/models/cifarnet_had.yaml

    There's potential for also adding the additional convolution layers that
    he uses in subsequent test cases.

    Default version gets >91% accuracy, even with severe overfitting.
    """
    return AaronNet(**kwargs)
def aaronnet_v2(**kwargs):
    r"""Optimised CIFAR-10 model from Aaron. Fewer parameters, same accuracy.

    Default version gets ~93% accuracy.
    """
    return AaronNet2(**kwargs)
|
db434/nn-restrict | models/__init__.py | from functools import reduce
import torch
from modifiers.modules import Quantisable
from structured import convert_to_conv
from util import log
from . import lenet, aaronnet, alexnet, densenet, mobilenet, resnet, \
squeezenet, vgg, wlm
# Every module that defines models; searched by get_model_names()/get_model().
# Each must expose a `models` dict mapping dataset name -> model name(s).
submodules = [lenet, aaronnet, alexnet, densenet, mobilenet, resnet,
              squeezenet, vgg, wlm]
def get_model_names(dataset=None):
    """Return the names of available models.

    :param dataset: optional object with a ``name`` attribute. When given,
        only models registered for that dataset are returned; otherwise
        every model name from every submodule is returned.
    :return: list of model name strings.
    """
    names = []
    for module in submodules:
        models = module.models
        # Select which registry entries apply, then flatten them uniformly.
        # (The original duplicated the str/list flattening in both branches.)
        if dataset is None:
            entries = models.values()
        elif dataset.name in models:
            entries = [models[dataset.name]]
        else:
            continue
        for data in entries:
            # Each entry is either a single model name or a list of names.
            if isinstance(data, str):
                names.append(data)
            else:
                assert isinstance(data, list)
                names.extend(data)
    return names
def get_model(name, distributed=False, use_cuda=True, **kwargs):
    """Instantiate the named model and prepare it for training.

    :param name: name of a factory function defined in one of `submodules`.
    :param distributed: wrap the model for multi-machine training.
    :param use_cuda: move the model onto GPU(s); False means CPU only.
    :param kwargs: forwarded to the model factory (e.g. `args`, `conv2d`).
    :return: the constructed model, wrapped in `Quantisable` (and possibly
        in `DataParallel`/`DistributedDataParallel`).
    """
    log.info("Creating model '{}'".format(name))

    # Search each model module for a factory function with the given name;
    # the first match wins.
    model = None
    for module in submodules:
        if name in dir(module):
            model = getattr(module, name)(**kwargs)
            break
    assert model is not None  # no submodule knows this model name

    log.info("Model has", count_parameters(model), "parameters")

    # Automatically make models run on multiple GPUs and/or multiple machines.
    # I assume that use_cuda=False also means "don't use GPUs".
    if not distributed:
        if use_cuda:
            # Some magic from the original ImageNet script. Not sure what the
            # purpose is. (NOTE(review): presumably only the convolutional
            # `features` of AlexNet/VGG parallelise well -- confirm.)
            if name.startswith('alexnet') or name.startswith('vgg'):
                model.features = torch.nn.DataParallel(model.features)
                model = model.cuda()
            else:
                model = torch.nn.DataParallel(model)
                model = model.cuda()
    else:
        if use_cuda:
            model = model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)

    # Wrap in Quantisable (from modifiers.modules); presumably this enables
    # the quantisation machinery -- verify against that module.
    model = Quantisable(model)

    # Hack. RNNs have their own particular initialisation, so don't
    # reinitialise if any RNN layers are found.
    rnn = False
    for m in model.modules():
        if isinstance(m, convert_to_conv.RNNBase):
            rnn = True
            break

    # Initialise the convolution weights according to their fan-in.
    # Butterfly convolution consistently performs a little worse without
    # this, so apply it to all models to be fair.
    if not rnn:
        for m in model.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_uniform_(m.weight.data)

    return model
def count_parameters(model):
    """Return the total number of parameter elements (weights) in `model`.

    Uses ``Tensor.numel()`` instead of reducing over ``param.size()``: the
    original ``reduce`` with no initial value raises TypeError for a
    zero-dimensional (scalar) parameter, whose size tuple is empty, while
    ``numel()`` correctly reports 1.
    """
    return sum(param.numel() for param in model.parameters())
|
db434/nn-restrict | structured/butterfly_old.py | import torch
import torch.nn as nn
from . import wrapped
from util import log
"""This version of the butterfly module is the "true" way of performing the
computation, with O(nlogn) weights and computations. However, it relies on a
large number of grouped convolutions, which are no faster than ordinary
convolutions. Thus the speed of the module on GPUs is around n^2*logn, giving a
large slowdown.
The `butterfly` module replaces this one until there is better GPU support. For
1x1 convolutions in that module, the weights of logn group convolution layers
are pre-multiplied together, and then passed to a single convolution routine.
This keeps the nlogn number of weights, but increases computation to n^2, the
same as an ordinary convolution layer."""
def _next_power_of_two(value):
"""Return the first power of 2 greater than or equal to the input."""
power = 1
while power < value:
power *= 2
return power
def _power_of_two(value):
"""Returns whether the given value is a power of two."""
return (value & (value - 1)) == 0
def butterfly_sequence(inputs, outputs):
    """Yield (inputs, outputs, butterfly size) for all sub-layers required to
    connect every input with every output.

    All sizes are powers of two, and the inputs and outputs may differ by a
    maximum factor of two at each stage.

    Note that the butterfly size may need to be scaled, depending on how
    these values are used. e.g. If inputs are duplicated in-place to match
    the number of outputs, butterfly size must increase accordingly."""
    # Can only handle powers of 2. (Not much of a limitation.)
    assert _power_of_two(inputs) and _power_of_two(outputs)
    # Doesn't make sense to butterfly when there aren't enough inputs.
    assert inputs >= 2
    # There is a maximum rate at which the number of channels can increase.
    # Easy enough to get around, but keeping it simple for now.
    assert outputs <= inputs ** 2

    # Walk from the largest butterfly (spanning all inputs) down to size 2.
    # Largest-first is necessary: if there are fewer outputs than inputs,
    # there won't be room to use the largest butterfly later on.
    channels = inputs
    size = inputs
    while size >= 2:
        # Double or halve the channel count as needed to converge on the
        # requested number of outputs.
        if outputs > channels:
            next_channels = channels * 2
        elif outputs < channels:
            next_channels = channels // 2
        else:
            next_channels = channels
        yield channels, next_channels, size
        channels = next_channels
        size //= 2
class Conv2dSublayer(nn.Module):
    """Class representing a single sublayer of the butterfly network. There
    will be log2(channels) of these sublayers, each with a different
    butterfly size.

    The typical case is:
     * Apply two filters to each input channel
     * Separate the intermediate result in two, one for each of the filters
     * Reorder one of the partitions according to the butterfly size
     * Add together the two partitions

    There are then minor modifications to this procedure if the number of
    outputs differs from the number of inputs.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 butterfly_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        super(Conv2dSublayer, self).__init__()

        # The butterfly arithmetic below relies on power-of-two counts.
        assert _power_of_two(in_channels) and _power_of_two(out_channels)

        self.in_channels = in_channels
        self.out_channels = out_channels

        # Determine how the number of channels changes as data passes through
        # this layer. This will affect the number of filters required.
        self.expansion = out_channels / in_channels

        # By default, apply two filters to each channel, one for each wing of
        # the butterfly. (With expansion 0.5 this becomes a single filter
        # per channel.)
        self.filters_per_channel = int(2 * self.expansion)
        self.intermediate_channels = self.in_channels * self.filters_per_channel

        self.butterfly_size = butterfly_size
        # Number of butterflies spanning one wing-partition of the
        # intermediate channels.
        self.butterflies = (self.intermediate_channels // 2) // \
            self.butterfly_size

        assert self.butterfly_size >= 2
        assert self.butterflies > 0 or self.expansion < 1

        # Apply multiple filters to each input channel, but don't combine the
        # results. This looks like the first phase of a depthwise-separable
        # convolution. The actual butterfly happens in forward().
        self.conv = wrapped.Conv2d(in_channels=self.in_channels,
                                   out_channels=self.intermediate_channels,
                                   kernel_size=kernel_size,
                                   stride=stride,
                                   padding=padding,
                                   dilation=dilation,
                                   groups=self.in_channels,
                                   bias=False)

    def extract_sequences(self, data):
        """Separate data along the channel dimension into two smaller datasets.
        Consecutive channels are assigned alternately to the two outputs."""
        batch, channels, height, width = data.size()

        # Simpler split if we didn't apply multiple filters to each input. Split
        # into first half of channels and second half.
        if self.filters_per_channel == 1:
            return data[:, :channels // 2, :, :].contiguous(), \
                   data[:, channels // 2:, :, :].contiguous()
        else:
            # View channels as (pairs, 2) so select() picks out alternating
            # channels: even positions form one output, odd the other.
            split = data.view(batch, channels // 2, 2, height, width)
            return split.select(2, 0).contiguous(), \
                   split.select(2, 1).contiguous()

    def swap_wings(self, data):
        """Swap the wings of each butterfly. A wing is a contiguous block of
        values with length butterfly_size / 2.

        e.g. Butterfly size = 4
             Input  = 0 1 2 3 4 5 6 7
             Output = 2 3 0 1 6 7 4 5

        Method:
          1. Introduce dummy dimensions: (channels) -> (butterflies, 2, wing)
          2. Reflect middle dimension: [0,1] -> [1,0]
          3. Flatten dummy dimensions: (butterflies, 2, wing) -> (channels)
        """
        # If there was only one filter applied, then all of `data` is a single
        # wing, and no swapping is needed.
        if self.filters_per_channel == 1:
            return data

        batch, channels, height, width = data.size()
        wing_size = self.butterfly_size // 2
        assert wing_size * 2 == self.butterfly_size
        split = data.view(batch, self.butterflies, 2, wing_size, height, width)
        left = split[:, :, :1, :, :, :]
        right = split[:, :, 1:, :, :, :]
        # Concatenating in [right, left] order performs the wing swap.
        merged = torch.cat([right, left], dim=2).contiguous()
        return merged.view(batch, channels, height, width)

    def forward(self, x):
        x = self.conv(x)
        # Extract results from one filter application from each input channel.
        x1, x2 = self.extract_sequences(x)
        # Reorder data from the second filter applied to each channel so that
        # the wings of each butterfly are swapped.
        x2 = self.swap_wings(x2)
        # Combine the default ordered and reordered data.
        x = x1 + x2
        return x
class Conv2d(nn.Module):
    """A drop-in replacement for torch.nn.Conv2d, but using a butterfly
    connection structure internally.

    The cost of the butterfly is O(nlogn) compared with O(n^2) for an ordinary
    convolution layer, where n is the number of input channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 **kwargs):
        super(Conv2d, self).__init__()

        # Channel numbers can be scaled by floats, so need to be rounded back
        # to integers.
        in_channels = int(in_channels)
        out_channels = int(out_channels)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # Batch normalisation applied after the (possibly multi-stage)
        # convolution in forward().
        self.norm = nn.BatchNorm2d(out_channels)

        # Special case if there are 2 inputs or less: there isn't space for a
        # whole butterfly, so use a normal convolution layer.
        if in_channels <= 2:
            log.info("INFO: using default convolution instead of butterfly.")
            log.info("      in_channels =", in_channels)
            self.conv = wrapped.Conv2d(in_channels=in_channels,
                                       out_channels=out_channels,
                                       kernel_size=kernel_size,
                                       stride=stride,
                                       padding=padding,
                                       dilation=dilation,
                                       groups=1,
                                       bias=bias,
                                       **kwargs)
            self.expand_inputs = False
            self.trim_outputs = False
        else:
            butterflies = []
            # Non-power-of-two channel counts are handled by padding the
            # input up (expand) and slicing the output down (trim).
            self.expand_inputs = not _power_of_two(in_channels)
            self.trim_outputs = not _power_of_two(out_channels)
            start_channels = _next_power_of_two(in_channels)
            end_channels = _next_power_of_two(out_channels)
            for inputs, outputs, size in butterfly_sequence(start_channels,
                                                            end_channels):
                # When the number of outputs increases, we apply extra filters
                # to each input channel. This means the butterflies must be
                # larger than the default butterfly sequence would suggest.
                # The opposite does not happen when outputs decrease.
                expansion = max(1, outputs // start_channels)
                size *= expansion

                butterflies.append(Conv2dSublayer(in_channels=inputs,
                                                  out_channels=outputs,
                                                  kernel_size=kernel_size,
                                                  butterfly_size=size,
                                                  stride=stride,
                                                  padding=padding,
                                                  dilation=dilation,
                                                  groups=groups,
                                                  bias=bias))

                # Override some of the parameters so they don't have cumulative
                # effects. (Deliberate: only the first sublayer applies the
                # caller's stride/padding; later sublayers preserve size.)
                padding = kernel_size // 2
                stride = 1
                # Successive iterations with kernel_size > 1 also have a
                # cumulative effect, but this is a good effect, so I leave it.
            self.conv = nn.Sequential(*butterflies)

    def expand_to_power_of_two(self, x):
        """Expand x so it has a power-of-two number of channels. This is done by
        concatenating the data with itself, and then slicing it to the required
        size."""
        batch, channels, height, width = x.size()
        # Doubling is always enough: the next power of two is at most 2x.
        x = torch.cat([x, x], dim=1)
        return self.trim(x, _next_power_of_two(channels)).contiguous()

    @staticmethod
    def trim(x, num_channels):
        """Trim x so that it has the specified number of channels."""
        return x[:, :num_channels, :, :].contiguous()

    def forward(self, x):
        # Pad the channel dimension up to a power of two if required.
        if self.expand_inputs:
            x = self.expand_to_power_of_two(x)
        x = self.conv(x)
        # Discard the padding channels to reach the requested output count.
        if self.trim_outputs:
            x = self.trim(x, self.out_channels)
        x = self.norm(x)
        return x
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.