from flask import render_template
from flask import current_app as app
import pandas as pd
from parsl.monitoring.visualization.models import Workflow, Task, Status, db
from parsl.monitoring.visualization.plots.default.workflow_plots import task_gantt_plot, task_per_app_plot, workflow_dag_plot
from parsl.monitoring.visualization.plots.default.task_plots import time_series_cpu_per_task_plot, time_series_memory_per_task_plot
from parsl.monitoring.visualization.plots.default.workflow_resource_plots import resource_distribution_plot, resource_efficiency, worker_efficiency
import datetime
def format_time(value):
if value is None:
return value
elif isinstance(value, float):
return str(datetime.timedelta(seconds=round(value)))
elif isinstance(value, datetime.datetime):
return value.replace(microsecond=0)
elif isinstance(value, datetime.timedelta):
rounded_timedelta = datetime.timedelta(days=value.days, seconds=value.seconds)
return rounded_timedelta
else:
return "Incorrect time format found (expected float, datetime.datetime, or datetime.timedelta)"
def format_duration(value):
(start, end) = value
if start and end:
return format_time(end - start)
else:
return "-"
app.jinja_env.filters['timeformat'] = format_time
app.jinja_env.filters['durationformat'] = format_duration
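# Illustrative Jinja usage of the filters registered above (these template
# expressions are hypothetical examples, not taken from the actual templates):
# {{ workflow.time_completed | timeformat }}
# {{ (workflow.time_began, workflow.time_completed) | durationformat }}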
@app.route('/')
def index():
workflows = Workflow.query.all()
for workflow in workflows:
workflow.status = 'Running'
if workflow.time_completed is not None:
workflow.status = 'Completed'
return render_template('workflows_summary.html', workflows=workflows)
@app.route('/workflow/<workflow_id>/')
def workflow(workflow_id):
workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()
if workflow_details is None:
return render_template('error.html', message="Workflow %s could not be found" % workflow_id)
df_status = pd.read_sql_query(
"SELECT run_id, task_id, task_status_name, timestamp FROM status WHERE run_id='%s'" % workflow_id, db.engine)
df_task = pd.read_sql_query("""SELECT task_id, task_func_name,
task_time_returned from task
WHERE run_id='%s'"""
% (workflow_id), db.engine)
df_task_tries = pd.read_sql_query("""SELECT task.task_id, task_func_name,
task_try_time_running, task_try_time_returned from task, try
WHERE task.task_id = try.task_id AND task.run_id='%s' and try.run_id='%s'"""
% (workflow_id, workflow_id), db.engine)
task_summary = db.engine.execute(
"SELECT task_func_name, count(*) as 'frequency' from task WHERE run_id='%s' group by task_func_name;" % workflow_id)
return render_template('workflow.html',
workflow_details=workflow_details,
task_summary=task_summary,
task_gantt=task_gantt_plot(df_task, df_status, time_completed=workflow_details.time_completed),
task_per_app=task_per_app_plot(df_task_tries, df_status))
@app.route('/workflow/<workflow_id>/app/<app_name>')
def parsl_app(workflow_id, app_name):
workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()
if workflow_details is None:
return render_template('error.html', message="Workflow %s could not be found" % workflow_id)
task_summary = Task.query.filter_by(
run_id=workflow_id, task_func_name=app_name)
return render_template('app.html',
app_name=app_name,
workflow_details=workflow_details,
task_summary=task_summary)
@app.route('/workflow/<workflow_id>/task/')
def parsl_apps(workflow_id):
workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()
if workflow_details is None:
return render_template('error.html', message="Workflow %s could not be found" % workflow_id)
task_summary = Task.query.filter_by(run_id=workflow_id)
return render_template('app.html',
app_name="All Apps",
workflow_details=workflow_details,
task_summary=task_summary)
@app.route('/workflow/<workflow_id>/task/<task_id>')
def task(workflow_id, task_id):
workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()
if workflow_details is None:
return render_template('error.html', message="Workflow %s could not be found" % workflow_id)
task_details = Task.query.filter_by(
run_id=workflow_id, task_id=task_id).first()
task_status = Status.query.filter_by(
run_id=workflow_id, task_id=task_id).order_by(Status.timestamp)
df_resources = pd.read_sql_query(
"SELECT * FROM resource WHERE run_id='%s' AND task_id='%s'" % (workflow_id, task_id), db.engine)
return render_template('task.html',
workflow_details=workflow_details,
task_details=task_details,
task_status=task_status,
time_series_cpu_percent=time_series_cpu_per_task_plot(
df_resources, 'psutil_process_cpu_percent', 'CPU Utilization'),
time_series_memory_resident=time_series_memory_per_task_plot(
df_resources, 'psutil_process_memory_resident', 'Memory Usage'),
)
@app.route('/workflow/<workflow_id>/dag_<path:path>')
def workflow_dag_details(workflow_id, path='group_by_apps'):
workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()
query = """SELECT task.task_id, task.task_func_name, task.task_depends, status.task_status_name
FROM task LEFT JOIN status
ON task.task_id = status.task_id
AND task.run_id = status.run_id
AND status.timestamp = (SELECT MAX(status.timestamp)
FROM status
WHERE status.task_id = task.task_id and status.run_id = task.run_id
)
WHERE task.run_id='%s'""" % (workflow_id)
df_tasks = pd.read_sql_query(query, db.engine)
group_by_apps = (path == "group_by_apps")
return render_template('dag.html',
workflow_details=workflow_details,
group_by_apps=group_by_apps,
workflow_dag_plot=workflow_dag_plot(df_tasks, group_by_apps=group_by_apps))
@app.route('/workflow/<workflow_id>/resource_usage')
def workflow_resources(workflow_id):
workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()
if workflow_details is None:
return render_template('error.html', message="Workflow %s could not be found" % workflow_id)
df_resources = pd.read_sql_query(
"SELECT * FROM resource WHERE run_id='%s'" % (workflow_id), db.engine)
if df_resources.empty:
return render_template('error.html',
message="Workflow %s does not have any resource usage records." % workflow_id)
df_task = pd.read_sql_query(
"SELECT * FROM task WHERE run_id='%s'" % (workflow_id), db.engine)
df_task_tries = pd.read_sql_query("""SELECT task.task_id, task_func_name,
task_try_time_launched, task_try_time_running, task_try_time_returned from task, try
WHERE task.task_id = try.task_id AND task.run_id='%s' and try.run_id='%s'"""
% (workflow_id, workflow_id), db.engine)
df_node = pd.read_sql_query(
"SELECT * FROM node WHERE run_id='%s'" % (workflow_id), db.engine)
return render_template('resource_usage.html', workflow_details=workflow_details,
user_time_distribution_avg_plot=resource_distribution_plot(
df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='avg'),
user_time_distribution_max_plot=resource_distribution_plot(
df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='max'),
memory_usage_distribution_avg_plot=resource_distribution_plot(
df_resources, df_task, type='psutil_process_memory_resident', label='Memory Distribution', option='avg'),
memory_usage_distribution_max_plot=resource_distribution_plot(
df_resources, df_task, type='psutil_process_memory_resident', label='Memory Distribution', option='max'),
cpu_efficiency=resource_efficiency(df_resources, df_node, label='CPU'),
memory_efficiency=resource_efficiency(df_resources, df_node, label='mem'),
worker_efficiency=worker_efficiency(df_task_tries, df_node),
)
| Parsl/parsl | parsl/monitoring/visualization/views.py | Python | apache-2.0 | 9,340 |
import pytest
from api.base.settings.defaults import API_BASE
from osf.models import MetaSchema
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
DraftRegistrationFactory,
)
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
from website.project.metadata.utils import create_jsonschema_from_metaschema
from website.settings import PREREG_ADMIN_TAG
from website.util import permissions
@pytest.mark.django_db
class DraftRegistrationTestCase:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def user_write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project_public(self, user, user_write_contrib, user_read_contrib):
project_public = ProjectFactory(is_public=True, creator=user)
project_public.add_contributor(user_write_contrib, permissions=[permissions.WRITE])
project_public.add_contributor(user_read_contrib, permissions=[permissions.READ])
project_public.save()
return project_public
@pytest.fixture()
def prereg_metadata(self):
def metadata(draft):
test_metadata = {}
json_schema = create_jsonschema_from_metaschema(draft.registration_schema.schema)
for key, value in json_schema['properties'].iteritems():
response = 'Test response'
if value['properties']['value'].get('enum'):
response = value['properties']['value']['enum'][0]
if value['properties']['value'].get('properties'):
response = {'question': {'value': 'Test Response'}}
test_metadata[key] = {'value': response}
return test_metadata
return metadata
@pytest.mark.django_db
class TestDraftRegistrationList(DraftRegistrationTestCase):
@pytest.fixture()
def schema(self):
return MetaSchema.objects.get(name='Open-Ended Registration', schema_version=LATEST_SCHEMA_VERSION)
@pytest.fixture()
def draft_registration(self, user, project_public, schema):
return DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_public
)
@pytest.fixture()
def url_draft_registrations(self, project_public):
return '/{}nodes/{}/draft_registrations/'.format(API_BASE, project_public._id)
def test_admin_can_view_draft_list(self, app, user, draft_registration, schema, url_draft_registrations):
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['attributes']['registration_supplement'] == schema._id
assert data[0]['id'] == draft_registration._id
assert data[0]['attributes']['registration_metadata'] == {}
def test_cannot_view_draft_list(self, app, user_write_contrib, user_read_contrib, user_non_contrib, url_draft_registrations):
# test_read_only_contributor_cannot_view_draft_list
res = app.get(url_draft_registrations, auth=user_read_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_read_write_contributor_cannot_view_draft_list
res = app.get(url_draft_registrations, auth=user_write_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_logged_in_non_contributor_cannot_view_draft_list
res = app.get(url_draft_registrations, auth=user_non_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_unauthenticated_user_cannot_view_draft_list
res = app.get(url_draft_registrations, expect_errors=True)
assert res.status_code == 401
def test_draft_with_registered_node_does_not_show_up_in_draft_list(self, app, user, project_public, draft_registration, url_draft_registrations):
reg = RegistrationFactory(project=project_public)
draft_registration.registered_node = reg
draft_registration.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
def test_draft_with_deleted_registered_node_shows_up_in_draft_list(self, app, user, project_public, draft_registration, schema, url_draft_registrations):
reg = RegistrationFactory(project=project_public)
draft_registration.registered_node = reg
draft_registration.save()
reg.is_deleted = True
reg.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['attributes']['registration_supplement'] == schema._id
assert data[0]['id'] == draft_registration._id
assert data[0]['attributes']['registration_metadata'] == {}
@pytest.mark.django_db
class TestDraftRegistrationCreate(DraftRegistrationTestCase):
@pytest.fixture()
def metaschema_open_ended(self):
return MetaSchema.objects.get(name='Open-Ended Registration', schema_version=LATEST_SCHEMA_VERSION)
@pytest.fixture()
def payload(self, metaschema_open_ended):
return {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': metaschema_open_ended._id
}
}
}
@pytest.fixture()
def url_draft_registrations(self, project_public):
return '/{}nodes/{}/draft_registrations/'.format(API_BASE, project_public._id)
def test_type_is_draft_registrations(self, app, user, metaschema_open_ended, url_draft_registrations):
draft_data = {
'data': {
'type': 'nodes',
'attributes': {
'registration_supplement': metaschema_open_ended._id
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
assert res.status_code == 409
def test_admin_can_create_draft(self, app, user, project_public, payload, metaschema_open_ended):
url = '/{}nodes/{}/draft_registrations/?embed=branched_from&embed=initiator'.format(API_BASE, project_public._id)
res = app.post_json_api(url, payload, auth=user.auth)
assert res.status_code == 201
data = res.json['data']
assert data['attributes']['registration_supplement'] == metaschema_open_ended._id
assert data['attributes']['registration_metadata'] == {}
assert data['embeds']['branched_from']['data']['id'] == project_public._id
assert data['embeds']['initiator']['data']['id'] == user._id
def test_cannot_create_draft(self, app, user_write_contrib, user_read_contrib, user_non_contrib, project_public, payload, url_draft_registrations):
# test_write_only_contributor_cannot_create_draft
assert user_write_contrib in project_public.contributors.all()
res = app.post_json_api(url_draft_registrations, payload, auth=user_write_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_read_only_contributor_cannot_create_draft
assert user_read_contrib in project_public.contributors.all()
res = app.post_json_api(url_draft_registrations, payload, auth=user_read_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_non_authenticated_user_cannot_create_draft
res = app.post_json_api(url_draft_registrations, payload, expect_errors=True)
assert res.status_code == 401
# test_logged_in_non_contributor_cannot_create_draft
res = app.post_json_api(url_draft_registrations, payload, auth=user_non_contrib.auth, expect_errors=True)
assert res.status_code == 403
def test_registration_supplement_errors(self, app, user, url_draft_registrations):
# test_registration_supplement_not_found
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': 'Invalid schema'
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_registration_supplement_must_be_active_metaschema
schema = MetaSchema.objects.get(name='Election Research Preacceptance Competition', active=False)
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': schema._id
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.'
# test_registration_supplement_must_be_most_recent_metaschema
schema = MetaSchema.objects.get(name='Open-Ended Registration', schema_version=1)
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': schema._id
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.'
def test_cannot_create_draft_errors(self, app, user, project_public, payload):
# test_cannot_create_draft_from_a_registration
registration = RegistrationFactory(project=project_public, creator=user)
url = '/{}nodes/{}/draft_registrations/'.format(API_BASE, registration._id)
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_cannot_create_draft_from_deleted_node
project = ProjectFactory(is_public=True, creator=user)
project.is_deleted = True
project.save()
url_project = '/{}nodes/{}/draft_registrations/'.format(API_BASE, project._id)
res = app.post_json_api(url_project, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 410
assert res.json['errors'][0]['detail'] == 'The requested node is no longer available.'
# test_cannot_create_draft_from_collection
collection = CollectionFactory(creator=user)
url = '/{}nodes/{}/draft_registrations/'.format(API_BASE, collection._id)
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_required_metaschema_questions_not_required_on_post(self, app, user, project_public, prereg_metadata):
prereg_schema = MetaSchema.objects.get(name='Prereg Challenge', schema_version=LATEST_SCHEMA_VERSION)
prereg_draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=prereg_schema,
branched_from=project_public
)
url = '/{}nodes/{}/draft_registrations/?embed=initiator&embed=branched_from'.format(API_BASE, project_public._id)
registration_metadata = prereg_metadata(prereg_draft_registration)
del registration_metadata['q1']
prereg_draft_registration.registration_metadata = registration_metadata
prereg_draft_registration.save()
payload = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': prereg_schema._id,
'registration_metadata': registration_metadata
}
}
}
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 201
data = res.json['data']
assert res.json['data']['attributes']['registration_metadata']['q2']['value'] == 'Test response'
assert data['attributes']['registration_supplement'] == prereg_schema._id
assert data['embeds']['branched_from']['data']['id'] == project_public._id
assert data['embeds']['initiator']['data']['id'] == user._id
def test_registration_supplement_must_be_supplied(self, app, user, url_draft_registrations):
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
}
}
}
res = app.post_json_api(url_draft_registrations, draft_data, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'This field is required.'
assert errors['source']['pointer'] == '/data/attributes/registration_supplement'
def test_registration_metadata_must_be_a_dictionary(self, app, user, payload, url_draft_registrations):
payload['data']['attributes']['registration_metadata'] = 'Registration data'
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['source']['pointer'] == '/data/attributes/registration_metadata'
assert errors['detail'] == 'Expected a dictionary of items but got type "unicode".'
def test_registration_metadata_question_values_must_be_dictionaries(self, app, user, payload, url_draft_registrations):
schema = MetaSchema.objects.get(name='OSF-Standard Pre-Data Collection Registration', schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['datacompletion'] = 'No, data collection has not begun'
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'u\'No, data collection has not begun\' is not of type \'object\''
def test_registration_metadata_question_keys_must_be_value(self, app, user, payload, url_draft_registrations):
schema = MetaSchema.objects.get(name='OSF-Standard Pre-Data Collection Registration', schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['datacompletion'] = {
'incorrect_key': 'No, data collection has not begun'
}
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'Additional properties are not allowed (u\'incorrect_key\' was unexpected)'
def test_question_in_registration_metadata_must_be_in_schema(self, app, user, payload, url_draft_registrations):
schema = MetaSchema.objects.get(name='OSF-Standard Pre-Data Collection Registration', schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['q11'] = {
'value': 'No, data collection has not begun'
}
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'Additional properties are not allowed (u\'q11\' was unexpected)'
def test_multiple_choice_question_value_must_match_value_in_schema(self, app, user, payload, url_draft_registrations):
schema = MetaSchema.objects.get(name='OSF-Standard Pre-Data Collection Registration', schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['datacompletion'] = {
'value': 'Nope, data collection has not begun'
}
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'u\'Nope, data collection has not begun\' is not one of [u\'No, data collection has not begun\', u\'Yes, data collection is underway or complete\']'
def test_reviewer_cannot_create_draft_registration(self, app, user_read_contrib, project_public, payload, url_draft_registrations):
user = AuthUserFactory()
user.add_system_tag(PREREG_ADMIN_TAG)
user.save()
assert user_read_contrib in project_public.contributors.all()
res = app.post_json_api(url_draft_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 403
| chrisseto/osf.io | api_tests/nodes/views/test_node_draft_registration_list.py | Python | apache-2.0 | 17,680 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2003-2017 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""The configuration for the AD sync."""
from Cerebrum.Utils import Factory
# Provide defaults for all settings.
from Cerebrum.config.adconf import *
from Cerebrum.config.adconf import ConfigUtils
co = Factory.get('Constants')(Factory.get('Database')())
host_uio = 'ceresynk02.uio.no'
SYNCS['AD_account'] = {
'sync_classes': ('Cerebrum.modules.ad2.ADSync/UserSync',),
'object_classes': (
'Cerebrum.modules.ad2.CerebrumData/CerebrumUser',
),
'domain': 'uio.no',
# Connection settings:
# The hostname of the Member server we should connect to:
'server': host_uio,
# The user we should authenticate to the server with:
'auth_user': 'cereauth',
# The user we can administrate the AD domain with:
'domain_admin': 'uio.no\\ceresynk02_service',
'target_ou': 'OU=users,OU=cerebrum,DC=uio,DC=no',
# TODO: should we add the DC-parts automatically through the 'domain'
# definition? Could be set for all OU definitions.
'search_ou': 'OU=cerebrum,DC=uio,DC=no',
# OUs to ignore. TODO: what to do with objects outside of search_ou?
# What to do with objects unknown in Cerebrum (includes those without
# AD-spread), and those who are known and have the correct spread, but
# is considered not active (inactive, disabled):
# Possible options:
# - ignore: do nothing. The OUs will not be cleaned up in.
# - disable: Mark the object as disabled. Note that this only works for
# accounts.
# - move: deactivate and move the object to a given OU
# - delete: delete the object. Note that this might not be undone!
'handle_unknown_objects': ('disable', None),
'handle_deactivated_objects': ('disable', None),
'create_ous': True,
# If objects that are not in the correct OU should be moved:
'move_objects': True,
# The different languages to use, ordered by priority:
# Used for instance for the Title attribute.
'language': ('nb', 'nn', 'en'),
# If SID should be stored in Cerebrum. Default: False.
#'store_sid': False,
# What change types the quicksync should treat:
# TODO: note that if new change types are added, all such events would
# be processed, even those created before the previous quicksync run.
'change_types': (#('account', 'create'),
#('account', 'deactivate'),
#('account', 'modify'),
('account_password', 'set'),
('quarantine', 'add'),
('quarantine', 'modify'),
('quarantine', 'remove'),
('quarantine', 'refresh'),
('ad_attr', 'add'),
('ad_attr', 'remove')),
'attributes': {'SamAccountName': None,
#'Name': None, # TODO: how should we update Name - can't be
#updated in the normal way, but through a rename-command.
'UserPrincipalName': None,
'GivenName': None,
'DisplayName': None,
# Note that Surname, sn and Sn marks the same attribute,
# but Surname is not possible to write - use Sn or sn.
'Surname': None,
# Titles need some config:
'Title': ('PERSONALTITLE', 'WORKTITLE'), # what titles to use, in priority
# TODO: others
#'mail': None,
# TODO: these are for subclasses
#'HomeDrive': None,
#'HomeDirectory': None,
## TODO:
##
## TODO: Not tested yet:
#'Mail': None,
#'ProfilePath': None,
},
}
# AD sync for Office365 attributes
SYNCS['consent'] = {
'sync_classes': ('Cerebrum.modules.ad2.froupsync/ConsentGroupSync',
'Cerebrum.modules.ad2.froupsync/AffGroupSync',),
'object_classes': (
'Cerebrum.modules.ad2.CerebrumData/CerebrumGroup', ),
'domain': 'uio.no',
# Connection settings:
# The hostname of the Member server we should connect to:
'server': host_uio,
# The user we should authenticate to the server with:
'auth_user': 'cereauth',
# The user we can administrate the AD domain with:
'domain_admin': 'uio.no\\ceresynk02_service',
# 'encrypted': True,
# 'ca': os.path.join(cereconf.DB_AUTH_DIR, 'ad.pem'),
'target_ou': 'OU=Groups,OU=ad_only,DC=uio,DC=no',
'search_ou': 'OU=Groups,OU=ad_only,DC=uio,DC=no',
'create_ous': False,
'move_objects': False,
'target_type': 'account',
'target_spread': 'AD_account',
'attributes': {
'SamAccountName': ConfigUtils.AttrConfig(default='%(ad_id)s'),
'DisplayName': ConfigUtils.AttrConfig(default='%(ad_id)s'),
'DisplayNamePrintable': ConfigUtils.AttrConfig(default='%(ad_id)s'),
'Description': ConfigUtils.CallbackAttr(
lambda e: getattr(e, 'description', 'N/A').strip()),
'Member': ConfigUtils.CallbackAttr(
default=[],
callback=lambda g: ['CN=%s,OU=users,OU=cerebrum,DC=uio,DC=no' % m
for m in getattr(g, 'members', [])]), },
'script': {},
'change_types': (
('consent', 'approve'),
('consent', 'decline'),
('consent', 'delete'),
('person_aff', 'add'),
('person_aff', 'modify'),
('person_aff', 'remove'), ),
'handle_unknown_objects': ('delete', None),
'handle_deactivated_objects': ('disable', None),
'affiliation_groups': {
'uioOffice365staff': [('SAP', 'ANSATT'), ],
'uioOffice365student': [('FS', 'STUDENT'), ], },
'consent_groups': {
'uioOffice365consent': ['office365'], },
}
| unioslo/cerebrum | testsuite/docker/test-config/uio/adconf.py | Python | gpl-2.0 | 6,569 |
DATA = """ 37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690 """
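# Keeping only the first 11 digits of each 50-digit number (see trim below) is
# enough to recover the first 10 digits of the total: the discarded tails add up
# to less than 100 * 10**39 = 10**41, while the full sum is on the order of
# 5 * 10**51, so one guard digit of headroom remains (the usual Project Euler 13 trick).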
def trim(big_str):
nums = big_str.split('\n')
return [int(line.strip()[:11]) for line in nums]
def main():
nums = trim(DATA)
print(str(sum(nums))[:10])
if __name__ == '__main__':
main()
| noelevans/sandpit | euler/big_summation_q13.py | Python | mit | 6,516 |
def nested_object_from_json_data(json_data, key, decode_func):
rv = None
if key in json_data:
data = json_data[key]
if data:
rv = decode_func(json_data[key])
return rv
def get_attr_as_tuple_unless_none(object, name):
value_as_tuple = None
value = getattr(object, name, None)
if value is not None:
value_as_tuple = tuple(value)
return value_as_tuple
def get_attr_as_list_unless_none(object, name):
value_as_list = None
value = getattr(object, name, None)
if value is not None:
value_as_list = list(value)
return value_as_list
| teamfruit/defend_against_fruit | defend_against_fruit/daf_fruit_dist/daf_fruit_dist/build/build_util.py | Python | apache-2.0 | 617 |
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'users.views.sign_up'),
url(r'^signup/$', 'users.views.sign_up', name='sign_up'),
url(r'^login/$', 'users.views.login', name='login'),
url(r'^logout/$', 'users.views.logout'),
url(r'^dashboard/', include('dashboard.urls')),
url(r'^posts/', include('posts.urls')),
] + static('static_files', document_root=settings.MEDIA_ROOT)
| NA5G/coco-server-was | coco/coco/urls.py | Python | mit | 657 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Francesco Frassinelli <fraph24@gmail.com>
#
# pylife is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Implementation of: http://en.wikipedia.org/wiki/Conway's_Game_of_Life
Tested on Python 2.6.4 and Python 3.1.1 """
from collections import defaultdict
from itertools import product
from sys import argv
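# Boards are stored as defaultdict(int) keyed by (row, column), so looking up a
# neighbour outside the populated region simply returns 0; this is why add()
# below needs no explicit bounds checking.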
def add(board, pos):
""" Adds eight cells near current cell """
row, column = pos
return \
board[row-1, column-1] +\
board[row-1, column] +\
board[row-1, column+1] +\
board[row, column-1] +\
board[row, column+1] +\
board[row+1, column-1] +\
board[row+1, column] +\
board[row+1, column+1]
def snext(board):
""" Calculates the next stage """
new = defaultdict(int, board)
for pos in list(board.keys()):
near = add(board, pos)
item = board[pos]
if near not in (2, 3) and item:
new[pos] = 0
elif near == 3 and not item:
new[pos] = 1
return new
def process(board):
""" Finds if this board repeats itself """
history = [defaultdict(None, board)]
while 1:
board = snext(board)
if board in history:
if board == history[0]:
return board
return None
history.append(defaultdict(None, board))
def generator(rows, columns):
""" Generates a board """
ppos = [(row, column) for row in range(rows)
for column in range(columns)]
possibilities = product((0, 1), repeat=rows*columns)
for case in possibilities:
board = defaultdict(int)
for pos, value in zip(ppos, case):
board[pos] = value
yield board
def bruteforce(rows, columns):
global count
count = 0
for board in map(process, generator(rows, columns)):
if board is not None:
count += 1
#print board
if __name__ == "__main__":
rows, columns = 4, 3
bruteforce(rows, columns)
print(count)
# try:
# rows, columns = int(argv[1]), int(argv[2])
# except IndexError:
# print("Usage: %s [rows] [columns]" % argv[0])
# except ValueError:
# print("Usage: %s [rows] [columns]" % argv[0])
# else:
# bruteforce(rows, columns)
| shedskin/shedskin | examples/life.py | Python | gpl-3.0 | 2,935 |
# -*- coding: utf-8 -*-
from random import choice
from django.core.management.base import BaseCommand
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
help = "Generates a new SECRET_KEY that can be used in a project settings file."
requires_system_checks = False
@signalcommand
def handle(self, *args, **options):
return ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
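# Note: the 50-character length and character set above mirror the SECRET_KEY
# that `django-admin startproject` generates.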
| neilpelow/wmap-django | venv/lib/python3.5/site-packages/django_extensions/management/commands/generate_secret_key.py | Python | gpl-3.0 | 482 |
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import argparse
# you need this!
# https://github.com/LabKey/labkey-api-python
#
# sys.path.insert(0,'/Users/bpb/repos/labkey-api-python/')
# sys.path.insert(0,'/global/homes/b/bpb/repos/labkey-api-python/')
# import labkey as lk
from labkey.api_wrapper import APIWrapper
# import requests
# import json
import pandas as pd
import hashlib  # needed by hash_bytestr_iter / make_sha256 below
import numpy as np
import re
from datetime import datetime, time as dtime
from subprocess import check_output
import time
import math
EXTENSIONS = {'mzml':'.mzML',
'hdf5':'.h5',
'spectralhits':'_spectral-hits.tab.gz',
'pactolus':'.pactolus.gz',
'raw':'.raw'}
STATUS = {'initiation':'01 initiation',
'running':'04 running',
'custom':'06 custom',
'complete':'07 complete',
'hold':'08 hold',
'error':'09 error',
'submitted':'10 submitted',
'corrupted':'11 corrupted'}
PROJECT_DIRECTORY = '/global/project/projectdirs/metatlas/raw_data'
GETTER_SPEC = {'raw':{'extension':'.raw',
'lims_table':'raw_file'},
'mzml':{'extension':'.mzml',
'lims_table':'mzml_file'},
'hdf5':{'extension':'.h5',
'lims_table':'hdf5_file'},
'spectralhits':{'extension':'_spectral-hits.tab.gz',
'lims_table':'spectralhits_file'},
'pactolus':{'extension':'.pactolus.gz',
'lims_table':'pactolus_file'}}
key_file = '/global/cfs/cdirs/metatlas/labkey_user.txt'
with open(key_file,'r') as fid:
api_key = fid.read().strip()
labkey_server='metatlas-dev.nersc.gov'
project_name='LIMS/'
api = APIWrapper(labkey_server, project_name, use_ssl=True,api_key=api_key)
def get_files_from_disk(directory,extension):
"""
Find files under `directory` whose names end in `extension` (matched
case-insensitively via `find -iname`). Returns two parallel lists: modification
times as epoch seconds, and full file paths. The literal 'SplitThat' marker in
the `find -printf` format separates the timestamp from the path.
"""
get_with_date = ''.join(['find %s -iname "*%s"' % (directory,extension),' -printf "%Ts SplitThat%p\n"'])
files = check_output(get_with_date, shell=True)
files = files.decode('utf-8').splitlines()
files = [f.split('SplitThat') for f in files]
dates = [int(f[0].strip()) for f in files]
files = [f[1].strip() for f in files]
return dates,files
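# Example (this is how update_file_table below calls it):
# dates, files = get_files_from_disk(PROJECT_DIRECTORY, '.mzml')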
def complex_name_splitter(filename,
extensions=set(['raw', 'tab', 'gz', 'pactolus', 'mzML', 'd','h5']),
strippath='/global/project/projectdirs/metatlas/raw_data'):
#Get the filename
basename = os.path.basename(filename)
#Path is everything not filename
pathname = filename.replace(basename,'')
#Don't store the basepath since files will likely move
pathname = pathname.replace(strippath,'')
pathname = pathname.strip('/')
#remove extension, but keep any internal . separeted content
pieces = set(basename.split('.')) - extensions
name = '.'.join(pieces)
name = name.replace('_spectral-hits','')
#this will be a basename that has typically two folders
#ths should not have an extension
new_name = os.path.join(pathname,name)
return new_name
def hash_bytestr_iter(bytesiter, hasher, ashexstr=False):
for block in bytesiter:
hasher.update(block)
return hasher.hexdigest() if ashexstr else hasher.digest()
def file_as_blockiter(afile, blocksize=65536):
with afile:
block = afile.read(blocksize)
while len(block) > 0:
yield block
block = afile.read(blocksize)
def make_sha256(fname):
return hash_bytestr_iter(file_as_blockiter(open(fname, 'rb')), hashlib.sha256())
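# Note: make_sha256() returns the raw digest bytes; call
# hash_bytestr_iter(..., ashexstr=True) directly if a hex string is needed.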
def get_acqtime_from_mzml(mzml_file):
startTimeStamp=None
with open(mzml_file) as mzml:
for line in mzml:
if 'startTimeStamp' in line:
startTimeStamp = line.split('startTimeStamp="')[1].split('"')[0].replace('T',' ').rstrip('Z')
break
# print startTimeStamp
if startTimeStamp is not None and '-infinity' not in startTimeStamp:
date_object = datetime.strptime(startTimeStamp, '%Y-%m-%d %H:%M:%S')
utc_timestamp = int(time.mktime(date_object.timetuple()))
else:
utc_timestamp = int(0)
return utc_timestamp
def get_table_from_lims(table,columns=None):
if columns is None:
sql = """SELECT * FROM %s;"""%table
else:
sql = """SELECT %s FROM %s;"""%(','.join(columns),table)
# base execute_sql
schema = 'lists'
sql_result = api.query.execute_sql(schema, sql,max_rows=1e6)
if sql_result is None:
print(('execute_sql: Failed to load results from ' + schema + '.' + table))
return None
else:
df = pd.DataFrame(sql_result['rows'])
df = df[[c for c in df.columns if not c.startswith('_')]]
return df
def update_table_in_lims(df,table,method='update',max_size=1000):
"""
Note: update at most ~1000 rows per request; anything larger returns a 504 error.
Increasing the timeout passed to the API calls below might allow bigger batches.
Use it like this:
update_table_in_lims(df_lims, 'mzml_file')
The row matched by 'name' or 'Key' has its remaining columns replaced with the values supplied.
"""
# if columns is None:
# cols = df.columns
# else:
# cols = pd.unique([index_column] + columns)
# One of the cols needs to be the index column (almost always: Key or Name)
N = math.ceil(float(df.shape[0]) / max_size)
for sub_df in np.array_split(df, N):
payload = sub_df.to_dict('records')
if method=='update':
api.query.update_rows('lists', table, payload,timeout=10000)
elif method=='insert':
api.query.insert_rows('lists', table, payload,timeout=10000)
elif method=='delete':
api.query.delete_rows('lists', table, payload,timeout=10000)
else:
print(('ERROR: Nothing to do. Method %s is not programmed'%method))
print('updated')
def get_union_of_all_lcms_names(tables=['mzml_file','hdf5_file','pactolus_file','raw_file','spectralhits_file']):
# sort out the lcmsrun table
sql = ['select name from %s'%t for t in tables]
sql = ' union '.join(sql)
# con = lk.utils.create_server_context(labkey_server, project_name, use_ssl=True,)
# base execute_sql
schema = 'lists'
sql_result = api.query.execute_sql(schema, sql,max_rows=1e6)
if sql_result is None:
print(('execute_sql: Failed to load results from ' + schema + '.' + table))
else:
return [r['name'] for r in sql_result['rows']]
def update_lcmsrun_names(tables=['mzml_file','hdf5_file','pactolus_file','raw_file','spectralhits_file']):
#get all the names in the various raw data tables
names = get_union_of_all_lcms_names(tables)
#get all the names in lcmsrun (rawdata relationship) table
lcmsruns = get_table_from_lims('lcmsrun',columns=['name'])
lcmsruns = lcmsruns['name'].tolist()
# this is likeley a recently uploaded file that was just created
missing_from_lcmsruns = list(set(names) - set(lcmsruns))
#hopefully there aren't any of these, but always good to check
extra_in_lcmsruns = list(set(lcmsruns) - set(names))
#add missing ones
if len(missing_from_lcmsruns)>0:
temp = pd.DataFrame()
temp['name'] = missing_from_lcmsruns
update_table_in_lims(temp,'lcmsrun',method='insert')
#remove extra ones
if len(extra_in_lcmsruns)>0:
sql = """SELECT Key FROM lcmsrun where name IN (%s);"""%','.join(['\'%s\''%e for e in extra_in_lcmsruns])
# print(sql)
schema = 'lists'
sql_result = api.query.execute_sql(schema, sql,max_rows=1e6)
if sql_result is None:
print(('execute_sql: Failed to load results from ' + schema + '.' + table))
# return None
else:
temp = pd.DataFrame(sql_result['rows'])
temp = temp[[c for c in temp.columns if not c.startswith('_')]]
# return df
if temp.shape[0]>0:
update_table_in_lims(temp,'lcmsrun',method='delete')
return missing_from_lcmsruns,extra_in_lcmsruns
def update_lcmsrun_matrix(file_type):
lcmsruns = get_table_from_lims('lcmsrun',columns=['Key','name',file_type])
lcmsruns.fillna(-1,inplace=True) #replace None indices so absolute value below has something to work on
lcmsruns.rename(columns={file_type:'%s_existing'%file_type},inplace=True)
data = get_table_from_lims(file_type,columns=['Key','name'])
df = pd.merge(lcmsruns,data,on='name',how='inner')
df.rename(columns={'Key_x':'Key','Key_y':file_type},inplace=True)
df = df[abs(df['%s_existing'%file_type]-df[file_type])>0]
df.drop(columns=['name','%s_existing'%file_type],inplace=True)
print((df.shape))
if df.shape[0]>0:
update_table_in_lims(df,'lcmsrun',method='update')#,index_column='Key',columns=None,labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
print('done updating')
def get_lcmsrun_matrix():#labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
sql = 'select '
for f in ['mzml','hdf5','raw','spectralhits','pactolus']:
sql = '%s %s_file.filename as %s_filename,'%(sql,f,f)
sql = '%s from lcmsrun'%sql
# base execute_sql
schema = 'lists'
sql_result = api.query.execute_sql(schema, sql,max_rows=1e8)
if sql_result is None:
print(('execute_sql: Failed to load results from ' + schema + '.' + table))
return None
else:
lcmsruns = pd.DataFrame(sql_result['rows'])
return lcmsruns
def update_file_conversion_tasks(task,lcmsruns=None,file_conversion_tasks=None):#,labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
"""
Compares the current file_conversion_task rows against the current files and
determines whether new tasks need to be created (and removes stale ones).
task is one of: 'mzml_to_hdf5', 'raw_to_mzml', 'mzml_to_spectralhits', 'mzml_to_pactolus'
"""
input_type = task.split('_')[0]
output_type = task.split('_')[-1]
if file_conversion_tasks is None:
file_conversion_tasks = get_table_from_lims('file_conversion_task',columns=['Key','input_file','output_file','task','status'])
# task_idx = file_conversion_tasks['task']==task
if lcmsruns is None:
lcmsruns = get_lcmsrun_matrix()
done_input_files = lcmsruns.loc[pd.notna(lcmsruns['%s_filename'%input_type]),'%s_filename'%input_type]
done_output_files = lcmsruns.loc[pd.notna(lcmsruns['%s_filename'%output_type]),'%s_filename'%output_type]
task_idx = file_conversion_tasks['task']==task
inputfile_idx = file_conversion_tasks['input_file'].isin(done_input_files)
outputfile_idx = file_conversion_tasks['output_file'].isin(done_output_files)
# This finds where output file exists
done_tasks_idx = (task_idx) & (outputfile_idx)
if sum(done_tasks_idx)>0:
update_table_in_lims(file_conversion_tasks.loc[done_tasks_idx,['Key']],'file_conversion_task',method='delete')#,labkey_server=labkey_server,project_name=project_name)
print(('%s: There are %d tasks where output file exist and will be removed'%(task,file_conversion_tasks[done_tasks_idx].shape[0])))
# This finds where input file is missing
done_tasks_idx = (task_idx) & (~inputfile_idx)
if sum(done_tasks_idx)>0:
update_table_in_lims(file_conversion_tasks.loc[done_tasks_idx,['Key']],'file_conversion_task',method='delete')#,labkey_server=labkey_server,project_name=project_name)
print(('%s: There are %d tasks where input file is missing and will be removed'%(task,file_conversion_tasks[done_tasks_idx].shape[0])))
right_now_str = datetime.now().strftime("%Y%m%d %H:%M:%S")
idx = (pd.notna(lcmsruns['%s_filename'%input_type])) & (pd.isna(lcmsruns['%s_filename'%output_type]))
temp = pd.DataFrame()
temp['input_file'] = lcmsruns.loc[idx,'%s_filename'%input_type]
temp['output_file'] = temp['input_file'].apply(lambda x: re.sub('\%s$'%EXTENSIONS[input_type],'%s'%EXTENSIONS[output_type],x))
temp['task'] = task
temp['status'] = STATUS['initiation']
temp['log'] = 'detected: %s'%right_now_str
temp.reset_index(drop=True,inplace=True)
cols = temp.columns
temp = pd.merge(temp,file_conversion_tasks.add_suffix('_task'),left_on=['input_file','output_file'],right_on=['input_file_task','output_file_task'],how='outer',indicator=True)
new_tasks = temp[temp['_merge']=='left_only'].copy()
new_tasks = new_tasks[cols]
new_tasks.reset_index(drop=True,inplace=True)
print(("There are %d new tasks"%new_tasks.shape[0]))
if new_tasks.shape[0]>0:
update_table_in_lims(new_tasks,'file_conversion_task',method='insert')
def update_file_table(file_table):
file_type = file_table.split('_')[0]
v = GETTER_SPEC[file_type]
print(('Getting %s files from disk'%(file_type)))
dates,files = get_files_from_disk(PROJECT_DIRECTORY,v['extension'])
if len(files)>0:
df = pd.DataFrame(data={'filename':files,'file_type':file_type,'timeepoch':dates})
df['basename'] = df['filename'].apply(os.path.basename)
df['name'] = df['filename'].apply(complex_name_splitter) #make a name for grouping associated content
else:
df = pd.DataFrame()
df['filename'] = 'None'
df['file_type'] = file_type
df['timeepoch'] = 0
df['basename'] = 'None'
df['name'] = 'None'
print(('\tThere were %d files on disk'%len(files)))
cols = ['filename','name','Key']
df_lims = get_table_from_lims(v['lims_table'],columns=cols)
print(('\tThere were %d files from LIMS table %s'%(df_lims.shape[0],v['lims_table'])))
diff_df = pd.merge(df, df_lims,on=['filename','name'], how='outer', indicator='Exist')
diff_df = diff_df.loc[diff_df['Exist'] != 'both'] #(left_only, right_only, or both)
print(('\tThere are %d different'%diff_df.shape[0]))
print('')
# diff_df.fillna('',inplace=True)
diff_df['parameters'] = 1
cols = ['file_type','filename','timeepoch','basename','name']
temp = diff_df.loc[diff_df['Exist']=='left_only',cols]
if temp.shape[0]>0:
update_table_in_lims(temp,file_table,method='insert')#,index_column='Key',columns=None,labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
cols = ['Key','filename']
temp = diff_df.loc[diff_df['Exist']=='right_only',cols]
temp['Key'] = temp['Key'].astype(int)
if temp.shape[0]>0:
update_table_in_lims(temp,file_table,method='delete')#,index_column='Key',columns=None,labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
# df.to_csv('/global/homes/b/bpb/Downloads/%s_files.tab'%k,index=None,sep='\t')
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
# print command line arguments
parser = argparse.ArgumentParser(description='a command line tool for updating the metatlas labkey lims at nersc.')
# parser.add_argument('-m2t','--ms2_tolerance', help='tolerance in Daltons for ms2', type=float,default=0.01)
# parser.add_argument('-m1pn','--ms1_pos_neutralizations', help='adducts to neutralize for in ms1: 1.007276,18.033823,22.989218', type=float,nargs='+',default=[1.007276,18.033823,22.989218])
parser.add_argument('-update_file_tables','--update_file_tables', help='Update the file tables', default=False)
parser.add_argument('-update_lcmsrun_names','--update_lcmsrun_names', help='Update the names in the lcmsruns matrix', default=False)
parser.add_argument('-update_lcmsrun_items','--update_lcmsrun_items', help='Update the associations in the lcmsruns matrix', default=False)
parser.add_argument('-update_fileconversion_tasks','--update_fileconversion_tasks', help='Update the file conversions tasks', default=False)
# parser.add_argument('-n','--num_cores', help='number of cores to use for multiprocessing', type=int,default=32)
# parser.add_argument('-overwrite','--overwrite', help='Overwrite pre-existing file(s): True/False', type=bool,default=False)
args = vars(parser.parse_args())
# trees = np.load(args['tree_file'])
print(args)
tables=['mzml_file','hdf5_file','pactolus_file','raw_file','spectralhits_file']
# tables=['spectralhits_file']
if str2bool(args['update_file_tables'])==True:
# 1. Update each table individually
for t in tables:
update_file_table(t)
if str2bool(args['update_lcmsrun_names'])==True:
# 2. When complete, update the rows in lcmsrun matrix
missing_from_lcmsruns,extra_in_lcmsruns = update_lcmsrun_names()
if str2bool(args['update_lcmsrun_items'])==True:
# 3. populate lcmsrun table that associates all the file types under one entry
for t in tables:
update_lcmsrun_matrix(t)
# 4. remove any file conversion tasks that have already occured
# TODO!!!!!# TODO!!!!!# TODO!!!!!
# TODO!!!!!# TODO!!!!!# TODO!!!!!
# TODO!!!!!# TODO!!!!!# TODO!!!!!
# TODO!!!!!# TODO!!!!!# TODO!!!!!
if str2bool(args['update_fileconversion_tasks'])==True:
# 5. populate any file conversion tasks that need to occur
lcmsruns = get_lcmsrun_matrix() #this could be moved up abote to step 3 and save a few queries
file_conversion_tasks = get_table_from_lims('file_conversion_task',columns=['Key','input_file','output_file','task','status'])
for task in ['mzml_to_hdf5','raw_to_mzml','mzml_to_spectralhits','mzml_to_pactolus']:
update_file_conversion_tasks(task,lcmsruns=lcmsruns,file_conversion_tasks=file_conversion_tasks)
if __name__ == "__main__":
main()
# LOOK FOR INCOMPLETE FILES MZML FILES
# # </indexedmzML>
# df = pd.read_excel('lcmsrun_2020-07-02_22-41-09.xlsx')
# bad_files = []
# for f in df['Filename']:
# with open(f,'r') as fid:
# mzml = fid.read().strip().endswith('</indexedmzML>')
# # print(mzml)
# if mzml==False:
# bad_files.append(f)
# for f in bad_files:
# print(f)
# print('')
| metabolite-atlas/metatlas | metatlas/io/update_lcmsfiles_in_lims.py | Python | bsd-3-clause | 18,330 |
from datetime import datetime, timedelta
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.defaultfilters import date as dj_date, slugify
from django.utils.safestring import mark_safe
from caching.base import CachingManager, CachingMixin
from sorl.thumbnail import ImageField
from source.people.models import Organization
from source.utils.caching import expire_page_cache
def get_today():
return datetime.now().date()
def get_today_plus_30():
return datetime.now().date() + timedelta(days=30)
class LiveJobManager(CachingManager):
def get_query_set(self):
today = get_today()
return super(LiveJobManager, self).get_query_set().filter(
is_live=True, listing_start_date__lte=today, listing_end_date__gte=today
)
class Job(CachingMixin, models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
is_live = models.BooleanField('Display on site', default=True, help_text='Job will display if this is checked and dates are within proper range')
organization = models.ForeignKey(Organization, blank=True, null=True)
name = models.CharField('Job name', max_length=128)
slug = models.SlugField(unique=True)
description = models.TextField(blank=True)
listing_start_date = models.DateField(default=get_today)
listing_end_date = models.DateField(default=get_today_plus_30)
tweeted_at = models.DateTimeField(blank=True, null=True)
url = models.URLField(blank=True, null=True, verify_exists=False)
contact_name = models.CharField('Contact name', max_length=128, blank=True)
email = models.EmailField('Contact email', blank=True)
location = models.CharField('Job location', max_length=128, blank=True)
objects = models.Manager()
live_objects = LiveJobManager()
class Meta:
ordering = ('organization','slug',)
def __unicode__(self):
return u'%s: %s' % (self.name, self.organization)
def will_show_on_site(self):
today = get_today()
return (self.is_live and self.listing_start_date <= today and self.listing_end_date >= today)
will_show_on_site.boolean = True
@property
def get_list_page_url(self):
return '%s%s#job-%s' % (settings.BASE_SITE_URL, reverse('job_list'), self.pk)
@property
def organization_sort_name(self):
return self.organization.name.replace('The ', '')
@property
def get_contact_email(self):
        '''returns job email, falls back to organization email'''
return self.email or self.organization.email
@property
def pretty_start_date(self):
'''pre-process for simpler template logic'''
return dj_date(self.listing_start_date,"F j, Y")
@property
def pretty_expiration_date(self):
'''pre-process for simpler template logic'''
return dj_date(self.listing_end_date,"F j, Y")
@property
def wrapped_job_name(self):
if self.url:
link = '<a class="job-name" href="%s">%s</a>' % (self.url, self.name)
return mark_safe(link)
else:
return self.name
@property
def wrapped_organization_name(self):
if self.organization.is_live and self.organization.show_in_lists:
link = '<a class="job-organization" href="%s">%s</a>' % (self.organization.get_absolute_url(), self.organization.name)
return mark_safe(link)
else:
return self.organization.name
@property
def wrapped_contact_name(self):
if self.get_contact_email:
name = self.contact_name or 'Email'
link = '<a href="mailto:%s">%s</a>' % (self.get_contact_email, name)
return mark_safe(link)
else:
name = self.contact_name or ''
return name
def save(self, *args, **kwargs):
'''prepend pk to job slug to keep things unique'''
# save so we have a pk for new records
if not self.pk:
super(Job, self).save(*args, **kwargs)
# if we're resaving an existing record, strip the pk
# off the front so we don't end up multiplying them
slug_prefix = '%s-' % self.pk
if self.slug.startswith(slug_prefix):
self.slug = self.slug.replace(slug_prefix, '')
if self.slug == '':
self.slug = slugify(self.name)[:40]
# prefix with pk
self.slug = '%s%s' % (slug_prefix, self.slug)
# call this manually because of the double-save for slugging
clear_caches_for_job(self)
super(Job, self).save(*args, **kwargs)
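        # Illustrative example of the scheme above (hypothetical data): a new
        # Job(name='Data Editor') saved with a blank slug and assigned pk=12
        # ends up with slug '12-data-editor'; on subsequent saves the '12-'
        # prefix is stripped and re-added, so it never stacks up.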
def clear_caches_for_job(instance):
'''
Not triggering this via signal, as we seemed to have
trouble getting consistent results with the double-save
required for a unique slug. Called manually instead.
'''
# clear cache for job list page
expire_page_cache(reverse('job_list'))
# clear caches for related organization
if instance.organization:
expire_page_cache(instance.organization.get_absolute_url())
| ryanpitts/source | source/jobs/models.py | Python | bsd-3-clause | 5,260 |
import pytest
from opencivicdata.models import Organization
from pupa.scrape import Organization as ScrapeOrganization
from pupa.importers import OrganizationImporter
from pupa.exceptions import UnresolvedIdError
@pytest.mark.django_db
def test_full_organization():
org = ScrapeOrganization('United Nations', classification='international')
org.add_identifier('un')
org.add_name('UN', start_date='1945')
org.add_contact_detail(type='phone', value='555-555-1234', note='this is fake')
org.add_link('http://example.com/link')
org.add_source('http://example.com/source')
# import org
od = org.as_dict()
OrganizationImporter('jurisdiction-id').import_data([od])
# get person from db and assert it imported correctly
o = Organization.objects.get()
assert 'ocd-organization' in o.id
assert o.name == org.name
assert o.identifiers.all()[0].identifier == 'un'
assert o.identifiers.all()[0].scheme == ''
assert o.other_names.all()[0].name == 'UN'
assert o.other_names.all()[0].start_date == '1945'
assert o.contact_details.all()[0].type == 'phone'
assert o.contact_details.all()[0].value == '555-555-1234'
assert o.contact_details.all()[0].note == 'this is fake'
assert o.links.all()[0].url == 'http://example.com/link'
assert o.sources.all()[0].url == 'http://example.com/source'
@pytest.mark.django_db
def test_deduplication_similar_but_different():
o1 = ScrapeOrganization('United Nations', classification='international')
# different classification
o2 = ScrapeOrganization('United Nations', classification='global')
# different name
o3 = ScrapeOrganization('United Nations of Earth', classification='international')
# has a parent
o4 = ScrapeOrganization('United Nations', classification='international', parent_id=o1._id)
# similar, but no duplicates
orgs = [o1.as_dict(), o2.as_dict(), o3.as_dict(), o4.as_dict()]
OrganizationImporter('jurisdiction-id').import_data(orgs)
assert Organization.objects.count() == 4
# should get a new one when jurisdiction_id changes
o5 = ScrapeOrganization('United Nations', classification='international')
OrganizationImporter('new-jurisdiction-id').import_data([o5.as_dict()])
assert Organization.objects.count() == 5
@pytest.mark.django_db
def test_deduplication_parties():
party = ScrapeOrganization('Wild', classification='party')
OrganizationImporter('jurisdiction-id').import_data([party.as_dict()])
assert Organization.objects.count() == 1
# parties shouldn't get jurisdiction id attached, so don't differ on import
party = ScrapeOrganization('Wild', classification='party')
OrganizationImporter('new-jurisdiction-id').import_data([party.as_dict()])
assert Organization.objects.count() == 1
@pytest.mark.django_db
def test_deduplication_prevents_identical():
org1 = ScrapeOrganization('United Nations', classification='international')
org2 = ScrapeOrganization('United Nations', classification='international',
founding_date='1945')
OrganizationImporter('jurisdiction-id').import_data([org1.as_dict()])
assert Organization.objects.count() == 1
OrganizationImporter('jurisdiction-id').import_data([org2.as_dict()])
assert Organization.objects.count() == 1
@pytest.mark.django_db
def test_pseudo_ids():
wild = Organization.objects.create(id='1', name='Wild', classification='party')
senate = Organization.objects.create(id='2', name='Senate', classification='upper',
jurisdiction_id='jid1')
house = Organization.objects.create(id='3', name='House', classification='lower',
jurisdiction_id='jid1')
un = Organization.objects.create(id='4', name='United Nations', classification='international',
jurisdiction_id='jid2')
oi1 = OrganizationImporter('jid1')
assert oi1.resolve_json_id('~{"classification":"upper"}') == senate.id
assert oi1.resolve_json_id('~{"classification":"lower"}') == house.id
assert oi1.resolve_json_id('~{"classification":"party", "name":"Wild"}') == wild.id
with pytest.raises(UnresolvedIdError):
oi1.resolve_json_id('~{"classification":"international", "name":"United Nations"}')
oi2 = OrganizationImporter('jid2')
assert (oi2.resolve_json_id('~{"classification":"international", "name":"United Nations"}') ==
un.id)
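# The '~{"..."}' arguments above are pseudo-ids: a '~' prefix followed by a JSON
# blob of fields that the importer matches against existing organizations in its
# own jurisdiction (party lookups are jurisdiction-independent), raising
# UnresolvedIdError when nothing matches.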
@pytest.mark.django_db
def test_parent_id_resolution():
parent = ScrapeOrganization('UN', classification='international')
child = ScrapeOrganization('UNESCO', classification='unknown', parent_id=parent._id)
OrganizationImporter('jurisdiction-id').import_data([parent.as_dict(), child.as_dict()])
assert Organization.objects.count() == 2
assert Organization.objects.get(name='UN').children.count() == 1
assert Organization.objects.get(name='UNESCO').parent.name == 'UN'
@pytest.mark.django_db
def test_pseudo_parent_id_resolution():
parent = ScrapeOrganization('UN', classification='international')
child = ScrapeOrganization('UNESCO', classification='unknown',
parent_id='~{"classification": "international"}')
OrganizationImporter('jurisdiction-id').import_data([parent.as_dict(), child.as_dict()])
assert Organization.objects.count() == 2
assert Organization.objects.get(name='UN').children.count() == 1
assert Organization.objects.get(name='UNESCO').parent.name == 'UN'
@pytest.mark.django_db
def test_extras_organization():
org = ScrapeOrganization('United Nations', classification='international')
org.extras = {"hello": "world",
"foo": {"bar": "baz"}}
od = org.as_dict()
OrganizationImporter('jurisdiction-id').import_data([od])
o = Organization.objects.get()
assert o.extras['foo']['bar'] == 'baz'
| rshorey/pupa | pupa/tests/importers/test_organization_importer.py | Python | bsd-3-clause | 5,939 |
"""MiniAEFrame - A minimal AppleEvent Application framework.
There are two classes:
AEServer -- a mixin class offering nice AE handling.
MiniApplication -- a very minimal alternative to FrameWork.py,
only suitable for the simplest of AppleEvent servers.
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the MiniAEFrame module is removed.", stacklevel=2)
import traceback
import MacOS
from Carbon import AE
from Carbon.AppleEvents import *
from Carbon import Evt
from Carbon.Events import *
from Carbon import Menu
from Carbon import Win
from Carbon.Windows import *
from Carbon import Qd
import aetools
import EasyDialogs
kHighLevelEvent = 23 # Not defined anywhere for Python yet?
class MiniApplication:
"""A minimal FrameWork.Application-like class"""
def __init__(self):
self.quitting = 0
# Initialize menu
self.appleid = 1
self.quitid = 2
Menu.ClearMenuBar()
self.applemenu = applemenu = Menu.NewMenu(self.appleid, "\024")
applemenu.AppendMenu("%s;(-" % self.getaboutmenutext())
if MacOS.runtimemodel == 'ppc':
applemenu.AppendResMenu('DRVR')
applemenu.InsertMenu(0)
self.quitmenu = Menu.NewMenu(self.quitid, "File")
self.quitmenu.AppendMenu("Quit")
self.quitmenu.SetItemCmd(1, ord("Q"))
self.quitmenu.InsertMenu(0)
Menu.DrawMenuBar()
def __del__(self):
self.close()
def close(self):
pass
def mainloop(self, mask = everyEvent, timeout = 60*60):
while not self.quitting:
self.dooneevent(mask, timeout)
def _quit(self):
self.quitting = 1
def dooneevent(self, mask = everyEvent, timeout = 60*60):
got, event = Evt.WaitNextEvent(mask, timeout)
if got:
self.lowlevelhandler(event)
def lowlevelhandler(self, event):
what, message, when, where, modifiers = event
h, v = where
if what == kHighLevelEvent:
msg = "High Level Event: %r %r" % (code(message), code(h | (v<<16)))
try:
AE.AEProcessAppleEvent(event)
except AE.Error, err:
print 'AE error: ', err
print 'in', msg
traceback.print_exc()
return
elif what == keyDown:
c = chr(message & charCodeMask)
if modifiers & cmdKey:
if c == '.':
raise KeyboardInterrupt, "Command-period"
if c == 'q':
if hasattr(MacOS, 'OutputSeen'):
MacOS.OutputSeen()
self.quitting = 1
return
elif what == mouseDown:
partcode, window = Win.FindWindow(where)
if partcode == inMenuBar:
result = Menu.MenuSelect(where)
id = (result>>16) & 0xffff # Hi word
item = result & 0xffff # Lo word
if id == self.appleid:
if item == 1:
EasyDialogs.Message(self.getabouttext())
elif item > 1 and hasattr(Menu, 'OpenDeskAcc'):
name = self.applemenu.GetMenuItemText(item)
Menu.OpenDeskAcc(name)
elif id == self.quitid and item == 1:
if hasattr(MacOS, 'OutputSeen'):
MacOS.OutputSeen()
self.quitting = 1
Menu.HiliteMenu(0)
return
# Anything not handled is passed to Python/SIOUX
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
else:
print "Unhandled event:", event
def getabouttext(self):
return self.__class__.__name__
def getaboutmenutext(self):
return "About %s\311" % self.__class__.__name__
class AEServer:
def __init__(self):
self.ae_handlers = {}
def installaehandler(self, classe, type, callback):
AE.AEInstallEventHandler(classe, type, self.callback_wrapper)
self.ae_handlers[(classe, type)] = callback
def close(self):
for classe, type in self.ae_handlers.keys():
AE.AERemoveEventHandler(classe, type)
def callback_wrapper(self, _request, _reply):
_parameters, _attributes = aetools.unpackevent(_request)
_class = _attributes['evcl'].type
_type = _attributes['evid'].type
if (_class, _type) in self.ae_handlers:
_function = self.ae_handlers[(_class, _type)]
elif (_class, '****') in self.ae_handlers:
_function = self.ae_handlers[(_class, '****')]
elif ('****', '****') in self.ae_handlers:
_function = self.ae_handlers[('****', '****')]
else:
            raise RuntimeError('Cannot happen: AE callback without handler: %r' % ((_class, _type),))
# XXXX Do key-to-name mapping here
_parameters['_attributes'] = _attributes
_parameters['_class'] = _class
_parameters['_type'] = _type
if '----' in _parameters:
_object = _parameters['----']
del _parameters['----']
# The try/except that used to be here can mask programmer errors.
# Let the program crash, the programmer can always add a **args
# to the formal parameter list.
rv = _function(_object, **_parameters)
else:
#Same try/except comment as above
rv = _function(**_parameters)
if rv is None:
aetools.packevent(_reply, {})
else:
aetools.packevent(_reply, {'----':rv})
def code(x):
"Convert a long int to the 4-character code it really is"
s = ''
for i in range(4):
x, c = divmod(x, 256)
s = chr(c) + s
return s
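# For example, code(0x61657674) returns 'aevt': the low-order byte is emitted
# last, so the integer is read as a big-endian four-character code.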
class _Test(AEServer, MiniApplication):
"""Mini test application, handles required events"""
def __init__(self):
MiniApplication.__init__(self)
AEServer.__init__(self)
self.installaehandler('aevt', 'oapp', self.open_app)
self.installaehandler('aevt', 'quit', self.quit)
self.installaehandler('****', '****', self.other)
self.mainloop()
def quit(self, **args):
self._quit()
def open_app(self, **args):
pass
def other(self, _object=None, _class=None, _type=None, **args):
print 'AppleEvent', (_class, _type), 'for', _object, 'Other args:', args
if __name__ == '__main__':
_Test()
| teeple/pns_server | work/install/Python-2.7.4/Lib/plat-mac/MiniAEFrame.py | Python | gpl-2.0 | 6,519 |
from elasticsearch_dsl.search import Search, Q
def test_count_all(data_client):
s = Search(using=data_client).index('git')
assert 53 == s.count()
def test_count_type(data_client):
s = Search(using=data_client).index('git').doc_type('repos')
assert 1 == s.count()
def test_count_filter(data_client):
s = Search(using=data_client).index('git').filter(~Q('exists', field='parent_shas'))
# initial commit + repo document
assert 2 == s.count()
| f-santos/elasticsearch-dsl-py | test_elasticsearch_dsl/test_integration/test_count.py | Python | apache-2.0 | 470 |
# Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nnabla.utils.rnn import _create_fixed_length_rnn
from nbla_test_utils import list_context
ctxs = list_context('RNN')
def execute_fixed_length_rnn(xs_np, h0_np, w0_np, w_np, b_np, num_layers=1, nonlinearity='tanh', dropout=0.0, bidirectional=False, training=True):
# Inputs are numpy arrays
num_directions = 2 if bidirectional else 1
seq_len = xs_np.shape[0]
batch_size = xs_np.shape[1]
hidden_size = h0_np.shape[3]
xs = nn.Variable.from_numpy_array(xs_np)
h0 = nn.Variable.from_numpy_array(h0_np)
w0 = nn.Variable.from_numpy_array(w0_np)
w = None
b = None
with_bias = False
if num_layers > 1:
w = nn.Variable.from_numpy_array(w_np)
if type(b_np) is np.ndarray:
b = nn.Variable.from_numpy_array(b_np)
with_bias = True
ys, hn = _create_fixed_length_rnn(
xs, h0, w0, w, b, num_layers, nonlinearity, num_directions, with_bias) # returns Variables
dummy = F.sink(ys, hn)
dummy.forward()
# returns numpy arrays
ys = F.reshape(ys, (seq_len, batch_size, num_directions * hidden_size))
ys.forward()
return ys.d, hn.d
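# Weight layout used by these helpers (mirroring how the test inputs are built
# below): w0 holds the first-layer weights with shape
# (num_directions, hidden_size, input_size + hidden_size), w holds any further
# layers with shape (num_layers - 1, num_directions, hidden_size,
# num_directions * hidden_size + hidden_size), and b has shape
# (num_layers, num_directions, hidden_size).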
def get_rnn_grad(xs_np, h0_np, w0_np, w_np, b_np, dy, dh, num_layers=1, nonlinearity='tanh', dropout=0.0, bidirectional=False, training=True, **kw):
# Inputs are numpy arrays
num_directions = 2 if bidirectional else 1
seq_len = xs_np.shape[0]
batch_size = xs_np.shape[1]
hidden_size = h0_np.shape[3]
xs = nn.Variable.from_numpy_array(xs_np, need_grad=True)
h0 = nn.Variable.from_numpy_array(h0_np, need_grad=True)
w0 = nn.Variable.from_numpy_array(w0_np, need_grad=True)
w = None
b = None
with_bias = False
if num_layers > 1:
w = nn.Variable.from_numpy_array(w_np, need_grad=True)
if type(b_np) == np.ndarray:
b = nn.Variable.from_numpy_array(b_np, need_grad=True)
with_bias = True
xs.grad.zero()
h0.grad.zero()
w0.grad.zero()
if num_layers > 1:
w.grad.zero()
if with_bias:
b.grad.zero()
ys, hn = _create_fixed_length_rnn(
xs, h0, w0, w, b, num_layers, nonlinearity, num_directions, with_bias) # returns Variables
dummy = F.sink(ys, hn, one_input_grad=False)
dummy.forward()
ys.g = np.reshape(dy, ys.shape)
hn.g = dh
dummy.backward()
if num_layers > 1 and with_bias:
return np.concatenate((xs.g.flat, h0.g.flat, w0.g.flat, w.g.flat, b.g.flat))
elif num_layers > 1 and not with_bias:
return np.concatenate((xs.g.flat, h0.g.flat, w0.g.flat, w.g.flat))
elif num_layers == 1 and with_bias:
return np.concatenate((xs.g.flat, h0.g.flat, w0.g.flat, b.g.flat))
else:
return np.concatenate((xs.g.flat, h0.g.flat, w0.g.flat))
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [100])
@pytest.mark.parametrize("num_layers", [1])
@pytest.mark.parametrize("nonlinearity", ["tanh", "relu"])
@pytest.mark.parametrize("dropout", [0.0])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("seq_len", [2, 5])
@pytest.mark.parametrize("batch_size", [3])
@pytest.mark.parametrize("input_size", [2])
@pytest.mark.parametrize("hidden_size", [3])
@pytest.mark.parametrize("with_bias", [True, False])
def test_rnn(seed, num_layers, nonlinearity, dropout, bidirectional, training, seq_len, batch_size, input_size, hidden_size, with_bias, ctx, func_name):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
num_directions = 1
if bidirectional:
num_directions = 2
inputs = [rng.randn(seq_len, batch_size, input_size).astype(np.float32)]
inputs += [rng.randn(num_layers, num_directions,
batch_size, hidden_size).astype(np.float32)]
inputs += [rng.randn(num_directions, hidden_size,
input_size + hidden_size)]
if num_layers > 1:
inputs += [rng.randn(max(1, num_layers-1), num_directions, hidden_size,
num_directions*hidden_size + hidden_size).astype(np.float32)]
else:
inputs += [None]
if with_bias:
inputs += [rng.randn(num_layers, num_directions,
hidden_size).astype(np.float32)]
else:
inputs += [None]
backward = [False for _ in inputs]
if training:
backward = [True for _ in inputs]
function_tester(rng, F.rnn, execute_fixed_length_rnn, inputs, func_kwargs=dict(
num_layers=num_layers, nonlinearity=nonlinearity, dropout=dropout, bidirectional=bidirectional, training=training), atol_f=2e-1, atol_b=2e-1, dstep=1e-3, backward=backward, ctx=ctx, func_name=func_name, ref_grad=get_rnn_grad, disable_half_test=True)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [100])
@pytest.mark.parametrize("num_layers", [1, 2])
@pytest.mark.parametrize("nonlinearity", ["tanh", "relu"])
@pytest.mark.parametrize("dropout", [0.0])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("training", [True])
@pytest.mark.parametrize("seq_len", [2, 5])
@pytest.mark.parametrize("batch_size", [1])
@pytest.mark.parametrize("input_size", [2])
@pytest.mark.parametrize("hidden_size", [2])
@pytest.mark.parametrize("with_bias", [True, False])
def test_rnn_double_backward(seed, num_layers, nonlinearity, dropout, bidirectional, training,
seq_len, batch_size, input_size, hidden_size, with_bias, ctx, func_name):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
num_directions = 1
if bidirectional:
num_directions = 2
inputs = [rng.randn(seq_len, batch_size, input_size).astype(np.float32)]
inputs += [rng.randn(num_layers, num_directions,
batch_size, hidden_size).astype(np.float32)]
inputs += [rng.randn(num_directions, hidden_size,
input_size + hidden_size)]
if num_layers > 1:
inputs += [rng.randn(max(1, num_layers-1), num_directions, hidden_size,
num_directions*hidden_size + hidden_size).astype(np.float32)]
else:
inputs += [None]
if with_bias:
inputs += [rng.randn(num_layers, num_directions,
hidden_size).astype(np.float32)]
else:
inputs += [None]
backward = [False for _ in inputs]
if training:
backward = [True for _ in inputs]
backward_function_tester(rng, F.rnn, inputs, func_kwargs=dict(
num_layers=num_layers, nonlinearity=nonlinearity, dropout=dropout,
bidirectional=bidirectional,
training=training),
atol_f=1e-4, dstep=1e-3, backward=backward,
ctx=ctx, skip_backward_check=True)
@pytest.mark.parametrize("num_layers", [2])
@pytest.mark.parametrize("bidirectional", [False])
@pytest.mark.parametrize("seq_len", [2, 5])
@pytest.mark.parametrize("batch_size", [3])
@pytest.mark.parametrize("input_size", [2])
@pytest.mark.parametrize("hidden_size", [3])
@pytest.mark.parametrize("ctx, func_name", ctxs)
def test_inference_backward(num_layers, bidirectional, seq_len, batch_size, input_size, hidden_size, ctx, func_name):
with nn.context_scope(ctx):
num_directions = 1
if bidirectional:
num_directions = 2
x = nn.Variable((seq_len, batch_size, input_size), need_grad=True)
h = nn.Variable((num_layers, num_directions,
batch_size, hidden_size), need_grad=True)
w0 = nn.Variable((num_directions, hidden_size,
input_size + hidden_size), need_grad=True)
w = nn.Variable((max(1, num_layers-1), num_directions, hidden_size,
num_directions*hidden_size + hidden_size), need_grad=True)
b = nn.Variable((num_layers, num_directions,
hidden_size), need_grad=True)
y, hn = F.rnn(x, h, w0, w, b, num_layers=num_layers,
nonlinearity="tanh", training=False)
y.forward()
with pytest.raises(RuntimeError) as e_info:
y.backward()
| sony/nnabla | python/test/function/test_rnn.py | Python | apache-2.0 | 8,914 |
from test_slides import *
from test_presenter import *
import unittest
unittest.main()
| gabber12/slides.vim | tests/__main__.py | Python | mit | 87 |
__author__ = "Loran425"
def ranges(lst):
temp_start = None
temp_end = None
prev = None
output = []
for x in lst:
temp_end = x
if temp_start is None:
temp_start = x
prev = x
continue
if temp_end != prev + 1:
if temp_start != prev: # single number isn't a range
output.append('{}->{}'.format(temp_start, prev))
temp_start = x
prev = x
continue
else:
prev = x
# Make sure the final range makes it into the output
if temp_start != temp_end: # single number isn't a range
output.append('{}->{}'.format(temp_start, temp_end))
return output
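# Illustrative behaviour, taken from the tests below:
#   ranges([1, 2, 3, 4, 5, 8, 9, 10]) -> ['1->5', '8->10']
#   ranges([1]) -> []   # a lone number is not reported as a range
#   ranges([5, 6, 7, 10, 15, 18, 20, 21, 22]) -> ['5->7', '20->22']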
if __name__ == '__main__':
import unittest
class TestRanges(unittest.TestCase):
def test1(self):
"""
Tests a basic input that should only have 1 element in the list
:return:
"""
self.assertEqual(ranges([1, 2]), ['1->2'])
def test2(self):
"""
Tests a case in which the return value should contain no elements
:return:
"""
self.assertEqual(ranges([1]), [])
def test3(self):
self.assertEqual(ranges([1, 2, 3, 4, 5, 8, 9, 10]), ['1->5', '8->10'])
def test4(self):
self.assertEqual(ranges([5, 6, 7, 20, 21, 22, 25]), ['5->7', '20->22'])
def test5(self):
self.assertEqual(ranges([5, 6, 9, 10, 12, 13, 15, 16, 18, 19, 25, 30]),
['5->6', '9->10', '12->13', '15->16', '18->19'])
def test6(self):
self.assertEqual(ranges([5, 6, 7, 10, 15, 18, 20, 21, 22]), ['5->7', '20->22'])
unittest.main()
| DakRomo/2017Challenges | challenge_6/python/Loran425/src/ranges.py | Python | mit | 1,775 |
# -*- coding: utf-8 -*-
#
# Name: face.com Python API client library
# Description: face.com REST API Python client library.
#
# For more information about the API and the return values,
# visit the official documentation at http://developers.face.com/docs/api/.
#
# Author: Tomaž Muraus (http://www.tomaz-muraus.info)
# License: GPL (http://www.gnu.org/licenses/gpl.html)
# Version: 1.0
import urllib
import urllib2
import json
import os.path
API_URL = 'http://api.face.com'
class FaceClient():
def __init__(self, api_key = None, api_secret = None):
if not api_key or not api_secret:
raise AttributeError('Missing api_key or api_secret argument')
self.api_key = api_key
self.api_secret = api_secret
self.format = 'json'
self.twitter_credentials = None
self.facebook_credentials = None
def set_twitter_user_credentials(self, user = None, password = None):
if not user or not password:
raise AttributeError('Missing Twitter username or password')
self.twitter_credentials = {'twitter_user': user,
'twitter_password': password}
def set_twitter_oauth_credentials(self, user = None, secret = None, token = None):
if not user or not secret or not token:
raise AttributeError('Missing one of the required arguments')
self.twitter_credentials = {'twitter_oauth_user': user,
'twitter_oauth_secret': secret,
'twitter_oauth_token': token}
def set_facebook_credentials(self, user = None, session = None):
if not user or not session:
raise AttributeError('Missing Facebook user or session argument')
self.facebook_credentials = {'fb_user': user,
'fb_session': session}
### Recognition engine methods ###
def faces_detect(self, urls = None, file = None, aggressive=False):
"""
Returns tags for detected faces in one or more photos, with geometric information
of the tag, eyes, nose and mouth, as well as the gender, glasses, and smiling attributes.
http://developers.face.com/docs/api/faces-detect/
"""
if not urls and not file:
raise AttributeError('Missing URLs/filename argument')
if file:
# Check if the file exists
if not os.path.exists(file):
raise IOError('File %s does not exist' % (file))
data = {'file': file}
else:
data = {'urls': urls}
if aggressive:
data['detector'] = 'Aggressive'
response = self.send_request('faces/detect', data)
return response
def faces_status(self, uids = None, namespace = None):
"""
Reports training set status for the specified UIDs.
http://developers.face.com/docs/api/faces-status/
"""
if not uids:
raise AttributeError('Missing user IDs')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
data = {'uids': uids}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, namespace = namespace)
response = self.send_request('faces/status', data)
return response
def faces_recognize(self, uids = None, urls = None, file = None, train = None, \
namespace = None):
"""
Attempts to detect and recognize one or more user IDs' faces, in one or more photos.
For each detected face, the face.com engine will return the most likely user IDs,
or empty result for unrecognized faces. In addition, each tag includes a threshold
score - any score below this number is considered a low-probability hit.
http://developers.face.com/docs/api/faces-recognize/
"""
if not uids or (not urls and not file):
raise AttributeError('Missing required arguments')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
data = {'uids': uids}
if file:
# Check if the file exists
if not os.path.exists(file):
raise IOError('File %s does not exist' % (file))
data.update({'file': file})
else:
data.update({'urls': urls})
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, train = train, namespace = namespace)
response = self.send_request('faces/recognize', data)
return response
def faces_train(self, uids = None, namespace = None):
"""
Calls the training procedure for the specified UIDs, and reports back changes.
http://developers.face.com/docs/api/faces-train/
"""
if not uids:
raise AttributeError('Missing user IDs')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
data = {'uids': uids}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, namespace = namespace)
response = self.send_request('faces/train', data)
return response
### Methods for managing face tags ###
def tags_get(self, uids = None, urls = None, pids = None, order = 'recent', \
limit = 5, together = False, filter = None, namespace = None):
"""
Returns saved tags in one or more photos, or for the specified User ID(s).
This method also accepts multiple filters for finding tags corresponding to
a more specific criteria such as front-facing, recent, or where two or more
users appear together in same photos.
http://developers.face.com/docs/api/tags-get/
"""
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
data = {'uids': uids,
'urls': urls,
'together': together,
'limit': limit}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, pids = pids, filter = filter, \
namespace = namespace)
response = self.send_request('tags/get', data)
return response
def tags_add(self, url = None, x = None, y = None, width = None, uid = None, \
tagger_id = None, label = None, password = None):
"""
Add a (manual) face tag to a photo. Use this method to add face tags where
those were not detected for completeness of your service.
http://developers.face.com/docs/api/tags-add/
"""
if not url or not x or not y or not width or not uid or not tagger_id:
raise AttributeError('Missing one of the required arguments')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uid)
data = {'url': url,
'x': x,
'y': y,
'width': width,
'uid': uid,
'tagger_id': tagger_id}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, label = label, password = password)
response = self.send_request('tags/add', data)
return response
def tags_save(self, tids = None, uid = None, tagger_id = None, label = None, \
password = None):
"""
Saves a face tag. Use this method to save tags for training the face.com
index, or for future use of the faces.detect and tags.get methods.
http://developers.face.com/docs/api/tags-save/
"""
if not tids or not uid:
raise AttributeError('Missing required argument')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uid)
data = {'tids': tids,
'uid': uid}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, tagger_id = tagger_id, label = label, \
password = password)
response = self.send_request('tags/save', data)
return response
def tags_remove(self, tids = None, password = None):
"""
Remove a previously saved face tag from a photo.
http://developers.face.com/docs/api/tags-remove/
"""
if not tids:
raise AttributeError('Missing tag IDs')
data = {'tids': tids}
response = self.send_request('tags/remove', data)
return response
### Account management methods ###
def account_limits(self):
"""
Returns current rate limits for the account represented by the passed API key and Secret.
http://developers.face.com/docs/api/account-limits/
"""
response = self.send_request('account/limits')
return response['usage']
def account_users(self, namespaces = None):
"""
        Returns the users registered under the given namespaces for the account
        represented by the passed API key and Secret.
        http://developers.face.com/docs/api/account-users/
"""
if not namespaces:
raise AttributeError('Missing namespaces argument')
response = self.send_request('account/users', {'namespaces': namespaces})
return response
def __check_user_auth_credentials(self, uids):
# Check if needed credentials are provided
        # uids may legitimately be None (e.g. tags_get without uids), so guard
        # the split instead of raising AttributeError.
        facebook_uids = [uid for uid in (uids or '').split(',')
                         if uid.find('@facebook.com') != -1]
        twitter_uids = [uid for uid in (uids or '').split(',')
                        if uid.find('@twitter.com') != -1]
if facebook_uids and not self.facebook_credentials:
raise AttributeError('You need to set Facebook credentials to perform action on Facebook users')
if twitter_uids and not self.twitter_credentials:
raise AttributeError('You need to set Twitter credentials to perform action on Twitter users')
return (facebook_uids, twitter_uids)
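    # uids are comma-separated strings; Facebook users are written as
    # '<id>@facebook.com' and Twitter users as '<name>@twitter.com', which is
    # what the substring checks above key off when deciding which credentials
    # are required.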
def __append_user_auth_data(self, data, facebook_uids, twitter_uids):
if facebook_uids:
data.update({'user_auth': 'fb_user:%s,fb_session:%s' % (self.facebook_credentials['fb_user'],
self.facebook_credentials['fb_session'])})
if twitter_uids:
# If both user/password and OAuth credentials are provided, use
# OAuth as default
if self.twitter_credentials.get('twitter_oauth_user', None):
data.update({'user_auth': 'twitter_oauth_user:%s,twitter_oauth_secret:%s,twitter_oauth_token:%s' %
(self.twitter_credentials['twitter_oauth_user'], self.twitter_credentials['twitter_oauth_secret'], \
self.twitter_credentials['twitter_oauth_token'])})
else:
data.update({'user_auth': 'twitter_user:%s,twitter_password:%s' % (self.twitter_credentials['twitter_user'],
self.twitter_credentials['twitter_password'])})
def __append_optional_arguments(self, data, **kwargs):
for key, value in kwargs.iteritems():
if value:
data.update({key: value})
def send_request(self, method = None, parameters = None):
url = '%s/%s' % (API_URL, method)
data = {'api_key': self.api_key,
'api_secret': self.api_secret,
'format': self.format}
if parameters:
data.update(parameters)
# Local file is provided, use multi-part form
        if parameters and 'file' in parameters:  # parameters may be None (e.g. account_limits)
from multipart import Multipart
form = Multipart()
for key, value in data.iteritems():
if key == 'file':
with open(value, 'r') as file:
form.file(os.path.basename(key), os.path.basename(key), file.read())
else:
form.field(key, value)
(content_type, post_data) = form.get()
headers = {'Content-Type': content_type}
else:
post_data = urllib.urlencode(data)
headers = {}
request = urllib2.Request(url, headers = headers, data = post_data)
response = urllib2.urlopen(request)
response = response.read()
response_data = json.loads(response)
if 'status' in response_data and \
response_data['status'] == 'failure':
raise FaceError(response_data['error_code'], response_data['error_message'])
return response_data
class FaceError(Exception):
def __init__(self, error_code, error_message):
self.error_code = error_code
self.error_message = error_message
def __str__(self):
return '%s (%d)' % (self.error_message, self.error_code)
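# Illustrative usage (hypothetical key, secret and photo URL):
#
#   client = FaceClient('API_KEY', 'API_SECRET')
#   response = client.faces_detect(urls='http://example.com/photo.jpg')
#
# `response` is the decoded JSON dict returned by the face.com REST API; a
# FaceError is raised if the API reports a failure status.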
| pizzapanther/Church-Source | dependencies/face_client/face_client.py | Python | gpl-3.0 | 11,111 |
# Test cases for Multi-AP
# Copyright (c) 2018, The Linux Foundation
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import hostapd
def test_multi_ap_association(dev, apdev):
"""Multi-AP association in backhaul BSS"""
run_multi_ap_association(dev, apdev, 1)
dev[1].connect("multi-ap", psk="12345678", scan_freq="2412",
wait_connect=False)
ev = dev[1].wait_event(["CTRL-EVENT-DISCONNECTED",
"CTRL-EVENT-CONNECTED",
"CTRL-EVENT-ASSOC-REJECT"],
timeout=5)
dev[1].request("DISCONNECT")
if ev is None:
raise Exception("Connection result not reported")
if "CTRL-EVENT-ASSOC-REJECT" not in ev:
raise Exception("Association rejection not reported")
if "status_code=12" not in ev:
raise Exception("Unexpected association status code: " + ev)
def test_multi_ap_association_shared_bss(dev, apdev):
"""Multi-AP association in backhaul BSS (with fronthaul BSS enabled)"""
run_multi_ap_association(dev, apdev, 3)
dev[1].connect("multi-ap", psk="12345678", scan_freq="2412")
def run_multi_ap_association(dev, apdev, multi_ap, wait_connect=True):
params = hostapd.wpa2_params(ssid="multi-ap", passphrase="12345678")
if multi_ap:
params["multi_ap"] = str(multi_ap)
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("multi-ap", psk="12345678", scan_freq="2412",
multi_ap_backhaul_sta="1", wait_connect=wait_connect)
def test_multi_ap_disabled_on_ap(dev, apdev):
"""Multi-AP association attempt when disabled on AP"""
run_multi_ap_association(dev, apdev, 0, wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED",
"CTRL-EVENT-CONNECTED"],
timeout=5)
dev[0].request("DISCONNECT")
if ev is None:
raise Exception("Connection result not reported")
if "CTRL-EVENT-DISCONNECTED" not in ev:
raise Exception("Unexpected connection result")
def test_multi_ap_fronthaul_on_ap(dev, apdev):
"""Multi-AP association attempt when only fronthaul BSS on AP"""
run_multi_ap_association(dev, apdev, 2, wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED",
"CTRL-EVENT-CONNECTED",
"CTRL-EVENT-ASSOC-REJECT"],
timeout=5)
dev[0].request("DISCONNECT")
if ev is None:
raise Exception("Connection result not reported")
if "CTRL-EVENT-DISCONNECTED" not in ev:
raise Exception("Unexpected connection result")
def run_multi_ap_wps(dev, apdev, params, multi_ap_bssid=None):
"""Helper for running Multi-AP WPS tests
dev[0] does multi_ap WPS, dev[1] does normal WPS. apdev[0] is the fronthaul
BSS. If there is a separate backhaul BSS, it must have been set up by the
    caller. params are the normal SSID parameters; they will be extended with
the WPS parameters. multi_ap_bssid must be given if it is not equal to the
fronthaul BSSID."""
if multi_ap_bssid is None:
multi_ap_bssid = apdev[0]['bssid']
params.update({"wps_state": "2", "eap_server": "1"})
# WPS with multi-ap station dev[0]
hapd = hostapd.add_ap(apdev[0], params)
hapd.request("WPS_PBC")
if "PBC Status: Active" not in hapd.request("WPS_GET_STATUS"):
raise Exception("PBC status not shown correctly")
dev[0].request("WPS_PBC multi_ap=1")
dev[0].wait_connected(timeout=20)
status = dev[0].get_status()
if status['wpa_state'] != 'COMPLETED' or status['bssid'] != multi_ap_bssid:
raise Exception("Not fully connected")
if status['ssid'] != params['multi_ap_backhaul_ssid'].strip('"'):
raise Exception("Unexpected SSID %s != %s" % (status['ssid'], params["multi_ap_backhaul_ssid"]))
if status['pairwise_cipher'] != 'CCMP':
raise Exception("Unexpected encryption configuration %s" % status['pairwise_cipher'])
if status['key_mgmt'] != 'WPA2-PSK':
raise Exception("Unexpected key_mgmt")
status = hapd.request("WPS_GET_STATUS")
if "PBC Status: Disabled" not in status:
raise Exception("PBC status not shown correctly")
if "Last WPS result: Success" not in status:
raise Exception("Last WPS result not shown correctly")
if "Peer Address: " + dev[0].own_addr() not in status:
raise Exception("Peer address not shown correctly")
if len(dev[0].list_networks()) != 1:
raise Exception("Unexpected number of network blocks")
# WPS with non-Multi-AP station dev[1]
hapd.request("WPS_PBC")
if "PBC Status: Active" not in hapd.request("WPS_GET_STATUS"):
raise Exception("PBC status not shown correctly")
dev[1].request("WPS_PBC")
dev[1].wait_connected(timeout=20)
status = dev[1].get_status()
if status['wpa_state'] != 'COMPLETED' or status['bssid'] != apdev[0]['bssid']:
raise Exception("Not fully connected")
if status['ssid'] != params["ssid"]:
raise Exception("Unexpected SSID")
# Fronthaul may be something else than WPA2-PSK so don't test it.
status = hapd.request("WPS_GET_STATUS")
if "PBC Status: Disabled" not in status:
raise Exception("PBC status not shown correctly")
if "Last WPS result: Success" not in status:
raise Exception("Last WPS result not shown correctly")
if "Peer Address: " + dev[1].own_addr() not in status:
raise Exception("Peer address not shown correctly")
if len(dev[1].list_networks()) != 1:
raise Exception("Unexpected number of network blocks")
def test_multi_ap_wps_shared(dev, apdev):
"""WPS on shared fronthaul/backhaul AP"""
ssid = "multi-ap-wps"
passphrase = "12345678"
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params.update({"multi_ap": "3",
"multi_ap_backhaul_ssid": '"%s"' % ssid,
"multi_ap_backhaul_wpa_passphrase": passphrase})
run_multi_ap_wps(dev, apdev, params)
def test_multi_ap_wps_shared_psk(dev, apdev):
"""WPS on shared fronthaul/backhaul AP using PSK"""
ssid = "multi-ap-wps"
psk = "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
params = hostapd.wpa2_params(ssid=ssid)
params.update({"wpa_psk": psk,
"multi_ap": "3",
"multi_ap_backhaul_ssid": '"%s"' % ssid,
"multi_ap_backhaul_wpa_psk": psk})
run_multi_ap_wps(dev, apdev, params)
def test_multi_ap_wps_split(dev, apdev):
"""WPS on split fronthaul and backhaul AP"""
backhaul_ssid = "multi-ap-backhaul-wps"
backhaul_passphrase = "87654321"
params = hostapd.wpa2_params(ssid="multi-ap-fronthaul-wps",
passphrase="12345678")
params.update({"multi_ap": "2",
"multi_ap_backhaul_ssid": '"%s"' % backhaul_ssid,
"multi_ap_backhaul_wpa_passphrase": backhaul_passphrase})
params_backhaul = hostapd.wpa2_params(ssid=backhaul_ssid,
passphrase=backhaul_passphrase)
params_backhaul.update({"multi_ap": "1"})
hapd_backhaul = hostapd.add_ap(apdev[1], params_backhaul)
run_multi_ap_wps(dev, apdev, params, hapd_backhaul.own_addr())
def test_multi_ap_wps_split_psk(dev, apdev):
"""WPS on split fronthaul and backhaul AP"""
backhaul_ssid = "multi-ap-backhaul-wps"
backhaul_psk = "1234567890abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
params = hostapd.wpa2_params(ssid="multi-ap-fronthaul-wps",
passphrase="12345678")
params.update({"multi_ap": "2",
"multi_ap_backhaul_ssid": '"%s"' % backhaul_ssid,
"multi_ap_backhaul_wpa_psk": backhaul_psk})
params_backhaul = hostapd.wpa2_params(ssid=backhaul_ssid)
params_backhaul.update({"multi_ap": "1", "wpa_psk": backhaul_psk})
hapd_backhaul = hostapd.add_ap(apdev[1], params_backhaul)
run_multi_ap_wps(dev, apdev, params, hapd_backhaul.own_addr())
def test_multi_ap_wps_split_mixed(dev, apdev):
"""WPS on split fronthaul and backhaul AP with mixed-mode fronthaul"""
backhaul_ssid = "multi-ap-backhaul-wps"
backhaul_passphrase = "87654321"
params = hostapd.wpa_mixed_params(ssid="multi-ap-fronthaul-wps",
passphrase="12345678")
params.update({"multi_ap": "2",
"multi_ap_backhaul_ssid": '"%s"' % backhaul_ssid,
"multi_ap_backhaul_wpa_passphrase": backhaul_passphrase})
params_backhaul = hostapd.wpa2_params(ssid=backhaul_ssid,
passphrase=backhaul_passphrase)
params_backhaul.update({"multi_ap": "1"})
hapd_backhaul = hostapd.add_ap(apdev[1], params_backhaul)
run_multi_ap_wps(dev, apdev, params, hapd_backhaul.own_addr())
def test_multi_ap_wps_split_open(dev, apdev):
"""WPS on split fronthaul and backhaul AP with open fronthaul"""
backhaul_ssid = "multi-ap-backhaul-wps"
backhaul_passphrase = "87654321"
params = {"ssid": "multi-ap-wps-fronthaul", "multi_ap": "2",
"multi_ap_backhaul_ssid": '"%s"' % backhaul_ssid,
"multi_ap_backhaul_wpa_passphrase": backhaul_passphrase}
params_backhaul = hostapd.wpa2_params(ssid=backhaul_ssid,
passphrase=backhaul_passphrase)
params_backhaul.update({"multi_ap": "1"})
hapd_backhaul = hostapd.add_ap(apdev[1], params_backhaul)
run_multi_ap_wps(dev, apdev, params, hapd_backhaul.own_addr())
def test_multi_ap_wps_fail_non_multi_ap(dev, apdev):
"""Multi-AP WPS on non-WPS AP fails"""
params = hostapd.wpa2_params(ssid="non-multi-ap-wps", passphrase="12345678")
params.update({"wps_state": "2", "eap_server": "1"})
hapd = hostapd.add_ap(apdev[0], params)
hapd.request("WPS_PBC")
if "PBC Status: Active" not in hapd.request("WPS_GET_STATUS"):
raise Exception("PBC status not shown correctly")
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
dev[0].request("WPS_PBC %s multi_ap=1" % apdev[0]['bssid'])
# Since we will fail to associate and WPS doesn't even get started, there
# isn't much we can do except wait for timeout. For PBC, it is not possible
# to change the timeout from 2 minutes. Instead of waiting for the timeout,
# just check that WPS doesn't finish within reasonable time.
for i in range(2):
ev = dev[0].wait_event(["WPS-SUCCESS", "WPS-FAIL",
"CTRL-EVENT-DISCONNECTED"], timeout=10)
if ev and "WPS-" in ev:
raise Exception("WPS operation completed: " + ev)
dev[0].request("WPS_CANCEL")
| s0lst1c3/eaphammer | local/hostapd-eaphammer/tests/hwsim/test_multi_ap.py | Python | gpl-3.0 | 10,834 |
from elasticsearch import Elasticsearch
from elasticsearch.client import IndicesClient
import numpy as np
import datetime as dt
import math
import json
import pprint
with open("config", "r+") as txt:
contents = list(map(str.rstrip, txt))
es = Elasticsearch([{
'host': contents[4], 'port': contents[5]
}], timeout=30)
esCon = IndicesClient(es)
pp = pprint.PrettyPrinter(indent=4)
properties = {"properties":
{
"stdCoreHr":{"type":"double"},
"maxCoreHr":{"type":"double"},
"minCoreHr":{"type":"double"},
"medianCoreHr":{"type":"double"},
"meanCoreHr":{"type":"double"},
"stdCpuBadput":{"type":"double"},
"meanCpuBadput":{"type":"double"},
"medianCpuBadput":{"type":"double"},
"maxCpuBadput":{"type":"double"},
"minCpuBadput":{"type":"double"},
"minCpuEff":{"type":"double"},
"meanCpuEff":{"type":"double"},
"maxCpuEff":{"type":"double"},
"medianCpuEff":{"type":"double"},
"stdCpuEff":{"type":"double"},
"meanCpuTimeHr":{"type":"double"},
"minCpuTimeHr":{"type":"double"},
"maxCpuTimeHr":{"type":"double"},
"medianCpuTimeHr":{"type":"double"},
"stdCpuTimeHr":{"type":"double"},
"minEventRate":{"type":"double"},
"meanEventRate":{"type":"double"},
"maxEventRate":{"type":"double"},
"medianEventRate":{"type":"double"},
"stdEventRate":{"type":"double"},
"stdKEvents":{"type":"double"},
"meanKEvents":{"type":"double"},
"medianKEvents":{"type":"double"},
"minKEvents":{"type":"double"},
"maxKEvents":{"type":"double"},
"meanMemoryMB":{"type":"double"},
"medianMemoryMB":{"type":"double"},
"minMemoryMB":{"type":"double"},
"maxMemoryMB":{"type":"double"},
"stdMemoryMB":{"type":"double"},
"Workflow":{"type":"keyword"},
"LastRemoteHost":{"type":"keyword"},
"meanInputGB":{"type":"double"},
"medianInputGB":{"type":"double"},
"stdInputGB":{"type":"double"},
"minInputGB":{"type":"double"},
"maxInputGB":{"type":"double"},
"stdLumosity":{"type":"double"},
"maxLumosity":{"type":"double"},
"minLumosity":{"type":"double"},
"medianLumosity":{"type":"double"},
"meanLumosity":{"type":"double"},
"meanQueueHrs":{"type":"double"},
"medianQueueHrs":{"type":"double"},
"maxQueueHrs":{"type":"double"},
"minQueueHrs":{"type":"double"},
"stdQueueHrs":{"type":"double"},
"meanRequestCpus":{"type":"double"},
"medianRequestCpus":{"type":"double"},
"minRequestCpus":{"type":"double"},
"maxRequestCpus":{"type":"double"},
"stdRequestCpus":{"type":"double"},
"meanRequestMemory":{"type":"double"},
"minRequestMemory":{"type":"double"},
"maxRequestMemory":{"type":"double"},
"medianRequestMemory":{"type":"double"},
"stdRequestMemory":{"type":"double"},
"meanWallClockHr":{"type":"double"},
"medianWallClockHr":{"type":"double"},
"maxWallClockHr":{"type":"double"},
"minWallClockHr":{"type":"double"},
"stdWallClockHr":{"type":"double"},
"stdCMSSWKLumis":{"type":"double"},
"meanCMSSWKLumis":{"type":"double"},
"medianCMSSWKLumis":{"type":"double"},
"maxCMSSWKLumis":{"type":"double"},
"minCMSSWKLumis":{"type":"double"},
"beginDate":{"type":"date","format":"epoch_second"},
"dest":{"type":"keyword"},
"maxdestLatency":{"type":"double"},
"mindestLatency":{"type":"double"},
"mediandestLatency":{"type":"double"},
"meandestLatency":{"type":"double"},
"mindestLatency":{"type":"double"},
"stddestPacket":{"type":"double"},
"maxdestPacket":{"type":"double"},
"mindestPacket":{"type":"double"},
"mediandestPacket":{"type":"double"},
"meandestPacket":{"type":"double"},
"stddestThroughput":{"type":"double"},
"meandestThroughput":{"type":"double"},
"mediandestThroughput":{"type":"double"},
"maxdestThroughput":{"type":"double"},
"mindestThroughput":{"type":"double"},
"endDate":{"type":"date","format":"epoch_second"},
"src":{"type":"keyword"},
"stdsrcLatency":{"type":"double"},
"minsrcLatency":{"type":"double"},
"maxsrcLatency":{"type":"double"},
"mediansrcLatency":{"type":"double"},
"meansrcLatency":{"type":"double"},
"stdsrcPacket":{"type":"double"},
"maxsrcPacket":{"type":"double"},
"minsrcPacket":{"type":"double"},
"mediansrcPacket":{"type":"double"},
"meansrcPacket":{"type":"double"},
"stdReadTimeMins":{"type":"double"},
"meanReadTimeMins":{"type":"double"},
"medianReadTimeMins":{"type":"double"},
"maxReadTimeMins":{"type":"double"},
"minReadTimeMins":{"type":"double"},
"stdsrcThroughput":{"type":"double"},
"meansrcThroughput":{"type":"double"},
"mediansrcThroughput":{"type":"double"},
"maxsrcThroughput":{"type":"double"},
"minsrcThroughput":{"type":"double"},
}
}
def esConQuery():
queryBody={"mappings":
{"dev":properties,
"DataProcessing":properties,
"RECO":properties,
"DIGI":properties,
"DIGIRECO":properties
}
}
if esCon.exists(index="net-health"):
pp.pprint(esCon.delete(index="net-health"))
scannerCon = esCon.create(index="net-health",
body=queryBody)
return scannerCon
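# Note: esConQuery() is destructive -- any existing "net-health" index is
# deleted before it is recreated with the mappings above, so previously indexed
# documents are lost on every run.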
#print(esConAgg("src"))
#print(esConAgg("dest"))
def main():
temp = esConQuery()
pp.pprint(temp)
# Run Main code
print("start")
main()
print("finished")
| robogen/CMS-Mining | RunScripts/index_reseter.py | Python | mit | 8,101 |
#!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for msgutil module."""
import array
import Queue
import struct
import unittest
import zlib
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket.extensions import DeflateFrameExtensionProcessor
from mod_pywebsocket.extensions import PerFrameCompressionExtensionProcessor
from mod_pywebsocket.extensions import PerMessageCompressionExtensionProcessor
from mod_pywebsocket import msgutil
from mod_pywebsocket.stream import InvalidUTF8Exception
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamHixie75
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
from test import mock
# We use one fixed nonce for testing instead of cryptographically secure PRNG.
_MASKING_NONCE = 'ABCD'
def _mask_hybi(frame):
frame_key = map(ord, _MASKING_NONCE)
frame_key_len = len(frame_key)
result = array.array('B')
result.fromstring(frame)
count = 0
for i in xrange(len(result)):
result[i] ^= frame_key[count]
count = (count + 1) % frame_key_len
return _MASKING_NONCE + result.tostring()
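# Client-to-server frames are masked: the 4-byte masking key (the fixed
# _MASKING_NONCE here) is prepended to the payload and every payload byte is
# XORed with the key byte at position (i mod 4), mirroring the RFC 6455
# client-side masking that these tests must reproduce.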
def _install_extension_processor(processor, request, stream_options):
response = processor.get_extension_response()
if response is not None:
processor.setup_stream_options(stream_options)
request.ws_extension_processors.append(processor)
def _create_request_from_rawdata(
read_data, deflate_stream=False, deflate_frame_request=None,
perframe_compression_request=None, permessage_compression_request=None):
req = mock.MockRequest(connection=mock.MockConn(''.join(read_data)))
req.ws_version = common.VERSION_HYBI_LATEST
stream_options = StreamOptions()
stream_options.deflate_stream = deflate_stream
req.ws_extension_processors = []
if deflate_frame_request is not None:
processor = DeflateFrameExtensionProcessor(deflate_frame_request)
_install_extension_processor(processor, req, stream_options)
elif perframe_compression_request is not None:
processor = PerFrameCompressionExtensionProcessor(
perframe_compression_request)
_install_extension_processor(processor, req, stream_options)
elif permessage_compression_request is not None:
processor = PerMessageCompressionExtensionProcessor(
permessage_compression_request)
_install_extension_processor(processor, req, stream_options)
req.ws_stream = Stream(req, stream_options)
return req
def _create_request(*frames):
"""Creates MockRequest using data given as frames.
frames will be returned on calling request.connection.read() where request
is MockRequest returned by this function.
"""
read_data = []
for (header, body) in frames:
read_data.append(header + _mask_hybi(body))
return _create_request_from_rawdata(read_data)
def _create_blocking_request():
"""Creates MockRequest.
Data written to a MockRequest can be read out by calling
request.connection.written_data().
"""
req = mock.MockRequest(connection=mock.MockBlockingConn())
req.ws_version = common.VERSION_HYBI_LATEST
stream_options = StreamOptions()
req.ws_stream = Stream(req, stream_options)
return req
def _create_request_hixie75(read_data=''):
req = mock.MockRequest(connection=mock.MockConn(read_data))
req.ws_stream = StreamHixie75(req)
return req
def _create_blocking_request_hixie75():
req = mock.MockRequest(connection=mock.MockBlockingConn())
req.ws_stream = StreamHixie75(req)
return req
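# In the expected byte strings below, the first byte combines FIN and opcode
# (\x81 = final text frame, \x01 = non-final text frame, \x00 = continuation,
# \x80 = final continuation, \xc1 = final text frame with the RSV1/compression
# bit set) and the second byte is the payload length, with 0x7e/0x7f signalling
# 16-bit and 64-bit extended length fields.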
class MessageTest(unittest.TestCase):
# Tests for Stream
def test_send_message(self):
request = _create_request()
msgutil.send_message(request, 'Hello')
self.assertEqual('\x81\x05Hello', request.connection.written_data())
payload = 'a' * 125
request = _create_request()
msgutil.send_message(request, payload)
self.assertEqual('\x81\x7d' + payload,
request.connection.written_data())
def test_send_medium_message(self):
payload = 'a' * 126
request = _create_request()
msgutil.send_message(request, payload)
self.assertEqual('\x81\x7e\x00\x7e' + payload,
request.connection.written_data())
payload = 'a' * ((1 << 16) - 1)
request = _create_request()
msgutil.send_message(request, payload)
self.assertEqual('\x81\x7e\xff\xff' + payload,
request.connection.written_data())
def test_send_large_message(self):
payload = 'a' * (1 << 16)
request = _create_request()
msgutil.send_message(request, payload)
self.assertEqual('\x81\x7f\x00\x00\x00\x00\x00\x01\x00\x00' + payload,
request.connection.written_data())
def test_send_message_unicode(self):
request = _create_request()
msgutil.send_message(request, u'\u65e5')
# U+65e5 is encoded as e6,97,a5 in UTF-8
self.assertEqual('\x81\x03\xe6\x97\xa5',
request.connection.written_data())
def test_send_message_fragments(self):
request = _create_request()
msgutil.send_message(request, 'Hello', False)
msgutil.send_message(request, ' ', False)
msgutil.send_message(request, 'World', False)
msgutil.send_message(request, '!', True)
self.assertEqual('\x01\x05Hello\x00\x01 \x00\x05World\x80\x01!',
request.connection.written_data())
def test_send_fragments_immediate_zero_termination(self):
request = _create_request()
msgutil.send_message(request, 'Hello World!', False)
msgutil.send_message(request, '', True)
self.assertEqual('\x01\x0cHello World!\x80\x00',
request.connection.written_data())
def test_send_message_deflate_stream(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
request = _create_request_from_rawdata('', deflate_stream=True)
msgutil.send_message(request, 'Hello')
expected = compress.compress('\x81\x05Hello')
expected += compress.flush(zlib.Z_SYNC_FLUSH)
self.assertEqual(expected, request.connection.written_data())
def test_send_message_deflate_frame(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
request = _create_request_from_rawdata(
'', deflate_frame_request=extension)
msgutil.send_message(request, 'Hello')
msgutil.send_message(request, 'World')
expected = ''
compressed_hello = compress.compress('Hello')
compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_hello = compressed_hello[:-4]
expected += '\xc1%c' % len(compressed_hello)
expected += compressed_hello
compressed_world = compress.compress('World')
compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_world = compressed_world[:-4]
expected += '\xc1%c' % len(compressed_world)
expected += compressed_world
self.assertEqual(expected, request.connection.written_data())
def test_send_message_deflate_frame_comp_bit(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
request = _create_request_from_rawdata(
'', deflate_frame_request=extension)
self.assertEquals(1, len(request.ws_extension_processors))
deflate_frame_processor = request.ws_extension_processors[0]
msgutil.send_message(request, 'Hello')
deflate_frame_processor.disable_outgoing_compression()
msgutil.send_message(request, 'Hello')
deflate_frame_processor.enable_outgoing_compression()
msgutil.send_message(request, 'Hello')
expected = ''
compressed_hello = compress.compress('Hello')
compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_hello = compressed_hello[:-4]
expected += '\xc1%c' % len(compressed_hello)
expected += compressed_hello
expected += '\x81\x05Hello'
compressed_2nd_hello = compress.compress('Hello')
compressed_2nd_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_2nd_hello = compressed_2nd_hello[:-4]
expected += '\xc1%c' % len(compressed_2nd_hello)
expected += compressed_2nd_hello
self.assertEqual(expected, request.connection.written_data())
def test_send_message_deflate_frame_no_context_takeover_parameter(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
extension.add_parameter('no_context_takeover', None)
request = _create_request_from_rawdata(
'', deflate_frame_request=extension)
for i in xrange(3):
msgutil.send_message(request, 'Hello')
compressed_message = compress.compress('Hello')
compressed_message += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_message = compressed_message[:-4]
expected = '\xc1%c' % len(compressed_message)
expected += compressed_message
self.assertEqual(
expected + expected + expected, request.connection.written_data())
def test_deflate_frame_bad_request_parameters(self):
"""Tests that if there's anything wrong with deflate-frame extension
request, deflate-frame is rejected.
"""
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
# max_window_bits less than 8 is illegal.
extension.add_parameter('max_window_bits', '7')
processor = DeflateFrameExtensionProcessor(extension)
self.assertEqual(None, processor.get_extension_response())
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
# max_window_bits greater than 15 is illegal.
extension.add_parameter('max_window_bits', '16')
processor = DeflateFrameExtensionProcessor(extension)
self.assertEqual(None, processor.get_extension_response())
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
# Non integer max_window_bits is illegal.
extension.add_parameter('max_window_bits', 'foobar')
processor = DeflateFrameExtensionProcessor(extension)
self.assertEqual(None, processor.get_extension_response())
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
# no_context_takeover must not have any value.
extension.add_parameter('no_context_takeover', 'foobar')
processor = DeflateFrameExtensionProcessor(extension)
self.assertEqual(None, processor.get_extension_response())
def test_deflate_frame_response_parameters(self):
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
processor = DeflateFrameExtensionProcessor(extension)
processor.set_response_window_bits(8)
response = processor.get_extension_response()
self.assertTrue(response.has_parameter('max_window_bits'))
self.assertEqual('8', response.get_parameter_value('max_window_bits'))
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
processor = DeflateFrameExtensionProcessor(extension)
processor.set_response_no_context_takeover(True)
response = processor.get_extension_response()
self.assertTrue(response.has_parameter('no_context_takeover'))
self.assertTrue(
response.get_parameter_value('no_context_takeover') is None)
def test_send_message_perframe_compress_deflate(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
extension = common.ExtensionParameter(
common.PERFRAME_COMPRESSION_EXTENSION)
extension.add_parameter('method', 'deflate')
request = _create_request_from_rawdata(
'', perframe_compression_request=extension)
msgutil.send_message(request, 'Hello')
msgutil.send_message(request, 'World')
expected = ''
compressed_hello = compress.compress('Hello')
compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_hello = compressed_hello[:-4]
expected += '\xc1%c' % len(compressed_hello)
expected += compressed_hello
compressed_world = compress.compress('World')
compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_world = compressed_world[:-4]
expected += '\xc1%c' % len(compressed_world)
expected += compressed_world
self.assertEqual(expected, request.connection.written_data())
def test_send_message_permessage_compress_deflate(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
extension = common.ExtensionParameter(
common.PERMESSAGE_COMPRESSION_EXTENSION)
extension.add_parameter('method', 'deflate')
request = _create_request_from_rawdata(
'', permessage_compression_request=extension)
msgutil.send_message(request, 'Hello')
msgutil.send_message(request, 'World')
expected = ''
compressed_hello = compress.compress('Hello')
compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_hello = compressed_hello[:-4]
expected += '\xc1%c' % len(compressed_hello)
expected += compressed_hello
compressed_world = compress.compress('World')
compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_world = compressed_world[:-4]
expected += '\xc1%c' % len(compressed_world)
expected += compressed_world
self.assertEqual(expected, request.connection.written_data())
def test_receive_message(self):
request = _create_request(
('\x81\x85', 'Hello'), ('\x81\x86', 'World!'))
self.assertEqual('Hello', msgutil.receive_message(request))
self.assertEqual('World!', msgutil.receive_message(request))
payload = 'a' * 125
request = _create_request(('\x81\xfd', payload))
self.assertEqual(payload, msgutil.receive_message(request))
def test_receive_medium_message(self):
payload = 'a' * 126
request = _create_request(('\x81\xfe\x00\x7e', payload))
self.assertEqual(payload, msgutil.receive_message(request))
payload = 'a' * ((1 << 16) - 1)
request = _create_request(('\x81\xfe\xff\xff', payload))
self.assertEqual(payload, msgutil.receive_message(request))
def test_receive_large_message(self):
payload = 'a' * (1 << 16)
request = _create_request(
('\x81\xff\x00\x00\x00\x00\x00\x01\x00\x00', payload))
self.assertEqual(payload, msgutil.receive_message(request))
def test_receive_length_not_encoded_using_minimal_number_of_bytes(self):
# A warning is logged on receiving a payload length field that does not use
# the minimal number of bytes, but processing continues.
payload = 'a'
# 1 byte can be represented without extended payload length field.
request = _create_request(
('\x81\xff\x00\x00\x00\x00\x00\x00\x00\x01', payload))
self.assertEqual(payload, msgutil.receive_message(request))
def test_receive_message_unicode(self):
request = _create_request(('\x81\x83', '\xe6\x9c\xac'))
# U+672c is encoded as e6,9c,ac in UTF-8
self.assertEqual(u'\u672c', msgutil.receive_message(request))
def test_receive_message_erroneous_unicode(self):
# \x80 and \x81 are invalid as UTF-8.
request = _create_request(('\x81\x82', '\x80\x81'))
# Invalid characters should raise InvalidUTF8Exception
self.assertRaises(InvalidUTF8Exception,
msgutil.receive_message,
request)
def test_receive_fragments(self):
request = _create_request(
('\x01\x85', 'Hello'),
('\x00\x81', ' '),
('\x00\x85', 'World'),
('\x80\x81', '!'))
self.assertEqual('Hello World!', msgutil.receive_message(request))
def test_receive_fragments_unicode(self):
# UTF-8 encodes U+6f22 into e6bca2 and U+5b57 into e5ad97.
request = _create_request(
('\x01\x82', '\xe6\xbc'),
('\x00\x82', '\xa2\xe5'),
('\x80\x82', '\xad\x97'))
self.assertEqual(u'\u6f22\u5b57', msgutil.receive_message(request))
def test_receive_fragments_immediate_zero_termination(self):
request = _create_request(
('\x01\x8c', 'Hello World!'), ('\x80\x80', ''))
self.assertEqual('Hello World!', msgutil.receive_message(request))
def test_receive_fragments_duplicate_start(self):
request = _create_request(
('\x01\x85', 'Hello'), ('\x01\x85', 'World'))
self.assertRaises(msgutil.InvalidFrameException,
msgutil.receive_message,
request)
def test_receive_fragments_intermediate_but_not_started(self):
request = _create_request(('\x00\x85', 'Hello'))
self.assertRaises(msgutil.InvalidFrameException,
msgutil.receive_message,
request)
def test_receive_fragments_end_but_not_started(self):
request = _create_request(('\x80\x85', 'Hello'))
self.assertRaises(msgutil.InvalidFrameException,
msgutil.receive_message,
request)
def test_receive_message_discard(self):
request = _create_request(
('\x8f\x86', 'IGNORE'), ('\x81\x85', 'Hello'),
('\x8f\x89', 'DISREGARD'), ('\x81\x86', 'World!'))
self.assertRaises(msgutil.UnsupportedFrameException,
msgutil.receive_message, request)
self.assertEqual('Hello', msgutil.receive_message(request))
self.assertRaises(msgutil.UnsupportedFrameException,
msgutil.receive_message, request)
self.assertEqual('World!', msgutil.receive_message(request))
def test_receive_close(self):
request = _create_request(
('\x88\x8a', struct.pack('!H', 1000) + 'Good bye'))
self.assertEqual(None, msgutil.receive_message(request))
self.assertEqual(1000, request.ws_close_code)
self.assertEqual('Good bye', request.ws_close_reason)
def test_receive_message_deflate_stream(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
data = compress.compress('\x81\x85' + _mask_hybi('Hello'))
data += compress.flush(zlib.Z_SYNC_FLUSH)
data += compress.compress('\x81\x89' + _mask_hybi('WebSocket'))
data += compress.flush(zlib.Z_FINISH)
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
data += compress.compress('\x81\x85' + _mask_hybi('World'))
data += compress.flush(zlib.Z_SYNC_FLUSH)
# Close frame
data += compress.compress(
'\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye'))
data += compress.flush(zlib.Z_SYNC_FLUSH)
request = _create_request_from_rawdata(data, deflate_stream=True)
self.assertEqual('Hello', msgutil.receive_message(request))
self.assertEqual('WebSocket', msgutil.receive_message(request))
self.assertEqual('World', msgutil.receive_message(request))
self.assertFalse(request.drain_received_data_called)
self.assertEqual(None, msgutil.receive_message(request))
self.assertTrue(request.drain_received_data_called)
def test_receive_message_deflate_frame(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
data = ''
compressed_hello = compress.compress('Hello')
compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_hello = compressed_hello[:-4]
data += '\xc1%c' % (len(compressed_hello) | 0x80)
data += _mask_hybi(compressed_hello)
compressed_websocket = compress.compress('WebSocket')
compressed_websocket += compress.flush(zlib.Z_FINISH)
compressed_websocket += '\x00'
data += '\xc1%c' % (len(compressed_websocket) | 0x80)
data += _mask_hybi(compressed_websocket)
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
compressed_world = compress.compress('World')
compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_world = compressed_world[:-4]
data += '\xc1%c' % (len(compressed_world) | 0x80)
data += _mask_hybi(compressed_world)
# Close frame
data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
request = _create_request_from_rawdata(
data, deflate_frame_request=extension)
self.assertEqual('Hello', msgutil.receive_message(request))
self.assertEqual('WebSocket', msgutil.receive_message(request))
self.assertEqual('World', msgutil.receive_message(request))
self.assertEqual(None, msgutil.receive_message(request))
def test_receive_message_deflate_frame_client_using_smaller_window(self):
"""Test that frames coming from a client which is using smaller window
size that the server are correctly received.
"""
# Using the smallest window bits of 8 for generating input frames.
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -8)
data = ''
# Use a frame whose content is bigger than the client's DEFLATE window
# size before compression. The content mainly consists of 'a', but runs
# of 'b' are placed at the head and tail so that the tail can
# back-reference the head only if the window size is large.
payload = 'b' * 64 + 'a' * 1024 + 'b' * 64
compressed_hello = compress.compress(payload)
compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_hello = compressed_hello[:-4]
data += '\xc1%c' % (len(compressed_hello) | 0x80)
data += _mask_hybi(compressed_hello)
# Close frame
data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
request = _create_request_from_rawdata(
data, deflate_frame_request=extension)
self.assertEqual(payload, msgutil.receive_message(request))
self.assertEqual(None, msgutil.receive_message(request))
def test_receive_message_deflate_frame_comp_bit(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
data = ''
compressed_hello = compress.compress('Hello')
compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_hello = compressed_hello[:-4]
data += '\xc1%c' % (len(compressed_hello) | 0x80)
data += _mask_hybi(compressed_hello)
data += '\x81\x85' + _mask_hybi('Hello')
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
compressed_2nd_hello = compress.compress('Hello')
compressed_2nd_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_2nd_hello = compressed_2nd_hello[:-4]
data += '\xc1%c' % (len(compressed_2nd_hello) | 0x80)
data += _mask_hybi(compressed_2nd_hello)
extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
request = _create_request_from_rawdata(
data, deflate_frame_request=extension)
for i in xrange(3):
self.assertEqual('Hello', msgutil.receive_message(request))
def test_receive_message_perframe_compression_frame(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
data = ''
compressed_hello = compress.compress('Hello')
compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_hello = compressed_hello[:-4]
data += '\xc1%c' % (len(compressed_hello) | 0x80)
data += _mask_hybi(compressed_hello)
compressed_websocket = compress.compress('WebSocket')
compressed_websocket += compress.flush(zlib.Z_FINISH)
compressed_websocket += '\x00'
data += '\xc1%c' % (len(compressed_websocket) | 0x80)
data += _mask_hybi(compressed_websocket)
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
compressed_world = compress.compress('World')
compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_world = compressed_world[:-4]
data += '\xc1%c' % (len(compressed_world) | 0x80)
data += _mask_hybi(compressed_world)
# Close frame
data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
extension = common.ExtensionParameter(
common.PERFRAME_COMPRESSION_EXTENSION)
extension.add_parameter('method', 'deflate')
request = _create_request_from_rawdata(
data, perframe_compression_request=extension)
self.assertEqual('Hello', msgutil.receive_message(request))
self.assertEqual('WebSocket', msgutil.receive_message(request))
self.assertEqual('World', msgutil.receive_message(request))
self.assertEqual(None, msgutil.receive_message(request))
def test_receive_message_permessage_deflate_compression(self):
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
data = ''
compressed_hello = compress.compress('HelloWebSocket')
compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_hello = compressed_hello[:-4]
split_position = len(compressed_hello) / 2
data += '\x41%c' % (split_position | 0x80)
data += _mask_hybi(compressed_hello[:split_position])
data += '\x80%c' % ((len(compressed_hello) - split_position) | 0x80)
data += _mask_hybi(compressed_hello[split_position:])
compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
compressed_world = compress.compress('World')
compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
compressed_world = compressed_world[:-4]
data += '\xc1%c' % (len(compressed_world) | 0x80)
data += _mask_hybi(compressed_world)
# Close frame
data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
extension = common.ExtensionParameter(
common.PERMESSAGE_COMPRESSION_EXTENSION)
extension.add_parameter('method', 'deflate')
request = _create_request_from_rawdata(
data, permessage_compression_request=extension)
self.assertEqual('HelloWebSocket', msgutil.receive_message(request))
self.assertEqual('World', msgutil.receive_message(request))
self.assertEqual(None, msgutil.receive_message(request))
def test_send_longest_close(self):
reason = 'a' * 123
request = _create_request(
('\x88\xfd',
struct.pack('!H', common.STATUS_NORMAL_CLOSURE) + reason))
request.ws_stream.close_connection(common.STATUS_NORMAL_CLOSURE,
reason)
self.assertEqual(request.ws_close_code, common.STATUS_NORMAL_CLOSURE)
self.assertEqual(request.ws_close_reason, reason)
def test_send_close_too_long(self):
request = _create_request()
self.assertRaises(msgutil.BadOperationException,
Stream.close_connection,
request.ws_stream,
common.STATUS_NORMAL_CLOSURE,
'a' * 124)
def test_send_close_inconsistent_code_and_reason(self):
request = _create_request()
# reason parameter must not be specified when code is None.
self.assertRaises(msgutil.BadOperationException,
Stream.close_connection,
request.ws_stream,
None,
'a')
def test_send_ping(self):
request = _create_request()
msgutil.send_ping(request, 'Hello World!')
self.assertEqual('\x89\x0cHello World!',
request.connection.written_data())
def test_send_longest_ping(self):
request = _create_request()
msgutil.send_ping(request, 'a' * 125)
self.assertEqual('\x89\x7d' + 'a' * 125,
request.connection.written_data())
def test_send_ping_too_long(self):
request = _create_request()
self.assertRaises(msgutil.BadOperationException,
msgutil.send_ping,
request,
'a' * 126)
def test_receive_ping(self):
"""Tests receiving a ping control frame."""
def handler(request, message):
request.called = True
# Stream automatically responds to a ping with a pong without any action
# by the application layer.
request = _create_request(
('\x89\x85', 'Hello'), ('\x81\x85', 'World'))
self.assertEqual('World', msgutil.receive_message(request))
self.assertEqual('\x8a\x05Hello',
request.connection.written_data())
request = _create_request(
('\x89\x85', 'Hello'), ('\x81\x85', 'World'))
request.on_ping_handler = handler
self.assertEqual('World', msgutil.receive_message(request))
self.assertTrue(request.called)
def test_receive_longest_ping(self):
request = _create_request(
('\x89\xfd', 'a' * 125), ('\x81\x85', 'World'))
self.assertEqual('World', msgutil.receive_message(request))
self.assertEqual('\x8a\x7d' + 'a' * 125,
request.connection.written_data())
def test_receive_ping_too_long(self):
request = _create_request(('\x89\xfe\x00\x7e', 'a' * 126))
self.assertRaises(msgutil.InvalidFrameException,
msgutil.receive_message,
request)
def test_receive_pong(self):
"""Tests receiving a pong control frame."""
def handler(request, message):
request.called = True
request = _create_request(
('\x8a\x85', 'Hello'), ('\x81\x85', 'World'))
request.on_pong_handler = handler
msgutil.send_ping(request, 'Hello')
self.assertEqual('\x89\x05Hello',
request.connection.written_data())
# Valid pong is received, but receive_message won't return for it.
self.assertEqual('World', msgutil.receive_message(request))
# Check that nothing was written after receive_message call.
self.assertEqual('\x89\x05Hello',
request.connection.written_data())
self.assertTrue(request.called)
def test_receive_unsolicited_pong(self):
# Unsolicited pong is allowed from HyBi 07.
request = _create_request(
('\x8a\x85', 'Hello'), ('\x81\x85', 'World'))
msgutil.receive_message(request)
request = _create_request(
('\x8a\x85', 'Hello'), ('\x81\x85', 'World'))
msgutil.send_ping(request, 'Jumbo')
# Body mismatch.
msgutil.receive_message(request)
def test_ping_cannot_be_fragmented(self):
request = _create_request(('\x09\x85', 'Hello'))
self.assertRaises(msgutil.InvalidFrameException,
msgutil.receive_message,
request)
def test_ping_with_too_long_payload(self):
request = _create_request(('\x89\xfe\x01\x00', 'a' * 256))
self.assertRaises(msgutil.InvalidFrameException,
msgutil.receive_message,
request)
class MessageTestHixie75(unittest.TestCase):
"""Tests for draft-hixie-thewebsocketprotocol-76 stream class."""
def test_send_message(self):
request = _create_request_hixie75()
msgutil.send_message(request, 'Hello')
self.assertEqual('\x00Hello\xff', request.connection.written_data())
def test_send_message_unicode(self):
request = _create_request_hixie75()
msgutil.send_message(request, u'\u65e5')
# U+65e5 is encoded as e6,97,a5 in UTF-8
self.assertEqual('\x00\xe6\x97\xa5\xff',
request.connection.written_data())
def test_receive_message(self):
request = _create_request_hixie75('\x00Hello\xff\x00World!\xff')
self.assertEqual('Hello', msgutil.receive_message(request))
self.assertEqual('World!', msgutil.receive_message(request))
def test_receive_message_unicode(self):
request = _create_request_hixie75('\x00\xe6\x9c\xac\xff')
# U+672c is encoded as e6,9c,ac in UTF-8
self.assertEqual(u'\u672c', msgutil.receive_message(request))
def test_receive_message_erroneous_unicode(self):
# \x80 and \x81 are invalid as UTF-8.
request = _create_request_hixie75('\x00\x80\x81\xff')
# Invalid characters should be replaced with
# U+fffd REPLACEMENT CHARACTER
self.assertEqual(u'\ufffd\ufffd', msgutil.receive_message(request))
def test_receive_message_discard(self):
request = _create_request_hixie75('\x80\x06IGNORE\x00Hello\xff'
'\x01DISREGARD\xff\x00World!\xff')
self.assertEqual('Hello', msgutil.receive_message(request))
self.assertEqual('World!', msgutil.receive_message(request))
class MessageReceiverTest(unittest.TestCase):
"""Tests the Stream class using MessageReceiver."""
def test_queue(self):
request = _create_blocking_request()
receiver = msgutil.MessageReceiver(request)
self.assertEqual(None, receiver.receive_nowait())
request.connection.put_bytes('\x81\x86' + _mask_hybi('Hello!'))
self.assertEqual('Hello!', receiver.receive())
def test_onmessage(self):
onmessage_queue = Queue.Queue()
def onmessage_handler(message):
onmessage_queue.put(message)
request = _create_blocking_request()
receiver = msgutil.MessageReceiver(request, onmessage_handler)
request.connection.put_bytes('\x81\x86' + _mask_hybi('Hello!'))
self.assertEqual('Hello!', onmessage_queue.get())
class MessageReceiverHixie75Test(unittest.TestCase):
"""Tests the StreamHixie75 class using MessageReceiver."""
def test_queue(self):
request = _create_blocking_request_hixie75()
receiver = msgutil.MessageReceiver(request)
self.assertEqual(None, receiver.receive_nowait())
request.connection.put_bytes('\x00Hello!\xff')
self.assertEqual('Hello!', receiver.receive())
def test_onmessage(self):
onmessage_queue = Queue.Queue()
def onmessage_handler(message):
onmessage_queue.put(message)
request = _create_blocking_request_hixie75()
receiver = msgutil.MessageReceiver(request, onmessage_handler)
request.connection.put_bytes('\x00Hello!\xff')
self.assertEqual('Hello!', onmessage_queue.get())
class MessageSenderTest(unittest.TestCase):
"""Tests the Stream class using MessageSender."""
def test_send(self):
request = _create_blocking_request()
sender = msgutil.MessageSender(request)
sender.send('World')
self.assertEqual('\x81\x05World', request.connection.written_data())
def test_send_nowait(self):
# Use a queue to check the bytes written by MessageSender.
# request.connection.written_data() cannot be used here because
# MessageSender runs in a separate thread.
send_queue = Queue.Queue()
def write(bytes):
send_queue.put(bytes)
request = _create_blocking_request()
request.connection.write = write
sender = msgutil.MessageSender(request)
sender.send_nowait('Hello')
sender.send_nowait('World')
self.assertEqual('\x81\x05Hello', send_queue.get())
self.assertEqual('\x81\x05World', send_queue.get())
class MessageSenderHixie75Test(unittest.TestCase):
"""Tests the StreamHixie75 class using MessageSender."""
def test_send(self):
request = _create_blocking_request_hixie75()
sender = msgutil.MessageSender(request)
sender.send('World')
self.assertEqual('\x00World\xff', request.connection.written_data())
def test_send_nowait(self):
# Use a queue to check the bytes written by MessageSender.
# request.connection.written_data() cannot be used here because
# MessageSender runs in a separate thread.
send_queue = Queue.Queue()
def write(bytes):
send_queue.put(bytes)
request = _create_blocking_request_hixie75()
request.connection.write = write
sender = msgutil.MessageSender(request)
sender.send_nowait('Hello')
sender.send_nowait('World')
self.assertEqual('\x00Hello\xff', send_queue.get())
self.assertEqual('\x00World\xff', send_queue.get())
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| Omegaphora/external_chromium-trace | trace-viewer/third_party/pywebsocket/src/test/test_msgutil.py | Python | bsd-3-clause | 39,891 |
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
import logging
from netzob.Common.ResourcesConfiguration import ResourcesConfiguration
from netzob.UI.Vocabulary.Controllers.VocabularyController import VocabularyController
#+---------------------------------------------------------------------------+
#| Related third party imports
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| AbstractPluginController:
#| Groups the methods that any plugin controller must be able to access |
#+---------------------------------------------------------------------------+
class AbstractPluginController(object):
def __init__(self, netzob, plugin, view):
super(AbstractPluginController, self).__init__()
self.netzob = netzob
self.plugin = plugin
self.view = view
def getCurrentProject(self):
"""Computes the current project. It may returns None if no
current project is yet loaded.
@return: the current project L{netzob.Common.Project:Project}
"""
return self.netzob.getCurrentProject()
def getVocabularyController(self):
"""getVocabularyController:
Returns the controller associated with the vocabulary"""
return self.netzob.getPerspectiveController(VocabularyController.PERSPECTIVE_ID)
def getPlugin(self):
return self.plugin
def getView(self):
return self.view
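# Illustrative sketch only (not part of Netzob): a hypothetical plugin
# controller subclassing AbstractPluginController would typically use the
# getters above along these lines:
#
#     class MyExportPluginController(AbstractPluginController):
#         def run(self):
#             project = self.getCurrentProject()
#             if project is None:
#                 logging.warning("No project is currently loaded")
#                 return
#             vocabulary = self.getVocabularyController()
#             # ... export data from the current project ...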
| nagyistoce/netzob | src/netzob/Common/Plugins/AbstractPluginController.py | Python | gpl-3.0 | 3,761 |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""Application controller for usearch v5.2.32
Includes application controllers for usearch and
convenience wrappers for different functions of usearch including
sorting fasta files, finding clusters, converting to cd-hit format and
searching and aligning against a database. Also contains
a parser for the resulting .clstr file.
Modified from pycogent_backports/uclust.py, written by
Greg Caporaso/William Walters
"""
from os.path import splitext, abspath, join
from tempfile import mkstemp, gettempdir
from skbio.parse.sequences import parse_fasta
from burrito.parameters import ValuedParameter, FlagParameter
from burrito.util import (CommandLineApplication, ResultPath,
ApplicationError, ApplicationNotFoundError)
from skbio.util import remove_files
class UsearchParseError(Exception):
pass
class Usearch(CommandLineApplication):
""" Usearch ApplicationController
"""
_command = 'usearch'
_input_handler = '_input_as_parameters'
_parameters = {
# Fasta input file for merge-sort function
'--mergesort': ValuedParameter('--', Name='mergesort', Delimiter=' ',
IsPath=True),
# E-value threshold for accepting hits
'--evalue': ValuedParameter('--', Name='evalue', Delimiter=' ',
IsPath=False),
# Output file, used by several difference functions
'--output': ValuedParameter('--', Name='output', Delimiter=' ',
IsPath=True),
# Output filename will be in uclust (.uc) format
# Output cluster file, required parameter
'--uc': ValuedParameter('--', Name='uc', Delimiter=' ',
IsPath=True),
'--blast6out': ValuedParameter('--', Name='blast6out', Delimiter=' ',
IsPath=True),
# ID percent for OTU, by default is 97%
'--id': ValuedParameter('--', Name='id', Delimiter=' ', IsPath=False),
'--queryalnfract':
ValuedParameter(
'--',
Name='queryalnfract',
Delimiter=' ',
IsPath=False),
'--targetalnfract':
ValuedParameter(
'--',
Name='targetalnfract',
Delimiter=' ',
IsPath=False),
# Enable reverse strand matching. Will double memory.
'--rev': FlagParameter('--', Name='rev'),
# Maximum hits before quitting search (default 1, 0=infinity).
'--maxaccepts':
ValuedParameter('--', Name='maxaccepts', Delimiter=' '),
# Maximum rejects before quitting search (default 8, 0=infinity).
'--maxrejects':
ValuedParameter('--', Name='maxrejects', Delimiter=' '),
# Target nr. of common words (default 8, 0=don't step)
'--stepwords': ValuedParameter('--', Name='stepwords', Delimiter=' '),
# Word length for windex (default 5 aa.s, 8 nuc.s).
'--w': ValuedParameter('--', Name='w', Delimiter=' '),
# Don't assume input is sorted by length (default assume sorted).
'--usersort': FlagParameter('--', Name='usersort'),
# log filepath
'--log': ValuedParameter('--', Name='log', Delimiter=' ', IsPath=True),
# cluster command
'--cluster': ValuedParameter('--', Name='cluster', Delimiter=' ',
IsPath=True),
# Size of compressed index table. Should be prime, e.g. 40000003.
'--slots': ValuedParameter('--', Name='slots', Delimiter=' ',
IsPath=False),
# Not specified in usearch helpstring...
'--sizein': FlagParameter('--', Name='sizein'),
# Not specified in usearch helpstring...
'--sizeout': FlagParameter('--', Name='sizeout'),
# Not specified in usearch helpstring...
'--minlen': ValuedParameter('--', Name='minlen', Delimiter=' ',
IsPath=False),
# output filepath for dereplicated fasta file
'--seedsout': ValuedParameter('--', Name='seedsout', Delimiter=' ',
IsPath=True),
# Dereplicate exact subsequences
'--derep_subseq': FlagParameter('--', Name='derep_subseq'),
# Dereplicate exact sequences
'--derep_fullseq': FlagParameter('--', Name='derep_fullseq'),
# Sort by abundance
'--sortsize': ValuedParameter('--', Name='sortsize', Delimiter=' ',
IsPath=True),
# usearch search plus clustering
'--consout': ValuedParameter('--', Name='consout', Delimiter=' ',
IsPath=True),
# Abundance skew setting for uchime de novo chimera detection
'--abskew': ValuedParameter('--', Name='abskew', Delimiter=' ',
IsPath=False),
# input fasta filepath for uchime chimera
'--uchime': ValuedParameter('--', Name='uchime', Delimiter=' ',
IsPath=True),
# output chimera filepath
'--chimeras': ValuedParameter('--', Name='chimeras', Delimiter=' ',
IsPath=True),
# output non-chimera filepath
'--nonchimeras': ValuedParameter('--', Name='nonchimeras',
Delimiter=' ', IsPath=True),
# reference sequence database for ref based chimera detection
'--db': ValuedParameter('--', Name='db', Delimiter=' ', IsPath=True),
# output clusters filepath for chimera detection
'--uchimeout': ValuedParameter('--', Name='uchimeout', Delimiter=' ',
IsPath=True),
# minimum cluster size for quality filtering
'--minsize': ValuedParameter('--', Name='minsize', Delimiter=' ',
IsPath=False),
# input fasta for blast alignments
'--query': ValuedParameter('--', Name='query', Delimiter=' ',
IsPath=True),
# global alignment flag
'--global': FlagParameter('--', Name='global')
}
_suppress_stdout = False
_suppress_stderr = False
def _input_as_parameters(self, data):
""" Set the input path (a fasta filepath)
"""
# The list of values which can be passed on a per-run basis
allowed_values = ['--uc', '--output', '--mergesort', '--log',
'--cluster', '--seedsout', '--sortsize',
'--consout', '--uchime', '--chimeras',
'--nonchimeras', '--db', '--uchimeout',
'--query', '--blast6out']
unsupported_parameters = set(data.keys()) - set(allowed_values)
if unsupported_parameters:
raise ApplicationError(
"Unsupported parameter(s) passed when calling usearch: %s" %
' '.join(unsupported_parameters))
for v in allowed_values:
# turn the parameter off so subsequent runs are not
# affected by parameter settings from previous runs
self.Parameters[v].off()
if v in data:
# turn the parameter on if specified by the user
self.Parameters[v].on(data[v])
return ''
def _get_result_paths(self, data):
""" Set the result paths """
result = {}
result['Output'] = ResultPath(
Path=self.Parameters['--output'].Value,
IsWritten=self.Parameters['--output'].isOn())
result['ClusterFile'] = ResultPath(
Path=self.Parameters['--uc'].Value,
IsWritten=self.Parameters['--uc'].isOn())
return result
def _accept_exit_status(self, exit_status):
""" Test for acceptable exit status
usearch can seg fault and still generate a parsable .uc file
so we explicitly check the exit status
"""
return exit_status == 0
def getHelp(self):
"""Method that points to documentation"""
help_str =\
"""
USEARCH is hosted at:
http://www.drive5.com/usearch/
The following papers should be cited if this resource is used:
Paper pending. Check with Robert Edgar who is writing the paper
for usearch as of Aug. 2011
"""
return help_str
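# Illustrative direct usage of the Usearch controller (kept as a comment;
# the filenames are hypothetical). The convenience wrappers below all follow
# this pattern: build a dict of fixed parameters, instantiate Usearch, then
# call it with the per-run file parameters:
#
#     params = {'--id': 0.97, '--maxrejects': 64}
#     app = Usearch(params, WorkingDir='/tmp', HALT_EXEC=False)
#     result = app({'--cluster': 'seqs.fasta',
#                   '--uc': 'clusters.uc',
#                   '--seedsout': 'seeds.fasta'})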
# Start functions for processing usearch output files
def clusters_from_blast_uc_file(uc_lines, otu_id_field=1):
""" Parses out hit/miss sequences from usearch blast uc file
All lines should be 'H'it or 'N'o hit. Returns a dict mapping OTU ids to the
sequence labels of the hits, and a list of all sequence labels with no hit.
uc_lines = open file object of uc file
otu_id_field: uc field to use as the otu id. 1 is usearch's ClusterNr field,
and 9 is usearch's TargetLabel field
"""
hit_miss_index = 0
cluster_id_index = otu_id_field
seq_label_index = 8
otus = {}
unassigned_seqs = []
for line in uc_lines:
# skip empty, comment lines
if line.startswith('#') or len(line.strip()) == 0:
continue
curr_line = line.split('\t')
if curr_line[hit_miss_index] == 'N':
# only retaining actual sequence label
unassigned_seqs.append(curr_line[seq_label_index].split()[0])
if curr_line[hit_miss_index] == 'H':
curr_seq_label = curr_line[seq_label_index].split()[0]
curr_otu_id = curr_line[cluster_id_index].split()[0]
# Append sequence label to dictionary, or create key
try:
otus[curr_otu_id].append(curr_seq_label)
except KeyError:
otus[curr_otu_id] = [curr_seq_label]
return otus, unassigned_seqs
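# Illustrative example (kept as a comment; the uc lines are hypothetical but
# follow the tab-separated layout assumed above: field 0 = hit type, field 1
# = cluster number, field 8 = query label, field 9 = target label):
#
#     uc_lines = [
#         "H\t42\t250\t99.2\t+\t0\t0\t.\tseq1 some comment\tref_otu_42\n",
#         "N\t*\t250\t*\t*\t*\t*\t.\tseq2\t*\n",
#     ]
#     otus, misses = clusters_from_blast_uc_file(uc_lines)
#     # otus -> {'42': ['seq1']}, misses -> ['seq2']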
# End functions for processing usearch output files
# Start usearch convenience functions
def usearch_fasta_sort_from_filepath(
fasta_filepath,
output_filepath=None,
log_name="sortlen.log",
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
"""Generates sorted fasta file via usearch --mergesort.
fasta_filepath: filepath to input fasta file
output_filepath: filepath for output sorted fasta file.
log_name: string to specify log filename
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created."""
if not output_filepath:
_, output_filepath = mkstemp(prefix='usearch_fasta_sort',
suffix='.fasta')
log_filepath = join(working_dir, log_name)
params = {}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
data = {'--mergesort': fasta_filepath,
'--output': output_filepath,
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
return app_result, output_filepath
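# Example call (kept as a comment; the paths are hypothetical). working_dir
# must point at an existing directory because the log file is written there:
#
#     app_result, sorted_fp = usearch_fasta_sort_from_filepath(
#         'seqs.fasta', output_filepath='seqs_sorted.fasta',
#         working_dir='/tmp')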
def usearch_dereplicate_exact_subseqs(
fasta_filepath,
output_filepath=None,
minlen=64,
w=64,
slots=16769023,
sizeout=True,
maxrejects=64,
log_name="derep.log",
usersort=False,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
""" Generates clusters and fasta file of dereplicated subsequences
These parameters are those specified by Robert Edgar for optimal use of
usearch in clustering/filtering sequences.
fasta_filepath = input filepath of fasta file to be dereplicated
output_filepath = output filepath of dereplicated fasta file
minlen = (not specified in usearch helpstring)
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
Should also specify --w, typical is --w 16 or --w 32.
sizeout = (not specified in usearch helpstring)
maxrejects = Max rejected targets, 0=ignore, default 32.
log_name: string to specify log filename
usersort = Enable if the input fasta is deliberately not sorted by length;
otherwise usearch will raise an error.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created."""
if not output_filepath:
_, output_filepath = mkstemp(prefix='usearch_fasta_dereplicated',
suffix='.fasta')
log_filepath = join(working_dir, log_name)
uc_filepath = join(working_dir, "derep.uc")
params = {'--derep_subseq': True,
'--minlen': minlen,
'--w': w,
'--slots': slots,
'--sizeout': sizeout,
'--maxrejects': maxrejects}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
if usersort:
app.Parameters['--usersort'].on()
data = {'--cluster': fasta_filepath,
'--uc': uc_filepath,
'--seedsout': output_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
if not save_intermediate_files:
remove_files([uc_filepath])
# Returning output filepath to delete if specified.
return app_result, output_filepath
def usearch_dereplicate_exact_seqs(
fasta_filepath,
output_filepath=None,
minlen=64,
w=64,
slots=16769023,
sizeout=True,
maxrejects=64,
log_name="derep.log",
usersort=False,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
""" Generates clusters and fasta file of dereplicated subsequences
for exact sequences.
These parameters are those specified by Robert Edgar for optimal use of
usearch in clustering/filtering sequences.
fasta_filepath = input filepath of fasta file to be dereplicated
output_filepath = output filepath of dereplicated fasta file
minlen = (not specified in usearch helpstring)
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
Should also specify --w, typical is --w 16 or --w 32.
sizeout = (not specified in usearch helpstring)
maxrejects = Max rejected targets, 0=ignore, default 32.
log_name: string to specify log filename
usersort = Enable if the input fasta is deliberately not sorted by length;
otherwise usearch will raise an error.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created."""
if not output_filepath:
_, output_filepath = mkstemp(prefix='usearch_fasta_dereplicated',
suffix='.fasta')
log_filepath = join(working_dir, log_name)
uc_filepath = join(working_dir, "derep.uc")
params = {'--derep_fullseq': True,
'--minlen': minlen,
'--w': w,
'--slots': slots,
'--sizeout': sizeout,
'--maxrejects': maxrejects}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
if usersort:
app.Parameters['--usersort'].on()
data = {'--cluster': fasta_filepath,
'--uc': uc_filepath,
'--seedsout': output_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
if not save_intermediate_files:
remove_files([uc_filepath])
# Returning output filepath to delete if specified.
return app_result, output_filepath
def usearch_sort_by_abundance(
fasta_filepath,
output_filepath=None,
sizein=True,
sizeout=True,
minsize=0,
log_name="abundance_sort.log",
usersort=False,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
""" Sorts fasta file by abundance
fasta_filepath = input fasta file, generally a dereplicated fasta
output_filepath = output abundance sorted fasta filepath
sizein = not defined in usearch helpstring
sizeout = not defined in usearch helpstring
minsize = minimum size of cluster to retain.
log_name = string to specify log filename
usersort = Enable if the input is not sorted by abundance; otherwise usearch will raise an error
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
"""
if not output_filepath:
_, output_filepath = mkstemp(prefix='usearch_abundance_sorted',
suffix='.fasta')
log_filepath = join(
working_dir,
"minsize_" + str(minsize) + "_" + log_name)
params = {}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
if usersort:
app.Parameters['--usersort'].on()
if minsize:
app.Parameters['--minsize'].on(minsize)
if sizein:
app.Parameters['--sizein'].on()
if sizeout:
app.Parameters['--sizeout'].on()
data = {'--sortsize': fasta_filepath,
'--output': output_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
# There may be no data left after this filter step, which raises an
# application error; catch it here to raise a more meaningful message.
try:
app_result = app(data)
except ApplicationError:
raise ValueError('No data following filter steps, please check ' +
'parameter settings for usearch_qf.')
return app_result, output_filepath
def usearch_cluster_error_correction(
fasta_filepath,
output_filepath=None,
output_uc_filepath=None,
percent_id_err=0.97,
sizein=True,
sizeout=True,
w=64,
slots=16769023,
maxrejects=64,
log_name="usearch_cluster_err_corrected.log",
usersort=False,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
""" Cluster for err. correction at percent_id_err, output consensus fasta
fasta_filepath = input fasta file, generally a dereplicated fasta
output_filepath = output error corrected fasta filepath
percent_id_err = minimum identity percent.
sizein = not defined in usearch helpstring
sizeout = not defined in usearch helpstring
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
Should also specify --w, typical is --w 16 or --w 32.
maxrejects = Max rejected targets, 0=ignore, default 32.
log_name = string specifying output log name
usersort = Enable if the input fasta is deliberately not sorted by length;
otherwise usearch will raise an error.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
"""
if not output_filepath:
_, output_filepath = mkstemp(prefix='usearch_cluster_err_corrected',
suffix='.fasta')
log_filepath = join(working_dir, log_name)
params = {'--sizein': sizein,
'--sizeout': sizeout,
'--id': percent_id_err,
'--w': w,
'--slots': slots,
'--maxrejects': maxrejects}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
if usersort:
app.Parameters['--usersort'].on()
data = {'--cluster': fasta_filepath,
'--consout': output_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
if output_uc_filepath:
data['--uc'] = output_uc_filepath
app_result = app(data)
return app_result, output_filepath
def usearch_chimera_filter_de_novo(
fasta_filepath,
output_chimera_filepath=None,
output_non_chimera_filepath=None,
abundance_skew=2.0,
log_name="uchime_de_novo_chimera_filtering.log",
usersort=False,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
""" Chimera filter de novo, output chimeras and non-chimeras to fastas
fasta_filepath = input fasta file, generally a dereplicated fasta
output_chimera_filepath = output chimera filepath
output_non_chimera_filepath = output non chimera filepath
abundance_skew = abundance skew setting for de novo filtering.
usersort = Enable if the input fasta is deliberately not sorted by length;
otherwise usearch will raise an error.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
"""
if not output_chimera_filepath:
_, output_chimera_filepath = mkstemp(prefix='uchime_chimeras_',
suffix='.fasta')
if not output_non_chimera_filepath:
_, output_non_chimera_filepath = mkstemp(prefix='uchime_non_chimeras_',
suffix='.fasta')
log_filepath = join(working_dir, log_name)
params = {'--abskew': abundance_skew}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
if usersort:
app.Parameters['--usersort'].on()
data = {'--uchime': fasta_filepath,
'--chimeras': output_chimera_filepath,
'--nonchimeras': output_non_chimera_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
if not save_intermediate_files:
remove_files([output_chimera_filepath])
return app_result, output_non_chimera_filepath
def usearch_chimera_filter_ref_based(
fasta_filepath,
db_filepath,
output_chimera_filepath=None,
output_non_chimera_filepath=None,
rev=False,
log_name="uchime_reference_chimera_filtering.log",
usersort=False,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
""" Chimera filter against a reference database.
fasta_filepath = input fasta file, generally a dereplicated fasta
db_filepath = filepath to reference sequence database
output_chimera_filepath = output chimera filepath
output_non_chimera_filepath = output non chimera filepath
rev = search plus and minus strands of sequences
abundance_skew = abundance skew setting for de novo filtering.
log_name = string specifying log filename.
usersort = Enable if the input fasta is deliberately not sorted by length;
otherwise usearch will raise an error.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
"""
if not output_chimera_filepath:
_, output_chimera_filepath = mkstemp(prefix='uchime_chimeras_',
suffix='.fasta')
if not output_non_chimera_filepath:
_, output_non_chimera_filepath = mkstemp(prefix='uchime_non_chimeras_',
suffix='.fasta')
log_filepath = join(working_dir, log_name)
# clusters filepath created by usearch
cluster_filepath = join(working_dir, "refdb.uc")
params = {}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
if usersort:
app.Parameters['--usersort'].on()
if rev:
app.Parameters['--rev'].on()
data = {'--uchime': fasta_filepath,
'--db': db_filepath,
'--chimeras': output_chimera_filepath,
'--nonchimeras': output_non_chimera_filepath,
'--uchimeout': cluster_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
if not save_intermediate_files:
remove_files([cluster_filepath, output_chimera_filepath])
return app_result, output_non_chimera_filepath
def usearch_cluster_seqs(
fasta_filepath,
output_filepath=None,
percent_id=0.97,
sizein=True,
sizeout=True,
w=64,
slots=16769023,
maxrejects=64,
log_name="usearch_cluster_seqs.log",
usersort=True,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None
):
""" Cluster seqs at percent_id, output consensus fasta
fasta_filepath = input fasta file, generally a dereplicated fasta
output_filepath = output error corrected fasta filepath
percent_id = minimum identity percent.
sizein = not defined in usearch helpstring
sizeout = not defined in usearch helpstring
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
Should also specify --w, typical is --w 16 or --w 32.
maxrejects = Max rejected targets, 0=ignore, default 32.
log_name = string specifying output log name
usersort = Enable if the input fasta is deliberately not sorted by length;
otherwise usearch will raise an error. Post chimera-checked sequences are
sorted by abundance, so this should be set to True.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
"""
if not output_filepath:
_, output_filepath = mkstemp(prefix='usearch_cluster', suffix='.fasta')
log_filepath = join(working_dir, log_name)
uc_filepath = join(working_dir, "clustered_seqs_post_chimera.uc")
params = {'--sizein': sizein,
'--sizeout': sizeout,
'--id': percent_id,
'--w': w,
'--slots': slots,
'--maxrejects': maxrejects}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
if usersort:
app.Parameters['--usersort'].on()
data = {'--cluster': fasta_filepath,
'--seedsout': output_filepath,
'--uc': uc_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
if not save_intermediate_files:
remove_files([uc_filepath])
return app_result, output_filepath
def usearch_cluster_seqs_ref(
fasta_filepath,
output_filepath=None,
percent_id=0.97,
sizein=True,
sizeout=True,
w=64,
slots=16769023,
maxrejects=64,
log_name="usearch_cluster_seqs.log",
usersort=True,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
suppress_new_clusters=False,
refseqs_fp=None,
output_dir=None,
working_dir=None,
rev=False):
""" Cluster seqs at percent_id, output consensus fasta
Also appends de novo clustered seqs if suppress_new_clusters is False.
Forced to handle reference + de novo clustering in a hackish fashion, as
usearch does not behave as described in its helpstrings. Any failures are
clustered de novo and given unique cluster IDs.
fasta_filepath = input fasta file, generally a dereplicated fasta
output_filepath = output reference clustered uc filepath
percent_id = minimum identity percent.
sizein = not defined in usearch helpstring
sizeout = not defined in usearch helpstring
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
Should also specify --w, typical is --w 16 or --w 32.
maxrejects = Max rejected targets, 0=ignore, default 32.
log_name = string specifying output log name
usersort = Enable if the input fasta is deliberately not sorted by length;
otherwise usearch will raise an error. Post chimera-checked sequences are
sorted by abundance, so this should be set to True.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
suppress_new_clusters: Disables de novo OTUs when ref based OTU picking
enabled.
refseqs_fp: Filepath for ref based OTU picking
output_dir: output directory
rev = search plus and minus strands of sequences
"""
if not output_filepath:
_, output_filepath = mkstemp(prefix='usearch_cluster_ref_based',
suffix='.uc')
log_filepath = join(working_dir, log_name)
uc_filepath = join(working_dir, "clustered_seqs_post_chimera.uc")
params = {'--sizein': sizein,
'--sizeout': sizeout,
'--id': percent_id,
'--w': w,
'--slots': slots,
'--maxrejects': maxrejects}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
if usersort:
app.Parameters['--usersort'].on()
if rev:
app.Parameters['--rev'].on()
data = {'--query': fasta_filepath,
'--uc': uc_filepath,
'--db': refseqs_fp
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
files_to_remove = []
# Need to create fasta file of all hits (with reference IDs),
# recluster failures if new clusters allowed, and create complete fasta
# file, with unique fasta label IDs.
if suppress_new_clusters:
output_fna_filepath = join(output_dir, 'ref_clustered_seqs.fasta')
output_filepath, labels_hits = get_fasta_from_uc_file(fasta_filepath,
uc_filepath, hit_type="H", output_dir=output_dir,
output_fna_filepath=output_fna_filepath)
files_to_remove.append(uc_filepath)
else:
# Get fasta of successful ref based clusters
output_fna_clustered = join(output_dir, 'ref_clustered_seqs.fasta')
output_filepath_ref_clusters, labels_hits =\
get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H",
output_dir=output_dir, output_fna_filepath=output_fna_clustered)
# get failures and recluster
output_fna_failures =\
join(output_dir, 'ref_clustered_seqs_failures.fasta')
output_filepath_failures, labels_hits =\
get_fasta_from_uc_file(fasta_filepath,
uc_filepath, hit_type="N", output_dir=output_dir,
output_fna_filepath=output_fna_failures)
# de novo cluster the failures
app_result, output_filepath_clustered_failures =\
usearch_cluster_seqs(output_fna_failures, output_filepath=
join(
output_dir,
'clustered_seqs_reference_failures.fasta'),
percent_id=percent_id, sizein=sizein, sizeout=sizeout, w=w,
slots=slots, maxrejects=maxrejects,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs, working_dir=working_dir)
output_filepath = concatenate_fastas(output_fna_clustered,
output_fna_failures, output_concat_filepath=join(
output_dir,
'concatenated_reference_denovo_clusters.fasta'))
files_to_remove.append(output_fna_clustered)
files_to_remove.append(output_fna_failures)
files_to_remove.append(output_filepath_clustered_failures)
if not save_intermediate_files:
remove_files(files_to_remove)
return app_result, output_filepath
def concatenate_fastas(output_fna_clustered,
output_fna_failures,
output_concat_filepath):
""" Concatenates two input fastas, writes to output_concat_filepath
output_fna_clustered: fasta of successful ref clusters
output_fna_failures: de novo fasta of cluster failures
output_concat_filepath: path to write combined fastas to
"""
output_fp = open(output_concat_filepath, "w")
for label, seq in parse_fasta(open(output_fna_clustered, "U")):
output_fp.write(">%s\n%s\n" % (label, seq))
for label, seq in parse_fasta(open(output_fna_failures, "U")):
output_fp.write(">%s\n%s\n" % (label, seq))
return output_concat_filepath
def enumerate_otus(fasta_filepath,
output_filepath=None,
label_prefix="",
label_suffix="",
retain_label_as_comment=False,
count_start=0):
""" Writes unique, sequential count to OTUs
fasta_filepath = input fasta filepath
output_filepath = output fasta filepath
label_prefix = string to place before enumeration
label_suffix = string to place after enumeration
retain_label_as_comment = if True, will place existing label in sequence
comment, after a tab
count_start = number to start enumerating OTUs with
"""
fasta_i = open(fasta_filepath, "U")
if not output_filepath:
_, output_filepath = mkstemp(prefix='enumerated_seqs_',
suffix='.fasta')
fasta_o = open(output_filepath, "w")
for label, seq in parse_fasta(fasta_i):
curr_label = ">" + label_prefix + str(count_start) + label_suffix
if retain_label_as_comment:
curr_label += '\t' + label
fasta_o.write(curr_label.strip() + '\n')
fasta_o.write(seq.strip() + '\n')
count_start += 1
return output_filepath
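# Illustrative behaviour (comment only; the record is hypothetical): with
# label_prefix="OTU_", count_start=10 and retain_label_as_comment=True, an
# input record labelled "denovo1" with sequence "ACGT" is written out with
# the header ">OTU_10\tdenovo1" followed by "ACGT", and the counter then
# advances by one for the next record.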
def get_fasta_from_uc_file(fasta_filepath,
uc_filepath,
hit_type="H",
output_fna_filepath=None,
label_prefix="",
output_dir=None):
""" writes fasta of sequences from uc file of type hit_type
fasta_filepath: Filepath of original query fasta file
uc_filepath: Filepath of .uc file created by usearch post error filtering
hit_type: type to read from first field of .uc file, "H" for hits, "N" for
no hits.
output_fna_filepath = fasta output filepath
label_prefix = Added before each fasta label, important when doing ref
based OTU picking plus de novo clustering to preserve label matching.
output_dir: output directory
"""
hit_type_index = 0
seq_label_index = 8
target_label_index = 9
labels_hits = {}
labels_to_keep = []
for line in open(uc_filepath, "U"):
if line.startswith("#") or len(line.strip()) == 0:
continue
curr_line = line.split('\t')
if curr_line[0] == hit_type:
labels_hits[curr_line[seq_label_index]] =\
curr_line[target_label_index].strip()
labels_to_keep.append(curr_line[seq_label_index])
labels_to_keep = set(labels_to_keep)
out_fna = open(output_fna_filepath, "w")
for label, seq in parse_fasta(open(fasta_filepath, "U")):
if label in labels_to_keep:
if hit_type == "H":
out_fna.write(">" + labels_hits[label] + "\n%s\n" % seq)
if hit_type == "N":
out_fna.write(">" + label + "\n%s\n" % seq)
return output_fna_filepath, labels_hits
def get_retained_chimeras(output_fp_de_novo_nonchimeras,
output_fp_ref_nonchimeras,
output_combined_fp,
chimeras_retention='union'):
""" Gets union or intersection of two supplied fasta files
output_fp_de_novo_nonchimeras: filepath of nonchimeras from de novo
usearch detection.
output_fp_ref_nonchimeras: filepath of nonchimeras from reference based
usearch detection.
output_combined_fp: filepath to write retained sequences to.
chimeras_retention: accepts either 'intersection' or 'union'. Will test
for chimeras against the full input error clustered sequence set, and
retain sequences flagged as non-chimeras by either (union) or
only those flagged as non-chimeras by both (intersection)."""
de_novo_non_chimeras = []
reference_non_chimeras = []
de_novo_nonchimeras_f = open(output_fp_de_novo_nonchimeras, "U")
reference_nonchimeras_f = open(output_fp_ref_nonchimeras, "U")
output_combined_f = open(output_combined_fp, "w")
for label, seq in parse_fasta(de_novo_nonchimeras_f):
de_novo_non_chimeras.append(label)
de_novo_nonchimeras_f.close()
for label, seq in parse_fasta(reference_nonchimeras_f):
reference_non_chimeras.append(label)
reference_nonchimeras_f.close()
de_novo_non_chimeras = set(de_novo_non_chimeras)
reference_non_chimeras = set(reference_non_chimeras)
if chimeras_retention == 'union':
all_non_chimeras = de_novo_non_chimeras.union(reference_non_chimeras)
elif chimeras_retention == 'intersection':
all_non_chimeras =\
de_novo_non_chimeras.intersection(reference_non_chimeras)
de_novo_nonchimeras_f = open(output_fp_de_novo_nonchimeras, "U")
reference_nonchimeras_f = open(output_fp_ref_nonchimeras, "U")
# Save a list of already-written labels
labels_written = []
for label, seq in parse_fasta(de_novo_nonchimeras_f):
if label in all_non_chimeras:
if label not in labels_written:
output_combined_f.write('>%s\n%s\n' % (label, seq))
labels_written.append(label)
de_novo_nonchimeras_f.close()
for label, seq in parse_fasta(reference_nonchimeras_f):
if label in all_non_chimeras:
if label not in labels_written:
output_combined_f.write('>%s\n%s\n' % (label, seq))
labels_written.append(label)
reference_nonchimeras_f.close()
output_combined_f.close()
return output_combined_fp
def assign_reads_to_otus(original_fasta,
filtered_fasta,
output_filepath=None,
log_name="assign_reads_to_otus.log",
perc_id_blast=0.97,
global_alignment=True,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
""" Uses original fasta file, blasts to assign reads to filtered fasta
original_fasta = filepath to original query fasta
filtered_fasta = filepath to enumerated, filtered fasta
output_filepath = output path to clusters (uc) file
log_name = string specifying output log name
perc_id_blast = percent ID for blasting original seqs against filtered set
    usersort = Enable if the input fasta is deliberately not sorted by length,
     otherwise usearch will raise an error. In post-chimera-checked sequences,
     the seqs are sorted by abundance, so this should be set to True.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
"""
    # Not sure if I feel comfortable using blast as a way to recapitulate
# original read ids....
if not output_filepath:
_, output_filepath = mkstemp(prefix='assign_reads_to_otus',
suffix='.uc')
log_filepath = join(working_dir, log_name)
params = {'--id': perc_id_blast,
'--global': global_alignment}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
data = {'--query': original_fasta,
'--db': filtered_fasta,
'--uc': output_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
return app_result, output_filepath
def usearch_qf(
fasta_filepath,
refseqs_fp=None,
output_dir=None,
percent_id=0.97,
percent_id_err=0.97,
minsize=4,
abundance_skew=2.0,
db_filepath=None,
rev=False,
label_prefix="",
label_suffix="",
retain_label_as_comment=False,
count_start=0,
perc_id_blast=0.97,
save_intermediate_files=False,
HALT_EXEC=False,
global_alignment=True,
sizein=True,
sizeout=True,
w=64,
slots=16769023,
maxrejects=64,
minlen=64,
de_novo_chimera_detection=True,
derep_fullseq=False,
reference_chimera_detection=True,
cluster_size_filtering=True,
remove_usearch_logs=False,
usersort=True,
suppress_new_clusters=False,
chimeras_retention="union",
verbose=False
):
""" Main convenience wrapper for using usearch to filter/cluster seqs
The complete 'usearch_qf' process is a multistep process with many calls
to usearch with various parameters. It is likely to change from the
original implementation. A lot.
fasta_filepath = fasta filepath to filtering/clustering (e.g., output
seqs.fna file from split_libraries.py)
refseqs_fp = fasta filepath for ref-based otu picking.
output_dir = directory to store the otu mapping file, as well logs and
the intermediate files created if save_intermediate_files is True.
    percent_id = percent ID for clustering sequences.
    percent_id_err = percent ID for filtering out chimeras
minsize = Minimum size of cluster for retention after chimera removal.
abundance_skew = threshold setting for chimera removal with de novo
chimera detection.
db_filepath = filepath of reference fasta sequence set for ref based
chimera detection.
rev = search plus and minus strands of sequences, used in ref based chimera
detection.
label_prefix = optional prefix added to filtered fasta file.
label_suffix = optional suffix added to filtered fasta file.
retain_label_as_comment = option to add usearch generated label to
enumerated fasta labels.
count_start = integer to begin counting at for sequence enumeration.
perc_id_blast = percent identity setting for using blast algorithm to
assign original sequence labels to filtered fasta.
global_alignment = Setting for assignment of original seq labels to filtered
seqs.
sizein = not defined in usearch helpstring
sizeout = not defined in usearch helpstring
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
Should also specify --w, typical is --w 16 or --w 32.
maxrejects = Max rejected targets, 0=ignore, default 32.
save_intermediate_files = retain all the intermediate files created during
this process.
minlen = (not specified in usearch helpstring), but seems like a good bet
that this refers to the minimum length of the sequences for dereplication.
HALT_EXEC = used to debug app controller problems.
de_novo_chimera_detection = If True, will detect chimeras de novo
reference_chimera_detection = If True, will detect chimeras ref based
cluster_size_filtering = If True, will filter OTUs according to seq counts.
remove_usearch_logs = If True, will not call the --log function for each
usearch call.
usersort = Used for specifying custom sorting (i.e., non-length based
sorting) with usearch/uclust.
suppress_new_clusters = with reference based OTU picking, if enabled,
will prevent new clusters that do not match the reference from being
clustered.
chimeras_retention = accepts either 'intersection' or 'union'. Will test
for chimeras against the full input error clustered sequence set, and
retain sequences flagged as non-chimeras by either (union) or
only those flagged as non-chimeras by both (intersection).
"""
# Save a list of intermediate filepaths in case they are to be removed.
intermediate_files = []
# Need absolute paths to avoid problems with app controller
if output_dir:
output_dir = abspath(output_dir) + '/'
fasta_filepath = abspath(fasta_filepath)
try:
if verbose:
print "Sorting sequences by length..."
# Sort seqs by length
app_result, output_filepath_len_sorted =\
usearch_fasta_sort_from_filepath(fasta_filepath, output_filepath=
join(
output_dir,
'len_sorted.fasta'),
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs,
working_dir=output_dir, HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_filepath_len_sorted)
if verbose:
print "Dereplicating sequences..."
# Dereplicate sequences
app_result, output_filepath_dereplicated =\
usearch_dereplicate_exact_subseqs(output_filepath_len_sorted,
output_filepath=join(
output_dir,
'dereplicated_seqs.fasta'),
minlen=minlen, w=w, slots=slots, sizeout=sizeout,
maxrejects=maxrejects, save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs,
working_dir=output_dir, HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_filepath_dereplicated)
if verbose:
print "Sorting by abundance..."
# Sort by abundance, initially no filter based on seqs/otu
app_result, output_fp =\
usearch_sort_by_abundance(output_filepath_dereplicated,
output_filepath=join(
output_dir,
'abundance_sorted.fasta'),
usersort=True, sizein=sizein, sizeout=sizeout, minsize=0,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp)
if verbose:
print "Clustering sequences for error correction..."
# Create .uc file of clusters file, to identify original sequences
# later
output_uc_filepath = output_dir + 'err_corrected_clusters.uc'
app_result, error_clustered_output_fp =\
usearch_cluster_error_correction(output_fp,
output_filepath=join(output_dir,
'clustered_error_corrected.fasta'),
output_uc_filepath=output_uc_filepath,
usersort=True, percent_id_err=percent_id_err, sizein=sizein,
sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
remove_usearch_logs=remove_usearch_logs,
save_intermediate_files=save_intermediate_files,
working_dir=output_dir, HALT_EXEC=HALT_EXEC)
intermediate_files.append(error_clustered_output_fp)
intermediate_files.append(output_uc_filepath)
# Series of conditional tests, using generic 'output_fp' name so the
# conditional filtering, if any/all are selected, do not matter.
if de_novo_chimera_detection:
if verbose:
print "Performing de novo chimera detection..."
app_result, output_fp_de_novo_nonchimeras =\
usearch_chimera_filter_de_novo(error_clustered_output_fp,
abundance_skew=abundance_skew, output_chimera_filepath=
join(
output_dir,
'de_novo_chimeras.fasta'),
output_non_chimera_filepath=join(
output_dir,
'de_novo_non_chimeras.fasta'), usersort=True,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp_de_novo_nonchimeras)
output_fp = output_fp_de_novo_nonchimeras
if reference_chimera_detection:
if verbose:
print "Performing reference based chimera detection..."
app_result, output_fp_ref_nonchimeras =\
usearch_chimera_filter_ref_based(error_clustered_output_fp,
db_filepath=db_filepath, output_chimera_filepath=
join(
output_dir,
'reference_chimeras.fasta'),
output_non_chimera_filepath=
join(output_dir, 'reference_non_chimeras.fasta'), usersort=True,
save_intermediate_files=save_intermediate_files, rev=rev,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp_ref_nonchimeras)
output_fp = output_fp_ref_nonchimeras
# get intersection or union if both ref and de novo chimera detection
if de_novo_chimera_detection and reference_chimera_detection:
if verbose:
print "Finding %s of non-chimeras..." % chimeras_retention
output_fp = get_retained_chimeras(
output_fp_de_novo_nonchimeras, output_fp_ref_nonchimeras,
output_combined_fp=
join(output_dir, 'combined_non_chimeras.fasta'),
chimeras_retention=chimeras_retention)
intermediate_files.append(output_fp)
if cluster_size_filtering:
# Test for empty filepath following filters, raise error if all seqs
# have been removed
if verbose:
print "Filtering by cluster size..."
# chimera detection was not performed, use output file of step 4 as input
# to filtering by cluster size
if not (reference_chimera_detection and de_novo_chimera_detection):
output_fp = error_clustered_output_fp
app_result, output_fp =\
usearch_sort_by_abundance(output_fp, output_filepath=
join(output_dir, 'abundance_sorted_minsize_' + str(minsize) +
'.fasta'),
minsize=minsize, sizein=sizein, sizeout=sizeout,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp)
# cluster seqs
# Should we add in option to use alternative OTU picking here?
# Seems like it will be a bit of a mess...maybe after we determine
# if usearch_qf should become standard.
if refseqs_fp:
if verbose:
print "Clustering against reference sequences..."
app_result, output_filepath =\
usearch_cluster_seqs_ref(output_fp, output_filepath=
join(
output_dir,
'ref_clustered_seqs.uc'),
percent_id=percent_id, sizein=sizein,
sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs,
suppress_new_clusters=suppress_new_clusters, refseqs_fp=refseqs_fp,
output_dir=output_dir, working_dir=output_dir, rev=rev,
HALT_EXEC=HALT_EXEC
)
else:
if verbose:
print "De novo clustering sequences..."
app_result, output_filepath =\
usearch_cluster_seqs(output_fp, output_filepath=
join(output_dir, 'clustered_seqs.fasta'),
percent_id=percent_id, sizein=sizein,
sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_filepath)
# Enumerate the OTUs in the clusters
if not suppress_new_clusters:
if verbose:
print "Enumerating OTUs..."
output_filepath =\
enumerate_otus(output_filepath, output_filepath=
join(output_dir, 'enumerated_otus.fasta'),
label_prefix=label_prefix,
label_suffix=label_suffix, count_start=count_start,
retain_label_as_comment=retain_label_as_comment)
intermediate_files.append(output_filepath)
# Get original sequence label identities
if verbose:
print "Assigning sequences to clusters..."
app_result, clusters_file = assign_reads_to_otus(fasta_filepath,
filtered_fasta=output_filepath, output_filepath=join(
output_dir,
'assign_reads_to_otus.uc'), perc_id_blast=percent_id,
global_alignment=global_alignment,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(clusters_file)
except ApplicationError:
        raise ApplicationError('Error running usearch. Possible causes are '
                               'an unsupported usearch version (current '
                               'supported version is v5.2.236) or an '
                               'improperly formatted input file.')
except ApplicationNotFoundError:
        # usearch_qf tracks its temporary files in intermediate_files
        remove_files(intermediate_files)
raise ApplicationNotFoundError('usearch not found, is it properly ' +
'installed?')
# Get dict of clusters, list of failures
# Set OTU ID field to 9 for the case of closed reference OTU picking
if suppress_new_clusters:
otu_id_field = 9
else:
otu_id_field = 1
clusters, failures = clusters_from_blast_uc_file(open(clusters_file, "U"),
otu_id_field)
# Remove temp files unless user specifies output filepath
if not save_intermediate_files:
remove_files(intermediate_files)
return clusters, failures
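# Illustrative usage sketch (not part of the original module): a minimal
# usearch_qf call with both chimera detection steps enabled. All filepaths are
# hypothetical and output_dir is assumed to be an existing directory; the
# return values are a dict of OTU ID -> member sequence IDs and a list of
# sequence IDs that failed to cluster.
def _example_usearch_qf():
    clusters, failures = usearch_qf('seqs.fna',
                                    output_dir='usearch_qf_results/',
                                    percent_id=0.97,
                                    minsize=4,
                                    db_filepath='gold.fa',
                                    de_novo_chimera_detection=True,
                                    reference_chimera_detection=True,
                                    cluster_size_filtering=True)
    return clusters, failures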
def assign_dna_reads_to_database(query_fasta_fp,
database_fasta_fp,
output_fp,
temp_dir=gettempdir(),
params={},
blast6_fp=None,
HALT_EXEC=False):
_params = {'--id': 0.97}
_params.update(params)
if blast6_fp is None:
blast6_fp = splitext(output_fp)[0] + '.bl6'
data = {'--query': query_fasta_fp,
'--uc': output_fp,
'--db': database_fasta_fp,
'--blast6out': blast6_fp,
}
app = Usearch(_params,
WorkingDir=temp_dir,
                  HALT_EXEC=HALT_EXEC)
app_result = app(data)
assign_dna_reads_to_protein_database =\
assign_dna_reads_to_dna_database =\
assign_dna_reads_to_database
# End uclust convenience functions
# Start usearch61 application controller
class Usearch61(CommandLineApplication):
""" Usearch61 ApplicationController
"""
_command = 'usearch61'
_input_handler = '_input_as_parameters'
_parameters = {
# IO filepaths specified by these values
# Output file, used by several difference functions
'--output': ValuedParameter('--', Name='output', Delimiter=' ',
IsPath=True),
# Output filename in uclust (.uc) format
'--uc': ValuedParameter('--', Name='uc', Delimiter=' ', IsPath=True),
# log filepath
'--log': ValuedParameter('--', Name='log', Delimiter=' ', IsPath=True),
# Uses to specify input file for reference based clustering
'--usearch_global': ValuedParameter('--', Name='usearch_global',
Delimiter=' ', IsPath=True),
# Used to specify reference sequences to act as seeds
'--db': ValuedParameter('--', Name='db', Delimiter=' ', IsPath=True),
# Default de novo clustering input fasta filepath, memory efficient
'--cluster_smallmem': ValuedParameter('--', Name='cluster_smallmem',
Delimiter=' ', IsPath=True),
# Fast de novo clustering input fasta filepath
'--cluster_fast': ValuedParameter('--', Name='cluster_fast',
Delimiter=' ', IsPath=True),
# Specifies consensus fasta file output for a cluster
'--consout': ValuedParameter('--', Name='consout',
Delimiter=' ', IsPath=True),
# Specifies input consensus/abundance file for de novo chimeras
'--uchime_denovo': ValuedParameter('--', Name='uchime_denovo',
Delimiter=' ', IsPath=True),
# Specifies input consensus/abundance file for ref chimera detection
'--uchime_ref': ValuedParameter('--', Name='uchime_ref',
Delimiter=' ', IsPath=True),
# Specifies output uchime file for chimera results
'--uchimeout': ValuedParameter('--', Name='uchimeout',
Delimiter=' ', IsPath=True),
# Parameters for sorting raw fasta files
# specifies fasta filepath to sort by length
'--sortbylength': ValuedParameter('--', Name='sortbylength',
Delimiter=' ', IsPath=True),
# specifies fasta filepath to dereplicate, sort by abundance
'--derep_fulllength': ValuedParameter('--', Name='derep_fulllength',
Delimiter=' ', IsPath=True),
# Adds label showing abundance of dereplicated sequences
'--sizeout': FlagParameter('--', Name='sizeout'),
# Other parameters for clustering/sorting
# Needed to use data sorted by abundance and use sizeorder option
'--usersort': FlagParameter('--', Name='usersort'),
# specifies percent identity for clustering
'--id': ValuedParameter('--', Name='id', Delimiter=' ', IsPath=False),
# specifies minimum sequence length allowed
'--minseqlength': ValuedParameter('--', Name='minseqlength',
Delimiter=' ', IsPath=False),
# if set as --strand both will enable reverse strand matching
'--strand': ValuedParameter('--', Name='strand', Delimiter=' ',
IsPath=False),
# Word length to use, in base pairs
'--wordlength': ValuedParameter('--', Name='wordlength',
Delimiter=' ', IsPath=False),
# Max rejects, lower = more speed, higher=higher accuracy
'--maxrejects': ValuedParameter('--', Name='maxrejects',
Delimiter=' ', IsPath=False),
# Max accepts, should be greater than 1 for sizeorder option
'--maxaccepts': ValuedParameter('--', Name='maxaccepts',
Delimiter=' ', IsPath=False),
# Option to cluster to most abundant seed
'--sizeorder': FlagParameter('--', Name='sizeorder'),
# Chimera-specific parameters
# abundance skew for comparing parent/child putative clusters
'--abskew': ValuedParameter('--', Name='abskew', Delimiter=' ',
IsPath=False),
# min score to be classified as chimeric
'--minh': ValuedParameter('--', Name='minh', Delimiter=' ',
IsPath=False),
# weight of no vote
'--xn': ValuedParameter('--', Name='xn', Delimiter=' ',
IsPath=False),
# pseudo count prior for no votes
'--dn': ValuedParameter('--', Name='dn', Delimiter=' ',
IsPath=False),
# Minimum number of diffs in a segment
'--mindiffs': ValuedParameter('--', Name='mindiffs', Delimiter=' ',
IsPath=False),
# Minimum divergence between query and ref sequence
'--mindiv': ValuedParameter('--', Name='mindiv', Delimiter=' ',
IsPath=False),
# Threads allocated for multithreading calls.
'--threads': ValuedParameter('--', Name='threads',
Delimiter=' ', IsPath=False)
}
_suppress_stdout = False
_suppress_stderr = False
def _input_as_parameters(self, data):
""" Set the input path (a fasta filepath)
"""
# The list of values which can be passed on a per-run basis
allowed_values = ['--uc', '--output', '--log',
'--sortbylength', '--derep_fulllength', '--sizeout',
'--minseqlength', '--strand', '--wordlength',
'--maxrejects', '--usearch_global', '--db',
'--cluster_smallmem', '--cluster_fast', '--id',
'--maxaccepts', '--sizeorder', '--usersort',
'--abskew', '--minh', '--xn', '--dn', '--mindiffs',
'--mindiv', '--uchime_denovo', '--uchimeout',
'--uchime_ref', '--threads'
]
unsupported_parameters = set(data.keys()) - set(allowed_values)
if unsupported_parameters:
raise ApplicationError(
"Unsupported parameter(s) passed when calling %s: %s" %
(self._command, ' '.join(unsupported_parameters)))
for v in allowed_values:
# turn the parameter off so subsequent runs are not
# affected by parameter settings from previous runs
self.Parameters[v].off()
if v in data:
# turn the parameter on if specified by the user
self.Parameters[v].on(data[v])
return ''
def _get_result_paths(self, data):
""" Set the result paths """
result = {}
result['Output'] = ResultPath(
Path=self.Parameters['--output'].Value,
IsWritten=self.Parameters['--output'].isOn())
result['ClusterFile'] = ResultPath(
Path=self.Parameters['--uc'].Value,
IsWritten=self.Parameters['--uc'].isOn())
return result
def _accept_exit_status(self, exit_status):
""" Test for acceptable exit status
usearch can seg fault and still generate a parsable .uc file
so we explicitly check the exit status
"""
return exit_status == 0
def getHelp(self):
"""Method that points to documentation"""
help_str =\
"""
USEARCH is hosted at:
http://www.drive5.com/usearch/
The following papers should be cited if this resource is used:
Edgar,RC, Haas,BJ, Clemente,JC, Quince,C, Knight,R (2011) UCHIME
improves sensitivity and speed of chimera detection, Bioinformatics
"""
return help_str
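# Illustrative usage sketch (not part of the original module): the convenience
# functions below drive Usearch61 by building a parameter dict and invoking the
# controller, roughly as shown here for a dereplication run. Filepaths are
# hypothetical.
def _example_usearch61_direct_call():
    params = {'--derep_fulllength': 'seqs.fna',  # input fasta to dereplicate
              '--output': 'derep.fna',           # dereplicated fasta output
              '--uc': 'derep.uc',                # cluster membership file
              '--sizeout': True,                 # add abundance to labels
              '--minseqlength': 64}
    app = Usearch61(params, WorkingDir='.', HALT_EXEC=False)
    return app()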
# Start Usearch61 convenience functions
def usearch61_ref_cluster(seq_path,
refseqs_fp,
percent_id=0.97,
rev=False,
save_intermediate_files=True,
minlen=64,
output_dir='.',
remove_usearch_logs=False,
verbose=False,
wordlength=8,
usearch_fast_cluster=False,
usearch61_sort_method='abundance',
otu_prefix="denovo",
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
suppress_new_clusters=False,
threads=1.0,
HALT_EXEC=False
):
""" Returns dictionary of cluster IDs:seq IDs
Overall function for reference-based clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
refseqs_fp: reference fasta filepath, used to cluster sequences against.
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
    remove_usearch_logs: if True, suppresses creation of usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds (only applies
when doing open reference de novo clustering)
    suppress_new_clusters: If True, suppresses de novo clustering of sequences
     that fail to hit the reference, so only reference clusters are returned.
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
Description of analysis workflows
---------------------------------
closed-reference approach:
dereplicate sequences first, do reference based clustering,
merge clusters/failures and dereplicated data,
write OTU mapping and failures file.
open-reference approach:
dereplicate sequences first, do reference based clustering, parse failures,
sort failures fasta according to chosen method, cluster failures, merge
reference clustering results/de novo results/dereplicated data, write
OTU mapping file.
Dereplication should save processing time for large datasets.
"""
files_to_remove = []
# Need absolute paths to avoid potential problems with app controller
if output_dir:
output_dir = join(abspath(output_dir), '')
seq_path = abspath(seq_path)
try:
if verbose:
print "Presorting sequences according to abundance..."
intermediate_fasta, dereplicated_uc, app_result =\
sort_by_abundance_usearch61(seq_path, output_dir, rev,
minlen, remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(
output_dir,
'abundance_sorted.fna'),
output_uc_filepath=join(
output_dir,
'abundance_sorted.uc'),
threads=threads)
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
files_to_remove.append(dereplicated_uc)
if verbose:
print "Performing reference based clustering..."
clusters_fp, app_result = usearch61_cluster_ref(intermediate_fasta,
refseqs_fp, percent_id, rev, minlen, output_dir,
remove_usearch_logs, wordlength, usearch61_maxrejects,
usearch61_maxaccepts, HALT_EXEC,
output_uc_filepath=join(
output_dir,
'ref_clustered.uc'),
threads=threads)
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix="",
ref_clustered=True)
dereplicated_clusters =\
parse_dereplicated_uc(open(dereplicated_uc, "U"))
clusters = merge_clusters_dereplicated_seqs(clusters,
dereplicated_clusters)
failures = merge_failures_dereplicated_seqs(failures,
dereplicated_clusters)
if not suppress_new_clusters and failures:
if verbose:
print "Parsing out sequences that failed to cluster..."
failures_fasta = parse_usearch61_failures(seq_path, set(failures),
output_fasta_fp=join(output_dir, "failures_parsed.fna"))
if not save_intermediate_files:
files_to_remove.append(failures_fasta)
denovo_clusters = usearch61_denovo_cluster(failures_fasta,
percent_id, rev, save_intermediate_files, minlen, output_dir,
remove_usearch_logs, verbose, wordlength, usearch_fast_cluster,
usearch61_sort_method, otu_prefix, usearch61_maxrejects,
usearch61_maxaccepts, sizeorder, threads, HALT_EXEC)
failures = []
# Merge ref and denovo clusters
clusters.update(denovo_clusters)
except ApplicationError:
        raise ApplicationError('Error running usearch61. Possible causes are '
                               'an unsupported usearch version (current '
                               'supported version is v6.1.544) or an '
                               'improperly formatted input file.')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch61 not found, is it properly '
'installed?')
if not save_intermediate_files:
remove_files(files_to_remove)
return clusters, failures
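# Illustrative usage sketch (not part of the original module): closed- versus
# open-reference clustering with usearch61_ref_cluster. Filepaths are
# hypothetical; with suppress_new_clusters=True, sequences that miss the
# reference are reported as failures (closed reference), while the default
# additionally clusters those failures de novo (open reference).
def _example_usearch61_ref_cluster():
    clusters, failures = usearch61_ref_cluster('seqs.fna',
                                               'refseqs.fna',
                                               percent_id=0.97,
                                               output_dir='ref_picked_otus/',
                                               suppress_new_clusters=True)
    return clusters, failures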
def usearch61_denovo_cluster(seq_path,
percent_id=0.97,
rev=False,
save_intermediate_files=True,
minlen=64,
output_dir='.',
remove_usearch_logs=False,
verbose=False,
wordlength=8,
usearch_fast_cluster=False,
usearch61_sort_method='abundance',
otu_prefix="denovo",
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
threads=1.0,
HALT_EXEC=False,
file_prefix="denovo_"
):
""" Returns dictionary of cluster IDs:seq IDs
Overall function for de novo clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
    remove_usearch_logs: if True, suppresses creation of usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
"""
files_to_remove = []
# Need absolute paths to avoid potential problems with app controller
if output_dir:
output_dir = abspath(output_dir) + '/'
seq_path = abspath(seq_path)
try:
if verbose and usearch61_sort_method is not None and\
not usearch_fast_cluster:
print "Sorting sequences according to %s..." % usearch61_sort_method
# fast sorting option automatically performs length sorting
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
intermediate_fasta, dereplicated_uc, app_result =\
sort_by_abundance_usearch61(seq_path, output_dir, rev,
minlen, remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(
output_dir,
file_prefix + 'abundance_sorted.fna'),
output_uc_filepath=join(output_dir,
file_prefix + 'abundance_sorted.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
files_to_remove.append(dereplicated_uc)
elif usearch61_sort_method == 'length' and not usearch_fast_cluster:
intermediate_fasta, app_result =\
sort_by_length_usearch61(seq_path, output_dir, minlen,
remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(output_dir,
file_prefix + 'length_sorted.fna'))
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
else:
intermediate_fasta = seq_path
if verbose:
print "Clustering sequences de novo..."
if usearch_fast_cluster:
clusters_fp, app_result = usearch61_fast_cluster(
intermediate_fasta,
percent_id, minlen, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, HALT_EXEC,
output_uc_filepath=join(
output_dir,
file_prefix + 'fast_clustered.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
else:
clusters_fp, app_result =\
usearch61_smallmem_cluster(intermediate_fasta, percent_id,
minlen, rev, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, sizeorder, HALT_EXEC,
output_uc_filepath=join(output_dir,
file_prefix + 'smallmem_clustered.uc'))
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
except ApplicationError:
        raise ApplicationError('Error running usearch61. Possible causes are '
                               'an unsupported usearch version (current '
                               'supported version is v6.1.544) or an '
                               'improperly formatted input file.')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch61 not found, is it properly ' +
'installed?')
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
de_novo_clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
dereplicated_clusters =\
parse_dereplicated_uc(open(dereplicated_uc, "U"))
clusters = merge_clusters_dereplicated_seqs(de_novo_clusters,
dereplicated_clusters)
else:
clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
if not save_intermediate_files:
remove_files(files_to_remove)
return clusters
# Start fasta sorting functions
def sort_by_abundance_usearch61(seq_path,
output_dir='.',
rev=False,
minlen=64,
remove_usearch_logs=False,
HALT_EXEC=False,
output_fna_filepath=None,
output_uc_filepath=None,
log_name="abundance_sorted.log",
threads=1.0):
""" usearch61 application call to sort fasta file by abundance.
seq_path: fasta filepath to be clustered with usearch61
output_dir: directory to output log, OTU mapping, and intermediate files
rev: enable reverse strand matching for clustering/sorting
minlen: minimum sequence length
    remove_usearch_logs: if True, suppresses creation of usearch log files
HALT_EXEC: application controller option to halt execution
output_fna_filepath: path to write sorted fasta filepath
output_uc_filepath: path to write usearch61 generated .uc file
log_name: filepath to write usearch61 generated log file
threads: Specify number of threads used per core per CPU
"""
if not output_fna_filepath:
_, output_fna_filepath = mkstemp(prefix='abundance_sorted',
suffix='.fna')
if not output_uc_filepath:
_, output_uc_filepath = mkstemp(prefix='abundance_sorted',
suffix='.uc')
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--sizeout': True,
'--derep_fulllength': seq_path,
'--output': output_fna_filepath,
'--uc': output_uc_filepath,
'--threads': threads
}
if rev:
params['--strand'] = 'both'
if not remove_usearch_logs:
params['--log'] = log_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return output_fna_filepath, output_uc_filepath, app_result
def sort_by_length_usearch61(seq_path,
output_dir=".",
minlen=64,
remove_usearch_logs=False,
HALT_EXEC=False,
output_fna_filepath=None,
log_name="length_sorted.log"):
""" usearch61 application call to sort fasta file by length.
seq_path: fasta filepath to be clustered with usearch61
output_dir: directory to output log, OTU mapping, and intermediate files
minlen: minimum sequence length
    remove_usearch_logs: if True, suppresses creation of usearch log files
HALT_EXEC: application controller option to halt execution
output_fna_filepath: path to write sorted fasta filepath
log_name: filepath to write usearch61 generated log file
"""
if not output_fna_filepath:
_, output_fna_filepath = mkstemp(prefix='length_sorted', suffix='.fna')
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--sortbylength': seq_path,
'--output': output_fna_filepath
}
if not remove_usearch_logs:
params['--log'] = log_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return output_fna_filepath, app_result
# End fasta sorting functions
# Start reference clustering functions
def usearch61_cluster_ref(intermediate_fasta,
refseqs_fp,
percent_id=0.97,
rev=False,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
HALT_EXEC=False,
output_uc_filepath=None,
log_filepath="ref_clustered.log",
threads=1.0
):
""" Cluster input fasta seqs against reference database
seq_path: fasta filepath to be clustered with usearch61
refseqs_fp: reference fasta filepath, used to cluster sequences against.
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
    remove_usearch_logs: if True, suppresses creation of usearch log files
wordlength: word length to use for clustering
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
output_uc_filepath: path to write usearch61 generated .uc file
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
"""
log_filepath = join(output_dir, log_filepath)
params = {
'--usearch_global': intermediate_fasta,
'--db': refseqs_fp,
'--minseqlength': minlen,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = log_filepath
if rev:
params['--strand'] = 'both'
else:
params['--strand'] = 'plus'
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result
# End reference clustering functions
# Start de novo clustering functions
def usearch61_fast_cluster(intermediate_fasta,
percent_id=0.97,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=8,
usearch61_maxaccepts=1,
HALT_EXEC=False,
output_uc_filepath=None,
log_name="fast_clustered.log",
threads=1.0):
""" Performs usearch61 de novo fast clustering via cluster_fast option
Only supposed to be used with length sorted data (and performs length
sorting automatically) and does not support reverse strand matching
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
    remove_usearch_logs: if True, suppresses creation of usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
threads: Specify number of threads used per core per CPU
"""
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--cluster_fast': intermediate_fasta,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--usersort': True,
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = log_filepath
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result
def usearch61_smallmem_cluster(intermediate_fasta,
percent_id=0.97,
minlen=64,
rev=False,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
HALT_EXEC=False,
output_uc_filepath=None,
log_name="smallmem_clustered.log",
sizeout=False,
consout_filepath=None):
""" Performs usearch61 de novo clustering via cluster_smallmem option
    Intended for use with pre-sorted (length or abundance sorted) input, since
    the call always sets --usersort; reverse strand matching can be enabled
    via the rev option.
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
rev: will enable reverse strand matching if True
output_dir: directory to output log, OTU mapping, and intermediate files
    remove_usearch_logs: if True, suppresses creation of usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
sizeout: If True, will save abundance data in output fasta labels.
consout_filepath: Needs to be set to save clustered consensus fasta
filepath used for chimera checking.
"""
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--cluster_smallmem': intermediate_fasta,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--usersort': True
}
if sizeorder:
params['--sizeorder'] = True
if not remove_usearch_logs:
params['--log'] = log_filepath
if rev:
params['--strand'] = 'both'
else:
params['--strand'] = 'plus'
if sizeout:
params['--sizeout'] = True
if consout_filepath:
params['--consout'] = consout_filepath
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result
# End de novo clustering functions
# Start Chimera checking functions
def usearch61_chimera_check_denovo(abundance_fp,
uchime_denovo_fp,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
uchime_denovo_log_fp="uchime_denovo.log",
usearch61_minh=0.28,
usearch61_xn=8.0,
usearch61_dn=1.4,
usearch61_mindiffs=3,
usearch61_mindiv=0.8,
usearch61_abundance_skew=2.0,
HALT_EXEC=False):
""" Does de novo, abundance based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_denovo_fp: output uchime file for chimera results.
minlen: minimum sequence length for usearch input fasta seqs.
output_dir: output directory
    remove_usearch_logs: suppresses creation of log file.
uchime_denovo_log_fp: output filepath for log file.
    usearch61_minh: Minimum score (h) to be classified as chimera.
     Increasing this value tends to reduce the number of false positives
     (and also sensitivity).
    usearch61_xn: Weight of "no" vote. Increasing this value tends to reduce
     the number of false positives (and also sensitivity).
    usearch61_dn: Pseudo-count prior for "no" votes (n). Increasing this
     value tends to reduce the number of false positives (and also
     sensitivity).
usearch61_mindiffs: Minimum number of diffs in a segment. Increasing this
value tends to reduce the number of false positives while reducing
sensitivity to very low-divergence chimeras.
usearch61_mindiv: Minimum divergence, i.e. 100% - identity between the
query and closest reference database sequence. Expressed as a percentage,
so the default is 0.8%, which allows chimeras that are up to 99.2% similar
to a reference sequence.
usearch61_abundance_skew: abundance skew for de novo chimera comparisons.
    HALT_EXEC: halt execution and return the command used by the app
     controller.
"""
params = {'--minseqlength': minlen,
'--uchime_denovo': abundance_fp,
'--uchimeout': uchime_denovo_fp,
'--minh': usearch61_minh,
'--xn': usearch61_xn,
'--dn': usearch61_dn,
'--mindiffs': usearch61_mindiffs,
'--mindiv': usearch61_mindiv,
'--abskew': usearch61_abundance_skew
}
if not remove_usearch_logs:
params['--log'] = uchime_denovo_log_fp
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return uchime_denovo_fp, app_result
def usearch61_chimera_check_ref(abundance_fp,
uchime_ref_fp,
reference_seqs_fp,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
uchime_ref_log_fp="uchime_ref.log",
usearch61_minh=0.28,
usearch61_xn=8.0,
usearch61_dn=1.4,
usearch61_mindiffs=3,
usearch61_mindiv=0.8,
threads=1.0,
HALT_EXEC=False):
""" Does reference based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_ref_fp: output uchime filepath for reference results
reference_seqs_fp: reference fasta database for chimera checking.
minlen: minimum sequence length for usearch input fasta seqs.
output_dir: output directory
    remove_usearch_logs: suppresses creation of log file.
    uchime_ref_log_fp: output filepath for log file.
    usearch61_minh: Minimum score (h) to be classified as chimera.
     Increasing this value tends to reduce the number of false positives
     (and also sensitivity).
    usearch61_xn: Weight of "no" vote. Increasing this value tends to reduce
     the number of false positives (and also sensitivity).
    usearch61_dn: Pseudo-count prior for "no" votes (n). Increasing this
     value tends to reduce the number of false positives (and also
     sensitivity).
usearch61_mindiffs: Minimum number of diffs in a segment. Increasing this
value tends to reduce the number of false positives while reducing
sensitivity to very low-divergence chimeras.
usearch61_mindiv: Minimum divergence, i.e. 100% - identity between the
query and closest reference database sequence. Expressed as a percentage,
so the default is 0.8%, which allows chimeras that are up to 99.2% similar
to a reference sequence.
threads: Specify number of threads used per core per CPU
    HALT_EXEC: halt execution and return the command used by the app
     controller.
"""
params = {'--minseqlength': minlen,
'--uchime_ref': abundance_fp,
'--uchimeout': uchime_ref_fp,
'--db': reference_seqs_fp,
'--minh': usearch61_minh,
'--xn': usearch61_xn,
'--dn': usearch61_dn,
'--mindiffs': usearch61_mindiffs,
'--mindiv': usearch61_mindiv,
# Only works in plus according to usearch doc
'--strand': 'plus',
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = uchime_ref_log_fp
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return uchime_ref_fp, app_result
# End chimera checking functions
# Start parsing functions
def parse_dereplicated_uc(dereplicated_uc_lines):
""" Return dict of seq ID:dereplicated seq IDs from dereplicated .uc lines
dereplicated_uc_lines: list of lines of .uc file from dereplicated seqs from
usearch61 (i.e. open file of abundance sorted .uc data)
"""
dereplicated_clusters = {}
seed_hit_ix = 0
seq_id_ix = 8
seed_id_ix = 9
for line in dereplicated_uc_lines:
if line.startswith("#") or len(line.strip()) == 0:
continue
curr_line = line.strip().split('\t')
if curr_line[seed_hit_ix] == "S":
dereplicated_clusters[curr_line[seq_id_ix]] = []
if curr_line[seed_hit_ix] == "H":
curr_seq_id = curr_line[seq_id_ix]
dereplicated_clusters[curr_line[seed_id_ix]].append(curr_seq_id)
return dereplicated_clusters
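# Illustrative worked example (not part of the original module) for
# parse_dereplicated_uc: "S" records declare a seed and "H" records map a
# duplicate read (field 8) onto its seed (field 9), so the two fake records
# below parse to {'seq1': ['seq2']}.
def _example_parse_dereplicated_uc():
    uc_lines = ["S\t0\t250\t*\t*\t*\t*\t*\tseq1\t*\n",
                "H\t0\t250\t100.0\t+\t0\t0\t*\tseq2\tseq1\n"]
    return parse_dereplicated_uc(uc_lines)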
def parse_usearch61_clusters(clustered_uc_lines,
otu_prefix='denovo',
ref_clustered=False):
""" Returns dict of cluster ID:seq IDs
clustered_uc_lines: lines from .uc file resulting from de novo clustering
otu_prefix: string added to beginning of OTU ID.
ref_clustered: If True, will attempt to create dict keys for clusters as
they are read from the .uc file, rather than from seed lines.
"""
clusters = {}
failures = []
seed_hit_ix = 0
otu_id_ix = 1
seq_id_ix = 8
ref_id_ix = 9
for line in clustered_uc_lines:
if line.startswith("#") or len(line.strip()) == 0:
continue
curr_line = line.strip().split('\t')
if curr_line[seed_hit_ix] == "S":
# Need to split on semicolons for sequence IDs to handle case of
# abundance sorted data
clusters[otu_prefix + curr_line[otu_id_ix]] =\
[curr_line[seq_id_ix].split(';')[0].split()[0]]
if curr_line[seed_hit_ix] == "H":
curr_id = curr_line[seq_id_ix].split(';')[0].split()[0]
if ref_clustered:
try:
clusters[otu_prefix + curr_line[ref_id_ix]].append(curr_id)
except KeyError:
clusters[otu_prefix + curr_line[ref_id_ix]] = [curr_id]
else:
clusters[otu_prefix +
curr_line[otu_id_ix]].append(curr_id)
if curr_line[seed_hit_ix] == "N":
failures.append(curr_line[seq_id_ix].split(';')[0])
return clusters, failures
def merge_clusters_dereplicated_seqs(de_novo_clusters,
dereplicated_clusters):
""" combines de novo clusters and dereplicated seqs to OTU id:seqs dict
de_novo_clusters: dict of OTU ID:clustered sequences
dereplicated_clusters: dict of seq IDs: dereplicated seq IDs
"""
clusters = {}
for curr_denovo_key in de_novo_clusters.keys():
clusters[curr_denovo_key] = de_novo_clusters[curr_denovo_key]
curr_clusters = []
for curr_denovo_id in de_novo_clusters[curr_denovo_key]:
curr_clusters += dereplicated_clusters[curr_denovo_id]
clusters[curr_denovo_key] += curr_clusters
return clusters
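# Illustrative worked example (not part of the original module) for
# merge_clusters_dereplicated_seqs: each clustered representative is expanded
# with the reads that were collapsed onto it during dereplication, so the
# inputs below merge to {'denovo0': ['seq1', 'seq2', 'seq3']}.
def _example_merge_clusters_dereplicated_seqs():
    de_novo_clusters = {'denovo0': ['seq1']}
    dereplicated_clusters = {'seq1': ['seq2', 'seq3']}
    return merge_clusters_dereplicated_seqs(de_novo_clusters,
                                            dereplicated_clusters)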
def merge_failures_dereplicated_seqs(failures,
dereplicated_clusters):
""" Appends failures from dereplicated seqs to failures list
failures: list of failures
dereplicated_clusters: dict of seq IDs: dereplicated seq IDs
"""
curr_failures = set(failures)
dereplicated_ids = set(dereplicated_clusters)
for curr_failure in curr_failures:
if curr_failure in dereplicated_ids:
failures += dereplicated_clusters[curr_failure]
return failures
def parse_usearch61_failures(seq_path,
failures,
output_fasta_fp):
""" Parses seq IDs from failures list, writes to output_fasta_fp
seq_path: filepath of original input fasta file.
failures: list/set of failure seq IDs
output_fasta_fp: path to write parsed sequences
"""
parsed_out = open(output_fasta_fp, "w")
    for label, seq in parse_fasta(open(seq_path, "U")):
curr_label = label.split()[0]
if curr_label in failures:
parsed_out.write(">%s\n%s\n" % (label, seq))
parsed_out.close()
return output_fasta_fp
# End parsing functions
| biocore/burrito-fillings | bfillings/usearch.py | Python | bsd-3-clause | 101,600 |
# -*- coding: utf-8 -*-
"""Custom password validators."""
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _, ungettext
class ComplexityValidator(object):
"""Check password contains at least a few things."""
def __init__(self, upper=1, lower=1, digits=1, specials=1):
self.upper = upper
self.lower = lower
self.digits = digits
self.specials = specials
def validate(self, password, user=None):
special_characters = "~!@#$%^&*()_+{}\":;,'[]"
condition = (
self.digits > 0 and
sum(1 for char in password if char.isdigit()) < self.digits)
if condition:
raise ValidationError(
ungettext(
"Password must contain at least {} digit.",
"Password must contain at least {} digits.",
self.digits
).format(self.digits))
condition = (
self.lower > 0 and
sum(1 for char in password if char.islower()) < self.lower)
if condition:
raise ValidationError(
ungettext(
"Password must contain at least {} lowercase letter.",
"Password must contain at least {} lowercase letters.",
self.lower
)
.format(self.lower))
condition = (
self.upper > 0 and
sum(1 for char in password if char.isupper()) < self.upper)
if condition:
raise ValidationError(
ungettext(
"Password must contain at least {} uppercase letter.",
"Password must contain at least {} uppercase letters.",
self.upper
)
.format(self.upper))
condition = (
self.specials > 0 and
sum(1 for char in password if char in special_characters) <
self.specials)
if condition:
raise ValidationError(
ungettext(
"Password must contain at least {} special character.",
"Password must contain at least {} special characters.",
self.specials
)
.format(self.specials))
def get_help_text(self):
return _(
"Your password must contain a combination of different "
"character types.")
| tonioo/modoboa | modoboa/core/password_validation.py | Python | isc | 2,526 |
# coding=utf-8
# Project: Pushetta Site
# Class-based views handling the methods used to manage Chrome push notifications
import json
import logging
logger = logging.getLogger(__name__)
from django.http import HttpResponse
from django.views.generic import View
from django.shortcuts import get_object_or_404
from core.services import ask_subscribe_channel
from core.models import Subscriber, Channel, User
from core.subscriber_manager import SubscriberManager
class WebPushRegistration(View):
"""
    Custom API handling the POST of registration data (user, token, ...).
    Invoked by an Ajax call in the client-side permissionRequest callback.
"""
# Check if device_id is subscriber of channel_name
def get(self, request, device_id=None, channel_name=None):
channel = get_object_or_404(Channel, name=channel_name)
channels = SubscriberManager().get_device_subscriptions(device_id)
resp = 200
        if next((x for x in channels if x == channel.name.lower()), None) is None:
resp = 404
return HttpResponse(status=resp)
# Subscribe to a channel
def post(self, request):
post_data = json.loads(request.body)
channel_name = None
if 'channel' in post_data:
channel_name = post_data['channel']
deviceToken = post_data['token']
browser = post_data['browser']
deviceId = post_data['device_id']
name = "-"
if request.user.is_authenticated():
name = request.user.username
        # Create the subscriber if it doesn't exist
subscriber, created = Subscriber.objects.update_or_create(device_id=deviceId,
defaults={'sub_type': browser,
'sandbox': False, 'enabled': True,
'name': name,
'token': deviceToken})
# Channel subscription
if channel_name is not None:
channel = get_object_or_404(Channel, name=channel_name)
ask_subscribe_channel(channel, deviceId)
return HttpResponse(status=201 if created else 200)
# Delete a channel subscription
def delete(self, request, device_id=None, channel_name=None):
channel = get_object_or_404(Channel, name=channel_name)
channels = SubscriberManager().get_device_subscriptions(device_id)
resp = 404
        if next((x for x in channels if x == channel.name.lower()), None) is not None:
current_dev = Subscriber.objects.get(device_id=device_id)
SubscriberManager().unsubscribe(channel_name, device_id, current_dev.sub_type)
resp = 200
return HttpResponse(status=resp)
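# Illustrative sketch (not part of the original module): the JSON body expected
# by the POST handler above. "channel" is optional; the other keys are
# required. All values are hypothetical.
EXAMPLE_REGISTRATION_PAYLOAD = {
    "channel": "news",
    "token": "chrome-push-registration-token",
    "browser": "chrome",
    "device_id": "a1b2c3d4",
}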
| guglielmino/pushetta-api-django | pushetta/www/browser_views.py | Python | gpl-3.0 | 2,942 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'PubTracker.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', include('pub.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| PubTracker/PubTracker-Backend | PubTracker/PubTracker/urls.py | Python | gpl-2.0 | 315 |
"""
homeassistant.components.automation.template
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Offers template automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation/#template-trigger
"""
import logging
from homeassistant.const import CONF_VALUE_TEMPLATE, EVENT_STATE_CHANGED
from homeassistant.exceptions import TemplateError
from homeassistant.util import template
_LOGGER = logging.getLogger(__name__)
def trigger(hass, config, action):
""" Listen for state changes based on `config`. """
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is None:
_LOGGER.error("Missing configuration key %s", CONF_VALUE_TEMPLATE)
return False
    # Local variable to keep track of whether the action has already been triggered
already_triggered = False
def event_listener(event):
""" Listens for state changes and calls action. """
nonlocal already_triggered
template_result = _check_template(hass, value_template)
# Check to see if template returns true
if template_result and not already_triggered:
already_triggered = True
action()
elif not template_result:
already_triggered = False
hass.bus.listen(EVENT_STATE_CHANGED, event_listener)
return True
def if_action(hass, config):
""" Wraps action method with state based condition. """
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is None:
_LOGGER.error("Missing configuration key %s", CONF_VALUE_TEMPLATE)
return False
return lambda: _check_template(hass, value_template)
def _check_template(hass, value_template):
""" Checks if result of template is true """
try:
value = template.render(hass, value_template, {})
except TemplateError:
_LOGGER.exception('Error parsing template')
return False
return value.lower() == 'true'
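# Illustrative sketch (not part of the original module): the minimal config
# mapping consumed by trigger() and if_action() above. The Jinja template shown
# is hypothetical; any template that renders to 'true' fires the trigger.
EXAMPLE_TEMPLATE_TRIGGER_CONFIG = {
    CONF_VALUE_TEMPLATE: "{{ states.sensor.outside_temp.state | float > 25 }}",
}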
| nevercast/home-assistant | homeassistant/components/automation/template.py | Python | mit | 2,014 |
import warnings
import ctypes
import ctypes.util
from numpy.ctypeslib import ndpointer
import numpy
from scipy import integrate
from .. import potential
from ..util import galpyWarning
from ..potential.Potential import _evaluateRforces, _evaluatezforces,\
_evaluatephiforces
from .integratePlanarOrbit import _parse_integrator, _parse_tol
from ..util.multi import parallel_map
from ..util.leung_dop853 import dop853
from ..util import symplecticode
from ..util import _load_extension_libs
_lib, _ext_loaded= _load_extension_libs.load_libgalpy()
def _parse_pot(pot,potforactions=False,potfortorus=False):
"""Parse the potential so it can be fed to C"""
#Figure out what's in pot
if not isinstance(pot,list):
pot= [pot]
#Initialize everything
pot_type= []
pot_args= []
npot= len(pot)
for p in pot:
if isinstance(p,potential.LogarithmicHaloPotential):
pot_type.append(0)
if p.isNonAxi:
pot_args.extend([p._amp,p._q,p._core2,p._1m1overb2])
else:
pot_args.extend([p._amp,p._q,p._core2,2.]) # 1m1overb2 > 1: axi
elif isinstance(p,potential.DehnenBarPotential):
pot_type.append(1)
pot_args.extend([p._amp*p._af,p._tform,p._tsteady,p._rb,p._omegab,
p._barphi])
elif isinstance(p,potential.MiyamotoNagaiPotential):
pot_type.append(5)
pot_args.extend([p._amp,p._a,p._b])
elif isinstance(p,potential.PowerSphericalPotential):
pot_type.append(7)
pot_args.extend([p._amp,p.alpha])
elif isinstance(p,potential.HernquistPotential):
pot_type.append(8)
pot_args.extend([p._amp,p.a])
elif isinstance(p,potential.NFWPotential):
pot_type.append(9)
pot_args.extend([p._amp,p.a])
elif isinstance(p,potential.JaffePotential):
pot_type.append(10)
pot_args.extend([p._amp,p.a])
elif isinstance(p,potential.DoubleExponentialDiskPotential):
pot_type.append(11)
pot_args.extend([p._amp,-4.*numpy.pi*p._alpha*p._amp,
p._alpha,p._beta,len(p._de_j1_xs)])
pot_args.extend(p._de_j0_xs)
pot_args.extend(p._de_j1_xs)
pot_args.extend(p._de_j0_weights)
pot_args.extend(p._de_j1_weights)
elif isinstance(p,potential.FlattenedPowerPotential):
pot_type.append(12)
pot_args.extend([p._amp,p.alpha,p.q2,p.core2])
elif isinstance(p,potential.interpRZPotential):
pot_type.append(13)
pot_args.extend([len(p._rgrid),len(p._zgrid)])
if p._logR:
pot_args.extend([p._logrgrid[ii] for ii in range(len(p._rgrid))])
else:
pot_args.extend([p._rgrid[ii] for ii in range(len(p._rgrid))])
pot_args.extend([p._zgrid[ii] for ii in range(len(p._zgrid))])
if hasattr(p,'_potGrid_splinecoeffs'):
pot_args.extend([x for x in p._potGrid_splinecoeffs.flatten(order='C')])
else: # pragma: no cover
warnings.warn("You are attempting to use the C implementation of interpRZPotential, but have not interpolated the potential itself; if you think this is needed for what you want to do, initialize the interpRZPotential instance with interpPot=True",
galpyWarning)
pot_args.extend(list(numpy.ones(len(p._rgrid)*len(p._zgrid))))
if hasattr(p,'_rforceGrid_splinecoeffs'):
pot_args.extend([x for x in p._rforceGrid_splinecoeffs.flatten(order='C')])
else: # pragma: no cover
warnings.warn("You are attempting to use the C implementation of interpRZPotential, but have not interpolated the Rforce; if you think this is needed for what you want to do, initialize the interpRZPotential instance with interpRforce=True",
galpyWarning)
pot_args.extend(list(numpy.ones(len(p._rgrid)*len(p._zgrid))))
if hasattr(p,'_zforceGrid_splinecoeffs'):
pot_args.extend([x for x in p._zforceGrid_splinecoeffs.flatten(order='C')])
else: # pragma: no cover
warnings.warn("You are attempting to use the C implementation of interpRZPotential, but have not interpolated the zforce; if you think this is needed for what you want to do, initialize the interpRZPotential instance with interpzforce=True",
galpyWarning)
pot_args.extend(list(numpy.ones(len(p._rgrid)*len(p._zgrid))))
pot_args.extend([p._amp,int(p._logR)])
elif isinstance(p,potential.IsochronePotential):
pot_type.append(14)
pot_args.extend([p._amp,p.b])
elif isinstance(p,potential.PowerSphericalPotentialwCutoff):
pot_type.append(15)
pot_args.extend([p._amp,p.alpha,p.rc])
elif isinstance(p,potential.MN3ExponentialDiskPotential):
# Three Miyamoto-Nagai disks
npot+= 2
pot_type.extend([5,5,5])
pot_args.extend([p._amp*p._mn3[0]._amp,
p._mn3[0]._a,p._mn3[0]._b,
p._amp*p._mn3[1]._amp,
p._mn3[1]._a,p._mn3[1]._b,
p._amp*p._mn3[2]._amp,
p._mn3[2]._a,p._mn3[2]._b])
elif isinstance(p,potential.KuzminKutuzovStaeckelPotential):
pot_type.append(16)
pot_args.extend([p._amp,p._ac,p._Delta])
elif isinstance(p,potential.PlummerPotential):
pot_type.append(17)
pot_args.extend([p._amp,p._b])
elif isinstance(p,potential.PseudoIsothermalPotential):
pot_type.append(18)
pot_args.extend([p._amp,p._a])
elif isinstance(p,potential.KuzminDiskPotential):
pot_type.append(19)
pot_args.extend([p._amp,p._a])
elif isinstance(p,potential.BurkertPotential):
pot_type.append(20)
pot_args.extend([p._amp,p.a])
elif isinstance(p,potential.EllipsoidalPotential.EllipsoidalPotential):
pot_args.append(p._amp)
pot_args.extend([0.,0.,0.,0.,0.,0.]) # for caching
# Potential specific parameters
if isinstance(p,potential.TriaxialHernquistPotential):
pot_type.append(21)
pot_args.extend([2,p.a,p.a4]) # for psi, mdens, mdens_deriv
elif isinstance(p,potential.TriaxialNFWPotential):
pot_type.append(22)
pot_args.extend([2,p.a,p.a3]) # for psi, mdens, mdens_deriv
elif isinstance(p,potential.TriaxialJaffePotential):
pot_type.append(23)
pot_args.extend([2,p.a,p.a2]) # for psi, mdens, mdens_deriv
elif isinstance(p,potential.PerfectEllipsoidPotential):
pot_type.append(30)
pot_args.extend([1,p.a2]) # for psi, mdens, mdens_deriv
elif isinstance(p,potential.TriaxialGaussianPotential):
pot_type.append(37)
pot_args.extend([1,-p._twosigma2]) # for psi, mdens, mdens_deriv
elif isinstance(p,potential.PowerTriaxialPotential):
pot_type.append(38)
pot_args.extend([1,p.alpha]) # for psi, mdens, mdens_deriv
pot_args.extend([p._b2,p._c2,int(p._aligned)]) # Reg. Ellipsoidal
if not p._aligned:
pot_args.extend(list(p._rot.flatten()))
else:
pot_args.extend(list(numpy.eye(3).flatten())) # not actually used
pot_args.append(p._glorder)
pot_args.extend([p._glx[ii] for ii in range(p._glorder)])
# this adds some common factors to the integration weights
pot_args.extend([-4.*numpy.pi*p._glw[ii]*p._b*p._c\
/numpy.sqrt(( 1.+(p._b2-1.)*p._glx[ii]**2.)
*(1.+(p._c2-1.)*p._glx[ii]**2.))
for ii in range(p._glorder)])
elif isinstance(p,potential.SCFPotential):
# Type 24, see stand-alone parser below
pt,pa= _parse_scf_pot(p)
pot_type.append(pt)
pot_args.extend(pa)
elif isinstance(p,potential.SoftenedNeedleBarPotential):
pot_type.append(25)
pot_args.extend([p._amp,p._a,p._b,p._c2,p._pa,p._omegab])
pot_args.extend([0.,0.,0.,0.,0.,0.,0.]) # for caching
elif isinstance(p,potential.DiskSCFPotential):
# Need to pull this apart into: (a) SCF part, (b) constituent
# [Sigma_i,h_i] parts
# (a) SCF, multiply in any add'l amp
pt,pa= _parse_scf_pot(p._scf,extra_amp=p._amp)
pot_type.append(pt)
pot_args.extend(pa)
# (b) constituent [Sigma_i,h_i] parts
for Sigma,hz in zip(p._Sigma_dict,p._hz_dict):
npot+= 1
pot_type.append(26)
stype= Sigma.get('type','exp')
if stype == 'exp' and not 'Rhole' in Sigma:
pot_args.extend([3,0,
4.*numpy.pi*Sigma.get('amp',1.)*p._amp,
Sigma.get('h',1./3.)])
elif stype == 'expwhole' \
or (stype == 'exp' and 'Rhole' in Sigma):
pot_args.extend([4,1,
4.*numpy.pi*Sigma.get('amp',1.)*p._amp,
Sigma.get('h',1./3.),
Sigma.get('Rhole',0.5)])
hztype= hz.get('type','exp')
if hztype == 'exp':
pot_args.extend([0,hz.get('h',0.0375)])
elif hztype == 'sech2':
pot_args.extend([1,hz.get('h',0.0375)])
elif isinstance(p, potential.SpiralArmsPotential):
pot_type.append(27)
pot_args.extend([len(p._Cs), p._amp, p._N, p._sin_alpha, p._tan_alpha, p._r_ref, p._phi_ref,
p._Rs, p._H, p._omega])
pot_args.extend(p._Cs)
# 30: PerfectEllipsoidPotential, done with others above
# 31: KGPotential
# 32: IsothermalDiskPotential
elif isinstance(p,potential.DehnenCoreSphericalPotential):
pot_type.append(33)
pot_args.extend([p._amp,p.a])
elif isinstance(p,potential.DehnenSphericalPotential):
pot_type.append(34)
pot_args.extend([p._amp,p.a,p.alpha])
elif isinstance(p,potential.HomogeneousSpherePotential):
pot_type.append(35)
pot_args.extend([p._amp,p._R2,p._R3])
elif isinstance(p,potential.interpSphericalPotential):
pot_type.append(36)
pot_args.append(len(p._rgrid))
pot_args.extend(p._rgrid)
pot_args.extend(p._rforce_grid)
pot_args.extend([p._amp,p._rmin,p._rmax,p._total_mass,
p._Phi0,p._Phimax])
# 37: TriaxialGaussianPotential, done with others above
# 38: PowerTriaxialPotential, done with others above
############################## WRAPPERS ###############################
elif isinstance(p,potential.DehnenSmoothWrapperPotential):
pot_type.append(-1)
wrap_npot, wrap_pot_type, wrap_pot_args= \
_parse_pot(p._pot,
potforactions=potforactions,potfortorus=potfortorus)
pot_args.append(wrap_npot)
pot_type.extend(wrap_pot_type)
pot_args.extend(wrap_pot_args)
pot_args.extend([p._amp,p._tform,p._tsteady,int(p._grow)])
elif isinstance(p,potential.SolidBodyRotationWrapperPotential):
pot_type.append(-2)
# Not sure how to easily avoid this duplication
wrap_npot, wrap_pot_type, wrap_pot_args= \
_parse_pot(p._pot,
potforactions=potforactions,potfortorus=potfortorus)
pot_args.append(wrap_npot)
pot_type.extend(wrap_pot_type)
pot_args.extend(wrap_pot_args)
pot_args.extend([p._amp,p._omega,p._pa])
elif isinstance(p,potential.CorotatingRotationWrapperPotential):
pot_type.append(-4)
# Not sure how to easily avoid this duplication
wrap_npot, wrap_pot_type, wrap_pot_args= \
_parse_pot(p._pot,
potforactions=potforactions,potfortorus=potfortorus)
pot_args.append(wrap_npot)
pot_type.extend(wrap_pot_type)
pot_args.extend(wrap_pot_args)
pot_args.extend([p._amp,p._vpo,p._beta,p._pa,p._to])
elif isinstance(p,potential.GaussianAmplitudeWrapperPotential):
pot_type.append(-5)
wrap_npot, wrap_pot_type, wrap_pot_args= \
_parse_pot(p._pot,
potforactions=potforactions,potfortorus=potfortorus)
pot_args.append(wrap_npot)
pot_type.extend(wrap_pot_type)
pot_args.extend(wrap_pot_args)
pot_args.extend([p._amp,p._to,p._sigma2])
elif isinstance(p,potential.MovingObjectPotential):
pot_type.append(-6)
wrap_npot, wrap_pot_type, wrap_pot_args= \
_parse_pot(p._pot,
potforactions=potforactions,potfortorus=potfortorus)
pot_args.append(wrap_npot)
pot_type.extend(wrap_pot_type)
pot_args.extend(wrap_pot_args)
pot_args.extend([len(p._orb.t)])
pot_args.extend(p._orb.t)
pot_args.extend(p._orb.x(p._orb.t,use_physical=False))
pot_args.extend(p._orb.y(p._orb.t,use_physical=False))
pot_args.extend(p._orb.z(p._orb.t,use_physical=False))
pot_args.extend([p._amp])
pot_args.extend([p._orb.t[0],p._orb.t[-1]]) #t_0, t_f
elif isinstance(p,potential.ChandrasekharDynamicalFrictionForce):
pot_type.append(-7)
wrap_npot, wrap_pot_type, wrap_pot_args= \
_parse_pot(p._dens_pot,
potforactions=potforactions,potfortorus=potfortorus)
pot_args.append(wrap_npot)
pot_type.extend(wrap_pot_type)
pot_args.extend(wrap_pot_args)
pot_args.extend([len(p._sigmar_rs_4interp)])
pot_args.extend(p._sigmar_rs_4interp)
pot_args.extend(p._sigmars_4interp)
pot_args.extend([p._amp])
pot_args.extend([-1.,0.,0.,0.,0.,0.,0.,0.]) # for caching
pot_args.extend([p._ms,p._rhm,p._gamma**2.,
-1 if not p._lnLambda else p._lnLambda,
p._minr**2.])
pot_args.extend([p._sigmar_rs_4interp[0],
p._sigmar_rs_4interp[-1]]) #r_0, r_f
elif isinstance(p,potential.RotateAndTiltWrapperPotential):
pot_type.append(-8)
# Not sure how to easily avoid this duplication
wrap_npot, wrap_pot_type, wrap_pot_args= \
_parse_pot(p._pot,
potforactions=potforactions,potfortorus=potfortorus)
pot_args.append(wrap_npot)
pot_type.extend(wrap_pot_type)
pot_args.extend(wrap_pot_args)
pot_args.extend([p._amp])
pot_args.extend([0.,0.,0.,0.,0.,0.]) # for caching
pot_args.extend(list(p._rot.flatten()))
pot_type= numpy.array(pot_type,dtype=numpy.int32,order='C')
pot_args= numpy.array(pot_args,dtype=numpy.float64,order='C')
return (npot,pot_type,pot_args)
def _parse_scf_pot(p,extra_amp=1.):
# Stand-alone parser for SCF, bc re-used
isNonAxi= p.isNonAxi
pot_args= [p._a, isNonAxi]
pot_args.extend(p._Acos.shape)
pot_args.extend(extra_amp*p._amp*p._Acos.flatten(order='C'))
if isNonAxi:
pot_args.extend(extra_amp*p._amp*p._Asin.flatten(order='C'))
pot_args.extend([-1.,0,0,0,0,0,0])
return (24,pot_args)
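# Editorial illustration (not part of galpy): a minimal sketch of how the
# (npot, pot_type, pot_args) encoding produced by _parse_pot can be inspected.
# The potential instance and its parameter values below are assumptions chosen
# only for illustration; the helper is never called by this module.
def _example_parse_pot_encoding():
    # a Miyamoto-Nagai disk maps to type code 5 with args [amp, a, b]
    mp = potential.MiyamotoNagaiPotential(amp=1., a=0.5, b=0.05)
    npot, pot_type, pot_args = _parse_pot(mp)
    assert npot == 1 and pot_type[0] == 5
    return npot, pot_type, pot_args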
def integrateFullOrbit_c(pot,yo,t,int_method,rtol=None,atol=None,dt=None):
"""
NAME:
integrateFullOrbit_c
PURPOSE:
C integrate an ode for a FullOrbit
INPUT:
pot - Potential or list of such instances
yo - initial condition [q,p] , can be [N,6] or [6]
t - set of times at which one wants the result
int_method= 'leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c'
rtol, atol
dt= (None) force integrator to use this stepsize (default is to automatically determine one; only for C-based integrators)
OUTPUT:
(y,err)
y : array, shape (N,len(t),6) or (len(t),6) if N = 1
Array containing the value of y for each desired time in t, \
with the initial value y0 in the first row.
err: error message, if not zero: 1 means maximum step reduction happened for adaptive integrators
HISTORY:
2011-11-13 - Written - Bovy (IAS)
2018-12-21 - Adapted to allow multiple objects - Bovy (UofT)
"""
if len(yo.shape) == 1: single_obj= True
else: single_obj= False
yo= numpy.atleast_2d(yo)
nobj= len(yo)
rtol, atol= _parse_tol(rtol,atol)
npot, pot_type, pot_args= _parse_pot(pot)
int_method_c= _parse_integrator(int_method)
if dt is None:
dt= -9999.99
#Set up result array
result= numpy.empty((nobj,len(t),6))
err= numpy.zeros(nobj,dtype=numpy.int32)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
integrationFunc= _lib.integrateFullOrbit
integrationFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_double,
ctypes.c_double,
ctypes.c_double,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ctypes.c_int]
#Array requirements, first store old order
f_cont= [yo.flags['F_CONTIGUOUS'],
t.flags['F_CONTIGUOUS']]
yo= numpy.require(yo,dtype=numpy.float64,requirements=['C','W'])
t= numpy.require(t,dtype=numpy.float64,requirements=['C','W'])
result= numpy.require(result,dtype=numpy.float64,requirements=['C','W'])
err= numpy.require(err,dtype=numpy.int32,requirements=['C','W'])
#Run the C code
integrationFunc(ctypes.c_int(nobj),
yo,
ctypes.c_int(len(t)),
t,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(dt),
ctypes.c_double(rtol),
ctypes.c_double(atol),
result,
err,
ctypes.c_int(int_method_c))
if numpy.any(err == -10): #pragma: no cover
raise KeyboardInterrupt("Orbit integration interrupted by CTRL-C (SIGINT)")
#Reset input arrays
if f_cont[0]: yo= numpy.asfortranarray(yo)
if f_cont[1]: t= numpy.asfortranarray(t)
if single_obj: return (result[0],err[0])
else: return (result,err)
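# Editorial usage sketch (assumptions: the compiled libgalpy extension is
# available and yo follows this module's [R, vR, vT, z, vz, phi] ordering, as
# used by integrate_for_map below). Illustrative only; never called here.
def _example_integrateFullOrbit_c():
    mp = potential.MiyamotoNagaiPotential(amp=1., a=0.5, b=0.05)
    yo = numpy.array([1., 0.1, 1.1, 0., 0.02, 0.])  # [R, vR, vT, z, vz, phi]
    ts = numpy.linspace(0., 10., 101)
    result, err = integrateFullOrbit_c(mp, yo, ts, 'rk4_c')
    return result, err  # result has shape (len(ts), 6) for a single object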
def integrateFullOrbit_dxdv_c(pot,yo,dyo,t,int_method,rtol=None,atol=None): #pragma: no cover because not included in v1, uncover when included
"""
NAME:
integrateFullOrbit_dxdv_c
PURPOSE:
C integrate an ode for a planarOrbit+phase space volume dxdv
INPUT:
pot - Potential or list of such instances
yo - initial condition [q,p]
dyo - initial condition [dq,dp]
t - set of times at which one wants the result
int_method= 'leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c'
rtol, atol
OUTPUT:
(y,err)
y : array, shape (len(y0), len(t))
Array containing the value of y for each desired time in t, \
with the initial value y0 in the first row.
err: error message if not zero, 1: maximum step reduction happened for adaptive integrators
HISTORY:
2011-11-13 - Written - Bovy (IAS)
"""
rtol, atol= _parse_tol(rtol,atol)
npot, pot_type, pot_args= _parse_pot(pot)
int_method_c= _parse_integrator(int_method)
yo= numpy.concatenate((yo,dyo))
#Set up result array
result= numpy.empty((len(t),12))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
integrationFunc= _lib.integrateFullOrbit_dxdv
integrationFunc.argtypes= [ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_double,
ctypes.c_double,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int),
ctypes.c_int]
#Array requirements, first store old order
f_cont= [yo.flags['F_CONTIGUOUS'],
t.flags['F_CONTIGUOUS']]
yo= numpy.require(yo,dtype=numpy.float64,requirements=['C','W'])
t= numpy.require(t,dtype=numpy.float64,requirements=['C','W'])
result= numpy.require(result,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
integrationFunc(yo,
ctypes.c_int(len(t)),
t,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(rtol),ctypes.c_double(atol),
result,
ctypes.byref(err),
ctypes.c_int(int_method_c))
if int(err.value) == -10: #pragma: no cover
raise KeyboardInterrupt("Orbit integration interrupted by CTRL-C (SIGINT)")
#Reset input arrays
if f_cont[0]: yo= numpy.asfortranarray(yo)
if f_cont[1]: t= numpy.asfortranarray(t)
return (result,err.value)
def integrateFullOrbit(pot,yo,t,int_method,rtol=None,atol=None,numcores=1,
dt=None):
"""
NAME:
integrateFullOrbit
PURPOSE:
Integrate an ode for a FullOrbit
INPUT:
pot - Potential or list of such instances
yo - initial condition [q,p], shape [N,5] or [N,6]
t - set of times at which one wants the result
int_method= 'leapfrog', 'odeint', or 'dop853'
rtol, atol= tolerances (not always used...)
numcores= (1) number of cores to use for multi-processing
dt= (None) force integrator to use this stepsize (default is to automatically determine one; only for C-based integrators)
OUTPUT:
(y,err)
y : array, shape (N,len(t),5/6)
Array containing the value of y for each desired time in t, \
with the initial value y0 in the first row.
err: error message, always zero for now
HISTORY:
2010-08-01 - Written - Bovy (NYU)
2019-04-09 - Adapted to allow multiple objects and parallel mapping - Bovy (UofT)
"""
nophi= False
    if not int_method.lower() == 'dop853' and not int_method.lower() == 'odeint':
if len(yo[0]) == 5:
nophi= True
#We hack this by putting in a dummy phi=0
yo= numpy.pad(yo,((0,0),(0,1)),'constant',constant_values=0)
if int_method.lower() == 'leapfrog':
if rtol is None: rtol= 1e-8
def integrate_for_map(vxvv):
#go to the rectangular frame
this_vxvv= numpy.array([vxvv[0]*numpy.cos(vxvv[5]),
vxvv[0]*numpy.sin(vxvv[5]),
vxvv[3],
vxvv[1]*numpy.cos(vxvv[5])
-vxvv[2]*numpy.sin(vxvv[5]),
vxvv[2]*numpy.cos(vxvv[5])
+vxvv[1]*numpy.sin(vxvv[5]),
vxvv[4]])
#integrate
out= symplecticode.leapfrog(_rectForce,this_vxvv,
t,args=(pot,),rtol=rtol)
#go back to the cylindrical frame
R= numpy.sqrt(out[:,0]**2.+out[:,1]**2.)
phi= numpy.arccos(out[:,0]/R)
phi[(out[:,1] < 0.)]= 2.*numpy.pi-phi[(out[:,1] < 0.)]
vR= out[:,3]*numpy.cos(phi)+out[:,4]*numpy.sin(phi)
vT= out[:,4]*numpy.cos(phi)-out[:,3]*numpy.sin(phi)
out[:,3]= out[:,2]
out[:,4]= out[:,5]
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,5]= phi
return out
elif int_method.lower() == 'dop853' or int_method.lower() == 'odeint':
if rtol is None: rtol= 1e-8
if int_method.lower() == 'dop853':
integrator= dop853
extra_kwargs= {}
else:
integrator= integrate.odeint
extra_kwargs= {'rtol':rtol}
if len(yo[0]) == 5:
def integrate_for_map(vxvv):
l= vxvv[0]*vxvv[2]
l2= l**2.
init= [vxvv[0],vxvv[1],vxvv[3],vxvv[4]]
intOut= integrator(_RZEOM,init,t=t,args=(pot,l2),
**extra_kwargs)
out= numpy.zeros((len(t),5))
out[:,0]= intOut[:,0]
out[:,1]= intOut[:,1]
out[:,3]= intOut[:,2]
out[:,4]= intOut[:,3]
out[:,2]= l/out[:,0]
#post-process to remove negative radii
neg_radii= (out[:,0] < 0.)
out[neg_radii,0]= -out[neg_radii,0]
return out
else:
def integrate_for_map(vxvv):
vphi= vxvv[2]/vxvv[0]
init= [vxvv[0],vxvv[1],vxvv[5],vphi,vxvv[3],vxvv[4]]
intOut= integrator(_EOM,init,t=t,args=(pot,))
out= numpy.zeros((len(t),6))
out[:,0]= intOut[:,0]
out[:,1]= intOut[:,1]
out[:,2]= out[:,0]*intOut[:,3]
out[:,3]= intOut[:,4]
out[:,4]= intOut[:,5]
out[:,5]= intOut[:,2]
#post-process to remove negative radii
neg_radii= (out[:,0] < 0.)
out[neg_radii,0]= -out[neg_radii,0]
out[neg_radii,3]+= numpy.pi
return out
else: # Assume we are forcing parallel_mapping of a C integrator...
def integrate_for_map(vxvv):
return integrateFullOrbit_c(pot,numpy.copy(vxvv),
t,int_method,dt=dt)[0]
if len(yo) == 1: # Can't map a single value...
out= numpy.atleast_3d(integrate_for_map(yo[0]).T).T
else:
out= numpy.array((parallel_map(integrate_for_map,yo,numcores=numcores)))
if nophi:
out= out[:,:,:5]
return out, numpy.zeros(len(yo))
def _RZEOM(y,t,pot,l2):
"""
NAME:
_RZEOM
PURPOSE:
implements the EOM, i.e., the right-hand side of the differential
equation, for a 3D orbit assuming conservation of angular momentum
INPUT:
y - current phase-space position
t - current time
pot - (list of) Potential instance(s)
l2 - angular momentum squared
OUTPUT:
dy/dt
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
return [y[1],
l2/y[0]**3.+_evaluateRforces(pot,y[0],y[2],t=t),
y[3],
_evaluatezforces(pot,y[0],y[2],t=t)]
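# Editorial note: written out, _RZEOM above integrates the reduced (R, z)
# system under conservation of the vertical angular momentum l = R*vT,
#     dR/dt = vR,    dvR/dt = l^2/R^3 + F_R(R, z, t),
#     dz/dt = vz,    dvz/dt = F_z(R, z, t),
# while _EOM below evolves the full (R, phi, z) system, with
#     dvphi/dt = (F_phi - 2*R*vR*vphi) / R^2.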
def _EOM(y,t,pot):
"""
NAME:
_EOM
PURPOSE:
implements the EOM, i.e., the right-hand side of the differential
equation, for a 3D orbit
INPUT:
y - current phase-space position
t - current time
pot - (list of) Potential instance(s)
OUTPUT:
dy/dt
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
l2= (y[0]**2.*y[3])**2.
return [y[1],
l2/y[0]**3.+_evaluateRforces(pot,y[0],y[4],phi=y[2],t=t,
v=[y[1],y[0]*y[3],y[5]]),
y[3],
1./y[0]**2.*(_evaluatephiforces(pot,y[0],y[4],phi=y[2],t=t,
v=[y[1],y[0]*y[3],y[5]])
-2.*y[0]*y[1]*y[3]),
y[5],
_evaluatezforces(pot,y[0],y[4],phi=y[2],t=t,
v=[y[1],y[0]*y[3],y[5]])]
def _rectForce(x,pot,t=0.):
"""
NAME:
_rectForce
PURPOSE:
returns the force in the rectangular frame
INPUT:
x - current position
t - current time
pot - (list of) Potential instance(s)
OUTPUT:
force
HISTORY:
2011-02-02 - Written - Bovy (NYU)
"""
#x is rectangular so calculate R and phi
R= numpy.sqrt(x[0]**2.+x[1]**2.)
phi= numpy.arccos(x[0]/R)
sinphi= x[1]/R
cosphi= x[0]/R
if x[1] < 0.: phi= 2.*numpy.pi-phi
#calculate forces
Rforce= _evaluateRforces(pot,R,x[2],phi=phi,t=t)
phiforce= _evaluatephiforces(pot,R,x[2],phi=phi,t=t)
return numpy.array([cosphi*Rforce-1./R*sinphi*phiforce,
sinphi*Rforce+1./R*cosphi*phiforce,
_evaluatezforces(pot,R,x[2],phi=phi,t=t)])
| jobovy/galpy | galpy/orbit/integrateFullOrbit.py | Python | bsd-3-clause | 29,960 |
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme.automate.explorer.domain import DomainCollection
from utils import error
from utils.appliance.implementations.ui import navigate_to
from utils.update import update
@pytest.mark.tier(1)
@pytest.mark.parametrize('enabled', [True, False], ids=['enabled', 'disabled'])
def test_domain_crud(request, enabled):
domains = DomainCollection()
domain = domains.create(
name=fauxfactory.gen_alpha(),
description=fauxfactory.gen_alpha(),
enabled=enabled)
request.addfinalizer(domain.delete_if_exists)
assert domain.exists
view = navigate_to(domain, 'Details')
if enabled:
assert 'Disabled' not in view.title.text
else:
assert 'Disabled' in view.title.text
updated_description = "editdescription{}".format(fauxfactory.gen_alpha())
with update(domain):
domain.description = updated_description
view = navigate_to(domain, 'Edit')
assert view.description.value == updated_description
assert domain.exists
domain.delete(cancel=True)
assert domain.exists
domain.delete()
assert not domain.exists
@pytest.mark.tier(1)
def test_domain_delete_from_table(request):
domains = DomainCollection()
generated = []
for _ in range(3):
domain = domains.create(
name=fauxfactory.gen_alpha(),
description=fauxfactory.gen_alpha(),
enabled=True)
request.addfinalizer(domain.delete_if_exists)
generated.append(domain)
domains.delete(*generated)
for domain in generated:
assert not domain.exists
@pytest.mark.tier(2)
def test_duplicate_domain_disallowed(request):
domains = DomainCollection()
domain = domains.create(
name=fauxfactory.gen_alpha(),
description=fauxfactory.gen_alpha(),
enabled=True)
request.addfinalizer(domain.delete_if_exists)
with error.expected("Name has already been taken"):
domains.create(
name=domain.name,
description=domain.description,
enabled=domain.enabled)
@pytest.mark.tier(2)
def test_cannot_delete_builtin():
domains = DomainCollection()
manageiq_domain = domains.instantiate(name='ManageIQ')
details_view = navigate_to(manageiq_domain, 'Details')
if domains.appliance.version < '5.7':
assert details_view.configuration.is_displayed
assert 'Remove this Domain' not in details_view.configuration.items
else:
assert not details_view.configuration.is_displayed
@pytest.mark.tier(2)
def test_cannot_edit_builtin():
domains = DomainCollection()
manageiq_domain = domains.instantiate(name='ManageIQ')
details_view = navigate_to(manageiq_domain, 'Details')
if domains.appliance.version < '5.7':
assert details_view.configuration.is_displayed
assert not details_view.configuration.item_enabled('Edit this Domain')
else:
assert not details_view.configuration.is_displayed
@pytest.mark.tier(2)
def test_domain_name_wrong():
domains = DomainCollection()
with error.expected('Name may contain only'):
domains.create(name='with space')
@pytest.mark.tier(2)
def test_domain_lock_unlock(request):
domains = DomainCollection()
domain = domains.create(
name=fauxfactory.gen_alpha(),
description=fauxfactory.gen_alpha(),
enabled=True)
request.addfinalizer(domain.delete)
ns1 = domain.namespaces.create(name='ns1')
ns2 = ns1.namespaces.create(name='ns2')
cls = ns2.classes.create(name='class1')
cls.schema.add_field(name='myfield', type='Relationship')
inst = cls.instances.create(name='inst')
meth = cls.methods.create(name='meth', script='$evm')
# Lock the domain
domain.lock()
# Check that nothing is editable
# namespaces
details = navigate_to(ns1, 'Details')
assert not details.configuration.is_displayed
details = navigate_to(ns2, 'Details')
assert not details.configuration.is_displayed
# class
details = navigate_to(cls, 'Details')
assert details.configuration.items == ['Copy selected Instances']
assert not details.configuration.item_enabled('Copy selected Instances')
details.schema.select()
assert not details.configuration.is_displayed
# instance
details = navigate_to(inst, 'Details')
assert details.configuration.items == ['Copy this Instance']
# method
details = navigate_to(meth, 'Details')
assert details.configuration.items == ['Copy this Method']
# Unlock it
domain.unlock()
# Check that it is editable
with update(ns1):
ns1.name = 'UpdatedNs1'
assert ns1.exists
with update(ns2):
ns2.name = 'UpdatedNs2'
assert ns2.exists
with update(cls):
cls.name = 'UpdatedClass'
assert cls.exists
cls.schema.add_field(name='myfield2', type='Relationship')
with update(inst):
inst.name = 'UpdatedInstance'
assert inst.exists
with update(meth):
meth.name = 'UpdatedMethod'
assert meth.exists
| rlbabyuk/integration_tests | cfme/tests/automate/test_domain.py | Python | gpl-2.0 | 5,084 |
"""
Copyright (2010-2014) INCUBAID BVBA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ArakoonExceptions import ArakoonInvalidArguments
import ArakoonProtocol
import logging
from functools import wraps
class SignatureValidator :
def __init__ (self, *args ):
self.param_types = args
self.param_native_type_mapping = {
'int': int,
'string': str,
'bool': bool
}
def __call__ (self, f ):
@wraps(f)
def my_new_f ( *args, **kwargs ) :
new_args = list( args[1:] )
missing_args = f.func_code.co_varnames[len(args):]
for missing_arg in missing_args:
if( len(new_args) == len(self.param_types) ) :
break
if( kwargs.has_key(missing_arg) ) :
pos = f.func_code.co_varnames.index( missing_arg )
# if pos > len(new_args):
# new_args.append( None )
new_args.insert(pos, kwargs[missing_arg])
del kwargs[missing_arg]
if len( kwargs ) > 0:
raise ArakoonInvalidArguments( f.func_name, list(kwargs.iteritems()) )
i = 0
error_key_values = []
for (arg, arg_type) in zip(new_args, self.param_types) :
if not self.validate(arg, arg_type):
error_key_values.append( (f.func_code.co_varnames[i+1],new_args[i]) )
i += 1
if len(error_key_values) > 0 :
raise ArakoonInvalidArguments( f.func_name, error_key_values )
return f( args[0], *new_args )
return my_new_f
def validate(self,arg,arg_type):
if self.param_native_type_mapping.has_key( arg_type ):
return isinstance(arg,self.param_native_type_mapping[arg_type] )
elif arg_type == 'string_option' :
return isinstance( arg, str ) or arg is None
elif arg_type == 'sequence' :
return isinstance( arg, ArakoonProtocol.Sequence )
else:
raise RuntimeError( "Invalid argument type supplied: %s" % arg_type )
| openvstorage/arakoon | src/client/python/ArakoonValidators.py | Python | apache-2.0 | 2,748 |
from django.conf.urls.defaults import *
urlpatterns = patterns('member.views',
url(r'^$', 'login', name='passport_index'),
url(r'^register/$', 'register', name='passport_register'),
url(r'^login/$', 'login', name='passport_login'),
url(r'^logout/$', 'logout', name='passport_logout'),
url(r'^active/$', 'active', name='passport_active'),
url(r'^forget/$', 'forget', name='passport_forget'),
url(r'^profile/$', 'profile', name='passport_profile'),
)
| masiqi/douquan | member/urls.py | Python | mit | 478 |
# -*- coding: utf-8 -*-
# Parsing a fasta file.
# Author - Janu Verma
# jv367@cornell.edu
import sys
from sequenceOperations import SequenceManipulation
from trimming import Trimming
class FastaParser:
"""
Parses a FASTA file to extract the sequences and header information, if any.
Parameters
----------
fasta_file : Fasta file to be parsed.
Example
-------
>>> import sys
>>> input_file = sys.argv[1]
>>> out = FastaParser(input_file)
>>> seqDict = out.sequenceDict()
>>> print len(seqDict.keys())
"""
def __init__(self, fasta_file):
self.ff = fasta_file
def readFasta(self, fastaFile):
"""
Reads and parser the FASTA file.
Parameters
----------
fastaFile - A FASTA file.
Returns
------
Generator object containing sequences.
"""
name, seq = None, []
for line in fastaFile:
line = line.rstrip()
if (line.startswith(">")):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
def sequenceDict(self):
"""
Creates a dictionary of sequences with their header.
Returns
-------
A dictionary of sequences.
"""
with open(self.ff) as fastaFile:
sequences = {}
for name, seq in self.readFasta(fastaFile):
sequences[name] = seq
return sequences
def seqNames(self):
"""
Names/Headers of all the sequences.
Returns
-------
A list of names of all the sequences in the FASTA file.
"""
seqDict = self.sequenceDict()
return seqDict.keys()
def seqFromName(self, name):
"""
Extract the sequence corresponding to the given name.
Parameters
---------
name : Name of the sequence to be retrieved.
Returns
-------
Sequence corresponding to the input name.
"""
seqDict = self.sequenceDict()
return seqDict[name]
def reverseComplement(self, nameSeq):
"""
Compute the reverse complement of a given sequence.
Parameters
----------
        nameSeq : Name of the sequence whose reverse complement is to be computed.
Returns
-------
sequence which is the reverse complement of the input sequence.
"""
seqDict = self.sequenceDict()
sequence = seqDict[nameSeq]
new_seq = SequenceManipulation(sequence)
return new_seq.reverseComplement()
def maskSeq(self, name, interval, toLower=False, maskingChar='N'):
"""
Masks the sequence based on the given interval.
Parameters
---------
name: Name/header of the sequence.
interval: A tuple containing the start and end positions for the masking.
toLower: If True, the sequence in the interval is converted to lower case bases.
Default is False.
maskingChar : Masking character. Default is 'N'.
Returns
-------
Masked sequence.
"""
seqDict = self.sequenceDict()
sequence = seqDict[name]
masker = SequenceManipulation(sequence)
        return masker.maskSequence(interval, toLower=toLower, maskingChar=maskingChar)
def trimSeq(self, name, interval, quality = None):
"""
Trims the sequence from both sides based on the interval.
Parameters
----------
name : Name/header of the sequence to be trimmed.
interval : The interval containing the number of bp's to be trimmed from left and right side respectively.
Returns
-------
Trimmed sequence.
"""
seqDict = self.sequenceDict()
sequence = seqDict[name]
trimmer = Trimming(sequence, quality)
return trimmer.trimSequence(interval)
def maskAll(self, intervals, toLower=False, maskingChar='N'):
"""
Masks the sequences in the FASTA file based on the given intervals.
Parameters
---------
intervals: A list of tuples containing the start and end positions for the masking.
toLower: If True, the sequence in the interval is converted to lower case bases.
Default is False.
maskingChar : Masking character. Default is 'N'.
Returns
-------
Masked sequences.
"""
seqDict = self.sequenceDict()
for i in range(len(seqDict.keys())):
x = seqDict.keys()[i]
interval = intervals[i]
            print self.maskSeq(x, interval, toLower=toLower, maskingChar=maskingChar)
return ''
def trimAll(self, intervals, quality=None):
"""
Trims all the sequence in the FASTA file from both sides based on the intervals.
Parameters
----------
interval : A list of tuples containing the number of bp's to be trimmed from left and right side respectively.
Returns
-------
Trimmed sequences.
"""
seqDict = self.sequenceDict()
for i in range(len(seqDict.keys())):
x = seqDict.keys()[i]
interval = intervals[i]
            print self.trimSeq(x, interval, quality=quality)
return ''
def reverseComplementAll(self):
"""
Compute the reverse complements of all the sequences in the given FASTA file.
        Returns
        -------
        Prints the reverse complement of each sequence in the file.
"""
seqDict = self.sequenceDict()
for i in range(len(seqDict.keys())):
x = seqDict.keys()[i]
print self.reverseComplement(x)
return ''
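# Editorial usage sketch (the file name 'reads.fasta' and the intervals are
# hypothetical, chosen only to illustrate the helpers above; this function is
# never called by the module).
def _example_usage():
    parser = FastaParser('reads.fasta')
    print parser.seqNames()
    parser.maskAll([(10, 50), (0, 25)], toLower=True)
    parser.trimAll([(5, 5), (0, 10)])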
| Jverma/fastAQ | fastAQ/fastaInfo.py | Python | mit | 5,057 |
#
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2010-2016 Joanna Rutkowska <joanna@invisiblethingslab.com>
# Copyright (C) 2013-2016 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
# Copyright (C) 2014-2016 Wojtek Porczyk <woju@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
''' This module contains the NetVMMixin '''
import ipaddress
import os
import re
import libvirt # pylint: disable=import-error
import qubes
import qubes.config
import qubes.events
import qubes.firewall
import qubes.exc
def _setter_mac(self, prop, value):
''' Helper for setting the MAC address '''
# pylint: disable=unused-argument
if not isinstance(value, str):
raise ValueError('MAC address must be a string')
value = value.lower()
if re.match(r"^([0-9a-f][0-9a-f]:){5}[0-9a-f][0-9a-f]$", value) is None:
raise ValueError('Invalid MAC address value')
return value
def _default_ip(self):
if not self.is_networked():
return None
if self.netvm is not None:
return self.netvm.get_ip_for_vm(self) # pylint: disable=no-member
return self.get_ip_for_vm(self)
def _default_ip6(self):
if not self.is_networked():
return None
if not self.features.check_with_netvm('ipv6', False):
return None
if self.netvm is not None:
return self.netvm.get_ip6_for_vm(self) # pylint: disable=no-member
return self.get_ip6_for_vm(self)
def _setter_netvm(self, prop, value):
# pylint: disable=unused-argument
if value is None:
return None
if not value.provides_network:
raise qubes.exc.QubesValueError(
'The {!s} qube does not provide network'.format(value))
# skip check for netvm loops during qubes.xml loading, to avoid tricky
# loading order
if self.events_enabled:
if value is self \
or value in self.app.domains.get_vms_connected_to(self):
raise qubes.exc.QubesValueError(
'Loops in network are unsupported')
return value
def _setter_provides_network(self, prop, value):
value = qubes.property.bool(self, prop, value)
if not value:
if list(self.connected_vms):
raise qubes.exc.QubesValueError(
'The qube is still used by other qubes, change theirs '
'\'netvm\' first')
return value
class NetVMMixin(qubes.events.Emitter):
''' Mixin containing network functionality '''
mac = qubes.property('mac', type=str,
default='00:16:3e:5e:6c:00',
setter=_setter_mac,
doc='MAC address of the NIC emulated inside VM')
ip = qubes.property('ip', type=ipaddress.IPv4Address,
default=_default_ip,
doc='IP address of this domain.')
ip6 = qubes.property('ip6', type=ipaddress.IPv6Address,
default=_default_ip6,
doc='IPv6 address of this domain.')
# CORE2: swallowed uses_default_netvm
netvm = qubes.VMProperty('netvm', load_stage=4, allow_none=True,
default=(lambda self: self.app.default_netvm),
setter=_setter_netvm,
doc='''VM that provides network connection to this domain. When
`None`, machine is disconnected. When absent, domain uses default
NetVM.''')
provides_network = qubes.property('provides_network', default=False,
type=bool, setter=_setter_provides_network,
doc='''If this domain can act as network provider (formerly known as
NetVM or ProxyVM)''')
@property
def firewall_conf(self):
return 'firewall.xml'
#
# used in networked appvms or proxyvms (netvm is not None)
#
@qubes.stateless_property
def visible_ip(self):
'''IP address of this domain as seen by the domain.'''
return self.features.check_with_template('net.fake-ip', None) or \
self.ip
@qubes.stateless_property
def visible_ip6(self):
'''IPv6 address of this domain as seen by the domain.'''
return self.ip6
@qubes.stateless_property
def visible_gateway(self):
'''Default gateway of this domain as seen by the domain.'''
return self.features.check_with_template('net.fake-gateway', None) or \
(self.netvm.gateway if self.netvm else None)
@qubes.stateless_property
def visible_gateway6(self):
'''Default (IPv6) gateway of this domain as seen by the domain.'''
if self.features.check_with_netvm('ipv6', False):
return self.netvm.gateway6 if self.netvm else None
return None
@qubes.stateless_property
def visible_netmask(self):
'''Netmask as seen by the domain.'''
return self.features.check_with_template('net.fake-netmask', None) or \
(self.netvm.netmask if self.netvm else None)
#
# used in netvms (provides_network=True)
# those properties and methods are most likely accessed as vm.netvm.<prop>
#
@staticmethod
def get_ip_for_vm(vm):
'''Get IP address for (appvm) domain connected to this (netvm) domain.
'''
import qubes.vm.dispvm # pylint: disable=redefined-outer-name
if isinstance(vm, qubes.vm.dispvm.DispVM):
return ipaddress.IPv4Address('10.138.{}.{}'.format(
(vm.dispid >> 8) & 0xff, vm.dispid & 0xff))
# VM technically can get address which ends in '.0'. This currently
# does not happen, because qid < 253, but may happen in the future.
return ipaddress.IPv4Address('10.137.{}.{}'.format(
(vm.qid >> 8) & 0xff, vm.qid & 0xff))
@staticmethod
def get_ip6_for_vm(vm):
'''Get IPv6 address for (appvm) domain connected to this (netvm) domain.
Default address is constructed with Qubes-specific site-local prefix,
and IPv4 suffix (0xa89 is 10.137.).
'''
import qubes.vm.dispvm # pylint: disable=redefined-outer-name
if isinstance(vm, qubes.vm.dispvm.DispVM):
return ipaddress.IPv6Address('{}::a8a:{:x}'.format(
qubes.config.qubes_ipv6_prefix, vm.dispid))
return ipaddress.IPv6Address('{}::a89:{:x}'.format(
qubes.config.qubes_ipv6_prefix, vm.qid))
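    # Editorial worked example: both mappings are deterministic functions of
    # the qube's numeric ID, e.g. qid == 5 gives 10.137.0.5 (and <prefix>::a89:5),
    # while a disposable with dispid == 300 gives 10.138.1.44, since
    # 300 >> 8 == 1 and 300 & 0xff == 44.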
@qubes.stateless_property
def gateway(self):
'''Gateway for other domains that use this domain as netvm.'''
return self.visible_ip if self.provides_network else None
@qubes.stateless_property
def gateway6(self):
'''Gateway (IPv6) for other domains that use this domain as netvm.'''
if self.features.check_with_netvm('ipv6', False):
return self.visible_ip6 if self.provides_network else \
None
return None
@property
def netmask(self):
'''Netmask for gateway address.'''
return '255.255.255.255' if self.is_networked() else None
@property
def connected_vms(self):
''' Return a generator containing all domains connected to the current
NetVM.
'''
for vm in self.app.domains:
if getattr(vm, 'netvm', None) is self:
yield vm
#
# used in both
#
@property
def dns(self):
'''Secondary DNS server set up for this domain.'''
if self.netvm is not None or self.provides_network:
return (
'10.139.1.1',
'10.139.1.2',
)
return None
def __init__(self, *args, **kwargs):
self._firewall = None
super().__init__(*args, **kwargs)
@qubes.events.handler('domain-load')
def on_domain_load_netvm_loop_check(self, event):
# pylint: disable=unused-argument
# make sure there are no netvm loops - which could cause qubesd
# looping infinitely
if self is self.netvm:
self.log.error(
'vm \'%s\' network-connected to itself, breaking the '
'connection', self.name)
self.netvm = None
elif self.netvm in self.app.domains.get_vms_connected_to(self):
self.log.error(
'netvm loop detected on \'%s\', breaking the connection',
self.name)
self.netvm = None
@qubes.events.handler('domain-shutdown')
def on_domain_shutdown(self, event, **kwargs):
'''Cleanup network interfaces of connected, running VMs.
This will allow re-reconnecting them cleanly later.
'''
# pylint: disable=unused-argument
for vm in self.connected_vms:
if not vm.is_running():
continue
try:
vm.detach_network()
except (qubes.exc.QubesException, libvirt.libvirtError):
# ignore errors
pass
@qubes.events.handler('domain-start')
def on_domain_started(self, event, **kwargs):
'''Connect this domain to its downstream domains. Also reload firewall
in its netvm.
This is needed when starting netvm *after* its connected domains.
''' # pylint: disable=unused-argument
if self.netvm:
self.netvm.reload_firewall_for_vm(self) # pylint: disable=no-member
for vm in self.connected_vms:
if not vm.is_running():
continue
vm.log.info('Attaching network')
try:
vm.attach_network()
except (qubes.exc.QubesException, libvirt.libvirtError):
vm.log.warning('Cannot attach network', exc_info=1)
@qubes.events.handler('domain-pre-shutdown')
def on_domain_pre_shutdown(self, event, force=False):
''' Checks before NetVM shutdown if any connected domains are running.
If `force` is `True` tries to detach network interfaces of connected
vms
''' # pylint: disable=unused-argument
connected_vms = [vm for vm in self.connected_vms if vm.is_running()]
if connected_vms and not force:
raise qubes.exc.QubesVMError(self,
'There are other VMs connected to this VM: {}'.format(
', '.join(vm.name for vm in connected_vms)))
def attach_network(self):
'''Attach network in this machine to it's netvm.'''
if not self.is_running():
raise qubes.exc.QubesVMNotRunningError(self)
if self.netvm is None:
raise qubes.exc.QubesVMError(self,
'netvm should not be {}'.format(self.netvm))
if not self.netvm.is_running(): # pylint: disable=no-member
# pylint: disable=no-member
self.log.info('Starting NetVM ({0})'.format(self.netvm.name))
self.netvm.start()
self.netvm.set_mapped_ip_info_for_vm(self)
self.libvirt_domain.attachDevice(
self.app.env.get_template('libvirt/devices/net.xml').render(
vm=self))
def detach_network(self):
'''Detach machine from it's netvm'''
if not self.is_running():
raise qubes.exc.QubesVMNotRunningError(self)
if self.netvm is None:
raise qubes.exc.QubesVMError(self,
'netvm should not be {}'.format(self.netvm))
self.libvirt_domain.detachDevice(
self.app.env.get_template('libvirt/devices/net.xml').render(
vm=self))
def is_networked(self):
'''Check whether this VM can reach network (firewall notwithstanding).
:returns: :py:obj:`True` if is machine can reach network, \
:py:obj:`False` otherwise.
:rtype: bool
'''
if self.provides_network:
return True
return self.netvm is not None
def reload_firewall_for_vm(self, vm):
''' Reload the firewall rules for the vm '''
if not self.is_running():
return
for addr_family in (4, 6):
ip = vm.ip6 if addr_family == 6 else vm.ip
if ip is None:
continue
base_dir = '/qubes-firewall/{}/'.format(ip)
            # remove old entries if any (but don't touch the base empty entry -
            # it would trigger a reload right away)
self.untrusted_qdb.rm(base_dir)
# write new rules
for key, value in vm.firewall.qdb_entries(
addr_family=addr_family).items():
self.untrusted_qdb.write(base_dir + key, value)
# signal its done
self.untrusted_qdb.write(base_dir[:-1], '')
def set_mapped_ip_info_for_vm(self, vm):
'''
Set configuration to possibly hide real IP from the VM.
This needs to be done before executing 'script'
(`/etc/xen/scripts/vif-route-qubes`) in network providing VM
'''
# add info about remapped IPs (VM IP hidden from the VM itself)
mapped_ip_base = '/mapped-ip/{}'.format(vm.ip)
if vm.visible_ip:
self.untrusted_qdb.write(mapped_ip_base + '/visible-ip',
str(vm.visible_ip))
else:
self.untrusted_qdb.rm(mapped_ip_base + '/visible-ip')
if vm.visible_gateway:
self.untrusted_qdb.write(mapped_ip_base + '/visible-gateway',
str(vm.visible_gateway))
else:
self.untrusted_qdb.rm(mapped_ip_base + '/visible-gateway')
def reload_connected_ips(self):
'''
Update list of IPs possibly connected to this machine.
This is used by qubes-firewall to implement anti-spoofing.
'''
connected_ips = [str(vm.visible_ip) for vm in self.connected_vms
if vm.visible_ip is not None]
connected_ips6 = [str(vm.visible_ip6) for vm in self.connected_vms
if vm.visible_ip6 is not None]
self.untrusted_qdb.write(
'/connected-ips',
' '.join(connected_ips))
self.untrusted_qdb.write(
'/connected-ips6',
' '.join(connected_ips6))
@qubes.events.handler('property-pre-reset:netvm')
def on_property_pre_reset_netvm(self, event, name, oldvalue=None):
''' Sets the the NetVM to default NetVM '''
# pylint: disable=unused-argument
# we are changing to default netvm
newvalue = type(self).netvm.get_default(self)
# check for netvm loop
_setter_netvm(self, type(self).netvm, newvalue)
if newvalue == oldvalue:
return
self.fire_event('property-pre-set:netvm', pre_event=True,
name='netvm', newvalue=newvalue, oldvalue=oldvalue)
@qubes.events.handler('property-reset:netvm')
def on_property_reset_netvm(self, event, name, oldvalue=None):
''' Sets the the NetVM to default NetVM '''
# pylint: disable=unused-argument
# we are changing to default netvm
newvalue = self.netvm
if newvalue == oldvalue:
return
self.fire_event('property-set:netvm',
name='netvm', newvalue=newvalue, oldvalue=oldvalue)
@qubes.events.handler('property-pre-set:netvm')
def on_property_pre_set_netvm(self, event, name, newvalue, oldvalue=None):
''' Run sanity checks before setting a new NetVM '''
# pylint: disable=unused-argument
if newvalue is not None:
if not self.app.vmm.offline_mode \
and self.is_running() and not newvalue.is_running():
raise qubes.exc.QubesVMNotStartedError(newvalue,
'Cannot dynamically attach to stopped NetVM: {!r}'.format(
newvalue))
# don't check oldvalue, because it's missing if it was default
if self.netvm is not None:
if self.is_running():
self.detach_network()
@qubes.events.handler('property-set:netvm')
def on_property_set_netvm(self, event, name, newvalue, oldvalue=None):
''' Replaces the current NetVM with a new one and fires
net-domain-connect event
'''
# pylint: disable=unused-argument
if oldvalue is not None and oldvalue.is_running():
oldvalue.reload_connected_ips()
if newvalue is None:
return
if newvalue.is_running():
newvalue.reload_connected_ips()
if self.is_running():
# refresh IP, DNS etc
self.create_qdb_entries()
self.attach_network()
newvalue.fire_event('net-domain-connect', vm=self)
@qubes.events.handler('net-domain-connect')
def on_net_domain_connect(self, event, vm):
''' Reloads the firewall config for vm '''
# pylint: disable=unused-argument
self.reload_firewall_for_vm(vm)
@qubes.events.handler('property-set:ip', 'property-reset:ip')
def on_property_set_ip(self, _event, name, newvalue=None, oldvalue=None):
# pylint: disable=unused-argument
if newvalue == oldvalue:
return
if self.provides_network:
self.fire_event('property-reset:gateway', name='gateway')
self.fire_event('property-reset:visible_ip', name='visible_ip')
for vm in self.connected_vms:
vm.fire_event(
'property-reset:visible_gateway', name='visible_gateway')
    @qubes.events.handler('property-set:ip6', 'property-reset:ip6')
def on_property_set_ip6(self, _event, name, newvalue=None, oldvalue=None):
# pylint: disable=unused-argument
if newvalue == oldvalue:
return
if self.provides_network:
self.fire_event('property-reset:gateway6', name='gateway6')
self.fire_event('property-reset:visible_ip6', name='visible_ip6')
for vm in self.connected_vms:
vm.fire_event(
'property-reset:visible_gateway6', name='visible_gateway6')
@qubes.events.handler('feature-set:net.fake-ip')
def on_feature_set_net_fake_ip(self, event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument
if oldvalue == newvalue:
return
self.fire_event('property-reset:visible_ip', name='visible_ip')
for vm in self.connected_vms:
vm.fire_event(
'property-reset:visible_gateway', name='visible_gateway')
@qubes.events.handler('feature-set:ipv6')
def on_feature_set_ipv6(self, event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument
if oldvalue == newvalue:
return
self.fire_event('property-reset:visible_ip6', name='visible_ip6')
for vm in self.connected_vms:
vm.fire_event(
'property-reset:visible_gateway6', name='visible_gateway6')
@qubes.events.handler('property-set:provides_network')
def on_property_set_provides(
self, _event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument
if newvalue == oldvalue:
return
self.fire_event('property-reset:gateway', name='gateway')
self.fire_event('property-reset:gateway6', name='gateway6')
@qubes.events.handler('domain-qdb-create')
def on_domain_qdb_create(self, event):
''' Fills the QubesDB with firewall entries. '''
# pylint: disable=unused-argument
# Keep the following in sync with on_firewall_changed.
self.reload_connected_ips()
for vm in self.connected_vms:
if vm.is_running():
self.set_mapped_ip_info_for_vm(vm)
self.reload_firewall_for_vm(vm)
@qubes.events.handler('firewall-changed', 'domain-spawn')
def on_firewall_changed(self, event, **kwargs):
''' Reloads the firewall if vm is running and has a NetVM assigned '''
# pylint: disable=unused-argument
if self.is_running() and self.netvm:
self.netvm.reload_connected_ips()
self.netvm.set_mapped_ip_info_for_vm(self)
self.netvm.reload_firewall_for_vm(self) # pylint: disable=no-member
# CORE2: swallowed get_firewall_conf, write_firewall_conf,
# get_firewall_defaults
@property
def firewall(self):
if self._firewall is None:
self._firewall = qubes.firewall.Firewall(self)
return self._firewall
def has_firewall(self):
''' Return `True` if there are some vm specific firewall rules set '''
return os.path.exists(os.path.join(self.dir_path, self.firewall_conf))
| marmarek/qubes-core-admin | qubes/vm/mix/net.py | Python | lgpl-2.1 | 21,077 |
import os
import flask
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_w():
s = ''
s = s + "<a href=/dir?path=/> Browse </a> <br> "
s = s + "<a href=/plot_caffe > Plot <a> <br> "
return s
@app.route('/plot_caffe')
def plot_caffe():
    # run Caffe's plot_training_log helper for each chart type (0-7) on the
    # requested log file(s) and embed the generated PNGs in the response
    os.chdir('./tmp')
log_files = flask.request.args.get('log_files')
if log_files is None:
log_files = '/tmp/caffe.INFO'
s = '<a href=/ > Main </a> <br> ' + log_files
for id in range(0, 8):
s = s + os.popen('/opt/caffe/tools/extra/plot_training_log.py.example ' + str(id) + ' ' + str(id) + '.png ' + log_files ).read()
s = s + ' <img src=/get?path=tmp/' + str(id) + '.png /> <br> '
os.chdir('../')
return s
@app.route('/get')
def get():
return flask.send_file(flask.request.args.get('path'), as_attachment=True)
@app.route('/dir')
def dir():
s = '<a href=/> Main </a><br> '
s = s + 'Dir: '
path = flask.request.args.get('path')
p = os.path.abspath(os.path.join(path, os.pardir))
s = s + '<a href=/dir?path=' + p + '> UUUUPPPPPP </a> <br> '
files = os.listdir(path)
for f in files:
p = path + '/' + f
if os.path.isfile(p) == False:
s = s + '>>>>>>>>>>>>>' + '<a href=/dir?path=' + p + '>' + f + '</a> <br> '
s = s + 'Files:<br> '
for f in files:
p = path + '/' + f
if os.path.isfile(p):
s = s + '>>>>>>>>>>>>>' + '<a href=/get?path=' + p + '>' + f + '</a> <br> '
return s
@app.route('/cmd')
def cmd():
    # run an arbitrary shell command taken from the 'cmd' query parameter and
    # return its output as plain text (no input sanitization is performed)
    return flask.Response(os.popen(flask.request.args.get('cmd')).read(), mimetype='text/plain')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| lugiavn/caffe_viz | main.py | Python | bsd-3-clause | 1,571 |
import numpy as np
from mpi4py import MPI
from pySDC.core.Controller import controller
from pySDC.core.Errors import ControllerError
from pySDC.core.Step import step
class controller_MPI(controller):
"""
PFASST controller, running parallel version of PFASST in blocks (MG-style)
"""
def __init__(self, controller_params, description, comm):
"""
Initialization routine for PFASST controller
Args:
controller_params: parameter set for the controller and the step class
description: all the parameters to set up the rest (levels, problems, transfer, ...)
comm: MPI communicator
"""
# call parent's initialization routine
super(controller_MPI, self).__init__(controller_params)
# create single step per processor
self.S = step(description)
# pass communicator for future use
self.comm = comm
num_procs = self.comm.Get_size()
rank = self.comm.Get_rank()
# insert data on time communicator to the steps (helpful here and there)
self.S.status.time_size = num_procs
if self.params.dump_setup and rank == 0:
self.dump_setup(step=self.S, controller_params=controller_params, description=description)
num_levels = len(self.S.levels)
# add request handler for status send
self.req_status = None
# add request handle container for isend
self.req_send = [None] * num_levels
self.req_ibcast = None
self.req_diff = None
if num_procs > 1 and num_levels > 1:
for L in self.S.levels:
if not L.sweep.coll.right_is_node or L.sweep.params.do_coll_update:
raise ControllerError("For PFASST to work, we assume uend^k = u_M^k")
if num_levels == 1 and self.params.predict_type is not None:
self.logger.warning('you have specified a predictor type but only a single level.. '
'predictor will be ignored')
def run(self, u0, t0, Tend):
"""
Main driver for running the parallel version of SDC, MSSDC, MLSDC and PFASST
Args:
u0: initial values
t0: starting time
Tend: ending time
Returns:
end values on the finest level
stats object containing statistics for each step, each level and each iteration
"""
# reset stats to prevent double entries from old runs
self.hooks.reset_stats()
# find active processes and put into new communicator
rank = self.comm.Get_rank()
num_procs = self.comm.Get_size()
all_dt = self.comm.allgather(self.S.dt)
all_time = [t0 + sum(all_dt[0:i]) for i in range(num_procs)]
time = all_time[rank]
        all_active = [t < Tend - 10 * np.finfo(float).eps for t in all_time]
if not any(all_active):
raise ControllerError('Nothing to do, check t0, dt and Tend')
active = all_active[rank]
if not all(all_active):
comm_active = self.comm.Split(active)
rank = comm_active.Get_rank()
num_procs = comm_active.Get_size()
else:
comm_active = self.comm
self.S.status.slot = rank
# initialize block of steps with u0
self.restart_block(num_procs, time, u0)
uend = u0
# call post-setup hook
self.hooks.post_setup(step=None, level_number=None)
# call pre-run hook
self.hooks.pre_run(step=self.S, level_number=0)
comm_active.Barrier()
# while any process still active...
while active:
while not self.S.status.done:
self.pfasst(comm_active, num_procs)
time += self.S.dt
            # broadcast uend, set new times and find active processes
tend = comm_active.bcast(time, root=num_procs - 1)
uend = self.S.levels[0].uend.bcast(root=num_procs - 1, comm=comm_active)
all_dt = comm_active.allgather(self.S.dt)
all_time = [tend + sum(all_dt[0:i]) for i in range(num_procs)]
time = all_time[rank]
            all_active = [t < Tend - 10 * np.finfo(float).eps for t in all_time]
active = all_active[rank]
if not all(all_active):
comm_active = comm_active.Split(active)
rank = comm_active.Get_rank()
num_procs = comm_active.Get_size()
self.S.status.slot = rank
# initialize block of steps with u0
self.restart_block(num_procs, time, uend)
# call post-run hook
self.hooks.post_run(step=self.S, level_number=0)
comm_active.Free()
return uend, self.hooks.return_stats()
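    # Editorial usage sketch (assumptions: 'description' and 'controller_params'
    # are complete pySDC dictionaries as set up elsewhere, e.g. in a problem
    # script; shown only to clarify how run() is typically driven):
    #
    #     comm = MPI.COMM_WORLD
    #     controller = controller_MPI(controller_params, description, comm)
    #     uinit = controller.S.levels[0].prob.u_exact(0.0)
    #     uend, stats = controller.run(u0=uinit, t0=0.0, Tend=1.0)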
def restart_block(self, size, time, u0):
"""
Helper routine to reset/restart block of (active) steps
Args:
size: number of active time steps
time: current time
u0: initial value to distribute across the steps
Returns:
block of (all) steps
"""
# store link to previous step
self.S.prev = (self.S.status.slot - 1) % size
self.S.next = (self.S.status.slot + 1) % size
# resets step
self.S.reset_step()
# determine whether I am the first and/or last in line
self.S.status.first = self.S.prev == size - 1
self.S.status.last = self.S.next == 0
        # initialize step with u0
self.S.init_step(u0)
# reset some values
self.S.status.done = False
self.S.status.iter = 0
self.S.status.stage = 'SPREAD'
for l in self.S.levels:
l.tag = None
self.req_status = None
        self.req_diff = None
        self.req_ibcast = None
self.req_send = [None] * len(self.S.levels)
self.S.status.prev_done = False
self.S.status.force_done = False
self.S.status.time_size = size
for lvl in self.S.levels:
lvl.status.time = time
lvl.status.sweep = 1
def recv(self, target, source, tag=None, comm=None):
"""
Receive function
Args:
target: level which will receive the values
source: level which initiated the send
tag: identifier to check if this message is really for me
comm: communicator
"""
req = target.u[0].irecv(source=source, tag=tag, comm=comm)
self.wait_with_interrupt(request=req)
if self.S.status.force_done:
return None
# re-evaluate f on left interval boundary
target.f[0] = target.prob.eval_f(target.u[0], target.time)
def wait_with_interrupt(self, request):
"""
Wrapper for waiting for the completion of a non-blocking communication, can be interrupted
Args:
request: request to wait for
"""
if request is not None and self.req_ibcast is not None:
while not request.Test():
if self.req_ibcast.Test():
self.logger.debug(f'{self.S.status.slot} has been cancelled during {self.S.status.stage}..')
self.S.status.stage = f'CANCELLED_{self.S.status.stage}'
self.S.status.force_done = True
return None
if request is not None:
request.Wait()
def check_iteration_estimate(self, comm):
"""
Routine to compute and check error/iteration estimation
Args:
comm: time-communicator
"""
# Compute diff between old and new values
diff_new = 0.0
L = self.S.levels[0]
for m in range(1, L.sweep.coll.num_nodes + 1):
diff_new = max(diff_new, abs(L.uold[m] - L.u[m]))
# Send forward diff
self.hooks.pre_comm(step=self.S, level_number=0)
self.wait_with_interrupt(request=self.req_diff)
if self.S.status.force_done:
return None
if not self.S.status.first:
prev_diff = np.empty(1, dtype=float)
req = comm.Irecv((prev_diff, MPI.DOUBLE), source=self.S.prev, tag=999)
self.wait_with_interrupt(request=req)
if self.S.status.force_done:
return None
self.logger.debug('recv diff: status %s, process %s, time %s, source %s, tag %s, iter %s' %
(prev_diff, self.S.status.slot, self.S.time, self.S.prev,
999, self.S.status.iter))
diff_new = max(prev_diff[0], diff_new)
if not self.S.status.last:
self.logger.debug('isend diff: status %s, process %s, time %s, target %s, tag %s, iter %s' %
(diff_new, self.S.status.slot, self.S.time, self.S.next,
999, self.S.status.iter))
tmp = np.array(diff_new, dtype=float)
self.req_diff = comm.Issend((tmp, MPI.DOUBLE), dest=self.S.next, tag=999)
self.hooks.post_comm(step=self.S, level_number=0)
# Store values from first iteration
if self.S.status.iter == 1:
self.S.status.diff_old_loc = diff_new
self.S.status.diff_first_loc = diff_new
# Compute iteration estimate
elif self.S.status.iter > 1:
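            # Explanatory note (added; not in the original source): assuming roughly
            # linear convergence with contraction factor Ltilde, the error after k
            # more iterations behaves like Ltilde**k * alpha, so solving
            # Ltilde**K * alpha = errtol for K gives K = log(errtol/alpha) / log(Ltilde);
            # the factor 1.05 below is a safety margin on that estimate.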
Ltilde_loc = min(diff_new / self.S.status.diff_old_loc, 0.9)
self.S.status.diff_old_loc = diff_new
alpha = 1 / (1 - Ltilde_loc) * self.S.status.diff_first_loc
Kest_loc = np.log(self.S.params.errtol / alpha) / np.log(Ltilde_loc) * 1.05 # Safety factor!
self.logger.debug(f'LOCAL: {L.time:8.4f}, {self.S.status.iter}: {int(np.ceil(Kest_loc))}, '
f'{Ltilde_loc:8.6e}, {Kest_loc:8.6e}, '
f'{Ltilde_loc ** self.S.status.iter * alpha:8.6e}')
Kest_glob = Kest_loc
# If condition is met, send interrupt
if np.ceil(Kest_glob) <= self.S.status.iter:
if self.S.status.last:
self.logger.debug(f'{self.S.status.slot} is done, broadcasting..')
self.hooks.pre_comm(step=self.S, level_number=0)
comm.Ibcast((np.array([1]), MPI.INT), root=self.S.status.slot).Wait()
self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=True)
self.logger.debug(f'{self.S.status.slot} is done, broadcasting done')
self.S.status.done = True
else:
self.hooks.pre_comm(step=self.S, level_number=0)
self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=True)
def check_residual(self, comm):
"""
Routine to compute and check the residual
Args:
comm: time-communicator
"""
# Update values to compute the residual
self.hooks.pre_comm(step=self.S, level_number=0)
self.wait_with_interrupt(request=self.req_send[0])
if self.S.status.force_done:
return None
self.S.levels[0].sweep.compute_end_point()
if not self.S.status.last:
self.logger.debug('isend data: process %s, stage %s, time %s, target %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.next,
0, self.S.status.iter))
self.req_send[0] = self.S.levels[0].uend.isend(dest=self.S.next, tag=self.S.status.iter, comm=comm)
if not self.S.status.first and not self.S.status.prev_done:
self.logger.debug('recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev,
0, self.S.status.iter))
self.recv(target=self.S.levels[0], source=self.S.prev, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=0)
# Compute residual and check for convergence
self.S.levels[0].sweep.compute_residual()
self.S.status.done = self.check_convergence(self.S)
# Either gather information about all status or send forward own
if self.params.all_to_done:
self.hooks.pre_comm(step=self.S, level_number=0)
self.S.status.done = comm.allreduce(sendobj=self.S.status.done, op=MPI.LAND)
self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=True)
else:
self.hooks.pre_comm(step=self.S, level_number=0)
# check if an open request of the status send is pending
self.wait_with_interrupt(request=self.req_status)
if self.S.status.force_done:
return None
# recv status
if not self.S.status.first and not self.S.status.prev_done:
tmp = np.empty(1, dtype=int)
comm.Irecv((tmp, MPI.INT), source=self.S.prev, tag=99).Wait()
self.S.status.prev_done = tmp
self.logger.debug('recv status: status %s, process %s, time %s, source %s, tag %s, iter %s' %
(self.S.status.prev_done, self.S.status.slot, self.S.time, self.S.prev,
99, self.S.status.iter))
self.S.status.done = self.S.status.done and self.S.status.prev_done
# send status forward
if not self.S.status.last:
self.logger.debug('isend status: status %s, process %s, time %s, target %s, tag %s, iter %s' %
(self.S.status.done, self.S.status.slot, self.S.time, self.S.next,
99, self.S.status.iter))
tmp = np.array(self.S.status.done, dtype=int)
self.req_status = comm.Issend((tmp, MPI.INT), dest=self.S.next, tag=99)
self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=True)
def pfasst(self, comm, num_procs):
"""
Main function including the stages of SDC, MLSDC and PFASST (the "controller")
For the workflow of this controller, check out one of our PFASST talks or the pySDC paper
Args:
comm: communicator
num_procs (int): number of parallel processes
"""
def spread():
"""
Spreading phase
"""
# first stage: spread values
self.hooks.pre_step(step=self.S, level_number=0)
# call predictor from sweeper
self.S.levels[0].sweep.predict()
if self.params.use_iteration_estimator:
                # store the previous iterate to compute the difference later on
self.S.levels[0].uold[1:] = self.S.levels[0].u[1:]
# update stage
if len(self.S.levels) > 1: # MLSDC or PFASST with predict
self.S.status.stage = 'PREDICT'
else:
self.S.status.stage = 'IT_CHECK'
def predict():
"""
Predictor phase
"""
self.hooks.pre_predict(step=self.S, level_number=0)
if self.params.predict_type is None:
pass
elif self.params.predict_type == 'fine_only':
# do a fine sweep only
self.S.levels[0].sweep.update_nodes()
elif self.params.predict_type == 'libpfasst_style':
# restrict to coarsest level
for l in range(1, len(self.S.levels)):
self.S.transfer(source=self.S.levels[l - 1], target=self.S.levels[l])
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.first:
self.logger.debug('recv data predict: process %s, stage %s, time, %s, source %s, tag %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev,
self.S.status.iter))
self.recv(target=self.S.levels[-1], source=self.S.prev, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1)
# do the sweep with new values
self.S.levels[-1].sweep.update_nodes()
self.S.levels[-1].sweep.compute_end_point()
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.last:
self.logger.debug('send data predict: process %s, stage %s, time, %s, target %s, tag %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.next,
self.S.status.iter))
self.S.levels[-1].uend.isend(dest=self.S.next, tag=self.S.status.iter, comm=comm).Wait()
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1, add_to_stats=True)
# go back to fine level, sweeping
for l in range(len(self.S.levels) - 1, 0, -1):
# prolong values
self.S.transfer(source=self.S.levels[l], target=self.S.levels[l - 1])
# on middle levels: do sweep as usual
if l - 1 > 0:
self.S.levels[l - 1].sweep.update_nodes()
# end with a fine sweep
self.S.levels[0].sweep.update_nodes()
elif self.params.predict_type == 'pfasst_burnin':
# restrict to coarsest level
for l in range(1, len(self.S.levels)):
self.S.transfer(source=self.S.levels[l - 1], target=self.S.levels[l])
for p in range(self.S.status.slot + 1):
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not p == 0 and not self.S.status.first:
self.logger.debug(
'recv data predict: process %s, stage %s, time, %s, source %s, tag %s, phase %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev,
self.S.status.iter, p))
self.recv(target=self.S.levels[-1], source=self.S.prev, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1)
# do the sweep with new values
self.S.levels[-1].sweep.update_nodes()
self.S.levels[-1].sweep.compute_end_point()
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.last:
self.logger.debug(
'send data predict: process %s, stage %s, time, %s, target %s, tag %s, phase %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.next,
self.S.status.iter, p))
self.S.levels[-1].uend.isend(dest=self.S.next, tag=self.S.status.iter, comm=comm).Wait()
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1,
add_to_stats=(p == self.S.status.slot))
# interpolate back to finest level
for l in range(len(self.S.levels) - 1, 0, -1):
self.S.transfer(source=self.S.levels[l], target=self.S.levels[l - 1])
# end this with a fine sweep
self.S.levels[0].sweep.update_nodes()
elif self.params.predict_type == 'fmg':
# TODO: implement FMG predictor
raise NotImplementedError('FMG predictor is not yet implemented')
else:
raise ControllerError('Wrong predictor type, got %s' % self.params.predict_type)
self.hooks.post_predict(step=self.S, level_number=0)
# update stage
self.S.status.stage = 'IT_CHECK'
def it_check():
"""
Key routine to check for convergence/termination
"""
if not self.params.use_iteration_estimator:
self.check_residual(comm=comm)
else:
self.check_iteration_estimate(comm=comm)
if self.S.status.force_done:
return None
if self.S.status.iter > 0:
self.hooks.post_iteration(step=self.S, level_number=0)
            # if not ready, keep doing stuff
if not self.S.status.done:
# increment iteration count here (and only here)
self.S.status.iter += 1
self.hooks.pre_iteration(step=self.S, level_number=0)
if self.params.use_iteration_estimator:
                    # store the previous iterate to compute the difference later on
self.S.levels[0].uold[1:] = self.S.levels[0].u[1:]
if len(self.S.levels) > 1: # MLSDC or PFASST
self.S.status.stage = 'IT_DOWN'
else:
if num_procs == 1 or self.params.mssdc_jac: # SDC or parallel MSSDC (Jacobi-like)
self.S.status.stage = 'IT_FINE'
else:
self.S.status.stage = 'IT_COARSE' # serial MSSDC (Gauss-like)
else:
if not self.params.use_iteration_estimator:
# Need to finish all pending isend requests. These will occur for the first active process, since
# in the last iteration the wait statement will not be called ("send and forget")
for req in self.req_send:
if req is not None:
req.Wait()
if self.req_status is not None:
self.req_status.Wait()
if self.req_diff is not None:
self.req_diff.Wait()
else:
for req in self.req_send:
if req is not None:
req.Cancel()
if self.req_status is not None:
self.req_status.Cancel()
if self.req_diff is not None:
self.req_diff.Cancel()
self.hooks.post_step(step=self.S, level_number=0)
self.S.status.stage = 'DONE'
def it_fine():
"""
Fine sweeps
"""
nsweeps = self.S.levels[0].params.nsweeps
self.S.levels[0].status.sweep = 0
# do fine sweep
for k in range(nsweeps):
self.S.levels[0].status.sweep += 1
self.hooks.pre_comm(step=self.S, level_number=0)
self.wait_with_interrupt(request=self.req_send[0])
if self.S.status.force_done:
return None
self.S.levels[0].sweep.compute_end_point()
if not self.S.status.last:
self.logger.debug('isend data: process %s, stage %s, time %s, target %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.next,
self.S.status.iter, self.S.status.iter))
self.req_send[0] = self.S.levels[0].uend.isend(dest=self.S.next, tag=self.S.status.iter, comm=comm)
if not self.S.status.first and not self.S.status.prev_done:
self.logger.debug('recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev,
self.S.status.iter, self.S.status.iter))
self.recv(target=self.S.levels[0], source=self.S.prev, tag=self.S.status.iter, comm=comm)
if self.S.status.force_done:
return None
self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=(k == nsweeps - 1))
self.hooks.pre_sweep(step=self.S, level_number=0)
self.S.levels[0].sweep.update_nodes()
self.S.levels[0].sweep.compute_residual()
self.hooks.post_sweep(step=self.S, level_number=0)
# update stage
self.S.status.stage = 'IT_CHECK'
def it_down():
"""
Go down the hierarchy from finest to coarsest level
"""
self.S.transfer(source=self.S.levels[0], target=self.S.levels[1])
# sweep and send on middle levels (not on finest, not on coarsest, though)
for l in range(1, len(self.S.levels) - 1):
nsweeps = self.S.levels[l].params.nsweeps
for _ in range(nsweeps):
self.hooks.pre_comm(step=self.S, level_number=l)
self.wait_with_interrupt(request=self.req_send[l])
if self.S.status.force_done:
return None
self.S.levels[l].sweep.compute_end_point()
if not self.S.status.last:
self.logger.debug('isend data: process %s, stage %s, time %s, target %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.next,
l * 100 + self.S.status.iter, self.S.status.iter))
self.req_send[l] = self.S.levels[l].uend.isend(dest=self.S.next,
tag=l * 100 + self.S.status.iter,
comm=comm)
if not self.S.status.first and not self.S.status.prev_done:
self.logger.debug('recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev,
l * 100 + self.S.status.iter, self.S.status.iter))
self.recv(target=self.S.levels[l], source=self.S.prev,
tag=l * 100 + self.S.status.iter,
comm=comm)
if self.S.status.force_done:
return None
self.hooks.post_comm(step=self.S, level_number=l)
self.hooks.pre_sweep(step=self.S, level_number=l)
self.S.levels[l].sweep.update_nodes()
self.S.levels[l].sweep.compute_residual()
self.hooks.post_sweep(step=self.S, level_number=l)
# transfer further down the hierarchy
self.S.transfer(source=self.S.levels[l], target=self.S.levels[l + 1])
# update stage
self.S.status.stage = 'IT_COARSE'
def it_coarse():
"""
Coarse sweep
"""
# receive from previous step (if not first)
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.first and not self.S.status.prev_done:
self.logger.debug('recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev,
(len(self.S.levels) - 1) * 100 + self.S.status.iter, self.S.status.iter))
self.recv(target=self.S.levels[-1], source=self.S.prev,
tag=(len(self.S.levels) - 1) * 100 + self.S.status.iter,
comm=comm)
if self.S.status.force_done:
return None
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1)
# do the sweep
self.hooks.pre_sweep(step=self.S, level_number=len(self.S.levels) - 1)
assert self.S.levels[-1].params.nsweeps == 1, \
'ERROR: this controller can only work with one sweep on the coarse level, got %s' % \
self.S.levels[-1].params.nsweeps
self.S.levels[-1].sweep.update_nodes()
self.S.levels[-1].sweep.compute_residual()
self.hooks.post_sweep(step=self.S, level_number=len(self.S.levels) - 1)
self.S.levels[-1].sweep.compute_end_point()
# send to next step
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.last:
self.logger.debug('isend data: process %s, stage %s, time %s, target %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.next,
(len(self.S.levels) - 1) * 100 + self.S.status.iter, self.S.status.iter))
self.req_send[-1] = \
self.S.levels[-1].uend.isend(dest=self.S.next,
tag=(len(self.S.levels) - 1) * 100 + self.S.status.iter,
comm=comm)
self.wait_with_interrupt(request=self.req_send[-1])
if self.S.status.force_done:
return None
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1, add_to_stats=True)
# update stage
if len(self.S.levels) > 1: # MLSDC or PFASST
self.S.status.stage = 'IT_UP'
else:
self.S.status.stage = 'IT_CHECK' # MSSDC
def it_up():
"""
Prolong corrections up to finest level (parallel)
"""
# receive and sweep on middle levels (except for coarsest level)
for l in range(len(self.S.levels) - 1, 0, -1):
# prolong values
self.S.transfer(source=self.S.levels[l], target=self.S.levels[l - 1])
# on middle levels: do sweep as usual
if l - 1 > 0:
nsweeps = self.S.levels[l - 1].params.nsweeps
for k in range(nsweeps):
self.hooks.pre_comm(step=self.S, level_number=l - 1)
self.wait_with_interrupt(request=self.req_send[l - 1])
if self.S.status.force_done:
return None
self.S.levels[l - 1].sweep.compute_end_point()
if not self.S.status.last:
self.logger.debug('isend data: process %s, stage %s, time %s, target %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.next,
(l - 1) * 100 + self.S.status.iter, self.S.status.iter))
self.req_send[l - 1] = \
self.S.levels[l - 1].uend.isend(dest=self.S.next,
tag=(l - 1) * 100 + self.S.status.iter,
comm=comm)
if not self.S.status.first and not self.S.status.prev_done:
self.logger.debug('recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s' %
(self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev,
(l - 1) * 100 + self.S.status.iter, self.S.status.iter))
self.recv(target=self.S.levels[l - 1], source=self.S.prev,
tag=(l - 1) * 100 + self.S.status.iter,
comm=comm)
if self.S.status.force_done:
return None
self.hooks.post_comm(step=self.S, level_number=l - 1, add_to_stats=(k == nsweeps - 1))
self.hooks.pre_sweep(step=self.S, level_number=l - 1)
self.S.levels[l - 1].sweep.update_nodes()
self.S.levels[l - 1].sweep.compute_residual()
self.hooks.post_sweep(step=self.S, level_number=l - 1)
# update stage
self.S.status.stage = 'IT_FINE'
def default():
"""
Default routine to catch wrong status
"""
raise ControllerError('Weird stage, got %s' % self.S.status.stage)
stage = self.S.status.stage
self.logger.debug(stage + ' - process ' + str(self.S.status.slot))
# Wait for interrupt, if iteration estimator is used
if self.params.use_iteration_estimator and stage == 'SPREAD' and not self.S.status.last:
done = np.empty(1)
self.req_ibcast = comm.Ibcast((done, MPI.INT), root=comm.Get_size() - 1)
# If interrupt is there, cleanup and finish
if self.params.use_iteration_estimator and not self.S.status.last and self.req_ibcast.Test():
self.logger.debug(f'{self.S.status.slot} is done..')
self.S.status.done = True
if not stage == 'IT_CHECK':
self.logger.debug(f'Rewinding {self.S.status.slot} after {stage}..')
self.S.levels[0].u[1:] = self.S.levels[0].uold[1:]
self.hooks.post_iteration(step=self.S, level_number=0)
for req in self.req_send:
if req is not None and req != MPI.REQUEST_NULL:
req.Cancel()
if self.req_status is not None and self.req_status != MPI.REQUEST_NULL:
self.req_status.Cancel()
if self.req_diff is not None and self.req_diff != MPI.REQUEST_NULL:
self.req_diff.Cancel()
self.S.status.stage = 'DONE'
self.hooks.post_step(step=self.S, level_number=0)
else:
# Start cycling, if not interrupted
switcher = {
'SPREAD': spread,
'PREDICT': predict,
'IT_CHECK': it_check,
'IT_FINE': it_fine,
'IT_DOWN': it_down,
'IT_COARSE': it_coarse,
'IT_UP': it_up
}
switcher.get(stage, default)()
| Parallel-in-Time/pySDC | pySDC/implementations/controller_classes/controller_MPI.py | Python | bsd-2-clause | 34,851 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from openstack_dashboard import exceptions
DEBUG = False
TEMPLATE_DEBUG = DEBUG
COMPRESS_OFFLINE = True
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
STATIC_ROOT = '/var/www/horizon/static'
ALLOWED_HOSTS = ['*']
with open('/etc/horizon/.secret_key_store', 'r') as f:
SECRET_KEY = f.read()
HORIZON_CONFIG = {
'dashboards': ('project', 'admin', 'settings',),
'default_dashboard': 'project',
'user_home': 'openstack_dashboard.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
}
CACHES = {
'default': {
{{#horizon.caches.memcached}}
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': [{{#nodes}}'{{.}}',{{/nodes}}]
{{/horizon.caches.memcached}} # flake8: noqa
{{^horizon.caches.memcached}}
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
{{/horizon.caches.memcached}}
}
}
{{#horizon.caches.memcached}}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
{{/horizon.caches.memcached}}
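# Illustrative note (not part of the original template; addresses are placeholders):
# with memcached nodes configured, the mustache sections above render to roughly
#   CACHES = {'default': {'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#                         'LOCATION': ['192.0.2.10:11211', '192.0.2.11:11211']}}
#   SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# and otherwise fall back to the local-memory cache backend.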
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OPENSTACK_HOST = "{{keystone.host}}"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True
}
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': True,
# NOTE: as of Grizzly this is not yet supported in Nova so enabling this
# setting will not do anything useful
'can_encrypt_volumes': False
}
OPENSTACK_NEUTRON_NETWORK = {
'enable_lb': False
}
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
TIME_ZONE = "UTC"
| rdo-management/tripleo-image-elements | elements/horizon/os-apply-config/etc/horizon/local_settings.py | Python | apache-2.0 | 2,751 |
def maximumGap(A):
ln = len(A)
if not A or ln == 1:
return 0
minimum, maximum = A[0], A[0]
for elem in A:
if elem > maximum:
maximum = elem
if elem < minimum:
minimum = elem
    # Size the bucket list so that every value from minimum to maximum has a slot;
    # each element is counted at index (elem - minimum).
    span = maximum - minimum + 1
    bucket = [0 for i in xrange(span)]
for elem in A:
bucket[elem - minimum] += 1
maxDiff = 0
previous = 0
print bucket
    for b in xrange(1, span):
if not bucket[b]:
continue
else:
diff = abs(b - previous)
previous = b
if diff > maxDiff:
maxDiff = diff
return maxDiff
A = [1,10,5]
#A = [5, 3, 1, 8, 9, 2, 4]
print maximumGap(A) | purushothamc/myibitsolutions | arrays/max_consecutive_gap.py | Python | gpl-3.0 | 735 |
"""Module containing bug report helper(s)."""
from __future__ import print_function
import json
import platform
import sys
import ssl
import idna
import urllib3
from . import __version__ as requests_version
try:
    import charset_normalizer
except ImportError:
    charset_normalizer = None
chardet = None
try:
    from urllib3.contrib import pyopenssl
except ImportError:
    pyopenssl = None
    OpenSSL = None
    cryptography = None
else:
    import OpenSSL
    import cryptography
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version}
def info():
"""Generate information for a bug report."""
try:
platform_info = {
'system': platform.system(),
'release': platform.release(),
}
except IOError:
platform_info = {
'system': 'Unknown',
'release': 'Unknown',
}
implementation_info = _implementation()
urllib3_info = {'version': urllib3.__version__}
charset_normalizer_info = {'version': None}
chardet_info = {'version': None}
if charset_normalizer:
charset_normalizer_info = {'version': charset_normalizer.__version__}
if chardet:
chardet_info = {'version': chardet.__version__}
pyopenssl_info = {
'version': None,
'openssl_version': '',
}
if OpenSSL:
pyopenssl_info = {
'version': OpenSSL.__version__,
'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
}
cryptography_info = {
'version': getattr(cryptography, '__version__', ''),
}
idna_info = {
'version': getattr(idna, '__version__', ''),
}
system_ssl = ssl.OPENSSL_VERSION_NUMBER
system_ssl_info = {
'version': '%x' % system_ssl if system_ssl is not None else ''
}
return {
'platform': platform_info,
'implementation': implementation_info,
'system_ssl': system_ssl_info,
'using_pyopenssl': pyopenssl is not None,
'using_charset_normalizer': chardet is None,
'pyOpenSSL': pyopenssl_info,
'urllib3': urllib3_info,
'chardet': chardet_info,
'charset_normalizer': charset_normalizer_info,
'cryptography': cryptography_info,
'idna': idna_info,
'requests': {
'version': requests_version,
},
}
def main():
"""Pretty-print the bug information as JSON."""
print(json.dumps(info(), sort_keys=True, indent=2))
if __name__ == '__main__':
main()
| cloudera/hue | desktop/core/ext-py3/requests-2.27.1/requests/help.py | Python | apache-2.0 | 3,920 |
#!/usr/bin/env python
# To kick off the script, run the following from the python directory:
# PYTHONPATH=`pwd` python testdaemon.py start
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import time
import stomp
import smbutil
import json
#third party libs
from daemon import runner
class SampleListener(object):
def on_message(self, headers, msg):
#logger.info(msg)
parsed_string = json.loads(msg)
logger.info(parsed_string["Function"])
logger.info(parsed_string["Parameters"])
method_name = 'number_' + str(parsed_string["Function"])
        method = getattr(self, method_name, lambda *args: "nothing")  # default must accept the argument passed below
method(parsed_string["Parameters"])
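    # Dispatch note (added comment, not in the original source): the handler name is
    # built from the "Function" field, so a message body such as
    #   {"Function": 0, "Parameters": [ ... ]}
    # is routed to number_0() with the "Parameters" list as its argument; unknown
    # function numbers fall back to the no-op default above.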
def number_0(self, parms):
for status in parms:
smbutil.set_sd(status)
logger.info(status)
def number_1(self, parms):
for item in parms:
smbutil.set_sd_from_parent(item)
logger.error(item)
def number_2(self, parms):
for item in parms:
smbutil.set_tree_ntacl(item)
logger.error(item)
def number_4(self, parms):
for item in parms:
smbutil.recalcExistingFolder(item)
class App():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '/opt/testdaemon/test2.txt'
self.stderr_path = '/opt/testdaemon/test.txt'
self.pidfile_path = '/var/run/testdaemon.pid'
self.pidfile_timeout = 5
def run(self):
conn = stomp.Connection10()
conn.set_listener('SampleListener', SampleListener())
conn.start()
conn.connect()
conn.subscribe('SampleQueue')
while True:
#Main code goes here ...
#Note that logger level needs to be set to logging.DEBUG before this shows up in the logs
#logger.debug("Debug message")
#logger.info("Info message")
#logger.warn("Warning message")
#logger.error("Error message")
time.sleep(3)
app = App()
logger = logging.getLogger("DaemonLog")
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = logging.FileHandler("/var/log/testdaemon/testdaemon.log")
handler.setFormatter(formatter)
logger.addHandler(handler)
daemon_runner = runner.DaemonRunner(app)
#This ensures that the logger file handle does not get closed during daemonization
daemon_runner.daemon_context.files_preserve=[handler.stream]
daemon_runner.do_action()
| iee/iee_fuse | testdaemon/testdaemon.py | Python | gpl-3.0 | 2,561 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @file
# @author Piotr Krysik <ptrkrysik@gmail.com>
# @section LICENSE
#
# Gr-gsm is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# Gr-gsm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gr-gsm; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import grgsm_swig as grgsm
class qa_txtime_setter (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_txtime_setter)
| 0x7678/gr-gsm | python/qa_txtime_setter.py | Python | gpl-3.0 | 1,219 |
import asyncio
from collections import namedtuple
import contextlib
import os
import subprocess
import tempfile
import yaml
from .async_helpers import create_subprocess_with_handle
from . import cache
from . import compat
from .compat import makedirs
from .error import PrintableError
DEFAULT_PARALLEL_FETCH_LIMIT = 10
DEBUG_PARALLEL_COUNT = 0
DEBUG_PARALLEL_MAX = 0
PluginDefinition = namedtuple(
'PluginDefinition',
['type', 'sync_exe', 'reup_exe', 'fields', 'required_fields',
'optional_fields', 'cache_fields'])
PluginContext = namedtuple(
'PluginContext',
['cwd', 'plugin_cache_root', 'parallelism_semaphore', 'plugin_cache_locks',
'tmp_root'])
@asyncio.coroutine
def plugin_fetch(plugin_context, module_type, module_fields, dest,
display_handle):
env = {'PERU_SYNC_DEST': dest}
yield from _plugin_job(plugin_context, module_type, module_fields, 'sync',
env, display_handle)
@asyncio.coroutine
def plugin_get_reup_fields(plugin_context, module_type, module_fields,
display_handle):
with tmp_dir(plugin_context) as output_file_dir:
output_path = os.path.join(output_file_dir, 'reup_output')
env = {'PERU_REUP_OUTPUT': output_path}
yield from _plugin_job(
plugin_context, module_type, module_fields, 'reup', env,
display_handle)
with open(output_path) as output_file:
fields = yaml.safe_load(output_file) or {}
for key, value in fields.items():
if not isinstance(key, str):
raise PluginModuleFieldError(
'reup field name must be a string: {}'.format(key))
if not isinstance(value, str):
raise PluginModuleFieldError(
'reup field value must be a string: {}'.format(value))
return fields
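# Illustrative example (assumed content, not from the original source): a plugin's
# reup step might write a YAML file containing e.g.
#     rev: 1a2b3c4d
# which the loop above turns into {'rev': '1a2b3c4d'} for the caller to merge back
# into the project's module definition.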
@asyncio.coroutine
def _plugin_job(plugin_context, module_type, module_fields, command, env,
display_handle):
# We take several locks and other context managers in here. Using an
# ExitStack saves us from indentation hell.
with contextlib.ExitStack() as stack:
definition = _get_plugin_definition(module_type, module_fields,
command)
exe = _get_plugin_exe(definition, command)
# For Windows to run scripts with the right interpreter, we need to run
# as a shell command, rather than exec.
shell_command_line = subprocess.list2cmdline([exe])
complete_env = _plugin_env(
plugin_context, definition, module_fields, command, stack)
complete_env.update(env)
# Use a lock to protect the plugin cache. It would be unsafe for two
# jobs to read/write to the same plugin cache dir at the same time. The
# lock (and the cache dir) are both keyed off the module's "cache
# fields" as defined by plugin.yaml. For plugins that don't define
# cacheable fields, there is no cache dir (it's set to /dev/null) and
# the cache lock is a no-op.
stack.enter_context((yield from _plugin_cache_lock(
plugin_context, definition, module_fields)))
# Use a semaphore to limit the number of jobs that can run in parallel.
# Most plugin fetches hit the network, and for performance reasons we
# don't want to fire off too many network requests at once. See
# DEFAULT_PARALLEL_FETCH_LIMIT. This also lets the user control
# parallelism with the --jobs flag. It's important that this is the
# last lock taken before starting a job, otherwise we might waste a job
# slot just waiting on other locks.
stack.enter_context((yield from plugin_context.parallelism_semaphore))
# We use this debug counter for our parallelism tests. It's important
# that it comes after all locks have been taken (so the job it's
# counting is actually running).
stack.enter_context(debug_parallel_count_context())
try:
yield from create_subprocess_with_handle(
shell_command_line, display_handle, cwd=plugin_context.cwd,
env=complete_env, shell=True)
except subprocess.CalledProcessError as e:
raise PluginRuntimeError(
module_type, module_fields, e.returncode, e.output)
def _get_plugin_exe(definition, command):
if command == 'sync':
exe = definition.sync_exe
elif command == 'reup':
exe = definition.reup_exe
else:
raise RuntimeError('Unrecognized command name: ' + repr(command))
if not exe:
raise PluginPermissionsError(
"Module type '{0}' does not support {1}.",
definition.type,
command)
if not os.path.exists(exe):
raise PluginPermissionsError('Plugin exe is missing: ' + exe)
if not os.access(exe, os.X_OK):
raise PluginPermissionsError('Plugin exe is not executable: ' + exe)
return exe
def _format_module_fields(module_fields):
return {'PERU_MODULE_{}'.format(name.upper()): value for
name, value in module_fields.items()}
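# For example (illustrative values only): {'url': 'http://example', 'rev': 'abc'} is
# rendered as {'PERU_MODULE_URL': 'http://example', 'PERU_MODULE_REV': 'abc'} before
# being merged into the plugin subprocess environment.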
def _validate_plugin_definition(definition, module_fields):
field_names_not_strings = [name for name in definition.fields
if not isinstance(name, str)]
if field_names_not_strings:
raise PluginModuleFieldError(
'Metadata field names must be strings: ' +
', '.join(repr(name) for name in field_names_not_strings))
missing_module_fields = definition.required_fields - module_fields.keys()
if missing_module_fields:
raise PluginModuleFieldError(
'Required module field missing: ' +
', '.join(missing_module_fields))
unknown_module_fields = module_fields.keys() - definition.fields
if unknown_module_fields:
raise PluginModuleFieldError(
'Unknown module fields: ' + ', '.join(unknown_module_fields))
def _plugin_env(plugin_context, plugin_definition, module_fields, command,
exit_stack):
env = os.environ.copy()
# First, blank out all module field vars. This prevents the calling
# environment from leaking in when optional fields are undefined.
blank_module_vars = {field: '' for field in plugin_definition.fields}
env.update(_format_module_fields(blank_module_vars))
# Then add in the fields that are actually defined.
env.update(_format_module_fields(module_fields))
# Disable buffering by default in Python subprocesses. Without this,
# plugins would usually need to do something like
# print(..., flush=True)
# or else all their progress output would get held up in the stdout buffer
# until the plugin finally exited. Plugins in other languages will need to
# be careful about this.
env['PYTHONUNBUFFERED'] = 'true'
# For plugins that use the same exe for sync and reup, make the command
# name available in the environment.
env['PERU_PLUGIN_COMMAND'] = command
# Create a directory for plugins' temporary files.
env['PERU_PLUGIN_TMP'] = exit_stack.enter_context(tmp_dir(plugin_context))
# Create a persistent cache dir for saved files, like repo clones.
env['PERU_PLUGIN_CACHE'] = _plugin_cache_path(
plugin_context, plugin_definition, module_fields)
return env
@asyncio.coroutine
def _noop_lock():
return contextlib.ExitStack() # a no-op context manager
def _plugin_cache_lock(plugin_context, definition, module_fields):
if not definition.cache_fields:
# This plugin is not cacheable.
return _noop_lock()
key = _plugin_cache_key(definition, module_fields)
return plugin_context.plugin_cache_locks[key]
def _plugin_cache_path(plugin_context, definition, module_fields):
if not definition.cache_fields:
# This plugin is not cacheable.
return os.devnull
key = _plugin_cache_key(definition, module_fields)
plugin_cache = os.path.join(
plugin_context.plugin_cache_root, definition.type, key)
makedirs(plugin_cache)
return plugin_cache
def _plugin_cache_key(definition, module_fields):
assert definition.cache_fields, "Can't compute key for uncacheable type."
return cache.compute_key({
'type': definition.type,
'cacheable_fields': {field: module_fields.get(field, None)
for field in definition.cache_fields},
})
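# Note (added comment): because the key depends only on the module type and the
# values of its cacheable fields, two modules sharing e.g. the same repo URL reuse
# one cache directory and one cache lock, while differing values are isolated.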
def _get_plugin_definition(module_type, module_fields, command):
root = _find_plugin_dir(module_type)
metadata_path = os.path.join(root, 'plugin.yaml')
if not os.path.isfile(metadata_path):
raise PluginMetadataMissingError(
'No metadata file found for plugin at path: {}'.format(root))
# Read the metadata document.
with open(metadata_path) as metafile:
metadoc = yaml.safe_load(metafile) or {}
sync_exe = os.path.join(root, metadoc.pop('sync exe'))
reup_exe = (None if 'reup exe' not in metadoc
else os.path.join(root, metadoc.pop('reup exe')))
required_fields = frozenset(metadoc.pop('required fields'))
optional_fields = frozenset(metadoc.pop('optional fields', []))
cache_fields = frozenset(metadoc.pop('cache fields', []))
fields = required_fields | optional_fields
# TODO: All of these checks need to be tested.
if metadoc:
raise RuntimeError('Unknown metadata in {} plugin: {}'.format(
module_type, metadoc))
overlap = required_fields & optional_fields
if overlap:
raise RuntimeError('Fields in {} are both required and optional: {}'
.format(module_type, overlap))
invalid = cache_fields - fields
if invalid:
raise RuntimeError(
'"cache fields" must also be either required or optional: ' +
str(invalid))
definition = PluginDefinition(
module_type, sync_exe, reup_exe, fields, required_fields,
optional_fields, cache_fields)
_validate_plugin_definition(definition, module_fields)
return definition
def _find_plugin_dir(module_type):
'''Find the directory containing the plugin definition for the given type.
Do this by searching all the paths where plugins can live for a dir that
matches the type name.'''
for install_dir in _get_plugin_install_dirs():
candidate = os.path.join(install_dir, module_type)
if os.path.isdir(candidate):
return candidate
else:
raise PluginCandidateError(
'No plugin found for `{}` module in paths:\n{}'.format(
module_type,
'\n'.join(_get_plugin_install_dirs())))
def _get_plugin_install_dirs():
'''Return all the places on the filesystem where we should look for plugin
definitions. Order is significant here: user-installed plugins should be
searched first, followed by system-installed plugins, and last of all peru
builtins.'''
builtins_dir = os.path.join(compat.MODULE_ROOT, 'resources', 'plugins')
if os.name == 'nt':
# Windows
local_data_dir = os.path.expandvars('%LOCALAPPDATA%')
program_files_dir = os.path.expandvars('%PROGRAMFILES%')
return (
os.path.join(local_data_dir, 'peru', 'plugins'),
os.path.join(program_files_dir, 'peru', 'plugins'),
builtins_dir,
)
else:
# non-Windows
default_config_dir = os.path.expanduser('~/.config')
config_dir = os.environ.get('XDG_CONFIG_HOME', default_config_dir)
return (
os.path.join(config_dir, 'peru', 'plugins'),
'/usr/local/lib/peru/plugins',
'/usr/lib/peru/plugins',
builtins_dir,
)
def debug_assert_clean_parallel_count():
assert DEBUG_PARALLEL_COUNT == 0, \
"parallel count should be 0 but it's " + str(DEBUG_PARALLEL_COUNT)
@contextlib.contextmanager
def debug_parallel_count_context():
global DEBUG_PARALLEL_COUNT, DEBUG_PARALLEL_MAX
DEBUG_PARALLEL_COUNT += 1
DEBUG_PARALLEL_MAX = max(DEBUG_PARALLEL_COUNT, DEBUG_PARALLEL_MAX)
try:
yield
finally:
DEBUG_PARALLEL_COUNT -= 1
def tmp_dir(context):
return tempfile.TemporaryDirectory(dir=context.tmp_root)
class PluginCandidateError(PrintableError):
pass
class PluginCommandCandidateError(PrintableError):
pass
class PluginModuleFieldError(PrintableError):
pass
class PluginMetadataMissingError(PrintableError):
pass
class PluginPermissionsError(PrintableError):
pass
class PluginRuntimeError(PrintableError):
def __init__(self, type, fields, errorcode, output):
# Don't depend on plugins using terminating newlines.
stripped_output = output.strip('\n')
super().__init__(stripped_output)
| oconnor663/peru | peru/plugin.py | Python | mit | 12,866 |
import sys; import os
sys.path.insert(0, os.path.abspath('..'))
from modules.detector import lof2d
import unittest
class LOF2DTestCase(unittest.TestCase):
points = [
[1, 2], [2, 4], [3, 6], [4, 37], [5, 2],
[6, 5], [7, 3], [8, 4], [9, 8]
]
neighbours = 3
lof_test = lof2d.LOF2D(points)
def test_lof_operation(self):
# 1. Calculate all the distances
self.lof_test.create_distance_dictionary()
# 2. Get all neighbours that are closer or equal to k neighbour
self.lof_test.get_knn(self.neighbours)
# 3. Calculate local reachability density for all points
self.lof_test.calculate_lrd()
# 4. Calculate LOF
self.lof_test.calculate_lof()
# 5. Sort
self.lof_test.sort_lof()
top_3 = self.lof_test.get_top(3)
correct = 0
error = ""
result_1 = (4, 37)
result_2 = (3, 6)
result_3 = (6, 5)
if top_3[0] == result_1:
correct += 1
else:
error += "\tExpected (" + str(result_1[0]) + ", " + str(result_1[1]) + ") instead of (" + str(top_3[0][0]) + ", " + str(top_3[0][1]) + ").\n"
if top_3[1] == result_2:
correct += 1
else:
error += "\tExpected (" + str(result_2[0]) + ", " + str(result_2[1]) + ") instead of (" + str(top_3[1][0]) + ", " + str(top_3[1][1]) + ").\n"
if top_3[2] == result_3:
correct += 1
else:
error += "\tExpected (" + str(result_3[0]) + ", " + str(result_3[1]) + ") instead of (" + str(top_3[2][0]) + ", " + str(top_3[2][1]) + ").\n"
self.assertEqual(correct, 3, "LOF gives unexpected results.\n" + error)
def test_other_operation(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main(verbosity=2)
| mnmnc/campephilus | testing/detector_lof2d_testing.py | Python | apache-2.0 | 1,613 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test top-level desitarget functions.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# The line above will help with 2to3 support.
import unittest
import re
import sys
from .. import __version__ as theVersion
class TestTopLevel(unittest.TestCase):
"""Test top-level desitarget functions.
"""
@classmethod
def setUpClass(cls):
cls.versionre = re.compile(
r'([0-9]+!)?([0-9]+)(\.[0-9]+)*((a|b|rc|\.post|\.dev)[0-9]+)?')
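        # Example version strings this pattern accepts (illustrative): '0.30.1',
        # '1.2.dev456', '2!1.0rc1'.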
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_version(self):
"""Ensure the version conforms to PEP386/PEP440.
"""
if sys.version_info.major == 3:
self.assertRegex(theVersion, self.versionre)
else:
self.assertRegexpMatches(theVersion, self.versionre)
if __name__ == '__main__':
unittest.main()
def test_suite():
"""Allows testing of only this module with the command:
python setup.py test -m desitarget.test.test_top_level
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| desihub/desitarget | py/desitarget/test/test_top_level.py | Python | bsd-3-clause | 1,290 |
import numpy as np
import math
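# Explanatory note (added comment, not in the original script): calc() computes a
# DCG-style score -- the first relevance value is taken as-is and each later value
# is discounted by log2 of its 1-based position. The script ranks categories from
# the training matrix, scores the test rows against that ranking, and averages the
# resulting NDCG over rows with at least 8 non-zero categories.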
def calc(lst):
res = lst[0]
for i in range(1, len(lst)):
res += lst[i] / math.log(i+1, 2)
return res
tr = [l.rstrip().split(',') for l in open('run/CategoriesFeatures/categoryFeaturesSingaporeTrainAnonymized.csv','r')]
tr = np.array(tr)
tr = tr[1:].transpose()[2:].transpose().astype(np.double)
pred = tr
numt = tr.shape[1]
sumt = tr.sum(axis=0)
for i in range(numt):
rnk = sorted(zip(tr[i,:], sumt, range(numt)), reverse=1)
k = numt
for j in rnk:
pred[i][j[2]] = k
k -= 1
lineg = [l.rstrip().split(',') for l in open('run/CategoriesFeatures/categoryFeaturesSingaporeTestAnonymized.csv','r')]
#linep = [l.rstrip().split(' ') for l in open('predLondon.txt', 'r')]
gt = np.array(lineg)
#pred = np.array(linep)
gt = gt[1:].transpose()[2:].transpose().astype(np.double)
pred = pred.astype(np.double)
avg = 0
avgn = 0
for i in range(len(gt)):
x = calc(list(zip(*sorted(zip(pred[i], gt[i]), reverse=True))[1]))
y = calc(sorted(gt[i], reverse=True))
if sum(gt[i].astype(np.bool)) >= 8:
avgn += 1
avg += x / y
print avg / avgn | farseev/mobility | Baseline/run_baseline_sort.py | Python | gpl-2.0 | 1,107 |
import collections
from supriya import CalculationRate
from supriya.ugens.UGen import UGen
class MantissaMask(UGen):
"""
A floating-point mantissa mask.
::
>>> source = supriya.ugens.SinOsc.ar()
>>> mantissa_mask = supriya.ugens.MantissaMask.ar(
... source=source,
... bits=3,
... )
>>> mantissa_mask
MantissaMask.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Noise UGens"
_ordered_input_names = collections.OrderedDict([("source", 0), ("bits", 3)])
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
| Pulgama/supriya | supriya/ugens/MantissaMask.py | Python | mit | 652 |
# encoding: utf-8
import json
from ckan.tests.legacy import *
import ckan.model as model
HTTP_MOVED_PERMANENTLY = 301
class TestTagController(TestController):
@classmethod
def setup_class(self):
model.Session.remove()
CreateTestData.create()
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
def test_autocomplete(self):
controller = 'api'
action = 'tag_autocomplete'
offset = url_for(controller=controller, action=action, ver=2)
res = self.app.get(offset)
assert '[]' in res
offset = url_for(controller=controller, action=action, incomplete='russian', ver=2)
res = self.app.get(offset)
assert 'russian' in res
assert 'tolstoy' not in res
offset = url_for(controller=controller, action=action, incomplete='tolstoy', ver=2)
res = self.app.get(offset)
assert 'russian' not in res
assert 'tolstoy' in res
def test_autocomplete_with_capital_letter_in_search_term(self):
controller = 'api'
action = 'tag_autocomplete'
offset = url_for(controller=controller, action=action, incomplete='Flex', ver=2)
res = self.app.get(offset)
data = json.loads(res.body)
assert u'Flexible \u30a1' in data['ResultSet']['Result'][0].values()
def test_autocomplete_with_space_in_search_term(self):
controller = 'api'
action = 'tag_autocomplete'
offset = url_for(controller=controller, action=action, incomplete='Flexible ', ver=2)
res = self.app.get(offset)
data = json.loads(res.body)
assert u'Flexible \u30a1' in data['ResultSet']['Result'][0].values()
def test_autocomplete_with_unicode_in_search_term(self):
controller = 'api'
action = 'tag_autocomplete'
offset = url_for(controller=controller, action=action, incomplete=u'ible \u30a1', ver=2)
res = self.app.get(offset)
data = json.loads(res.body)
assert u'Flexible \u30a1' in data['ResultSet']['Result'][0].values()
| NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/tests/legacy/functional/test_tag.py | Python | gpl-3.0 | 2,075 |
"""
This module is for testing the GitManager, part of the project manager component.
"""
__author__ = 'ameadows'
import unittest
import os
import shutil
from datanexus.project_manager.version_control.git_manager import GitManager
class GitManagerTests(unittest.TestCase):
"""
    We first have to set up some parameters that will be utilized by the GitManager methods.
"""
def setUp(self):
#TODO: Change to use SettingsManager.
self.project_dir = "/home/ameadows/datanexus/project1"
self.git_dir = os.path.join(self.project_dir, '.git')
def test_create_git_project(self):
"""
Testing the creation of a project directory using git for version control.
:return:
"""
# Verify the directory does not exist beforehand
self.assertFalse(os.path.isdir(self.git_dir))
project = GitManager().create_git_project(self.project_dir)
# Verify the directory and project is created
self.assertTrue(os.path.isdir(self.git_dir))
# Clean up directory once test is complete
shutil.rmtree(self.project_dir)
| OpenDataAlex/dataNexus | datanexus/test/project_manager/version_control/test_git_manager.py | Python | gpl-2.0 | 1,124 |
# -*- coding:utf-8 -*-
from django.conf import settings
from haystack.views import SearchView
from colab.plugins.utils import filters_importer
class ColabSearchView(SearchView):
def extra_context(self, *args, **kwargs):
use_language, date_format = settings.DJANGO_DATE_FORMAT_TO_JS.get(
self.request.LANGUAGE_CODE, (None, None)
)
try:
type_chosen = self.form.cleaned_data.get('type')
except AttributeError:
type_chosen = ''
mimetype_choices = ()
size_choices = ()
used_by_choices = ()
mimetype_chosen = self.request.GET.get('mimetype')
size_chosen = self.request.GET.get('size')
used_by_chosen = self.request.GET.get('used_by')
types = filters_importer.import_plugin_filters(self.request.GET)
filters_options = [(k, v['name'], v['icon'])
for (k, v) in types.iteritems()]
return dict(
filters=types.get(type_chosen),
filters_options=filters_options,
type_chosen=type_chosen,
order_data=settings.ORDERING_DATA,
date_format=date_format,
use_language=use_language,
mimetype_chosen=mimetype_chosen if mimetype_chosen else '',
mimetype_choices=mimetype_choices,
size_chosen=size_chosen if size_chosen else '',
size_choices=size_choices,
used_by_chosen=used_by_chosen if used_by_chosen else '',
used_by_choices=used_by_choices,
)
| colab/colab | colab/search/views.py | Python | gpl-2.0 | 1,556 |
from oauthentication.contrib.google.backends import OAuth2AuthenticationBackend as GoogleOAuth2AuthenticationBackend
class OAuth2AuthenticationBackend(GoogleOAuth2AuthenticationBackend):
oauth2_provider_alias = 'googleapps'
| allanlei/django-oauthentication | oauthentication/contrib/oauth2/googleapps/backends.py | Python | bsd-3-clause | 230 |
# -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class HrEmployee(models.Model):
_inherit = 'hr.employee'
_description = 'Employee'
def generate_work_entries(self, date_start, date_stop, force=False):
date_start = fields.Date.to_date(date_start)
date_stop = fields.Date.to_date(date_stop)
if self:
current_contracts = self._get_contracts(date_start, date_stop, states=['open', 'close'])
else:
current_contracts = self._get_all_contracts(date_start, date_stop, states=['open', 'close'])
return bool(current_contracts._generate_work_entries(date_start, date_stop, force))
| jeremiahyan/odoo | addons/hr_work_entry_contract/models/hr_employee.py | Python | gpl-3.0 | 733 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.param.feldman_verifiable_sum_param import FeldmanVerifiableSumParam
from pipeline.component.component_base import FateComponent
from pipeline.interface import Input
from pipeline.interface import Output
from pipeline.utils.logger import LOGGER
class FeldmanVerifiableSum(FateComponent, FeldmanVerifiableSumParam):
def __init__(self, **kwargs):
FateComponent.__init__(self, **kwargs)
LOGGER.debug(f"{self.name} component created")
new_kwargs = self.erase_component_base_param(**kwargs)
FeldmanVerifiableSumParam.__init__(self, **new_kwargs)
self.input = Input(self.name)
self.output = Output(self.name, has_model=False)
self._module_name = "FeldmanVerifiableSum"
| FederatedAI/FATE | python/fate_client/pipeline/component/feldman_verifiable_sum.py | Python | apache-2.0 | 1,358 |
# encoding: utf-8
# module gtk._gtk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.135
# no doc
# imports
import atk as __atk
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class PrintError(__gobject.GEnum):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
}
__gtype__ = None # (!) real value is ''
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/gtk/_gtk/PrintError.py | Python | gpl-2.0 | 721 |
'''
Progress class for modules. Represents where a student is in a module.
Useful things to know:
- Use Progress.to_js_status_str() to convert a progress into a simple
status string to pass to js.
- Use Progress.to_js_detail_str() to convert a progress into a more detailed
string to pass to js.
In particular, these functions have a canonical handling of None.
For most subclassing needs, you should only need to reimplement
frac() and __str__().
'''
from collections import namedtuple
import numbers
class Progress(object):
'''Represents a progress of a/b (a out of b done)
a and b must be numeric, but not necessarily integer, with
0 <= a <= b and b > 0.
Progress can only represent Progress for modules where that makes sense. Other
modules (e.g. html) should return None from get_progress().
TODO: add tag for module type? Would allow for smarter merging.
'''
def __init__(self, a, b):
'''Construct a Progress object. a and b must be numbers, and must have
0 <= a <= b and b > 0
'''
# Want to do all checking at construction time, so explicitly check types
if not (isinstance(a, numbers.Number) and
isinstance(b, numbers.Number)):
raise TypeError('a and b must be numbers. Passed {0}/{1}'.format(a, b))
if a > b:
a = b
if a < 0:
a = 0
if b <= 0:
raise ValueError('fraction a/b = {0}/{1} must have b > 0'.format(a, b))
self._a = a
self._b = b
def frac(self):
''' Return tuple (a,b) representing progress of a/b'''
return (self._a, self._b)
def percent(self):
''' Returns a percentage progress as a float between 0 and 100.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return 100.0 * a / b
def started(self):
''' Returns True if fractional progress is greater than 0.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
return self.frac()[0] > 0
def inprogress(self):
''' Returns True if fractional progress is strictly between 0 and 1.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a > 0 and a < b
def done(self):
''' Return True if this represents done.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a == b
def ternary_str(self):
''' Return a string version of this progress: either
"none", "in_progress", or "done".
subclassing note: implemented in terms of frac()
'''
(a, b) = self.frac()
if a == 0:
return "none"
if a < b:
return "in_progress"
return "done"
def __eq__(self, other):
''' Two Progress objects are equal if they have identical values.
Implemented in terms of frac()'''
if not isinstance(other, Progress):
return False
(a, b) = self.frac()
(a2, b2) = other.frac()
return a == a2 and b == b2
def __ne__(self, other):
''' The opposite of equal'''
return not self.__eq__(other)
def __str__(self):
        ''' Return a string representation of this progress.
subclassing note: implemented in terms of frac().
'''
(a, b) = self.frac()
return "{0}/{1}".format(a, b)
@staticmethod
def add_counts(a, b):
'''Add two progress indicators, assuming that each represents items done:
(a / b) + (c / d) = (a + c) / (b + d).
If either is None, returns the other.
'''
if a is None:
return b
if b is None:
return a
# get numerators + denominators
(n, d) = a.frac()
(n2, d2) = b.frac()
return Progress(n + n2, d + d2)
@staticmethod
def to_js_status_str(progress):
'''
Return the "status string" version of the passed Progress
object that should be passed to js. Use this function when
sending Progress objects to js to limit dependencies.
'''
if progress is None:
return "NA"
return progress.ternary_str()
@staticmethod
def to_js_detail_str(progress):
'''
Return the "detail string" version of the passed Progress
object that should be passed to js. Use this function when
passing Progress objects to js to limit dependencies.
'''
if progress is None:
return "NA"
return str(progress)
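# Usage sketch (illustrative, not part of the original module; values are made up):
#
#     p1 = Progress(2, 5)
#     p2 = Progress(1, 5)
#     total = Progress.add_counts(p1, p2)   # Progress of 3/10
#     Progress.to_js_status_str(total)      # "in_progress"
#     Progress.to_js_detail_str(None)       # "NA"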
| elimence/edx-platform | common/lib/xmodule/xmodule/progress.py | Python | agpl-3.0 | 4,933 |
from contrib.rfc2460 import next_header_zero as suite
from scapy.all import *
from veripy.testability import ComplianceTestTestCase
class NextHeaderZeroTestCase(ComplianceTestTestCase):
def test_next_header_zero(self):
self.ifx.replies_with(IPv6(src=str(self.ifx.global_ip()), dst=str(self.tn1.global_ip()))/ICMPv6ParamProblem(code=1, ptr=40))
o = self.get_outcome(suite.NextHeaderZeroTestCase)
self.assertCheckPasses(o)
def test_next_header_zero_incorrect_code(self):
self.ifx.replies_with(IPv6(src=str(self.ifx.global_ip()), dst=str(self.tn1.global_ip()))/ICMPv6ParamProblem(code=0, ptr=40))
o = self.get_outcome(suite.NextHeaderZeroTestCase)
self.assertCheckFails(o)
def test_next_header_zero_incorrect_pointer(self):
self.ifx.replies_with(IPv6(src=str(self.ifx.global_ip()), dst=str(self.tn1.global_ip()))/ICMPv6ParamProblem(code=1, ptr=20))
o = self.get_outcome(suite.NextHeaderZeroTestCase)
self.assertCheckFails(o)
def test_next_header_zero_reply(self):
self.ifx.replies_with(IPv6(src=str(self.ifx.global_ip()), dst=str(self.tn1.global_ip()))/ICMPv6EchoReply())
o = self.get_outcome(suite.NextHeaderZeroTestCase)
self.assertCheckFails(o)
| mwrlabs/veripy | contrib/rfc2460/tests/next_header_zero_test_case.py | Python | gpl-3.0 | 1,318 |
#!/usr/bin/env python
# Copyright (c) 2015-2016 Freescale Semiconductor, Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# o Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# o Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# o Neither the name of Freescale Semiconductor, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import threading
from .server import (Service, Server)
from .client import RequestError
class SimpleServer(Server):
def __init__(self, transport=None, codecClass=None):
super(SimpleServer, self).__init__(transport, codecClass)
self._run = True
def run(self):
self._run = True
while self._run:
try:
self._receive_request()
except RequestError as e:
print("Error while processing request: %s" % (e))
def stop(self):
self._run = False
def _receive_request(self):
msg = self.transport.receive()
inCodec = self.codec_class()
inCodec.buffer = msg
inCodec.reset()
outCodec = self.codec_class()
outCodec.buffer = bytearray()
self._process_request(inCodec, outCodec)
if len(outCodec.buffer):
self.transport.send(outCodec.buffer)
class ServerThread(SimpleServer):
def __init__(self, transport, codecClass):
super(ServerThread, self).__init__(transport, codecClass)
self._thread = threading.Thread(target=self.run, name="erpc_server")
self._thread.daemon = True
def start(self):
self._thread.start()
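# Illustrative wiring sketch (added for clarity, not part of the original
# module). The transport instance and codec class below are hypothetical
# stand-ins for whatever the application provides; only the calls on
# ServerThread itself come from the code above. start() launches run() in a
# daemon thread, and stop() clears the flag so the loop exits once the request
# currently being received has been handled.
#
#     >>> server = ServerThread(some_transport, SomeCodecClass)
#     >>> server.start()
#     ...
#     >>> server.stop()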
| scottdarch/Noer | FRDMK66NoEr/SDK_2.1_FRDM-K66F-GCC-Full/middleware/multicore_2.1.0/erpc/erpc_python/erpc/simple_server.py | Python | apache-2.0 | 2,837 |
from __future__ import absolute_import, print_function, division
import re
import operator
from petl.compat import next, text_type
from petl.errors import ArgumentError
from petl.util.base import Table, asindices
from petl.transform.basics import TransformError
from petl.transform.conversions import convert
def capture(table, field, pattern, newfields=None, include_original=False,
flags=0, fill=None):
"""
Add one or more new fields with values captured from an existing field
searched via a regular expression. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'variable', 'value'],
... ['1', 'A1', '12'],
... ['2', 'A2', '15'],
... ['3', 'B1', '18'],
... ['4', 'C12', '19']]
>>> table2 = etl.capture(table1, 'variable', '(\\w)(\\d+)',
... ['treat', 'time'])
>>> table2
+-----+-------+-------+------+
| id | value | treat | time |
+=====+=======+=======+======+
| '1' | '12' | 'A' | '1' |
+-----+-------+-------+------+
| '2' | '15' | 'A' | '2' |
+-----+-------+-------+------+
| '3' | '18' | 'B' | '1' |
+-----+-------+-------+------+
| '4' | '19' | 'C' | '12' |
+-----+-------+-------+------+
>>> # using the include_original argument
... table3 = etl.capture(table1, 'variable', '(\\w)(\\d+)',
... ['treat', 'time'],
... include_original=True)
>>> table3
+-----+----------+-------+-------+------+
| id | variable | value | treat | time |
+=====+==========+=======+=======+======+
| '1' | 'A1' | '12' | 'A' | '1' |
+-----+----------+-------+-------+------+
| '2' | 'A2' | '15' | 'A' | '2' |
+-----+----------+-------+-------+------+
| '3' | 'B1' | '18' | 'B' | '1' |
+-----+----------+-------+-------+------+
| '4' | 'C12' | '19' | 'C' | '12' |
+-----+----------+-------+-------+------+
By default the field on which the capture is performed is omitted. It can
be included using the `include_original` argument.
The ``fill`` parameter can be used to provide a list or tuple of values to
use if the regular expression does not match. The ``fill`` parameter
should contain as many values as there are capturing groups in the regular
expression. If ``fill`` is ``None`` (default) then a
``petl.transform.TransformError`` will be raised on the first non-matching
value.
"""
return CaptureView(table, field, pattern,
newfields=newfields,
include_original=include_original,
flags=flags,
fill=fill)
Table.capture = capture
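# Illustrative note (added for clarity, not from the original docstring): with
# ``fill`` provided, rows whose value does not match the pattern are padded
# instead of raising TransformError. E.g., assuming table1 from the docstring
# above plus an extra row ['5', 'X', '20']:
#
#     >>> etl.capture(table1, 'variable', '(\\w)(\\d+)', ['treat', 'time'],
#     ...             fill=['', ''])
#
# the non-matching row comes through as ('5', '20', '', '').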
class CaptureView(Table):
def __init__(self, source, field, pattern, newfields=None,
include_original=False, flags=0, fill=None):
self.source = source
self.field = field
self.pattern = pattern
self.newfields = newfields
self.include_original = include_original
self.flags = flags
self.fill = fill
def __iter__(self):
return itercapture(self.source, self.field, self.pattern,
self.newfields, self.include_original, self.flags,
self.fill)
def itercapture(source, field, pattern, newfields, include_original, flags,
fill):
it = iter(source)
prog = re.compile(pattern, flags)
hdr = next(it)
flds = list(map(text_type, hdr))
if isinstance(field, int) and field < len(hdr):
field_index = field
field = flds[field_index]
elif field in flds:
field_index = flds.index(field)
else:
raise ArgumentError('field invalid: must be either field name or index')
# determine output fields
outhdr = list(flds)
if not include_original:
outhdr.remove(field)
if newfields:
outhdr.extend(newfields)
yield tuple(outhdr)
# construct the output data
for row in it:
value = row[field_index]
if include_original:
out_row = list(row)
else:
out_row = [v for i, v in enumerate(row) if i != field_index]
match = prog.search(value)
if match is None:
if fill is not None:
out_row.extend(fill)
else:
raise TransformError('value %r did not match pattern %r'
% (value, pattern))
else:
out_row.extend(match.groups())
yield tuple(out_row)
def split(table, field, pattern, newfields=None, include_original=False,
maxsplit=0, flags=0):
"""
Add one or more new fields with values generated by splitting an
existing value around occurrences of a regular expression. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'variable', 'value'],
... ['1', 'parad1', '12'],
... ['2', 'parad2', '15'],
... ['3', 'tempd1', '18'],
... ['4', 'tempd2', '19']]
>>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day'])
>>> table2
+-----+-------+----------+-----+
| id | value | variable | day |
+=====+=======+==========+=====+
| '1' | '12' | 'para' | '1' |
+-----+-------+----------+-----+
| '2' | '15' | 'para' | '2' |
+-----+-------+----------+-----+
| '3' | '18' | 'temp' | '1' |
+-----+-------+----------+-----+
| '4' | '19' | 'temp' | '2' |
+-----+-------+----------+-----+
By default the field on which the split is performed is omitted. It can
be included using the `include_original` argument.
"""
return SplitView(table, field, pattern, newfields, include_original,
maxsplit, flags)
Table.split = split
class SplitView(Table):
def __init__(self, source, field, pattern, newfields=None,
include_original=False, maxsplit=0, flags=0):
self.source = source
self.field = field
self.pattern = pattern
self.newfields = newfields
self.include_original = include_original
self.maxsplit = maxsplit
self.flags = flags
def __iter__(self):
return itersplit(self.source, self.field, self.pattern, self.newfields,
self.include_original, self.maxsplit, self.flags)
def itersplit(source, field, pattern, newfields, include_original, maxsplit,
flags):
it = iter(source)
prog = re.compile(pattern, flags)
hdr = next(it)
flds = list(map(text_type, hdr))
if isinstance(field, int) and field < len(hdr):
field_index = field
field = hdr[field_index]
elif field in flds:
field_index = flds.index(field)
else:
raise ArgumentError('field invalid: must be either field name or index')
# determine output fields
outhdr = list(flds)
if not include_original:
outhdr.remove(field)
if newfields:
outhdr.extend(newfields)
yield tuple(outhdr)
# construct the output data
for row in it:
value = row[field_index]
if include_original:
out_row = list(row)
else:
out_row = [v for i, v in enumerate(row) if i != field_index]
out_row.extend(prog.split(value, maxsplit))
yield tuple(out_row)
def sub(table, field, pattern, repl, count=0, flags=0):
"""
Convenience function to convert values under the given field using a
regular expression substitution. See also :func:`re.sub`.
"""
prog = re.compile(pattern, flags)
conv = lambda v: prog.sub(repl, v, count=count)
return convert(table, field, conv)
Table.sub = sub
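# Illustrative example (added for clarity; sub() has no docstring example):
# sub() is just convert() with re.sub applied to each value under the field.
#
#     >>> table1 = [['id', 'variable'], ['1', 'A_1'], ['2', 'B_2']]
#     >>> etl.sub(table1, 'variable', '_', '-')
#
# yields rows ('1', 'A-1') and ('2', 'B-2').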
def search(table, *args, **kwargs):
"""
Perform a regular expression search, returning rows that match a given
pattern, either anywhere in the row or within a specific field. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['orange', 12, 'oranges are nice fruit'],
... ['mango', 42, 'I like them'],
... ['banana', 74, 'lovely too'],
... ['cucumber', 41, 'better than mango']]
>>> # search any field
... table2 = etl.search(table1, '.g.')
>>> table2
+------------+-----+--------------------------+
| foo | bar | baz |
+============+=====+==========================+
| 'orange' | 12 | 'oranges are nice fruit' |
+------------+-----+--------------------------+
| 'mango' | 42 | 'I like them' |
+------------+-----+--------------------------+
| 'cucumber' | 41 | 'better than mango' |
+------------+-----+--------------------------+
>>> # search a specific field
... table3 = etl.search(table1, 'foo', '.g.')
>>> table3
+----------+-----+--------------------------+
| foo | bar | baz |
+==========+=====+==========================+
| 'orange' | 12 | 'oranges are nice fruit' |
+----------+-----+--------------------------+
| 'mango' | 42 | 'I like them' |
+----------+-----+--------------------------+
The complement can be found via
:func:`petl.transform.regex.searchcomplement`.
"""
if len(args) == 1:
field = None
pattern = args[0]
elif len(args) == 2:
field = args[0]
pattern = args[1]
else:
raise ArgumentError('expected 1 or 2 positional arguments')
return SearchView(table, pattern, field=field, **kwargs)
Table.search = search
class SearchView(Table):
def __init__(self, table, pattern, field=None, flags=0, complement=False):
self.table = table
self.pattern = pattern
self.field = field
self.flags = flags
self.complement = complement
def __iter__(self):
return itersearch(self.table, self.pattern, self.field, self.flags,
self.complement)
def itersearch(table, pattern, field, flags, complement):
prog = re.compile(pattern, flags)
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
yield tuple(hdr)
if field is None:
# search whole row
test = lambda r: any(prog.search(text_type(v)) for v in r)
else:
indices = asindices(hdr, field)
if len(indices) == 1:
index = indices[0]
test = lambda r: prog.search(text_type(r[index]))
else:
getvals = operator.itemgetter(*indices)
test = lambda r: any(prog.search(text_type(v)) for v in getvals(r))
# complement==False, return rows that match
if not complement:
for row in it:
if test(row):
yield tuple(row)
# complement==True, return rows that do not match
else:
for row in it:
if not test(row):
yield tuple(row)
def searchcomplement(table, *args, **kwargs):
"""
Perform a regular expression search, returning rows that **do not**
match a given pattern, either anywhere in the row or within a specific
field. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['orange', 12, 'oranges are nice fruit'],
... ['mango', 42, 'I like them'],
... ['banana', 74, 'lovely too'],
... ['cucumber', 41, 'better than mango']]
>>> # search any field
... table2 = etl.searchcomplement(table1, '.g.')
>>> table2
+----------+-----+--------------+
| foo | bar | baz |
+==========+=====+==============+
| 'banana' | 74 | 'lovely too' |
+----------+-----+--------------+
>>> # search a specific field
... table3 = etl.searchcomplement(table1, 'foo', '.g.')
>>> table3
+------------+-----+---------------------+
| foo | bar | baz |
+============+=====+=====================+
| 'banana' | 74 | 'lovely too' |
+------------+-----+---------------------+
| 'cucumber' | 41 | 'better than mango' |
+------------+-----+---------------------+
This returns the complement of :func:`petl.transform.regex.search`.
"""
return search(table, *args, complement=True, **kwargs)
Table.searchcomplement = searchcomplement
def splitdown(table, field, pattern, maxsplit=0, flags=0):
"""
Split a field into multiple rows using a regular expression. E.g.:
>>> import petl as etl
>>> table1 = [['name', 'roles'],
... ['Jane Doe', 'president,engineer,tailor,lawyer'],
... ['John Doe', 'rocket scientist,optometrist,chef,knight,sailor']]
>>> table2 = etl.splitdown(table1, 'roles', ',')
>>> table2.lookall()
+------------+--------------------+
| name | roles |
+============+====================+
| 'Jane Doe' | 'president' |
+------------+--------------------+
| 'Jane Doe' | 'engineer' |
+------------+--------------------+
| 'Jane Doe' | 'tailor' |
+------------+--------------------+
| 'Jane Doe' | 'lawyer' |
+------------+--------------------+
| 'John Doe' | 'rocket scientist' |
+------------+--------------------+
| 'John Doe' | 'optometrist' |
+------------+--------------------+
| 'John Doe' | 'chef' |
+------------+--------------------+
| 'John Doe' | 'knight' |
+------------+--------------------+
| 'John Doe' | 'sailor' |
+------------+--------------------+
"""
return SplitDownView(table, field, pattern, maxsplit, flags)
Table.splitdown = splitdown
class SplitDownView(Table):
def __init__(self, table, field, pattern, maxsplit=0, flags=0):
self.table = table
self.field = field
self.pattern = pattern
self.maxsplit = maxsplit
self.flags = flags
def __iter__(self):
return itersplitdown(self.table, self.field, self.pattern,
self.maxsplit, self.flags)
def itersplitdown(table, field, pattern, maxsplit, flags):
prog = re.compile(pattern, flags)
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
if isinstance(field, int) and field < len(hdr):
field_index = field
field = hdr[field_index]
elif field in flds:
field_index = flds.index(field)
else:
raise ArgumentError('field invalid: must be either field name or index')
yield tuple(hdr)
for row in it:
value = row[field_index]
for v in prog.split(value, maxsplit):
yield tuple(v if i == field_index else row[i] for i in range(len(hdr)))
| alimanfoo/petl | petl/transform/regex.py | Python | mit | 15,382 |
# -*- coding: utf-8 -*-
import datetime
class AtomDate:
__slots__ = ("date",)
def __init__(self, timestamp):
"""
In QuickTime File Format, date time is represented by seconds
since midnight, Jan. 1, 1904.
"""
self.date = datetime.datetime(1904, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)
self.date += datetime.timedelta(seconds=timestamp)
def __str__(self):
return self.date.strftime("%Y-%m-%dT%H:%M:%S%z")
class AtomMatrix:
__slots__ = ("a", "b", "c", "d", "u", "v", "w", "x", "y")
def __init__(self, matrix_data):
"""
In QuickTime File Format, Matrix structure is stored in the
order of a, b, u, c, d, v, x, y, w, where
[a b u]
[c d v]
[x y w]
where all of the elements are 32-bit fixed-point numbers.
u, v and w are 2.30 fixed-point values and the others are 16.16.
"""
(
self.a,
self.b,
self.u,
self.c,
self.d,
self.v,
self.x,
self.y,
self.w,
) = matrix_data
self.a /= 2 ** 16
self.b /= 2 ** 16
self.c /= 2 ** 16
self.d /= 2 ** 16
self.x /= 2 ** 16
self.y /= 2 ** 16
self.u /= 2 ** 30
self.v /= 2 ** 30
self.w /= 2 ** 30
def matrix(self):
return (self.a, self.b, self.u, self.c, self.d, self.v, self.x, self.y, self.w)
def __str__(self):
return (
"[[%.4f %.4f %.4f] " "[%.4f %.4f %.4f] " "[%.4f %.4f %.4f]]" % self.matrix()
)
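# Illustrative example (added for clarity): the QuickTime identity matrix is
# stored as (0x10000, 0, 0, 0, 0x10000, 0, 0, 0, 0x40000000), i.e. 1.0 encoded
# as 16.16 for a and d and as 2.30 for w, so
#
#     >>> str(AtomMatrix((0x10000, 0, 0, 0, 0x10000, 0, 0, 0, 0x40000000)))
#     '[[1.0000 0.0000 0.0000] [0.0000 1.0000 0.0000] [0.0000 0.0000 1.0000]]'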
class AtomLanguageCodeValue:
# Macintosh Language Codes
MAC_LANGUAGE_CODE = (
"English",
"French",
"German",
"Italian",
"Dutch",
"Swedish",
"Spanish",
"Danish",
"Portuguese",
"Norwegian",
"Hebrew",
"Japanese",
"Arabic",
"Finnish",
"Greek",
"Icelandic",
"Maltese",
"Turkish",
"Croatian",
"Traditional",
"Chinese",
"Urdu",
"Hindi",
"Thai",
"Korean",
"Lithuanian",
"Polish",
"Hungarian",
"Estonian",
"Lettish",
"Latvian",
"Saami",
"Sami",
"Faroese",
"Farsi",
"Russian",
"Simplified",
"Chinese",
"Flemish",
"Irish",
"Albanian",
"Romanian",
"Czech",
"Slovak",
"Slovenian",
"Yiddish",
"Serbian",
"Macedonian",
"Bulgarian",
"Ukrainian",
"Belarusian",
"Uzbek",
"Kazakh",
"Azerbaijani",
"AzerbaijanAr",
"Armenian",
"Georgian",
"Moldavian",
"Kirghiz",
"Tajiki",
"Turkmen",
"Mongolian",
"MongolianCyr",
"Pashto",
"Kurdish",
"Kashmiri",
"Sindhi",
"Tibetan",
"Nepali",
"Sanskrit",
"Marathi",
"Bengali",
"Assamese",
"Gujarati",
"Punjabi",
"Oriya",
"Malayalam",
"Kannada",
"Tamil",
"Telugu",
"Sinhala",
"Burmese",
"Khmer",
"Lao",
"Vietnamese",
"Indonesian",
"Tagalog",
"MalayRoman",
"MalayArabic",
"Amharic",
"Galla",
"Oromo",
"Somali",
"Swahili",
"Kinyarwanda",
"Rundi",
"Nyanja",
"Malagasy",
"Esperanto",
"Welsh",
"Basque",
"Catalan",
"Latin",
"Quechua",
"Guarani",
"Aymara",
"Tatar",
"Uighur",
"Dzongkha",
"JavaneseRom",
)
__slots__ = ("value",)
def __init__(self, value):
# the value should be less than 0x8000
if value > 0x7FFF:
raise ValueError
self.value = value
def __str__(self):
if self.value < 0x400:
return self.MAC_LANGUAGE_CODE[self.value]
elif self.value == 0x7FFF:
return "Unspecified"
else:
return "%c%c%c" % (
((self.value >> 10) & 0x1F) + 0x60,
((self.value >> 5) & 0x1F) + 0x60,
(self.value & 0x1F) + 0x60,
)
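# Illustrative examples (added for clarity): values below 0x400 index the
# Macintosh table above (e.g. 11 -> 'Japanese'), while values from 0x400 up to
# 0x7FFE unpack as three 5-bit letters offset by 0x60, so the common ISO code
# 0x55C4 decodes to 'und' (0x15 -> 'u', 0x0E -> 'n', 0x04 -> 'd').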
| rarewin/AtomSeeker | atomseeker/elements.py | Python | bsd-2-clause | 4,426 |
# encoding: utf-8
from unittest import TestCase, main as unittest_run
from data_tools.classification.datastructures.classification_results import ClassificationResults
class ClassificationResultsTests(TestCase):
""" Tests suite for classification results wrapper. """
def test_selected_class(self):
results = ClassificationResults({"Class_A": 0.8, "Class_B": 0.2})
self.assertEquals(results.selected_class, "Class_A")
def test_selected_class_draw(self):
results = ClassificationResults({"Class_A": 0.5, "Class_B": 0.5})
# selected class in case of draw does not matter (from some points of view),
# as long as it does not change
selected = results.selected_class
for _ in xrange(100):
self.assertEquals(results.selected_class, selected)
def test_selected_class_confidence(self):
results = ClassificationResults({"Class_A": 0.8, "Class_B": 0.2})
self.assertEquals(results.selected_class_confidence, 0.8)
if __name__ == "__main__":
unittest_run() | dzida/data-tools | data_tools/classification/datastructures/test/classification_results_test.py | Python | apache-2.0 | 1,056 |
from django.core.management.base import BaseCommand
from native_tags.registry import register
class Command(BaseCommand):
def usage(self, exe):
return '%s [bucket1 ...]\n\nLists registered tags for the given buckets if any' % exe
def handle(self, *buckets, **kwargs):
for bucket,items in register.items():
if len(buckets) and not bucket in buckets:
continue
print bucket.title()
items = [(x,y.__module__) for x,y in items.items()]
items.sort(lambda x,y: cmp(x[1],y[1]))
for name,mod in items:
print '\t%s.%s' % (mod,name)
| justquick/django-native-tags | native_tags/management/commands/nativelib.py | Python | bsd-3-clause | 644 |
# Lexicographic permutations
# Problem 24
# A permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of
# the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it
# lexicographic order. The lexicographic permutations of 0, 1 and 2 are:
#
# 012 021 102 120 201 210
#
# What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?
#
# This problem can actually be solved mathematically. We note that
# 1! = 1
# 2! = 2
# 3! = 6
# 4! = 24
# 5! = 120
# 6! = 720
# 7! = 5040
# 8! = 40320
# 9! = 362880
# 10! = 3628800
#
# So the 1-millionth permutation must start with the digit floor((PERMUTATION_INDEX - 1) / 9!) = _,
# followed by remaining_digits[floor(((PERMUTATION_INDEX - 1) % 9!) / 8!)] = _, etc.
#
# The code reflects this solution, rather than the itertools result.
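#
# Worked example of the first steps (added for clarity), with
# remaining = PERMUTATION_INDEX - 1 = 999999 and remaining_digits = [0..9]:
#   999999 // 9! = 999999 // 362880 = 2, remainder 274239 -> first digit 2
#   274239 // 8! = 274239 // 40320 = 6, remainder 32319 -> next digit 7 (index 6 of [0,1,3,4,5,6,7,8,9])
#   32319 // 7! = 32319 // 5040 = 6, remainder 2079 -> next digit 8
# ... continuing gives 2783915460.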
from math import factorial
PERMUTATION_INDEX = 1000000
def run():
remaining_digits = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
remaining = PERMUTATION_INDEX - 1
permutation = 0
for i in range(9, -1, -1):
result = divmod(remaining, factorial(i))
permutation *= 10
permutation += remaining_digits[result[0]]
print(str(permutation))
remaining_digits.remove(remaining_digits[result[0]])
remaining = result[1]
print("The {0}th lexicographic permutation of the digits 0123456789 is {1}".format(PERMUTATION_INDEX, permutation))
# Sample Output:
# The 1000000th lexicographic permutation of the digits 0123456789 is 2783915460
#
# Total running time for Problem24.py is 0.00012720982628747603 seconds
| YangLuGitHub/Euler | src/scripts/Problem24.py | Python | mit | 1,645 |
from flask import Flask, request, redirect, url_for, jsonify, flash
from flask import render_template
from flask import session as login_session
from flask import make_response
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import desc
from db_setup import Base, User, Category, Item
from werkzeug import secure_filename
import random
import string
import os
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
import requests
import jaxml
UPLOAD_FOLDER = 'static/images/'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Item Catalog"
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
engine = create_engine('sqlite:///item-catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
# RSS APIs to view Catalog Information
@app.route('/catalog/RSS')
def catalog_RSS():
"""
Return an RSS feed containing all items in catalog
"""
items = session.query(Item).all()
doc = jaxml.XML_document()
doc.catalogitems()
for item in items:
doc._push()
doc.item()
doc.id(item.id)
doc.name(item.name)
doc.description(item.description)
doc.imagepath('"' + item.image + '"')
doc.category_id(item.category_id)
doc.user_id(item.user_id)
doc._pop()
return doc.__repr__()
@app.route('/catalog/<int:category_id>/RSS')
def catalog_category_RSS(category_id):
"""
Return an RSS feed containing all items in the specified category_id
"""
items = session.query(Item).filter_by(
category_id=category_id).all()
doc = jaxml.XML_document()
doc.category(str(category_id))
for item in items:
doc._push()
doc.item()
doc.id(item.id)
doc.name(item.name)
doc.description(item.description)
doc.imagepath('"' + item.image + '"')
doc.category_id(item.category_id)
doc.user_id(item.user_id)
doc._pop()
return doc.__repr__()
@app.route('/catalog/<int:category_id>/<int:item_id>/RSS')
def catalog_item_RSS(category_id, item_id):
"""
Return an RSS feed containing specified item_id
"""
item = session.query(Item).filter_by(id=item_id).one()
doc = jaxml.XML_document()
doc.category(str(category_id))
doc._push()
doc.item()
doc.id(item.id)
doc.name(item.name)
doc.description(item.description)
doc.imagepath('"' + item.image + '"')
doc.category_id(item.category_id)
doc.user_id(item.user_id)
doc._pop()
return doc.__repr__()
# JSON APIs to view Catalog Information
@app.route('/catalog/JSON')
def catalog_JSON():
"""
Return a JSON containing all items in catalog
"""
items = session.query(Item).all()
return jsonify(CatalogItems=[i.serialize for i in items])
@app.route('/catalog/<int:category_id>/JSON')
def catalog_category_JSON(category_id):
"""
Return a JSON containing all items in the specified category_id
"""
items = session.query(Item).filter_by(
category_id=category_id).all()
return jsonify(CategoryItems=[i.serialize for i in items])
@app.route('/catalog/<int:category_id>/<int:item_id>/JSON')
def catalog_item_JSON(category_id, item_id):
"""
Return a JSON containing the specified item_id
"""
item = session.query(Item).filter_by(id=item_id).one()
return jsonify(item=item.serialize)
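# Illustrative usage (added for clarity): with the app running on port 8000
# (see the __main__ block at the bottom of this file), the endpoints above can
# be exercised with, for example,
#   curl http://localhost:8000/catalog/JSON
#   curl http://localhost:8000/catalog/1/JSON
#   curl http://localhost:8000/catalog/1/1/JSON
# returning {"CatalogItems": [...]}, {"CategoryItems": [...]} and
# {"item": {...}} respectively; the category and item ids here are
# hypothetical examples.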
@app.route('/login')
def show_login():
"""
Allow user login and create anti-forgery state token
"""
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
return render_template('login.html', STATE=state)
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
"""
Authenticate the user using Facebook OAuth
"""
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = request.data
print "access token received %s " % access_token
app_id = json.loads(open('fb_client_secrets.json', 'r').read())[
'web']['app_id']
app_secret = json.loads(
open('fb_client_secrets.json', 'r').read())['web']['app_secret']
# If the line below is broken up to satisfy PEP8, the function will stop working
url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (app_id, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, 'GET')[1]
# Use token to get user info from API
userinfo_url = "https://graph.facebook.com/v2.2/me"
# strip expire tag from access token
token = result.split("&")[0]
url = 'https://graph.facebook.com/v2.2/me?%s' % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['provider'] = 'facebook'
login_session['username'] = data["name"]
login_session['email'] = data["email"]
login_session['facebook_id'] = data["id"]
# The token must be stored in the login_session in order to properly logout
# Strip out the information before the equals sign in our token
stored_token = token.split("=")[1]
login_session['access_token'] = stored_token
# Get user picture (if the line below is broken up to satisfy PEP8, the function will stop working)
url = 'https://graph.facebook.com/v2.2/me/picture?%s&redirect=0&height=200&width=200' % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['picture'] = data["data"]["url"]
# see if user exists
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius:\
150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("Now logged in as %s" % login_session['username'])
return output
@app.route('/fbdisconnect')
def fbdisconnect():
"""
Disconnect user using Facebook OAuth
"""
facebook_id = login_session['facebook_id']
# The access token must be included to successfully logout
access_token = login_session['access_token']
url = 'https://www.facebook.com/logout.php?next=localhost:8000\
&access_token=' + access_token
h = httplib2.Http()
result = h.request(url, 'DELETE')[1]
return "you have been logged out"
@app.route('/gconnect', methods=['POST'])
def gconnect():
"""
Authenticate the user using Google OAuth
"""
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_credentials = login_session.get('credentials')
stored_gplus_id = login_session.get('gplus_id')
if stored_credentials is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is\
already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['credentials'] = credentials
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# ADD PROVIDER TO LOGIN SESSION
login_session['provider'] = 'google'
# see if user exists, if it doesn't make a new one
user_id = getUserID(data["email"])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius:\
150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash(u'You are now logged in as %s\
' % login_session['username'], 'success')
print "done!"
return output
@app.route('/gdisconnect')
def gdisconnect():
"""
Disconnect user using Google OAuth
"""
# Only disconnect a connected user.
credentials = login_session.get('credentials')
for i in login_session:
print i
if credentials is None:
response = make_response(
json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = credentials.access_token
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
print url
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] != '200':
# For whatever reason, the given token was invalid.
response = make_response(
json.dumps('Failed to revoke token for given user.', 400))
response.headers['Content-Type'] = 'application/json'
return response
@app.route('/disconnect')
def disconnect():
"""
Disconnect based on provider
"""
if 'provider' in login_session:
if login_session['provider'] == 'google':
gdisconnect()
del login_session['gplus_id']
del login_session['credentials']
if login_session['provider'] == 'facebook':
fbdisconnect()
del login_session['facebook_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
del login_session['provider']
flash(u'You have successfully been logged out', 'success')
return redirect(url_for('show_catalog'))
else:
flash(u'You were not logged in', 'error')
return redirect(url_for('show_catalog'))
# User Helper Functions
def createUser(login_session):
newUser = User(name=login_session['username'], email=login_session[
'email'], picture=login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one()
return user.id
def getUserInfo(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
@app.route('/')
@app.route('/catalog/')
def show_catalog():
"""
Render the catalog page with all the categories and
a selection of the latest items inserted
"""
categories = session.query(Category).order_by(Category.name).all()
# Shows 6 newer items
items = session.query(Item).order_by(desc(Item.id)).limit(6)
if 'username' in login_session:
user = getUserInfo(login_session['user_id'])
return render_template('catalog.html',
categories=categories,
items=items,
user=user)
else:
return render_template('catalog.html',
categories=categories,
items=items)
@app.route('/new', methods=['GET', 'POST'])
@app.route('/catalog/new', methods=['GET', 'POST'])
def new_category():
"""
Get: Show the form allowing an authenticated user to create a category
Post: Allow an authenticated user to create a category
"""
if 'username' not in login_session:
return redirect('/login')
user = getUserInfo(login_session['user_id'])
categories = session.query(Category).order_by(Category.name).all()
if request.method == 'POST':
newCategory = Category(
name=request.form['inputCategoryName'],
description=request.form['inputCategoryDescription'],
user_id=user.id
)
session.add(newCategory)
session.commit()
flash(u'Category added successfully', 'success')
return redirect(url_for('show_catalog'))
else:
return render_template('new_category.html',
categories=categories,
user=user)
@app.route('/catalog/<int:category_id>/edit', methods=['GET', 'POST'])
def edit_category(category_id):
"""
Get: Show to the authenticated user a form to change the category data
Post: Allow authenticated user to change category data
"""
if 'username' not in login_session:
return redirect('/login')
categories = session.query(Category).order_by(Category.name).all()
editedCategory = session.query(Category).filter_by(id=category_id).one()
if request.method == 'POST':
if login_session['user_id'] == editedCategory.user_id:
if request.form['inputCategoryName']:
editedCategory.name = request.form['inputCategoryName']
if request.form['inputCategoryDescription']:
editedCategory.description = \
request.form['inputCategoryDescription']
session.add(editedCategory)
session.commit()
flash(u'Category edited successfully', 'success')
return redirect(url_for('show_catalog'))
else:
flash(u'Insufficient rights to edit this category', 'error')
return redirect(url_for('show_catalog'))
else:
if login_session['user_id'] == editedCategory.user_id:
user = getUserInfo(login_session['user_id'])
return render_template('edit_category.html',
category=editedCategory,
categories=categories,
user=user)
else:
flash(u'Insufficient rights to edit this category', 'error')
return redirect(url_for('category_items', category_id=category_id))
@app.route('/catalog/<int:category_id>/delete', methods=['GET', 'POST'])
def delete_category(category_id):
"""
Get: Show to the authenticated user a form to confirm category deletion
Post: Allow authenticated user to delete category
"""
if 'username' not in login_session:
return redirect('/login')
categories = session.query(Category).order_by(Category.name).all()
deletedCategory = session.query(Category).filter_by(id=category_id).one()
if request.method == 'POST':
session.delete(deletedCategory)
session.commit()
flash(u'Category deleted successfully', 'success')
return redirect(url_for('show_catalog'))
else:
if login_session['user_id'] == deletedCategory.user_id:
user = getUserInfo(login_session['user_id'])
return render_template('delete_category.html',
category=deletedCategory,
categories=categories,
user=user)
else:
flash(u'Insufficient rights to delete this category', 'error')
return redirect(url_for('category_items', category_id=category_id))
@app.route('/catalog/<int:category_id>/')
def category_items(category_id):
"""
Render a catalog page with items present in a specific category
"""
category = session.query(Category).filter_by(id=category_id).one()
categories = session.query(Category).order_by(Category.name).all()
items = session.query(Item).filter_by(category_id=category_id).order_by(Item.name).all()
total_items = len(items)
if 'username' not in login_session:
return render_template('category.html',
category=category,
items=items,
categories=categories,
total_items=total_items)
else:
user = getUserInfo(login_session['user_id'])
return render_template('category.html',
category=category,
items=items,
categories=categories,
user=user,
total_items=total_items)
@app.route('/catalog/<int:category_id>/<int:item_id>', methods=['GET', 'POST'])
def show_item(category_id, item_id):
"""
Render the item page
"""
categories = session.query(Category).order_by(Category.name).all()
category = session.query(Category).filter_by(id=category_id).one()
item = session.query(Item).filter_by(id=item_id).one()
if 'username' not in login_session:
return render_template('item.html',
category=category,
item=item,
categories=categories)
else:
user = getUserInfo(login_session['user_id'])
return render_template('item.html',
category=category,
item=item,
categories=categories,
user=user)
@app.route('/catalog/<int:category_id>/new', methods=['GET', 'POST'])
def new_item(category_id):
"""
Get: Show the form allowing an authenticated user to create an item
in specified category
Post: Allow an authenticated user to create an item in specified category
"""
if 'username' not in login_session:
return redirect('/login')
category = session.query(Category).filter_by(id=category_id).one()
categories = session.query(Category).order_by(Category.name).all()
user = getUserInfo(login_session['user_id'])
if request.method == 'POST':
newItem = Item(
name=request.form['inputItemName'],
description=request.form['inputItemDescription'],
price=request.form['inputItemPrice'],
image="",
category_id=category_id,
user_id=user.id
)
session.add(newItem)
session.commit()
file = request.files['inputItemImage']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file_extension = os.path.splitext(filename)[1]
image_path = os.path.join(app.config['UPLOAD_FOLDER'],
str(newItem.id) + file_extension)
file.save(image_path)
newItem.image = "/" + image_path
session.add(newItem)
session.commit()
flash(u'Item added successfully', 'success')
return redirect(url_for('category_items',
category_id=category_id))
else:
user = getUserInfo(login_session['user_id'])
return render_template('new_item.html',
category=category,
categories=categories,
user=user)
@app.route('/catalog/<int:category_id>/<int:item_id>/edit',
methods=['GET', 'POST'])
def edit_item(category_id, item_id):
"""
Get: Show to the authenticated user a form to change an item data
Post: Allow authenticated user to change an item data
"""
if 'username' not in login_session:
return redirect('/login')
category = session.query(Category).filter_by(id=category_id).one()
categories = session.query(Category).order_by(Category.name).all()
editedItem = session.query(Item).filter_by(id=item_id).one()
if request.method == 'POST':
if login_session['user_id'] == editedItem.user_id:
if request.form['inputItemName']:
editedItem.name = request.form['inputItemName']
if request.form['inputItemDescription']:
editedItem.description = request.form['inputItemDescription']
if request.form['inputItemPrice']:
editedItem.price = request.form['inputItemPrice']
if request.form['inputItemCategory']:
editedItem.category_id = request.form['inputItemCategory']
file = request.files['inputItemImage']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file_extension = os.path.splitext(filename)[1]
image_path = os.path.join(app.config['UPLOAD_FOLDER'],
str(editedItem.id) + file_extension)
file.save(image_path)
editedItem.image = "/" + image_path
session.add(editedItem)
session.commit()
flash(u'Item edited successfully', 'success')
return redirect(url_for('show_catalog'))
else:
flash(u'Insufficient rights to edit this item', 'error')
return redirect(url_for('category_items', category_id=category_id))
else:
user = getUserInfo(login_session['user_id'])
if login_session['user_id'] == editedItem.user_id:
return render_template('edit_item.html',
item=editedItem,
category=category,
categories=categories,
user=user)
else:
flash(u'Insufficient rights to edit this item', 'error')
return redirect(url_for('category_items', category_id=category_id))
@app.route('/catalog/<int:category_id>/<int:item_id>/delete',
methods=['GET', 'POST'])
def delete_item(category_id, item_id):
"""
Render the catalog page with all categories and items present in the db
"""
if 'username' not in login_session:
return redirect('/login')
category = session.query(Category).filter_by(id=category_id).one()
categories = session.query(Category).order_by(Category.name).all()
deletedItem = session.query(Item).filter_by(id=item_id).one()
if request.method == 'POST':
if login_session['user_id'] == deletedItem.user_id:
session.delete(deletedItem)
session.commit()
flash(u'Item deleted successfully', 'success')
return redirect(url_for('show_catalog'))
else:
flash(u'Insufficient rights to delete this item', 'error')
return redirect(url_for('category_items',
category_id=category_id))
else:
user = getUserInfo(login_session['user_id'])
if login_session['user_id'] == deletedItem.user_id:
return render_template('delete_item.html',
item=deletedItem,
category=category,
categories=categories,
user=user)
else:
flash(u'Insufficient rights to delete this item', 'error')
return redirect(url_for('category_items',
category_id=category_id))
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=8000)
| ltpitt/item-catalog | itemcatalog.py | Python | mit | 26,568 |
import requests
import os
from bs4 import BeautifulSoup
def get_syllabus(course):
# returns course syllabus_body
url = base_url + 'courses/' + str(course) + '?include[]=syllabus_body'
return requests.get(url, headers=auth_headers).json()['syllabus_body']
def get_root_folder(course):
# returns folder id
url = base_url + 'courses/' + str(course) + '/folders/root'
return requests.get(url, headers=auth_headers).json()['id']
def get_subfolders(folder):
# returns list of subfolders
# https://canvas.instructure.com/doc/api/file.pagination.html
url = base_url + 'folders/' + str(folder) + '/folders?per_page=100'
return requests.get(url, headers=auth_headers).json()
def get_files(folder):
# returns list of files
url = base_url + 'folders/' + str(folder) + '/files?per_page=100'
return requests.get(url, headers=auth_headers).json()
def get_file_url(file):
# returns url from file object
url = base_url + 'files/' + str(file)
r = requests.get(url, headers=auth_headers).json()
return r['url'] if 'url' in r else ''
def download_file(source, url, program, course, file):
print(url)
r = requests.get(url)
print(source, r, program, course, file, '\n')
if not os.path.exists(program):
os.makedirs(program)
with open(program + '/' + course + ' ' + file, 'wb') as f:
for chunk in r.iter_content():
f.write(chunk)
base_url = 'https://samuelmerritt.test.instructure.com/api/v1/'
auth_headers = {'Authorization': 'Bearer <TOKEN>'}
canvas_courses_file = 'syllabi courses.csv'
for line in open(canvas_courses_file):
fields = line.split(',')
course_id = fields[0]
cn = fields[1].split('-') # 2013FS-OAK-GENERAL-NURSG-108-LEC1-1
course_name = cn[3] + '-' + cn[4] + '-' + cn[5] + '-' + cn[6] + '-' + cn[1] + '-' + cn[0]
program = fields[5] # ACADEMIC_NURS_U_BSN
# files
course_root_folder = get_root_folder(course_id)
folders = get_subfolders(course_root_folder)
for i in folders:
files = get_files(i['id'])
for j in files:
if 'syl' in j['display_name'].lower():
download_file('F', j['url'], program, course_name, j['display_name'])
# syllabus
syllabus = get_syllabus(course_id)
if syllabus is not None:
soup = BeautifulSoup(str(syllabus))
for link in soup.find_all('a'):
url = link.get('href')
file_name = link.get('title')
if 'download?verifier' in url and file_name not in ('Preview the document', 'View in a new window'):
print(url)
if 'courses' in url:
url = get_file_url(url.split('/')[6])
if url == '':
continue
download_file('S', url, program, course_name, file_name)
| dgrobani/py3-canvaslms-api | syllabi/syllabi_download_OLD.py | Python | mit | 2,925 |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''BonjourMeal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
'''
from django.contrib import admin
from django.urls import include, path
from bopis import views as bopis_views
urlpatterns = [
path('', bopis_views.landing_placeholder),
path('callback/', bopis_views.callback),
path('bopis/', include('bopis.urls')),
path('admin/', admin.site.urls),
]
| google-business-communications/bm-bonjour-meal-django-starter-code | bonjourmeal-codelab/full-sample/bmcodelab/urls.py | Python | apache-2.0 | 1,533 |
__author__ = 'tony'
from .person import Person
| PurpleSun/Facemash | model/__init__.py | Python | bsd-3-clause | 47 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from keystone.common.policies import base
consumer_policies = [
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_consumer',
check_str=base.RULE_ADMIN_REQUIRED,
description='Show OAUTH1 consumer details.',
operations=[{'path': '/v3/OS-OAUTH1/consumers/{consumer_id}',
'method': 'GET'}]),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_consumers',
check_str=base.RULE_ADMIN_REQUIRED,
description='List OAUTH1 consumers.',
operations=[{'path': '/v3/OS-OAUTH1/consumers',
'method': 'GET'}]),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_consumer',
check_str=base.RULE_ADMIN_REQUIRED,
description='Create OAUTH1 consumer.',
operations=[{'path': '/v3/OS-OAUTH1/consumers',
'method': 'POST'}]),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_consumer',
check_str=base.RULE_ADMIN_REQUIRED,
description='Update OAUTH1 consumer.',
operations=[{'path': '/v3/OS-OAUTH1/consumers/{consumer_id}',
'method': 'PATCH'}]),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_consumer',
check_str=base.RULE_ADMIN_REQUIRED,
description='Delete OAUTH1 consumer.',
operations=[{'path': '/v3/OS-OAUTH1/consumers/{consumer_id}',
'method': 'DELETE'}])
]
def list_rules():
return consumer_policies
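# Note (illustrative): base.IDENTITY is a format template applied to each
# operation name (typically 'identity:%s' in keystone), so the rules above
# register under names such as 'identity:get_consumer'; list_rules() simply
# returns the list for oslo.policy to enforce.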
| rajalokan/keystone | keystone/common/policies/consumer.py | Python | apache-2.0 | 2,092 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .partition_safety_check import PartitionSafetyCheck
class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck):
"""Safety check that waits for the replica build operation to finish. This
indicates that there is a replica that is going through the copy or is
providing data for building another replica. Bringing the node down will abort
this copy operation, which is typically expensive and involves data
movements.
:param kind: Polymorphic Discriminator
:type kind: str
:param partition_id:
:type partition_id: str
"""
_validation = {
'kind': {'required': True},
}
def __init__(self, partition_id=None):
super(WaitForInbuildReplicaSafetyCheck, self).__init__(partition_id=partition_id)
self.kind = 'WaitForInbuildReplica'
| AutorestCI/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/wait_for_inbuild_replica_safety_check.py | Python | mit | 1,281 |
# coding=utf-8
from builtins import object
import json
from django.core.urlresolvers import reverse
from rest_framework import serializers
from rest_framework_gis.serializers import (
GeoFeatureModelSerializer)
from realtime.models.earthquake import Earthquake, EarthquakeReport, \
EarthquakeMMIContour
from realtime.serializers.utilities import CustomSerializerMethodField
__author__ = 'Rizky Maulana Nugraha "lucernae" <lana.pcfre@gmail.com>'
__date__ = '19/06/15'
class EarthquakeReportSerializer(serializers.ModelSerializer):
def get_url(self, serializer_field, obj):
"""
:param serializer_field:
:type serializer_field: CustomSerializerMethodField
:param obj:
:type obj: EarthquakeReport
:return:
"""
relative_uri = reverse(
'realtime:earthquake_report_detail',
kwargs={
'shake_id': obj.earthquake.shake_id,
'source_type': obj.earthquake.source_type,
'language': obj.language})
if self.context and 'request' in self.context:
return self.context['request'].build_absolute_uri(relative_uri)
else:
return relative_uri
# auto bind to get_url method
url = CustomSerializerMethodField()
def get_shake_url(self, serializer_field, obj):
"""
:param serializer_field:
:type serializer_field: CustomSerializerMethodField
:param obj:
:type obj: EarthquakeReport
:return:
"""
relative_uri = reverse(
'realtime:earthquake_detail',
kwargs={
'shake_id': obj.earthquake.shake_id,
'source_type': obj.earthquake.source_type
})
if self.context and 'request' in self.context:
return self.context['request'].build_absolute_uri(relative_uri)
else:
return relative_uri
# auto bind to get_shake_url method
shake_url = CustomSerializerMethodField()
class Meta(object):
model = EarthquakeReport
fields = (
'url',
'shake_id',
'source_type',
'shake_url',
'language',
'report_pdf',
'report_image',
'report_thumbnail',
'report_map_filename'
)
class EarthquakeSerializer(serializers.ModelSerializer):
reports = EarthquakeReportSerializer(
many=True, required=False, write_only=False,
read_only=True)
def get_url(self, serializer_field, obj):
"""
:param serializer_field:
:type serializer_field: CustomSerializerMethodField
:param obj:
:type obj: Earthquake
:return:
"""
relative_uri = reverse(
'realtime:earthquake_detail',
kwargs={
'shake_id': obj.shake_id,
'source_type': obj.source_type})
if self.context and 'request' in self.context:
return self.context['request'].build_absolute_uri(relative_uri)
else:
return relative_uri
# auto bind to get_url method
url = CustomSerializerMethodField()
def get_shake_grid(self, serializer_field, obj):
"""
:param serializer_field:
:type serializer_field: CustomSerializerMethodField
:param obj:
:type obj: Earthquake
:return:
"""
if obj.shake_grid:
return obj.shake_grid.url
else:
return obj.shake_grid_download_url
shake_grid = CustomSerializerMethodField()
class Meta(object):
model = Earthquake
fields = (
'url',
'shake_id',
'shake_grid',
'mmi_output',
'magnitude',
'time',
'depth',
'location',
'location_description',
'felt',
'reports',
'hazard_path',
'source_type',
'event_id_formatted',
'shake_grid_download_url',
'mmi_layer_download_url',
'grid_xml_filename',
'mmi_layer_filename',
'mmi_layer_saved',
)
class EarthquakeGeoJsonSerializer(GeoFeatureModelSerializer):
def get_shake_grid(self, serializer_field, obj):
"""
:param serializer_field:
:type serializer_field: CustomSerializerMethodField
:param obj:
:type obj: Earthquake
:return:
"""
if obj.shake_grid:
return obj.shake_grid.url
else:
return obj.shake_grid_download_url
shake_grid = CustomSerializerMethodField()
class Meta(object):
model = Earthquake
geo_field = "location"
id = 'id'
fields = (
'shake_id',
'shake_grid',
# 'mmi_output',
'magnitude',
'time',
'depth',
'location',
'location_description',
'felt',
'source_type',
'event_id_formatted',
'grid_xml_filename',
'has_corrected',
'mmi_layer_saved'
)
class EarthquakeMMIContourGeoJSONSerializer(GeoFeatureModelSerializer):
class Meta(object):
model = EarthquakeMMIContour
geo_field = 'geometry'
id = 'id'
def get_properties(self, instance, fields):
return json.loads(instance.properties)
| AIFDR/inasafe-django | django_project/realtime/serializers/earthquake_serializer.py | Python | bsd-2-clause | 5,474 |
from pandac.PandaModules import CollisionSphere, CollisionNode, CollisionTube
from pandac.PandaModules import TextNode, NodePath, Vec3, Point3
from direct.distributed.ClockDelta import globalClockDelta
from direct.distributed import DistributedObject
from direct.showbase import RandomNumGen
from direct.showbase import PythonUtil
from direct.interval.IntervalGlobal import Sequence, Parallel, ActorInterval
from direct.interval.FunctionInterval import Wait
from otp.avatar import Emote
from otp.otpbase import OTPGlobals
from toontown.toonbase import TTLocalizer
from toontown.parties import PartyGlobals
from toontown.minigame.MinigameRulesPanel import MinigameRulesPanel
from toontown.toontowngui import TTDialog
from toontown.parties.JellybeanRewardGui import JellybeanRewardGui
from toontown.parties.PartyUtils import getPartyActivityIcon, getCenterPosFromGridSize
class DistributedPartyActivity(DistributedObject.DistributedObject):
deferFor = 1 # We need to defer the generation of activities 1 frame, as the party must generate first
def __init__(self, cr, activityId, activityType, wantLever = False, wantRewardGui = False):
DistributedObject.DistributedObject.__init__(self, cr)
self.activityId = activityId
self.activityName = PartyGlobals.ActivityIds.getString(self.activityId)
self.activityType = activityType
self.wantLever = wantLever
self.wantRewardGui = wantRewardGui
self.messageGui = None
self.rewardGui = None
self.toonIds = []
self._toonId2ror = {}
childName = '%s' % self
childName = childName[childName.rfind('.DistributedParty') + len('.DistributedParty'):childName.rfind('Activity instance')]
if not hasattr(base, 'partyActivityDict'):
base.partyActivityDict = {}
base.partyActivityDict[childName] = self
self.root = NodePath('root')
self.rulesDoneEvent = 'rulesDone'
self.modelCount = 500
self.cleanupActions = []
self.usesSmoothing = 0
self.usesLookAround = 0
self.difficultyOverride = None
self.trolleyZoneOverride = None
self._localToonRequestStatus = None
return
def localToonExiting(self):
self._localToonRequestStatus = PartyGlobals.ActivityRequestStatus.Exiting
def localToonJoining(self):
self._localToonRequestStatus = PartyGlobals.ActivityRequestStatus.Joining
def d_toonJoinRequest(self):
if self._localToonRequestStatus is None:
self.localToonJoining()
self.sendUpdate('toonJoinRequest')
return
def d_toonExitRequest(self):
if self._localToonRequestStatus is None:
self.localToonExiting()
self.sendUpdate('toonExitRequest')
return
def d_toonExitDemand(self):
self.localToonExiting()
self.sendUpdate('toonExitDemand')
def joinRequestDenied(self, reason):
self._localToonRequestStatus = None
return
def exitRequestDenied(self, reason):
self._localToonRequestStatus = None
return
def handleToonJoined(self, toonId):
self.notify.error('BASE: handleToonJoined should be overridden %s' % self.activityName)
def handleToonExited(self, toonId):
self.notify.error('BASE: handleToonExited should be overridden %s' % self.activityName)
def handleToonDisabled(self, toonId):
self.notify.error('BASE: handleToonDisabled should be overridden %s' % self.activityName)
def setToonsPlaying(self, toonIds):
exitedToons, joinedToons = self.getToonsPlayingChanges(self.toonIds, toonIds)
self.setToonIds(toonIds)
self._processExitedToons(exitedToons)
self._processJoinedToons(joinedToons)
def _processExitedToons(self, exitedToons):
for toonId in exitedToons:
if toonId != base.localAvatar.doId or toonId == base.localAvatar.doId and self.isLocalToonRequestStatus(PartyGlobals.ActivityRequestStatus.Exiting):
toon = self.getAvatar(toonId)
if toon is not None:
self.ignore(toon.uniqueName('disable'))
self.handleToonExited(toonId)
if toonId == base.localAvatar.doId:
self._localToonRequestStatus = None
if toonId in self._toonId2ror:
self.cr.relatedObjectMgr.abortRequest(self._toonId2ror[toonId])
del self._toonId2ror[toonId]
return
def _processJoinedToons(self, joinedToons):
for toonId in joinedToons:
if toonId != base.localAvatar.doId or toonId == base.localAvatar.doId and self.isLocalToonRequestStatus(PartyGlobals.ActivityRequestStatus.Joining):
if toonId not in self._toonId2ror:
request = self.cr.relatedObjectMgr.requestObjects([toonId], allCallback=self._handlePlayerPresent)
if toonId in self._toonId2ror:
del self._toonId2ror[toonId]
else:
self._toonId2ror[toonId] = request
def _handlePlayerPresent(self, toons):
toon = toons[0]
toonId = toon.doId
if toonId in self._toonId2ror:
del self._toonId2ror[toonId]
else:
self._toonId2ror[toonId] = None
self._enableHandleToonDisabled(toonId)
self.handleToonJoined(toonId)
if toonId == base.localAvatar.doId:
self._localToonRequestStatus = None
return
def _enableHandleToonDisabled(self, toonId):
toon = self.getAvatar(toonId)
if toon is not None:
self.acceptOnce(toon.uniqueName('disable'), self.handleToonDisabled, [toonId])
else:
self.notify.warning('BASE: unable to get handle to toon with toonId:%d. Hook for handleToonDisabled not set.' % toonId)
return
def isLocalToonRequestStatus(self, requestStatus):
return self._localToonRequestStatus == requestStatus
def setToonIds(self, toonIds):
self.toonIds = toonIds
def getToonsPlayingChanges(self, oldToonIds, newToonIds):
oldToons = set(oldToonIds)
newToons = set(newToonIds)
exitedToons = oldToons.difference(newToons)
joinedToons = newToons.difference(oldToons)
return (list(exitedToons), list(joinedToons))
def setUsesSmoothing(self):
self.usesSmoothing = True
def setUsesLookAround(self):
self.usesLookAround = True
def getInstructions(self):
return TTLocalizer.DefaultPartyActivityInstructions
def getParentNodePath(self):
if hasattr(base.cr.playGame, 'hood') and base.cr.playGame.hood and hasattr(base.cr.playGame.hood, 'loader') and base.cr.playGame.hood.loader and hasattr(base.cr.playGame.hood.loader, 'geom') and base.cr.playGame.hood.loader.geom:
return base.cr.playGame.hood.loader.geom
else:
self.notify.warning('Hood or loader not created, defaulting to render')
return render
def __createRandomNumGen(self):
self.notify.debug('BASE: self.doId=0x%08X' % self.doId)
self.randomNumGen = RandomNumGen.RandomNumGen(self.doId)
def destroy(self = self):
self.notify.debug('BASE: destroying random num gen')
del self.randomNumGen
self.cleanupActions.append(destroy)
def generate(self):
DistributedObject.DistributedObject.generate(self)
self.notify.debug('BASE: generate, %s' % self.getTitle())
self.__createRandomNumGen()
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
self.notify.debug('BASE: announceGenerate %s' % self.activityName)
self.root.setName(self.activityName + 'Root')
centeredX, centeredY = getCenterPosFromGridSize(self.x, self.y, PartyGlobals.ActivityInformationDict[self.activityId]['gridsize'])
self.root.setPos(centeredX, centeredY, 0.0)
self.root.setH(self.h)
self.normalExit = True
if self.wantLever:
self.leverTriggerEvent = self.uniqueName('leverTriggerEvent')
self.load()
def cleanup(self = self):
self.notify.debug('BASE: cleanup: normalExit=%s' % self.normalExit)
base.cr.renderFrame()
if self.normalExit:
self.sendUpdate('toonExitRequest')
self.cleanupActions.append(cleanup)
def disable(self):
self.notify.debug('BASE: disable')
DistributedObject.DistributedObject.disable(self)
rorToonIds = self._toonId2ror.keys()
for toonId in rorToonIds:
self.cr.relatedObjectMgr.abortRequest(self._toonId2ror[toonId])
del self._toonId2ror[toonId]
self.ignore(self.messageDoneEvent)
if self.messageGui is not None and not self.messageGui.isEmpty():
self.messageGui.cleanup()
self.messageGui = None
return
def delete(self):
self.notify.debug('BASE: delete')
self.unload()
self.ignoreAll()
DistributedObject.DistributedObject.delete(self)
def load(self):
self.notify.debug('BASE: load')
self.loadSign()
if self.wantLever:
self.loadLever()
if self.wantRewardGui:
self.showRewardDoneEvent = self.uniqueName('showRewardDoneEvent')
self.rewardGui = JellybeanRewardGui(self.showRewardDoneEvent)
self.messageDoneEvent = self.uniqueName('messageDoneEvent')
self.root.reparentTo(self.getParentNodePath())
self._enableCollisions()
def loadSign(self):
actNameForSign = self.activityName
if self.activityId == PartyGlobals.ActivityIds.PartyJukebox40:
actNameForSign = PartyGlobals.ActivityIds.getString(PartyGlobals.ActivityIds.PartyJukebox)
elif self.activityId == PartyGlobals.ActivityIds.PartyDance20:
actNameForSign = PartyGlobals.ActivityIds.getString(PartyGlobals.ActivityIds.PartyDance)
self.sign = self.root.attachNewNode('%sSign' % self.activityName)
self.signModel = self.party.defaultSignModel.copyTo(self.sign)
self.signFlat = self.signModel.find('**/sign_flat')
self.signFlatWithNote = self.signModel.find('**/sign_withNote')
self.signTextLocator = self.signModel.find('**/signText_locator')
textureNodePath = getPartyActivityIcon(self.party.activityIconsModel, actNameForSign)
textureNodePath.setPos(0.0, -0.02, 2.2)
textureNodePath.setScale(2.35)
textureNodePath.copyTo(self.signFlat)
textureNodePath.copyTo(self.signFlatWithNote)
text = TextNode('noteText')
text.setTextColor(0.2, 0.1, 0.7, 1.0)
text.setAlign(TextNode.ACenter)
text.setFont(OTPGlobals.getInterfaceFont())
text.setWordwrap(10.0)
text.setText('')
self.noteText = self.signFlatWithNote.attachNewNode(text)
self.noteText.setPosHpr(self.signTextLocator, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0)
self.noteText.setScale(0.2)
self.signFlatWithNote.stash()
self.signTextLocator.stash()
def loadLever(self):
self.lever = self.root.attachNewNode('%sLever' % self.activityName)
self.leverModel = self.party.defaultLeverModel.copyTo(self.lever)
self.controlColumn = NodePath('cc')
column = self.leverModel.find('**/column')
column.getChildren().reparentTo(self.controlColumn)
self.controlColumn.reparentTo(column)
self.stickHinge = self.controlColumn.attachNewNode('stickHinge')
self.stick = self.party.defaultStickModel.copyTo(self.stickHinge)
self.stickHinge.setHpr(0.0, 90.0, 0.0)
self.stick.setHpr(0, -90.0, 0)
self.stick.flattenLight()
self.bottom = self.leverModel.find('**/bottom')
self.bottom.wrtReparentTo(self.controlColumn)
self.bottomPos = self.bottom.getPos()
cs = CollisionSphere(0.0, 1.35, 2.0, 1.0)
cs.setTangible(False)
cn = CollisionNode(self.leverTriggerEvent)
cn.addSolid(cs)
cn.setIntoCollideMask(OTPGlobals.WallBitmask)
self.leverTrigger = self.root.attachNewNode(cn)
self.leverTrigger.reparentTo(self.lever)
self.leverTrigger.stash()
cs = CollisionTube(0.0, 2.7, 0.0, 0.0, 2.7, 3.0, 1.2)
cn = CollisionNode('levertube')
cn.addSolid(cs)
cn.setIntoCollideMask(OTPGlobals.WallBitmask)
self.leverTube = self.leverModel.attachNewNode(cn)
host = base.cr.doId2do.get(self.party.partyInfo.hostId)
if host is None:
self.notify.debug('%s loadLever : Host has left the game before lever could be created.' % self.activityName)
return
scale = host.getGeomNode().getChild(0).getSz(render)
self.leverModel.setScale(scale)
self.controlColumn.setPos(0, 0, 0)
host.setPosHpr(self.lever, 0, 0, 0, 0, 0, 0)
host.pose('leverNeutral', 0)
host.update()
pos = host.rightHand.getPos(self.controlColumn)
self.controlColumn.setPos(pos[0], pos[1], pos[2] - 1)
self.bottom.setZ(host, 0.0)
self.bottom.setPos(self.bottomPos[0], self.bottomPos[1], self.bottom.getZ())
lookAtPoint = Point3(0.3, 0, 0.1)
lookAtUp = Vec3(0, -1, 0)
self.stickHinge.lookAt(host.rightHand, lookAtPoint, lookAtUp)
host.play('walk')
host.update()
return
def unloadLever(self):
self.lever.removeNode()
self.leverModel.removeNode()
self.controlColumn.removeNode()
self.stickHinge.removeNode()
self.stick.removeNode()
self.bottom.removeNode()
self.leverTrigger.removeNode()
self.leverTube.removeNode()
del self.bottomPos
del self.lever
del self.leverModel
del self.controlColumn
del self.stickHinge
del self.stick
del self.bottom
del self.leverTrigger
del self.leverTube
def _enableCollisions(self):
if self.wantLever:
self.leverTrigger.unstash()
self.accept('enter%s' % self.leverTriggerEvent, self._leverPulled)
def _disableCollisions(self):
if self.wantLever:
self.leverTrigger.stash()
self.ignore('enter%s' % self.leverTriggerEvent)
def _leverPulled(self, collEntry):
self.notify.debug('_leverPulled : Someone pulled the lever!!! ')
if self.activityType == PartyGlobals.ActivityTypes.HostInitiated and base.localAvatar.doId != self.party.partyInfo.hostId:
return False
return True
def getToonPullingLeverInterval(self, toon):
walkTime = 0.2
reach = ActorInterval(toon, 'leverReach', playRate=2.0)
pull = ActorInterval(toon, 'leverPull', startFrame=6)
origPos = toon.getPos(render)
origHpr = toon.getHpr(render)
newPos = self.lever.getPos(render)
newHpr = self.lever.getHpr(render)
origHpr.setX(PythonUtil.fitSrcAngle2Dest(origHpr[0], newHpr[0]))
toon.setPosHpr(origPos, origHpr)
reachAndPull = Sequence(ActorInterval(toon, 'walk', loop=True, duration=walkTime - reach.getDuration()), reach, pull)
leverSeq = Sequence(Wait(walkTime + reach.getDuration() - 0.1), self.stick.hprInterval(0.55, Point3(0.0, 25.0, 0.0), Point3(0.0, 0.0, 0.0)), Wait(0.3), self.stick.hprInterval(0.4, Point3(0.0, 0.0, 0.0), Point3(0.0, 25.0, 0.0)))
returnSeq = Sequence(Parallel(toon.posInterval(walkTime, newPos, origPos), toon.hprInterval(walkTime, newHpr, origHpr), leverSeq, reachAndPull))
return returnSeq
def showMessage(self, message, endState = 'walk'):
base.cr.playGame.getPlace().fsm.request('activity')
self.acceptOnce(self.messageDoneEvent, self.__handleMessageDone)
self.messageGui = TTDialog.TTGlobalDialog(doneEvent=self.messageDoneEvent, message=message, style=TTDialog.Acknowledge)
self.messageGui.endState = endState
def __handleMessageDone(self):
self.ignore(self.messageDoneEvent)
if hasattr(base.cr.playGame.getPlace(), 'fsm'):
if self.messageGui and hasattr(self.messageGui, 'endState'):
self.notify.info('__handleMessageDone (endState=%s)' % self.messageGui.endState)
base.cr.playGame.getPlace().fsm.request(self.messageGui.endState)
else:
self.notify.warning("messageGui has no endState, defaulting to 'walk'")
base.cr.playGame.getPlace().fsm.request('walk')
if self.messageGui is not None and not self.messageGui.isEmpty():
self.messageGui.cleanup()
self.messageGui = None
return
def showJellybeanReward(self, earnedAmount, jarAmount, message):
if not self.isLocalToonInActivity() or base.localAvatar.doId in self.getToonIdsAsList():
messenger.send('DistributedPartyActivity-showJellybeanReward')
base.cr.playGame.getPlace().fsm.request('activity')
self.acceptOnce(self.showRewardDoneEvent, self.__handleJellybeanRewardDone)
self.rewardGui.showReward(earnedAmount, jarAmount, message)
def __handleJellybeanRewardDone(self):
self.ignore(self.showRewardDoneEvent)
self.handleRewardDone()
def handleRewardDone(self):
if base.cr.playGame.getPlace() and hasattr(base.cr.playGame.getPlace(), 'fsm'):
base.cr.playGame.getPlace().fsm.request('walk')
def setSignNote(self, note):
self.noteText.node().setText(note)
if len(note.strip()) > 0:
self.signFlat.stash()
self.signFlatWithNote.unstash()
self.signTextLocator.unstash()
else:
self.signFlat.unstash()
self.signFlatWithNote.stash()
self.signTextLocator.stash()
def unload(self):
self.notify.debug('BASE: unload')
self.finishRules()
self._disableCollisions()
self.signModel.removeNode()
del self.signModel
self.sign.removeNode()
del self.sign
self.ignoreAll()
if self.wantLever:
self.unloadLever()
self.root.removeNode()
del self.root
del self.activityId
del self.activityName
del self.activityType
del self.wantLever
del self.messageGui
if self.rewardGui is not None:
self.rewardGui.destroy()
del self.rewardGui
if hasattr(self, 'toonIds'):
del self.toonIds
del self.rulesDoneEvent
del self.modelCount
del self.cleanupActions
del self.usesSmoothing
del self.usesLookAround
del self.difficultyOverride
del self.trolleyZoneOverride
if hasattr(base, 'partyActivityDict'):
del base.partyActivityDict
return
def setPartyDoId(self, partyDoId):
self.party = base.cr.doId2do[partyDoId]
def setX(self, x):
self.x = x
def setY(self, y):
self.y = y
def setH(self, h):
self.h = h
def setState(self, newState, timestamp):
if newState == 'Active':
self.activityStartTime = globalClockDelta.networkToLocalTime(timestamp)
def turnOffSmoothingOnGuests(self):
for toonId in self.toonIds:
avatar = self.getAvatar(toonId)
if avatar:
if not self.usesSmoothing:
avatar.stopSmooth()
if not self.usesLookAround:
avatar.stopLookAround()
def getAvatar(self, toonId):
if self.cr.doId2do.has_key(toonId):
return self.cr.doId2do[toonId]
else:
self.notify.warning('BASE: getAvatar: No avatar in doId2do with id: ' + str(toonId))
return None
return None
def getAvatarName(self, toonId):
avatar = self.getAvatar(toonId)
if avatar:
return avatar.getName()
else:
return 'Unknown'
def isLocalToonInActivity(self):
result = False
place = base.cr.playGame.getPlace()
if place and place.__class__.__name__ == 'Party' and hasattr(place, 'fsm') and place.fsm:
result = place.fsm.getCurrentState().getName() == 'activity'
return result
def getToonIdsAsList(self):
return self.toonIds
def startRules(self, timeout = PartyGlobals.DefaultRulesTimeout):
self.notify.debug('BASE: startRules')
self.accept(self.rulesDoneEvent, self.handleRulesDone)
self.rulesPanel = MinigameRulesPanel('PartyRulesPanel', self.getTitle(), self.getInstructions(), self.rulesDoneEvent, timeout)
base.setCellsAvailable(base.bottomCells + [base.leftCells[0], base.rightCells[1]], False)
self.rulesPanel.load()
self.rulesPanel.enter()
def finishRules(self):
self.notify.debug('BASE: finishRules')
self.ignore(self.rulesDoneEvent)
if hasattr(self, 'rulesPanel'):
self.rulesPanel.exit()
self.rulesPanel.unload()
del self.rulesPanel
base.setCellsAvailable(base.bottomCells + [base.leftCells[0], base.rightCells[1]], True)
def handleRulesDone(self):
self.notify.error('BASE: handleRulesDone should be overridden')
def getTitle(self):
return TTLocalizer.PartyActivityNameDict[self.activityId]['generic']
def local2ActivityTime(self, timestamp):
return timestamp - self.activityStartTime
def activity2LocalTime(self, timestamp):
return timestamp + self.activityStartTime
def getCurrentActivityTime(self):
return self.local2ActivityTime(globalClock.getFrameTime())
def disableEmotes(self):
Emote.globalEmote.disableAll(base.localAvatar)
def enableEmotes(self):
Emote.globalEmote.releaseAll(base.localAvatar)
| ToonTownInfiniteRepo/ToontownInfinite | toontown/parties/DistributedPartyActivity.py | Python | mit | 21,955 |
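DistributedPartyActivity deliberately leaves handleToonJoined, handleToonExited, handleToonDisabled and handleRulesDone as notify.error stubs, so every concrete activity must override them. Below is a minimal, hypothetical subclass sketch assuming the names imported by the module above are in scope; the class name is invented and the specific ActivityIds/ActivityTypes constants are only assumed to exist in PartyGlobals.
class DistributedPartyExampleActivity(DistributedPartyActivity):

    def __init__(self, cr):
        # Activity id/type constants are assumptions for illustration only.
        DistributedPartyActivity.__init__(
            self, cr,
            activityId=PartyGlobals.ActivityIds.PartyTrampoline,
            activityType=PartyGlobals.ActivityTypes.GuestInitiated)

    def handleToonJoined(self, toonId):
        # Show the rules panel to the local toon once the join is confirmed.
        if toonId == base.localAvatar.doId:
            self.startRules()

    def handleToonExited(self, toonId):
        pass

    def handleToonDisabled(self, toonId):
        # Treat a disconnect like a normal exit so the activity does not stall.
        self.handleToonExited(toonId)

    def handleRulesDone(self):
        self.finishRules()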
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
bkr group-modify: Modify a group
================================
.. program:: bkr group-modify
Synopsis
--------
| :program:`bkr group-modify` [*options*] <group-name>
Description
-----------
Modify an existing group.
Options
-------
.. option:: --display-name
New display name of the group.
.. option:: --group-name
New name of the group.
.. option:: --description
New description of the group.
.. option:: --add-member
Add a user to the group. This option can be specified multiple
times to add more than one user to the group. Should a specified
user fail to be added, all subsequent users are ignored.
.. option:: --remove-member
Remove a user from the group. This option can be specified multiple
times to remove more than one user from the group. Should a specified
user fail to be removed, all subsequent users are ignored.
.. option:: --grant-owner
Grant group owner permissions to an existing group member.
.. option:: --revoke-owner
Remove group owner permissions from an existing group owner.
.. option:: --root-password
Root password for group jobs.
Common :program:`bkr` options are described in the :ref:`Options
<common-options>` section of :manpage:`bkr(1)`.
Exit status
-----------
Non-zero on error, otherwise zero.
Examples
--------
Modify an existing group 'mygroup' with the new display name 'A new group'::
bkr group-modify --display-name="A new group" mygroup
Modify an existing group 'mygroup' with the new display name 'A new group'
and new group name 'mynewgroup'::
bkr group-modify --display-name="A new group" --group-name="mynewgroup" mygroup
Add a user with username 'user1' to the group 'mygroup'::
bkr group-modify --add-member user1 mygroup
Remove an existing group member with username 'user1' from the group 'mygroup'::
bkr group-modify --remove-member user1 mygroup
Add an existing group member with username 'user1' as an owner of group 'mygroup'::
bkr group-modify --grant-owner user1 mygroup
Revoke group owner rights from an existing group owner of group 'mygroup' with username 'user1'::
bkr group-modify --revoke-owner user1 mygroup
See also
--------
:manpage:`bkr(1)`
"""
from bkr.client import BeakerCommand
class Group_Modify(BeakerCommand):
"""Modify an existing Group"""
enabled = True
def options(self):
self.parser.usage = "%%prog %s [options] <group-name>" % self.normalized_name
self.parser.add_option(
"--display-name",
help="New display name of the group",
)
self.parser.add_option(
"--group-name",
help="New name of the group",
)
self.parser.add_option(
"--description",
help="New description of the group",
)
self.parser.add_option(
"--add-member",
action='append',
default=[],
help="Username of the member to be added to the group",
)
self.parser.add_option(
"--remove-member",
action='append',
default=[],
help="Username of the member to be removed from the group",
)
self.parser.add_option(
"--grant-owner",
action='append',
default=[],
help="Username of the member to grant owner rights",
)
self.parser.add_option(
"--revoke-owner",
action='append',
default=[],
help="Username of the member to revoke owner rights",
)
self.parser.add_option(
"--root-password",
help="Root password used for group jobs",
)
def run(self, *args, **kwargs):
if len(args) != 1:
self.parser.error('Exactly one group name must be specified.')
group = args[0]
display_name = kwargs.get('display_name', None)
group_name = kwargs.get('group_name', None)
description = kwargs.get('description', None)
add_member = kwargs.pop('add_member', [])
remove_member = kwargs.pop('remove_member', [])
grant_owner = kwargs.get('grant_owner', None)
revoke_owner = kwargs.get('revoke_owner', None)
password = kwargs.get('root_password', None)
if not any([group_name, display_name, description, add_member, grant_owner,
                    revoke_owner, password, remove_member]):
self.parser.error('Please specify an attribute to modify.')
self.set_hub(**kwargs)
requests_session = self.requests_session()
for member in add_member:
url = 'groups/%s/members/' % group
res = requests_session.post(url, json={'user_name': member})
res.raise_for_status()
for member in remove_member:
url = 'groups/%s/members/?user_name=%s' % (group, member)
res = requests_session.delete(url)
res.raise_for_status()
if grant_owner:
for member in grant_owner:
url = 'groups/%s/owners/' % group
res = requests_session.post(url, json={'user_name': member})
res.raise_for_status()
if revoke_owner:
for member in revoke_owner:
url = 'groups/%s/owners/?user_name=%s' % (group, member)
res = requests_session.delete(url)
res.raise_for_status()
group_attrs = {}
if group_name:
group_attrs['group_name'] = group_name
if display_name:
group_attrs['display_name'] = display_name
if description:
group_attrs['description'] = description
if password:
group_attrs['root_password'] = password
if group_attrs:
res = requests_session.patch('groups/%s' % group, json=group_attrs)
res.raise_for_status()
| jtoppins/beaker | Client/src/bkr/client/commands/cmd_group_modify.py | Python | gpl-2.0 | 6,192 |
#!/usr/bin/env python
import commands
class redis_monitor:
redis_bin='/opt/redis/bin/redis-cli'
def __init__(self,password,port,item):
self.Password = password
self.Port = int(port)
self.items = item
def Monitor_items(self):
try:
code,res = commands.getstatusoutput("%s -a %s -p %d info | grep %s | awk -F: '{print$2}'" %(redis_monitor.redis_bin,self.Password,self.Port,self.items))
except ValueError,e:
print e
print 'error'
else:
res_m = int(res)/1024/1024
return res_m
if __name__ == '__main__':
obj=redis_monitor('quB1BY3njv0e1212b7BFw92',6779,'used_memory_rss')
Redis_use_Mem=obj.Monitor_items()
print Redis_use_Mem
| woerwin/learning | kjyw/nagios/nagios_client/sh/check_redis.py | Python | mit | 661 |
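A hedged sketch of turning the class above into a Nagios-style check (Python 2, matching the script): the threshold values, the placeholder password and the 0/1/2 exit-code convention are assumptions, not part of the original file.
import sys

def check_redis_memory(password, port, warn_mb=1024, crit_mb=2048):
    used = redis_monitor(password, port, 'used_memory_rss').Monitor_items()
    if used >= crit_mb:
        print 'CRITICAL - redis RSS %d MB' % used
        return 2
    elif used >= warn_mb:
        print 'WARNING - redis RSS %d MB' % used
        return 1
    print 'OK - redis RSS %d MB' % used
    return 0

if __name__ == '__main__':
    sys.exit(check_redis_memory('<password>', 6779))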
from django.core.management.base import BaseCommand, CommandError
from apps.chat.models import Chats
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def __init__(self, *args, **kwargs):
self.result = {}
super(Command, self).__init__(*args, **kwargs)
CHAT_LIFETIME = 1 # chat lifetime in days
help = 'Close and delete old chats'
def handle(self, *args, **options):
try:
# delete closed chats
deleted_chats = Chats.delete_closed_chat()
# auto close chats by time limit
chats = Chats.objects.get_old_chat(self.CHAT_LIFETIME)
closed_chats = len(chats)
for chat in chats:
chat.pre_delete()
msg = str(closed_chats) + ' chat(s) closed, ' + str(deleted_chats) + ' chat(s) deleted!'
logger.setLevel(20) # INFO
logger.info(msg)
self.result = {'level': logger.level, 'msg':msg}
except Exception as err:
# @TODO Fix. Error: Chats matching query does not exist.
msg = 'Error: ' + str(err)
logger.setLevel(40) # ERROR
logger.error(msg)
self.result = {'level': logger.level, 'msg':msg}
| MySmile/sfchat | apps/adminpanel/management/commands/clearchats.py | Python | bsd-3-clause | 1,257 |
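The command above is exposed to `manage.py` under its module name, `clearchats`. A small sketch of triggering it from code: `call_command` is the standard Django entry point, while the wrapper function and its scheduling are illustrative only.
from django.core.management import call_command

def run_chat_cleanup():
    # Equivalent to `python manage.py clearchats`, e.g. from a cron job
    # or a periodic Celery task.
    call_command('clearchats')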
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common.exceptions import NeutronException
class NiblickException(NeutronException):
pass
class NoMoreResources(NiblickException):
message = _('There are no available resources of type "%(resource_type)s"')
class WrongResourceId(NiblickException):
message = _('Wrong resource ID "%(resource_id)s"')
class WrongObjectId(NiblickException):
message = _('Wrong object ID "%(object_id)s"')
| Brocade-OpenSource/OpenStack-DNRM-Neutron | neutron/plugins/niblick/exceptions.py | Python | apache-2.0 | 1,102 |
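A short sketch of how these exceptions are meant to be used, assuming the classes above are in scope: NeutronException interpolates `message` with the keyword arguments passed to the constructor. The allocator function and pool structure below are hypothetical.
def allocate_resource(pool, resource_type):
    # `pool` maps resource types to lists of free resource ids (illustrative).
    if not pool.get(resource_type):
        raise NoMoreResources(resource_type=resource_type)
    return pool[resource_type].pop()

try:
    allocate_resource({}, 'loadbalancer')
except NiblickException as exc:
    # Prints: There are no available resources of type "loadbalancer"
    print(exc)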
# -*- coding: utf-8 -*-
"""
***************************************************************************
RandomHR.py
---------------------
Date : May 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'May 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import math
import random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from processing.core.Processing import Processing
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
try:
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterNumber import ParameterNumber
from processing.outputs.OutputVector import OutputVector
from processing.outputs.OutputHTML import OutputHTML
from processing.outputs.OutputFile import OutputFile
except:
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
from processing.core.outputs import OutputHTML
from processing.core.outputs import OutputFile
from processing.tools import dataobjects, vector
class RandomHR(GeoAlgorithm):
HR_LAYER = 'HR_LAYER'
STUDY_LAYER = 'STUDY_LAYER'
ITERATIONS = 'ITERATIONS'
RANDOM_HR = 'RANDOM_HR'
HTML = 'HTML'
RAW_DATA = 'RAW_DATA'
SUMMARY_DATA = 'SUMMARY_DATA'
def defineCharacteristics(self):
self.name = 'Random HR'
self.group = 'Random HR/Path'
self.addParameter(ParameterVector(self.HR_LAYER,
'Home ranges layer', [ParameterVector.VECTOR_TYPE_POLYGON]))
self.addParameter(ParameterVector(self.STUDY_LAYER,
'Study area layer', [ParameterVector.VECTOR_TYPE_POLYGON]))
self.addParameter(ParameterNumber(self.ITERATIONS,
'Number of iterations', 1, 999, 10))
self.addOutput(OutputVector(self.RANDOM_HR, 'Random HR'))
self.addOutput(OutputHTML(self.HTML, 'Random HR results'))
self.addOutput(OutputFile(self.RAW_DATA, 'Raw output'))
self.addOutput(OutputFile(self.SUMMARY_DATA, 'Output summary'))
def processAlgorithm(self, progress):
hrLayer = dataobjects.getObjectFromUri(
self.getParameterValue(self.HR_LAYER))
studyLayer = dataobjects.getObjectFromUri(
self.getParameterValue(self.STUDY_LAYER))
iterations = int(self.getParameterValue(self.ITERATIONS))
toolLog = self.getOutputValue(self.HTML)
rawFile = self.getOutputValue(self.RAW_DATA)
sumFile = self.getOutputValue(self.SUMMARY_DATA)
if studyLayer.featureCount() != 1:
raise GeoAlgorithmExecutionException(
'The study area layer should contain exactly one polygon or '
'multipolygon.')
# prepare frame polygon
f = studyLayer.getFeatures(QgsFeatureRequest().setFilterFid(0)).next()
rect = f.geometry().boundingBox()
outside = QgsGeometry().fromRect(rect).difference(f.geometry())
# generate output by copying features from home ranges layer
crs = hrLayer.dataProvider().crs().authid()
self.layer = QgsVectorLayer('MultiPolygon?crs=%s' % crs, 'tmp', 'memory')
provider = self.layer.dataProvider()
# also calculate areas
areas = []
da = QgsDistanceArea()
features = vector.features(hrLayer)
for f in features:
provider.addFeatures([f])
areas.append(da.measure(f.geometry()))
# analyze source overlaps
self.overlaps = []
self.overlapsTotal = []
self.overlaps += self._calculateOverlaps()
self.overlapsTotal += [self._sum2d(self.overlaps[0])]
html = '<table border="1">'
html += '<tr><td colspan="3">'
html += 'Number of homeranges: %d' % len(areas)
html += '</td></tr>'
html += '<tr><td colspan="3">'
html += 'Total area of the homeranges: %.3f' % sum(areas)
html += '</td></tr>'
html += '<tr><td></td><td>total overlap area</td><td>SD</td></tr>'
html += '<tr><td>observed</td><td>%.3f</td><td>n/a</td></tr>' % self.overlapsTotal[0]
total = 100.0 / float(iterations)
for i in xrange(iterations):
for f in self.layer.getFeatures():
sticksOut = True
while sticksOut:
geom = self._rotate(f.geometry())
tries = 0
while sticksOut and tries < 50:
geom = self._move(geom, rect)
sticksOut = outside.intersects(geom)
tries += 1
provider.changeGeometryValues({f.id(): geom})
self.overlaps += self._calculateOverlaps()
overlap = self._sum2d(self.overlaps[len(self.overlaps) - 1])
self.overlapsTotal += [overlap]
html += '<tr><td>iteration %d</td><td>%.3f</td><td>n/a</td></tr>' % (i + 1, overlap)
progress.setPercentage(int((i + 1) * total))
(mean, sd) = self._calculateStats()
html += '<tr><td>mean</td><td>%.3f</td><td>%.3f</td></tr>' % (mean, sd)
html += '</table>'
html += '<h1>Result</h1>'
dist = self.overlapsTotal[0] - mean
if dist > 0:
t = 'more'
else:
t = 'less'
html += '<p>Distance between the observed and randomized value is: %0.3f (the observed one is %s).</p>' % (dist, t)
html += '<p>The standard deviation is: %.3f.</p>' % sd
html += '<p>The last iteration result has been saved.</p>'
fl = open(toolLog, 'w')
fl.write(html)
fl.close()
fields = QgsFields()
fields.append(QgsField('id', QVariant.Int, '', 10, 0))
writer = self.getOutputFromName(self.RANDOM_HR).getVectorWriter(
fields, QGis.WKBMultiPolygon, hrLayer.dataProvider().crs())
f = QgsFeature()
f.initAttributes(1)
f.setFields(fields)
features = vector.features(self.layer)
for feature in features:
f.setGeometry(feature.geometry())
f.setAttribute('id', feature.id())
writer.addFeature(f)
del writer
self.writeRaw(rawFile, hrLayer, studyLayer, areas)
self.writeSummary(sumFile, hrLayer, studyLayer)
def _calculateOverlaps(self):
# collect the geometries
polygons = []
for f in self.layer.getFeatures():
geom = QgsGeometry(f.geometry())
polygons.append(geom)
# calculate
result = []
da = QgsDistanceArea()
for i in range(len(polygons)):
tmp = []
for j in range(i + 1, len(polygons)):
if polygons[i].intersects(polygons[j]):
overlap = da.measure(polygons[i].intersection(polygons[j]))
else:
overlap = 0.0
tmp.append(overlap)
result += [tmp]
return [result]
def _sum2d(self, data):
k = 0
for i in data:
for j in i:
k += j
return k
def _rotate(self, geom):
# randomize the angle
angle = random.uniform(0, 2 * math.pi)
sina = math.sin(angle)
cosa = math.cos(angle)
i = 0
        # create unique dict of vertices because of overlapping the
# first and the last one
unique = dict()
vertex = geom.vertexAt(i)
while vertex.x() != 0 and vertex.y() != 0:
unique[i] = vertex
            i += 1
            vertex = geom.vertexAt(i)
for key in unique.keys():
vertex = unique[key]
x = cosa * vertex.x() - sina * vertex.y()
y = sina * vertex.x() + cosa * vertex.y()
geom.moveVertex(x, y, key)
return geom
def _move(self, geom, rect):
bbox = geom.boundingBox()
# compute allowed movement range
dxMin = rect.xMinimum() - bbox.xMinimum()
dxMax = rect.xMaximum() - bbox.xMaximum()
dyMin = rect.yMinimum() - bbox.yMinimum()
dyMax = rect.yMaximum() - bbox.yMaximum()
# randomize dx and dy
dx = random.uniform(dxMin, dxMax)
dy = random.uniform(dyMin, dyMax)
# move
geom.translate(dx, dy)
return geom
def _calculateStats(self):
data = self.overlapsTotal[1:]
mean = sum(data) / len(data)
sd = 0
for i in data:
sd += (i - mean) * (i - mean)
if len(data) > 1:
sd = math.sqrt(sd / (len(data) - 1))
else:
sd = 0
return (mean, sd)
def writeSummary(self, fileName, hrLayer, studyLayer):
sepField = ProcessingConfig.getSetting('FIELD_SEPARATOR')
sepNumber = ProcessingConfig.getSetting('DECIMAL_SEPARATOR')
with open(fileName, 'w') as f:
f.write('QGIS Random Home Range summary\n')
f.write('Frame layer%s%s\n' % (sepField, studyLayer.name()))
f.write('Home ranges layer%s%s\n' % (sepField, hrLayer.name()))
f.write('Number of the home ranges%s%s\n' % (sepField, hrLayer.featureCount()))
f.write('Number of iterations%s%s\n\n' % (sepField, len(self.overlaps) - 1))
f.write(sepField + 'total overlap area' + sepField + 'SD\n')
f.write('observed' + sepField + str(self.overlapsTotal[0]).replace('.', sepNumber) + sepField + '\n')
for i in range(1, len(self.overlapsTotal)):
f.write('iteration %s%s%s%s\n' % (i, sepField, str(self.overlapsTotal[i]).replace('.', sepNumber), sepField))
(mean, sd) = self._calculateStats()
f.write('mean' + sepField + str(mean).replace('.', sepNumber) + sepField + str(sd).replace('.',sepNumber) + '\n')
f.write('observed-randomized' + sepField + str(self.overlapsTotal[0] - mean).replace('.', sepNumber) + sepField + '\n\n')
def writeRaw(self, fileName, hrLayer, studyLayer, areas):
sepField = ProcessingConfig.getSetting('FIELD_SEPARATOR')
sepNumber = ProcessingConfig.getSetting('DECIMAL_SEPARATOR')
with open(fileName, 'w') as f:
f.write('QGIS Random Home Range summary\n')
f.write('Frame layer%s%s\n' % (sepField, studyLayer.name()))
f.write('Home ranges layer%s%s\n' % (sepField, hrLayer.name()))
            f.write('Number of the home ranges%s%s\n' % (sepField, hrLayer.featureCount()))
f.write('Number of iterations%s%s\n\n' % (sepField, len(self.overlaps) - 1))
f.write('Note: The first column contains the home range area\n\n')
for i in range(len(self.overlaps)):
if i == 0:
f.write('Observed data:\n')
else:
f.write('Iteration %s:\n' % i)
for j in range(len(self.overlaps[i])):
text = str(areas[j]) + sepField
for k in range(len(self.overlaps[i]) - len(self.overlaps[i][j])):
text += sepField
for k in range(len(self.overlaps[i][j])):
val = self.overlaps[i][j][k]
text += str(val).replace('.', sepNumber) + sepField
text = text[:len(text) - 1] + '\n'
f.write(text)
f.write('\n')
| gioman/radio_telemetry_tools | RandomHR.py | Python | gpl-2.0 | 12,425 |
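RandomHR is a QGIS 2.x Processing GeoAlgorithm, so it is normally run through the Processing framework rather than instantiated directly. The sketch below is only an assumption about how that call could look from the QGIS Python console: the `'script:randomhr'` identifier, the file paths and the parameter order are placeholders — `processing.alghelp()` shows the real signature once the script is installed.
import processing

result = processing.runalg(
    'script:randomhr',             # algorithm id (assumed)
    '/data/home_ranges.shp',       # HR_LAYER
    '/data/study_area.shp',        # STUDY_LAYER
    10,                            # ITERATIONS
    '/tmp/random_hr.shp',          # RANDOM_HR
    '/tmp/random_hr.html',         # HTML
    '/tmp/random_hr_raw.csv',      # RAW_DATA
    '/tmp/random_hr_summary.csv')  # SUMMARY_DATA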
# -*- coding:utf-8 -*-
# Copyright (c) 2012 TiN
# Copyright (c) 2012, 2014 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014-2015 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Foster McLane
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# coding: utf-8
import re
from six import u, PY2
from . import base
from ..utils import UnixCommandNotFound, catch_exception_and_warn
class ThermalSensor(base.InLoopPollText):
'''
    To use the thermal sensor widget you need to have lm-sensors installed.
    You can get a list of the tag_sensors by executing "sensors" in your
    terminal. Then you can choose the one you want; otherwise it will display
    the first available sensor.
'''
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('metric', True, 'True to use metric/C, False to use imperial/F'),
('show_tag', False, 'Show tag sensor'),
('update_interval', 2, 'Update interval in seconds'),
('tag_sensor', None,
'Tag of the temperature sensor. For example: "temp1" or "Core 0"'),
(
'threshold',
70,
'If the current temperature value is above, '
'then change to foreground_alert colour'
),
('foreground_alert', 'ff0000', 'Foreground colour alert'),
]
def __init__(self, **config):
base.InLoopPollText.__init__(self, **config)
self.add_defaults(ThermalSensor.defaults)
self.sensors_temp = re.compile(
u(r"""
([\w ]+): # Sensor tag name
\s+[+|-] # temp signed
(\d+\.\d+) # temp value
({degrees} # ° match
[C|F]) # Celsius or Fahrenheit
""".format(degrees="\xc2\xb0" if PY2 else "\xb0")),
re.UNICODE | re.VERBOSE
)
self.value_temp = re.compile("\d+\.\d+")
temp_values = self.get_temp_sensors()
self.foreground_normal = self.foreground
if temp_values is None:
self.data = "sensors command not found"
elif len(temp_values) == 0:
self.data = "Temperature sensors not found"
elif self.tag_sensor is None:
for k in temp_values:
self.tag_sensor = k
break
@catch_exception_and_warn(warning=UnixCommandNotFound, excepts=OSError)
def get_temp_sensors(self):
"""calls the unix `sensors` command with `-f` flag if user has specified that
the output should be read in Fahrenheit.
"""
command = ["sensors", ]
if not self.metric:
command.append("-f")
sensors_out = self.call_process(command)
return self._format_sensors_output(sensors_out)
def _format_sensors_output(self, sensors_out):
"""formats output of unix `sensors` command into a dict of
{<sensor_name>: (<temperature>, <temperature symbol>), ..etc..}
"""
temperature_values = {}
for name, temp, symbol in self.sensors_temp.findall(sensors_out):
name = name.strip()
temperature_values[name] = temp, symbol
return temperature_values
def poll(self):
temp_values = self.get_temp_sensors()
if temp_values is None:
return False
text = ""
if self.show_tag and self.tag_sensor is not None:
text = self.tag_sensor + ": "
text += "".join(temp_values.get(self.tag_sensor, ['N/A']))
temp_value = float(temp_values.get(self.tag_sensor, [0])[0])
if temp_value > self.threshold:
self.layout.colour = self.foreground_alert
else:
self.layout.colour = self.foreground_normal
return text
| StephenBarnes/qtile | libqtile/widget/sensors.py | Python | mit | 4,737 |
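A minimal qtile configuration sketch using the widget above; the `tag_sensor` value is machine specific (run `sensors` to list valid tags) and the bar layout is illustrative.
from libqtile import bar, widget
from libqtile.config import Screen

screens = [
    Screen(
        bottom=bar.Bar(
            [
                widget.ThermalSensor(
                    tag_sensor='Core 0',   # pick a tag reported by `sensors`
                    threshold=75,          # switch to foreground_alert above 75
                    show_tag=True,
                    update_interval=5,
                ),
            ],
            24,
        ),
    ),
]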
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class SuiteSparse(Package):
"""
SuiteSparse is a suite of sparse matrix algorithms
"""
homepage = 'http://faculty.cse.tamu.edu/davis/suitesparse.html'
url = 'http://faculty.cse.tamu.edu/davis/SuiteSparse/SuiteSparse-5.2.0.tar.gz'
version('5.2.0', '8e625539dbeed061cc62fbdfed9be7cf')
version('5.1.0', '9c34d7c07ad5ce1624b8187faa132046')
version('4.5.5', '0a5b38af0016f009409a9606d2f1b555')
version('4.5.4', 'f6ab689442e64a1624a47aa220072d1b')
version('4.5.3', '8ec57324585df3c6483ad7f556afccbd')
version('4.5.1', 'f0ea9aad8d2d1ffec66a5b6bfeff5319')
variant('tbb', default=False, description='Build with Intel TBB')
variant('pic', default=True, description='Build position independent code (required to link with shared libraries)')
variant('cuda', default=False, description='Build with CUDA')
variant('openmp', default=False, description='Build with OpenMP')
depends_on('blas')
depends_on('lapack')
depends_on('cmake', when='@5.2.0:', type='build')
depends_on('metis@5.1.0', when='@4.5.1:')
    # In @4.5.1, TBB support in SPQR seems to be broken as TBB-related linking
    # flags do not seem to be used, which leads to linking errors on Linux.
depends_on('tbb', when='@4.5.3:+tbb')
depends_on('cuda', when='+cuda')
patch('tbb_453.patch', when='@4.5.3:+tbb')
# This patch removes unsupported flags for pgi compiler
patch('pgi.patch', when='%pgi')
# This patch adds '-lm' when linking libgraphblas and when using clang.
# Fixes 'libgraphblas.so.2.0.1: undefined reference to `__fpclassify''
patch('graphblas_libm_dep.patch', when='@5.2.0:%clang')
def install(self, spec, prefix):
# The build system of SuiteSparse is quite old-fashioned.
# It's basically a plain Makefile which include an header
# (SuiteSparse_config/SuiteSparse_config.mk)with a lot of convoluted
# logic in it. Any kind of customization will need to go through
# filtering of that file
pic_flag = self.compiler.pic_flag if '+pic' in spec else ''
make_args = [
'INSTALL=%s' % prefix,
# By default, the Makefile uses the Intel compilers if
# they are found. The AUTOCC flag disables this behavior,
# forcing it to use Spack's compiler wrappers.
'AUTOCC=no',
# CUDA=no does NOT disable cuda, it only disables internal search
# for CUDA_PATH. If in addition the latter is empty, then CUDA is
# completely disabled. See
# [SuiteSparse/SuiteSparse_config/SuiteSparse_config.mk] for more.
'CUDA=no',
'CUDA_PATH=%s' % (spec['cuda'].prefix if '+cuda' in spec else ''),
'CFOPENMP=%s' % (self.compiler.openmp_flag
if '+openmp' in spec else ''),
'CFLAGS=-O3 %s' % pic_flag,
# Both FFLAGS and F77FLAGS are used in SuiteSparse makefiles;
# FFLAGS is used in CHOLMOD, F77FLAGS is used in AMD and UMFPACK.
'FFLAGS=%s' % pic_flag,
'F77FLAGS=%s' % pic_flag,
# use Spack's metis in CHOLMOD/Partition module,
# otherwise internal Metis will be compiled
'MY_METIS_LIB=%s' % spec['metis'].libs.ld_flags,
'MY_METIS_INC=%s' % spec['metis'].prefix.include,
# Make sure Spack's Blas/Lapack is used. Otherwise System's
# Blas/Lapack might be picked up. Need to add -lstdc++, following
# with the TCOV path of SparseSuite 4.5.1's Suitesparse_config.mk,
# even though this fix is ugly
'BLAS=%s' % (spec['blas'].libs.ld_flags + (
' -lstdc++' if '@4.5.1' in spec else '')),
'LAPACK=%s' % spec['lapack'].libs.ld_flags,
]
# 64bit blas in UMFPACK:
if (spec.satisfies('^openblas+ilp64') or
spec.satisfies('^intel-mkl+ilp64') or
spec.satisfies('^intel-parallel-studio+mkl+ilp64')):
make_args.append('UMFPACK_CONFIG=-DLONGBLAS="long long"')
# SuiteSparse defaults to using '-fno-common -fexceptions' in
# CFLAGS, but not all compilers use the same flags for these
# optimizations
if any([x in spec
for x in ('%clang', '%gcc', '%intel')]):
make_args += ['CFLAGS+=-fno-common -fexceptions']
elif '%pgi' in spec:
make_args += ['CFLAGS+=--exceptions']
if '%xl' in spec or '%xl_r' in spec:
make_args += ['CFLAGS+=-DBLAS_NO_UNDERSCORE']
# Intel TBB in SuiteSparseQR
if 'tbb' in spec:
make_args += [
'SPQR_CONFIG=-DHAVE_TBB',
'TBB=-L%s -ltbb' % spec['tbb'].prefix.lib,
]
make('install', *make_args)
@property
def libs(self):
"""Export the libraries of SuiteSparse.
Sample usage: spec['suite-sparse'].libs.ld_flags
spec['suite-sparse:klu,btf'].libs.ld_flags
"""
# Component libraries, ordered by dependency. Any missing components?
all_comps = ['klu', 'btf', 'umfpack', 'cholmod', 'colamd', 'amd',
'camd', 'ccolamd', 'cxsparse', 'ldl', 'rbio', 'spqr',
'suitesparseconfig']
query_parameters = self.spec.last_query.extra_parameters
comps = all_comps if not query_parameters else query_parameters
libs = find_libraries(['lib' + c for c in comps], root=self.prefix.lib,
shared=True, recursive=False)
if not libs:
return None
libs += find_system_libraries('librt')
return libs
| EmreAtes/spack | var/spack/repos/builtin/packages/suite-sparse/package.py | Python | lgpl-2.1 | 6,954 |
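The `libs` property above supports component queries of the form `spec['suite-sparse:klu,btf']`. Below is a hypothetical dependent Spack package showing that usage; the package name, homepage, URL and checksum are placeholders.
from spack import *

class ExampleSolver(Package):
    """Toy dependent package that links only against KLU and BTF."""
    homepage = 'https://example.org/solver'
    url = 'https://example.org/solver-1.0.tar.gz'

    version('1.0', '00000000000000000000000000000000')

    depends_on('suite-sparse+pic')

    def install(self, spec, prefix):
        # Query only the KLU/BTF components exported by the libs property.
        ldflags = spec['suite-sparse:klu,btf'].libs.ld_flags
        configure('--prefix=%s' % prefix, 'LDFLAGS=%s' % ldflags)
        make()
        make('install')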
# -*- coding: utf-8 -*-
# @Author: tasdik
# @Date: 2016-04-11 18:52:17
# @Last Modified by: Tasdik Rahman
# @Last Modified time: 2016-04-12 17:07:33
# @GPLv3 License
# @http://tasdikrahman.me
# @https://github.com/tasdikrahman
from __future__ import absolute_import, division
import os
import bs4
from spammy.train import Trainer
from spammy.exceptions import CorpusFileError, CorpusError, LimitError
from spammy.version import VERSION
__title__ = 'spammy'
__version__ = VERSION
__author__ = 'Tasdik Rahman'
__email__ = 'prodicus@outlook.com'
__license__ = 'GPLv3'
__copyright__ = 'Copyright 2016 Tasdik Rahman'
class Spammy(object):
"""Stiches everything from train module and classifier module together"""
def __init__(self, directory=None, limit=None, **kwargs):
"""
Initializing the essential
:param directory: Pass the full path of the directory where your
training data is
:param spam: folder spam inside the 'directory'
:param ham: folder ham inside the 'directory'
:param limit: limit the number of files for the classifier to be
trained upon when training the classifier
:Example:
>>> import os
>>> from spammy import Spammy
>>>
>>> directory = '/home/tasdik/Dropbox/projects/spamfilter/data/corpus3'
>>>
>>> # directory structure
>>> os.listdir(directory)
['spam', 'Summary.txt', 'ham']
>>> os.listdir(os.path.join(directory, 'spam'))[:5]
['4257.2005-04-06.BG.spam.txt', '0724.2004-09-21.BG.spam.txt', '2835.2005-01-19.BG.spam.txt', '2505.2005-01-03.BG.spam.txt', '3992.2005-03-19.BG.spam.txt']
>>>
>>> # Spammy object created
>>> cl = Spammy(directory, limit=100)
"""
if kwargs:
spam = kwargs['spam']
ham = kwargs['ham']
else:
spam = 'spam'
ham = 'ham'
"""checking if the directories passed are valid ones or not"""
if not os.path.isdir(directory):
raise CorpusError(directory)
if not os.path.isdir(os.path.join(directory, ham)):
raise CorpusError(ham)
if not os.path.isdir(os.path.join(directory, spam)):
raise CorpusError(spam)
if limit < 0:
raise LimitError("Limit cannot be less than 0")
safe_limit = min(len(os.listdir(os.path.join(directory, spam))),
len(os.listdir(os.path.join(directory, ham))))
if limit > safe_limit:
limit = safe_limit
self.directory = directory
self.ham = os.path.join(self.directory, ham)
self.spam = os.path.join(self.directory, spam)
self.limit = limit
def train(self):
"""
Trains the classifier object
:param self: the classifier object
:Example:
>>> from spammy import Spammy
>>> directory = '/home/tasdik/Dropbox/projects/spammy/examples/training_dataset'
>>> cl = Spammy(directory, limit=300) # training on only 300 spam and ham files
>>> cl.train()
"""
kwargs = {
"directory": self.directory,
"spam": self.spam,
"ham": self.ham,
"limit": self.limit
}
self.trainer = Trainer(**kwargs)
self.classifier_object = self.trainer.train()
def classify(self, email_text):
"""
tries classifying text into spam or ham
:param email_text: email_text to be passed here which is to be classified
:returns: Either ham or spam
:rtype: str
.. note :: To be run after you have trained the classifier object on your dataset
:Example:
>>> from spammy import Spammy
        >>> cl = Spammy(path_to_training_data, limit=200)
# 200 or the number of files you need to train the classifier upon
>>>
>>> HAM_TEXT = \
'''
Bro. Hope you are fine. Hows the work going on ? Can you send me some updates on it.
And are you free tomorrow ?
No problem man. But please make sure you are finishing it
by friday night and sending me on on that day itself. As we
have to get it printed on Saturday.
'''
>>> cl.classify(HAM_TEXT)
'ham'
"""
email_text = bs4.UnicodeDammit.detwingle(email_text).decode('utf-8')
email_text = email_text.encode('ascii', 'ignore')
return self.classifier_object.classify(
self.trainer.extract_features(email_text)
)
def accuracy(self, **kwargs):
"""
Checks the accuracy of the classifier by running it against a testing
corpus
:param limit: number of files the classifier should test upon
:param label: the label as in spam or ham
:param directory: The absolute path of the directory to be tested
:returns: the precision of the classifier. Eg: 0.87
:rtype: float
:Example:
>>> from spammy import Spammy
>>> directory = '/home/tasdik/Dropbox/projects/spammy/examples/training_dataset'
>>> cl = Spammy(directory, limit=300) # training on only 300 spam and ham files
>>> cl.train()
>>> cl.accuracy(directory='/home/tasdik/Dropbox/projects/spammy/examples/test_dataset', label='spam', limit=300)
0.9554794520547946
>>> cl.accuracy(directory='/home/tasdik/Dropbox/projects/spammy/examples/test_dataset', label='ham', limit=300)
0.9033333333333333
>>>
"""
directory = kwargs['directory']
label = kwargs['label']
limit = kwargs['limit']
if not os.path.isdir(directory):
raise CorpusError(directory)
if not os.path.isdir(os.path.join(directory, label)):
raise CorpusError(os.path.join(directory, label))
if limit < 0:
raise LimitError("Limit cannot be less than 0")
label_dir = os.path.join(directory, label)
safe_limit = len(os.listdir(label_dir))
if limit > safe_limit:
limit = safe_limit
os.chdir(label_dir)
correct = 0
total = 0
for email in os.listdir(label_dir)[:limit]:
email = os.path.join(label_dir, email)
email_file = open(email, 'r')
email_text = email_file.read()
email_file.close()
try:
email_text = bs4.UnicodeDammit.detwingle(email_text).decode(
'utf-8'
)
except:
# bad encoding error, skipping file
continue
email_text = email_text.encode('ascii', 'ignore')
hamorspam = self.classifier_object.classify(
self.trainer.extract_features(email_text)
)
total += 1
if hamorspam == label:
correct += 1
precision = correct / total
        return precision
| prodicus/spammy | spammy/__init__.py | Python | gpl-3.0 | 7,251 |
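The docstrings above already carry per-method examples; the following is only a compact end-to-end sketch that strings them together — the dataset paths and the expected outputs are placeholders.
from spammy import Spammy

cl = Spammy('/path/to/training_dataset', limit=300)
cl.train()

print(cl.classify('Congratulations! You have won a free cruise, click here.'))
# typically 'spam'

print(cl.accuracy(directory='/path/to/test_dataset', label='ham', limit=300))
# a float such as 0.90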
import sys
from lib.apscheduler.executors.base import BaseExecutor, run_job
class DebugExecutor(BaseExecutor):
"""
A special executor that executes the target callable directly instead of deferring it to a thread or process.
Plugin alias: ``debug``
"""
def _do_submit_job(self, job, run_times):
try:
events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
except:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
| theguardian/JIRA-APPy | lib/apscheduler/executors/debug.py | Python | gpl-2.0 | 559 |
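A sketch of wiring the executor above into a scheduler so jobs run inline, which is handy for debugging. The vendored `lib.apscheduler` import path mirrors the one used above; whether a BackgroundScheduler is bundled at that path is an assumption.
from lib.apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print('tick')

scheduler = BackgroundScheduler(executors={'default': DebugExecutor()})
scheduler.add_job(tick, 'interval', seconds=5)
scheduler.start()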
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
metadata = MetaData()
Base = declarative_base(metadata=metadata)
| allison-knauss/arkweb | api/models/base.py | Python | mit | 155 |
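A short sketch of building on this shared declarative Base; the model, column names and in-memory engine are illustrative only.
from sqlalchemy import Column, Integer, String, create_engine

class Ark(Base):
    __tablename__ = 'arks'

    id = Column(Integer, primary_key=True)
    name = Column(String(64), nullable=False)

# Create all tables registered on the shared metadata object.
engine = create_engine('sqlite:///:memory:')
metadata.create_all(engine)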
# -*- coding: utf-8 -*-
import flask
import anitya
import anitya.lib.plugins
import anitya.lib.model
from anitya.app import APP, SESSION
from anitya.doc_utils import load_doc
@APP.template_filter('InsertDiv')
def insert_div(content):
""" Template filter inserting an opening <div> and closing </div>
after the first title and then at the end of the content.
"""
# This is quite a hack but simpler solution using .replace() didn't work
# for some reasons...
content = content.split('\n')
output = []
for row in content:
if row.startswith('<h1 class="title">'):
title = row.split('"title">')[1].split('</h1>')[0]
link = '<a name="%(title)s" class="glyphicon glyphicon-link btn-xs" '\
'title="Permalink to this headline" href="#%(title)s"></a>' % (
{
'title': title.replace(' ', '_'),
}
)
row = str(row).replace('</h1>', link + '</h1>')
if row.startswith('<div class="document" id='):
continue
output.append(row)
output = "\n".join(output)
output = output.replace('</div>', '')
output = output.replace('h1', 'h3')
return output
@APP.route('/api/')
@APP.route('/api')
def api():
''' Display the api information page. '''
doc_api_version = load_doc(api_version)
doc_api_projects = load_doc(api_projects)
doc_api_packages_wiki_list = load_doc(api_packages_wiki_list)
doc_api_projects_names = load_doc(api_projects_names)
doc_api_get_version = load_doc(api_get_version)
doc_api_get_project = load_doc(api_get_project)
doc_api_get_project_distro = load_doc(api_get_project_distro)
return flask.render_template(
'api.html',
docs=[
doc_api_version,
doc_api_projects,
doc_api_packages_wiki_list,
doc_api_projects_names,
doc_api_get_version,
doc_api_get_project,
doc_api_get_project_distro,
],
)
@APP.route('/api/version/')
@APP.route('/api/version')
def api_version():
'''
API Version
-----------
Display the api version information.
::
/api/version
Accepts GET queries only.
Sample response:
::
{
"version": "1.0"
}
'''
return flask.jsonify({'version': anitya.__api_version__})
@APP.route('/api/projects/')
@APP.route('/api/projects')
def api_projects():
'''
List all projects
-----------------
Lists all the projects registered in anitya.
::
/api/projects
/api/projects/?pattern=<pattern>
/api/projects/?pattern=py*
/api/projects/?homepage=<homepage>
/api/projects/?homepage=https%3A%2F%2Fpypi.python.org%2Fpypi%2Fansi2html
Accepts GET queries only.
:kwarg pattern: pattern to use to restrict the list of projects returned.
:kwarg homepage: upstream homepage to use to restrict the list of projects
returned.
The ``pattern`` and ``homepage`` arguments are mutually exclusive and
cannot be combined. You can query for packages by a pattern **or** you can
query by their upstream homepage, but not both.
Sample response:
::
{
"projects": [
{
"backend": "custom",
"created_on": 1409917222.0,
"homepage": "http://www.finnie.org/software/2ping/",
"id": 2,
"name": "2ping",
"regex": null,
"updated_on": 1414400794.0,
"version": "2.1.1",
"version_url": "http://www.finnie.org/software/2ping",
"versions": [
"2.1.1"
]
},
{
"backend": "custom",
"created_on": 1409917223.0,
"homepage": "http://www.3proxy.ru/download/",
"id": 3,
"name": "3proxy",
"regex": null,
"updated_on": 1415115096.0,
"version": "0.7.1.1",
"version_url": "http://www.3proxy.ru/download/",
"versions": [
"0.7.1.1"
]
}
],
"total": 2
}
'''
pattern = flask.request.args.get('pattern', None)
homepage = flask.request.args.get('homepage', None)
if pattern and homepage:
err = 'pattern and homepage are mutually exclusive. Specify only one.'
output = {'output': 'notok', 'error': [err]}
jsonout = flask.jsonify(output)
jsonout.status_code = 400
return jsonout
if homepage is not None:
project_objs = anitya.lib.model.Project.by_homepage(SESSION, homepage)
elif pattern:
if '*' not in pattern:
pattern += '*'
project_objs = anitya.lib.model.Project.search(
SESSION, pattern=pattern)
else:
project_objs = anitya.lib.model.Project.all(SESSION)
projects = [project.__json__() for project in project_objs]
output = {
'total': len(projects),
'projects': projects
}
jsonout = flask.jsonify(output)
jsonout.status_code = 200
return jsonout
@APP.route('/api/packages/wiki/')
@APP.route('/api/packages/wiki')
def api_packages_wiki_list():
'''
List all packages in mediawiki format
-------------------------------------
Lists all the packages registered in anitya using the format of the
old wiki page. If a project is present multiple times on different
distribution, it will be shown multiple times.
::
/api/packages/wiki
Accepts GET queries only.
Sample response:
::
* 2ping None http://www.finnie.org/software/2ping
* 3proxy None http://www.3proxy.ru/download/
'''
project_objs = anitya.lib.model.Project.all(SESSION)
projects = []
for project in project_objs:
for package in project.packages:
tmp = '* {name} {regex} {version_url}'.format(
name=package.package_name,
regex=project.regex,
version_url=project.version_url)
projects.append(tmp)
return flask.Response(
"\n".join(projects),
content_type="text/plain;charset=UTF-8"
)
@APP.route('/api/projects/names/')
@APP.route('/api/projects/names')
def api_projects_names():
'''
List all projects names
------------------------
Lists the names of all the projects registered in anitya.
::
/api/projects/names
/api/projects/names/?pattern=<pattern>
/api/projects/names/?pattern=py*
Accepts GET queries only.
:kwarg pattern: pattern to use to restrict the list of names returned.
Sample response:
::
{
"projects": [
"2ping",
"3proxy",
],
"total": 2
}
'''
pattern = flask.request.args.get('pattern', None)
if pattern and '*' not in pattern:
pattern += '*'
if pattern:
project_objs = anitya.lib.model.Project.search(
SESSION, pattern=pattern)
else:
project_objs = anitya.lib.model.Project.all(SESSION)
projects = [project.name for project in project_objs]
output = {
'total': len(projects),
'projects': projects
}
jsonout = flask.jsonify(output)
jsonout.status_code = 200
return jsonout
@APP.route('/api/distro/names/')
@APP.route('/api/distro/names')
def api_distro_names():
'''
List all distribution names
---------------------------
Lists the names of all the distributions registered in anitya.
::
/api/distro/names
/api/projects/names/?pattern=<pattern>
/api/projects/names/?pattern=f*
Accepts GET queries only.
:kwarg pattern: pattern to use to restrict the list of distro returned.
Accepts GET queries only.
Sample response:
::
{
"distro": [
"Fedora",
"Debian",
],
"total": 2
}
'''
pattern = flask.request.args.get('pattern', None)
if pattern and '*' not in pattern:
pattern += '*'
if pattern:
distro_objs = anitya.lib.model.Distro.search(
SESSION, pattern=pattern)
else:
distro_objs = anitya.lib.model.Distro.all(SESSION)
distros = [distro.name for distro in distro_objs]
output = {
'total': len(distros),
'distro': distros
}
jsonout = flask.jsonify(output)
jsonout.status_code = 200
return jsonout
@APP.route('/api/version/get', methods=['POST'])
def api_get_version():
'''
Retrieve version
----------------
Forces anitya to retrieve the latest version available from a project
upstream.
::
/api/version/get
Accepts POST queries only.
:arg id: the identifier of the project in anitya.
Sample response:
::
{
"backend": "Sourceforge",
"created_on": 1409917222.0,
"homepage": "http://sourceforge.net/projects/zero-install",
"id": 1,
"name": "zero-install",
"packages": [
{
"distro": "Fedora",
"package_name": "0install"
}
],
"regex": "",
"updated_on": 1413794215.0,
"version": "2.7",
"version_url": "0install",
"versions": [
"2.7"
]
}
'''
project_id = flask.request.form.get('id', None)
test = flask.request.form.get('test', False)
httpcode = 200
    if not project_id:
        output = {'output': 'notok', 'error': ['No project id specified']}
        httpcode = 400
else:
project = anitya.lib.model.Project.get(
SESSION, project_id=project_id)
if not project:
output = {'output': 'notok', 'error': 'No such project'}
httpcode = 404
else:
try:
version = anitya.check_release(project, SESSION, test=test)
if version:
output = {'version': version}
else:
output = project.__json__(detailed=True)
except anitya.lib.exceptions.AnityaException as err:
output = {'output': 'notok', 'error': [str(err)]}
httpcode = 400
jsonout = flask.jsonify(output)
jsonout.status_code = httpcode
return jsonout
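# Illustrative client sketch (not part of the anitya module itself): forcing a
# version check through the POST endpoint above. Host and project id are
# assumptions for demonstration; ``requests`` is assumed on the client side.
def _example_force_version_check(project_id=1, base_url='http://localhost:5000'):
    import requests
    response = requests.post('%s/api/version/get' % base_url,
                             data={'id': project_id})
    return response.json()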
@APP.route('/api/project/<int:project_id>/', methods=['GET'])
@APP.route('/api/project/<int:project_id>', methods=['GET'])
def api_get_project(project_id):
'''
Retrieve a specific project
----------------------------
Retrieves a specific project using its identifier in anitya.
::
/api/project/<project_id>
Accepts GET queries only.
:arg project_id: the identifier of the project in anitya.
Sample response:
::
{
"backend": "custom",
"created_on": 1409917222.0,
"homepage": "http://www.finnie.org/software/2ping/",
"id": 2,
"name": "2ping",
"packages": [
{
"distro": "Fedora",
"package_name": "2ping"
}
],
"regex": null,
"updated_on": 1414400794.0,
"version": "2.1.1",
"version_url": "http://www.finnie.org/software/2ping",
"versions": [
"2.1.1"
]
}
'''
project = anitya.lib.model.Project.get(SESSION, project_id=project_id)
if not project:
output = {'output': 'notok', 'error': 'no such project'}
httpcode = 404
else:
output = project.__json__(detailed=True)
httpcode = 200
jsonout = flask.jsonify(output)
jsonout.status_code = httpcode
return jsonout
@APP.route('/api/project/<distro>/<package_name>/', methods=['GET'])
@APP.route('/api/project/<distro>/<package_name>', methods=['GET'])
def api_get_project_distro(distro, package_name):
'''
Retrieve a package for a distro
-------------------------------
Retrieves a project in a distribution via the name of the distribution
and the name of the package in said distribution.
::
/api/project/<distro>/<package_name>
Accepts GET queries only.
:arg distro: the name of the distribution (case insensitive).
:arg package_name: the name of the package in the distribution specified.
Sample response:
::
{
"backend": "custom",
"created_on": 1409917222.0,
"homepage": "http://www.finnie.org/software/2ping/",
"id": 2,
"name": "2ping",
"packages": [
{
"distro": "Fedora",
"package_name": "2ping"
}
],
"regex": null,
"updated_on": 1414400794.0,
"version": "2.1.1",
"version_url": "http://www.finnie.org/software/2ping",
"versions": [
"2.1.1"
]
}
'''
package = anitya.lib.model.Packages.by_package_name_distro(
SESSION, package_name, distro)
if not package:
output = {
'output': 'notok',
'error': 'No package "%s" found in distro "%s"' % (
package_name, distro)}
httpcode = 404
else:
project = anitya.lib.model.Project.get(
SESSION, project_id=package.project.id)
output = project.__json__(detailed=True)
httpcode = 200
jsonout = flask.jsonify(output)
jsonout.status_code = httpcode
return jsonout
| Prashant-Surya/anitya | anitya/api.py | Python | gpl-2.0 | 13,586 |
# -*- coding: utf-8 -*-
# pylint: disable=
"""
Created on Tue May 3 18:34:45 2016
@author: P. Rodriguez-Mier and T. Teijeiro
"""
import random
from operator import attrgetter
def bound_value(v, min_v, max_v):
return min(max(min_v, v), max_v)
def recombinate(pairs, gene_props, mutation_probability=0.1, effect=0.5):
offspring = []
for p1, p2 in pairs:
children_genes = {}
for gen in p1.genes.keys():
values = [p1.genes[gen], p2.genes[gen]]
children_genes[gen] = random.uniform(min(values), max(values))
if random.random() < mutation_probability:
min_v = gene_props[gen]['min']
max_v = gene_props[gen]['max']
v = children_genes[gen]
rv = random.choice([-1, 1]) * random.uniform(0, effect * (max_v - min_v))
new_v_gauss = bound_value(random.gauss(v, (max_v - min_v) * effect), min_v, max_v)
new_v = bound_value(v + rv, min_v, max_v)
# print '----- Mutating ' + gen + ' - RV: ' + str(rv) + ' - V: ' + str(v) + ' - New: ' + str(new_v) + ' - Gaussian: ' + str(new_v_gauss)
# rv = random.uniform(children_genes[gen], (max_v - min_v)*0.1)
children_genes[gen] = new_v
offspring.append(children_genes)
return offspring
def mating_pool(population, num_of_pairs=10, evaluator=attrgetter('fitness')):
evaluated_population = evaluate(population, evaluator)
return zip(roulette_wheel(evaluated_population, k=num_of_pairs),
roulette_wheel(evaluated_population, k=num_of_pairs))
def mating_pool_tournament(population, num_of_pairs=10, evaluator=attrgetter('fitness')):
pool = []
while len(pool) < num_of_pairs:
# Generate a pair for mating
p1 = tournament(population, evaluator)
p2 = tournament(population - {p1}, evaluator)
pool.append((p1, p2))
return pool
def evaluate(population, evaluator=attrgetter('fitness')):
return map(lambda x: (x, evaluator(x)), population)
def roulette_wheel(evaluated_population, k=10):
sum_fitness = sum([v[1] for v in evaluated_population])
selected = []
while len(selected) < k:
r = random.uniform(0, sum_fitness)
for i in evaluated_population:
r -= i[1]
if r < 0:
selected.append(i[0])
break
return selected
def tournament(population, evaluator, k=2):
sample = population if len(population) < k else random.sample(population, k)
return max(sample, key=evaluator)
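# Illustrative sketch (assumption: individuals expose ``genes`` and ``fitness``
# attributes, which is what recombinate() and the default evaluator expect).
# It shows how mating_pool() and recombinate() can be chained for one generation.
def _example_generation():
    class Individual(object):
        def __init__(self, genes, fitness):
            self.genes = genes
            self.fitness = fitness
    gene_props = {'speed': {'min': 0.0, 'max': 10.0}}
    population = [Individual({'speed': random.uniform(0.0, 10.0)}, i + 1.0)
                  for i in range(6)]
    pairs = mating_pool(population, num_of_pairs=3)
    return recombinate(pairs, gene_props, mutation_probability=0.2)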
if __name__ == '__main__':
pop = {15, 18, 30, 100, 120, 60, 35, 40, 42}
print mating_pool(pop, evaluator=lambda x: x)
print mating_pool_tournament(pop, evaluator=lambda x: x)
| citiususc/citius-invaders | python/evolution.py | Python | mit | 2,775 |
import pyaudio
import collections
import threading
'''
ZombieComm radio server
'''
sound_frames = collections.deque()
def record():
global sound_frames
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 10000
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
while True:
data = stream.read(CHUNK)
sound_frames.append(data)
if __name__ == '__main__':
'''
main method simply prints recorded data right now
'''
t = threading.Thread(target=record)
t.start()
while True:
try:
if len(sound_frames) > 0:
print (sound_frames.popleft())
        except IndexError:
            # deque was empty; keep polling
            pass
| benjaminy/ZombieComm | server.py | Python | mit | 845 |
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from qgis.core import QgsVectorLayer, QGis
from ngw_api.core.ngw_vector_layer import NGWVectorLayer
class ActionStyleImportUpdate(QAction):
def __init__(self, parent=None):
super(ActionStyleImportUpdate, self).__init__(parent)
        super(ActionStyleImportUpdate, self).setText(self.tr("Import/Update style"))
super(ActionStyleImportUpdate, self).setEnabled(False)
def setEnabled(self, qgis_vector_layer, ngw_vector_layer):
if not isinstance(qgis_vector_layer, QgsVectorLayer):
super(ActionStyleImportUpdate, self).setEnabled(False)
return
if not isinstance(ngw_vector_layer, NGWVectorLayer):
super(ActionStyleImportUpdate, self).setEnabled(False)
return
qgis_vector_layer_geom = qgis_vector_layer.geometryType()
ngw_vector_layer_geom = ngw_vector_layer.geom_type()
if qgis_vector_layer_geom in [QGis.Point ] and ngw_vector_layer_geom in [NGWVectorLayer.POINT, NGWVectorLayer.MULTIPOINT, ]:
super(ActionStyleImportUpdate, self).setEnabled(True)
return
elif qgis_vector_layer_geom in [QGis.Line, ] and ngw_vector_layer_geom in [NGWVectorLayer.LINESTRING, NGWVectorLayer.MULTILINESTRING, ]:
super(ActionStyleImportUpdate, self).setEnabled(True)
return
elif qgis_vector_layer_geom in [QGis.Polygon, ] and ngw_vector_layer_geom in [NGWVectorLayer.POLYGON, NGWVectorLayer.MULTIPOLYGON, ]:
super(ActionStyleImportUpdate, self).setEnabled(True)
return
super(ActionStyleImportUpdate, self).setEnabled(False)
| nextgis/ngw_connect | src/action_style_import_or_update.py | Python | gpl-2.0 | 1,691 |
#!/usr/bin/python
import HTMLParser
import re
class ClubPageParser(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.setFalse()
self.in_playername = False
self.result = []
self.player = {}
def setFalse(self):
self.in_gkp = False
self.in_def = False
self.in_mid = False
self.in_stk = False
def getCurrentPosition(self):
if self.in_gkp:
return 'gkp'
elif self.in_def:
return 'def'
elif self.in_mid:
return 'mid'
elif self.in_stk:
return 'stk'
def handle_starttag(self, tag, attrs):
if tag == 'div':
for key, value in attrs:
if key == 'id' and value == 'gkp':
self.setFalse()
self.in_gkp = True
if key == 'id' and value == 'def':
self.setFalse()
self.in_def = True
if key == 'id' and value == 'mid':
self.setFalse()
self.in_mid = True
if key == 'id' and value == 'stk':
self.setFalse()
self.in_stk = True
if tag == 'a':
if self.in_playername:
for key, value in attrs:
if key == 'href':
pattern = re.compile("(.*?)(\d+)")
match = re.match(pattern, value)
self.player['id'] = match.group(2)
if tag == 'tr':
for key, value in attrs:
if key == 'class' and value == 'player':
self.in_player_tr = True
if tag == 'td':
for key, value in attrs:
if key == 'class':
for v in value.rsplit():
if v == 'playername':
self.in_playername = True
def handle_endtag(self, tag):
if tag == 'div':
self.setFalse()
if tag == 'td':
            self.in_playername = False
def handle_data(self, data):
if self.in_playername:
self.player['name'] = data
self.player['position'] = self.getCurrentPosition()
self.result.append(self.player)
self.player = {}
def get(self):
        return self.result
| CatzHoek/UefaCrawler | Parsers/ClubPageParser.py | Python | gpl-3.0 | 1,807 |
#!/usr/bin/python
import sys
import os
import boto.ec2
import boto.utils
import requests
import json
from datetime import datetime
from elasticsearch import Elasticsearch
from subprocess import PIPE, Popen
if os.getuid():
raise OSError('You must be root')
AZ_ENDPOINT = 'http://169.254.169.254/latest/meta-data/placement/availability-zone/'
ES_ADDR = 'local.elasticsearch.sudomakeinstall.me:9200'
es = Elasticsearch([ES_ADDR])
def get_region():
r = requests.get(AZ_ENDPOINT)
az = r.text
return az[:-1]
def run(cmd):
return Popen(str(cmd).split(), stdout=PIPE, stderr=PIPE)
def log(text, severity = 'INFO', volume = None):
#Dummy log
color = {}
color['INFO'] = '\033[94m'
color['WARNING'] = '\033[93m'
color['ERROR'] = '\033[91m'
color['ENDC'] = '\033[0m'
    print('[%s][%s%s%s] %s' % (sys.argv[0], color[severity], severity, color['ENDC'], text))
# Send log to elasticsearch
log = {}
log['date'] = datetime.now()
log['severity'] = severity
log['instance-id'] = meta['instance-id']
log['instance-type'] = meta['instance-type']
log['message'] = text
log['volume-id'] = volume
res = es.index(index='logs', doc_type='log', id="", body=log)
conn = boto.ec2.connect_to_region(get_region())
meta = boto.utils.get_instance_metadata()
volumes = conn.get_all_volumes(filters={'attachment.instance-id': meta['instance-id']})
log('Initialize JSON document')
doc = {'meta' : meta }
fstype = ['ext4 -F']
blocksize = ['4k','4k','4k','4k']
for metakey in meta['block-device-mapping'].keys():
if 'ephemeral' in metakey:
fakevolume = boto.ec2.volume.Volume()
fakevolume.attach_data = boto.ec2.volume.AttachmentSet()
#Vol
fakevolume.id = metakey
fakevolume.size = "0"
fakevolume.attach_data.device = "/dev/%s" % meta['block-device-mapping'][metakey]
fakevolume.zone = requests.get(AZ_ENDPOINT).text
fakevolume.type = 'instance-store'
fakevolume.iops = -1
fakevolume.encrypted = False
volumes.append(fakevolume)
for volume in volumes :
if meta['block-device-mapping']['root'] != volume.attach_data.device:
for fs in fstype:
for bs in blocksize:
log('Test %s' % volume.id)
try :
log('%s Format volume' % fs[:-3])
cmd = "mkfs.%s %s" % (fs, volume.attach_data.device)
retcode = run(cmd).wait()
if (retcode) :
raise OSError('Fail to format volume %s' % cmd)
log('Mount volume')
cmd = "mount %s /mnt" % volume.attach_data.device
retcode = run(cmd).wait()
if (retcode) :
raise OSError('Fail to mount volume')
log('Testing volume')
cmd = "fio --directory=/mnt --output-format=json --name %s --direct=1 --ioengine=libaio --refill_buffers --scramble_buffers=1 --blocksize=%s --rw=randrw --numjobs=1 --iodepth=64 --size=4G" % (volume.id, bs)
proc = run(cmd)
retcode = proc.wait()
if (retcode) :
raise OSError('Fail exec fio test')
log('Send test metrics at %s' % ES_ADDR)
fio_result = json.loads(proc.stdout.read())
for job_result in fio_result['jobs'] :
doc['result'] = job_result
doc['block-size'] = bs
doc['volume'] = {
'volume_id' : str(volume.id),
'volume_attach_device' : volume.attach_data.device,
'volume_size' : str(volume.size),
'volume_zone' : str(volume.zone),
'volume_type' : str(volume.type),
'volume_fs' : fs[:-3], # delete option
'volume_iops' : str(volume.iops),
'volume_encrypted' : str(volume.encrypted)
}
doc["creation-date"] = datetime.now()
res = es.index(index='benchmark', doc_type='metric', id="", body=doc)
except OSError as e :
log('%s' % e, 'ERROR', volume.id)
finally :
log('Umount filesystem')
run('umount /mnt').wait()
conn.stop_instances(instance_ids=[meta['instance-id']])
| neoxia/aws-storage-benchmark | test.py | Python | mit | 4,624 |
# engine/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and it's public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
from .interfaces import (
Connectable,
Dialect,
ExecutionContext,
# backwards compat
Compiled,
TypeCompiler
)
from .base import (
Connection,
Engine,
NestedTransaction,
RootTransaction,
Transaction,
TwoPhaseTransaction,
)
from .result import (
BufferedColumnResultProxy,
BufferedColumnRow,
BufferedRowResultProxy,
FullyBufferedResultProxy,
ResultProxy,
RowProxy,
)
from .util import (
connection_memoize
)
from . import util, strategies
# backwards compat
from ..sql import ddl
default_strategy = 'plain'
def create_engine(*args, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
and its underlying :class:`.Dialect` and :class:`.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
Once established, the newly resulting :class:`.Engine` will
request a connection from the underlying :class:`.Pool` once
:meth:`.Engine.connect` is called, or a method which depends on it
such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
.. versionchanged:: 0.8
By default, result row names match case-sensitively.
In version 0.7 and prior, all matches were case-insensitive.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, sets
the default behavior of ``convert_unicode`` on the
:class:`.String` type to ``True``, regardless
of a setting of ``False`` on an individual
:class:`.String` type, thus causing all :class:`.String`
-based columns
to accommodate Python ``unicode`` objects. This flag
is useful as an engine-wide setting when using a
DBAPI that does not natively support Python
``unicode`` objects and raises an error when
one is received (such as pyodbc with FreeTDS).
See :class:`.String` for further details on
what this flag indicates.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
:param echo=False: if True, the Engine will log all statements
as well as a repr() of their parameter lists to the engines
logger, which defaults to sys.stdout. The ``echo`` attribute of
``Engine`` can be modified at any time to turn logging on and
off. If set to the string ``"debug"``, result rows will be
printed to the standard output as well. This flag ultimately
controls a Python logger; see :ref:`dbengine_logging` for
information on how to configure logging directly.
:param echo_pool=False: if True, the connection pool will log
all checkouts/checkins to the logging stream, which defaults to
sys.stdout. This flag ultimately controls a Python logger; see
:ref:`dbengine_logging` for information on how to configure logging
directly.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
the DBAPI does not specify unicode behavior at all,
so SQLAlchemy must make decisions for each of the above
values on a per-DBAPI basis - implementations are
completely inconsistent in their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including Postgresql, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
      to five. This is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`.Engine`.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
:param pool_reset_on_return='rollback': set the "reset on return"
behavior of the pool, which is whether ``rollback()``,
``commit()``, or nothing is called upon connections
being returned to the pool. See the docstring for
``reset_on_return`` at :class:`.Pool`.
.. versionadded:: 0.7.6
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param strategy='plain': selects alternate engine implementations.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ
<http://www.sqlalchemy.org/trac/wiki/FAQ#HowcanIgettheCREATETABLEDROPTABLEoutputasastring>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
"""
strategy = kwargs.pop('strategy', default_strategy)
strategy = strategies.strategies[strategy]
return strategy.create(*args, **kwargs)
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file where keys
are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc. The
'prefix' argument indicates the prefix to be searched for.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. In a future release, this
functionality will be expanded and include dialect-specific
arguments.
"""
options = dict((key[len(prefix):], configuration[key])
for key in configuration
if key.startswith(prefix))
options['_coerce_config'] = True
options.update(kwargs)
url = options.pop('url')
return create_engine(url, **options)
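# Illustrative sketch only (not part of the public API): the prefixed-key
# dictionary shape that engine_from_config() expects, using an in-memory
# SQLite URL chosen purely for demonstration.
def _example_engine_from_config():
    configuration = {
        'sqlalchemy.url': 'sqlite://',
        'sqlalchemy.echo': 'false',
    }
    return engine_from_config(configuration, prefix='sqlalchemy.')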
__all__ = (
'create_engine',
'engine_from_config',
)
| jessekl/flixr | venv/lib/python2.7/site-packages/sqlalchemy/engine/__init__.py | Python | mit | 15,916 |
import os
from os.path import join
import numpy
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# newrand wrappers
config.add_extension('_newrand',
sources=['_newrand.pyx'],
include_dirs=[numpy.get_include(),
join('src', 'newrand')],
depends=[join('src', 'newrand', 'newrand.h')],
language='c++',
# Use C++11 random number generator fix
extra_compile_args=['-std=c++11']
)
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h'),
join('src', 'newrand', 'newrand.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
# Use C++11 to use the random number generator fix
extra_compiler_args=['-std=c++11'],
)
libsvm_sources = ['_libsvm.pyx']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h'),
join('src', 'newrand', 'newrand.h')]
config.add_extension('_libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm'),
join('src', 'newrand')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
# liblinear module
libraries = []
if os.name == 'posix':
libraries.append('m')
# precompile liblinear to use C++11 flag
config.add_library('liblinear-skl',
sources=[join('src', 'liblinear', 'linear.cpp'),
join('src', 'liblinear', 'tron.cpp')],
depends=[join('src', 'liblinear', 'linear.h'),
join('src', 'liblinear', 'tron.h'),
join('src', 'newrand', 'newrand.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
# Use C++11 to use the random number generator fix
extra_compiler_args=['-std=c++11'],
)
liblinear_sources = ['_liblinear.pyx']
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'newrand', 'newrand.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('_liblinear',
sources=liblinear_sources,
libraries=['liblinear-skl'] + libraries,
include_dirs=[join('.', 'src', 'liblinear'),
join('.', 'src', 'newrand'),
join('..', 'utils'),
numpy.get_include()],
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
)
# end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['_libsvm_sparse.pyx']
config.add_extension('_libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm"),
join("src", "newrand")],
depends=[join("src", "libsvm", "svm.h"),
join('src', 'newrand', 'newrand.h'),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| glemaitre/scikit-learn | sklearn/svm/setup.py | Python | bsd-3-clause | 4,770 |
from collections import namedtuple
import select
StreamEvent = namedtuple( 'StreamEvent', [ 'fd', 'stream', 'data', 'direction', 'num_bytes', 'eof' ] )
class StreamWatcher(object):
def __init__( self ):
if _best_backend is None:
raise Exception( "No poll/queue backend could be found for your OS." )
self.backend = _best_backend( )
self.fd_map = {}
self.stream_map = {}
def watch( self, fd, data=None, read=True, write=False ):
# allow python file-like objects that have a backing fd
if hasattr(fd, 'fileno') and callable(fd.fileno):
stream = fd
fd = stream.fileno()
self.stream_map[fd] = stream
else:
self.stream_map[fd] = None
# associate user data with the fd
self.fd_map[fd] = data
# prepare any event filter additions
if read:
self.backend.watch_read( fd )
if write:
self.backend.watch_write( fd )
def wait( self, timeout=None, max_events=4 ):
return self.backend.wait(
timeout=timeout,
max_events=max_events,
fd_data_map=self.fd_map,
fd_stream_map=self.stream_map )
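# Illustrative usage sketch (assumes a POSIX pipe; any file descriptor or
# file-like object exposing fileno() would work the same way): register the
# read end with a StreamWatcher, write some data, then collect the events.
def _example_watch_pipe():
    import os
    read_fd, write_fd = os.pipe()
    watcher = StreamWatcher()
    watcher.watch(read_fd, data='my-pipe', read=True)
    os.write(write_fd, b'hello')
    return watcher.wait(timeout=1.0)  # -> list of StreamEvent tuples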
_best_backend = None
try:
from select import kqueue, kevent
except ImportError:
pass
else:
class KQueueBackend(object):
def __init__( self ):
self.kq = kqueue( )
def watch_read( self, fd ):
event = kevent( fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD )
self._add_events( [event] )
def watch_write( self, fd ):
event = kevent( fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD )
self._add_events( [event] )
def _add_events( self, new_events ):
e = self.kq.control( new_events, 0, 0 )
assert len(e) == 0, "Not expecting to receive any events while adding filters."
def wait( self, timeout=None, max_events=4, fd_data_map={}, fd_stream_map={} ):
r_events = self.kq.control( None, max_events, timeout )
e = []
for event in r_events:
fd = event.ident
if fd in fd_data_map:
stream = fd_stream_map.get( fd, None )
data = fd_data_map.get( fd, None )
direction = 'read' if event.filter == select.KQ_FILTER_READ else 'write'
num_bytes = event.data
eof = ( event.flags & select.KQ_EV_EOF != 0 )
e.append( StreamEvent( fd, stream, data, direction, num_bytes, eof ) )
return e
if _best_backend is None:
_best_backend = KQueueBackend
try:
from select import epoll
from fcntl import ioctl
import array
import termios
except ImportError:
pass
else:
class EPollBackend(object):
def __init__( self ):
self.ep = epoll( )
def watch_read( self, fd ):
self.ep.register( fd, select.EPOLLIN )
def watch_write( self, fd ):
self.ep.register( fd, select.EPOLLOUT )
def wait( self, timeout=None, max_events=None, fd_data_map={}, fd_stream_map={} ):
if max_events is None:
max_events = -1
if timeout is None:
timeout = -1
r_events = self.ep.poll( timeout, max_events )
e = []
for fd, event in r_events:
if fd in fd_data_map:
buf = array.array( 'i', [0] )
ioctl( fd, termios.FIONREAD, buf, 1 )
stream = fd_stream_map.get( fd, None )
data = fd_data_map.get( fd, None )
num_bytes = buf[0]
eof = ( event & (select.EPOLLHUP | select.EPOLLERR) != 0 )
if event & select.EPOLLIN != 0:
e.append( StreamEvent( fd, stream, data, 'read', num_bytes, eof ) )
if event & select.EPOLLOUT != 0:
e.append( StreamEvent( fd, stream, data, 'write', num_bytes, eof ) )
return e
if _best_backend is None:
_best_backend = EPollBackend
| theojulienne/pyio | pyio/io/StreamWatcher.py | Python | mit | 3,517 |
#Copyright (C) 2018 Leonardo Mokarzel Falcon
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>
import sys
import sqlite3 as lite
from pyxdameraulevenshtein import normalized_damerau_levenshtein_distance
class DamerauLevenshtein:
def __init__(self, tdist, data):
self.tdist = tdist
self.data = set(data)
self.top_domains_db = 'shared/top_domains.sqlite'
self.top_domains = set()
if (tdist == 'domains'):
self.load_top_domains()
else:
print 'Unknown option:', tdist
print 'damerau_levenshtein_distance.py -t <domain> -i <input file> -o <output file>'
sys.exit(2)
def calc_distance_domains(self):
not_in_top = []
output = []
for item in self.data:
if item not in self.top_domains:
not_in_top.append(item)
for item in not_in_top:
entry = {item: {}}
flag = False
for td in self.top_domains:
dist = normalized_damerau_levenshtein_distance(item, td)
if 0 < dist < 0.2:
entry[item][td] = dist
flag = True
if flag is True:
output.append(entry)
if len(output) > 0:
return output
else:
return None
def load_top_domains(self):
con = lite.connect(self.top_domains_db)
con.text_factory = str
with con:
cur = con.cursor()
cur.execute("SELECT domain FROM domains")
rows = cur.fetchall()
domains = []
if rows:
for i in range(10000):
domains.append(rows[i][0])
else:
pass
self.top_domains = set(domains)
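# Illustrative usage sketch (assumes shared/top_domains.sqlite exists relative
# to the working directory and pyxdameraulevenshtein is installed): flag
# observed domains that sit within a small edit distance of a popular domain.
def _example_check_domains():
    observed = ['google.com', 'gooogle.com', 'paypa1.com']
    dl = DamerauLevenshtein('domains', observed)
    return dl.calc_distance_domains()  # list of {domain: {top_domain: score}} or None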
| A3sal0n/CyberThreatHunting | tools/lib/damerau_levenshtein.py | Python | gpl-3.0 | 2,369 |
# Adapted from: Hidden Markov Models in Python
# Katrin Erk, March 2013
#
# This HMM addresses the problem of disfluency/end of utterance tagging.
# It estimates the probability of a tag sequence for a given word sequence as
# follows:
#
# Say words = w1....wN
# and tags = t1..tN
#
# then
# P(tags | words) is_proportional_to product P(ti | t{i-1}) P(wi | ti)
#
# To find the best tag sequence for a given sequence of words,
# we want to find the tag sequence that has the maximum P(tags | words)
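#
# In log space, the Viterbi recursion implemented by viterbi_step() below is
# (before the class-specific boosts and timing/noisy-channel weights):
#
#   v[0][t] = log P(t | "s") + log P(w0 | t)
#   v[i][t] = max_{t'} ( v[i-1][t'] + log P(t | t') ) + log P(wi | t)
#
# where P(t | t') comes from the tag Markov model (cpd_tags) and P(wi | t)
# from the channel model's input distribution; backpointers record the argmax
# t' at each step so the best tag sequence can be read off in reverse.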
from __future__ import division
import os
import re
from copy import deepcopy
import numpy as np
from collections import defaultdict
import cPickle as pickle
import nltk
import tag_conversion
from hmm_utils import tabulate_cfd
from hmm_utils import log
# boosts for rare classes
SPARSE_WEIGHT_T_ = 3.0 # for <t # with timings this naturally gets boost
SPARSE_WEIGHT_T = 3.0 # for t/>
# results on un-weighted timing classifier for <t boosts:
# 1: 0.757  2: 0.770  3: 0.778  4: 0.781  5: 0.783
# 6: 0.785  7: 0.784
SPARSE_WEIGHT_RPS = 4.0
SPARSE_WEIGHT_RPE = 2.0
# the weights for the source language model and the timing duration classifier
TIMING_WEIGHT = 2.0 # 10 gives 0.756, no great gains with higher weight
# NB on 30.04 this is just a weight on the <t class as timer not working
# Given this can improve things from 0.70 -> 0.76 weighting worth looking at
# if using noisy channel model:
SOURCE_WEIGHT = 0.1
class FirstOrderHMM():
"""A standard hmm model which interfaces with any sequential channel model
that outputs the input_distribution over all labels at each time step.
A first order model where the internal state probabilities only depend
on the previous state.
"""
def __init__(self, disf_dict, markov_model_file=None,
timing_model=None, timing_model_scaler=None,
n_history=20, constraint_only=True, noisy_channel=None):
self.tagToIndexDict = disf_dict # dict maps from tags -> indices
self.n_history = n_history # how many steps back we should store
self.observation_tags = set(self.tagToIndexDict.keys())
self.observation_tags.add('s') # all tag sets need a start tag
self.cfd_tags = nltk.ConditionalFreqDist()
self.cpd_tags = None
self.tag_set = None
self.timing_model = None
self.timing_model_scaler = None
self.constraint_only = constraint_only
self.noisy_channel_source_model = noisy_channel
if any(["<ct/>" in x for x in self.observation_tags]):
# if a segmentation problem
if any(["<rm-2" in x for x in self.observation_tags]):
# full set
self.convert_tag = tag_conversion.\
convert_to_disfluency_uttseg_tag
elif any(["<rm-" in x for x in self.observation_tags]):
self.convert_tag = tag_conversion.\
convert_to_disfluency_uttseg_tag_simple
elif any(["<speaker" in x for x in self.observation_tags]):
self.convert_tag = tag_conversion.\
convert_to_diact_uttseg_interactive_tag
elif any(["<speaker" in x for x in self.observation_tags]):
# if only dialogue acts
self.convert_tag = tag_conversion.convert_to_diact_uttseg_tag
else:
self.convert_tag = tag_conversion.convert_to_uttseg_tag
else:
# no segmentation in this task
self.observation_tags.add('se') # add end tag in pre-seg mode
if any(["<rm-2" in x for x in self.observation_tags]):
# full set
self.convert_tag = tag_conversion.convert_to_disfluency_tag
elif any(["<rm-" in x for x in self.observation_tags]):
self.convert_tag = tag_conversion.\
convert_to_disfluency_tag_simple
elif any(["<speaker" in x for x in self.observation_tags]):
self.convert_tag = tag_conversion.\
convert_to_diact_interactive_tag
else:
# if only dialogue acts
self.convert_tag = tag_conversion.convert_to_diact_tag
if markov_model_file:
print "loading", markov_model_file, "Markov model"
# print "If we have just seen 'DET', \
# the probability of 'N' is", cpd_tags["DET"].prob("N")
# or load from file
mm_path = os.path.dirname(os.path.realpath(__file__)) +\
"/models/{}_tags.pkl".format(markov_model_file)
# if load:
self.cfd_tags = pickle.load(open(mm_path, "rb"))
# else:
# # or create this from scratch
# graph = convert_to_dot("../decoder/models/{}.csv".format(
# markov_model_file))
# # loading MM from the graph/dot representation
# tags = []
# for line in graph.split("\n"):
# spl = line.replace(";", "").split()
# if not len(spl) == 3:
# continue
# assert spl[1] == "->"
# tags.append((spl[0], spl[2]))
# self.cfd_tags += nltk.ConditionalFreqDist(tags)
else:
print 'No Markov model file specified, empty CFD. Needs training.'
# whatever happens turn this into a cond prob dist:
self.cpd_tags = nltk.ConditionalProbDist(self.cfd_tags,
nltk.MLEProbDist)
all_outcomes = [v.keys() for v in self.cfd_tags.values()]
self.tag_set = set(self.cfd_tags.keys() +
[y for x in all_outcomes for y in x])
self.viterbi_init() # initialize viterbi
# print "Test: If we have just seen 'rpSM',\
# the probability of 'f' is", self.cpd_tags["c_rpSM_c"].prob("c_f_c")
if timing_model:
self.timing_model = timing_model
self.timing_model_scaler = timing_model_scaler
# self.simple_trp_idx2label = {0 : "<cc/>",
# 1 : "<ct/>",
# 2 : "<tc/>",
# 3 : "<tt/>"}
# Only use the Inbetween and Start tags
self.simple_trp_idx2label = {0: "<c", 1: "<t"}
else:
print "No timing model given"
print "Markov Model ready mode:"
if self.constraint_only:
print "constraint only"
else:
print "conditional probability"
def train_markov_model_from_file(self, corpus_path, mm_path, update=False,
non_sparse=False):
"""Adds to the self.cfd_tags conditional frequency distribution
loaded, if there is one, else starts afresh.
Recalculate the conditional prob distribution afresh.
args:
        --corpus_path : filepath to newline separated file to learn sequence
probabilities from.
--mm_path : filepath to markov model distribution path to write to.
--update : whether to update the current cfd, if not start anew.
--non_sparse : whether to omit lines in the corpus without repairs,
gives higher prob to repairs
"""
tags = []
# expects line separated sequences
corpus_file = open(corpus_path)
print "training decoder from", corpus_path
for line in corpus_file:
if line.strip("\n") == "":
continue
if non_sparse and ("<r" not in line):
continue
labels_data = line.strip("\n").split(",")
if "<r" in labels_data[0]:
continue # TODO error with corpus creation
previous = "s"
# print "length sequence", len(labels_data)
for i in range(len(labels_data)):
if labels_data[i] not in self.observation_tags:
print labels_data[i], "not in obs tags"
continue
if any(["<i" in t for t in self.observation_tags]):
if "<e" in labels_data[i] and i < len(labels_data)-1:
rps_onset = None
for j in range(i, len(labels_data)):
if "<rm" in labels_data[j]:
rps_onset = j
break
if "<e" not in labels_data[j]:
break
if rps_onset:
for k in range(i, rps_onset):
labels_data[k] = labels_data[k].replace("<e",
"<i")
# print labels_data[i]
# adjust interregna
# if any(["<i" in t for t in self.observation_tags]):
# if "<rm-" in labels_data[i]:
# b = len(tags)-1
# while ("e" in tags[b][1] and (not tags[b][1]=="se")\
# and b > 0):
# if "i" not in tags[b][1]:
# new_1 = tags[b][1].replace('eR', 'i').\
# replace('e', 'i')
# tags[b] = (tags[b][0], new_1)
# if "e" in tags[b][0] and "i" not in tags[b][0]:
# new_0 = tags[b][0].replace('eR', 'i').\
# replace('e', 'i')
# tags[b] = (new_0, tags[b][1])
# b -= 1
# previous = tags[-1][1]
tag = self.convert_tag(previous, labels_data[i])
tags.append((previous, tag))
previous = tag
if "se" in self.observation_tags:
# add end tag
tags.append((previous, 'se'))
# print "If we have just seen 'DET', \
# the probability of 'N' is", cpd_tags["DET"].prob("N")
# assumes these are added to exisiting one
if update:
self.cfd_tags += nltk.ConditionalFreqDist(tags)
else:
self.cfd_tags = nltk.ConditionalFreqDist(tags)
print "cfd trained, counts:"
self.cfd_tags.tabulate()
print "test:"
print tabulate_cfd(self.cfd_tags)
# save this new cfd for later use
pickle.dump(self.cfd_tags, open(mm_path, "wb"))
# initialize the cpd
self.cpd_tags = nltk.ConditionalProbDist(self.cfd_tags,
nltk.MLEProbDist)
# print "cpd summary:"
# print self.cpd_tags.viewitems()
print tabulate_cfd(self.cpd_tags)
all_outcomes = [v.keys() for v in self.cfd_tags.values()]
self.tag_set = set(self.cfd_tags.keys() +
[y for x in all_outcomes for y in x])
self.viterbi_init() # initialize viterbi
def train_markov_model_from_constraint_matrix(self, csv_path, mm_path,
delim="\t"):
table = [line.split(delim) for line in open(csv_path)]
tags = []
range_states = table.pop(0)[1:]
for row in table:
domain = row[0]
for i, r in enumerate(row[1:]):
s = r.replace(" ", "").strip("\n")
if (s == ''):
continue
if int(s) > 0:
for _ in range(0, int(s)):
tags.append((domain, range_states[i]))
self.cfd_tags = nltk.ConditionalFreqDist(tags)
print "cfd trained, counts:"
self.cfd_tags.tabulate()
print "test:"
print tabulate_cfd(self.cfd_tags)
# save this new cfd for later use
pickle.dump(self.cfd_tags, open(mm_path, "wb"))
# initialize the cpd
self.cpd_tags = nltk.ConditionalProbDist(self.cfd_tags,
nltk.MLEProbDist)
# print "cpd summary:"
# print self.cpd_tags.viewitems()
print tabulate_cfd(self.cpd_tags)
all_outcomes = [v.keys() for v in self.cfd_tags.values()]
self.tag_set = set(self.cfd_tags.keys() +
[y for x in all_outcomes for y in x])
self.viterbi_init() # initialize viterbi
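    # Illustrative sketch of the constraint matrix read above (tab-separated;
    # the tag names and counts are only examples): the header row names the
    # "to" tags, and each later row gives, for its "from" tag, how many times
    # each transition is allowed/observed, e.g.
    #
    #   from\to     c_f_c   c_rpSM_c    se
    #   s           1       1           0
    #   c_f_c       1       1           1
    #   c_rpSM_c    1       1           0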
def viterbi_init(self):
self.best_tagsequence = [] # presume this is for a new sequence
self.viterbi = []
self.backpointer = []
self.converted = []
self.history = []
if self.noisy_channel_source_model:
self.noisy_channel_source_model.reset()
self.noisy_channel = [] # history
def add_to_history(self, viterbi, backpointer, converted):
"""We store a history of n_history steps back in case we need to
rollback.
"""
if len(self.history) == self.n_history:
self.history.pop(-1)
self.history = [{"viterbi": deepcopy(viterbi),
"backpointer": deepcopy(backpointer),
"converted": deepcopy(converted)}] + self.history
def rollback(self, n):
"""Rolling back to n back in the history."""
# print "rollback",n
# print len(self.history)
self.history = self.history[n:]
self.viterbi = self.viterbi[:len(self.viterbi)-n]
self.backpointer = self.backpointer[:len(self.backpointer)-n]
self.converted = self.converted[:len(self.converted)-n]
self.best_tagsequence = self.best_tagsequence[
:len(self.best_tagsequence)-n]
if self.noisy_channel_source_model:
end_idx = len(self.best_tagsequence) - n
self.noisy_channel = self.noisy_channel[: end_idx] # history
def viterbi_step(self, input_distribution, word_index,
sequence_initial=False, timing_data=None):
"""The principal viterbi calculation for an extension to the
input prefix, i.e. not reseting.
"""
# source_weight = 13 # higher for WML
if sequence_initial:
# first time requires initialization with the start of sequence tag
first_viterbi = {}
first_backpointer = {}
first_converted = {}
if self.noisy_channel_source_model:
first_noisy_channel = {}
for tag in self.observation_tags:
# don't record anything for the START tag
# print tag
if tag == "s" or tag == 'se':
continue
# print word_index
# print input_distribution.shape
# print self.tagToIndexDict[tag]
# print input_distribution[word_index][self.tagToIndexDict[tag]]
tag_prob = self.cpd_tags["s"].prob(self.convert_tag("s", tag))
if tag_prob >= 0.00001: # allowing for margin of error
if self.constraint_only:
# TODO for now treating this like a {0,1} constraint
tag_prob = 1.0
else:
tag_prob = 0.0
prob = log(tag_prob) + \
log(input_distribution[word_index][self.tagToIndexDict[tag]])
# no timing bias to start
if self.noisy_channel_source_model:
# noisy channel eliminate the missing tags
source_tags = tag_conversion.\
convert_to_source_model_tags([tag],
uttseg=True)
source_prob, node = self.noisy_channel_source_model.\
get_log_diff_of_tag_suffix(source_tags,
n=1)
first_noisy_channel[tag] = node
# prob = (source_weight * source_prob) + \
# ((1 - source_weight) * prob)
prob += (SOURCE_WEIGHT * source_prob)
first_viterbi[tag] = prob
first_backpointer[tag] = "s"
first_converted[tag] = self.convert_tag("s", tag)
assert first_converted[tag] in self.tag_set,\
first_converted[tag] + " not in: " + str(self.tag_set)
# store first_viterbi (the dictionary for the first word)
# in the viterbi list, and record that the best previous tag
# for any first tag is "s" (start of sequence tag)
self.viterbi.append(first_viterbi)
self.backpointer.append(first_backpointer)
self.converted.append(first_converted)
if self.noisy_channel_source_model:
self.noisy_channel.append(first_noisy_channel)
self.add_to_history(first_viterbi, first_backpointer,
first_converted)
return
# else we're beyond the first word
# start a new dictionary where we can store, for each tag, the prob
# of the best tag sequence ending in that tag
# for the current word in the sentence
this_viterbi = {}
# we also store the best previous converted tag
this_converted = {} # added for the best converted tags
# start a new dictionary we we can store, for each tag,
# the best previous tag
this_backpointer = {}
# prev_viterbi is a dictionary that stores, for each tag, the prob
# of the best tag sequence ending in that tag
# for the previous word in the sentence.
# So it stores, for each tag, the probability of a tag sequence
# up to the previous word
# ending in that tag.
prev_viterbi = self.viterbi[-1]
prev_converted = self.converted[-1]
if self.noisy_channel_source_model:
this_noisy_channel = {}
prev_noisy_channel = self.noisy_channel[-1]
# for each tag, determine what the best previous-tag is,
# and what the probability is of the best tag sequence ending.
# store this information in the dictionary this_viterbi
if timing_data and self.timing_model:
# print timing_data
# X = self.timing_model_scaler.transform(np.asarray(
# [timing_data[word_index-2:word_index+1]]))
# TODO may already be an array
# print "calculating timing"
# print timing_data
X = self.timing_model_scaler.transform(np.asarray([timing_data]))
input_distribution_timing = self.timing_model.predict_proba(X)
# print input_distribution_timing
# raw_input()
for tag in self.observation_tags:
# don't record anything for the START/END tag
if tag in ["s", "se"]:
continue
# joint probability calculation:
# if this tag is X and the current word is w, then
# find the previous tag Y such that
# the best tag sequence that ends in X
# actually ends in Y X
# that is, the Y that maximizes
# prev_viterbi[ Y ] * P(X | Y) * P( w | X)
# The following command has the same notation
# that you saw in the sorted() command.
best_previous = None
best_prob = log(0.0) # has to be -inf for log numbers
# the inner loop which makes this quadratic complexity
# in the size of the tag set
for prevtag in prev_viterbi.keys():
# the best converted tag, needs to access the previous one
prev_converted_tag = prev_converted[prevtag]
# TODO there could be several conversions for this tag
converted_tag = self.convert_tag(prev_converted_tag, tag)
assert converted_tag in self.tag_set, tag + " " + \
converted_tag + " prev:" + str(prev_converted_tag)
tag_prob = self.cpd_tags[prev_converted_tag].prob(
converted_tag)
if tag_prob >= 0.000001: # allowing for margin of error
if self.constraint_only:
# TODO for now treating this like a {0,1} constraint
tag_prob = 1.0
test = converted_tag.lower()
# check for different boosts for different tags
if "rps" in test: # boost for start tags
# boost for rps
tag_prob = tag_prob * SPARSE_WEIGHT_RPS
if "rpe" in test:
# boost for rp end tags
tag_prob = tag_prob * SPARSE_WEIGHT_RPE
if "t_" in test[:2]:
# boost for t tags
tag_prob = tag_prob * SPARSE_WEIGHT_T_
if "_t" in test:
tag_prob = tag_prob * SPARSE_WEIGHT_T
if timing_data and self.timing_model:
found = False
for k, v in self.simple_trp_idx2label.items():
if v in tag:
timing_tag = k
found = True
break
if not found:
raw_input("warning")
# using the prob from the timing classifier
# array over the different classes
timing_prob = input_distribution_timing[0][timing_tag]
if self.constraint_only:
# just adapt the prob of the timing tag
# tag_prob = timing_prob
# the higher the timing weight the more influence
# the timing classifier has
tag_prob = (TIMING_WEIGHT * timing_prob) + tag_prob
# print tag, timing_tag, timing_prob
else:
tag_prob = (TIMING_WEIGHT * timing_prob) + tag_prob
else:
tag_prob = 0.0
# the principal joint log prob
prob = prev_viterbi[prevtag] + log(tag_prob) + \
log(input_distribution[word_index][self.tagToIndexDict[tag]])
# gets updated by noisy channel if in this mode
if self.noisy_channel_source_model:
prev_n_ch_node = prev_noisy_channel[prevtag]
# The noisy channel model adds the score
# if we assume this tag and the backpointed path
# from the prev tag
# Converting all to source tags first
# NB this is what is slowing things down
# Need to go from the known index
# in the nc model
full_backtrack_method = False
if full_backtrack_method:
inc_best_tag_sequence = [prevtag]
# invert the list of backpointers
inc_backpointer = deepcopy(self.backpointer)
inc_backpointer.reverse()
# go backwards through the list of backpointers
# (or in this case forward, we have inverted the
# backpointer list)
inc_current_best_tag = prevtag
for b_count, bp in enumerate(inc_backpointer):
inc_best_tag_sequence.append(
bp[inc_current_best_tag])
inc_current_best_tag = bp[inc_current_best_tag]
if b_count > 9:
break
inc_best_tag_sequence.reverse()
inc_best_tag_sequence.append(tag) # add tag
source_tags = tag_conversion.\
convert_to_source_model_tags(
inc_best_tag_sequence[1:],
uttseg=True)
source_prob, nc_node = \
self.noisy_channel_source_model.\
get_log_diff_of_tag_suffix(
source_tags,
n=1)
else:
# NB these only change if there is a backward
# looking tag
if "<rm-" in tag:
m = re.search("<rm-([0-9]+)\/>", tag)
if m:
back = min([int(m.group(1)),
len(self.backpointer)])
suffix = ["<e/>"] * back + ["<f/>"]
# to get the change in probability due to this
# we need to backtrack further
n = len(suffix)
else:
suffix = tag_conversion.\
convert_to_source_model_tags([tag])
n = 1 # just monotonic extention
# print back, i, source_tags
source_prob, nc_node = \
self.noisy_channel_source_model.\
get_log_diff_of_tag_suffix(
suffix,
start_node_ID=prev_n_ch_node,
n=n)
prob += (SOURCE_WEIGHT * source_prob)
if prob >= best_prob:
best_converted = converted_tag
best_previous = prevtag
best_prob = prob
if self.noisy_channel_source_model:
best_n_c_node = nc_node
# if best result is 0 do not add, pruning, could set this higher
if best_prob > log(0.0):
this_converted[tag] = best_converted
this_viterbi[tag] = best_prob
# the most likely preceding tag for this current tag
this_backpointer[tag] = best_previous
if self.noisy_channel_source_model:
this_noisy_channel[tag] = best_n_c_node
# done with all tags in this iteration
# so store the current viterbi step
self.viterbi.append(this_viterbi)
self.backpointer.append(this_backpointer)
self.converted.append(this_converted)
if self.noisy_channel_source_model:
self.noisy_channel.append(this_noisy_channel)
self.add_to_history(this_viterbi, this_backpointer, this_converted)
return
def get_best_n_tag_sequences(self, n, noisy_channel_source_model=None):
# Do a breadth-first search
# try the best final tag and its backpointers, then the second
# best final tag etc.
# once all final tags are done and n > len(final tags)
# move to the second best penult tags for each tag
# from the best to worst, then the 3rd row
# it terminates when n is reached
# use the history self.history = [{"viterbi": deepcopy(viterbi),
# "backpointer": deepcopy(backpointer),
# "converted": deepcopy(converted)}] + self.history
# num_seq = n if not noisy_channel_source_model else 1000
num_seq = n
best_n = [] # the tag sequences with their probability (tuple)
# print "len viterbi", len(self.viterbi)
# print "len backpoint", len(self.backpointer)
for viterbi_depth in range(len(self.viterbi)-1, -1, -1):
if len(best_n) == num_seq:
break
inc_prev_viterbi = deepcopy(self.viterbi[viterbi_depth])
# inc_best_previous = max(inc_prev_viterbi.keys(),
# key=lambda prevtag:
# inc_prev_viterbi[prevtag])
inc_previous = sorted(inc_prev_viterbi.items(),
key=lambda x: x[1], reverse=True)
for tag, prob in inc_previous:
# print tag, prob
# prob = inc_prev_viterbi[inc_best_previous]
# assert(prob != log(0)), "highest likelihood is 0!"
if prob == log(0):
continue
inc_best_tag_sequence = [tag]
# invert the list of backpointers
inc_backpointer = deepcopy(self.backpointer)
inc_backpointer.reverse()
# go backwards through the list of backpointers
# (or in this case forward, we have inverted the
# backpointer list)
inc_current_best_tag = tag
# print "backpointer..."
d = 0
for bp in inc_backpointer:
d += 1
# print "depth", d, "find bp for", inc_current_best_tag
inc_best_tag_sequence.append(bp[inc_current_best_tag])
inc_current_best_tag = bp[inc_current_best_tag]
# print "..."
inc_best_tag_sequence.reverse()
best_n.append((inc_best_tag_sequence, prob))
if len(best_n) == num_seq:
break
best_n = sorted(best_n, key=lambda x: x[1], reverse=True)
debug = False
if debug:
print "getting best n"
for s, p in best_n:
print s[-1], p
print "***"
assert(best_n[0][1] > log(0.0)), "best prob 0!"
if not noisy_channel_source_model:
# return inc_best_tag_sequence
return [x[0] for x in best_n]
# if noisy channel do the interpolation
# need to entertain the whole beam for the channel model and source
# model
# channel_beam = best_n # the tag sequences with their probability
# source_beam = noisy_channel.get_best_n_tag_sequences(1000)
# self.interpolate_(channel_beam, source_beam)
        channel_beam = [(x[0],
                         tag_conversion.convert_to_source_model_tags(x[0]),
                         x[1]) for x in best_n]
best_seqs = noisy_channel_source_model.\
interpolate_probs_with_n_best(
channel_beam,
source_beam_width=1000,
output_beam_width=n)
return best_seqs
# def get_best_tag_sequence(self):
# """Returns the best tag sequence from the input so far.
# """
# inc_prev_viterbi = deepcopy(self.viterbi[-1])
# inc_best_previous = max(inc_prev_viterbi.keys(),
# key=lambda prevtag: inc_prev_viterbi[prevtag])
# assert(inc_prev_viterbi[inc_best_previous]) != log(0),\
# "highest likelihood is 0!"
# inc_best_tag_sequence = [inc_best_previous]
# # invert the list of backpointers
# inc_backpointer = deepcopy(self.backpointer)
# inc_backpointer.reverse()
# # go backwards through the list of backpointers
# # (or in this case forward, we have inverted the backpointer list)
# inc_current_best_tag = inc_best_previous
# for bp in inc_backpointer:
# inc_best_tag_sequence.append(bp[inc_current_best_tag])
# inc_current_best_tag = bp[inc_current_best_tag]
# inc_best_tag_sequence.reverse()
# return inc_best_tag_sequence
def get_best_tag_sequence(self, noisy_channel_source_model=None):
l = self.get_best_n_tag_sequences(1, noisy_channel_source_model)
return l[0]
def viterbi(self, input_distribution, incremental_best=False):
"""Standard non incremental (sequence-level) viterbi over input_distribution input
Keyword arguments:
input_distribution -- the emmision probabilities of each step in the sequence,
array of width n_classes
incremental_best -- whether the tag sequence prefix is stored for
each step in the sequence (slightly 'hack-remental'
"""
incrementalBest = []
sentlen = len(input_distribution)
self.viterbi_init()
for word_index in range(0, sentlen):
self.viterbi_step(input_distribution, word_index, word_index == 0)
# INCREMENTAL RESULTS (hack-remental. doing it post-hoc)
# the best result we have so far, not given the next one
if incremental_best:
inc_best_tag_sequence = self.get_best_tag_sequence()
incrementalBest.append(deepcopy(inc_best_tag_sequence[1:]))
        # done with all words/input in the sentence/utterance
# find the probability of each tag having "se" next (end of utterance)
# and use that to find the overall best sequence
prev_converted = self.converted[-1]
prev_viterbi = self.viterbi[-1]
best_previous = max(prev_viterbi.keys(),
key=lambda prevtag: prev_viterbi[prevtag] +
log(self.cpd_tags[prev_converted[prevtag]].
prob("se")))
self.best_tagsequence = ["se", best_previous]
# invert the list of backpointers
self.backpointer.reverse()
# go backwards through the list of backpointers
# (or in this case forward, we've inverted the backpointer list)
# in each case:
# the following best tag is the one listed under
# the backpointer for the current best tag
current_best_tag = best_previous
for bp in self.backpointer:
self.best_tagsequence.append(bp[current_best_tag])
current_best_tag = bp[current_best_tag]
self.best_tagsequence.reverse()
if incremental_best:
# NB also consumes the end of utterance token! Last two the same
incrementalBest.append(self.best_tagsequence[1:-1])
return incrementalBest
return self.best_tagsequence[1:-1]
def viterbi_incremental(self, soft_max, a_range=None,
changed_suffix_only=False, timing_data=None,
words=None):
"""Given a new input_distribution input, output the latest labels.
Effectively incrementing/editing self.best_tagsequence.
Keyword arguments:
changed_suffix_only -- boolean, output the changed suffix of
the previous output sequence of labels.
i.e. if before this function is called the sequence is
[1:A, 2:B, 3:C]
and after it is
[1:A, 2:B, 3:E, 4:D]
then output is:
[3:E, 4:D]
(TODO maintaining the index/time spans is important
            to achieve this, even if only externally)
"""
previous_best = deepcopy(self.best_tagsequence)
# print "previous best", previous_best
if not a_range:
# if not specified consume the whole soft_max input
a_range = (0, len(soft_max))
for i in xrange(a_range[0], a_range[1]):
if self.noisy_channel_source_model:
self.noisy_channel_source_model.consume_word(words.pop(0))
self.viterbi_step(soft_max, i, sequence_initial=self.viterbi == [],
timing_data=timing_data)
# slice the input if multiple steps
# get the best tag sequence we have so far
self.best_tagsequence = self.get_best_tag_sequence()
# print "best_tag", self.best_tagsequence
if changed_suffix_only:
# print "current best", self.best_tagsequence
# only output the suffix of predictions which has changed-
# TODO needs IDs to work
for r in range(1, len(self.best_tagsequence)):
if r > len(previous_best)-1 or \
previous_best[r] != self.best_tagsequence[r]:
return self.best_tagsequence[r:]
return self.best_tagsequence[1:]
# def adjust_incremental_viterbi_with_source_channel(self, source_channel):
# """This reranks the current hypotheses with the noisy channel
# decode, adding a weighted log prob of the language model
# scores from the source model to the probs in viterbi.
# Note this should be done before the backpointer is computed
# for each new tag?
# """
if __name__ == '__main__':
def load_tags(filepath):
"""Returns a tag dictionary from word to a n int indicating index
by an integer.
"""
tag_dictionary = defaultdict(int)
f = open(filepath)
for line in f:
l = line.strip('\n').split(",")
tag_dictionary[l[1]] = int(l[0])
f.close()
return tag_dictionary
tags_name = "swbd_disf1_uttseg_simple_033"
tags = load_tags(
"../data/tag_representations/{}_tags.csv".format(
tags_name)
)
if "disf" in tags_name:
intereg_ind = len(tags.keys())
interreg_tag = "<i/><cc/>" if "uttseg" in tags_name else "<i/>"
tags[interreg_tag] = intereg_ind # add the interregnum tag
print tags
h = FirstOrderHMM(tags, markov_model_file=None)
mm_path = "models/{}_tags.pkl".format(tags_name)
# corpus_path = "../data/tag_representations/{}_tag_corpus.csv".format(
# tags_name).replace("_021", "")
# h.train_markov_model_from_file(corpus_path, mm_path, non_sparse=True)
csv_file = "models/{}.csv".format(tags_name)
h.train_markov_model_from_constraint_matrix(csv_file,
mm_path,
delim=",")
table = tabulate_cfd(h.cpd_tags)
test_f = open("models/{}_tags_table.csv".format(tags_name), "w")
test_f.write(table)
test_f.close()
| dsg-bielefeld/deep_disfluency | deep_disfluency/decoder/hmm.py | Python | mit | 38,494 |
from django import forms
from django.core.files.images import get_image_dimensions
from django.db.models.fields.files import FileField, ImageField
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext as _
#noinspection PyPackageRequirements
import magic
from easy_thumbnails.fields import ThumbnailerImageField
class RestrictedFileField(FileField):
def __init__(self, *args, **kwargs):
self.content_types = kwargs.pop('content_types', None)
self.min_upload_size = kwargs.pop('min_upload_size', None)
self.max_upload_size = kwargs.pop('max_upload_size', None)
super(RestrictedFileField, self).__init__(*args, **kwargs)
def clean(self, *args, **kwargs):
data = super(RestrictedFileField, self).clean(*args, **kwargs)
file = data.file
if self.content_types is not None:
content_type_headers = getattr(file, 'content_type', '')
file_magic = magic.Magic(mime=True)
file_content_type = file_magic.from_buffer(file.read(1024))
file.seek(0)
            if content_type_headers not in self.content_types or file_content_type not in self.content_types:
raise forms.ValidationError(_('Files of type {type} are not supported.'.format(type=file_content_type)))
if self.min_upload_size is not None:
if file._size < self.min_upload_size:
raise forms.ValidationError(
_('Files of size less than {min_size} are not allowed. Your file is {current_size}'.format(
                        min_size=filesizeformat(self.min_upload_size),
current_size=filesizeformat(file._size)
)))
if self.max_upload_size is not None:
if file._size > self.max_upload_size:
raise forms.ValidationError(
_('Files of size greater than {max_size} are not allowed. Your file is {current_size}'.format(
max_size=filesizeformat(self.max_upload_size),
current_size=filesizeformat(file._size)
)))
return data
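# Usage sketch (illustrative only; the model, upload path and limits below are
# assumptions, not part of this package):
#
#     from django.db import models
#
#     class Document(models.Model):
#         attachment = RestrictedFileField(
#             upload_to='documents/',
#             content_types=['application/pdf', 'image/png'],
#             min_upload_size=1024,               # reject files under 1 KiB
#             max_upload_size=5 * 1024 * 1024,    # reject files over 5 MiB
#         )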
class BaseRestrictedImageField(RestrictedFileField):
def __init__(self, *args, **kwargs):
self.min_height = kwargs.pop('min_height', None)
self.max_height = kwargs.pop('max_height', None)
self.min_width = kwargs.pop('min_width', None)
self.max_width = kwargs.pop('max_width', None)
super(BaseRestrictedImageField, self).__init__(*args, **kwargs)
def clean(self, *args, **kwargs):
data = super(BaseRestrictedImageField, self).clean(*args, **kwargs)
file = data.file
width, height = get_image_dimensions(file)
if self.min_height is not None:
if height < self.min_height:
raise forms.ValidationError(
_('Invalid height {height}px, only height greater than {min_height} allowed'.format(
height=height,
min_height=self.min_height,
)))
if self.max_height is not None:
if height > self.max_height:
raise forms.ValidationError(
_('Invalid height {height}px, only height less than {max_height} allowed'.format(
height=height,
max_height=self.max_height,
)))
if self.min_width is not None:
if width < self.min_width:
raise forms.ValidationError(
_('Invalid width {width}px, only width greater than {min_width} allowed'.format(
width=width,
min_width=self.min_width,
)))
if self.max_width is not None:
if width > self.max_width:
raise forms.ValidationError(
_('Invalid width {width}px, only width less than {max_width} allowed'.format(
width=width,
max_width=self.max_width,
)))
return data
class RestrictedImageField(BaseRestrictedImageField, ImageField):
def __init__(self, *args, **kwargs):
super(RestrictedImageField, self).__init__(*args, **kwargs)
class RestrictedThumbnailerImageField(BaseRestrictedImageField, ThumbnailerImageField):
def __init__(self, *args, **kwargs):
super(RestrictedThumbnailerImageField, self).__init__(*args, **kwargs)
class FileSizeQuota(object):
def __init__(self, max_usage=-1):
self.current_usage = 0
self.max_usage = max_usage
def update(self, items, attr_name):
self.current_usage = 0
for item in items:
the_file = getattr(item, attr_name, None)
if the_file:
self.current_usage += the_file.size
def exceeds(self, size=0):
if self.max_usage >= 0:
return self.current_usage + size > self.max_usage
else:
return False
def near_limit(self, limit_threshold=0.8):
return (float(self.current_usage) / float(self.max_usage)) > limit_threshold
class SizeQuotaValidator(object):
def __init__(self, max_usage):
self.quota = FileSizeQuota(max_usage)
def update_quota(self, items, attr_name):
self.quota.update(items, attr_name)
def __call__(self, file):
file_size = file.size
if self.quota.exceeds(file_size):
raise forms.ValidationError(_(
'Please keep the total uploaded files under {total_size}. With this file, the total would be {exceed_size}.'.format(
total_size=filesizeformat(self.quota.max_usage),
exceed_size=filesizeformat(self.quota.current_usage + file_size)
)))
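# Usage sketch (illustrative only; Document and its 'attachment' field are
# assumptions made for the example):
#
#     quota_validator = SizeQuotaValidator(max_usage=50 * 1024 * 1024)  # 50 MiB in total
#     quota_validator.update_quota(Document.objects.filter(owner=request.user), 'attachment')
#     upload = forms.FileField(validators=[quota_validator])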
class FileCountQuota(object):
def __init__(self, max_count=-1):
self.current_count = 0
self.max_count = max_count
#noinspection PyUnusedLocal
def update(self, items, attr_name):
self.current_count = len(items)
def exceeds(self, count=0):
if self.max_count >= 0:
return self.current_count + count > self.max_count
else:
return False
def near_limit(self, limit_threshold=0.8):
return (float(self.current_count) / float(self.max_count)) > limit_threshold
class CountQuotaValidator(object):
def __init__(self, max_count):
self.quota = FileCountQuota(max_count)
def update_quota(self, items, attr_name):
self.quota.update(items, attr_name)
#noinspection PyUnusedLocal
def __call__(self, file):
if self.quota.exceeds(1):
raise forms.ValidationError(_(
'Please keep the total uploaded files under {total_count} count. With this file, the total would be {exceed_count}.'.format(
total_count=self.quota.max_count,
exceed_count=self.quota.current_count + 1
)))
# South migration support
try:
#noinspection PyUnresolvedReferences,PyPackageRequirements
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
rules = [
(
(RestrictedFileField,), [],
{
'content_types': ['content_types', {'default': None}],
'min_upload_size': ['min_upload_size', {'default': None}],
'max_upload_size': ['max_upload_size', {'default': None}],
}
),
(
(RestrictedImageField, RestrictedThumbnailerImageField), [],
{
'content_types': ['content_types', {'default': None}],
'min_upload_size': ['min_upload_size', {'default': None}],
'max_upload_size': ['max_upload_size', {'default': None}],
'min_width': ['min_width', {'default': None}],
'max_width': ['max_width', {'default': None}],
'min_height': ['min_height', {'default': None}],
'max_height': ['max_height', {'default': None}],
}
),
]
add_introspection_rules(rules, ['^restrictedfile\.RestrictedFileField'])
add_introspection_rules(rules, ['^restrictedfile\.RestrictedImageField'])
add_introspection_rules(rules, ['^restrictedfile\.RestrictedThumbnailerImageField'])
| dlancer/django-restricted-file | restrictedfile/fields.py | Python | mit | 8,425 |
# Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
# Make this directory into a Python package.
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/services/webapp/doc/__init__.py | Python | agpl-3.0 | 186 |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 13:00:27 2017
@author: Eliot
shadows.py
"""
import os
import numpy as np
import cv2
def kerneldirection(kernel, direction):
'''
Inputs:
kernel: standard kernel operating in y-plane top to bottom
direction: tuple specifying direction
(1,0) = x-direction, left to right
(-1,0) = x-direction, right to left
(0,1) = y-direction, top to bottom
(0,-1) = y-direction, bottom to top
## TO ADD: DIAGONALS
(1,1) = diagonal, top left to bottom right
(1,-1) = diagonal, bottom left to top right
(-1,1) = diagonal, top right to bottom left
(-1,-1) = diagonal, bottom right to top left
'''
# ensure ksize is square
multiplier = 1
(kH, kW) = kernel.shape[:2]
assert(kH == kW)
    # define negative multiplier for directionality
if direction[0] == -1 or direction[1] == -1:
multiplier = -1
else:
multiplier = 1
transposed = multiplier * np.transpose(kernel, direction)
return transposed
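# Example (illustrative): reorienting the 5x5 Sobel-style kernel defined further
# below so it responds to horizontal, right-to-left gradients instead of the
# default top-to-bottom direction.
#
#     grad_right_to_left = kerneldirection(sobel5, (-1, 0))
#     gradient_img = cv2.filter2D(image, -1, grad_right_to_left)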
def trimcontours(contours, sizethreshold):
'''
function to reduce the list of contours and remove noise from contours by
thresholding out contours below a given size
'''
cont_output = []
for conts in contours:
area = cv2.contourArea(conts, False)
if area > sizethreshold:
cont_output.append(conts)
return cont_output
thisfilepath = os.path.dirname(__file__)
loaddirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/Input Images/adaptive_medianblur_gauss"))
savedirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/shadows"))
savedirpath2 = os.path.abspath(os.path.join(thisfilepath, "test_files/shadowcands_binary"))
sobel5 = np.array(([-2, -3, -4, -3, -2],
[-1, -2, -3, -2, -1],
[0, 0, 0, 0, 0],
[1, 2, 3, 2, 1],
[2, 3, 4, 3, 2]), dtype="float") #/2.0
directions = ((1,0),(0,1),(-1,0),(0,-1))
for imgs in os.listdir(loaddirpath):
colour = cv2.imread(loaddirpath + "/" + imgs)
c_cop = colour.copy()
image = colour[...,0]
binary_combined = np.zeros_like(image, dtype=np.uint8)
for direction in directions:
grad = kerneldirection(sobel5, direction) #3
gradientimage = cv2.filter2D(image, -1, grad)
med = cv2.medianBlur(gradientimage, 11)
gradientimage = cv2.GaussianBlur(med, (13,13), 0)
# 8. Threshold out high gradients (non-shadow gradients)
ret, binarythresh = cv2.threshold(gradientimage, 55, 255, cv2.THRESH_BINARY) #alternative method
binary_combined = cv2.bitwise_or(binary_combined, binarythresh)
# draw rectangle to cover edges of module
imH, imW = binary_combined.shape[0], binary_combined.shape[1]
thick = 2
cv2.rectangle(binary_combined,(-1+thick, -1+thick),(imW-thick, imH-thick),255,thickness=thick)
cv2.imwrite(savedirpath2 + "/" + imgs[:3] + "candshad_binary.png", binary_combined)
# invert eroded image
binaryinverted = np.bitwise_not(binary_combined)
# new contours
(cimage2, contr2, heir2) = cv2.findContours(binaryinverted.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
heir2 = heir2[0] # get the actual inner heirarchy
print(heir2)
print(heir2.shape)
# create Colour binary image:
colbin = np.stack((binaryinverted, binaryinverted, binaryinverted), axis=2)
colbin2 = np.copy(colbin)
for component in zip(contr2, heir2):
currentContour = component[0]
currentHierarchy = component[1]
currentContourArea = cv2.contourArea(currentContour)
        # TODO: compute the mean pixel value under the contour
        # meancontourvalue =
print("Current Contour Area = ", currentContourArea)
if currentContourArea < 150000:
if currentContourArea > 500: #don't use this, use average value compared to otsu threshold
if currentHierarchy[3] < 2:
print(currentHierarchy)
                    cv2.drawContours(colbin2, [currentContour], -1, (0, 0, 255), 2)
cv2.imwrite(savedirpath + "/" + imgs[:3] + "shadowcontours.png", colbin2) | EliotBryant/ShadDetector | shadDetector_testing/Gradient Based Methods/shadows.py | Python | gpl-3.0 | 4,123 |
##################################################
#
# PandoraPFANew module
#
# Author: F.Gaede, J.Engels, DESY
# Date: June 8 2010
#
##################################################
from util import *
# custom imports
from marlinpkg import MarlinPKG
from baseilc import BaseILC
class PandoraPFANew(BaseILC):
""" Responsible for the PandoraPFANew installation process. """
def __init__(self, userInput):
BaseILC.__init__(self, userInput, "PandoraPFANew","PandoraPFANew" )
self.download.root = 'PandoraPFANew'
self.hasCMakeFindSupport = True
self.optmodules = [ "ROOT" ]
self.reqfiles = [ [
'lib/libPandoraFramework.so', 'lib/libPandoraFramework.a', 'lib/libPandoraFramework.dylib',
'lib/libPandoraPFANew.so', 'lib/libPandoraPFANew.a', 'lib/libPandoraPFANew.dylib',
'lib/libPandoraSDK.so', 'lib/libPandoraSDK.a', 'lib/libPandoraSDK.dylib',
] ]
def compile(self):
""" compile PandoraPFANew"""
os.chdir( self.installPath+'/build' )
if self.rebuild:
tryunlink( "CMakeCache.txt" )
if( os.system( self.genCMakeCmd() + " 2>&1 | tee -a " + self.logfile ) != 0 ):
self.abort( "failed to configure!!" )
if( os.system( "make ${MAKEOPTS} 2>&1 | tee -a " + self.logfile ) != 0 ):
self.abort( "failed to compile!!" )
if( os.system( "make install 2>&1 | tee -a " + self.logfile ) != 0 ):
self.abort( "failed to install!!" )
def postCheckDeps(self):
self.env["PANDORAPFANEW"] = self.installPath
self.envpath["LD_LIBRARY_PATH"].append( "$PANDORAPFANEW/lib" )
class MarlinPandora(MarlinPKG):
""" Responsible for the MarlinPandora installation process. """
def __init__(self, userInput):
MarlinPKG.__init__(self, "MarlinPandora", userInput )
# required modules
self.reqmodules = [ "Marlin", "MarlinUtil", "GEAR", "PandoraPFANew", "LCIO" ]
self.download.root = 'PandoraPFANew'
class PandoraAnalysis(MarlinPKG):
""" Responsible for the PandoraAnalysis installation process. """
def __init__(self, userInput):
MarlinPKG.__init__(self, "PandoraAnalysis", userInput )
# required modules
self.reqmodules = [ "Marlin", "GEAR", "LCIO", "ROOT" ]
self.download.root = 'PandoraPFANew'
| kislerdm/alibava_analysis-tool | ilcinstall_eutel-git/ilcsoft/pandoranew.py | Python | gpl-2.0 | 2,572 |
# -*- coding: utf-8 -*-
import json
try:
from uwhois import Uwhois
except ImportError:
print("uwhois module not installed.")
misperrors = {'error': 'Error'}
mispattributes = {'input': ['domain', 'ip-src', 'ip-dst'], 'output': ['freetext']}
moduleinfo = {'version': '0.1', 'author': 'Raphaël Vinot',
'description': 'Query a local instance of uwhois (https://github.com/rafiot/uwhoisd)',
'module-type': ['expansion']}
moduleconfig = ['server', 'port']
def handler(q=False):
if q is False:
return False
request = json.loads(q)
if request.get('domain'):
toquery = request['domain']
elif request.get('ip-src'):
toquery = request['ip-src']
elif request.get('ip-dst'):
toquery = request['ip-dst']
else:
misperrors['error'] = "Unsupported attributes type"
return misperrors
    if not request.get('config') or not (request['config'].get('server') and request['config'].get('port')):
        misperrors['error'] = 'uwhois configuration (server, port) is missing'
return misperrors
uwhois = Uwhois(request['config']['server'], int(request['config']['port']))
if 'event_id' in request:
return handle_expansion(uwhois, toquery)
def handle_expansion(w, domain):
return {'results': [{'types': mispattributes['output'], 'values': w.query(domain)}]}
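# Example request payload this module expects (illustrative values only):
#
#     {
#         "domain": "example.com",
#         "config": {"server": "127.0.0.1", "port": "4243"},
#         "event_id": "1"
#     }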
def introspection():
return mispattributes
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo
| Rafiot/misp-modules | misp_modules/modules/expansion/whois.py | Python | agpl-3.0 | 1,492 |
import logging
from collections import defaultdict
from dataclasses import dataclass
from sqlalchemy import func, and_, distinct, literal_column
from sqlalchemy.orm import aliased, contains_eager, joinedload
from pycroft.helpers.i18n import deferred_gettext
from pycroft.lib.exc import PycroftLibException
from pycroft.lib.logging import log_room_event
from pycroft.model import session
from pycroft.model.address import Address
from pycroft.model.facilities import Room, Building
from pycroft.model.host import Host
from pycroft.model.session import with_transaction
from pycroft.model.user import User
logger = logging.getLogger(__name__)
class RoomAlreadyExistsException(PycroftLibException):
pass
def get_overcrowded_rooms(building_id=None):
"""
:param building_id: Limit to rooms of the building.
Returns a dict of overcrowded rooms with their inhabitants
:return: dict
"""
oc_rooms_filter = []
if building_id is not None:
oc_rooms_filter.append(Room.building_id == building_id)
# rooms containing multiple users each of which has a host in the room
oc_rooms_query = (
Room.q.join(User)
.join(Host).filter(User.room_id == Host.room_id)
.filter(*oc_rooms_filter)
.group_by(Room.id).having(func.count(distinct(User.id)) > 1)
.subquery()
)
user = aliased(User)
# room can be extracted from the subquery
oc_room = contains_eager(user.room, alias=oc_rooms_query)
query = (
session.session.query(user)
# only include users living in overcrowded rooms
.join(oc_rooms_query)
# only include users that have a host in their room
.join(Host,
and_(user.id == Host.owner_id, user.room_id == Host.room_id))
.options(oc_room)
.options(oc_room.joinedload(Room.building))
.options(joinedload(user.current_properties))
)
rooms = defaultdict(list)
for user in query.all():
rooms[user.room.id].append(user)
return rooms
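# Usage sketch (illustrative; the attribute used for display is an assumption):
#
#     overcrowded = get_overcrowded_rooms(building_id=1)
#     for room_id, inhabitants in overcrowded.items():
#         print(room_id, [user.name for user in inhabitants])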
@with_transaction
def create_room(building, level, number, processor, address,
inhabitable=True, vo_suchname: str | None = None):
if Room.q.filter_by(number=number, level=level, building=building).first() is not None:
raise RoomAlreadyExistsException
if vo_suchname and Room.q.filter_by(swdd_vo_suchname=vo_suchname).first() is not None:
raise RoomAlreadyExistsException
room = Room(number=number,
level=level,
inhabitable=inhabitable,
building=building,
address=address,
swdd_vo_suchname=vo_suchname)
log_room_event("Room created.", processor, room)
return room
@with_transaction
def edit_room(room, number, inhabitable, vo_suchname: str, address: Address, processor: User):
if room.number != number:
if Room.q.filter_by(number=number, level=room.level, building=room.building).filter(Room.id!=room.id).first() is not None:
raise RoomAlreadyExistsException()
log_room_event(f"Renamed room from {room.number} to {number}.", processor, room)
room.number = number
if room.inhabitable != inhabitable:
log_room_event(f"Changed inhabitable status to {str(inhabitable)}.", processor, room)
room.inhabitable = inhabitable
if room.swdd_vo_suchname != vo_suchname:
log_room_event(
deferred_gettext("Changed VO id from {} to {}.")
.format(room.swdd_vo_suchname, vo_suchname).to_json(),
processor, room
)
room.swdd_vo_suchname = vo_suchname
if room.address != address:
room.address = address
log_room_event(deferred_gettext("Changed address to {}").format(str(address)).to_json(),
processor, room)
for user in room.users_sharing_address:
user.address = room.address
return room
def get_room(building_id, level, room_number):
return Room.q.filter_by(number=room_number,
level=level, building_id=building_id).one_or_none()
@dataclass
class RoomAddressSuggestion:
street: str
number: str
zip_code: str
city: str
state: str
country: str
def __str__(self):
return f"{self.street} {self.number}, {self.zip_code} {self.city}," \
+ (f" {self.state}, " if self.state else "") \
+ f"{self.country}"
def suggest_room_address_data(building: Building) -> RoomAddressSuggestion | None:
"""Return the most common address features of preexisting rooms in a certain building. """
cols = (Address.street, Address.number, Address.zip_code,
Address.city, Address.state, Address.country)
query = (
session.session.query()
.select_from(Room)
.join(Address)
.add_columns(*cols)
.add_columns(func.count().label('count'))
.filter(Room.building == building)
.group_by(*cols)
.order_by(literal_column('count').desc())
)
rows = query.all()
if not rows:
return
def row_to_suggestion(row):
return RoomAddressSuggestion(*list(row[:-1]))
row, *rest = rows
suggestion = row_to_suggestion(row)
if rest:
logger.warning("Address suggestion for building '%s' not unique (%d total):\n"
"first suggestion:\n %s (%d times),\n"
"runner-up suggestion:\n %s (%d times)",
building.short_name, len(rows),
suggestion, row.count,
row_to_suggestion(rest[0]), rest[0].count)
return suggestion
| agdsn/pycroft | pycroft/lib/facilities.py | Python | apache-2.0 | 5,723 |
import inspect
import re
from numba.tests.support import TestCase, override_config, needs_subprocess
from numba import jit, njit
from numba.core import types
import unittest
import llvmlite.binding as llvm
#NOTE: These tests are potentially sensitive to changes in SSA or lowering
# behaviour and may need updating should changes be made to the corresponding
# algorithms.
class TestDebugInfo(TestCase):
"""
These tests only checks the compiled assembly for debuginfo.
"""
def _getasm(self, fn, sig):
fn.compile(sig)
return fn.inspect_asm(sig)
def _check(self, fn, sig, expect):
asm = self._getasm(fn, sig=sig)
m = re.search(r"\.section.+debug", asm, re.I)
got = m is not None
self.assertEqual(expect, got, msg='debug info not found in:\n%s' % asm)
def test_no_debuginfo_in_asm(self):
@jit(nopython=True, debug=False)
def foo(x):
return x
self._check(foo, sig=(types.int32,), expect=False)
def test_debuginfo_in_asm(self):
@jit(nopython=True, debug=True)
def foo(x):
return x
self._check(foo, sig=(types.int32,), expect=True)
def test_environment_override(self):
with override_config('DEBUGINFO_DEFAULT', 1):
# Using default value
@jit(nopython=True)
def foo(x):
return x
self._check(foo, sig=(types.int32,), expect=True)
# User override default
@jit(nopython=True, debug=False)
def bar(x):
return x
self._check(bar, sig=(types.int32,), expect=False)
class TestDebugInfoEmission(TestCase):
""" Tests that debug info is emitted correctly.
"""
_NUMBA_OPT_0_ENV = {'NUMBA_OPT': '0'}
def _get_llvmir(self, fn, sig):
with override_config('OPT', 0):
fn.compile(sig)
return fn.inspect_llvm(sig)
def _get_metadata(self, fn, sig):
ll = self._get_llvmir(fn, sig).splitlines()
meta_re = re.compile(r'![0-9]+ =.*')
metadata = []
for line in ll:
if meta_re.match(line):
metadata.append(line)
return metadata
def _subprocess_test_runner(self, test_name):
themod = self.__module__
thecls = type(self).__name__
self.subprocess_test_runner(test_module=themod,
test_class=thecls,
test_name=test_name,
envvars=self._NUMBA_OPT_0_ENV)
def test_DW_LANG(self):
@njit(debug=True)
def foo():
pass
metadata = self._get_metadata(foo, sig=())
DICompileUnit = metadata[0]
self.assertEqual('!0', DICompileUnit[:2])
self.assertIn('!DICompileUnit(language: DW_LANG_C_plus_plus',
DICompileUnit)
self.assertIn('producer: "Numba"', DICompileUnit)
def test_DILocation(self):
""" Tests that DILocation information is reasonable.
"""
@njit(debug=True, error_model='numpy')
def foo(a):
b = a + 1.23
c = a * 2.34
d = b / c
print(d)
return d
# the above produces LLVM like:
# define function() {
# entry:
# alloca
# store 0 to alloca
# <arithmetic for doing the operations on b, c, d>
# setup for print
# branch
# other_labels:
# ... <elided>
# }
#
# The following checks that:
# * the alloca and store have no !dbg
# * the arithmetic occurs in the order defined and with !dbg
# * that the !dbg entries are monotonically increasing in value with
# source line number
sig = (types.float64,)
metadata = self._get_metadata(foo, sig=sig)
full_ir = self._get_llvmir(foo, sig=sig)
module = llvm.parse_assembly(full_ir)
name = foo.overloads[foo.signatures[0]].fndesc.mangled_name
funcs = [x for x in module.functions if x.name == name]
self.assertEqual(len(funcs), 1)
func = funcs[0]
blocks = [x for x in func.blocks]
self.assertGreater(len(blocks), 1)
block = blocks[0]
# Find non-call instr and check the sequence is as expected
instrs = [x for x in block.instructions if x.opcode != 'call']
op_seq = [x.opcode for x in instrs]
op_expect = ('fadd', 'fmul', 'fdiv')
self.assertIn(''.join(op_expect), ''.join(op_seq))
# Parse out metadata from end of each line, check it monotonically
# ascends with LLVM source line. Also store all the dbg references,
# these will be checked later.
line2dbg = set()
re_dbg_ref = re.compile(r'.*!dbg (![0-9]+).*$')
found = -1
for instr in instrs:
inst_as_str = str(instr)
matched = re_dbg_ref.match(inst_as_str)
if not matched:
# if there's no match, ensure it is one of alloca or store,
# it's important that the zero init/alloca instructions have
# no dbg data
accepted = ('alloca ', 'store ')
self.assertTrue(any([x in inst_as_str for x in accepted]))
continue
groups = matched.groups()
self.assertEqual(len(groups), 1)
dbg_val = groups[0]
int_dbg_val = int(dbg_val[1:])
if found >= 0:
self.assertTrue(int_dbg_val >= found)
found = int_dbg_val
# some lines will alias dbg info, this is fine, it's only used to
# make sure that the line numbers are correct WRT python
line2dbg.add(dbg_val)
pysrc, pysrc_line_start = inspect.getsourcelines(foo)
# build a map of dbg reference to DI* information
metadata_definition_map = dict()
meta_definition_split = re.compile(r'(![0-9]+) = (.*)')
for line in metadata:
matched = meta_definition_split.match(line)
if matched:
dbg_val, info = matched.groups()
metadata_definition_map[dbg_val] = info
# Pull out metadata entries referred to by the llvm line end !dbg
# check they match the python source, the +2 is for the @njit decorator
# and the function definition line.
offsets = [0, # b = a + 1
1, # a * 2.34
2, # d = b / c
3, # print(d)
]
pyln_range = [pysrc_line_start + 2 + x for x in offsets]
# do the check
for (k, line_no) in zip(sorted(line2dbg, key=lambda x: int(x[1:])),
pyln_range):
dilocation_info = metadata_definition_map[k]
self.assertIn(f'line: {line_no}', dilocation_info)
# Check that variable "a" is declared as on the same line as function
# definition.
expr = r'.*!DILocalVariable\(name: "a",.*line: ([0-9]+),.*'
match_local_var_a = re.compile(expr)
for entry in metadata_definition_map.values():
matched = match_local_var_a.match(entry)
if matched:
groups = matched.groups()
self.assertEqual(len(groups), 1)
dbg_line = int(groups[0])
self.assertEqual(dbg_line, pysrc_line_start)
break
else:
self.fail('Assertion on DILocalVariable not made')
@needs_subprocess
def test_DILocation_entry_blk_impl(self):
""" This tests that the unconditional jump emitted at the tail of
the entry block has no debug metadata associated with it. In practice,
if debug metadata is associated with it, it manifests as the
prologue_end being associated with the end_sequence or similar (due to
the way code gen works for the entry block)."""
@njit(debug=True)
def foo(a):
return a + 1
foo(123)
full_ir = foo.inspect_llvm(foo.signatures[0])
# The above produces LLVM like:
#
# define function() {
# entry:
# alloca
# store 0 to alloca
# unconditional jump to body:
#
# body:
# ... <elided>
# }
module = llvm.parse_assembly(full_ir)
name = foo.overloads[foo.signatures[0]].fndesc.mangled_name
funcs = [x for x in module.functions if x.name == name]
self.assertEqual(len(funcs), 1)
func = funcs[0]
blocks = [x for x in func.blocks]
self.assertEqual(len(blocks), 2)
entry_block, body_block = blocks
# Assert that the tail of the entry block is an unconditional jump to
# the body block and that the jump has no associated debug info.
entry_instr = [x for x in entry_block.instructions]
ujmp = entry_instr[-1]
self.assertEqual(ujmp.opcode, 'br')
ujmp_operands = [x for x in ujmp.operands]
self.assertEqual(len(ujmp_operands), 1)
target_data = ujmp_operands[0]
target = str(target_data).split(':')[0].strip()
# check the unconditional jump target is to the body block
self.assertEqual(target, body_block.name)
# check the uncondition jump instr itself has no metadata
self.assertTrue(str(ujmp).endswith(target))
def test_DILocation_entry_blk(self):
# Test runner for test_DILocation_entry_blk_impl, needs a subprocess
# as jitting literally anything at any point in the lifetime of the
# process ends up with a codegen at opt 3. This is not amenable to this
# test!
# This test relies on the CFG not being simplified as it checks the jump
# from the entry block to the first basic block. Force OPT as 0, if set
# via the env var the targetmachine and various pass managers all end up
# at OPT 0 and the IR is minimally transformed prior to lowering to ELF.
self._subprocess_test_runner('test_DILocation_entry_blk_impl')
@needs_subprocess
def test_DILocation_decref_impl(self):
""" This tests that decref's generated from `ir.Del`s as variables go
out of scope do not have debuginfo associated with them (the location of
`ir.Del` is an implementation detail).
"""
@njit(debug=True)
def sink(*x):
pass
# This function has many decrefs!
@njit(debug=True)
def foo(a):
x = (a, a)
if a[0] == 0:
sink(x)
return 12
z = x[0][0]
return z
sig = (types.float64[::1],)
full_ir = self._get_llvmir(foo, sig=sig)
# make sure decref lines end with `meminfo.<number>)` without !dbg info.
count = 0
for line in full_ir.splitlines():
line_stripped = line.strip()
if line_stripped.startswith('call void @NRT_decref'):
self.assertRegex(line, r'.*meminfo\.[0-9]+\)$')
count += 1
self.assertGreater(count, 0) # make sure there were some decrefs!
def test_DILocation_decref(self):
# Test runner for test_DILocation_decref_impl, needs a subprocess
# with opt=0 to preserve decrefs.
self._subprocess_test_runner('test_DILocation_decref_impl')
def test_DILocation_undefined(self):
""" Tests that DILocation information for undefined vars is associated
with the line of the function definition (so it ends up in the prologue)
"""
@njit(debug=True)
def foo(n):
if n:
if n > 0:
c = 0
return c
else:
# variable c is not defined in this branch
c += 1
return c
sig = (types.intp,)
metadata = self._get_metadata(foo, sig=sig)
pysrc, pysrc_line_start = inspect.getsourcelines(foo)
# Looks for versions of variable "c" and captures the line number
expr = r'.*!DILocalVariable\(name: "c\$?[0-9]?",.*line: ([0-9]+),.*'
matcher = re.compile(expr)
associated_lines = set()
for md in metadata:
match = matcher.match(md)
if match:
groups = match.groups()
self.assertEqual(len(groups), 1)
associated_lines.add(int(groups[0]))
self.assertEqual(len(associated_lines), 3) # 3 versions of 'c'
self.assertIn(pysrc_line_start, associated_lines)
def test_DILocation_versioned_variables(self):
""" Tests that DILocation information for versions of variables matches
up to their definition site."""
# Note: there's still something wrong in the DI/SSA naming, the ret c is
# associated with the logically first definition.
@njit(debug=True)
def foo(n):
if n:
c = 5
else:
c = 1
return c
sig = (types.intp,)
metadata = self._get_metadata(foo, sig=sig)
pysrc, pysrc_line_start = inspect.getsourcelines(foo)
# Looks for SSA versioned names i.e. <basename>$<version id> of the
# variable 'c' and captures the line
expr = r'.*!DILocalVariable\(name: "c\$[0-9]?",.*line: ([0-9]+),.*'
matcher = re.compile(expr)
associated_lines = set()
for md in metadata:
match = matcher.match(md)
if match:
groups = match.groups()
self.assertEqual(len(groups), 1)
associated_lines.add(int(groups[0]))
self.assertEqual(len(associated_lines), 2) # 2 SSA versioned names 'c'
# Now find the `c = ` lines in the python source
py_lines = set()
for ix, pyln in enumerate(pysrc):
if 'c = ' in pyln:
py_lines.add(ix + pysrc_line_start)
self.assertEqual(len(py_lines), 2) # 2 assignments to c
# check that the DILocation from the DI for `c` matches the python src
self.assertEqual(associated_lines, py_lines)
if __name__ == '__main__':
unittest.main()
| cpcloud/numba | numba/tests/test_debuginfo.py | Python | bsd-2-clause | 14,320 |
"""
https://docs.djangoproject.com/en/dev/intro/tutorial05/
"""
import datetime
import unittest
from django.utils import timezone
from django.test import TestCase
from django.core.urlresolvers import reverse
from polls.models import Poll
class PollMethodTests(TestCase):
def test_was_published_recently_with_future_poll(self):
"""
was_published_recently() should return False for polls whose
pub_date is in the future
"""
future_poll = Poll(pub_date=timezone.now() + datetime.timedelta(days=30))
self.assertEqual(future_poll.was_published_recently(), False)
def test_was_published_recently_with_old_poll(self):
"""
was_published_recently() should return False for polls whose pub_date
is older than 1 day
"""
old_poll = Poll(pub_date=timezone.now() - datetime.timedelta(days=30))
self.assertEqual(old_poll.was_published_recently(), False)
def test_was_published_recently_with_recent_poll(self):
"""
was_published_recently() should return True for polls whose pub_date
is within the last day
"""
recent_poll = Poll(pub_date=timezone.now() - datetime.timedelta(hours=1))
self.assertEqual(recent_poll.was_published_recently(), True)
def create_poll(question, days):
"""
Creates a poll with the given `question` published the given number of
`days` offset to now (negative for polls published in the past,
positive for polls that have yet to be published).
"""
return Poll.objects.create(question=question,
pub_date=timezone.now() + datetime.timedelta(days=days))
@unittest.skip("""NoReverseMatch: u"'polls" is not a registered namespace""")
class PollViewTests(TestCase):
def test_index_view_with_no_polls(self):
"""
If no polls exist, an appropriate message should be displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_poll_list'], [])
def test_index_view_with_a_past_poll(self):
"""
Polls with a pub_date in the past should be displayed on the index page.
"""
create_poll(question="Past poll.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: Past poll.>']
)
def test_index_view_with_a_future_poll(self):
"""
Polls with a pub_date in the future should not be displayed on the
index page.
"""
create_poll(question="Future poll.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.", status_code=200)
self.assertQuerysetEqual(response.context['latest_poll_list'], [])
def test_index_view_with_future_poll_and_past_poll(self):
"""
Even if both past and future polls exist, only past polls should be
displayed.
"""
create_poll(question="Past poll.", days=-30)
create_poll(question="Future poll.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: Past poll.>']
)
def test_index_view_with_two_past_polls(self):
"""
The polls index page may display multiple polls.
"""
create_poll(question="Past poll 1.", days=-30)
create_poll(question="Past poll 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: Past poll 2.>', '<Poll: Past poll 1.>']
)
class NorthTests(TestCase):
fixtures = ['demo']
def test_initdb_demo(self):
#~ from django.core.management import call_command
#~ rv = call_command('initdb','demo',interactive=False)
#~ self.assertEqual(rv,0)
self.assertEqual(len(Poll.objects.all()),3)
| lsaffre/north | docs/tutorials/polls/polls/tests.py | Python | bsd-2-clause | 4,293 |
class config_eval_or(config_base):
math_level = 1
op = '||'
mutable = 3
def possible_values(self, param_values, prop, stat):
if True in param_values:
return ( { 'true', 'false' }, 3 )
else:
return config_base.possible_values(self, param_values, prop, stat)
def eval_or(param):
for p in param:
if eval_boolean([p]) == 'true':
return 'true'
return 'false'
# TESTS
# IN [ 'true', 'true' ]
# OUT 'true'
# IN [ 'true', 'false' ]
# OUT 'true'
# IN [ 'false', 'true' ]
# OUT 'true'
# IN [ 'false', 'false' ]
# OUT 'false'
| plepe/pgmapcss | pgmapcss/eval/eval_or.py | Python | agpl-3.0 | 603 |
from PyQt4.QtGui import *
from electrum_doge.plugins import BasePlugin, hook
from electrum_doge.i18n import _
import datetime
from electrum_doge.util import format_satoshis
try:
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.patches import Ellipse
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
flag_matlib=True
except:
flag_matlib=False
class Plugin(BasePlugin):
def fullname(self):
return 'Plot History'
def description(self):
return '%s\n%s' % (_("Ability to plot transaction history in graphical mode."), _("Warning: Requires matplotlib library."))
def is_available(self):
if flag_matlib:
return True
else:
return False
def is_enabled(self):
if not self.is_available():
return False
else:
return True
@hook
def init_qt(self, gui):
self.win = gui.main_window
@hook
def export_history_dialog(self, d,hbox):
self.wallet = d.wallet
history = self.wallet.get_tx_history()
if len(history) > 0:
b = QPushButton(_("Preview plot"))
hbox.addWidget(b)
b.clicked.connect(lambda: self.do_plot(self.wallet))
else:
b = QPushButton(_("No history to plot"))
hbox.addWidget(b)
def do_plot(self,wallet):
history = wallet.get_tx_history()
balance_Val=[]
fee_val=[]
value_val=[]
datenums=[]
unknown_trans=0
pending_trans=0
counter_trans=0
for item in history:
tx_hash, confirmations, is_mine, value, fee, balance, timestamp = item
if confirmations:
if timestamp is not None:
try:
datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
balance_string = format_satoshis(balance, False)
balance_Val.append(float((format_satoshis(balance,False)))*1000.0)
                    except (RuntimeError, TypeError, NameError) as reason:
unknown_trans=unknown_trans+1
pass
else:
unknown_trans=unknown_trans+1
else:
pending_trans=pending_trans+1
if value is not None:
value_string = format_satoshis(value, True)
value_val.append(float(value_string)*1000.0)
else:
value_string = '--'
if fee is not None:
fee_string = format_satoshis(fee, True)
fee_val.append(float(fee_string))
else:
fee_string = '0'
if tx_hash:
label, is_default_label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
x=19
test11="Unknown transactions = "+str(unknown_trans)+" Pending transactions = "+str(pending_trans)+" ."
box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
box1.set_text(test11)
box = HPacker(children=[box1],
align="center",
pad=0.1, sep=15)
anchored_box = AnchoredOffsetbox(loc=3,
child=box, pad=0.5,
frameon=True,
bbox_to_anchor=(0.5, 1.02),
bbox_transform=ax.transAxes,
borderpad=0.5,
)
ax.add_artist(anchored_box)
plt.ylabel('mBTC')
plt.xlabel('Dates')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[0].plot(datenums,balance_Val,marker='o',linestyle='-',color='blue',label='Balance')
axarr[0].legend(loc='upper left')
axarr[0].set_title('History Transactions')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[1].plot(datenums,fee_val,marker='o',linestyle='-',color='red',label='Fee')
axarr[1].plot(datenums,value_val,marker='o',linestyle='-',color='green',label='Value')
axarr[1].legend(loc='upper left')
# plt.annotate('unknown transaction = %d \n pending transactions = %d' %(unknown_trans,pending_trans),xy=(0.7,0.05),xycoords='axes fraction',size=12)
plt.show()
| electrumalt/electrum-doge | plugins/plot.py | Python | gpl-3.0 | 4,569 |
import sqlite3
class database(object):
def __init__(self):
self.conn = sqlite3.connect('data/coinbase.db')
self.c = self.conn.cursor()
self.c.execute('''pragma foreign_keys=on''')
self.__create_tables_all()
# create all tables
def __create_tables_all(self):
self.__create_table_coin_table()
self.__create_table_coin_price()
self.__create_table_rank()
self.__create_table_market()
self.__create_table_balance()
self.__create_table_users()
self.__create__table_time_of_update()
self.__create__table_wallets()
def __create_table_coin_table(self):
self.c.execute('''
CREATE TABLE IF NOT EXISTS coin_table (
idCoin_num INTEGER NOT NULL,
coin_name VARCHAR(45) NOT NULL,
coin_symbol VARCHAR(45) NOT NULL,
coin_id VARCHAR(45) NOT NULL,
PRIMARY KEY (idCoin_num));
''')
def __create_table_coin_price(self):
self.c.execute('''
CREATE TABLE IF NOT EXISTS coin_price (
coin_price_id INTEGER NOT NULL,
fk_coin_id INT NOT NULL,
fk_idUpdate INT NOT NULL,
usd_price REAL NOT NULL,
btc_price REAL NOT NULL,
marketcap_usd REAL NOT NULL,
PRIMARY KEY (coin_price_id),
FOREIGN KEY(fk_coin_id) REFERENCES coin_table(idCoin_num),
FOREIGN KEY(fk_idUpdate) REFERENCES time_of_update(idUpdate));
''')
def __create_table_rank(self):
self.c.execute('''
CREATE TABLE IF NOT EXISTS rank (
rankID INTEGER NOT NULL,
fk_idUpdate INT NOT NULL,
rank INT NOT NULL,
coin_symbol VARCHAR(10) NOT NULL,
fk_coin_id INT NOT NULL,
PRIMARY KEY (rankID),
FOREIGN KEY (fk_coin_id) REFERENCES coin_table (idCoin_num),
FOREIGN KEY(fk_idUpdate) REFERENCES time_of_update(idUpdate));
''')
def __create_table_market(self):
self.c.execute('''
CREATE TABLE IF NOT EXISTS market (
market_id INTEGER NOT NULL,
idCoin_num INTEGER NOT NULL,
fk_idUpdate INT NOT NULL,
total_supply REAL NULL,
available_supply REAL NULL,
volumen24h REAL NULL,
percent_change VARCHAR(5) NULL,
PRIMARY KEY (market_id),
FOREIGN KEY (idCoin_num) REFERENCES coin_table (idCoin_num),
FOREIGN KEY(fk_idUpdate) REFERENCES time_of_update(idUpdate));
''')
def __create__table_time_of_update(self):
self.c.execute('''
CREATE TABLE IF NOT EXISTS time_of_update(
idUpdate INTEGER NOT NULL,
datetime VARCHAR(45) NOT NULL,
userid INTEGER NOT NULL,
PRIMARY KEY (idUpdate),
FOREIGN KEY (userid) REFERENCES users(userID));
''')
def __create_table_balance(self):
self.c.execute('''
CREATE TABLE IF NOT EXISTS balance (
idBalance INTEGER NOT NULL,
idCoin INTEGER NOT NULL,
coinSymbol VARCHAR (45) NOT NULL,
time_stamp VARCHAR (45) NOT NULL,
balance REAL NULL,
userID INT NOT NULL,
PRIMARY KEY (idBalance),
FOREIGN KEY (userID) REFERENCES users (userID),
FOREIGN KEY (idCoin) REFERENCES coin_table (idCoin_num));
''')
def __create_table_users(self):
self.c.execute('''
CREATE TABLE IF NOT EXISTS users (
userID INTEGER NOT NULL,
userName TEXT NOT NULL,
userPass TEXT NOT NULL,
userEmail TEXT,
recovPin INT NOT NULL,
PRIMARY KEY (userID)
)
''')
def __create__table_wallets(self):
self.c.execute('''
CREATE TABLE IF NOT EXISTS wallet (
walletID INTEGER NOT NULL,
fk_userID INTEGER NOT NULL,
fk_coinID INTEGER NOT NULL,
coin_symbol VARCHAR (45) NOT NULL,
public_key VARCHAR (100) NOT NULL,
wallet_balance REAL,
timestamp VARCHAR (45) NOT NULL,
wallet_name VARCHAR (45),
PRIMARY KEY (walletID),
FOREIGN KEY (fk_userID) REFERENCES users(userID),
FOREIGN KEY (fk_coinID) REFERENCES coin_table(idCoin_num))
''')
# insert function
def insert_coin_table(self, symbol, idc, name):
self.c.execute('''
INSERT INTO coin_table (coin_symbol, coin_id, coin_name) VALUES(?,?,?)
''', (symbol, idc, name))
def insert_coin_price(self, numid, usd, btc, mc, updateid):
self.c.execute('''
INSERT INTO coin_price (fk_coin_id, usd_price, btc_price, marketcap_usd, fk_idUpdate) VALUES(?,?,?,?,?)
''', (numid, usd, btc, mc, updateid))
def insert_market(self, numid, totsup, avasup, volday, perchan, updateid):
self.c.execute('''
INSERT INTO market (idCoin_num, total_supply, available_supply, volumen24h, percent_change, fk_idUpdate) VALUES(?,?,?,?,?,?)
''', (numid, totsup, avasup, volday, perchan, updateid))
def insert_rank(self, numid, coin_symbol, rank, updateid):
self.c.execute('''
INSERT INTO rank (fk_coin_id, coin_symbol, rank, fk_idUpdate) VALUES(?,?,?,?)
''', (numid, coin_symbol, rank, updateid))
def insert_user(self, username, password, pincode, email):
self.c.execute('''
INSERT INTO users (userName, userPass, userEmail, recovPin) VALUES(?,?,?,?)
''', (username, password, email, pincode))
def insert_balance(self, coinid, coinsymbol, timestamp, balance, userid):
self.c.execute('''
INSERT INTO balance (idCoin, coinSymbol, time_stamp, balance, userID) VALUES(?,?,?,?,?)
''', (coinid, coinsymbol, timestamp, balance, userid))
def insert_datetime(self, datetime, userid):
self.c.execute('''
INSERT INTO time_of_update (datetime, userid) VALUES(?,?)
''', (datetime, userid))
def insert_wallet(self, userid, coinId, coinsy, pkey, balance, timestamp, wallet_name='no_data'):
self.c.execute('''
INSERT INTO wallet (fk_userID, fk_coinID, coin_symbol, public_key, wallet_balance, timestamp, wallet_name) VALUES (?,?,?,?,?,?,?)
''', (userid, coinId, coinsy, pkey, balance, timestamp, wallet_name, ))
def commit(self):
self.conn.commit()
# close
def close_db(self):
self.conn.close()
def check_existence_user_name(self, username):
self.c.execute(
"SELECT userID FROM users WHERE userName = ?", (username, ))
r = self.c.fetchone()
if r == None:
return -1
else:
return int(r[0])
def check_existence_user_email(self, email):
self.c.execute(
"SELECT userID FROM users WHERE userEmail = ?", (email, ))
r = self.c.fetchone()
if r == None:
return -1
else:
return int(r[0])
def check_password(self, userid):
self.c.execute(
"SELECT userPass FROM users WHERE userID = ?", (userid, ))
r = self.c.fetchone()
return r[0]
def check_user_pincode(self, userid):
self.c.execute(
"SELECT recovPin FROM users WHERE userID = ?", (userid, ))
r = self.c.fetchone()
return r[0]
def check_if_coin_in_table(self, coin_symbol):
self.c.execute(
"SELECT idCoin_num FROM coin_table WHERE coin_symbol = ?", (coin_symbol, ))
r = self.c.fetchone()
if r == None:
            return -1
else:
return int(r[0])
def check_if_coin_in_balance(self, coinid):
self.c.execute(
"SELECT idBalance FROM balance WHERE idCoin = ?", (coinid, ))
r = self.c.fetchone()
if r == None:
            return -1
else:
return int(r[0])
def check_update_id(self, updatetime):
self.c.execute(
"SELECT idUpdate FROM time_of_update WHERE datetime = ?", (updatetime, ))
r = self.c.fetchone()
return int(r[0])
def check_last_update_id(self):
self.c.execute(
"SELECT idUpdate FROM time_of_update ORDER BY idUpdate DESC LIMIT 1")
r = self.c.fetchone()
return int(r[0])
def check_update_date(self, updateid):
self.c.execute(
"SELECT datetime FROM time_of_update WHERE idUpdate = ?", (updateid, ))
r = self.c.fetchone()
return r[0]
def load_update_all(self):
self.c.execute("SELECT * FROM time_of_update")
r = self.c.fetchall()
return r
def load_update_ids(self):
self.c.execute("SELECT idUpdate FROM time_of_update")
r = self.c.fetchall()
return r
def load_all_coins(self):
self.c.execute("SELECT * FROM coin_table")
r = self.c.fetchall()
return r
# return coin data based on id of a coin
def load_coin_all_data(self, coinid, updateid):
coindata = ()
self.c.execute(
"SELECT usd_price, btc_price, marketcap_usd FROM coin_price WHERE fk_coin_id = ? AND fk_idUpdate = ?", (coinid, updateid, ))
r = self.c.fetchone()
self.c.execute(
"SELECT available_supply, volumen24h, percent_change FROM market WHERE idCoin_num = ? AND fk_idUpdate = ?", (coinid, updateid, ))
m = self.c.fetchone()
coindata = r + m
return coindata
def load_coin_price(self, coinid, updateid):
self.c.execute(
"SELECT usd_price, btc_price FROM coin_price WHERE fk_coin_id = ? AND fk_idUpdate = ?", (coinid, updateid, ))
r = self.c.fetchone()
return r
def load_coin_price_with_mc(self, coinid, updateid):
self.c.execute(
"Select usd_price, btc_price, marketcap_usd FROM coin_price WHERE fk_coin_id = ? AND fk_idUpdate = ?", (coinid, updateid, ))
r = self.c.fetchone()
return r
def load_coins_by_rank(self, updateid, howmany):
self.c.execute(
"SELECT rank, coin_symbol FROM rank WHERE fk_idUpdate = ? ORDER BY rank LIMIT ?", (updateid, howmany, ))
r = self.c.fetchall()
return r
def load_user_balance(self, userid):
self.c.execute("SELECT * FROM balance WHERE userID = ?", (userid, ))
r = self.c.fetchall()
return r
def load_user_wallet(self, userid):
self.c.execute("SELECT * FROM wallet WHERE fk_userID = ?", (userid, ))
r = self.c.fetchall()
return r
# UPDATED FUNCTIONS
def update_balance_amount(self, userid, coinid, amount, times):
self.c.execute("UPDATE balance SET time_stamp = ?, balance = ? WHERE userID = ? AND idCoin = ?",
(times, amount, userid, coinid, ))
def update_wallet(self, userid, coinid, amount):
self.c.execute("UPDATE wallet SET wallet_balance = ? WHERE fk_userID = ? AND fk_coinID = ?",
(amount, userid, coinid, ))
def update_wallet_name(self, walletID, name):
self.c.execute(
"UPDATE wallet SET wallet_name = ? WHERE walletID = ?", (name, walletID, ))
def update_password(self, userid, password):
self.c.execute(
"UPDATE users SET userPass = ? WHERE userID = ?", (password, userid, ))
# DELETE OPTIONS
def delete_balance_row(self, userid, coinid):
self.c.execute(
"DELETE FROM balance WHERE userID = ? AND idCoin = ?", (userid, coinid, ))
def delete_wallet_row(self, rowid):
self.c.execute("DELETE FROM wallet WHERE walletID = ?", (rowid, ))
| Bobypalcka/coinwatcher | lib/databaseCore.py | Python | unlicense | 13,002 |
"""Implement the services discovery feature from Hass.io for Add-ons."""
import asyncio
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPServiceUnavailable
from homeassistant import config_entries
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import ATTR_NAME, ATTR_SERVICE, EVENT_HOMEASSISTANT_START
from homeassistant.core import callback
from .const import ATTR_ADDON, ATTR_CONFIG, ATTR_DISCOVERY, ATTR_UUID
from .handler import HassioAPIError
_LOGGER = logging.getLogger(__name__)
@callback
def async_setup_discovery_view(hass: HomeAssistant, hassio):
"""Discovery setup."""
hassio_discovery = HassIODiscovery(hass, hassio)
hass.http.register_view(hassio_discovery)
# Handle exists discovery messages
async def _async_discovery_start_handler(event):
"""Process all exists discovery on startup."""
try:
data = await hassio.retrieve_discovery_messages()
except HassioAPIError as err:
_LOGGER.error("Can't read discover info: %s", err)
return
jobs = [
hassio_discovery.async_process_new(discovery)
for discovery in data[ATTR_DISCOVERY]
]
if jobs:
await asyncio.wait(jobs)
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, _async_discovery_start_handler
)
class HassIODiscovery(HomeAssistantView):
"""Hass.io view to handle base part."""
name = "api:hassio_push:discovery"
url = "/api/hassio_push/discovery/{uuid}"
    def __init__(self, hass: HomeAssistant, hassio):
"""Initialize WebView."""
self.hass = hass
self.hassio = hassio
async def post(self, request, uuid):
"""Handle new discovery requests."""
# Fetch discovery data and prevent injections
try:
data = await self.hassio.get_discovery_message(uuid)
except HassioAPIError as err:
_LOGGER.error("Can't read discovery data: %s", err)
raise HTTPServiceUnavailable() from None
await self.async_process_new(data)
return web.Response()
async def delete(self, request, uuid):
"""Handle remove discovery requests."""
data = await request.json()
await self.async_process_del(data)
return web.Response()
async def async_process_new(self, data):
"""Process add discovery entry."""
service = data[ATTR_SERVICE]
config_data = data[ATTR_CONFIG]
# Read additional Add-on info
try:
addon_info = await self.hassio.get_addon_info(data[ATTR_ADDON])
except HassioAPIError as err:
_LOGGER.error("Can't read add-on info: %s", err)
return
config_data[ATTR_ADDON] = addon_info[ATTR_NAME]
# Use config flow
await self.hass.config_entries.flow.async_init(
service, context={"source": config_entries.SOURCE_HASSIO}, data=config_data
)
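        # Shape of a discovery message as consumed above (illustrative values;
        # the real payload comes from the Supervisor discovery API):
        #
        #     {
        #         "service": "mqtt",
        #         "config": {"host": "core-mosquitto", "port": 1883},
        #         "addon": "core_mosquitto",
        #         "uuid": "abc123",
        #     }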
async def async_process_del(self, data):
"""Process remove discovery entry."""
service = data[ATTR_SERVICE]
uuid = data[ATTR_UUID]
        # Check if really deleted / prevent injections
try:
data = await self.hassio.get_discovery_message(uuid)
except HassioAPIError:
pass
else:
_LOGGER.warning("Retrieve wrong unload for %s", service)
return
# Use config flow
for entry in self.hass.config_entries.async_entries(service):
if entry.source != config_entries.SOURCE_HASSIO:
continue
await self.hass.config_entries.async_remove(entry)
| kennedyshead/home-assistant | homeassistant/components/hassio/discovery.py | Python | apache-2.0 | 3,707 |