Dataset schema (one row per source file):

| column | dtype | range | nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 3 to 1.03M | no |
| ext | string | 10 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 3 to 972 | no |
| max_stars_repo_name | string | length 6 to 130 | no |
| max_stars_repo_head_hexsha | string | length 40 to 78 | no |
| max_stars_repo_licenses | list | length 1 to 10 | no |
| max_stars_count | int64 | 1 to 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 3 to 972 | no |
| max_issues_repo_name | string | length 6 to 130 | no |
| max_issues_repo_head_hexsha | string | length 40 to 78 | no |
| max_issues_repo_licenses | list | length 1 to 10 | no |
| max_issues_count | int64 | 1 to 116k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 3 to 972 | no |
| max_forks_repo_name | string | length 6 to 130 | no |
| max_forks_repo_head_hexsha | string | length 40 to 78 | no |
| max_forks_repo_licenses | list | length 1 to 10 | no |
| max_forks_count | int64 | 1 to 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 3 to 1.03M | no |
| avg_line_length | float64 | 1.13 to 941k | no |
| max_line_length | int64 | 2 to 941k | no |
| alphanum_fraction | float64 | 0 to 1 | no |
hexsha: ced77f9b79511fe1d0c809c0b89f8a8b9ed3ac04 | size: 832 | ext: py | lang: Python
max_stars / max_issues / max_forks: inference/utils.py | macarthur-lab/MyoSeq_reports @ e91ed57f01fcbf2e0ea982d95741a69b16c4c2fb | ["MIT"] | counts: null | event datetimes: null
content:
import logging


def get_samples(sampleFile):
"""
Get requested sample IDs from input file (one sample per line)
    :param str sampleFile: Name of file containing requested samples
:return: List of sample IDs
:rtype: list
"""
samples = []
with open(sampleFile) as s:
for line in s:
samples.append(line.strip())
return samples
def check_missing_samples(samples, check, fname):
"""
Checks if any requested samples are missing from a file
:param list samples: List of strings (requested sample IDs)
:param list check: List of strings (sample IDs found in file)
:param str fname: File name
:return: None
:rtype: None
"""
missing = set(samples) - set(check)
if len(missing) > 0:
logging.warning('{} not found in {} file'.format(','.join(missing), fname))
avg_line_length: 28.689655 | max_line_length: 83 | alphanum_fraction: 0.641827
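A minimal usage sketch for the two helpers in `inference/utils.py` above. The file names, sample IDs, and import path are hypothetical; `logging.basicConfig` is included only so the warning from `check_missing_samples` is actually emitted.

```python
import logging

from inference.utils import get_samples, check_missing_samples  # import path assumed

logging.basicConfig(level=logging.WARNING)

# samples.txt is assumed to hold one sample ID per line.
requested = get_samples('samples.txt')

# IDs actually present in some downstream file, e.g. a joint VCF (made up here).
found = ['SAMPLE_001', 'SAMPLE_002']

# Logs a single warning listing any requested IDs missing from `found`.
check_missing_samples(requested, found, 'my_cohort.vcf')
```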
hexsha: cdbfe2ade1050f220719ec43b177f5c756765332 | size: 1,712 | ext: py | lang: Python
max_stars / max_issues / max_forks: lib/python2.7/site-packages/twilio/rest/resources/task_router/tasks.py | Gchorba/Ask @ cf68c9a522d2c67519a64927c603990c1399af57 | ["MIT"] | counts: null | event datetimes: null
content:
from .. import NextGenInstanceResource, NextGenListResource
class Task(NextGenInstanceResource):
"""
A Task resource
"""
def delete(self):
"""
Delete a task.
"""
return self.parent.delete_instance(self.name)
def update(self, **kwargs):
"""
Update a task.
"""
return self.parent.update_instance(self.name, kwargs)
class Tasks(NextGenListResource):
""" A list of Task resources """
name = "Tasks"
instance = Task
def create(self, attributes, workflow_sid, **kwargs):
"""
Create a Task.
:param attributes: Url-encoded JSON string describing the attributes of
this task. This data will be passed back to the Workflow's
AssignmentCallbackURL when the Task is assigned to a Worker. An
example task: { 'task_type': 'call', 'twilio_call_sid': '...',
'customer_ticket_number': '12345' }.
:param workflow_sid: The workflow_sid for the Workflow that you would
like to handle routing for this Task.
:param timeout: If provided, time-to-live for the task in seconds,
before it is automatically canceled
"""
kwargs['attributes'] = attributes
kwargs['workflow_sid'] = workflow_sid
return self.create_instance(kwargs)
def delete(self, sid):
"""
Delete the given task
"""
return self.delete_instance(sid)
def update(self, sid, **kwargs):
"""
Update a :class:`Task` with the given parameters.
        All the parameters are described above in :meth:`create`
"""
return self.update_instance(sid, kwargs)
avg_line_length: 29.016949 | max_line_length: 79 | alphanum_fraction: 0.606893
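The `create` docstring above describes the `attributes` JSON string, the routing `workflow_sid`, and the optional `timeout`. A hedged sketch of how this list resource might be driven is below; `tasks` is assumed to be an already-constructed `Tasks` resource for a TaskRouter workspace (obtaining it from the TaskRouter client is omitted), and the SIDs, the `task.sid` attribute, and the `assignment_status` update parameter are placeholders/assumptions rather than something confirmed by this file.

```python
import json

# Attributes are passed as a JSON string, as the docstring notes.
attributes = json.dumps({
    'task_type': 'call',
    'twilio_call_sid': 'CAxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
    'customer_ticket_number': '12345',
})

# Create a task routed by the given Workflow; timeout is the optional
# time-to-live in seconds forwarded through **kwargs.
task = tasks.create(attributes, 'WWxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', timeout=3600)

# Update or delete it later by SID through the same list resource.
tasks.update(task.sid, assignment_status='canceled')
tasks.delete(task.sid)
```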
hexsha: 6f6d57311d8c52083dbd257a9a4bdadd32ebd875 | size: 38,234 | ext: py | lang: Python
max_stars / max_issues / max_forks: src/olympia/signing/tests/test_views.py | varundey/addons-server @ bdc39c4f0510a5e1c8b4e3551f5f15fcd50f10e9 | ["BSD-3-Clause"] | counts: null | event datetimes: null
content:
# -*- coding: utf-8 -*-
import json
import os
from datetime import datetime, timedelta
from django.conf import settings
from django.forms import ValidationError
from django.test.utils import override_settings
from django.utils import translation
import mock
import responses
from rest_framework.response import Response
from waffle.testutils import override_switch
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon, AddonUser
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import addon_factory, reverse_ns, TestCase
from olympia.api.tests.utils import APIKeyAuthTestMixin
from olympia.applications.models import AppVersion
from olympia.devhub import tasks
from olympia.files.models import File, FileUpload
from olympia.lib.akismet.models import AkismetReport
from olympia.signing.views import VersionView
from olympia.users.models import UserProfile
from olympia.versions.models import Version
class SigningAPITestMixin(APIKeyAuthTestMixin):
fixtures = ['base/addon_3615', 'base/user_4043307']
def setUp(self):
self.user = UserProfile.objects.get(email='del@icio.us')
self.api_key = self.create_api_key(self.user, str(self.user.pk) + ':f')
class BaseUploadVersionTestMixin(SigningAPITestMixin):
def setUp(self):
super(BaseUploadVersionTestMixin, self).setUp()
self.guid = '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}'
self.view = VersionView.as_view()
create_version_patcher = mock.patch(
'olympia.devhub.tasks.create_version_for_upload',
tasks.create_version_for_upload.non_atomic)
self.create_version_for_upload = create_version_patcher.start()
self.addCleanup(create_version_patcher.stop)
auto_sign_version_patcher = mock.patch(
'olympia.devhub.views.auto_sign_version')
self.auto_sign_version = auto_sign_version_patcher.start()
self.addCleanup(auto_sign_version_patcher.stop)
def url(self, guid, version, pk=None):
if guid is None:
args = [version]
else:
args = [guid, version]
if pk is not None:
args.append(pk)
return reverse_ns('signing.version', args=args)
def create_version(self, version):
response = self.request('PUT', self.url(self.guid, version), version)
assert response.status_code in [201, 202]
def xpi_filepath(self, addon, version):
return os.path.join(
'src', 'olympia', 'signing', 'fixtures',
'{addon}-{version}.xpi'.format(addon=addon, version=version))
def request(self, method='PUT', url=None, version='3.0',
addon='@upload-version', filename=None, channel=None,
extra_kwargs=None):
if filename is None:
filename = self.xpi_filepath(addon, version)
if url is None:
url = self.url(addon, version)
with open(filename) as upload:
data = {'upload': upload}
if method == 'POST' and version:
data['version'] = version
if channel:
data['channel'] = channel
return getattr(self.client, method.lower())(
url, data,
HTTP_AUTHORIZATION=self.authorization(),
format='multipart', **(extra_kwargs or {}))
def make_admin(self, user):
admin_group = Group.objects.create(name='Admin', rules='*:*')
GroupUser.objects.create(group=admin_group, user=user)
class TestUploadVersion(BaseUploadVersionTestMixin, TestCase):
def test_not_authenticated(self):
# Use self.client.put so that we don't add the authorization header.
response = self.client.put(self.url(self.guid, '12.5'))
assert response.status_code == 401
def test_addon_does_not_exist(self):
guid = '@create-version'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', addon=guid, version='1.0')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.guid == guid
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(latest_version)
assert not addon.tags.filter(tag_text='dynamic theme').exists()
def test_new_addon_random_slug_unlisted_channel(self):
guid = '@create-webextension'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', addon=guid, version='1.0')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert len(addon.slug) == 20
assert 'create' not in addon.slug
def test_user_does_not_own_addon(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_admin_does_not_own_addon(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
self.make_admin(self.user)
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_version_does_not_match_manifest_file(self):
response = self.request('PUT', self.url(self.guid, '2.5'))
assert response.status_code == 400
assert response.data['error'] == (
'Version does not match the manifest file.')
def test_version_already_exists(self):
response = self.request(
'PUT', self.url(self.guid, '2.1.072'), version='2.1.072')
assert response.status_code == 409
assert response.data['error'] == ('Version already exists. '
'Latest version is: 2.1.072.')
@mock.patch('olympia.devhub.views.Version.from_upload')
def test_no_version_yet(self, from_upload):
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_added(self):
assert Addon.objects.get(guid=self.guid).status == amo.STATUS_PUBLIC
qs = Version.objects.filter(addon__guid=self.guid, version='3.0')
assert not qs.exists()
existing = Version.objects.filter(addon__guid=self.guid)
assert existing.count() == 1
assert existing[0].channel == amo.RELEASE_CHANNEL_LISTED
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
version = qs.get()
assert version.addon.guid == self.guid
assert version.version == '3.0'
assert version.statuses[0][1] == amo.STATUS_AWAITING_REVIEW
assert version.addon.status == amo.STATUS_PUBLIC
assert version.channel == amo.RELEASE_CHANNEL_LISTED
self.auto_sign_version.assert_called_with(version)
assert not version.all_files[0].is_mozilla_signed_extension
assert not version.addon.tags.filter(tag_text='dynamic theme').exists()
def test_version_already_uploaded(self):
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 409
assert response.data['error'] == ('Version already exists. '
'Latest version is: 3.0.')
def test_version_failed_review(self):
self.create_version('3.0')
version = Version.objects.get(addon__guid=self.guid, version='3.0')
version.update(reviewed=datetime.today())
version.files.get().update(reviewed=datetime.today(),
status=amo.STATUS_DISABLED)
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 409
assert response.data['error'] == ('Version already exists. '
'Latest version is: 3.0.')
# Verify that you can check the status after upload (#953).
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_added_is_experiment(self):
self.grant_permission(self.user, 'Experiments:submit')
guid = 'experiment@xpi'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.1',
filename='src/olympia/files/fixtures/files/'
'telemetry_experiment.xpi')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(latest_version)
def test_version_added_is_experiment_reject_no_perm(self):
guid = 'experiment@xpi'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.1',
filename='src/olympia/files/fixtures/files/'
'telemetry_experiment.xpi')
assert response.status_code == 400
assert response.data['error'] == (
'You cannot submit this type of add-on')
def test_mozilla_signed_allowed(self):
guid = '@webextension-guid'
self.user.update(email='redpanda@mozilla.com')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'webextension_signed_already.xpi')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(latest_version)
assert latest_version.all_files[0].is_mozilla_signed_extension
def test_mozilla_signed_not_allowed_not_mozilla(self):
guid = '@webextension-guid'
self.user.update(email='yellowpanda@notzilla.com')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'webextension_signed_already.xpi')
assert response.status_code == 400
assert response.data['error'] == (
'You cannot submit a Mozilla Signed Extension')
def test_system_addon_allowed(self):
guid = 'systemaddon@mozilla.org'
self.user.update(email='redpanda@mozilla.com')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'mozilla_guid.xpi')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(latest_version)
def test_system_addon_not_allowed_not_mozilla(self):
guid = 'systemaddon@mozilla.com'
self.user.update(email='yellowpanda@notzilla.com')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.1',
filename='src/olympia/files/fixtures/files/'
'mozilla_guid.xpi')
assert response.status_code == 400
assert response.data['error'] == (
u'You cannot submit an add-on with a guid ending "@mozilla.org" '
u'or "@shield.mozilla.org" or "@pioneer.mozilla.org" '
u'or "@mozilla.com"')
def test_system_addon_update_allowed(self):
"""Updates to system addons are allowed from anyone."""
guid = 'systemaddon@mozilla.org'
self.user.update(email='pinkpanda@notzilla.com')
orig_addon = addon_factory(
guid='systemaddon@mozilla.org',
version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
AddonUser.objects.create(
addon=orig_addon,
user=self.user)
response = self.request(
'PUT',
addon=guid, version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'mozilla_guid.xpi')
assert response.status_code == 202
addon = Addon.unfiltered.filter(guid=guid).get()
assert addon.versions.count() == 2
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
self.auto_sign_version.assert_called_with(latest_version)
def test_invalid_version_response_code(self):
# This raises an error in parse_addon which is not covered by
# an exception handler.
response = self.request(
'PUT',
self.url(self.guid, '1.0'),
addon='@create-webextension-invalid-version',
version='1.0')
assert response.status_code == 400
def test_raises_response_code(self):
# A check that any bare error in handle_upload will return a 400.
with mock.patch('olympia.signing.views.devhub_handle_upload') as patch:
patch.side_effect = ValidationError(message='some error')
response = self.request('PUT', self.url(self.guid, '1.0'))
assert response.status_code == 400
def test_no_version_upload_for_admin_disabled_addon(self):
addon = Addon.objects.get(guid=self.guid)
addon.update(status=amo.STATUS_DISABLED)
response = self.request(
'PUT', self.url(self.guid, '3.0'), version='3.0')
assert response.status_code == 400
error_msg = 'cannot add versions to an addon that has status: %s.' % (
amo.STATUS_CHOICES_ADDON[amo.STATUS_DISABLED])
assert error_msg in response.data['error']
def test_channel_ignored_for_new_addon(self):
guid = '@create-version'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', addon=guid, version='1.0',
channel='listed')
assert response.status_code == 201
addon = qs.get()
assert addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
def test_no_channel_selects_last_channel(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.count() == 1
assert addon.versions.all()[0].channel == amo.RELEASE_CHANNEL_LISTED
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
new_version = addon.versions.latest()
assert new_version.channel == amo.RELEASE_CHANNEL_LISTED
new_version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.request(
'PUT', self.url(self.guid, '4.0-beta1'), version='4.0-beta1')
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
third_version = addon.versions.latest()
assert third_version.channel == amo.RELEASE_CHANNEL_UNLISTED
def test_unlisted_channel_for_listed_addon(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.count() == 1
assert addon.versions.all()[0].channel == amo.RELEASE_CHANNEL_LISTED
response = self.request('PUT', self.url(self.guid, '3.0'),
channel='unlisted')
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_UNLISTED
def test_listed_channel_for_complete_listed_addon(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.count() == 1
assert addon.has_complete_metadata()
response = self.request('PUT', self.url(self.guid, '3.0'),
channel='listed')
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
def test_listed_channel_fails_for_incomplete_addon(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.count() == 1
addon.current_version.update(license=None) # Make addon incomplete.
addon.versions.latest().update(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert not addon.has_complete_metadata(
has_listed_versions=True)
response = self.request('PUT', self.url(self.guid, '3.0'),
channel='listed')
assert response.status_code == 400
error_msg = (
'You cannot add a listed version to this addon via the API')
assert error_msg in response.data['error']
@override_switch('akismet-spam-check', active=False)
def test_akismet_waffle_off(self):
addon = Addon.objects.get(guid=self.guid)
response = self.request(
'PUT', self.url(self.guid, '3.0'), channel='listed')
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
assert AkismetReport.objects.count() == 0
assert response.status_code == 202
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
def test_akismet_reports_created_ham_outcome(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.HAM
addon = Addon.objects.get(guid=self.guid)
response = self.request(
'PUT', self.url(self.guid, '3.0'), channel='listed')
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
assert response.status_code == 202
comment_check_mock.assert_called_once()
assert AkismetReport.objects.count() == 1
report = AkismetReport.objects.get()
assert report.comment_type == 'product-name'
assert report.comment == 'Upload Version Test XPI' # the addon's name
validation_response = self.get(self.url(self.guid, '3.0'))
assert validation_response.status_code == 200
assert 'spam' not in validation_response.content
@override_switch('akismet-spam-check', active=True)
@override_switch('akismet-addon-action', active=False)
@override_settings(AKISMET_API_KEY=None)
def test_akismet_reports_created_spam_outcome_logging_only(self):
akismet_url = settings.AKISMET_API_URL.format(
api_key='none', action='comment-check')
responses.add(responses.POST, akismet_url, json=True)
addon = Addon.objects.get(guid=self.guid)
response = self.request(
'PUT', self.url(self.guid, '3.0'), channel='listed')
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
assert response.status_code == 202
assert AkismetReport.objects.count() == 1
report = AkismetReport.objects.get()
assert report.comment_type == 'product-name'
assert report.comment == 'Upload Version Test XPI' # the addon's name
assert report.result == AkismetReport.MAYBE_SPAM
validation_response = self.get(self.url(self.guid, '3.0'))
assert validation_response.status_code == 200
assert 'spam' not in validation_response.content
@override_switch('akismet-spam-check', active=True)
@override_switch('akismet-addon-action', active=True)
@override_settings(AKISMET_API_KEY=None)
def test_akismet_reports_created_spam_outcome_action_taken(self):
akismet_url = settings.AKISMET_API_URL.format(
api_key='none', action='comment-check')
responses.add(responses.POST, akismet_url, json=True)
addon = Addon.objects.get(guid=self.guid)
response = self.request(
'PUT', self.url(self.guid, '3.0'), channel='listed')
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
assert response.status_code == 202
assert AkismetReport.objects.count() == 1
report = AkismetReport.objects.get()
assert report.comment_type == 'product-name'
assert report.comment == 'Upload Version Test XPI' # the addon's name
assert report.result == AkismetReport.MAYBE_SPAM
validation_response = self.get(self.url(self.guid, '3.0'))
assert validation_response.status_code == 200
assert 'spam' in validation_response.content
data = json.loads(validation_response.content)
assert data['validation_results']['messages'][0]['id'] == [
u'validation', u'messages', u'akismet_is_spam_name'
]
class TestUploadVersionWebextension(BaseUploadVersionTestMixin, TestCase):
def setUp(self):
super(TestUploadVersionWebextension, self).setUp()
AppVersion.objects.create(application=amo.FIREFOX.id, version='42.0')
AppVersion.objects.create(application=amo.FIREFOX.id, version='*')
def test_addon_does_not_exist_webextension(self):
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension',
version='1.0')
assert response.status_code == 201
guid = response.data['guid']
addon = Addon.unfiltered.get(guid=guid)
assert addon.guid is not None
assert addon.guid != self.guid
version = Version.objects.get(addon__guid=guid, version='1.0')
assert version.files.all()[0].is_webextension is True
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(
latest_version)
def test_addon_does_not_exist_webextension_with_guid_in_url(self):
guid = '@custom-guid-provided'
# Override the filename self.request() picks, we want that specific
# file but with a custom guid.
filename = self.xpi_filepath('@create-webextension', '1.0')
response = self.request(
'PUT', # PUT, not POST, since we're specifying a guid in the URL.
filename=filename,
addon=guid, # Will end up in the url since we're not passing one.
version='1.0')
assert response.status_code == 201
assert response.data['guid'] == '@custom-guid-provided'
addon = Addon.unfiltered.get(guid=response.data['guid'])
assert addon.guid == '@custom-guid-provided'
version = Version.objects.get(addon__guid=guid, version='1.0')
assert version.files.all()[0].is_webextension is True
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(
latest_version)
def test_addon_does_not_exist_webextension_with_invalid_guid_in_url(self):
guid = 'custom-invalid-guid-provided'
# Override the filename self.request() picks, we want that specific
# file but with a custom guid.
filename = self.xpi_filepath('@create-webextension', '1.0')
response = self.request(
'PUT', # PUT, not POST, since we're specifying a guid in the URL.
filename=filename,
addon=guid, # Will end up in the url since we're not passing one.
version='1.0')
assert response.status_code == 400
assert response.data['error'] == u'Invalid GUID in URL'
assert not Addon.unfiltered.filter(guid=guid).exists()
def test_optional_id_not_allowed_for_regular_addon(self):
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-version-no-id',
version='1.0')
assert response.status_code == 400
def test_webextension_reuse_guid(self):
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension-with-guid',
version='1.0')
guid = response.data['guid']
assert guid == '@webextension-with-guid'
addon = Addon.unfiltered.get(guid=guid)
assert addon.guid == '@webextension-with-guid'
def test_webextension_reuse_guid_but_only_create(self):
# Uploading the same version with the same id fails. People
# have to use the regular `PUT` endpoint for that.
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension-with-guid',
version='1.0')
assert response.status_code == 201
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension-with-guid',
version='1.0')
assert response.status_code == 400
assert response.data['error'] == 'Duplicate add-on ID found.'
def test_webextension_optional_version(self):
# Uploading the same version with the same id fails. People
# have to use the regular `PUT` endpoint for that.
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@create-webextension-with-guid-and-version',
version='99.0')
assert response.status_code == 201
assert (
response.data['guid'] ==
'@create-webextension-with-guid-and-version')
assert response.data['version'] == '99.0'
def test_webextension_resolve_translations(self):
fname = (
'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi')
response = self.request(
'POST',
url=reverse_ns('signing.version'),
addon='@notify-link-clicks-i18n',
version='1.0',
filename=fname)
assert response.status_code == 201
addon = Addon.unfiltered.get(guid=response.data['guid'])
# Normalized from `en` to `en-US`
assert addon.default_locale == 'en-US'
assert addon.name == 'Notify link clicks i18n'
assert addon.summary == (
'Shows a notification when the user clicks on links.')
translation.activate('de')
addon.reload()
assert addon.name == 'Meine Beispielerweiterung'
assert addon.summary == u'Benachrichtigt den Benutzer über Linkklicks'
def test_too_long_guid_not_in_manifest_forbidden(self):
fname = (
'src/olympia/files/fixtures/files/webextension_no_id.xpi')
guid = (
'this_guid_is_longer_than_the_limit_of_64_chars_see_bug_1201176_'
'and_should_fail@webextension-guid')
response = self.request(
'PUT',
url=self.url(guid, '1.0'),
version='1.0',
filename=fname)
assert response.status_code == 400
assert response.data == {
'error': (
u'Please specify your Add-on GUID in the manifest if it\'s '
u'longer than 64 characters.')
}
assert not Addon.unfiltered.filter(guid=guid).exists()
def test_too_long_guid_in_manifest_allowed(self):
fname = (
'src/olympia/files/fixtures/files/webextension_too_long_guid.xpi')
guid = (
'this_guid_is_longer_than_the_limit_of_64_chars_see_bug_1201176_'
'and_should_fail@webextension-guid')
response = self.request(
'PUT',
url=self.url(guid, '1.0'),
version='1.0',
filename=fname)
assert response.status_code == 201
assert Addon.unfiltered.filter(guid=guid).exists()
def test_dynamic_theme_tag_added(self):
addon = Addon.objects.get(guid=self.guid)
addon.current_version.update(version='0.9')
def parse_addon_wrapper(*args, **kwargs):
from olympia.files.utils import parse_addon
parsed = parse_addon(*args, **kwargs)
parsed['permissions'] = parsed.get('permissions', []) + ['theme']
return parsed
with mock.patch('olympia.devhub.tasks.parse_addon',
wraps=parse_addon_wrapper):
# But unlisted should be ignored
response = self.request(
'PUT', self.url(self.guid, '1.0'), version='1.0',
addon='@create-webextension', channel='unlisted')
assert response.status_code == 202, response.data['error']
assert not addon.tags.filter(tag_text='dynamic theme').exists()
addon.versions.latest().delete(hard=True)
# Only listed version get the tag
response = self.request(
'PUT', self.url(self.guid, '1.0'), version='1.0',
addon='@create-webextension', channel='listed')
assert response.status_code == 202, response.data['error']
assert addon.tags.filter(tag_text='dynamic theme').exists()
class TestCheckVersion(BaseUploadVersionTestMixin, TestCase):
def test_not_authenticated(self):
# Use self.client.get so that we don't add the authorization header.
response = self.client.get(self.url(self.guid, '12.5'))
assert response.status_code == 401
def test_addon_does_not_exist(self):
response = self.get(self.url('foo', '12.5'))
assert response.status_code == 404
assert response.data['error'] == (
'Could not find add-on with guid "foo".')
def test_user_does_not_own_addon(self):
self.create_version('3.0')
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_admin_can_view(self):
self.create_version('3.0')
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.make_admin(self.user)
self.api_key = self.create_api_key(self.user, 'bar')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_does_not_exist(self):
response = self.get(self.url(self.guid, '2.5'))
assert response.status_code == 404
assert (response.data['error'] ==
'No uploaded file for that addon and version.')
def test_version_exists(self):
self.create_version('3.0')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_exists_with_pk(self):
# Mock Version.from_upload so the Version won't be created.
with mock.patch('olympia.devhub.tasks.Version.from_upload'):
self.create_version('3.0')
upload = FileUpload.objects.latest()
upload.update(created=datetime.today() - timedelta(hours=1))
self.create_version('3.0')
newer_upload = FileUpload.objects.latest()
assert newer_upload != upload
response = self.get(self.url(self.guid, '3.0', upload.uuid.hex))
assert response.status_code == 200
# For backwards-compatibility reasons, we return the uuid as "pk".
assert response.data['pk'] == upload.uuid.hex
assert 'processed' in response.data
def test_version_exists_with_pk_not_owner(self):
orig_user, orig_api_key = self.user, self.api_key
# This will create a version for the add-on with guid @create-version
# using a new user.
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.request('PUT', addon='@create-version', version='1.0')
assert response.status_code == 201
upload = FileUpload.objects.latest()
# Check that the user that created the upload can access it properly.
response = self.get(
self.url('@create-version', '1.0', upload.uuid.hex))
assert response.status_code == 200
assert 'processed' in response.data
# This will create a version for the add-on from the fixture with the
# regular fixture user.
self.user, self.api_key = orig_user, orig_api_key
self.create_version('3.0')
# Check that we can't access the FileUpload by uuid even if we pass in
# an add-on and version that we own if we don't own the FileUpload.
response = self.get(self.url(self.guid, '3.0', upload.uuid.hex))
assert response.status_code == 404
assert 'error' in response.data
def test_version_download_url(self):
version_string = '3.0'
qs = File.objects.filter(version__addon__guid=self.guid,
version__version=version_string)
assert not qs.exists()
self.create_version(version_string)
response = self.get(self.url(self.guid, version_string))
assert response.status_code == 200
file_ = qs.get()
assert response.data['files'][0]['download_url'] == absolutify(
reverse_ns('signing.file', kwargs={'file_id': file_.id}) +
'/delicious_bookmarks-3.0-fx.xpi?src=api')
def test_file_hash(self):
version_string = '3.0'
qs = File.objects.filter(version__addon__guid=self.guid,
version__version=version_string)
assert not qs.exists()
self.create_version(version_string)
response = self.get(self.url(self.guid, version_string))
assert response.status_code == 200
file_ = qs.get()
filename = self.xpi_filepath('@upload-version', version_string)
assert response.data['files'][0]['hash'] == \
file_.generate_hash(filename=filename)
def test_has_failed_upload(self):
addon = Addon.objects.get(guid=self.guid)
FileUpload.objects.create(addon=addon, version='3.0')
self.create_version('3.0')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
class TestSignedFile(SigningAPITestMixin, TestCase):
def setUp(self):
super(TestSignedFile, self).setUp()
self.file_ = self.create_file()
def url(self):
return reverse_ns('signing.file', args=[self.file_.pk])
def create_file(self):
addon = addon_factory(
name='thing', version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED},
users=[self.user])
return addon.latest_unlisted_version.all_files[0]
def test_can_download_once_authenticated(self):
response = self.get(self.url())
assert response.status_code == 200
assert response[settings.XSENDFILE_HEADER] == (
self.file_.file_path)
def test_cannot_download_without_authentication(self):
response = self.client.get(self.url()) # no auth
assert response.status_code == 401
def test_api_relies_on_version_downloader(self):
with mock.patch('olympia.versions.views.download_file') as df:
df.return_value = Response({})
self.get(self.url())
assert df.called is True
assert df.call_args[0][0].user == self.user
assert df.call_args[0][1] == str(self.file_.pk)
avg_line_length: 42.10793 | max_line_length: 79 | alphanum_fraction: 0.640451
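The tests above pin down the behaviour of the signing/upload API: `PUT` to a `{guid}/{version}` URL uploads a version (201 when the add-on is new, 202 when a new version is accepted, 409 when it already exists), `POST` without a GUID creates a web extension with a generated ID, and a `GET` on the same URL reports validation status (`processed`, per-file `download_url` and `hash`). The sketch below shows that flow from an external HTTP client; the host, API prefix, and JWT construction are assumptions, not taken from this test file.

```python
import requests

BASE = "https://addons.example.org/api/v4"      # host and prefix assumed
AUTH = {"Authorization": "JWT <signed-token>"}  # real clients sign a short-lived JWT

guid, version = "@my-addon", "1.0"
url = f"{BASE}/addons/{guid}/versions/{version}/"

# Upload: 201 = add-on created, 202 = version accepted for processing,
# 409 = version already exists (mirroring the assertions above).
with open("my-addon-1.0.xpi", "rb") as xpi:
    resp = requests.put(url, headers=AUTH,
                        files={"upload": xpi}, data={"channel": "listed"})
print(resp.status_code)

# Poll the same URL until validation finishes; the tests check for a
# "processed" key and per-file "download_url"/"hash" entries here.
status = requests.get(url, headers=AUTH).json()
print(status.get("processed"), status.get("files"))
```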
hexsha: 9681d3bdd6e6a5e379a86bee2ef0f797ee573676 | size: 3,754 | ext: py | lang: Python
max_stars / max_issues / max_forks: lib/services/loadbalancer/ncloud_loadbalancer/__init__.py | KidongSohn/ncloud-sdk-py @ 1c62471a9bd320d77164ed3193a0ebb9f64229ff | ["MIT"] | counts: null | event datetimes: null
content:
# coding: utf-8
# flake8: noqa
"""
loadbalancer
OpenAPI spec version: 2018-06-21T02:19:18Z
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from ncloud_loadbalancer.api.v2_api import V2Api
# import ApiClient
from ncloud_loadbalancer.api_client import ApiClient
from ncloud_loadbalancer.configuration import Configuration
# import models into sdk package
from ncloud_loadbalancer.model.access_control_group import AccessControlGroup
from ncloud_loadbalancer.model.add_load_balancer_ssl_certificate_request import AddLoadBalancerSslCertificateRequest
from ncloud_loadbalancer.model.add_load_balancer_ssl_certificate_response import AddLoadBalancerSslCertificateResponse
from ncloud_loadbalancer.model.change_load_balanced_server_instances_request import ChangeLoadBalancedServerInstancesRequest
from ncloud_loadbalancer.model.change_load_balanced_server_instances_response import ChangeLoadBalancedServerInstancesResponse
from ncloud_loadbalancer.model.change_load_balancer_instance_configuration_request import ChangeLoadBalancerInstanceConfigurationRequest
from ncloud_loadbalancer.model.change_load_balancer_instance_configuration_response import ChangeLoadBalancerInstanceConfigurationResponse
from ncloud_loadbalancer.model.common_code import CommonCode
from ncloud_loadbalancer.model.create_load_balancer_instance_request import CreateLoadBalancerInstanceRequest
from ncloud_loadbalancer.model.create_load_balancer_instance_response import CreateLoadBalancerInstanceResponse
from ncloud_loadbalancer.model.delete_load_balancer_instances_request import DeleteLoadBalancerInstancesRequest
from ncloud_loadbalancer.model.delete_load_balancer_instances_response import DeleteLoadBalancerInstancesResponse
from ncloud_loadbalancer.model.delete_load_balancer_ssl_certificate_request import DeleteLoadBalancerSslCertificateRequest
from ncloud_loadbalancer.model.delete_load_balancer_ssl_certificate_response import DeleteLoadBalancerSslCertificateResponse
from ncloud_loadbalancer.model.get_load_balanced_server_instance_list_request import GetLoadBalancedServerInstanceListRequest
from ncloud_loadbalancer.model.get_load_balanced_server_instance_list_response import GetLoadBalancedServerInstanceListResponse
from ncloud_loadbalancer.model.get_load_balancer_instance_list_request import GetLoadBalancerInstanceListRequest
from ncloud_loadbalancer.model.get_load_balancer_instance_list_response import GetLoadBalancerInstanceListResponse
from ncloud_loadbalancer.model.get_load_balancer_ssl_certificate_list_request import GetLoadBalancerSslCertificateListRequest
from ncloud_loadbalancer.model.get_load_balancer_ssl_certificate_list_response import GetLoadBalancerSslCertificateListResponse
from ncloud_loadbalancer.model.get_load_balancer_target_server_instance_list_request import GetLoadBalancerTargetServerInstanceListRequest
from ncloud_loadbalancer.model.get_load_balancer_target_server_instance_list_response import GetLoadBalancerTargetServerInstanceListResponse
from ncloud_loadbalancer.model.load_balanced_server_instance import LoadBalancedServerInstance
from ncloud_loadbalancer.model.load_balancer_instance import LoadBalancerInstance
from ncloud_loadbalancer.model.load_balancer_rule import LoadBalancerRule
from ncloud_loadbalancer.model.load_balancer_rule_parameter import LoadBalancerRuleParameter
from ncloud_loadbalancer.model.region import Region
from ncloud_loadbalancer.model.server_health_check_status import ServerHealthCheckStatus
from ncloud_loadbalancer.model.server_instance import ServerInstance
from ncloud_loadbalancer.model.ssl_certificate import SslCertificate
from ncloud_loadbalancer.model.zone import Zone
avg_line_length: 69.518519 | max_line_length: 140 | alphanum_fraction: 0.922749
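This generated `__init__.py` only re-exports the swagger-codegen client classes. Below is a hedged sketch of how such an SDK is typically wired together; the `Configuration` credential attributes and the `V2Api` method name follow common swagger-codegen conventions and are assumptions, not something verified against this package.

```python
from ncloud_loadbalancer.api.v2_api import V2Api
from ncloud_loadbalancer.api_client import ApiClient
from ncloud_loadbalancer.configuration import Configuration
from ncloud_loadbalancer.model.get_load_balancer_instance_list_request import (
    GetLoadBalancerInstanceListRequest,
)

config = Configuration()
config.access_key = "<NCLOUD_ACCESS_KEY>"  # attribute names assumed
config.secret_key = "<NCLOUD_SECRET_KEY>"

api = V2Api(ApiClient(config))

# Typical generated-SDK pattern: one request model per operation.
request = GetLoadBalancerInstanceListRequest()
response = api.get_load_balancer_instance_list(request)  # method name assumed
print(response)
```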
hexsha: 7224c4154fa34161e3731f41913fab8ab366d1ee | size: 6,667 | ext: py | lang: Python
path: mayan/apps/documents/links/document_version_page_links.py | repo: atitaya1412/Mayan-EDMS @ bda9302ba4b743e7d829ad118b8b836221888172 | licenses: ["Apache-2.0"]
max_stars_count: 343 (2015-01-05T14:19:35.000Z .. 2018-12-10T19:07:48.000Z) | max_issues_count: 191 (2015-01-03T00:48:19.000Z .. 2018-11-30T09:10:25.000Z) | max_forks_count: 257 (2019-05-14T10:26:37.000Z .. 2022-03-30T03:37:36.000Z)
content:
from django.utils.translation import ugettext_lazy as _
from mayan.apps.navigation.classes import Link
from ..icons import (
icon_document_version_page_delete, icon_document_version_page_list,
icon_document_version_page_list_append,
icon_document_version_page_list_remap,
icon_document_version_page_list_reset,
icon_document_version_page_navigation_first,
icon_document_version_page_navigation_last,
icon_document_version_page_navigation_next,
icon_document_version_page_navigation_previous,
icon_document_version_page_return_to_document,
icon_document_version_page_return_to_document_version,
icon_document_version_page_return_to_document_version_page_list,
icon_document_version_page_rotate_left,
icon_document_version_page_rotate_right,
icon_document_version_page_view, icon_document_version_page_view_reset,
icon_document_version_page_zoom_in, icon_document_version_page_zoom_out
)
from ..permissions import (
permission_document_version_edit, permission_document_version_view,
permission_document_view
)
from ..settings import setting_zoom_max_level, setting_zoom_min_level
def is_first_page(context):
return context['resolved_object'].siblings.first() == context['resolved_object']
def is_last_page(context):
return context['resolved_object'].siblings.last() == context['resolved_object']
def is_max_zoom(context):
return context['zoom'] >= setting_zoom_max_level.value
def is_min_zoom(context):
return context['zoom'] <= setting_zoom_min_level.value
link_document_version_page_delete = Link(
args='resolved_object.pk', icon=icon_document_version_page_delete,
permissions=(permission_document_version_edit,), tags='dangerous',
text=_('Delete'), view='documents:document_version_page_delete'
)
link_document_version_page_list = Link(
args='resolved_object.pk', icon=icon_document_version_page_list,
permissions=(permission_document_version_view,), text=_('Pages'),
view='documents:document_version_page_list'
)
link_document_version_page_list_append = Link(
args='resolved_object.pk', icon=icon_document_version_page_list_append,
permissions=(permission_document_version_edit,), text=_('Append all pages'),
view='documents:document_version_page_list_append'
)
link_document_version_page_list_remap = Link(
args='resolved_object.pk', icon=icon_document_version_page_list_remap,
permissions=(permission_document_version_edit,), text=_('Remap pages'),
view='documents:document_version_page_list_remap'
)
link_document_version_page_list_reset = Link(
args='resolved_object.pk', icon=icon_document_version_page_list_reset,
permissions=(permission_document_version_edit,), text=_('Reset pages'),
view='documents:document_version_page_list_reset'
)
link_document_version_page_navigation_first = Link(
args='resolved_object.pk', conditional_disable=is_first_page,
icon=icon_document_version_page_navigation_first,
keep_query=True, permissions=(permission_document_version_view,),
text=_('First page'),
view='documents:document_version_page_navigation_first'
)
link_document_version_page_navigation_last = Link(
args='resolved_object.pk', conditional_disable=is_last_page,
icon=icon_document_version_page_navigation_last,
keep_query=True, text=_('Last page'),
permissions=(permission_document_version_view,),
view='documents:document_version_page_navigation_last'
)
link_document_version_page_navigation_previous = Link(
args='resolved_object.pk', conditional_disable=is_first_page,
icon=icon_document_version_page_navigation_previous,
keep_query=True, permissions=(permission_document_version_view,),
text=_('Previous page'),
view='documents:document_version_page_navigation_previous'
)
link_document_version_page_navigation_next = Link(
args='resolved_object.pk', conditional_disable=is_last_page,
icon=icon_document_version_page_navigation_next,
keep_query=True, text=_('Next page'),
permissions=(permission_document_version_view,),
view='documents:document_version_page_navigation_next'
)
link_document_version_page_return_to_document = Link(
args='resolved_object.document_version.document.pk',
icon=icon_document_version_page_return_to_document,
permissions=(permission_document_view,),
text=_('Document'), view='documents:document_preview'
)
link_document_version_page_return_to_document_version = Link(
args='resolved_object.document_version.pk',
icon=icon_document_version_page_return_to_document_version,
permissions=(permission_document_version_view,),
text=_('Document version'), view='documents:document_version_preview'
)
link_document_version_page_return_to_document_version_page_list = Link(
args='resolved_object.document_version.pk',
icon=icon_document_version_page_return_to_document_version_page_list,
permissions=(permission_document_version_view,),
text=_('Document version pages'),
view='documents:document_version_page_list'
)
link_document_version_page_rotate_left = Link(
args='resolved_object.pk', icon=icon_document_version_page_rotate_left,
keep_query=True, permissions=(permission_document_version_view,),
text=_('Rotate left'), view='documents:document_version_page_rotate_left'
)
link_document_version_page_rotate_right = Link(
args='resolved_object.pk', icon=icon_document_version_page_rotate_right,
keep_query=True, permissions=(permission_document_version_view,),
text=_('Rotate right'),
view='documents:document_version_page_rotate_right'
)
link_document_version_page_view = Link(
args='resolved_object.pk', icon=icon_document_version_page_view,
permissions=(permission_document_version_view,), text=_('Page image'),
view='documents:document_version_page_view'
)
link_document_version_page_view_reset = Link(
args='resolved_object.pk', icon=icon_document_version_page_view_reset,
permissions=(permission_document_version_view,), text=_('Reset view'),
view='documents:document_version_page_view_reset'
)
link_document_version_page_zoom_in = Link(
args='resolved_object.pk', conditional_disable=is_max_zoom,
icon=icon_document_version_page_zoom_in, keep_query=True,
permissions=(permission_document_version_view,), text=_('Zoom in'),
view='documents:document_version_page_zoom_in'
)
link_document_version_page_zoom_out = Link(
args='resolved_object.pk', conditional_disable=is_min_zoom,
icon=icon_document_version_page_zoom_out, keep_query=True,
permissions=(permission_document_version_view,), text=_('Zoom out'),
view='documents:document_version_page_zoom_out'
)
avg_line_length: 44.446667 | max_line_length: 84 | alphanum_fraction: 0.818359
hexsha: fbbfa89ebe5b550006b541f5e8d75616bd047c14 | size: 1,905 | ext: py | lang: Python
path: data_prep/MP2RAGE/15_cmb_all_masks.py | repo: MSchnei/MRI_segmentation_preparation_scripts @ 02f65b584e09908247202fff57714b63ef44e7dd | licenses: ["MIT"]
max_stars_count: 1 (2022-03-17T15:46:25.000Z .. 2022-03-17T15:46:25.000Z) | max_issues_count: null | max_forks_count: null
content:
"""Opening closing operations on MRI data (nifti)."""
import os
import numpy as np
from nibabel import load, save, Nifti1Image
# set analysis path
analysispath = str(os.environ['parent_path']) + "/data/shared_data/data_mp2rage"
# set list with subj names
subjnames = [
'sub-001',
'sub-013',
'sub-014',
'sub-019',
]
# set list with tissue labels
labellist = ['_csf', '_vessels', '_subcortical', '_sinus', '_wm',
'_ventricles', '_gm']
# set array with numerical values in order corresponding to labels list
aryvalues = np.array([3, 6, 5, 7, 1, 4, 2], dtype=np.int32)
for subj in subjnames:
print('Working on ' + subj)
# create path to different labels and store paths in pathlist
pathlist = []
for labelname in labellist:
pathlist.append(os.path.join(analysispath, 'derivatives', subj,
'labels', subj + labelname + '.nii.gz'))
# create nii object for every different label and store in niilist
niilist = []
for path in pathlist:
niilist.append(load(path))
# Loop over list with nii objects to get data
for indNii, nii in enumerate(niilist):
# load data as boolean
tempbool = nii.get_data().astype(np.bool)
if indNii == 0:
dataOut = np.zeros((tempbool.shape), dtype=np.int32)
# retrieve value that should be assigned
tempval = aryvalues[indNii]
# assign this value to data points indexed by boolean
dataOut[tempbool] = tempval
# Extract directory name
dirname = os.path.dirname(niilist[0].get_filename())
niiheader = niilist[0].header
niiaffine = niilist[0].affine
# save as nifti
out = Nifti1Image(dataOut, header=niiheader, affine=niiaffine)
out.set_data_dtype(np.int32)
save(out, os.path.join(dirname, subj + '_all_labels_v01.nii.gz'))
    print('... image composed.')
avg_line_length: 29.765625 | max_line_length: 80 | alphanum_fraction: 0.645669
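One detail worth making explicit in the script above: labels are written into `dataOut` in the order of `labellist`, so where two binary masks overlap, the later label (e.g. `_gm`, value 2) overwrites the earlier one (e.g. `_csf`, value 3). A tiny standalone check of that overwrite behaviour, using made-up 1-D masks:

```python
import numpy as np

# Two fake, overlapping binary masks standing in for e.g. _csf and _gm.
mask_csf = np.array([1, 1, 0, 0], dtype=bool)
mask_gm = np.array([0, 1, 1, 0], dtype=bool)

out = np.zeros(4, dtype=np.int32)
out[mask_csf] = 3   # csf label, applied first
out[mask_gm] = 2    # gm label, applied later, wins on the overlapping voxel

print(out)  # [3 2 2 0] -> position 1 ends up as gm (2), not csf (3)
```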
hexsha: f90a1b6d424f52c929c7aeed117d3747ba6850fe | size: 5,888 | ext: py | lang: Python
max_stars: src/pycycle/nozzle.py | OpenMDAO-Plugins/pyCycle @ e55f9cbd0d596049bfbc45a9b1c4f0b560c9367d | ["Apache-2.0"] | count: 3 | events: 2016-11-26T17:19:32.000Z .. 2020-07-07T14:50:22.000Z
max_issues: src/pycycle/nozzle.py | jcchin/pyCycle @ 1a9807901af9d85b667588e8810d86aefde7bd9e | ["Apache-2.0"] | count: null | events: null
max_forks: src/pycycle/nozzle.py | jcchin/pyCycle @ 1a9807901af9d85b667588e8810d86aefde7bd9e | ["Apache-2.0"] | count: 5 | events: 2015-07-02T07:35:05.000Z .. 2016-11-26T17:18:30.000Z
content:
from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Float, VarTree, Enum
from pycycle.flowstation import FlowStationVar, FlowStation
from pycycle.cycle_component import CycleComponent
class Nozzle(CycleComponent):
"""Calculates the gross thrust for a convergent-divergent nozzle, assuming an ideally expanded
exit condition"""
Fl_ref = FlowStationVar(iotype="in", desc="Flowstation with reference exit conditions", copy=None)
Fl_I = FlowStationVar(iotype="in", desc="incoming air stream to nozzle", copy=None)
    dPqP = Float(0, iotype="in", desc="ratio of change in total pressure to incoming total pressure")
Fl_O = FlowStationVar(iotype="out", desc="outgoing air stream from nozzle", copy=None)
Athroat_dmd = Float(iotype="out", desc="demand throat area for the nozzle at the operating condition.")
Athroat_des = Float(iotype="out", desc="nozzle throat area at the design condition")
Aexit_des = Float(iotype="out", desc="nozzle exit area at the design condition")
PsSubsonic = Float(iotype="out", desc="back pressure corresponding to subsonic expansion")
PsSupersonic = Float(iotype="out", desc="back pressure corresponding to supersonic expansion")
PsShock = Float(iotype="out", desc="back pressure corresponding to a normal shock at the nozzle exit")
Fg = Float(iotype="out", desc="gross thrust from nozzle", units="lbf")
PR = Float(iotype="out", desc="ratio between total and static pressures at the nozzle exit")
AR = Float(iotype="out", desc="ratio of exit area to throat area")
#used for mass flow balance iterations
WqAexit = Float(iotype="out", desc="mass flow per unit area at operating condition", units="lbm/(s*inch**2)")
WqAexit_dmd = Float(iotype="out", desc="demand mass flow per unit area at operating condition", units="lbm/(s*inch**2)")
switchRegime = Enum(('UNCHOKED', 'NORMAL_SHOCK', 'UNDEREXPANDED', 'PERFECTLY_EXPANDED' ,'OVEREXPANDED'),
iotype="out", desc="nozzle operating regime")
def shockPR(self, mach, gamma):
"""Calculates stagnation pressure ratio across a normal shock wave"""
MN = mach
g = gamma
return (((g+1)/2*MN**2/(1+(g-1)/2*MN**2))**(g/(g-1)) * (1/ (2*g/(g+1)*MN**2 - (g-1)/(g+1)))**(1/(g-1)))
def execute(self):
Fl_I = self.Fl_I
Fl_O = self.Fl_O
Fl_ref = self.Fl_ref
fs_throat = FlowStation()
fs_exitIdeal = FlowStation()
fs_throat.W = Fl_I.W
Pt_out = (1-self.dPqP)*Fl_I.Pt
fs_throat.setTotalTP( Fl_I.Tt, Pt_out )
fs_throat.Mach = 1.0
self.Athroat_dmd = fs_throat.area
fs_exitIdeal.W = Fl_I.W
fs_exitIdeal.setTotalTP( Fl_I.Tt, Pt_out )
fs_exitIdeal.Ps = Fl_ref.Ps
Fl_O.W = Fl_I.W
Fl_O.setTotalTP( Fl_I.Tt, Pt_out )
Fl_O.Mach = fs_exitIdeal.Mach
if self.run_design:
# Design Calculations at throat
self.Athroat_des = fs_throat.area
# Design calculations at exit
self.Aexit_des = fs_exitIdeal.area
self.switchRegime = "PERFECTLY_EXPANDED"
else:
# Find subsonic solution, curve 4
Fl_O.sub_or_super = "sub"
Fl_O.area = self.Aexit_des
MachSubsonic = Fl_O.Mach
if MachSubsonic > 1:
print "invalid nozzle subsonic solution"
PsSubsonic = Fl_O.Ps
# Find supersonic solution, curve 5
Fl_O.sub_or_super = "super"
Fl_O.area = self.Aexit_des
MachSupersonic = Fl_O.Mach
PsSupersonic = Fl_O.Ps
# normal shock at nozzle exit, curve c
Fl_O.sub_or_super = "sub"
Msuper = MachSupersonic
PtExit = self.shockPR( Msuper, fs_throat.gams ) * fs_throat.Pt
Fl_O.setTotalTP( fs_throat.Tt, PtExit )
Fl_O.area = self.Aexit_des
PsShock = Fl_O.Ps
# find correct operating regime
# curves 1 to 4
if Fl_ref.Ps >= PsSubsonic:
self.switchRegime = "UNCHOKED"
fs_throat.sub_or_super = "sub"
Fl_O.sub_or_super = "sub"
fs_throat.area = self.Athroat_des
Fl_O.setTotalTP( fs_throat.Tt, fs_throat.Pt )
Fl_O.area = self.Aexit_des
# between curves 4 and c
elif Fl_ref.Ps < PsSubsonic and Fl_ref.Ps >= PsShock:
self.switchRegime = "NORMAL_SHOCK"
Fl_O.sub_or_super = "sub"
Fl_O.Ps = Fl_ref.Ps
# between curves c and 5
elif Fl_ref.Ps < PsShock and Fl_ref.Ps > PsSupersonic:
self.switchRegime = "OVEREXPANDED"
Fl_O.sub_or_super = "super"
Fl_O.setTotalTP( fs_throat.Tt, fs_throat.Pt )
Fl_O.area = self.Aexit_des
# between curves 5 and e
elif Fl_ref.Ps <= PsSupersonic:
self.switchRegime = "UNDEREXPANDED"
Fl_O.sub_or_super = "super"
Fl_O.setTotalTP( fs_throat.Tt, fs_throat.Pt )
Fl_O.area = self.Aexit_des
if abs(Fl_ref.Ps - PsSupersonic)/Fl_ref.Ps < .001:
self.switchRegime = "PERFECTLY_EXPANDED"
self.Fg = Fl_O.W*Fl_O.Vflow/32.174 + Fl_O.area*(Fl_O.Ps-Fl_ref.Ps)
self.PR = fs_throat.Pt/Fl_O.Ps
self.AR = Fl_O.area/fs_throat.area
self.WqAexit = Fl_I.W/self.Athroat_des
self.WqAexit_dmd = Fl_I.W/self.Athroat_dmd
if self.switchRegime == "UNCHOKED":
self.WqAexit = Fl_I.W/Fl_ref.Ps
self.WqAexit_dmd = Fl_I.W/Fl_O.Ps
if __name__ == "__main__":
from openmdao.main.api import set_as_top
c = set_as_top(Nozzle())
c.run()
avg_line_length: 38.993377 | max_line_length: 124 | alphanum_fraction: 0.608696
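As a quick sanity check on `shockPR`, the standalone snippet below evaluates the same normal-shock stagnation pressure ratio at Mach 2 and gamma = 1.4, which should land near the tabulated value of about 0.7209. The formula is the one from `Nozzle.shockPR` above; only the free-function wrapper and the test values are additions here.

```python
def shock_pr(mach, gamma):
    """Stagnation pressure ratio Pt2/Pt1 across a normal shock (same formula
    as Nozzle.shockPR above, written as a free function for a quick check)."""
    MN, g = mach, gamma
    return (((g + 1) / 2 * MN**2 / (1 + (g - 1) / 2 * MN**2))**(g / (g - 1)) *
            (1 / (2 * g / (g + 1) * MN**2 - (g - 1) / (g + 1)))**(1 / (g - 1)))

# Normal-shock tables give Pt2/Pt1 of roughly 0.7209 at M = 2, gamma = 1.4.
print(shock_pr(2.0, 1.4))
```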
hexsha: 3aab721326294ba8b0b7d6710684bdb1b4563719 | size: 1,674 | ext: py | lang: Python
max_stars / max_issues / max_forks: modules/utils.py | merlinfuchs/clancy @ 7c08025a47a48ae2fa90ce8916fb32fa6311db68 | ["MIT"] | counts: null | event datetimes: null
content:
from dbots.cmd import *
from dbots import rest
from util import *
class UtilsModule(Module):
@Module.command()
async def nopings(self, ctx, message):
"""
Send the message without having to worry about pings
"""
await send_webhook_response(ctx, message)
@Module.command()
async def embed(self, ctx, title, url, description):
"""
Create a simple embed message
"""
try:
await send_webhook_response(ctx, embeds=[{
"title": title,
"url": url,
"description": description
}])
except rest.HTTPBadRequest:
await ctx.respond("Something went wrong :(\n"
"Did you use a valid url?", ephemeral=True)
@Module.command(
extends=dict(
expression="The expression to evaluate (e.g. 1+1)"
)
)
async def calculate(self, ctx, expression):
"""
Evaluate a math expression
"""
async with ctx.bot.session.post("http://api.mathjs.org/v4/", json={"expr": expression}) as resp:
data = await resp.json()
if data["error"] is not None:
await ctx.respond(data["error"], ephemeral=True)
return
await send_webhook_response(ctx, f"`{expression} = {data['result']}`")
# @Module.command(
# extends=dict(
# message_url="The URL or ID of the message to quote",
# text="Your response text"
# )
# )
async def quote(self, message_url: str.strip, text):
"""
        Use this to quote a message from another channel
"""
| 28.862069
| 104
| 0.546595
|
bdeb6fe03c20487e7edcc878dcd2309d32e5097b
| 26,083
|
py
|
Python
|
crc/services/study_service.py
|
sartography/cr-connect-workflow
|
e0d1f63c082e9f4e6e3593c45d615afe04f42729
|
[
"MIT"
] | 2
|
2019-12-02T12:39:21.000Z
|
2022-01-10T23:31:36.000Z
|
crc/services/study_service.py
|
sartography/cr-connect-workflow
|
e0d1f63c082e9f4e6e3593c45d615afe04f42729
|
[
"MIT"
] | 341
|
2019-12-11T15:23:59.000Z
|
2022-03-22T19:36:37.000Z
|
crc/services/study_service.py
|
sartography/cr-connect-workflow
|
e0d1f63c082e9f4e6e3593c45d615afe04f42729
|
[
"MIT"
] | 4
|
2021-03-23T19:17:14.000Z
|
2022-03-10T03:12:10.000Z
|
import urllib
from copy import copy
from datetime import datetime
from typing import List
import flask
import requests
from SpiffWorkflow import WorkflowException
from SpiffWorkflow.bpmn.PythonScriptEngine import Box
from SpiffWorkflow.exceptions import WorkflowTaskExecException
from ldap3.core.exceptions import LDAPSocketOpenError
from crc import db, session, app
from crc.api.common import ApiError
from crc.models.data_store import DataStoreModel
from crc.models.email import EmailModel
from crc.models.file import FileModel, File, FileSchema
from crc.models.ldap import LdapSchema
from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus
from crc.models.study import StudyModel, Study, StudyStatus, Category, WorkflowMetadata, StudyEventType, StudyEvent, \
IrbStatus, StudyAssociated, StudyAssociatedSchema
from crc.models.task_event import TaskEventModel, TaskEvent
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \
WorkflowStatus, WorkflowSpecDependencyFile
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.lookup_service import LookupService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_processor import WorkflowProcessor
class StudyService(object):
"""Provides common tools for working with a Study"""
INVESTIGATOR_LIST = "investigators.xlsx" # A reference document containing details about what investigators to show, and when.
@staticmethod
def _is_valid_study(study_id):
study_info = ProtocolBuilderService().get_study_details(study_id)
if 'REVIEW_TYPE' in study_info.keys() and study_info['REVIEW_TYPE'] in [2, 3, 23, 24]:
return True
return False
def get_studies_for_user(self, user, include_invalid=False):
"""Returns a list of all studies for the given user."""
associated = session.query(StudyAssociated).filter_by(uid=user.uid, access=True).all()
associated_studies = [x.study_id for x in associated]
db_studies = session.query(StudyModel).filter((StudyModel.user_uid == user.uid) |
(StudyModel.id.in_(associated_studies))).all()
studies = []
for study_model in db_studies:
if include_invalid or self._is_valid_study(study_model.id):
studies.append(StudyService.get_study(study_model.id, study_model, do_status=False))
return studies
@staticmethod
def get_all_studies_with_files():
"""Returns a list of all studies"""
db_studies = session.query(StudyModel).all()
studies = []
for s in db_studies:
study = Study.from_model(s)
study.files = FileService.get_files_for_study(study.id)
studies.append(study)
return studies
@staticmethod
def get_study(study_id, study_model: StudyModel = None, do_status=False):
"""Returns a study model that contains all the workflows organized by category.
IMPORTANT: This is intended to be a lightweight call, it should never involve
loading up and executing all the workflows in a study to calculate information."""
if not study_model:
study_model = session.query(StudyModel).filter_by(id=study_id).first()
study = Study.from_model(study_model)
study.create_user_display = LdapService.user_info(study.user_uid).display_name
last_event: TaskEventModel = session.query(TaskEventModel) \
.filter_by(study_id=study_id, action='COMPLETE') \
.order_by(TaskEventModel.date.desc()).first()
if last_event is None:
study.last_activity_user = 'Not Started'
study.last_activity_date = ""
else:
study.last_activity_user = LdapService.user_info(last_event.user_uid).display_name
study.last_activity_date = last_event.date
study.categories = StudyService.get_categories()
workflow_metas = StudyService._get_workflow_metas(study_id)
files = FileService.get_files_for_study(study.id)
files = (File.from_models(model, FileService.get_file_data(model.id),
DocumentService.get_dictionary()) for model in files)
study.files = list(files)
# Calling this line repeatedly is very very slow. It creates the
# master spec and runs it. Don't execute this for Abandoned studies, as
# we don't have the information to process them.
if study.status != StudyStatus.abandoned:
# this line is taking 99% of the time that is used in get_study.
# see ticket #196
if do_status:
# __get_study_status() runs the master workflow to generate the status dictionary
status = StudyService._get_study_status(study_model)
study.warnings = StudyService._update_status_of_workflow_meta(workflow_metas, status)
# Group the workflows into their categories.
for category in study.categories:
category.workflows = {w for w in workflow_metas if w.category_id == category.id}
return study
@staticmethod
def get_study_associate(study_id=None, uid=None):
"""
gets details on how one uid is related to a study, returns a StudyAssociated model
"""
study = db.session.query(StudyModel).filter(StudyModel.id == study_id).first()
if study is None:
raise ApiError('study_not_found', 'No study found with id = %d' % study_id)
if uid is None:
raise ApiError('uid not specified', 'A valid uva uid is required for this function')
if uid == study.user_uid:
return StudyAssociated(uid=uid, role='owner', send_email=True, access=True)
people = db.session.query(StudyAssociated).filter((StudyAssociated.study_id == study_id) &
(StudyAssociated.uid == uid)).first()
if people:
return people
else:
raise ApiError('uid_not_associated_with_study', "user id %s was not associated with study number %d" % (uid,
study_id))
@staticmethod
def get_study_associates(study_id):
"""
gets all associated people for a study from the database
"""
study = db.session.query(StudyModel).filter(StudyModel.id == study_id).first()
if study is None:
raise ApiError('study_not_found', 'No study found with id = %d' % study_id)
people = db.session.query(StudyAssociated).filter(StudyAssociated.study_id == study_id).all()
ldap_info = LdapService.user_info(study.user_uid)
owner = StudyAssociated(uid=study.user_uid, role='owner', send_email=True, access=True,
ldap_info=ldap_info)
people.append(owner)
return people
@staticmethod
def update_study_associates(study_id, associates):
"""
updates the list of associates in the database for a study_id and a list
of dicts that contains associates
"""
if study_id is None:
raise ApiError('study_id not specified', "This function requires the study_id parameter")
for person in associates:
if not LdapService.user_exists(person.get('uid', 'impossible_uid')):
if person.get('uid', 'impossible_uid') == 'impossible_uid':
                    raise ApiError('associate with no uid', 'One of the associates passed as a parameter doesn\'t have '
'a uid specified')
raise ApiError('trying_to_grant_access_to_user_not_found_in_ldap', "You are trying to grant access to "
"%s, but that user was not found in "
"ldap "
"- please check to ensure it is a "
"valid uva uid" % person.get('uid'))
study = db.session.query(StudyModel).filter(StudyModel.id == study_id).first()
if study is None:
raise ApiError('study_id not found', "A study with id# %d was not found" % study_id)
db.session.query(StudyAssociated).filter(StudyAssociated.study_id == study_id).delete()
for person in associates:
newAssociate = StudyAssociated()
newAssociate.study_id = study_id
newAssociate.uid = person['uid']
newAssociate.role = person.get('role', None)
newAssociate.send_email = person.get('send_email', False)
newAssociate.access = person.get('access', False)
session.add(newAssociate)
session.commit()
@staticmethod
def update_study_associate(study_id=None, uid=None, role="", send_email=False, access=False):
if study_id is None:
raise ApiError('study_id not specified', "This function requires the study_id parameter")
if uid is None:
raise ApiError('uid not specified', "This function requires a uva uid parameter")
if not LdapService.user_exists(uid):
raise ApiError('trying_to_grant_access_to_user_not_found_in_ldap', "You are trying to grant access to "
"%s but they were not found in ldap "
"- please check to ensure it is a "
"valid uva uid" % uid)
study = db.session.query(StudyModel).filter(StudyModel.id == study_id).first()
if study is None:
raise ApiError('study_id not found', "A study with id# %d was not found" % study_id)
db.session.query(StudyAssociated).filter((StudyAssociated.study_id == study_id) & (StudyAssociated.uid ==
uid)).delete()
newAssociate = StudyAssociated()
newAssociate.study_id = study_id
newAssociate.uid = uid
newAssociate.role = role
newAssociate.send_email = send_email
newAssociate.access = access
session.add(newAssociate)
session.commit()
return True
@staticmethod
def delete_study(study_id):
session.query(TaskEventModel).filter_by(study_id=study_id).delete()
session.query(StudyAssociated).filter_by(study_id=study_id).delete()
session.query(EmailModel).filter_by(study_id=study_id).delete()
session.query(StudyEvent).filter_by(study_id=study_id).delete()
for workflow in session.query(WorkflowModel).filter_by(study_id=study_id):
StudyService.delete_workflow(workflow.id)
study = session.query(StudyModel).filter_by(id=study_id).first()
session.delete(study)
session.commit()
@staticmethod
def delete_workflow(workflow_id):
workflow = session.query(WorkflowModel).get(workflow_id)
if not workflow:
return
session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
session.query(WorkflowSpecDependencyFile).filter_by(workflow_id=workflow_id).delete(synchronize_session='fetch')
session.query(FileModel).filter_by(workflow_id=workflow_id).update({'archived': True, 'workflow_id': None})
session.delete(workflow)
session.commit()
@staticmethod
def get_categories():
"""Returns a list of category objects, in the correct order."""
cat_models = db.session.query(WorkflowSpecCategoryModel) \
.order_by(WorkflowSpecCategoryModel.display_order).all()
categories = []
for cat_model in cat_models:
categories.append(Category(cat_model))
return categories
@staticmethod
def get_documents_status(study_id):
"""Returns a list of documents related to the study, and any file information
        that is available."""
# Get PB required docs, if Protocol Builder Service is enabled.
if ProtocolBuilderService.is_enabled() and study_id is not None:
try:
pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id)
except requests.exceptions.ConnectionError as ce:
app.logger.error(f'Failed to connect to the Protocol Builder - {str(ce)}', exc_info=True)
pb_docs = []
else:
pb_docs = []
# Loop through all known document types, get the counts for those files,
# and use pb_docs to mark those as required.
doc_dictionary = DocumentService.get_dictionary()
documents = {}
for code, doc in doc_dictionary.items():
doc['required'] = False
if ProtocolBuilderService.is_enabled() and doc['id']:
pb_data = next((item for item in pb_docs if int(item['AUXDOCID']) == int(doc['id'])), None)
if pb_data:
doc['required'] = True
doc['study_id'] = study_id
doc['code'] = code
# Make a display name out of categories
name_list = []
for cat_key in ['category1', 'category2', 'category3']:
if doc[cat_key] not in ['', 'NULL', None]:
name_list.append(doc[cat_key])
doc['display_name'] = ' / '.join(name_list)
# For each file, get associated workflow status
doc_files = FileService.get_files_for_study(study_id=study_id, irb_doc_code=code)
doc['count'] = len(doc_files)
doc['files'] = []
# when we run tests - it doesn't look like the user is available
# so we return a bogus token
token = 'not_available'
if hasattr(flask.g, 'user'):
token = flask.g.user.encode_auth_token()
for file_model in doc_files:
file = File.from_models(file_model, FileService.get_file_data(file_model.id), [])
file_data = FileSchema().dump(file)
del file_data['document']
data = db.session.query(DataStoreModel).filter(DataStoreModel.file_id == file.id).all()
data_store_data = {}
for d in data:
data_store_data[d.key] = d.value
file_data["data_store"] = data_store_data
doc['files'].append(Box(file_data))
# update the document status to match the status of the workflow it is in.
if 'status' not in doc or doc['status'] is None:
workflow: WorkflowModel = session.query(WorkflowModel).filter_by(id=file.workflow_id).first()
doc['status'] = workflow.status.value
documents[code] = doc
return Box(documents)
@staticmethod
def get_investigator_dictionary():
"""Returns a dictionary of document details keyed on the doc_code."""
file_data = FileService.get_reference_file_data(StudyService.INVESTIGATOR_LIST)
lookup_model = LookupService.get_lookup_model_for_file_data(file_data, 'code', 'label')
doc_dict = {}
for lookup_data in lookup_model.dependencies:
doc_dict[lookup_data.value] = lookup_data.data
return doc_dict
@staticmethod
def get_investigators(study_id, all=False):
"""Convert array of investigators from protocol builder into a dictionary keyed on the type. """
# Loop through all known investigator types as set in the reference file
inv_dictionary = StudyService.get_investigator_dictionary()
# Get PB required docs
pb_investigators = ProtocolBuilderService.get_investigators(study_id=study_id)
        # It is possible for the same investigator type to show up more than once in some
        # circumstances; in those cases a counter is appended to the key (see the note after this method).
investigators = {}
for i_type in inv_dictionary:
pb_data_entries = list(item for item in pb_investigators if item['INVESTIGATORTYPE'] == i_type)
entry_count = 0
investigators[i_type] = copy(inv_dictionary[i_type])
investigators[i_type]['user_id'] = None
for pb_data in pb_data_entries:
entry_count += 1
if entry_count == 1:
t = i_type
else:
t = i_type + "_" + str(entry_count)
investigators[t] = copy(inv_dictionary[i_type])
investigators[t]['user_id'] = pb_data["NETBADGEID"]
investigators[t].update(StudyService.get_ldap_dict_if_available(pb_data["NETBADGEID"]))
if not all:
investigators = dict(filter(lambda elem: elem[1]['user_id'] is not None, investigators.items()))
return investigators
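    # Editor's note (illustrative example, not from the original source): if Protocol
    # Builder returns two investigators with the same INVESTIGATORTYPE, say 'PI' (a
    # hypothetical type code), the first keeps the key 'PI' and the second is stored
    # under 'PI_2'.  With all=False, only entries that resolved to a user_id survive
    # the final filter.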
@staticmethod
def get_ldap_dict_if_available(user_id):
try:
return LdapSchema().dump(LdapService().user_info(user_id))
except ApiError as ae:
app.logger.info(str(ae))
return {"error": str(ae)}
except LDAPSocketOpenError:
app.logger.info("Failed to connect to LDAP Server.")
return {}
@staticmethod
def synch_with_protocol_builder_if_enabled(user):
"""Assures that the studies we have locally for the given user are
in sync with the studies available in protocol builder. """
if ProtocolBuilderService.is_enabled():
app.logger.info("The Protocol Builder is enabled. app.config['PB_ENABLED'] = " +
str(app.config['PB_ENABLED']))
# Get studies matching this user from Protocol Builder
pb_studies: List[ProtocolBuilderStudy] = ProtocolBuilderService.get_studies(user.uid)
# Get studies from the database
db_studies = session.query(StudyModel).filter_by(user_uid=user.uid).all()
# Update all studies from the protocol builder, create new studies as needed.
# Further assures that every active study (that does exist in the protocol builder)
# has a reference to every available workflow (though some may not have started yet)
for pb_study in pb_studies:
new_status = None
db_study = next((s for s in db_studies if s.id == pb_study.STUDYID), None)
if not db_study:
db_study = StudyModel(id=pb_study.STUDYID)
db_study.status = None # Force a new sa
new_status = StudyStatus.in_progress
session.add(db_study)
db_studies.append(db_study)
db_study.update_from_protocol_builder(pb_study)
StudyService._add_all_workflow_specs_to_study(db_study)
# If there is a new automatic status change and there isn't a manual change in place, record it.
if new_status and db_study.status != StudyStatus.hold:
db_study.status = new_status
StudyService.add_study_update_event(db_study,
status=new_status,
event_type=StudyEventType.automatic)
# Mark studies as inactive that are no longer in Protocol Builder
for study in db_studies:
pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None)
if not pb_study and study.status != StudyStatus.abandoned:
study.status = StudyStatus.abandoned
StudyService.add_study_update_event(study,
status=StudyStatus.abandoned,
event_type=StudyEventType.automatic)
db.session.commit()
@staticmethod
def add_study_update_event(study, status, event_type, user_uid=None, comment=''):
study_event = StudyEvent(study=study,
status=status,
event_type=event_type,
user_uid=user_uid,
comment=comment)
db.session.add(study_event)
db.session.commit()
@staticmethod
def _update_status_of_workflow_meta(workflow_metas, status):
# Update the status on each workflow
warnings = []
unused_statuses = status.copy() # A list of all the statuses that are not used.
for wfm in workflow_metas:
unused_statuses.pop(wfm.workflow_spec_id, None)
wfm.state_message = ''
# do we have a status for you
if wfm.workflow_spec_id not in status.keys():
warnings.append(ApiError("missing_status", "No status information provided about workflow %s" % wfm.workflow_spec_id))
continue
if not isinstance(status[wfm.workflow_spec_id], dict):
warnings.append(ApiError(code='invalid_status',
message=f'Status must be a dictionary with "status" and "message" keys. Name is {wfm.workflow_spec_id}. Status is {status[wfm.workflow_spec_id]}'))
continue
if 'message' in status[wfm.workflow_spec_id].keys():
wfm.state_message = status[wfm.workflow_spec_id]['message']
if 'status' not in status[wfm.workflow_spec_id].keys():
warnings.append(ApiError("missing_status_key",
"Workflow '%s' is present in master workflow, but doesn't have a status" % wfm.workflow_spec_id))
continue
if not WorkflowState.has_value(status[wfm.workflow_spec_id]['status']):
warnings.append(ApiError("invalid_state",
"Workflow '%s' can not be set to '%s', should be one of %s" % (
wfm.workflow_spec_id, status[wfm.workflow_spec_id]['status'], ",".join(WorkflowState.list())
)))
continue
wfm.state = WorkflowState[status[wfm.workflow_spec_id]['status']]
for status in unused_statuses:
if isinstance(unused_statuses[status], dict) and 'status' in unused_statuses[status]:
warnings.append(ApiError("unmatched_status", "The master workflow provided a status for '%s' a "
"workflow that doesn't seem to exist." %
status))
return warnings
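    # Editor's note (hedged sketch of the expected input, not from the original source):
    # the `status` argument is the dictionary produced by the master workflow, roughly
    #     {"<workflow_spec_id>": {"status": "<a WorkflowState value>", "message": "optional text"}, ...}
    # Entries that are missing, malformed, or that name a workflow which does not exist
    # are converted into the ApiError warnings returned above.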
@staticmethod
def _get_workflow_metas(study_id):
# Add in the Workflows for each category
workflow_models = db.session.query(WorkflowModel). \
join(WorkflowSpecModel). \
filter(WorkflowSpecModel.is_master_spec == False). \
filter((WorkflowSpecModel.library == False) | \
(WorkflowSpecModel.library == None)). \
filter(WorkflowModel.study_id == study_id). \
all()
workflow_metas = []
for workflow in workflow_models:
workflow_metas.append(WorkflowMetadata.from_workflow(workflow))
return workflow_metas
@staticmethod
def _get_study_status(study_model):
"""Uses the Top Level Workflow to calculate the status of the study, and it's
workflow models."""
master_specs = db.session.query(WorkflowSpecModel). \
filter_by(is_master_spec=True).all()
if len(master_specs) < 1:
raise ApiError("missing_master_spec", "No specifications are currently marked as the master spec.")
if len(master_specs) > 1:
raise ApiError("multiple_master_specs",
"There is more than one master specification, and I don't know what to do.")
return WorkflowProcessor.run_master_spec(master_specs[0], study_model)
@staticmethod
def _add_all_workflow_specs_to_study(study_model: StudyModel):
existing_models = session.query(WorkflowModel).filter(WorkflowModel.study == study_model).all()
existing_specs = list(m.workflow_spec_id for m in existing_models)
new_specs = session.query(WorkflowSpecModel). \
filter(WorkflowSpecModel.is_master_spec == False). \
filter(WorkflowSpecModel.id.notin_(existing_specs)). \
all()
errors = []
for workflow_spec in new_specs:
try:
StudyService._create_workflow_model(study_model, workflow_spec)
except WorkflowTaskExecException as wtee:
errors.append(ApiError.from_task("workflow_startup_exception", str(wtee), wtee.task))
except WorkflowException as we:
errors.append(ApiError.from_task_spec("workflow_startup_exception", str(we), we.sender))
return errors
@staticmethod
def _create_workflow_model(study: StudyModel, spec):
workflow_model = WorkflowModel(status=WorkflowStatus.not_started,
study=study,
user_id=None,
workflow_spec_id=spec.id,
last_updated=datetime.utcnow())
session.add(workflow_model)
session.commit()
return workflow_model
| 49.681905
| 188
| 0.613273
|
9bcfa0a5ec4bfa90e04715fd6fc8793e42b2a77f
| 770
|
py
|
Python
|
subtools/subase.py
|
huimingz/subtools
|
185aff705f3730bac5e4d1725c5d24c549e8b0e4
|
[
"MIT"
] | 4
|
2017-11-27T15:18:33.000Z
|
2018-07-27T06:30:23.000Z
|
subtools/subase.py
|
Kairu-Madigan/subtools
|
185aff705f3730bac5e4d1725c5d24c549e8b0e4
|
[
"MIT"
] | 1
|
2021-06-01T22:06:59.000Z
|
2021-06-01T22:06:59.000Z
|
subtools/subase.py
|
huimingz/subtools
|
185aff705f3730bac5e4d1725c5d24c549e8b0e4
|
[
"MIT"
] | 1
|
2017-11-27T08:37:35.000Z
|
2017-11-27T08:37:35.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author: "Kairu"
class Sub(object):
def __init__(self):
self.text = ""
def from_file(self, path, encoding="utf-8", format=None, *args, **kwargs):
"""从文件中读取字幕内容"""
pass
def from_str(self, text, format=None, *args, **kwargs):
"""从字符串中读取字幕内容"""
pass
def from_bin(self, content, encoding="utf-8", format=None, *args, **kwargs):
"""从二进制中获取字幕内容"""
pass
def as_json(self):
"""返回json序列号数据"""
pass
def as_pickle(self):
"""返回pickle序列号数据"""
pass
def as_str(self):
"""返回str字幕内容"""
pass
def as_bin(self):
"""返回二进制字幕内容"""
pass
| 17.906977
| 81
| 0.48961
|
8d0bf284a91754131c14b3756cff78aca752b622
| 10,071
|
py
|
Python
|
Lib/site-packages/rest_framework/generics.py
|
inging44/python3
|
fcd8d9d2ee54b46b757ecf34f284b4e60a43097a
|
[
"bzip2-1.0.6"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
Lib/site-packages/rest_framework/generics.py
|
inging44/python3
|
fcd8d9d2ee54b46b757ecf34f284b4e60a43097a
|
[
"bzip2-1.0.6"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
Lib/site-packages/rest_framework/generics.py
|
inging44/python3
|
fcd8d9d2ee54b46b757ecf34f284b4e60a43097a
|
[
"bzip2-1.0.6"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
"""
Generic views that provide commonly needed behaviour.
"""
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.db.models.query import QuerySet
from django.http import Http404
from django.shortcuts import get_object_or_404 as _get_object_or_404
from rest_framework import mixins, views
from rest_framework.settings import api_settings
def get_object_or_404(queryset, *filter_args, **filter_kwargs):
"""
Same as Django's standard shortcut, but make sure to also raise 404
if the filter_kwargs don't match the required types.
"""
try:
return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
except (TypeError, ValueError, ValidationError):
raise Http404
class GenericAPIView(views.APIView):
"""
Base class for all other generic views.
"""
# You'll need to either set these attributes,
# or override `get_queryset()`/`get_serializer_class()`.
# If you are overriding a view method, it is important that you call
# `get_queryset()` instead of accessing the `queryset` property directly,
# as `queryset` will get evaluated only once, and those results are cached
# for all subsequent requests.
queryset = None
serializer_class = None
# If you want to use object lookups other than pk, set 'lookup_field'.
# For more complex lookup requirements override `get_object()`.
lookup_field = 'pk'
lookup_url_kwarg = None
# The filter backend classes to use for queryset filtering
filter_backends = api_settings.DEFAULT_FILTER_BACKENDS
# The style to use for queryset pagination.
pagination_class = api_settings.DEFAULT_PAGINATION_CLASS
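    # A hedged usage sketch (editor's addition, not part of rest_framework): a minimal
    # concrete view usually just sets `queryset` and `serializer_class`, for example
    #
    #     class SnippetList(ListCreateAPIView):      # Snippet and SnippetSerializer
    #         queryset = Snippet.objects.all()       # are illustrative names only
    #         serializer_class = SnippetSerializer
    #
    # Overriding get_queryset() instead of setting `queryset` allows per-request
    # filtering, e.g. limiting results to objects owned by request.user.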
def get_queryset(self):
"""
Get the list of items for this view.
This must be an iterable, and may be a queryset.
Defaults to using `self.queryset`.
This method should always be used rather than accessing `self.queryset`
directly, as `self.queryset` gets evaluated only once, and those results
are cached for all subsequent requests.
You may want to override this if you need to provide different
querysets depending on the incoming request.
(Eg. return a list of items that is specific to the user)
"""
assert self.queryset is not None, (
"'%s' should either include a `queryset` attribute, "
"or override the `get_queryset()` method."
% self.__class__.__name__
)
queryset = self.queryset
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request.
queryset = queryset.all()
return queryset
def get_object(self):
"""
Returns the object the view is displaying.
You may want to override this if you need to provide non-standard
queryset lookups. Eg if objects are referenced using multiple
keyword arguments in the url conf.
"""
queryset = self.filter_queryset(self.get_queryset())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
obj = get_object_or_404(queryset, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
def get_serializer_class(self):
"""
Return the class to use for the serializer.
Defaults to using `self.serializer_class`.
You may want to override this if you need to provide different
serializations depending on the incoming request.
(Eg. admins get full serialization, others get basic serialization)
"""
assert self.serializer_class is not None, (
"'%s' should either include a `serializer_class` attribute, "
"or override the `get_serializer_class()` method."
% self.__class__.__name__
)
return self.serializer_class
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
return {
'request': self.request,
'format': self.format_kwarg,
'view': self
}
def filter_queryset(self, queryset):
"""
Given a queryset, filter it with whichever filter backend is in use.
You are unlikely to want to override this method, although you may need
to call it either from a list view, or from a custom `get_object`
method if you want to apply the configured filtering backend to the
default queryset.
"""
for backend in list(self.filter_backends):
queryset = backend().filter_queryset(self.request, queryset, self)
return queryset
@property
def paginator(self):
"""
The paginator instance associated with the view, or `None`.
"""
if not hasattr(self, '_paginator'):
if self.pagination_class is None:
self._paginator = None
else:
self._paginator = self.pagination_class()
return self._paginator
def paginate_queryset(self, queryset):
"""
Return a single page of results, or `None` if pagination is disabled.
"""
if self.paginator is None:
return None
return self.paginator.paginate_queryset(queryset, self.request, view=self)
def get_paginated_response(self, data):
"""
Return a paginated style `Response` object for the given output data.
"""
assert self.paginator is not None
return self.paginator.get_paginated_response(data)
# Concrete view classes that provide method handlers
# by composing the mixin classes with the base view.
class CreateAPIView(mixins.CreateModelMixin,
GenericAPIView):
"""
Concrete view for creating a model instance.
"""
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class ListAPIView(mixins.ListModelMixin,
GenericAPIView):
"""
Concrete view for listing a queryset.
"""
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class RetrieveAPIView(mixins.RetrieveModelMixin,
GenericAPIView):
"""
Concrete view for retrieving a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class DestroyAPIView(mixins.DestroyModelMixin,
GenericAPIView):
"""
Concrete view for deleting a model instance.
"""
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class UpdateAPIView(mixins.UpdateModelMixin,
GenericAPIView):
"""
Concrete view for updating a model instance.
"""
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class ListCreateAPIView(mixins.ListModelMixin,
mixins.CreateModelMixin,
GenericAPIView):
"""
Concrete view for listing a queryset or creating a model instance.
"""
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class RetrieveUpdateAPIView(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
GenericAPIView):
"""
Concrete view for retrieving, updating a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class RetrieveDestroyAPIView(mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
GenericAPIView):
"""
Concrete view for retrieving or deleting a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class RetrieveUpdateDestroyAPIView(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
GenericAPIView):
"""
Concrete view for retrieving, updating or deleting a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
| 34.255102
| 82
| 0.63946
|
73d5a65bf829a99b706a39dfc549d9803ebb4eb0
| 34
|
py
|
Python
|
02_bootstrap-scripts-python/bootstrap/__main__.py
|
aws-samples/eks-bootstrap-scripts
|
9845edf6e64ba98bbb0e1cbb61733fdb2642b126
|
[
"MIT-0"
] | 3
|
2021-10-24T01:10:58.000Z
|
2022-01-28T16:20:32.000Z
|
02_bootstrap-scripts-python/bootstrap/__main__.py
|
aws-samples/eks-bootstrap-scripts
|
9845edf6e64ba98bbb0e1cbb61733fdb2642b126
|
[
"MIT-0"
] | null | null | null |
02_bootstrap-scripts-python/bootstrap/__main__.py
|
aws-samples/eks-bootstrap-scripts
|
9845edf6e64ba98bbb0e1cbb61733fdb2642b126
|
[
"MIT-0"
] | 1
|
2021-10-24T01:10:58.000Z
|
2021-10-24T01:10:58.000Z
|
from .bootstrap import main
main()
| 17
| 27
| 0.794118
|
153b1db50f22ba5c025e3af9ed9a84793c78f549
| 708
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
zahidaliayub/spec-wallet
|
5d09eee58fb2a87a952985de22a9b40425eb18e0
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
zahidaliayub/spec-wallet
|
5d09eee58fb2a87a952985de22a9b40425eb18e0
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
zahidaliayub/spec-wallet
|
5d09eee58fb2a87a952985de22a9b40425eb18e0
|
[
"MIT"
] | 5
|
2015-08-15T20:52:25.000Z
|
2021-03-13T08:10:05.000Z
|
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):4319")
for line in lines:
m = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
if __name__ == '__main__':
main()
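# Editor's note (worked example derived from the loop above): for an input line
# "1.2.3.4:4319" each octet i (0-based) is shifted left by 8*i bits, so
#     ip = 1 + (2 << 8) + (3 << 16) + (4 << 24) = 0x04030201
# i.e. the address is packed little-endian (first octet in the least significant byte)
# before being emitted as "0x04030201," for the pnSeed[] array.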
| 21.454545
| 78
| 0.508475
|
8c0a211699704036184f1e4e306caf82e49c88ef
| 6,357
|
py
|
Python
|
python/eva/__init__.py
|
seounghwan-oh/py_EVA_for_homomorphic_encryption
|
d1f2895ba54bc4a7204b6188f608e82c9edf5868
|
[
"MIT"
] | 119
|
2020-11-21T04:03:31.000Z
|
2022-03-31T11:01:00.000Z
|
python/eva/__init__.py
|
seounghwan-oh/py_EVA_for_homomorphic_encryption
|
d1f2895ba54bc4a7204b6188f608e82c9edf5868
|
[
"MIT"
] | 25
|
2020-11-24T21:38:09.000Z
|
2022-03-24T12:35:56.000Z
|
python/eva/__init__.py
|
seounghwan-oh/py_EVA_for_homomorphic_encryption
|
d1f2895ba54bc4a7204b6188f608e82c9edf5868
|
[
"MIT"
] | 37
|
2020-11-23T13:59:49.000Z
|
2022-03-29T10:07:28.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from ._eva import *
import numbers
import psutil
# Find the number of CPU cores available to this process. This has to happen before Galois is initialized because it
# messes with the CPU affinity of the process.
_default_num_threads = len(psutil.Process().cpu_affinity())
# Initialize Galois here (trying to do it in the static initialization step of the native library hangs).
_global_guard = _eva._GaloisGuard()
# Set the default number of threads to use to match the cores.
set_num_threads(_default_num_threads)
_current_program = None
def _curr():
""" Returns the EvaProgram that is currently in context """
global _current_program
if _current_program == None:
raise RuntimeError("No Program in context")
return _current_program
def _py_to_term(x, program):
""" Maps supported types into EVA terms """
if isinstance(x, Expr):
return x.term
elif isinstance(x, list):
return program._make_dense_constant(x)
elif isinstance(x, numbers.Number):
return program._make_uniform_constant(x)
elif isinstance(x, Term):
return x
else:
raise TypeError("No conversion to Term available for " + str(x))
def py_to_eva(x, program = None):
""" Maps supported types into EVA terms. May be used in library functions
to provide uniform support for Expr instances and python types that
are convertible into constants in EVA programs.
Parameters
----------
x : eva.Expr, EVA native Term, list or a number
The value to be converted to an Expr
program : EvaProgram, optional
The program a new term is created in (if necessary). If None then
the program currently in context is used (again if necessary).
"""
if isinstance(x, Expr):
return x
else:
if program == None:
program = _curr()
return Expr(_py_to_term(x, program), program)
class Expr():
""" Wrapper for EVA's native Term class. Provides operator overloads that
create terms in the associated EvaProgram.
Attributes
----------
term
The EVA native term
program : eva.EVAProgram
The program the wrapped term is in
"""
def __init__(self, term, program):
self.term = term
self.program = program
def __add__(self,other):
""" Create a new addition term """
return Expr(self.program._make_term(Op.Add, [self.term, _py_to_term(other, self.program)]), self.program)
def __radd__(self,other):
""" Create a new addition term """
return Expr(self.program._make_term(Op.Add, [_py_to_term(other, self.program), self.term]), self.program)
def __sub__(self,other):
""" Create a new subtraction term """
return Expr(self.program._make_term(Op.Sub, [self.term, _py_to_term(other, self.program)]), self.program)
def __rsub__(self,other):
""" Create a new subtraction term """
return Expr(self.program._make_term(Op.Sub, [_py_to_term(other, self.program), self.term]), self.program)
def __mul__(self,other):
""" Create a new multiplication term """
return Expr(self.program._make_term(Op.Mul, [self.term, _py_to_term(other, self.program)]), self.program)
def __rmul__(self,other):
""" Create a new multiplication term """
return Expr(self.program._make_term(Op.Mul, [_py_to_term(other, self.program), self.term]), self.program)
def __pow__(self,exponent):
""" Create exponentiation as nested multiplication terms """
if exponent < 1:
raise ValueError("exponent must be greater than zero, got " + exponent)
result = self.term
for i in range(exponent-1):
result = self.program._make_term(Op.Mul, [result, self.term])
return Expr(result, self.program)
def __lshift__(self,rotation):
""" Create a left rotation term """
return Expr(self.program._make_left_rotation(self.term, rotation), self.program)
def __rshift__(self,rotation):
""" Create a right rotation term """
return Expr(self.program._make_right_rotation(self.term, rotation), self.program)
def __neg__(self):
""" Create a negation term """
return Expr(self.program._make_term(Op.Negate, [self.term]), self.program)
class EvaProgram(Program):
""" A wrapper for EVA's native Program class. Acts as a context manager to
set the program the Input and Output free functions operate on. """
def __init__(self, name, vec_size):
""" Create a new EvaProgram with a name and a vector size
Parameters
----------
name : str
The name of the program
vec_size : int
The number of elements in all values in the program
Must be a power-of-two
"""
super().__init__(name, vec_size)
def __enter__(self):
global _current_program
if _current_program != None:
raise RuntimeError("There is already an EVA Program in context")
_current_program = self
def __exit__(self, exc_type, exc_value, exc_traceback):
global _current_program
if _current_program != self:
raise RuntimeError("This program is not currently in context")
_current_program = None
def Input(name, is_encrypted=True):
""" Create a new named input term in the current EvaProgram
Parameters
----------
name : str
The name of the input
is_encrypted : bool, optional
Whether this input should be encrypted or not (default: True)
"""
program = _curr()
return Expr(program._make_input(name, Type.Cipher if is_encrypted else Type.Raw), program)
def Output(name, expr):
""" Create a new named output term in the current EvaProgram
Parameters
----------
name : str
The name of the output
    expr : eva.Expr, EVA native Term, list or a number
        The expression whose value is attached to this output
"""
program = _curr()
program._make_output(name, _py_to_term(expr, program))
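# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch (not part of the upstream EVA sources).
# It exercises the EvaProgram context manager together with the Input/Output free
# functions defined above; the program name 'Polynomial' and the term names 'x'/'y'
# are illustrative only.  It runs only when this module is executed directly and
# needs the compiled _eva extension that the module already imports.
if __name__ == "__main__":
    poly = EvaProgram("Polynomial", vec_size=1024)  # vec_size must be a power of two
    with poly:
        x = Input("x")                       # an encrypted input term
        Output("y", 3 * x**2 + 5 * x - 2)    # record the output expression graph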
| 36.959302
| 116
| 0.641655
|
629581506471e64bfc4802c6ae0fd3358ed1bd86
| 4,099
|
py
|
Python
|
test/functional/feature_includeconf.py
|
olivingcoin/olivingcoin
|
e34ee63da7c35c717c8045b1ed1c43562430c508
|
[
"MIT"
] | null | null | null |
test/functional/feature_includeconf.py
|
olivingcoin/olivingcoin
|
e34ee63da7c35c717c8045b1ed1c43562430c508
|
[
"MIT"
] | null | null | null |
test/functional/feature_includeconf.py
|
olivingcoin/olivingcoin
|
e34ee63da7c35c717c8045b1ed1c43562430c508
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Olivingcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (ie includeconf can only
be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import OlivingcoinTestFramework
class IncludeConfTest(OlivingcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
# Create additional config files
# - tmpdir/node0/relative.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative\n")
# - tmpdir/node0/relative2.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative2\n")
with open(os.path.join(self.options.tmpdir, "node0", "olivingcoin.conf"), "a", encoding='utf8') as f:
f.write("uacomment=main\nincludeconf=relative.conf\n")
def run_test(self):
self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.log.info("-includeconf cannot be used as command-line arg")
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
self.log.info("-includeconf cannot contain invalid arg")
# Commented out as long as we ignore invalid arguments in configuration files
#with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# f.write("foo=bar\n")
#self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Invalid configuration value foo")
self.log.info("-includeconf cannot be invalid path")
os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Failed to include configuration file relative.conf")
self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# Restore initial file contents
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "olivingcoin.conf"), "a", encoding='utf8') as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative; relative2)/")
if __name__ == '__main__':
IncludeConfTest().main()
| 49.385542
| 224
| 0.696023
|
c7298ae350d1549923ca935fd10489213bc869cd
| 23,057
|
py
|
Python
|
main.py
|
PritamKamble/ImageEditorPython
|
f7a5efb1177d0cc5368f6a59a72fc6155d1fc7cc
|
[
"MIT"
] | 1
|
2021-01-12T10:03:12.000Z
|
2021-01-12T10:03:12.000Z
|
main.py
|
2727-ask/ImageEditorPython
|
f7a5efb1177d0cc5368f6a59a72fc6155d1fc7cc
|
[
"MIT"
] | null | null | null |
main.py
|
2727-ask/ImageEditorPython
|
f7a5efb1177d0cc5368f6a59a72fc6155d1fc7cc
|
[
"MIT"
] | 1
|
2020-10-01T08:04:15.000Z
|
2020-10-01T08:04:15.000Z
|
"""
Date: 07 DEC 2018
Author: Pritam Kamble
"""
from UI.dialogs import UIdialog
import grayscale, flip, _filter, _rotation, _enhance
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
import threading
import copy
import datetime
# from multiprocessing.pool import ThreadPool
from random import choice
# constants
FILTERS = (
"BLUR", "CONTOUR", "DETAIL", "EDGE_ENHANCE", "EDGE_ENHANCE_MORE", "EMBOSS", "FIND_EDGES", "SHARPEN", "SMOOTH",
"SMOOTH_MORE")
DEGREES = ("ROTATE_30", "ROTATE_45", "ROTATE_90", "ROTATE_135", "ROTATE_180", "ROTATE_225", "ROTATE_270")
class KedClient:
def __init__(self):
self.root = Tk()
# stack to store every edit so users can undo
self.stack = []
# image enhancement variables
self.image_enhance_min_value = 0.0
self.image_enhance_max_value = 2.0
self.image_brightness = 1.0
self.image_color_balance = 1.0
self.image_sharpness = 1.0
self.image_contrast = 1.0
self.default_image = None
self.default_image_copy = None
self.init_default_image()
print(self.stack)
# widgets textVariables
self.filter_var = StringVar()
self.rotation_var = StringVar()
# initializing tkinter widgets
self.init_widgets(self.default_image_copy)
# adding widgets to grid
self.add_widgets_to_frame()
# setting root window title
self.root.title("K Editor")
# fixing root window size
self.root.resizable(width=False, height=False)
self.root.protocol("WM_DELETE_WINDOW", self.on_closing)
self.root.mainloop()
def init_default_image(self):
self.setDefaultImage("./test_images/touka.png")
self.default_image_copy = self.make_image_copy(self.default_image, None)
# resizing image to fit picture panel
self.default_image_copy = self.resize_image(self.default_image_copy, 800, 500)
def setDefaultImage(self, path):
"""method to set default image in picture panel """
self.default_image = Image.open(path)
self.stack.append(self.default_image)
def make_image_copy(self, image, image_filename):
"""method to make image copy"""
if image == None:
image_copy = Image.open(image_filename).copy()
else:
image_copy = image.copy()
return image_copy
def resize_image(self, image, *args):
"""method to resize image
param - args : (width,height)
"""
# print(args)
resized_image = image.resize(args, Image.ANTIALIAS)
return resized_image
def init_widgets(self, image):
"""function to initialize Ked widgets"""
self.content = ttk.Frame(self.root)
# widgets for picture preview
self.picture = ImageTk.PhotoImage(image)
self.picture_container = ttk.Frame(self.content, width=850, height=500, borderwidth=2, relief="sunken")
self.picture_panel = ttk.Label(self.picture_container, image=self.picture)
# menu widgets
self.menu_frame = ttk.Frame(self.content)
self.menu_frame_right = ttk.Frame(self.content)
self.grayscale_button = ttk.Button(self.menu_frame, text="Grayscale",
command=lambda: self.grayscale_button_handler())
self.flip_button = ttk.Button(self.menu_frame, text="Flip", command=lambda: self.flip_button_handler())
# image enhancing buttons
self.brightness_label = ttk.Label(self.menu_frame_right, text="Brightness")
self.brightness_button_inc = ttk.Button(self.menu_frame_right, text="+",
command=lambda: self.brightness_button_inc_handler(), width=2)
self.brightness_button_dec = ttk.Button(self.menu_frame_right, text="-",
command=lambda: self.brightness_button_dec_handler(), width=2)
self.color_balance_label = ttk.Label(self.menu_frame_right, text="Colorbalance")
self.color_balance_button_inc = ttk.Button(self.menu_frame_right, text="+",
command=lambda: self.color_balance_button_inc_handler(), width=2)
self.color_balance_button_dec = ttk.Button(self.menu_frame_right, text="-",
command=lambda: self.color_balance_button_dec_handler(), width=2)
self.sharpness_label = ttk.Label(self.menu_frame_right, text="Sharpness")
self.sharpness_button_inc = ttk.Button(self.menu_frame_right, text="+",
command=lambda: self.sharpness_button_inc_handler(), width=2)
self.sharpness_button_dec = ttk.Button(self.menu_frame_right, text="-",
command=lambda: self.sharpness_button_dec_handler(), width=2)
self.contrast_label = ttk.Label(self.menu_frame_right, text="Contrast")
self.contrast_button_inc = ttk.Button(self.menu_frame_right, text="+",
command=lambda: self.contrast_button_inc_handler(), width=2)
self.contrast_button_dec = ttk.Button(self.menu_frame_right, text="-",
command=lambda: self.contrast_button_dec_handler(), width=2)
# comboboxes
self.filter_label = ttk.Label(self.menu_frame, text="Filters: ")
self.filter_combobox = ttk.Combobox(self.menu_frame, textvariable=self.filter_var, state='readonly', width=19)
self.filter_combobox.bind('<<ComboboxSelected>>', lambda e: self.filter_combobox_event_handler())
self.filter_combobox['values'] = FILTERS
self.rotation_label = ttk.Label(self.menu_frame, text="Rotate: ")
self.rotation_combobox = ttk.Combobox(self.menu_frame, textvariable=self.rotation_var, state='readonly',
width=19)
self.rotation_combobox.bind('<<ComboboxSelected>>', lambda e1: self.rotation_combobox_event_handler())
self.rotation_combobox['values'] = DEGREES
self.image_info_button = ttk.Button(self.menu_frame, text="Image Info",
command=lambda: self.image_info_button_handler())
self.undo_button = ttk.Button(self.menu_frame, text="Undo", command=lambda: self.undo_button_handler())
self.open_file_button = ttk.Button(self.menu_frame_right, text="Open",
command=lambda: self.file_dialog_handler())
self.save_file_button = ttk.Button(self.menu_frame_right, text="Save", command=lambda: self.save_file_handler())
# message labels
self.message_label = ttk.Label(self.picture_container, text="Converting...")
def add_widgets_to_frame(self):
"""method to add widgets in grid format"""
self.content.grid(column=0, row=0)
self.picture_container.grid(column=0, row=0, padx=5, pady=5)
self.picture_panel.grid(column=0, row=0, padx=2, pady=2, sticky="nsew")
self.menu_frame.grid(column=0, row=1, padx=0, pady=5, sticky="w")
self.menu_frame_right.grid(column=1, row=0, padx=0, pady=5, sticky="n")
self.grayscale_button.grid(column=0, row=0, padx=5, pady=5)
self.flip_button.grid(column=1, row=0, padx=5, pady=5)
self.filter_label.grid(column=2, row=0, pady=5)
self.filter_combobox.grid(column=3, row=0, padx=5, pady=5, columnspan=1)
self.rotation_label.grid(column=4, row=0, pady=5)
self.rotation_combobox.grid(column=5, row=0, padx=5, pady=5)
self.image_info_button.grid(column=6, row=0, padx=5, pady=5)
self.undo_button.grid(column=7, row=0, padx=5, pady=5)
self.brightness_label.grid(row=0, column=1, padx=5, pady=5, sticky="w")
self.brightness_button_inc.grid(row=0, column=2, padx=5, pady=5, sticky="w")
self.brightness_button_dec.grid(row=0, column=3, padx=5, pady=5, sticky="w")
self.color_balance_label.grid(row=1, column=1, padx=5, pady=5, sticky="w")
self.color_balance_button_inc.grid(row=1, column=2, padx=5, pady=5, sticky="w")
self.color_balance_button_dec.grid(row=1, column=3, padx=5, pady=5, sticky="w")
self.sharpness_label.grid(row=2, column=1, padx=5, pady=5, sticky="w")
self.sharpness_button_inc.grid(row=2, column=2, padx=5, pady=5, sticky="w")
self.sharpness_button_dec.grid(row=2, column=3, padx=5, pady=5, sticky="w")
self.contrast_label.grid(row=3, column=1, padx=5, pady=5, sticky="w")
self.contrast_button_inc.grid(row=3, column=2, padx=5, pady=5, sticky="w")
self.contrast_button_dec.grid(row=3, column=3, padx=5, pady=5, sticky="w")
self.open_file_button.grid(row=4, column=1, padx=5, pady=20, columnspan=3)
self.save_file_button.grid(row=5, column=1, padx=5, pady=20, columnspan=3)
self.message_label.grid(row=0, column=0)
self.message_label.grid_forget()
def grayscale_button_handler(self):
def callback():
self.message_label.grid()
image = self.stack[len(self.stack) - 1]
image_copy = self.make_image_copy(image, None)
image_copy = grayscale.convert_to_grayscale(image_copy)
# print(image_copy)
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
self.message_label.grid_forget()
print("After converting to grayscale")
print(str(self.stack))
thread = threading.Thread(target=callback)
thread.start()
def flip_button_handler(self):
image = self.stack[len(self.stack) - 1]
image_copy = self.make_image_copy(image, None)
image_copy = flip.flip_image(image_copy)
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
print("After Flipping")
print(str(self.stack))
def filter_combobox_event_handler(self):
def callback():
self.message_label.grid()
filter_name = str(self.filter_combobox.get())
image = self.stack[len(self.stack) - 1]
image_copy = self.make_image_copy(image, None)
image_copy = _filter.apply_filter(image_copy, filter_name)
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
print("After applying filter")
print(str(self.stack))
self.message_label.grid_forget()
thread = threading.Thread(target=callback)
thread.start()
def rotation_combobox_event_handler(self):
degrees = str(self.rotation_combobox.get())
image = self.stack[len(self.stack) - 1]
image_copy = self.make_image_copy(image, None)
image_copy = _rotation.apply_rotation(image_copy, degrees)
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
print("After Rotating")
print(str(self.stack))
def image_info_button_handler(self):
UIdialog.show_image_info_dialog(self.stack[len(self.stack) - 1])
def brightness_button_inc_handler(self):
if self.image_brightness < self.image_enhance_max_value:
# get image from stack
image = self.stack[0]
            # calculate current image brightness and inc it by 0.1
self.image_brightness = self.image_brightness + 0.1
self.image_brightness = round(self.image_brightness, 1)
# make image copy
print("Brightness: {}".format(self.image_brightness))
image_copy = self.make_image_copy(image, None)
# enhance image brightness
image_copy = _enhance.enhance_all(image_copy, self.image_brightness, self.image_color_balance,
self.image_sharpness, self.image_contrast)
# update stack
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
# #for testing purposes
# print("After Increasing brightness")
# print(str(self.stack))
else:
print("Image brightness is maximum")
def brightness_button_dec_handler(self):
if self.image_brightness > self.image_enhance_min_value:
# get image from stack
image = self.stack[0]
            # calculate current image brightness and dec it by 0.1
self.image_brightness = self.image_brightness - 0.1
self.image_brightness = round(self.image_brightness, 1)
# make image copy
print("Brightness: {}".format(self.image_brightness))
image_copy = self.make_image_copy(image, None)
# enhance image brightness
image_copy = _enhance.enhance_all(image_copy, self.image_brightness, self.image_color_balance,
self.image_sharpness, self.image_contrast)
# update stack
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
# #for testing purposes
# print("After decreasing brightness")
# print(str(self.stack))
else:
# self.can_be_dec = False
print("Image brightness is minimum")
def color_balance_button_inc_handler(self):
if self.image_color_balance < self.image_enhance_max_value:
# get image from stack
image = self.stack[0]
            # calculate current image color balance and inc it by 0.1
self.image_color_balance = self.image_color_balance + 0.1
self.image_color_balance = round(self.image_color_balance, 1)
# make image copy
print("Color_balance: {}".format(self.image_color_balance))
image_copy = self.make_image_copy(image, None)
            # enhance image color balance
image_copy = _enhance.enhance_all(image_copy, self.image_brightness, self.image_color_balance,
self.image_sharpness, self.image_contrast) # update stack
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
# #for testing purposes
# print("After Increasing brightness")
# print(str(self.stack))
else:
print("Image color_balance is maximum")
def color_balance_button_dec_handler(self):
if self.image_color_balance > self.image_enhance_min_value:
# get image from stack
image = self.stack[0]
            # calculate current image color balance and dec it by 0.1
self.image_color_balance = self.image_color_balance - 0.1
self.image_color_balance = round(self.image_color_balance, 1)
# make image copy
print("Color_balance: {}".format(self.image_color_balance))
image_copy = self.make_image_copy(image, None)
# enhance color balance
image_copy = _enhance.enhance_all(image_copy, self.image_brightness, self.image_color_balance,
self.image_sharpness, self.image_contrast)
# update stack
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
# #for testing purposes
# print("After decreasing brightness")
# print(str(self.stack))
else:
# self.can_be_dec = False
print("Image color_balance is minimum")
def sharpness_button_inc_handler(self):
if self.image_sharpness < self.image_enhance_max_value:
# get image from stack
image = self.stack[0]
            # calculate current image sharpness and inc it by 0.1
self.image_sharpness = self.image_sharpness + 0.1
self.image_sharpness = round(self.image_sharpness, 1)
# make image copy
print("Sharpness: {}".format(self.image_sharpness))
image_copy = self.make_image_copy(image, None)
# enhance image sharpness
image_copy = _enhance.enhance_all(image_copy, self.image_brightness, self.image_color_balance,
self.image_sharpness, self.image_contrast)
# update stack
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
# #for testing purposes
# print("After Increasing brightness")
# print(str(self.stack))
else:
print("Image sharpness is maximum")
def sharpness_button_dec_handler(self):
if self.image_sharpness > self.image_enhance_min_value:
# get image from stack
image = self.stack[0]
            # take the current image sharpness and decrease it by 0.1
self.image_sharpness = self.image_sharpness - 0.1
self.image_sharpness = round(self.image_sharpness, 1)
# make image copy
print("Sharpness: {}".format(self.image_sharpness))
image_copy = self.make_image_copy(image, None)
            # enhance image sharpness
image_copy = _enhance.enhance_all(image_copy, self.image_brightness, self.image_color_balance,
self.image_sharpness, self.image_contrast)
# update stack
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
# #for testing purposes
# print("After decreasing brightness")
# print(str(self.stack))
else:
# self.can_be_dec = False
print("Image sharpness is minimum")
def contrast_button_inc_handler(self):
if self.image_contrast < self.image_enhance_max_value:
# get image from stack
image = self.stack[0]
            # take the current image contrast and increase it by 0.1
self.image_contrast = self.image_contrast + 0.1
self.image_contrast = round(self.image_contrast, 1)
# make image copy
print("Contrast: {}".format(self.image_contrast))
image_copy = self.make_image_copy(image, None)
            # enhance image contrast
image_copy = _enhance.enhance_all(image_copy, self.image_brightness, self.image_color_balance,
self.image_sharpness, self.image_contrast)
# update stack
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
# #for testing purposes
# print("After Increasing brightness")
# print(str(self.stack))
else:
print("Image contrast is maximum")
def contrast_button_dec_handler(self):
if self.image_contrast > self.image_enhance_min_value:
# get image from stack
image = self.stack[0]
            # take the current image contrast and decrease it by 0.1
self.image_contrast = self.image_contrast - 0.1
self.image_contrast = round(self.image_contrast, 1)
# make image copy
print("Contrast: {}".format(self.image_contrast))
image_copy = self.make_image_copy(image, None)
            # enhance image contrast
image_copy = _enhance.enhance_all(image_copy, self.image_brightness, self.image_color_balance,
self.image_sharpness, self.image_contrast)
# update stack
self.stack.append(image_copy)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
# #for testing purposes
# print("After decreasing brightness")
# print(str(self.stack))
else:
# self.can_be_dec = False
print("Image contrast is minimum")
def undo_button_handler(self):
"""method to undo changes done to image"""
if (len(self.stack) > 1):
self.stack.pop()
print(str(self.stack))
image = self.stack[len(self.stack) - 1]
image_copy = self.make_image_copy(image, None)
image_copy = self.resize_image(image_copy, 800, 500)
self.update_picture_panel(image_copy)
# print("After Undoing")
# print(str(self.stack))
else:
UIdialog.show_error_edit_image_first()
# print("undo clicked")
def file_dialog_handler(self):
"""method to handle open image dialog"""
# clear stack
self.stack.clear()
# reset image enhance variables
self.image_brightness = 1.0
self.image_color_balance = 1.0
self.image_sharpness = 1.0
self.image_contrast = 1.0
# opening image
image_filename = UIdialog.open_file_dialog()
# making image copy
image_copy = self.make_image_copy(None, image_filename)
# appending newly opened image to stack
self.stack.append(image_copy)
# resizing image to fit picture panel
image_copy = self.resize_image(image_copy, 800, 500)
# updating picture panel
self.update_picture_panel(image_copy)
def save_file_handler(self):
"""method for saving file"""
def callback():
image = self.stack[len(self.stack) - 1]
timestamp = str(int(datetime.datetime.now().timestamp()))
file = "./test_images/IMG" + timestamp
image.save(file + ".png")
print("File save successfully!")
thread = threading.Thread(target=callback)
thread.start()
def update_picture_panel(self, image):
"""method to update picture in picture panel"""
self.picture = ImageTk.PhotoImage(image)
self.picture_panel.configure(image=self.picture)
self.picture_panel.photo = self.picture
def on_closing(self):
"""method to clear stack when window is closed"""
self.stack.clear()
print(self.stack)
del self.stack[:]
self.root.destroy()
if __name__ == "__main__":
# thread = threading.Thread(target=KedClient())
# thread.start()
client = KedClient()
| 46.021956
| 120
| 0.624019
|
1f55aff0cb9306efce1493253058b82c5aca878b
| 330
|
py
|
Python
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/user/unix/__init__.py
|
bidhata/EquationGroupLeaks
|
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
|
[
"Unlicense"
] | 9
|
2019-11-22T04:58:40.000Z
|
2022-02-26T16:47:28.000Z
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/user/unix/__init__.py
|
bidhata/EquationGroupLeaks
|
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
|
[
"Unlicense"
] | null | null | null |
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/user/unix/__init__.py
|
bidhata/EquationGroupLeaks
|
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
|
[
"Unlicense"
] | 8
|
2017-09-27T10:31:18.000Z
|
2022-01-08T10:30:46.000Z
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import dsz
def IsRoot():
try:
user = dsz.process.GetCurrent()
return user == 'root'
except:
pass
return False
| 22
| 67
| 0.633333
|
9e167c64f95b7fa01e791e78ebfa5e3c26fbd0ed
| 3,297
|
py
|
Python
|
model-optimizer/extensions/ops/non_max_suppression_test.py
|
JOCh1958/openvino
|
070201feeec5550b7cf8ec5a0ffd72dc879750be
|
[
"Apache-2.0"
] | 1
|
2021-04-06T03:32:12.000Z
|
2021-04-06T03:32:12.000Z
|
model-optimizer/extensions/ops/non_max_suppression_test.py
|
JOCh1958/openvino
|
070201feeec5550b7cf8ec5a0ffd72dc879750be
|
[
"Apache-2.0"
] | 28
|
2021-09-24T09:29:02.000Z
|
2022-03-28T13:20:46.000Z
|
model-optimizer/extensions/ops/non_max_suppression_test.py
|
JOCh1958/openvino
|
070201feeec5550b7cf8ec5a0ffd72dc879750be
|
[
"Apache-2.0"
] | 1
|
2020-08-30T11:48:03.000Z
|
2020-08-30T11:48:03.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.ops.non_max_suppression import NonMaxSuppression
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, connect
class TestNonMaxSuppressionInfer(unittest.TestCase):
def setUp(self):
nodes = {
**regular_op_with_shaped_data('boxes', [10, 100, 4], {'type': 'Parameter'}),
**regular_op_with_shaped_data('scores', [10, 5, 100], {'type': 'Parameter'}),
**valued_const_with_data('max_output_per_class', int64_array(7)),
**regular_op_with_shaped_data('nms', None, {'op': 'NonMaxSuppression', 'type': 'NonMaxSuppression',
'name': 'nms'}),
**result('output'),
}
self.graph = build_graph(nodes, [
*connect('boxes', '0:nms'),
*connect('scores', '1:nms'),
*connect('max_output_per_class', '2:nms'),
*connect('nms', 'output'),
], nodes_with_edges_only=True)
def test_nms_infer_opset1(self):
nms_node = Node(self.graph, 'nms')
nms_node['version'] = 'opset1'
NonMaxSuppression.infer(nms_node)
NonMaxSuppression.type_infer(nms_node)
self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [100, 3]))
self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64)
def test_nms_infer_i64_opset3(self):
nms_node = Node(self.graph, 'nms')
nms_node['version'] = 'opset3'
nms_node['output_type'] = np.int64
NonMaxSuppression.infer(nms_node)
NonMaxSuppression.type_infer(nms_node)
self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [100, 3]))
self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64)
def test_nms_infer_i32_opset3(self):
nms_node = Node(self.graph, 'nms')
nms_node['version'] = 'opset3'
nms_node['output_type'] = np.int32
NonMaxSuppression.infer(nms_node)
NonMaxSuppression.type_infer(nms_node)
self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [100, 3]))
self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32)
def test_nms_infer_i32_opset4(self):
nms_node = Node(self.graph, 'nms')
nms_node['version'] = 'opset4'
nms_node['output_type'] = np.int32
NonMaxSuppression.infer(nms_node)
NonMaxSuppression.type_infer(nms_node)
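        # opset4 reports a static worst-case shape: batches (10) * classes (5) * max_output_boxes_per_class (7) selected triples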
self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [10 * 5 * 7, 3]))
self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32)
def test_nms_infer_i64_opset4(self):
nms_node = Node(self.graph, 'nms')
nms_node['version'] = 'opset4'
nms_node['output_type'] = np.int64
NonMaxSuppression.infer(nms_node)
NonMaxSuppression.type_infer(nms_node)
self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [10 * 5 * 7, 3]))
self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64)
| 41.2125
| 117
| 0.651501
|
d6b53fe056311af867bc1dea3ddb6867a31ba770
| 3,015
|
py
|
Python
|
tensorflow_addons/models/tests/crf_wrapper_test.py
|
old-school-kid/addons
|
825f2d24a4074334bda38e4e00d6c42c0da25029
|
[
"Apache-2.0"
] | 1,560
|
2018-11-26T23:57:34.000Z
|
2022-03-27T10:37:34.000Z
|
tensorflow_addons/models/tests/crf_wrapper_test.py
|
midsterx/addons
|
334cd7ca8fb944aab38164a13d7d2203d7c39605
|
[
"Apache-2.0"
] | 2,067
|
2018-11-28T04:40:23.000Z
|
2022-03-31T11:36:50.000Z
|
tensorflow_addons/models/tests/crf_wrapper_test.py
|
midsterx/addons
|
334cd7ca8fb944aab38164a13d7d2203d7c39605
|
[
"Apache-2.0"
] | 679
|
2018-11-27T14:39:25.000Z
|
2022-03-31T10:09:22.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CRF (Conditional Random Field) Model Wrapper."""
import os
import tempfile
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_addons.models.crf_wrapper import CRFModelWrapper
def get_test_data():
x = np.array(
[
[
# O B-X I-X B-Y I-Y
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
],
[
# O B-X I-X B-Y I-Y
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
],
]
)
y = np.array([[1, 2, 2], [1, 1, 1]]) # B-X I-X I-X # B-X B-X B-X
return x, y
@pytest.mark.usefixtures("run_with_mixed_precision_policy")
def test_training():
x_np, y_np = get_test_data()
get_some_model(x_np, y_np)
def get_some_model(x_np, y_np, sanity_check=True):
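    # the base model is a pass-through Lambda, so the test exercises only the CRF wrapper's loss and decoding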
x_input = tf.keras.layers.Input(shape=x_np.shape[1:])
y_outputs = tf.keras.layers.Lambda(lambda x: x)(x_input)
base_model = tf.keras.Model(x_input, y_outputs)
model = CRFModelWrapper(base_model, y_np.shape[-1])
model.compile("adam")
if sanity_check:
model.fit(x=x_np, y=y_np)
model.evaluate(x_np, y_np)
model.predict(x_np)
return model
def clone(model: CRFModelWrapper):
with tempfile.TemporaryDirectory() as tmpdir:
file_path = os.path.join(tmpdir, "model")
model.save(file_path)
new_model = tf.keras.models.load_model(file_path)
return new_model
def assert_all_equal(array_list1, array_list2):
for arr1, arr2 in zip(array_list1, array_list2):
np.testing.assert_equal(np.array(arr1), np.array(arr2))
def test_serialization():
x_np, y_np = get_test_data()
model = get_some_model(x_np, y_np, sanity_check=False)
new_model = clone(model)
assert_all_equal(model.predict(x_np), new_model.predict(x_np))
assert_all_equal(model.get_weights(), new_model.get_weights())
original_loss = model.train_on_batch(x_np, y_np, return_dict=True)["crf_loss"]
clone_loss = new_model.train_on_batch(x_np, y_np, return_dict=True)["crf_loss"]
assert_all_equal(model.get_weights(), new_model.get_weights())
assert original_loss == clone_loss
| 31.40625
| 83
| 0.627197
|
3c66e0bfbd4aeafebb36df2cdce16596144639f2
| 1,617
|
py
|
Python
|
sdk/deploymentmanager/azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/models/cloud_error_body_py3.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
sdk/deploymentmanager/azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/models/cloud_error_body_py3.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
sdk/deploymentmanager/azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/models/cloud_error_body_py3.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CloudErrorBody(Model):
"""Detailed error information of any failure.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar code: Error code string.
:vartype code: str
:ivar message: Descriptive error information.
:vartype message: str
:param target: Error target
:type target: str
:param details: More detailed error information.
:type details: list[~azure.mgmt.deploymentmanager.models.CloudErrorBody]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(self, *, target: str=None, details=None, **kwargs) -> None:
super(CloudErrorBody, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = target
self.details = details
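# Illustrative usage (not part of the generated file): 'code' and 'message' are read-only and
# populated by the service, so callers typically pass only 'target' and nested 'details', e.g.
#   CloudErrorBody(target='step1', details=[CloudErrorBody(target='step1/unit2')])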
| 33
| 76
| 0.584416
|
22fbebd974b65b34d770ba5c0f76306fef455e68
| 21,058
|
py
|
Python
|
tests/examples/minlplib/sssd18-06persp.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 2
|
2021-07-03T13:19:10.000Z
|
2022-02-06T10:48:13.000Z
|
tests/examples/minlplib/sssd18-06persp.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 1
|
2021-07-04T14:52:14.000Z
|
2021-07-15T10:17:11.000Z
|
tests/examples/minlplib/sssd18-06persp.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | null | null | null |
# MINLP written by GAMS Convert at 04/21/18 13:54:21
#
# Equation counts
# Total E G L N X C B
# 67 25 0 42 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 151 25 126 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 475 421 54 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b3 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b4 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b5 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b6 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b7 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b8 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b9 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b10 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b11 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b12 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b13 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b14 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b15 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b16 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b17 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b18 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b19 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b20 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b21 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b22 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b23 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b24 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b25 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b26 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b27 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b28 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b29 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b30 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b31 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b32 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b33 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b34 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b35 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b36 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b37 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b38 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b39 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b40 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b41 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b42 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b43 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b44 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b45 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b46 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b47 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b48 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b49 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b50 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b51 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b52 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b53 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b54 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b55 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b56 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b57 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b58 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b59 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b60 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b61 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b62 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b67 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b68 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b69 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b70 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b71 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b72 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b73 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b74 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b75 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b76 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b77 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b78 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b79 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b80 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b81 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b82 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b83 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b84 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b85 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b106 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b107 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b108 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b109 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b110 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b111 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b112 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b113 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b114 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b115 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b116 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b117 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b118 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b119 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b120 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b121 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b122 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b123 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b124 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b125 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b126 = Var(within=Binary,bounds=(0,1),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,None),initialize=2.18693238738188)
m.x128 = Var(within=Reals,bounds=(0,None),initialize=1.97796411576132)
m.x129 = Var(within=Reals,bounds=(0,None),initialize=1.30935949980452)
m.x130 = Var(within=Reals,bounds=(0,None),initialize=1.24466335994743)
m.x131 = Var(within=Reals,bounds=(0,None),initialize=1.88552981695056)
m.x132 = Var(within=Reals,bounds=(0,None),initialize=1.87818302003327)
m.x133 = Var(within=Reals,bounds=(0,None),initialize=0.228739544443079)
m.x134 = Var(within=Reals,bounds=(0,None),initialize=0.228739544443079)
m.x135 = Var(within=Reals,bounds=(0,None),initialize=0.228739544443079)
m.x136 = Var(within=Reals,bounds=(0,None),initialize=0.221400039184785)
m.x137 = Var(within=Reals,bounds=(0,None),initialize=0.221400039184785)
m.x138 = Var(within=Reals,bounds=(0,None),initialize=0.221400039184785)
m.x139 = Var(within=Reals,bounds=(0,None),initialize=0.188993167429519)
m.x140 = Var(within=Reals,bounds=(0,None),initialize=0.188993167429519)
m.x141 = Var(within=Reals,bounds=(0,None),initialize=0.188993167429519)
m.x142 = Var(within=Reals,bounds=(0,None),initialize=0.184832966070627)
m.x143 = Var(within=Reals,bounds=(0,None),initialize=0.184832966070627)
m.x144 = Var(within=Reals,bounds=(0,None),initialize=0.184832966070627)
m.x145 = Var(within=Reals,bounds=(0,None),initialize=0.21781439765115)
m.x146 = Var(within=Reals,bounds=(0,None),initialize=0.21781439765115)
m.x147 = Var(within=Reals,bounds=(0,None),initialize=0.21781439765115)
m.x148 = Var(within=Reals,bounds=(0,None),initialize=0.217519526145533)
m.x149 = Var(within=Reals,bounds=(0,None),initialize=0.217519526145533)
m.x150 = Var(within=Reals,bounds=(0,None),initialize=0.217519526145533)
m.obj = Objective(expr= 63.4638470839033*m.b1 + 406.464924344563*m.b2 + 281.054038749709*m.b3 + 357.899009619357*m.b4
+ 283.867227208487*m.b5 + 346.427860883825*m.b6 + 174.902031629248*m.b7 + 327.682040608985*m.b8
+ 195.408950113586*m.b9 + 411.209540848557*m.b10 + 341.151615907997*m.b11
+ 306.690501464422*m.b12 + 217.736042166853*m.b13 + 590.531921051569*m.b14
+ 469.541866006763*m.b15 + 371.170896461036*m.b16 + 301.885481955089*m.b17
+ 482.559449428939*m.b18 + 266.695094430501*m.b19 + 661.407332369201*m.b20
+ 469.726457930889*m.b21 + 365.202026294741*m.b22 + 207.423237700342*m.b23
+ 464.900263444655*m.b24 + 416.573440009268*m.b25 + 427.21293769024*m.b26
+ 421.557561337466*m.b27 + 131.588490152482*m.b28 + 195.079739824454*m.b29
+ 327.092772346777*m.b30 + 284.74538638165*m.b31 + 91.2881292105079*m.b32
+ 151.13720061786*m.b33 + 158.491236423963*m.b34 + 174.161578418524*m.b35
+ 70.9637233498753*m.b36 + 455.733220723331*m.b37 + 159.976116957465*m.b38
+ 94.4221181484321*m.b39 + 501.080276859661*m.b40 + 450.105521915833*m.b41
+ 218.986984440606*m.b42 + 754.787490214755*m.b43 + 145.720505553027*m.b44
+ 360.826762020128*m.b45 + 512.320209445762*m.b46 + 533.899656702829*m.b47
+ 217.438198555652*m.b48 + 257.356951080936*m.b49 + 469.748170208231*m.b50
+ 224.941373479115*m.b51 + 574.696478620214*m.b52 + 453.651669444504*m.b53
+ 396.680178831932*m.b54 + 355.480538495142*m.b55 + 455.001425048605*m.b56
+ 410.327875101372*m.b57 + 107.716832660101*m.b58 + 127.140023996384*m.b59
+ 331.094295675558*m.b60 + 182.462253711509*m.b61 + 460.500595074032*m.b62
+ 320.358519241588*m.b63 + 267.389834464462*m.b64 + 154.515161518257*m.b65
+ 322.544727498533*m.b66 + 33.1863391968753*m.b67 + 615.771638171722*m.b68
+ 401.573448620245*m.b69 + 502.776036957456*m.b70 + 369.539939879878*m.b71
+ 490.231458199826*m.b72 + 180.326894384108*m.b73 + 351.782220377873*m.b74
+ 230.814529409496*m.b75 + 424.244156625063*m.b76 + 357.224268091235*m.b77
+ 334.18273348498*m.b78 + 501.721049311591*m.b79 + 663.739169113737*m.b80
+ 452.23673398428*m.b81 + 920.634818812952*m.b82 + 798.472532832495*m.b83
+ 676.77410056404*m.b84 + 407.527006741593*m.b85 + 510.493559429826*m.b86
+ 468.587901001095*m.b87 + 140.053665522904*m.b88 + 171.808834000698*m.b89
+ 381.118854530951*m.b90 + 179.901289120497*m.b91 + 881.284249355185*m.b92
+ 649.077324059404*m.b93 + 661.262090699325*m.b94 + 520.002854424345*m.b95
+ 730.978694813241*m.b96 + 678.238937211925*m.b97 + 398.969088179479*m.b98
+ 483.529007052756*m.b99 + 249.519882483891*m.b100 + 342.614106364254*m.b101
+ 292.077181816541*m.b102 + 170.281172626711*m.b103 + 225.734424617283*m.b104
+ 168.147658999551*m.b105 + 104.518622131715*m.b106 + 46.8477886786758*m.b107
+ 136.089840994616*m.b108 + 310.191094*m.b109 + 117.377523177255*m.b110 + 76.582257499663*m.b111
+ 439.61435975*m.b112 + 149.716022877401*m.b113 + 92.6683043463223*m.b114 + 350.33553925*m.b115
+ 135.660413957549*m.b116 + 89.5371309630422*m.b117 + 261.032076*m.b118
+ 112.326275197259*m.b119 + 78.152225609751*m.b120 + 473.56432275*m.b121
+ 158.186763322588*m.b122 + 96.9684211447128*m.b123 + 351.54659075*m.b124
+ 129.748325387621*m.b125 + 83.6038830543306*m.b126 + 92063.4818812952*m.x127
+ 92063.4818812952*m.x128 + 92063.4818812952*m.x129 + 92063.4818812952*m.x130
+ 92063.4818812952*m.x131 + 92063.4818812952*m.x132, sense=minimize)
m.c2 = Constraint(expr= 0.669744132*m.b1 + 0.711284112*m.b7 + 0.798385084*m.b13 + 1.430176337*m.b19
+ 0.706194095*m.b25 + 0.501285943*m.b31 + 1.04003433*m.b37 + 1.252787639*m.b43
+ 1.278441868*m.b49 + 0.80906674*m.b55 + 1.021192966*m.b61 + 1.20737712*m.b67
+ 0.657698048*m.b73 + 1.314509471*m.b79 + 0.849949545*m.b85 + 1.327992452*m.b91
+ 1.118160701*m.b97 + 0.605008155*m.b103 - 2.10079896525*m.x133 - 4.2015979305*m.x134
- 6.30239689575*m.x135 == 0)
m.c3 = Constraint(expr= 0.669744132*m.b2 + 0.711284112*m.b8 + 0.798385084*m.b14 + 1.430176337*m.b20
+ 0.706194095*m.b26 + 0.501285943*m.b32 + 1.04003433*m.b38 + 1.252787639*m.b44
+ 1.278441868*m.b50 + 0.80906674*m.b56 + 1.021192966*m.b62 + 1.20737712*m.b68
+ 0.657698048*m.b74 + 1.314509471*m.b80 + 0.849949545*m.b86 + 1.327992452*m.b92
+ 1.118160701*m.b98 + 0.605008155*m.b104 - 2.1704413425*m.x136 - 4.340882685*m.x137
- 6.5113240275*m.x138 == 0)
m.c4 = Constraint(expr= 0.669744132*m.b3 + 0.711284112*m.b9 + 0.798385084*m.b15 + 1.430176337*m.b21
+ 0.706194095*m.b27 + 0.501285943*m.b33 + 1.04003433*m.b39 + 1.252787639*m.b45
+ 1.278441868*m.b51 + 0.80906674*m.b57 + 1.021192966*m.b63 + 1.20737712*m.b69
+ 0.657698048*m.b75 + 1.314509471*m.b81 + 0.849949545*m.b87 + 1.327992452*m.b93
+ 1.118160701*m.b99 + 0.605008155*m.b105 - 2.5426093695*m.x139 - 5.085218739*m.x140
- 7.6278281085*m.x141 == 0)
m.c5 = Constraint(expr= 0.669744132*m.b4 + 0.711284112*m.b10 + 0.798385084*m.b16 + 1.430176337*m.b22
+ 0.706194095*m.b28 + 0.501285943*m.b34 + 1.04003433*m.b40 + 1.252787639*m.b46
+ 1.278441868*m.b52 + 0.80906674*m.b58 + 1.021192966*m.b64 + 1.20737712*m.b70
+ 0.657698048*m.b76 + 1.314509471*m.b82 + 0.849949545*m.b88 + 1.327992452*m.b94
+ 1.118160701*m.b100 + 0.605008155*m.b106 - 2.59983815925*m.x142 - 5.1996763185*m.x143
- 7.79951447775*m.x144 == 0)
m.c6 = Constraint(expr= 0.669744132*m.b5 + 0.711284112*m.b11 + 0.798385084*m.b17 + 1.430176337*m.b23
+ 0.706194095*m.b29 + 0.501285943*m.b35 + 1.04003433*m.b41 + 1.252787639*m.b47
+ 1.278441868*m.b53 + 0.80906674*m.b59 + 1.021192966*m.b65 + 1.20737712*m.b71
+ 0.657698048*m.b77 + 1.314509471*m.b83 + 0.849949545*m.b89 + 1.327992452*m.b95
+ 1.118160701*m.b101 + 0.605008155*m.b107 - 2.20617095775*m.x145 - 4.4123419155*m.x146
- 6.61851287325*m.x147 == 0)
m.c7 = Constraint(expr= 0.669744132*m.b6 + 0.711284112*m.b12 + 0.798385084*m.b18 + 1.430176337*m.b24
+ 0.706194095*m.b30 + 0.501285943*m.b36 + 1.04003433*m.b42 + 1.252787639*m.b48
+ 1.278441868*m.b54 + 0.80906674*m.b60 + 1.021192966*m.b66 + 1.20737712*m.b72
+ 0.657698048*m.b78 + 1.314509471*m.b84 + 0.849949545*m.b90 + 1.327992452*m.b96
+ 1.118160701*m.b102 + 0.605008155*m.b108 - 2.20916166375*m.x148 - 4.4183233275*m.x149
- 6.62748499125*m.x150 == 0)
m.c8 = Constraint(expr= m.b1 + m.b2 + m.b3 + m.b4 + m.b5 + m.b6 == 1)
m.c9 = Constraint(expr= m.b7 + m.b8 + m.b9 + m.b10 + m.b11 + m.b12 == 1)
m.c10 = Constraint(expr= m.b13 + m.b14 + m.b15 + m.b16 + m.b17 + m.b18 == 1)
m.c11 = Constraint(expr= m.b19 + m.b20 + m.b21 + m.b22 + m.b23 + m.b24 == 1)
m.c12 = Constraint(expr= m.b25 + m.b26 + m.b27 + m.b28 + m.b29 + m.b30 == 1)
m.c13 = Constraint(expr= m.b31 + m.b32 + m.b33 + m.b34 + m.b35 + m.b36 == 1)
m.c14 = Constraint(expr= m.b37 + m.b38 + m.b39 + m.b40 + m.b41 + m.b42 == 1)
m.c15 = Constraint(expr= m.b43 + m.b44 + m.b45 + m.b46 + m.b47 + m.b48 == 1)
m.c16 = Constraint(expr= m.b49 + m.b50 + m.b51 + m.b52 + m.b53 + m.b54 == 1)
m.c17 = Constraint(expr= m.b55 + m.b56 + m.b57 + m.b58 + m.b59 + m.b60 == 1)
m.c18 = Constraint(expr= m.b61 + m.b62 + m.b63 + m.b64 + m.b65 + m.b66 == 1)
m.c19 = Constraint(expr= m.b67 + m.b68 + m.b69 + m.b70 + m.b71 + m.b72 == 1)
m.c20 = Constraint(expr= m.b73 + m.b74 + m.b75 + m.b76 + m.b77 + m.b78 == 1)
m.c21 = Constraint(expr= m.b79 + m.b80 + m.b81 + m.b82 + m.b83 + m.b84 == 1)
m.c22 = Constraint(expr= m.b85 + m.b86 + m.b87 + m.b88 + m.b89 + m.b90 == 1)
m.c23 = Constraint(expr= m.b91 + m.b92 + m.b93 + m.b94 + m.b95 + m.b96 == 1)
m.c24 = Constraint(expr= m.b97 + m.b98 + m.b99 + m.b100 + m.b101 + m.b102 == 1)
m.c25 = Constraint(expr= m.b103 + m.b104 + m.b105 + m.b106 + m.b107 + m.b108 == 1)
m.c26 = Constraint(expr= m.b109 + m.b110 + m.b111 <= 1)
m.c27 = Constraint(expr= m.b112 + m.b113 + m.b114 <= 1)
m.c28 = Constraint(expr= m.b115 + m.b116 + m.b117 <= 1)
m.c29 = Constraint(expr= m.b118 + m.b119 + m.b120 <= 1)
m.c30 = Constraint(expr= m.b121 + m.b122 + m.b123 <= 1)
m.c31 = Constraint(expr= m.b124 + m.b125 + m.b126 <= 1)
m.c32 = Constraint(expr= - m.b109 + m.x133 <= 0)
m.c33 = Constraint(expr= - m.b110 + m.x134 <= 0)
m.c34 = Constraint(expr= - m.b111 + m.x135 <= 0)
m.c35 = Constraint(expr= - m.b112 + m.x136 <= 0)
m.c36 = Constraint(expr= - m.b113 + m.x137 <= 0)
m.c37 = Constraint(expr= - m.b114 + m.x138 <= 0)
m.c38 = Constraint(expr= - m.b115 + m.x139 <= 0)
m.c39 = Constraint(expr= - m.b116 + m.x140 <= 0)
m.c40 = Constraint(expr= - m.b117 + m.x141 <= 0)
m.c41 = Constraint(expr= - m.b118 + m.x142 <= 0)
m.c42 = Constraint(expr= - m.b119 + m.x143 <= 0)
m.c43 = Constraint(expr= - m.b120 + m.x144 <= 0)
m.c44 = Constraint(expr= - m.b121 + m.x145 <= 0)
m.c45 = Constraint(expr= - m.b122 + m.x146 <= 0)
m.c46 = Constraint(expr= - m.b123 + m.x147 <= 0)
m.c47 = Constraint(expr= - m.b124 + m.x148 <= 0)
m.c48 = Constraint(expr= - m.b125 + m.x149 <= 0)
m.c49 = Constraint(expr= - m.b126 + m.x150 <= 0)
m.c50 = Constraint(expr=m.x133*m.b109 + m.x133*m.x127 - m.x127*m.b109 <= 0)
m.c51 = Constraint(expr=m.x134*m.b110 + m.x134*m.x127 - m.x127*m.b110 <= 0)
m.c52 = Constraint(expr=m.x135*m.b111 + m.x135*m.x127 - m.x127*m.b111 <= 0)
m.c53 = Constraint(expr=m.x136*m.b112 + m.x136*m.x128 - m.x128*m.b112 <= 0)
m.c54 = Constraint(expr=m.x137*m.b113 + m.x137*m.x128 - m.x128*m.b113 <= 0)
m.c55 = Constraint(expr=m.x138*m.b114 + m.x138*m.x128 - m.x128*m.b114 <= 0)
m.c56 = Constraint(expr=m.x139*m.b115 + m.x139*m.x129 - m.x129*m.b115 <= 0)
m.c57 = Constraint(expr=m.x140*m.b116 + m.x140*m.x129 - m.x129*m.b116 <= 0)
m.c58 = Constraint(expr=m.x141*m.b117 + m.x141*m.x129 - m.x129*m.b117 <= 0)
m.c59 = Constraint(expr=m.x142*m.b118 + m.x142*m.x130 - m.x130*m.b118 <= 0)
m.c60 = Constraint(expr=m.x143*m.b119 + m.x143*m.x130 - m.x130*m.b119 <= 0)
m.c61 = Constraint(expr=m.x144*m.b120 + m.x144*m.x130 - m.x130*m.b120 <= 0)
m.c62 = Constraint(expr=m.x145*m.b121 + m.x145*m.x131 - m.x131*m.b121 <= 0)
m.c63 = Constraint(expr=m.x146*m.b122 + m.x146*m.x131 - m.x131*m.b122 <= 0)
m.c64 = Constraint(expr=m.x147*m.b123 + m.x147*m.x131 - m.x131*m.b123 <= 0)
m.c65 = Constraint(expr=m.x148*m.b124 + m.x148*m.x132 - m.x132*m.b124 <= 0)
m.c66 = Constraint(expr=m.x149*m.b125 + m.x149*m.x132 - m.x132*m.b125 <= 0)
m.c67 = Constraint(expr=m.x150*m.b126 + m.x150*m.x132 - m.x132*m.b126 <= 0)
| 55.270341
| 120
| 0.628265
|
cf058134a69f9562b79223cc73ce74fc64cd03f1
| 18,990
|
py
|
Python
|
bins/ml/predictors/predict-from-cache-iris-classifier.py
|
jay-johnson/datanode
|
0b491332f8b74478c02c9951a997702db1b79d9e
|
[
"Apache-2.0"
] | 2
|
2017-02-02T09:08:25.000Z
|
2017-03-03T11:54:31.000Z
|
bins/ml/predictors/predict-from-cache-iris-classifier.py
|
jay-johnson/datanode
|
0b491332f8b74478c02c9951a997702db1b79d9e
|
[
"Apache-2.0"
] | null | null | null |
bins/ml/predictors/predict-from-cache-iris-classifier.py
|
jay-johnson/datanode
|
0b491332f8b74478c02c9951a997702db1b79d9e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Load common imports and system envs to build the core object
import sys, os
# For running inside the docker container use:
#import matplotlib
#matplotlib.use('Agg')
# Load the Environment:
os.environ["ENV_DEPLOYMENT_TYPE"] = "JustRedis"
from src.common.inits_for_python import *
#####################################################################
#
# Start Arg Processing:
#
action = "Predict ML Classifier from Cache"
parser = argparse.ArgumentParser(description="Parser for Action: " + str(action))
parser.add_argument('-f', '--csvfile', help='CSV File', dest='csvfile')
parser.add_argument('-n', '--dsname', help='Dataset Name', dest='ds_name')
parser.add_argument('-b', '--s3bucket', help='S3 Bucket (Optional)', dest='s_bucket')
parser.add_argument('-k', '--s3key', help='S3 Key (Optional)', dest='s_key')
parser.add_argument('-u', '--usedate', help='Use Date', dest='usedate')
parser.add_argument("-d", "--debug", help="Debug Flag", dest='debug', action='store_true')
args = parser.parse_args()
if args.debug:
debug = True
core.enable_debug()
ds_name = "iris_classifier"
if args.ds_name:
ds_name = str(args.ds_name).strip().lstrip()
now = datetime.datetime.now()
cur_date = now
cur_date_str = now.strftime("%Y-%m-%d")
if args.usedate:
cur_date_str = str(args.usedate)
send_email = "1" # by default send email
s3_bucket = "demodatasets"
s3_key = "dataset_" + str(str(ds_name).upper().strip().lstrip()) + "_" + str(cur_date_str) + ".csv"
analysis_version = 2
if args.s_bucket:
s3_bucket = str(args.s_bucket)
if args.s_key:
s3_key = str(args.s_key)
dataset_filename = "iris.csv"
ml_csv = str(os.getenv("ENV_DATA_SRC_DIR", "/opt/work/data/src")) + "/" + dataset_filename
if args.csvfile:
ml_csv = str(args.csvfile)
#
# End Arg Processing
#
#####################################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
if os.path.exists(ml_csv) == False:
if os.path.exists("/opt/work/data/examples/iris.csv"):
org_path = "/opt/work/data/examples/iris.csv"
os.system("cp " + str(org_path) + " " + ml_csv)
elif os.path.exists(os.getenv("ENV_DATANODE_REPO", "/opt/work") + "/data/examples/iris.csv"):
org_path = os.getenv("ENV_DATANODE_REPO", "/opt/work") + "/data/examples/iris.csv"
os.system("cp " + str(org_path) + " " + ml_csv)
else:
lg("Recreating iris dataset: /opt/work/bins/ml/downloaders/download_iris.py", 6)
os.system("/opt/work/bins/ml/downloaders/download_iris.py")
if os.path.exists(ml_csv) == False:
lg("Failed to recreate iris dataset with: /opt/work/bins/ml/downloaders/download_iris.py", 0)
lg("Stopping", 6)
sys.exit(1)
# end of checking if the csv file is available
lg("Processing ML Predictions for CSV(" + str(ml_csv) + ")", 6)
max_features_to_display = 10
num_estimators = 200
show_importance_plot = True
show_confusion_plot = True
random_state = 0
# For forecasting:
units_ahead_set = []
units_ahead = 0
now = datetime.datetime.now()
title_prefix = ds_name
confusion_plot_title = ds_name + " - Random Forest Confusion Matrix\nThe darker the square on the diagonal the better the predictions\n\n"
featimp_plot_title = ds_name + " - Feature Importance with Estimators(" + str(num_estimators) + ")"
row_names = [ "Actual" ] # CM - Y Axis
col_names = [ "Predictions" ] # CM - X Axis
num_jobs = 8
ranked_features = []
org_ranked_features = []
ml_type = "Predict with Filter"
ml_algo_name = "xgb-regressor"
ml_algo_name = "xgb-classifier"
price_min = 0.10
train_test_ratio = 0.1
# What column has the labeled targets as integers? (added-manually to the dataset)
target_column_name = "ResultTargetValue"
# possible values in the Target Column
target_column_values = [ "Iris-setosa", "Iris-versicolor", "Iris-virginica" ]
# What columns can the algorithms use for training and learning?
feature_column_names = [ "SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "ResultTargetValue" ]
label_column_name = "ResultLabel"
ignore_features = [ # Prune non-int/float columns as needed:
label_column_name
]
algo_nodes = []
forcast_df = None
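# ml_request is the single job spec consumed by the core.ml_* calls below: it bundles the CSV
# source, the XGBoost training parameters, and the redis cache location (Cache.RLoc) so that
# later runs can reuse the trained models instead of retraining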
ml_request = {
"MLType" : ml_type,
"MLAlgo" : {
"Name" : ml_algo_name,
"Version" : 1,
"Meta" : {
"UnitsAhead" : units_ahead,
"DatasetName" : ds_name,
"FilterMask" : None,
"Source" : {
"CSVFile" : ml_csv,
"S3File" : "", # <Bucket Name>:<Key>
"RedisKey" : "" # <App Name>:<Key>
},
},
"Steps" : {
"Train" :{
"LearningRate" : 0.1,
"NumEstimators" : 1000,
"Objective" : "reg:linear",
"MaxDepth" : 6,
"MaxDeltaStep" : 0,
"MinChildWeight" : 1,
"Gamma" : 0,
"SubSample" : 0.8,
"ColSampleByTree" : 0.8,
"ColSampleByLevel" : 1.0,
"RegAlpha" : 0,
"RegLambda" : 1,
"BaseScore" : 0.5,
"NumThreads" : -1, # infinite = -1
"ScaledPositionWeight" : 1,
"Seed" : 27,
"Debug" : True
}
},
"Cache" : {
"RLoc" : "CACHE:_MODELS_" + str(ds_name) + "_LATEST",
"UseCaches" : True
}
},
"FeatureColumnNames": feature_column_names,
"TargetColumnName" : target_column_name,
"TargetColumnValues": target_column_values,
"IgnoreFeatures" : ignore_features,
"UnitsAheadSet" : units_ahead_set,
"UnitsAheadType" : "",
"PredictionType" : "Predict",
"MaxFeatures" : 10,
"Version" : 1,
"TrackingType" : "UseTargetColAndUnits",
"TrackingName" : core.to_upper(ds_name),
"TrackingID" : "ML_" + ds_name + "_" + str(core.build_unique_key()),
"Debug" : False
}
# Load dataset to build
csv_res = core.ml_load_csv_dataset(ml_request, core.get_rds(), core.get_dbs(), debug)
if csv_res["Status"] != "SUCCESS":
lg("ERROR: Failed to Load CSV(" + str(ml_request["MLAlgo"]["Meta"]["Source"]["CSVFile"]) + ")", 0)
sys.exit(1)
ds_df = csv_res["Record"]["SourceDF"]
# Build a Filter for pruning bad records out before creating the train/test sets
samples_filter_mask = (ds_df["SepalLength"] > 0.0) \
& (ds_df["PetalWidth"] > 0.0)
# For patching on the fly you can use the encoder method to replace labels with target dictionary values:
#ready_df = core.ml_encode_target_column(ds_df, "ResultLabel", "Target")
show_pair_plot = False
if show_pair_plot:
lg("Samples(" + str(len(ds_df.index)) + ") in CSV(" + str(ml_request["MLAlgo"]["Meta"]["Source"]["CSVFile"]) + ")", 6)
lg("")
    print(ds_df.describe())
lg("")
num_per_class = ds_df.groupby("ResultLabel").size()
    print(num_per_class)
lg("")
pair_plot_req = {
"Title" : "Iris Dataset PairPlot",
"SourceDF" : ds_df[samples_filter_mask],
"Style" : "default",
"DiagKind" : "hist", # "kde" or "hist"
"HueColumnName" : ml_request["TargetColumnName"],
"XLabel" : "",
"YLabel" : "",
"CompareColumns": ml_request["FeatureColumnNames"],
"Size" : 3.0,
"ImgFile" : str(os.getenv("ENV_DATA_SRC_DIR", "/opt/work/data/src")) + "/" + "validate_jupyter_iris_classification_pairplot.png",
"ShowPlot" : True
}
lg("Plotting Validation Pair Plot - Please wait a moment...", 6)
core.sb_pairplot(pair_plot_req)
if os.path.exists(pair_plot_req["ImgFile"]):
lg("Done Plotting Valiation Pair Plot - Saved(" + str(pair_plot_req["ImgFile"]) + ")", 5)
else:
lg("Failed to save Validation Pair Plot(" + str(pair_plot_req["ImgFile"]) + "). Please check the ENV_DATA_SRC_DIR is writeable by this user and exposed to the docker container correctly.", 0)
# end of showing a pairplot for validation
# Create a Prediction Column
ml_request["MLAlgo"]["Meta"]["SamplesFilterMask"] = samples_filter_mask
# Create a Result Column
core.enable_debug()
ml_images = []
train_results = core.ml_train_models_for_predictions(ml_request, core.get_rds(), core.get_dbs(), debug)
if train_results["Status"] != "SUCCESS":
lg("ERROR: Failed to Train Models for Predictions with Error(" + str(train_results["Error"]) + ") StoppedEarly(" + str(train_results["Record"]["StoppedEarly"]) + ")", 0)
sys.exit(1)
algo_nodes = train_results["Record"]["AlgoNodes"]
predict_row = {
"SepalLength" : 5.4,
"SepalWidth" : 3.4,
"PetalLength" : 1.7,
"PetalWidth" : 0.2,
"ResultTargetValue" : 0
}
predict_row_df = pd.DataFrame(predict_row, index=[0])
predict_req = {
"AlgoNodes" : algo_nodes,
"PredictionMask": samples_filter_mask,
"PredictionRow" : predict_row_df
}
predict_results = core.ml_compile_predictions_from_models(predict_req, core.get_rds(), core.get_dbs(), debug)
if predict_results["Status"] != "SUCCESS":
lg("ERROR: Failed to Compile Predictions from Models with Error(" + str(predict_results["Error"]) + ")", 0)
sys.exit(1)
lg("Done with Predictions", 6)
if predict_results["Status"] == "SUCCESS":
al_req = train_results["Record"]
al_req["DSName"] = ml_request["TrackingName"]
al_req["Version"] = 1
al_req["FeatureColumnNames"]= ml_request["FeatureColumnNames"]
al_req["TargetColumnName"] = ml_request["TargetColumnName"]
al_req["TargetColumnValues"]= ml_request["TargetColumnValues"]
al_req["IgnoreFeatures"] = ml_request["IgnoreFeatures"]
al_req["PredictionType"] = ml_request["PredictionType"]
al_req["ConfMatrices"] = predict_results["Record"]["ConfMatrices"]
al_req["PredictionMarkers"] = predict_results["Record"]["PredictionMarkers"]
analysis_dataset = core.ml_compile_analysis_dataset(al_req, core.get_rds(), core.get_dbs(), debug)
lg("Analyzed Models(" + str(len(analysis_dataset["Models"])) + ")", 6)
lg("-----------------------------------------------------", 6)
lg("Caching Models", 6)
cache_req = {
"Name" : "CACHE",
"Key" : "_MODELS_" + str(al_req["Tracking"]["TrackingName"]) + "_LATEST",
"TrackingID": "_MD_" + str(al_req["Tracking"]["TrackingName"]),
"Analysis" : analysis_dataset
}
cache_results = core.ml_cache_analysis_and_models(cache_req, core.get_rds(), core.get_dbs(), debug)
lg("Done Caching Models", 6)
lg("-----------------------------------------------------", 6)
lg("Creating Analysis Visualizations", 6)
# Turn this False to prevent displaying images
analysis_dataset["ShowPlot"] = True
analysis_dataset["SourceDF"] = al_req["SourceDF"]
lg("Plotting Feature Importance", 6)
for midx,model_node in enumerate(analysis_dataset["Models"]):
predict_col = model_node["Target"]
if predict_col == "ResultTargetValue":
plot_req = {
"ImgFile" : analysis_dataset["FeatImpImgFile"],
"Model" : model_node["Model"],
"XLabel" : str(predict_col),
"YLabel" : "Importance Amount",
"Title" : str(predict_col) + " Importance Analysis",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_model_feature_importance(plot_req, debug)
for img in image_list:
ml_images.append(img)
# end of for all models
lg("Plotting PairPlot", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Pair Plot",
"ImgFile" : str(analysis_dataset["PairPlotImgFile"]),
"SourceDF" : al_req["SourceDF"],
"HueColumnName" : target_column_name,
"CompareColumns": feature_column_names,
"Markers" : ["o", "s", "D"],
"Width" : 15.0,
"Height" : 15.0,
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_pairplot(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting Confusion Matrices", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Confusion Matrix",
"ImgFile" : str(analysis_dataset["CMatrixImgFile"]),
"SourceDF" : al_req["SourceDF"],
"ConfMatrices" : al_req["ConfMatrices"],
"Width" : 15.0,
"Height" : 15.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_confusion_matrix(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting Scatters", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Scatter Plot",
"ImgFile" : str(analysis_dataset["ScatterImgFile"]),
"SourceDF" : analysis_dataset["SourceDF"],
"UnitsAheadType" : analysis_dataset["UnitsAheadType"],
"FeatureColumnNames": analysis_dataset["FeatureColumnNames"],
"Hue" : label_column_name,
"Width" : 7.0,
"Height" : 7.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_all_scatterplots(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting JointPlots", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Joint Plot",
"ImgFile" : str(analysis_dataset["JointPlotImgFile"]),
"SourceDF" : analysis_dataset["SourceDF"],
"UnitsAheadType" : analysis_dataset["UnitsAheadType"],
"FeatureColumnNames": analysis_dataset["FeatureColumnNames"],
"Hue" : label_column_name,
"Width" : 15.0,
"Height" : 15.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_all_jointplots(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Done Creating Analysis Visualizations", 6)
lg("-----------------------------------------------------", 6)
else:
lg("", 6)
lg("ERROR: Failed Processing Predictions for Dataset(" + str(ds_name) + ") with Error:", 6)
    lg(predict_results["Error"], 6)
lg("", 6)
sys.exit(2)
# end of if success
lg("", 6)
lg("Analysis Complete Saved Images(" + str(len(ml_images)) + ")", 5)
lg("", 6)
sys.exit(0)
| 44.787736
| 199
| 0.47367
|
bf7650586cf2e0d29d53f59133030ab79083af58
| 6,104
|
py
|
Python
|
domainbed/scripts/collect_results.py
|
zhuwenzhen/DomainBed
|
e8e8ed831bf30887675e5b3a5117d9d66d0ee46f
|
[
"MIT"
] | null | null | null |
domainbed/scripts/collect_results.py
|
zhuwenzhen/DomainBed
|
e8e8ed831bf30887675e5b3a5117d9d66d0ee46f
|
[
"MIT"
] | null | null | null |
domainbed/scripts/collect_results.py
|
zhuwenzhen/DomainBed
|
e8e8ed831bf30887675e5b3a5117d9d66d0ee46f
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import collections
import argparse
import functools
import glob
import pickle
import itertools
import json
import os
import random
import sys
import numpy as np
import tqdm
from domainbed import datasets
from domainbed import algorithms
from domainbed.lib import misc, reporting
from domainbed import model_selection
from domainbed.lib.query import Q
import warnings
def format_mean(data, latex):
"""Given a list of datapoints, return a string describing their mean and
standard error"""
if len(data) == 0:
return None, None, "X"
mean = 100 * np.mean(list(data))
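    # standard error of the mean: scaling each datapoint by 1/sqrt(n) before np.std is
    # equivalent to np.std(data) / sqrt(n)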
err = 100 * np.std(list(data) / np.sqrt(len(data)))
if latex:
return mean, err, "{:.1f} $\\pm$ {:.1f}".format(mean, err)
else:
return mean, err, "{:.1f} +/- {:.1f}".format(mean, err)
def print_table(table, header_text, row_labels, col_labels, colwidth=10, latex=True):
"""Pretty-print a 2D array of data, optionally with row/col labels"""
print("")
if latex:
num_cols = len(table[0])
print("\\begin{center}")
print("\\adjustbox{max width=\\textwidth}{%")
print("\\begin{tabular}{l" + "c" * num_cols + "}")
print("\\toprule")
else:
print("--------", header_text)
for row, label in zip(table, row_labels):
row.insert(0, label)
if latex:
col_labels = [
"\\textbf{" + str(col_label).replace("%", "\\%") + "}" for col_label in col_labels
]
table.insert(0, col_labels)
for r, row in enumerate(table):
misc.print_row(row, colwidth=colwidth, latex=latex)
if latex and r == 0:
print("\\midrule")
if latex:
print("\\bottomrule")
print("\\end{tabular}}")
print("\\end{center}")
def print_results_tables(records, selection_method, latex):
"""Given all records, print a results table for each dataset."""
grouped_records = (
reporting.get_grouped_records(records)
.map(lambda group: {**group, "sweep_acc": selection_method.sweep_acc(group["records"])})
.filter(lambda g: g["sweep_acc"] is not None)
)
# read algorithm names and sort (predefined order)
alg_names = Q(records).select("args.algorithm").unique()
alg_names = [n for n in algorithms.ALGORITHMS if n in alg_names] + [
n for n in alg_names if n not in algorithms.ALGORITHMS
]
# read dataset names and sort (lexicographic order)
dataset_names = Q(records).select("args.dataset").unique().sorted()
dataset_names = [d for d in datasets.DATASETS if d in dataset_names]
for dataset in dataset_names:
if latex:
print()
print("\\subsubsection{{{}}}".format(dataset))
test_envs = range(datasets.num_environments(dataset))
table = [[None for _ in [*test_envs, "Avg"]] for _ in alg_names]
for i, algorithm in enumerate(alg_names):
means = []
for j, test_env in enumerate(test_envs):
trial_accs = grouped_records.filter_equals(
"dataset, algorithm, test_env", (dataset, algorithm, test_env)
).select("sweep_acc")
mean, err, table[i][j] = format_mean(trial_accs, latex)
means.append(mean)
if None in means:
table[i][-1] = "X"
else:
table[i][-1] = "{:.1f}".format(sum(means) / len(means))
col_labels = ["Algorithm", *datasets.get_dataset_class(dataset).ENVIRONMENTS, "Avg"]
header_text = f"Dataset: {dataset}, " f"model selection method: {selection_method.name}"
print_table(table, header_text, alg_names, list(col_labels), colwidth=20, latex=latex)
# Print an "averages" table
if latex:
print()
print("\\subsubsection{Averages}")
table = [[None for _ in [*dataset_names, "Avg"]] for _ in alg_names]
for i, algorithm in enumerate(alg_names):
means = []
for j, dataset in enumerate(dataset_names):
trial_averages = (
grouped_records.filter_equals("algorithm, dataset", (algorithm, dataset))
.group("trial_seed")
.map(lambda trial_seed, group: group.select("sweep_acc").mean())
)
mean, err, table[i][j] = format_mean(trial_averages, latex)
means.append(mean)
if None in means:
table[i][-1] = "X"
else:
table[i][-1] = "{:.1f}".format(sum(means) / len(means))
col_labels = ["Algorithm", *dataset_names, "Avg"]
header_text = f"Averages, model selection method: {selection_method.name}"
print_table(table, header_text, alg_names, col_labels, colwidth=25, latex=latex)
if __name__ == "__main__":
np.set_printoptions(suppress=True)
parser = argparse.ArgumentParser(description="Domain generalization testbed")
parser.add_argument("--input_dir", type=str, default="")
parser.add_argument("--latex", action="store_true")
args = parser.parse_args()
results_file = "results.tex" if args.latex else "results.txt"
sys.stdout = misc.Tee(os.path.join(args.input_dir, results_file), "w")
records = reporting.load_records(args.input_dir)
if args.latex:
print("\\documentclass{article}")
print("\\usepackage{booktabs}")
print("\\usepackage{adjustbox}")
print("\\begin{document}")
print("\\section{Full DomainBed results}")
print("% Total records:", len(records))
else:
print("Total records:", len(records))
SELECTION_METHODS = [
model_selection.IIDAccuracySelectionMethod,
model_selection.LeaveOneOutSelectionMethod,
model_selection.OracleSelectionMethod,
]
for selection_method in SELECTION_METHODS:
if args.latex:
print()
print("\\subsection{{Model selection: {}}}".format(selection_method.name))
print_results_tables(records, selection_method, args.latex)
if args.latex:
print("\\end{document}")
| 34.292135
| 96
| 0.623362
|
baa921c72db2fd7ea7910b6444a7cdca85c62a14
| 7,660
|
py
|
Python
|
contrib/devtools/update-translations.py
|
defland/aither
|
35c7cef0aef09253bec92ce961463533cbf22fc1
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
defland/aither
|
35c7cef0aef09253bec92ce961463533cbf22fc1
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
defland/aither
|
35c7cef0aef09253bec92ce961463533cbf22fc1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'aither_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
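    # e.g. "%1 copied, %s remaining" -> ['1', 's'] (the character following each '%')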
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
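    # e.g. ['1', '2', 's'] -> ({'1', '2'}, ['s'])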
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
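# Hedged example (not part of the original script): demonstrates the control
# character stripping performed by remove_invalid_characters() on a made-up
# byte string. Newlines and tabs are kept; other low control bytes are dropped.
def _example_remove_invalid_characters():
    '''b'foo\\x01bar\\n' keeps the trailing newline but drops the \\x01 byte.'''
    assert remove_invalid_characters(b'foo\x01bar\n') == b'foobar\n'
    return True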
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override the encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
| 37.54902
| 124
| 0.629373
|
5dceadc3c7d630bbdf40d22ca332a98133c8c476
| 1,258
|
py
|
Python
|
app/routes/platforms/web/__init__.py
|
Tingerlink/tingerwork
|
0e3d360bf97a62e088f12aa72277200b75e43643
|
[
"MIT"
] | null | null | null |
app/routes/platforms/web/__init__.py
|
Tingerlink/tingerwork
|
0e3d360bf97a62e088f12aa72277200b75e43643
|
[
"MIT"
] | null | null | null |
app/routes/platforms/web/__init__.py
|
Tingerlink/tingerwork
|
0e3d360bf97a62e088f12aa72277200b75e43643
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Module '__init__.py' of the project 'tingerwork'
# :date_create: 10.12.2017.0:28
# :author: Tingerlink
# :description:
from flask_classy import FlaskView
from flask_classy import route, request
from routes.tools import response_builder
from routes.tools.request_validator import RequestValidator
from tools import yaml_linker as schema
class ApiWeb(FlaskView):
route_base = '/web/'
def __init__(self, group_name):
self.validator = RequestValidator()
self.group_name = group_name
self.group_schema = schema.get_methods()[group_name]
def action(self, method_name, logic, logic_args=None):
args = request.args.to_dict(flat=True)
args.update({"ip": request.remote_addr})
if logic_args:
args.update(logic_args)
error = self.validator.check(self.group_schema[method_name], args)
if error:
return response_builder.create_error_response(error.get_response_data())
result = logic(args)
if type(result) is response_builder.Error:
return response_builder.create_error_response(result.get_response_data())
return response_builder.create_response(result)
| 29.952381
| 86
| 0.68442
|
71e77f97d87979eca0398fd98bc4ec8ee26e70ab
| 10,963
|
py
|
Python
|
pandas/core/window/indexers.py
|
gabriellm1/pandas
|
020040b3b92516b445ddd8daba3b9818340e82d4
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T17:32:26.000Z
|
2020-10-29T17:32:26.000Z
|
pandas/core/window/indexers.py
|
gabriellm1/pandas
|
020040b3b92516b445ddd8daba3b9818340e82d4
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/window/indexers.py
|
gabriellm1/pandas
|
020040b3b92516b445ddd8daba3b9818340e82d4
|
[
"BSD-3-Clause"
] | 1
|
2022-03-15T14:46:56.000Z
|
2022-03-15T14:46:56.000Z
|
"""Indexer objects for computing start/end window bounds for rolling operations"""
from datetime import timedelta
from typing import Dict, Optional, Tuple, Type
import numpy as np
from pandas._libs.window.indexers import calculate_variable_window_bounds
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import ensure_platform_int
from pandas.tseries.offsets import Nano
get_window_bounds_doc = """
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
class BaseIndexer:
"""Base class for window bounds calculations."""
def __init__(
self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs
):
"""
Parameters
----------
**kwargs :
keyword arguments that will be available when get_window_bounds is called
"""
self.index_array = index_array
self.window_size = window_size
# Set user defined kwargs as attributes that can be used in get_window_bounds
for key, value in kwargs.items():
setattr(self, key, value)
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
raise NotImplementedError
class FixedWindowIndexer(BaseIndexer):
"""Creates window boundaries that are of fixed length."""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
if center:
offset = (self.window_size - 1) // 2
else:
offset = 0
end = np.arange(1 + offset, num_values + 1 + offset, dtype="int64")
start = end - self.window_size
end = np.clip(end, 0, num_values)
start = np.clip(start, 0, num_values)
return start, end
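# Hedged, illustrative sketch (not part of pandas itself): the fixed-length
# bounds computed above can be inspected directly. With window_size=3 and five
# values, each end[i] is i + 1 and each start[i] is max(0, end[i] - 3).
def _example_fixed_window_bounds():
    indexer = FixedWindowIndexer(window_size=3)
    start, end = indexer.get_window_bounds(num_values=5)
    # start == [0, 0, 0, 1, 2], end == [1, 2, 3, 4, 5]
    return start, end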
class VariableWindowIndexer(BaseIndexer):
"""Creates window boundaries that are of variable length, namely for time series."""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
return calculate_variable_window_bounds(
num_values, self.window_size, min_periods, center, closed, self.index_array
)
class VariableOffsetWindowIndexer(BaseIndexer):
"""Calculate window boundaries based on a non-fixed offset such as a BusinessDay"""
def __init__(
self,
index_array: Optional[np.ndarray] = None,
window_size: int = 0,
index=None,
offset=None,
**kwargs,
):
super().__init__(index_array, window_size, **kwargs)
self.index = index
self.offset = offset
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
        # if the window is variable, the default is 'right'; otherwise the default is 'both'
if closed is None:
closed = "right" if self.index is not None else "both"
right_closed = closed in ["right", "both"]
left_closed = closed in ["left", "both"]
if self.index[num_values - 1] < self.index[0]:
index_growth_sign = -1
else:
index_growth_sign = 1
start = np.empty(num_values, dtype="int64")
start.fill(-1)
end = np.empty(num_values, dtype="int64")
end.fill(-1)
start[0] = 0
# right endpoint is closed
if right_closed:
end[0] = 1
# right endpoint is open
else:
end[0] = 0
# start is start of slice interval (including)
# end is end of slice interval (not including)
for i in range(1, num_values):
end_bound = self.index[i]
start_bound = self.index[i] - index_growth_sign * self.offset
# left endpoint is closed
if left_closed:
start_bound -= Nano(1)
# advance the start bound until we are
# within the constraint
start[i] = i
for j in range(start[i - 1], i):
if (self.index[j] - start_bound) * index_growth_sign > timedelta(0):
start[i] = j
break
# end bound is previous end
# or current index
if (self.index[end[i - 1]] - end_bound) * index_growth_sign <= timedelta(0):
end[i] = i + 1
else:
end[i] = end[i - 1]
# right endpoint is open
if not right_closed:
end[i] -= 1
return start, end
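# Hedged usage sketch (not part of pandas itself): rolling over a non-fixed
# offset such as a BusinessDay. The frame below is made up purely for
# illustration and the import happens lazily inside the function.
def _example_variable_offset_rolling():
    import pandas as pd
    from pandas.tseries.offsets import BusinessDay

    df = pd.DataFrame(
        {"value": range(10)},
        index=pd.date_range("2020-01-01", periods=10, freq="D"),
    )
    indexer = VariableOffsetWindowIndexer(index=df.index, offset=BusinessDay(1))
    # Each window spans one business day ending at the current row.
    return df.rolling(indexer).sum()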
class ExpandingIndexer(BaseIndexer):
"""Calculate expanding window bounds, mimicking df.expanding()"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
return (
np.zeros(num_values, dtype=np.int64),
np.arange(1, num_values + 1, dtype=np.int64),
)
class FixedForwardWindowIndexer(BaseIndexer):
"""
Creates window boundaries for fixed-length windows that include the
current row.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
>>> df.rolling(window=indexer, min_periods=1).sum()
B
0 1.0
1 3.0
2 2.0
3 4.0
4 4.0
"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
if center:
raise ValueError("Forward-looking windows can't have center=True")
if closed is not None:
raise ValueError(
"Forward-looking windows don't support setting the closed argument"
)
start = np.arange(num_values, dtype="int64")
end_s = start[: -self.window_size] + self.window_size
end_e = np.full(self.window_size, num_values, dtype="int64")
end = np.concatenate([end_s, end_e])
return start, end
class GroupbyIndexer(BaseIndexer):
"""Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()"""
def __init__(
self,
index_array: Optional[np.ndarray] = None,
window_size: int = 0,
groupby_indicies: Optional[Dict] = None,
window_indexer: Type[BaseIndexer] = BaseIndexer,
indexer_kwargs: Optional[Dict] = None,
**kwargs,
):
"""
Parameters
----------
index_array : np.ndarray or None
np.ndarray of the index of the original object that we are performing
a chained groupby operation over. This index has been pre-sorted relative to
the groups
window_size : int
window size during the windowing operation
groupby_indicies : dict or None
dict of {group label: [positional index of rows belonging to the group]}
window_indexer : BaseIndexer
BaseIndexer class determining the start and end bounds of each group
indexer_kwargs : dict or None
Custom kwargs to be passed to window_indexer
**kwargs :
keyword arguments that will be available when get_window_bounds is called
"""
self.groupby_indicies = groupby_indicies or {}
self.window_indexer = window_indexer
self.indexer_kwargs = indexer_kwargs or {}
super().__init__(
index_array, self.indexer_kwargs.pop("window_size", window_size), **kwargs
)
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
# 1) For each group, get the indices that belong to the group
# 2) Use the indices to calculate the start & end bounds of the window
# 3) Append the window bounds in group order
start_arrays = []
end_arrays = []
window_indicies_start = 0
for key, indices in self.groupby_indicies.items():
if self.index_array is not None:
index_array = self.index_array.take(ensure_platform_int(indices))
else:
index_array = self.index_array
indexer = self.window_indexer(
index_array=index_array,
window_size=self.window_size,
**self.indexer_kwargs,
)
start, end = indexer.get_window_bounds(
len(indices), min_periods, center, closed
)
start = start.astype(np.int64)
end = end.astype(np.int64)
# Cannot use groupby_indicies as they might not be monotonic with the object
# we're rolling over
window_indicies = np.arange(
window_indicies_start, window_indicies_start + len(indices)
)
window_indicies_start += len(indices)
# Extend as we'll be slicing window like [start, end)
window_indicies = np.append(
window_indicies, [window_indicies[-1] + 1]
).astype(np.int64)
start_arrays.append(window_indicies.take(ensure_platform_int(start)))
end_arrays.append(window_indicies.take(ensure_platform_int(end)))
start = np.concatenate(start_arrays)
end = np.concatenate(end_arrays)
return start, end
| 31.962099
| 88
| 0.60239
|
0160bbe0aacf55c076f1610a38ff651247053404
| 629
|
py
|
Python
|
nba_data/data/matchup.py
|
jaebradley/nba_data
|
30d817bbc1c5474774f97f3800354492e382d206
|
[
"MIT"
] | 8
|
2017-01-07T13:32:16.000Z
|
2019-08-08T17:36:26.000Z
|
nba_data/data/matchup.py
|
jaebradley/nba_data
|
30d817bbc1c5474774f97f3800354492e382d206
|
[
"MIT"
] | 72
|
2016-09-01T01:21:07.000Z
|
2021-03-25T21:41:38.000Z
|
nba_data/data/matchup.py
|
jaebradley/nba_data
|
30d817bbc1c5474774f97f3800354492e382d206
|
[
"MIT"
] | 4
|
2016-12-06T10:30:59.000Z
|
2021-09-08T21:23:43.000Z
|
from team import Team
class MatchUp:
def __init__(self, home_team, away_team):
self.home_team = home_team
self.away_team = away_team
@staticmethod
def create(home_team_id, away_team_id):
return MatchUp(home_team=Team.get_team_by_id(team_id=home_team_id),
away_team=Team.get_team_by_id(team_id=away_team_id))
    # NOTE: this second create() shadows the id-based factory defined above,
    # so only the abbreviation-based variant is actually reachable on MatchUp.
    @staticmethod
    def create(home_team_abbreviation, away_team_abbreviation):
        return MatchUp(home_team=Team.get_team_by_abbreviation(home_team_abbreviation),
                       away_team=Team.get_team_by_abbreviation(away_team_abbreviation))
| 34.944444
| 87
| 0.720191
|
a95eae30ae55487c90056694f4aef1b03ea952c5
| 2,123
|
py
|
Python
|
telethon/tl/custom/forward.py
|
TgCat/Telethon
|
f9aabaca2049906a7e94dcab010bcc7a645a8278
|
[
"MIT"
] | 6,709
|
2016-09-07T07:11:00.000Z
|
2022-03-31T23:22:59.000Z
|
telethon/tl/custom/forward.py
|
TgCat/Telethon
|
f9aabaca2049906a7e94dcab010bcc7a645a8278
|
[
"MIT"
] | 3,211
|
2016-09-15T10:29:22.000Z
|
2022-03-31T15:51:33.000Z
|
telethon/tl/custom/forward.py
|
TgCat/Telethon
|
f9aabaca2049906a7e94dcab010bcc7a645a8278
|
[
"MIT"
] | 1,403
|
2016-10-09T03:07:33.000Z
|
2022-03-31T14:09:55.000Z
|
from .chatgetter import ChatGetter
from .sendergetter import SenderGetter
from ... import utils, helpers
from ...tl import types
class Forward(ChatGetter, SenderGetter):
"""
Custom class that encapsulates a :tl:`MessageFwdHeader` providing an
abstraction to easily access information like the original sender.
Remember that this class implements `ChatGetter
<telethon.tl.custom.chatgetter.ChatGetter>` and `SenderGetter
<telethon.tl.custom.sendergetter.SenderGetter>` which means you
have access to all their sender and chat properties and methods.
Attributes:
original_fwd (:tl:`MessageFwdHeader`):
The original :tl:`MessageFwdHeader` instance.
Any other attribute:
Attributes not described here are the same as those available
in the original :tl:`MessageFwdHeader`.
"""
def __init__(self, client, original, entities):
# Copy all the fields, not reference! It would cause memory cycles:
# self.original_fwd.original_fwd.original_fwd.original_fwd
# ...would be valid if we referenced.
self.__dict__.update(original.__dict__)
self.original_fwd = original
sender_id = sender = input_sender = peer = chat = input_chat = None
if original.from_id:
ty = helpers._entity_type(original.from_id)
if ty == helpers._EntityType.USER:
sender_id = utils.get_peer_id(original.from_id)
sender, input_sender = utils._get_entity_pair(
sender_id, entities, client._entity_cache)
elif ty in (helpers._EntityType.CHAT, helpers._EntityType.CHANNEL):
peer = original.from_id
chat, input_chat = utils._get_entity_pair(
utils.get_peer_id(peer), entities, client._entity_cache)
# This call resets the client
ChatGetter.__init__(self, peer, chat=chat, input_chat=input_chat)
SenderGetter.__init__(self, sender_id, sender=sender, input_sender=input_sender)
self._client = client
# TODO We could reload the message
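# Hedged usage sketch (comments only, not part of Telethon itself): a Forward
# instance is normally reached through a received message, for example inside
# an async event handler:
#
#     fwd = message.forward            # None unless the message was forwarded
#     if fwd:
#         original_sender = await fwd.get_sender()
#         original_chat = await fwd.get_chat()
#         header = fwd.original_fwd    # the raw MessageFwdHeader
#
# get_sender()/get_chat() come from the SenderGetter/ChatGetter mixins noted
# in the class docstring above.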
| 40.826923
| 88
| 0.678285
|
f80011c3fb0b6185eaba542690e5d2046d2693db
| 1,406
|
py
|
Python
|
server/service/users.py
|
hmiguellima/jogodafortuna
|
ad8008b551858c691db9ab63a4985cfaf45a6e8b
|
[
"Apache-2.0"
] | null | null | null |
server/service/users.py
|
hmiguellima/jogodafortuna
|
ad8008b551858c691db9ab63a4985cfaf45a6e8b
|
[
"Apache-2.0"
] | null | null | null |
server/service/users.py
|
hmiguellima/jogodafortuna
|
ad8008b551858c691db9ab63a4985cfaf45a6e8b
|
[
"Apache-2.0"
] | null | null | null |
# import the Bottle framework
from bottle import Bottle, request, template, redirect, abort
# Run the Bottle wsgi application. We don't need to call run() since our
# application is embedded within an App Engine WSGI application server.
bottle = Bottle()
from controllers.users import UserController
users = UserController()
@bottle.get('/service/v1/user/detail/<user_id>')
def user_detail(user_id=''):
user = users.detail(user_id)
if not user:
abort(404)
return dict(name=user.name, email=user.email)
@bottle.get('/service/v1/user/list')
def users_list():
return dict(users=users.list())
@bottle.get('/service/v1/user/edit/<user_id>')
def user_edit(user_id):
user = users.detail(user_id)
if not user:
abort(404)
return template('edit_user', dict(name=user.name, email=user.email))
@bottle.put('/service/v1/user/edit/<user_id>')
def user_edit_put(user_id):
name = request.forms.get('name')
email = request.forms.get('email')
users.edit(user_id, name, email)
return redirect('/service/v1/user/detail/%s' % user_id)
@bottle.get('/service/v1/user/create')
def user_create():
return template('create_user')
@bottle.post('/service/v1/user/create')
def user_create_post(user_id=''):
name = request.forms.get('name')
email = request.forms.get('email')
users.create(name, email)
return redirect('/service/v1/user/list')
| 27.038462
| 72
| 0.701991
|
fe75e7471e1f95b1b8433e28c95bda6cd9ca7cf8
| 14,302
|
py
|
Python
|
core/domain/subscription_services_test.py
|
Himanshu1495/oppia
|
8a3a4d6ff633aca12bbd043648a2d45ccdd583e9
|
[
"Apache-2.0"
] | null | null | null |
core/domain/subscription_services_test.py
|
Himanshu1495/oppia
|
8a3a4d6ff633aca12bbd043648a2d45ccdd583e9
|
[
"Apache-2.0"
] | null | null | null |
core/domain/subscription_services_test.py
|
Himanshu1495/oppia
|
8a3a4d6ff633aca12bbd043648a2d45ccdd583e9
|
[
"Apache-2.0"
] | 1
|
2021-09-22T10:37:34.000Z
|
2021-09-22T10:37:34.000Z
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for subscription management."""
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_domain
from core.domain import feedback_services
from core.domain import rights_manager
from core.domain import subscription_services
from core.platform import models
from core.tests import test_utils
(user_models,) = models.Registry.import_models([models.NAMES.user])
COLLECTION_ID = 'col_id'
COLLECTION_ID_2 = 'col_id_2'
EXP_ID = 'exp_id'
EXP_ID_2 = 'exp_id_2'
FEEDBACK_THREAD_ID = 'fthread_id'
FEEDBACK_THREAD_ID_2 = 'fthread_id_2'
USER_ID = 'user_id'
class SubscriptionsTest(test_utils.GenericTestBase):
"""Tests for subscription management."""
OWNER_2_EMAIL = 'owner2@example.com'
OWNER2_USERNAME = 'owner2'
def setUp(self):
super(SubscriptionsTest, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.signup(self.OWNER_2_EMAIL, self.OWNER2_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.owner_2_id = self.get_user_id_from_email(self.OWNER_2_EMAIL)
def _get_thread_ids_subscribed_to(self, user_id):
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
return (
subscriptions_model.feedback_thread_ids
if subscriptions_model else [])
def _get_exploration_ids_subscribed_to(self, user_id):
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
return (
subscriptions_model.activity_ids
if subscriptions_model else [])
def _get_collection_ids_subscribed_to(self, user_id):
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
return (
subscriptions_model.collection_ids
if subscriptions_model else [])
def test_subscribe_to_feedback_thread(self):
self.assertEqual(self._get_thread_ids_subscribed_to(USER_ID), [])
subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID)
self.assertEqual(
self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID])
# Repeated subscriptions to the same thread have no effect.
subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID)
self.assertEqual(
self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID])
subscription_services.subscribe_to_thread(
USER_ID, FEEDBACK_THREAD_ID_2)
self.assertEqual(
self._get_thread_ids_subscribed_to(USER_ID),
[FEEDBACK_THREAD_ID, FEEDBACK_THREAD_ID_2])
def test_subscribe_to_exploration(self):
self.assertEqual(self._get_exploration_ids_subscribed_to(USER_ID), [])
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID)
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID])
# Repeated subscriptions to the same exploration have no effect.
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID)
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID])
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID_2)
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID),
[EXP_ID, EXP_ID_2])
def test_get_exploration_ids_subscribed_to(self):
self.assertEqual(
subscription_services.get_exploration_ids_subscribed_to(
USER_ID), [])
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID)
self.assertEqual(
subscription_services.get_exploration_ids_subscribed_to(USER_ID),
[EXP_ID])
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID_2)
self.assertEqual(
subscription_services.get_exploration_ids_subscribed_to(USER_ID),
[EXP_ID, EXP_ID_2])
def test_thread_and_exp_subscriptions_are_tracked_individually(self):
self.assertEqual(self._get_thread_ids_subscribed_to(USER_ID), [])
subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID)
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID)
self.assertEqual(
self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID])
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID])
def test_posting_to_feedback_thread_results_in_subscription(self):
# The viewer posts a message to the thread.
message_text = 'text'
feedback_services.create_thread(
'exp_id', 'state_name', self.viewer_id, 'subject', message_text)
thread_ids_subscribed_to = self._get_thread_ids_subscribed_to(
self.viewer_id)
self.assertEqual(len(thread_ids_subscribed_to), 1)
full_thread_id = thread_ids_subscribed_to[0]
thread_id = (
feedback_domain.FeedbackThread.get_thread_id_from_full_thread_id(
full_thread_id))
self.assertEqual(
feedback_services.get_messages('exp_id', thread_id)[0].text,
message_text)
# The editor posts a follow-up message to the thread.
new_message_text = 'new text'
feedback_services.create_message(
'exp_id', thread_id, self.editor_id, '', '', new_message_text)
# The viewer and editor are now both subscribed to the thread.
self.assertEqual(
self._get_thread_ids_subscribed_to(self.viewer_id),
[full_thread_id])
self.assertEqual(
self._get_thread_ids_subscribed_to(self.editor_id),
[full_thread_id])
def test_creating_exploration_results_in_subscription(self):
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [])
exp_services.save_new_exploration(
USER_ID,
exp_domain.Exploration.create_default_exploration(
EXP_ID, 'Title', 'Category'))
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID])
def test_adding_new_exploration_owner_or_editor_role_results_in_subscription(self): # pylint: disable=line-too-long
exploration = exp_domain.Exploration.create_default_exploration(
EXP_ID, 'Title', 'Category')
exp_services.save_new_exploration(self.owner_id, exploration)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_2_id), [])
rights_manager.assign_role_for_exploration(
self.owner_id, EXP_ID, self.owner_2_id, rights_manager.ROLE_OWNER)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_2_id), [EXP_ID])
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.editor_id), [])
rights_manager.assign_role_for_exploration(
self.owner_id, EXP_ID, self.editor_id, rights_manager.ROLE_EDITOR)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.editor_id), [EXP_ID])
def test_adding_new_exploration_viewer_role_does_not_result_in_subscription(self): # pylint: disable=line-too-long
exploration = exp_domain.Exploration.create_default_exploration(
EXP_ID, 'Title', 'Category')
exp_services.save_new_exploration(self.owner_id, exploration)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.viewer_id), [])
rights_manager.assign_role_for_exploration(
self.owner_id, EXP_ID, self.viewer_id, rights_manager.ROLE_VIEWER)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.viewer_id), [])
def test_deleting_exploration_does_not_delete_subscription(self):
exploration = exp_domain.Exploration.create_default_exploration(
EXP_ID, 'Title', 'Category')
exp_services.save_new_exploration(self.owner_id, exploration)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_id), [EXP_ID])
exp_services.delete_exploration(self.owner_id, EXP_ID)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_id), [EXP_ID])
def test_subscribe_to_collection(self):
self.assertEqual(self._get_collection_ids_subscribed_to(USER_ID), [])
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID)
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID])
# Repeated subscriptions to the same collection have no effect.
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID)
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID])
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID_2)
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID),
[COLLECTION_ID, COLLECTION_ID_2])
def test_get_collection_ids_subscribed_to(self):
self.assertEqual(
subscription_services.get_collection_ids_subscribed_to(
USER_ID), [])
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID)
self.assertEqual(
subscription_services.get_collection_ids_subscribed_to(USER_ID),
[COLLECTION_ID])
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID_2)
self.assertEqual(
subscription_services.get_collection_ids_subscribed_to(USER_ID),
[COLLECTION_ID, COLLECTION_ID_2])
def test_creating_collection_results_in_subscription(self):
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID), [])
self.save_new_default_collection(COLLECTION_ID, USER_ID)
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID])
def test_adding_new_collection_owner_or_editor_role_results_in_subscription(
self):
self.save_new_default_collection(COLLECTION_ID, self.owner_id)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_2_id), [])
rights_manager.assign_role_for_collection(
self.owner_id, COLLECTION_ID, self.owner_2_id,
rights_manager.ROLE_OWNER)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_2_id),
[COLLECTION_ID])
self.assertEqual(
self._get_collection_ids_subscribed_to(self.editor_id), [])
rights_manager.assign_role_for_collection(
self.owner_id, COLLECTION_ID, self.editor_id,
rights_manager.ROLE_EDITOR)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.editor_id),
[COLLECTION_ID])
def test_adding_new_collection_viewer_role_does_not_result_in_subscription(
self):
self.save_new_default_collection(COLLECTION_ID, self.owner_id)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.viewer_id), [])
rights_manager.assign_role_for_collection(
self.owner_id, COLLECTION_ID, self.viewer_id,
rights_manager.ROLE_VIEWER)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.viewer_id), [])
def test_deleting_collection_does_not_delete_subscription(self):
self.save_new_default_collection(COLLECTION_ID, self.owner_id)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_id),
[COLLECTION_ID])
collection_services.delete_collection(self.owner_id, COLLECTION_ID)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_id),
[COLLECTION_ID])
def test_adding_exploration_to_collection_does_not_create_subscription(
self):
self.save_new_default_collection(COLLECTION_ID, self.owner_id)
# The author is subscribed to the collection but to no explorations.
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_id),
[COLLECTION_ID])
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_id), [])
# Another author creates an exploration.
self.save_new_valid_exploration(EXP_ID, self.owner_2_id)
# If the collection author adds the exploration to his/her collection,
# the collection author should not be subscribed to the exploration nor
# should the exploration author be subscribed to the collection.
collection_services.update_collection(self.owner_id, COLLECTION_ID, [{
'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
'exploration_id': EXP_ID
}], 'Add new exploration to collection.')
# Ensure subscriptions are as expected.
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_id),
[COLLECTION_ID])
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_2_id), [EXP_ID])
| 42.692537
| 119
| 0.715285
|
edc8f38b3d63d7a6de75168bddab830ea6e91017
| 5,494
|
py
|
Python
|
dataset/forced_alignment/audio.py
|
FilthyFrankTheGoanimator/Voice
|
0bf3570bd6b376c936ea9f04fc15f129e738b168
|
[
"BSD-3-Clause"
] | 556
|
2021-03-10T19:09:47.000Z
|
2022-03-30T13:45:13.000Z
|
dataset/forced_alignment/audio.py
|
FilthyFrankTheGoanimator/Voice
|
0bf3570bd6b376c936ea9f04fc15f129e738b168
|
[
"BSD-3-Clause"
] | 100
|
2021-03-14T12:35:46.000Z
|
2022-03-26T07:57:42.000Z
|
dataset/forced_alignment/audio.py
|
FilthyFrankTheGoanimator/Voice
|
0bf3570bd6b376c936ea9f04fc15f129e738b168
|
[
"BSD-3-Clause"
] | 84
|
2021-03-16T21:44:06.000Z
|
2022-03-30T21:58:28.000Z
|
import wave
import collections
from webrtcvad import Vad
DEFAULT_RATE = 16000
DEFAULT_CHANNELS = 1
DEFAULT_WIDTH = 2
DEFAULT_FORMAT = (DEFAULT_RATE, DEFAULT_CHANNELS, DEFAULT_WIDTH)
def get_num_samples(pcm_buffer_size, audio_format=DEFAULT_FORMAT):
"""
Credit: https://github.com/mozilla/DSAlign
Gets number of samples in audio file.
Parameters
----------
pcm_buffer_size : int
Size of audio PCM buffer
audio_format : tuple
Tuple containing the audio sample rate, channels & width
Returns
-------
int
Number of samples
"""
_, channels, width = audio_format
return pcm_buffer_size // (channels * width)
def get_pcm_duration(pcm_buffer_size, audio_format=DEFAULT_FORMAT):
"""
Credit: https://github.com/mozilla/DSAlign
Gets duration of audio file.
Parameters
----------
pcm_buffer_size : int
Size of audio PCM buffer
audio_format : tuple
Tuple containing the audio sample rate, channels & width
Returns
-------
float
Audio duration
"""
return get_num_samples(pcm_buffer_size, audio_format) / audio_format[0]
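# Hedged example (not part of the original module): with the default format of
# 16 kHz, mono, 16-bit audio, a 32000-byte PCM buffer holds 16000 samples,
# i.e. exactly one second of audio.
def _example_pcm_duration():
    assert get_num_samples(32000) == 16000
    assert get_pcm_duration(32000) == 1.0
    return True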
def read_frames(wav_file, frame_duration_ms=30, yield_remainder=False):
"""
Credit: https://github.com/mozilla/DSAlign
Read frames of audio file.
Parameters
----------
wav_file : wave
Opened wav file using wave
frame_duration_ms : int
Frame duration in milliseconds
yield_remainder : bool
Whether to yield remaining audio frames
Yields
-------
Audio frames
"""
frame_size = int(DEFAULT_FORMAT[0] * (frame_duration_ms / 1000.0))
    while True:
        try:
            data = wav_file.readframes(frame_size)
            if not data:
                # wave returns an empty byte string once the file is exhausted,
                # so stop here rather than yielding empty frames forever
                break
            if not yield_remainder and get_pcm_duration(len(data), DEFAULT_FORMAT) * 1000 < frame_duration_ms:
                break
            yield data
        except EOFError:
            break
def read_frames_from_file(audio_path, audio_format=DEFAULT_FORMAT, frame_duration_ms=30, yield_remainder=False):
"""
Credit: https://github.com/mozilla/DSAlign
Read frames of audio file.
Parameters
----------
audio_path : str
Path to audio file
audio_format : tuple
Tuple containing the audio sample rate, channels & width
frame_duration_ms : int
Frame duration in milliseconds
yield_remainder : bool
Whether to yield remaining audio frames
Yields
-------
Audio frames
"""
audio = wave.open(audio_path, "r")
for frame in read_frames(audio, frame_duration_ms=frame_duration_ms, yield_remainder=yield_remainder):
yield frame
def vad_split(audio_frames, audio_format=DEFAULT_FORMAT, num_padding_frames=10, threshold=0.5, aggressiveness=3):
"""
Credit: https://github.com/mozilla/DSAlign
Splits audio into segments using Voice Activity Detection.
Parameters
----------
audio_frames : list
List of audio frames
audio_format : tuple
Tuple containing the audio sample rate, channels & width
num_padding_frames : int
Number of frames to pad
threshold : float
Minimum threshold
aggressiveness : int
        Aggressiveness of the VAD split
Yields
-------
    Audio segments (tuples of voiced PCM bytes, start time in ms & end time in ms)
"""
sample_rate, channels, width = audio_format
if channels != 1:
raise ValueError("VAD-splitting requires mono samples")
if width != 2:
raise ValueError("VAD-splitting requires 16 bit samples")
if sample_rate not in [8000, 16000, 32000, 48000]:
raise ValueError("VAD-splitting only supported for sample rates 8000, 16000, 32000, or 48000")
if aggressiveness not in [0, 1, 2, 3]:
raise ValueError("VAD-splitting aggressiveness mode has to be one of 0, 1, 2, or 3")
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
vad = Vad(int(aggressiveness))
voiced_frames = []
frame_duration_ms = 0
frame_index = 0
for frame_index, frame in enumerate(audio_frames):
frame_duration_ms = get_pcm_duration(len(frame), audio_format) * 1000
if int(frame_duration_ms) not in [10, 20, 30]:
raise ValueError("VAD-splitting only supported for frame durations 10, 20, or 30 ms")
is_speech = vad.is_speech(frame, sample_rate)
if not triggered:
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
if num_voiced > threshold * ring_buffer.maxlen:
triggered = True
for f, s in ring_buffer:
voiced_frames.append(f)
ring_buffer.clear()
else:
voiced_frames.append(frame)
ring_buffer.append((frame, is_speech))
num_unvoiced = len([f for f, speech in ring_buffer if not speech])
if num_unvoiced > threshold * ring_buffer.maxlen:
triggered = False
yield b"".join(voiced_frames), frame_duration_ms * max(
0, frame_index - len(voiced_frames)
), frame_duration_ms * frame_index
ring_buffer.clear()
voiced_frames = []
if len(voiced_frames) > 0:
yield b"".join(voiced_frames), frame_duration_ms * (frame_index - len(voiced_frames)), frame_duration_ms * (
frame_index + 1
)
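# Hedged end-to-end sketch (not part of the original module): read a mono
# 16 kHz wav file and print the voiced segments found by vad_split(). The
# path below is a placeholder.
def _example_vad_split(wav_path="speech.wav"):
    frames = read_frames_from_file(wav_path)
    for segment_bytes, start_ms, end_ms in vad_split(frames):
        print("voiced segment: %.0f ms -> %.0f ms (%d bytes)"
              % (start_ms, end_ms, len(segment_bytes)))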
| 30.865169
| 116
| 0.644703
|
07584fe89dff1b627876a32eb0cf8615d2f1f4d2
| 976
|
py
|
Python
|
eventsourcing/application/django.py
|
scbabacus/eventsourcing
|
8404c5b26719ed9d9d1d257ebba774879c7243c4
|
[
"BSD-3-Clause"
] | null | null | null |
eventsourcing/application/django.py
|
scbabacus/eventsourcing
|
8404c5b26719ed9d9d1d257ebba774879c7243c4
|
[
"BSD-3-Clause"
] | null | null | null |
eventsourcing/application/django.py
|
scbabacus/eventsourcing
|
8404c5b26719ed9d9d1d257ebba774879c7243c4
|
[
"BSD-3-Clause"
] | null | null | null |
from eventsourcing.application.command import CommandProcess
from eventsourcing.application.process import ProcessApplication
from eventsourcing.application.simple import SimpleApplication
from eventsourcing.infrastructure.django.manager import DjangoRecordManager
from eventsourcing.infrastructure.django.models import StoredEventRecord
class ApplicationWithDjango(SimpleApplication):
record_manager_class = DjangoRecordManager
stored_event_record_class = StoredEventRecord
class ProcessApplicationWithDjango(ApplicationWithDjango, ProcessApplication):
pass
class CommandProcessWithDjango(ApplicationWithDjango, CommandProcess):
pass
class SimpleApplication(ApplicationWithDjango):
"""Shorter name for ApplicationWithDjango."""
class ProcessApplication(ProcessApplicationWithDjango):
"""Shorter name for ProcessApplicationWithDjango."""
class CommandProcess(CommandProcessWithDjango):
"""Shorter name for CommandProcessWithDjango."""
| 30.5
| 78
| 0.846311
|
c5f9401efdbf30db0427243f58fa7446d4aca295
| 270
|
py
|
Python
|
data_collection_app/data_collection_app/doctype/item_specification/item_specification.py
|
georgreen/prototype-app
|
9cadbc305d07b5b8832630cb4a3368b2855be5da
|
[
"MIT"
] | null | null | null |
data_collection_app/data_collection_app/doctype/item_specification/item_specification.py
|
georgreen/prototype-app
|
9cadbc305d07b5b8832630cb4a3368b2855be5da
|
[
"MIT"
] | null | null | null |
data_collection_app/data_collection_app/doctype/item_specification/item_specification.py
|
georgreen/prototype-app
|
9cadbc305d07b5b8832630cb4a3368b2855be5da
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Inquire.Solute and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ItemSpecification(Document):
pass
| 24.545455
| 53
| 0.781481
|
5da366e8bc899a357691d995528c494546ca11dc
| 2,124
|
py
|
Python
|
bgc_detection/antismash/antismash_tsv_to_csv.py
|
prihoda/bgc-pipeline-1
|
3df49f1b4c25232ce3f7e622607465c8f6a88df8
|
[
"MIT"
] | 6
|
2019-05-06T04:32:47.000Z
|
2022-01-25T03:24:10.000Z
|
bgc_detection/antismash/antismash_tsv_to_csv.py
|
prihoda/bgc-pipeline
|
29300da912fd1836eea8e285e2e50f5326f021f3
|
[
"MIT"
] | 6
|
2019-02-15T19:02:56.000Z
|
2021-07-12T08:28:48.000Z
|
bgc_detection/antismash/antismash_tsv_to_csv.py
|
prihoda/bgc-pipeline
|
29300da912fd1836eea8e285e2e50f5326f021f3
|
[
"MIT"
] | 4
|
2019-09-09T07:15:23.000Z
|
2021-07-12T07:25:04.000Z
|
#!/usr/bin/env python
# David Prihoda
# Convert an antiSMASH TSV file taken from 'txt/*_BGC.txt' file into a Candidate CSV file
import argparse
import pandas as pd
import numpy as np
def antismash_tsv_candidates(export):
"""
Convert an antiSMASH TSV file taken from 'txt/*_BGC.txt' into a Candidate DataFrame
:param export: DataFrame of antiSMASH TSV file taken from 'txt/*_BGC.txt'
:return: Candidate DataFrame
"""
candidates = pd.DataFrame()
candidates['contig_id'] = export['Contig']
candidates['nucl_start'] = export['BGC_range'].apply(lambda r: r.split(';')[0]).astype(np.int)
candidates['nucl_end'] = export['BGC_range'].apply(lambda r: r.split(';')[1]).astype(np.int)
candidates['nucl_length'] = candidates['nucl_end'] - candidates['nucl_start'] + 1
candidates['candidate_id'] = candidates.apply(lambda row: '{}:{}-{}'.format(row['contig_id'], row['nucl_start'], row['nucl_end']), axis=1)
candidates['classes'] = export['BGC type']
candidates['detection_rules'] = export['detection rules used']
candidates['genes'] = export['genes']
candidates['subclusters'] = export['subclusters']
candidates['NRPSs/PKSs'] = export['NRPSs/PKSs']
candidates['signature_genes'] = export['signature_genes']
candidates['RiPPs'] = export['RiPPs']
candidates['predicted_structure'] = export['predicted structure']
candidates['monomers'] = export['monomers']
return candidates
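# Hedged example (not part of the original script): a minimal, made-up one-row
# export frame carrying the columns antismash_tsv_candidates() expects.
def _example_candidates():
    export = pd.DataFrame([{
        'Contig': 'contig_1',
        'BGC_range': '100;900',
        'BGC type': 'nrps',
        'detection rules used': 'rule',
        'genes': 'geneA;geneB',
        'subclusters': '',
        'NRPSs/PKSs': '',
        'signature_genes': '',
        'RiPPs': '',
        'predicted structure': '',
        'monomers': '',
    }])
    candidates = antismash_tsv_candidates(export)
    # candidate_id == 'contig_1:100-900', nucl_length == 801
    return candidates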
if __name__ == "__main__":
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest="input", required=True,
help="Path to AntiSMASH txt result (merged into tsv) file path.", metavar="FILE")
parser.add_argument("-o", "--output", dest="output", required=True,
help="Output csv file path.", metavar="FILE")
options = parser.parse_args()
export = pd.read_csv(options.input, sep='\t')
candidates = antismash_tsv_candidates(export)
candidates.to_csv(options.output, index=False)
print('Saved {} candidates to {}'.format(len(candidates), options.output))
| 44.25
| 142
| 0.677495
|
2300ea4d3d5851dc31be7490cc251f576b72de66
| 131
|
py
|
Python
|
landing/views.py
|
kabloosh1234/booking-buddy
|
886c77398101a60a9617fd6d0f8b6e59321c38bb
|
[
"MIT"
] | null | null | null |
landing/views.py
|
kabloosh1234/booking-buddy
|
886c77398101a60a9617fd6d0f8b6e59321c38bb
|
[
"MIT"
] | 5
|
2020-06-05T20:34:46.000Z
|
2021-06-10T18:18:24.000Z
|
landing/views.py
|
kabloosh1234/booking-buddy
|
886c77398101a60a9617fd6d0f8b6e59321c38bb
|
[
"MIT"
] | 2
|
2021-12-24T17:06:01.000Z
|
2021-12-24T17:06:29.000Z
|
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, 'landing/index.html')
| 26.2
| 48
| 0.763359
|
66809d921087f809f3021e8bc12ac24f51714489
| 1,436
|
py
|
Python
|
AI/day01/Module_NFS/submit/mini_justi_loss.py
|
Ersikan/Pool2021
|
cc64658039dee04127a3a641f891781c53647244
|
[
"MIT"
] | 16
|
2021-03-09T10:25:18.000Z
|
2022-02-08T14:29:24.000Z
|
AI/day01/Module_NFS/submit/mini_justi_loss.py
|
Ersikan/Pool2021
|
cc64658039dee04127a3a641f891781c53647244
|
[
"MIT"
] | null | null | null |
AI/day01/Module_NFS/submit/mini_justi_loss.py
|
Ersikan/Pool2021
|
cc64658039dee04127a3a641f891781c53647244
|
[
"MIT"
] | 3
|
2021-02-10T09:32:21.000Z
|
2022-02-01T17:07:59.000Z
|
import numpy as np
import matplotlib.pyplot as plt
LR = 0.1
def f1(x, y):
return #Need Some Code
def d_f1_dx(x, y):
return #Need Some Code
def d_f1_dy(x, y):
return #Need Some Code
def f2(x):
return #Need Some Code
def d_f2_dx(x):
return #Need Some Code
def getData_f1() :
return #Need Some Code
def getData_f2():
return #Need Some Code
def loss_visualisation_f1(min_x, min_y, min_z):
    """Visualization of function f1"""
x, y = getData_f1()
X, Y = np.meshgrid(x, y)
Z = f1(X, Y)
ax = plt.subplot(121, projection='3d')
ax.contour(X, Y, Z, 100)
ax.set_xlabel('input x')
ax.set_ylabel('input y')
ax.set_zlabel('prediction (error)')
ax.view_init(30, 30)
ax.set_title('f1')
ax.plot(min_x, min_y, min_z, markersize=10,
marker='x', markeredgecolor='r')
def loss_visualisation_f2(min_x, min_y):
    """Visualization of function f2"""
x = getData_f2()
y = f2(x)
ax = plt.subplot(122)
ax.plot(x, y)
ax.set_xlabel('input')
ax.set_ylabel('prediction (error)')
ax.set_title('f2')
ax.plot(min_x, min_y, markersize=10, marker='x')
if __name__ == '__main__':
    #-- minimization of function f2 --#
    print("Minimization of function f2")
# Need Some Code
loss_visualisation_f2(1, 1)
    #-- minimization of function f1 --#
# Need Some Code
loss_visualisation_f1(1, 1, 1)
plt.show()
| 22.092308
| 52
| 0.628134
|
3cab4915f783d07a751ef743ff04900e0626f0e0
| 1,047
|
py
|
Python
|
pandajedi/jedicore/JediDBProxyPool.py
|
PanDAWMS/panda-jedi
|
e4c90563b3b9e9521cb73ccdedaa8ecaa38af5ed
|
[
"Apache-2.0"
] | 2
|
2020-04-17T10:24:09.000Z
|
2020-05-12T17:59:06.000Z
|
pandajedi/jedicore/JediDBProxyPool.py
|
PanDAWMS/panda-jedi
|
e4c90563b3b9e9521cb73ccdedaa8ecaa38af5ed
|
[
"Apache-2.0"
] | 20
|
2015-08-25T13:40:14.000Z
|
2022-03-29T12:50:46.000Z
|
pandajedi/jedicore/JediDBProxyPool.py
|
PanDAWMS/panda-jedi
|
e4c90563b3b9e9521cb73ccdedaa8ecaa38af5ed
|
[
"Apache-2.0"
] | 10
|
2015-05-27T14:01:42.000Z
|
2021-09-20T17:38:02.000Z
|
from pandaserver import taskbuffer
import taskbuffer.DBProxyPool
from . import JediDBProxy
# use customized proxy
taskbuffer.DBProxyPool.DBProxy = JediDBProxy
class DBProxyPool(taskbuffer.DBProxyPool.DBProxyPool):
# constructor
def __init__(self, dbhost, dbpasswd, nConnection, useTimeout=False):
taskbuffer.DBProxyPool.DBProxyPool.__init__(self, dbhost, dbpasswd,
nConnection, useTimeout)
# get a DBProxyObj containing a proxy
def get(self):
proxy_obj = DBProxyObj(db_proxy_pool=self)
return proxy_obj
# object of context manager for db proxy
class DBProxyObj(object):
# constructor
def __init__(self, db_proxy_pool):
self.proxy_pool = db_proxy_pool
self.proxy = None
# get proxy
def __enter__(self):
self.proxy = self.proxy_pool.getProxy()
return self.proxy
# release proxy
def __exit__(self, type, value, traceback):
self.proxy_pool.putProxy(self.proxy)
self.proxy = None
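# Hedged usage sketch (not part of the original module): the pool is meant to
# be used through the context-manager object returned by get(). The connection
# parameters below are placeholders.
def _example_pool_usage(dbhost="localhost", dbpasswd="secret"):
    pool = DBProxyPool(dbhost, dbpasswd, nConnection=1)
    with pool.get() as proxy:
        # proxy is a JediDBProxy checked out from the pool and put back
        # automatically when the block exits
        return proxy is not None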
| 28.297297
| 76
| 0.679083
|
2897a833e2614669726ca50c8d86a9ae0b53120b
| 4,829
|
py
|
Python
|
tables/dump-parsers/convert-to-postfix.py
|
yash-srivastava19/sempre
|
b27c06906da33e345c645ff9470132bf6d1c26dc
|
[
"Apache-2.0"
] | 812
|
2015-01-08T01:58:39.000Z
|
2022-03-24T02:43:05.000Z
|
tables/dump-parsers/convert-to-postfix.py
|
yash-srivastava19/sempre
|
b27c06906da33e345c645ff9470132bf6d1c26dc
|
[
"Apache-2.0"
] | 181
|
2015-01-26T21:54:04.000Z
|
2022-03-09T17:52:04.000Z
|
tables/dump-parsers/convert-to-postfix.py
|
yash-srivastava19/sempre
|
b27c06906da33e345c645ff9470132bf6d1c26dc
|
[
"Apache-2.0"
] | 314
|
2015-01-14T11:23:08.000Z
|
2022-03-07T02:36:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert LispTree formulas to Postfix.
If the file is tab-separated, only process the first column.
"""
import sys, os, shutil, re, argparse, json, gzip
from codecs import open
from itertools import izip
from collections import defaultdict
import fileinput
import lisptree
NUMBER_PREFIX = 'N'
DATE_PREFIX = 'D'
TYPE_ROW = 'type-row'
AGGREGATE = ['count', 'min', 'max', 'sum', 'avg']
MERGE_ARITH = ['and', 'or', '-']
SUPERLATIVE = ['argmin', 'argmax']
def convert(tree, args):
answer = []
u_stack = []
def recurse(subtree):
if isinstance(subtree, basestring):
answer.append(subtree)
return
if isinstance(subtree[0], basestring):
opr = subtree[0]
if opr in AGGREGATE:
assert len(subtree) == 2, str(subtree)
recurse(subtree[1])
answer.append(opr)
elif opr in MERGE_ARITH:
assert len(subtree) == 3, str(subtree)
recurse(subtree[1])
recurse(subtree[2])
answer.append(opr)
elif opr in SUPERLATIVE:
assert len(subtree) in (3, 5), str(subtree)
if len(subtree) == 3:
u, b = subtree[1], subtree[2]
else:
u, b = subtree[3], subtree[4]
if args.implicit_superlative_lambda:
assert b[0] == 'reverse'
assert b[1][0] == 'lambda'
u_stack.append(convert(u, args))
recurse(b[1][2])
answer.append(opr)
u_stack.pop()
else:
recurse(u)
recurse(b)
answer.append(opr)
elif opr == 'lambda':
assert len(subtree) == 3, str(subtree)
recurse(subtree[2])
answer.append(opr)
elif opr == 'reverse':
assert len(subtree) == 2, str(subtree)
recurse(subtree[1])
answer.append(opr)
elif opr == 'var':
assert len(subtree) == 2, str(subtree)
if args.implicit_superlative_lambda:
answer.extend(u_stack[-1])
answer.append(subtree[1])
elif opr == 'number':
assert len(subtree) == 2, str(subtree)
answer.append(NUMBER_PREFIX + subtree[1])
elif opr == 'date':
assert len(subtree) == 4, str(subtree)
answer.append(DATE_PREFIX + '-'.join(
'XX' if x == '-1' else x for x in subtree[1:4]))
else: # Join with a name
assert len(subtree) == 2, str(subtree)
if (args.collapse_type_row and
'fb:type.object.type' == opr and
'fb:type.row' == subtree[1]):
answer.append(TYPE_ROW)
else:
recurse(subtree[1])
answer.append(opr)
if not args.implicit_join:
answer.append('join')
else: # Join with a complex construct
assert len(subtree) == 2, str(subtree)
# Only allows ((reverse ...) ...)
assert subtree[0][0] == 'reverse', str(subtree)
assert len(subtree[0]) == 2, str(subtree)
recurse(subtree[1])
answer.append('!' + subtree[0][1])
if not args.implicit_join:
answer.append('join')
recurse(tree)
return answer
def process(line, args):
line = line.rstrip('\n').split('\t')
line[args.field] = ' '.join(convert(lisptree.parse(line[args.field]), args))
print '\t'.join(line)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-j', '--implicit-join', action='store_true',
help='Do not output "." for joins')
parser.add_argument('-s', '--implicit-superlative-lambda', action='store_true',
help='Do not output "lambda reverse" for superlatives')
parser.add_argument('-t', '--collapse-type-row', action='store_true',
help='Collapse "(type row)" into a single token')
parser.add_argument('-f', '--field', type=int, default=0,
help='Field index (tab-separated; 0-based) containing the logical form')
parser.add_argument('infile')
args = parser.parse_args()
if args.infile != '-':
opener = gzip.open if args.infile.endswith('.gz') else open
with opener(args.infile, 'r', 'utf8') as fin:
for line in fin:
process(line, args)
else:
for line in sys.stdin:
process(line, args)
if __name__ == '__main__':
main()
| 36.862595
| 84
| 0.512528
|
76a57e7d414b12bd5bb36e7c3b0b41fb5016b697
| 228
|
py
|
Python
|
programming-laboratory-I/yeku/aprovacao.py
|
MisaelAugusto/computer-science
|
d21335a2dc824b54ffe828370f0e6717fd0c7c27
|
[
"MIT"
] | null | null | null |
programming-laboratory-I/yeku/aprovacao.py
|
MisaelAugusto/computer-science
|
d21335a2dc824b54ffe828370f0e6717fd0c7c27
|
[
"MIT"
] | null | null | null |
programming-laboratory-I/yeku/aprovacao.py
|
MisaelAugusto/computer-science
|
d21335a2dc824b54ffe828370f0e6717fd0c7c27
|
[
"MIT"
] | null | null | null |
# coding: utf-8
unidade = int(raw_input('Unit? '))
media = float(raw_input('Passing average for the unit? '))
proxima_unidade = unidade + 1
print '\nThe student moves on to unit %i with an average of %.1f.' % (proxima_unidade, media)
| 28.5
| 81
| 0.70614
|
4547059766800df7fd23e282c7b0029e82e5208e
| 103
|
py
|
Python
|
pyweatherflowudp/__init__.py
|
briis/pyweatherflowudp
|
5a28d674aff4aac8c5cdf6c27f71eee4b98c25e3
|
[
"MIT"
] | 3
|
2021-12-22T21:53:07.000Z
|
2022-01-11T17:28:35.000Z
|
pyweatherflowudp/__init__.py
|
briis/pyweatherflowudp
|
5a28d674aff4aac8c5cdf6c27f71eee4b98c25e3
|
[
"MIT"
] | 2
|
2021-12-01T06:27:58.000Z
|
2021-12-02T08:45:32.000Z
|
pyweatherflowudp/__init__.py
|
briis/pyweatherflowudp
|
5a28d674aff4aac8c5cdf6c27f71eee4b98c25e3
|
[
"MIT"
] | 1
|
2021-12-26T18:24:13.000Z
|
2021-12-26T18:24:13.000Z
|
"""Asynchronous library to read UDP Packets from Weatherflow weather systems."""
__version__ = "1.3.0"
| 34.333333
| 80
| 0.757282
|
0ccd5385affc6a4183f443a246735b54fe87c0d7
| 15,123
|
py
|
Python
|
pykg2vec/core/KG2E.py
|
jiangnanyida/pykg2vec
|
0c125fe04b59286206045b9884ee10569182a2a5
|
[
"MIT"
] | 1
|
2020-06-26T16:50:38.000Z
|
2020-06-26T16:50:38.000Z
|
pykg2vec/core/KG2E.py
|
jiangnanyida/pykg2vec
|
0c125fe04b59286206045b9884ee10569182a2a5
|
[
"MIT"
] | null | null | null |
pykg2vec/core/KG2E.py
|
jiangnanyida/pykg2vec
|
0c125fe04b59286206045b9884ee10569182a2a5
|
[
"MIT"
] | 1
|
2020-06-26T16:50:39.000Z
|
2020-06-26T16:50:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pykg2vec.core.KGMeta import ModelMeta
class KG2E(ModelMeta):
"""`Learning to Represent Knowledge Graphs with Gaussian Embedding`_
    Instead of assuming that entities and relations are deterministic points in the
embedding vector spaces, KG2E models both entities and relations (h, r and t)
using random variables derived from multivariate Gaussian distribution.
    KG2E then scores a fact under the translational assumption by measuring the
    distance between the two distributions, r and t-h. KG2E provides two distance
    measures (KL-divergence and estimated likelihood).
Args:
config (object): Model configuration parameters.
Attributes:
config (object): Model configuration.
model_name (str): Name of the model.
        data_stats (object): Class object with knowledge graph statistics.
Examples:
>>> from pykg2vec.core.KG2E import KG2E
>>> from pykg2vec.utils.trainer import Trainer
>>> model = KG2E()
>>> trainer = Trainer(model=model, debug=False)
>>> trainer.build_model()
>>> trainer.train_model()
Portion of the code based on `this Source`_.
.. _this Source:
https://github.com/mana-ysh/gaussian-embedding/blob/master/src/models/gaussian_model.py
.. _Learning to Represent Knowledge Graphs with Gaussian Embedding:
https://pdfs.semanticscholar.org/0ddd/f37145689e5f2899f8081d9971882e6ff1e9.pdf
"""
def __init__(self, config=None):
self.config = config
self.data_stats = self.config.kg_meta
if self.config.distance_measure == "expected_likelihood":
self.model_name = 'KG2E_EL'
else:
self.model_name = 'KG2E_KL'
def def_inputs(self):
"""Defines the inputs to the model.
Attributes:
pos_h (Tensor): Positive Head entities ids.
pos_r (Tensor): Positive Relation ids of the triple.
pos_t (Tensor): Positive Tail entity ids of the triple.
neg_h (Tensor): Negative Head entities ids.
neg_r (Tensor): Negative Relation ids of the triple.
neg_t (Tensor): Negative Tail entity ids of the triple.
test_h_batch (Tensor): Batch of head ids for testing.
test_r_batch (Tensor): Batch of relation ids for testing
test_t_batch (Tensor): Batch of tail ids for testing.
"""
self.pos_h = tf.placeholder(tf.int32, [None])
self.pos_t = tf.placeholder(tf.int32, [None])
self.pos_r = tf.placeholder(tf.int32, [None])
self.neg_h = tf.placeholder(tf.int32, [None])
self.neg_t = tf.placeholder(tf.int32, [None])
self.neg_r = tf.placeholder(tf.int32, [None])
self.test_h_batch = tf.placeholder(tf.int32, [None])
self.test_t_batch = tf.placeholder(tf.int32, [None])
self.test_r_batch = tf.placeholder(tf.int32, [None])
def def_parameters(self):
"""Defines the model parameters.
Attributes:
num_total_ent (int): Total number of entities.
num_total_rel (int): Total number of relations.
            k (Tensor): Size of the latent dimension for entities and relations.
ent_embeddings_mu (Tensor Variable): Lookup variable containing mean of embedding of the entities.
rel_embeddings_mu (Tensor Variable): Lookup variable containing mean embedding of the relations.
ent_embeddings_sigma (Tensor Variable): Lookup variable containing variance of embedding of the entities.
rel_embeddings_sigma (Tensor Variable): Lookup variable containing variance embedding of the relations.
parameter_list (list): List of Tensor parameters.
"""
num_total_ent = self.data_stats.tot_entity
num_total_rel = self.data_stats.tot_relation
k = self.config.hidden_size
with tf.name_scope("embedding"):
# the mean for each element in the embedding space.
self.ent_embeddings_mu = tf.get_variable(name="ent_embeddings_mu", shape=[num_total_ent, k],
initializer=tf.contrib.layers.xavier_initializer(uniform=True))
self.rel_embeddings_mu = tf.get_variable(name="rel_embeddings_mu", shape=[num_total_rel, k],
initializer=tf.contrib.layers.xavier_initializer(uniform=True))
# as the paper suggested, sigma is simplified to be the diagonal element in the covariance matrix.
self.ent_embeddings_sigma = tf.get_variable(name="ent_embeddings_sigma", shape=[num_total_ent, k],
initializer=tf.contrib.layers.xavier_initializer(uniform=True))
self.rel_embeddings_sigma = tf.get_variable(name="rel_embeddings_sigma", shape=[num_total_rel, k],
initializer=tf.contrib.layers.xavier_initializer(uniform=True))
self.parameter_list = [self.ent_embeddings_mu, self.ent_embeddings_sigma,
self.rel_embeddings_mu, self.rel_embeddings_sigma]
self.ent_embeddings_sigma = tf.maximum(self.config.cmin,
tf.minimum(self.config.cmax, (self.ent_embeddings_sigma + 1.0)))
self.rel_embeddings_sigma = tf.maximum(self.config.cmin,
tf.minimum(self.config.cmax, (self.rel_embeddings_sigma + 1.0)))
def def_loss(self):
"""Defines the loss function for the algorithm."""
pos_h_mu, pos_h_sigma, pos_r_mu, pos_r_sigma, pos_t_mu, pos_t_sigma = self.get_embed_guassian(self.pos_h,
self.pos_r,
self.pos_t)
neg_h_mu, neg_h_sigma, neg_r_mu, neg_r_sigma, neg_t_mu, neg_t_sigma = self.get_embed_guassian(self.neg_h,
self.neg_r,
self.neg_t)
if self.config.distance_measure == "expected_likelihood":
score_pos = self.cal_score_expected_likelihood(pos_h_mu, pos_h_sigma, pos_r_mu, pos_r_sigma, pos_t_mu,
pos_t_sigma)
score_neg = self.cal_score_expected_likelihood(neg_h_mu, neg_h_sigma, neg_r_mu, neg_r_sigma, neg_t_mu,
neg_t_sigma)
else:
score_pos = self.cal_score_kl_divergence(pos_h_mu, pos_h_sigma, pos_r_mu, pos_r_sigma, pos_t_mu,
pos_t_sigma)
score_neg = self.cal_score_kl_divergence(neg_h_mu, neg_h_sigma, neg_r_mu, neg_r_sigma, neg_t_mu,
neg_t_sigma)
self.loss = tf.reduce_sum(tf.maximum(score_pos + self.config.margin - score_neg, 0))
def cal_score_kl_divergence(self, h_mu, h_sigma, r_mu, r_sigma, t_mu, t_sigma):
""" It calculates the kl_divergence as a score.
trace_fac: tr(sigma_r-1 * (sigma_h + sigma_t))
mul_fac: (mu_h + mu_r - mu_t).T * sigma_r-1 * (mu_h + mu_r - mu_t)
det_fac: log(det(sigma_r)/det(sigma_h + sigma_t))
Args:
h_mu (Tensor): Mean of the embedding value of the head.
h_sigma(Tensor): Variance of the embedding value of the head.
r_mu(Tensor): Mean of the embedding value of the relation.
r_sigma(Tensor): Variance of the embedding value of the relation.
t_mu(Tensor): Mean of the embedding value of the tail.
t_sigma(Tensor): Variance of the embedding value of the tail.
Returns:
Tensor: Score after calculating the KL_Divergence.
"""
trace_fac = tf.reduce_sum((h_sigma + t_sigma) / r_sigma, -1)
mul_fac = tf.reduce_sum((- h_mu + t_mu - r_mu) ** 2 / r_sigma, -1)
det_fac = tf.reduce_sum(tf.log(h_sigma + t_sigma) - tf.log(r_sigma), -1)
return trace_fac + mul_fac - det_fac - self.config.hidden_size
def cal_score_expected_likelihood(self, h_mu, h_sigma, r_mu, r_sigma, t_mu, t_sigma):
""" It calculates the expected likelihood as a score.
mul_fac: (mu_h + mu_r - mu_t).T * sigma_r-1 * (mu_h + mu_r - mu_t)
det_fac: log(det(sigma_r + sigma_h + sigma_t))
Args:
h_mu (Tensor): Mean of the embedding value of the head.
h_sigma(Tensor): Variance of the embedding value of the head.
r_mu(Tensor): Mean of the embedding value of the relation.
r_sigma(Tensor): Variance of the embedding value of the relation.
t_mu(Tensor): Mean of the embedding value of the tail.
t_sigma(Tensor): Variance of the embedding value of the tail.
Returns:
Tensor: Score after calculating the expected likelihood.
"""
mul_fac = tf.reduce_sum((h_mu + r_mu - t_mu) ** 2 / (h_sigma + r_sigma + t_sigma), -1)
det_fac = tf.reduce_sum(tf.log(h_sigma + r_sigma + t_sigma), -1)
return mul_fac + det_fac - self.config.hidden_size
def test_batch(self):
"""Function that performs batch testing for the algorithm.
Returns:
Tensors: Returns ranks of head and tail.
"""
test_h_mu, test_h_sigma, test_r_mu, test_r_sigma, test_t_mu, test_t_sigma = self.get_embed_guassian(
self.test_h_batch,
self.test_r_batch,
self.test_t_batch)
test_h_mu = tf.expand_dims(test_h_mu, axis=1)
test_h_sigma = tf.expand_dims(test_h_sigma, axis=1)
test_r_mu = tf.expand_dims(test_r_mu, axis=1)
test_r_sigma = tf.expand_dims(test_r_sigma, axis=1)
test_t_mu = tf.expand_dims(test_t_mu, axis=1)
test_t_sigma = tf.expand_dims(test_t_sigma, axis=1)
norm_ent_embeddings_mu = tf.nn.l2_normalize(self.ent_embeddings_mu, axis=1)
norm_ent_embeddings_sigma = tf.nn.l2_normalize(self.ent_embeddings_sigma, axis=1)
if self.config.distance_measure == "expected_likelihood":
score_head = self.cal_score_expected_likelihood(norm_ent_embeddings_mu, norm_ent_embeddings_sigma, \
test_r_mu, test_r_sigma, \
test_t_mu, test_t_sigma)
score_tail = self.cal_score_expected_likelihood(test_h_mu, test_h_sigma, \
test_r_mu, test_r_sigma, \
norm_ent_embeddings_mu, norm_ent_embeddings_sigma)
else:
score_head = self.cal_score_kl_divergence(norm_ent_embeddings_mu, norm_ent_embeddings_sigma, \
test_r_mu, test_r_sigma, \
test_t_mu, test_t_sigma)
score_tail = self.cal_score_kl_divergence(test_h_mu, test_h_sigma, \
test_r_mu, test_r_sigma, \
norm_ent_embeddings_mu, norm_ent_embeddings_sigma)
_, head_rank = tf.nn.top_k(score_head, k=self.data_stats.tot_entity)
_, tail_rank = tf.nn.top_k(score_tail, k=self.data_stats.tot_entity)
return head_rank, tail_rank
def embed(self, h, r, t):
"""Function to get the embedding value.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
Returns:
Tensors: Returns head, relation and tail embedding Tensors.
"""
        # Use the mean embeddings for the plain lookup; this model stores its
        # entity and relation representations as (mean, variance) pairs.
        norm_ent_embeddings = tf.nn.l2_normalize(self.ent_embeddings_mu, axis=1)
        norm_rel_embeddings = tf.nn.l2_normalize(self.rel_embeddings_mu, axis=1)
emb_h = tf.nn.embedding_lookup(norm_ent_embeddings, h)
emb_r = tf.nn.embedding_lookup(norm_rel_embeddings, r)
emb_t = tf.nn.embedding_lookup(norm_ent_embeddings, t)
return emb_h, emb_r, emb_t
def get_embed_guassian(self, h, r, t):
"""Function to get the embedding.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
Returns:
Tensors: Returns head, relation and tail embedding Tensors.
"""
norm_ent_embeddings_mu = tf.nn.l2_normalize(self.ent_embeddings_mu, axis=1)
norm_rel_embeddings_mu = tf.nn.l2_normalize(self.rel_embeddings_mu, axis=1)
norm_ent_embeddings_sigma = tf.nn.l2_normalize(self.ent_embeddings_sigma, axis=1)
norm_rel_embeddings_sigma = tf.nn.l2_normalize(self.rel_embeddings_sigma, axis=1)
emb_h_mu = tf.nn.embedding_lookup(norm_ent_embeddings_mu, h)
emb_r_mu = tf.nn.embedding_lookup(norm_rel_embeddings_mu, r)
emb_t_mu = tf.nn.embedding_lookup(norm_ent_embeddings_mu, t)
emb_h_sigma = tf.nn.embedding_lookup(norm_ent_embeddings_sigma, h)
emb_r_sigma = tf.nn.embedding_lookup(norm_rel_embeddings_sigma, r)
emb_t_sigma = tf.nn.embedding_lookup(norm_ent_embeddings_sigma, t)
return emb_h_mu, emb_h_sigma, emb_r_mu, emb_r_sigma, emb_t_mu, emb_t_sigma
def get_embed(self, h, r, t, sess):
"""Function to get the embedding value in numpy.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
sess (object): Tensorflow Session object.
Returns:
Tensors: Returns head, relation and tail embedding Tensors.
"""
emb_h, emb_r, emb_t = self.embed(h, r, t)
h, r, t = sess.run([emb_h, emb_r, emb_t])
return h, r, t
def get_proj_embed(self, h, r, t, sess=None):
""""Function to get the projected embedding value in numpy.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
sess (object): Tensorflow Session object.
Returns:
Tensors: Returns head, relation and tail embedding Tensors.
"""
return self.get_embed(h, r, t, sess)
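# Illustrative sketch, for reference only: the KL-divergence score above
# reduces, per triple, to trace_fac + mul_fac - det_fac - k over the diagonal
# covariances. The helper below mirrors that arithmetic with plain NumPy on
# toy vectors so each term can be inspected in isolation; it is never called
# by the model and its name is purely illustrative.
def _kl_score_numpy_sketch(k=4, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    h_mu, r_mu, t_mu = rng.randn(3, k)
    # keep the variances strictly positive, mirroring the cmin/cmax clipping above
    h_sigma, r_sigma, t_sigma = 0.5 + rng.rand(3, k)
    trace_fac = np.sum((h_sigma + t_sigma) / r_sigma)
    mul_fac = np.sum((-h_mu + t_mu - r_mu) ** 2 / r_sigma)
    det_fac = np.sum(np.log(h_sigma + t_sigma) - np.log(r_sigma))
    return trace_fac + mul_fac - det_fac - k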
| 49.421569
| 121
| 0.598823
|
8eb8d5af4010786f6fdbbeb678e1b007ae6dafc8
| 2,072
|
py
|
Python
|
DQMOffline/EGamma/test/PhotonAnalyzer_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DQMOffline/EGamma/test/PhotonAnalyzer_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DQMOffline/EGamma/test/PhotonAnalyzer_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("photonAnalysis")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("DQMOffline.EGamma.photonAnalyzer_cfi")
process.load("DQMOffline.EGamma.zmumugammaAnalyzer_cfi")
process.load("DQMOffline.EGamma.photonOfflineClient_cfi")
process.load("DQMServices.Components.MEtoEDMConverter_cff")
process.load("DQMServices.Components.DQMStoreStats_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
DQMStore = cms.Service("DQMStore")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
#process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_7_0_0_pre2/RelValZEE/GEN-SIM-DIGI-RECO/PRE_ST62_V8_FastSim-v1/00000/0229B33C-E10F-E311-9C16-002618943829.root'
# '/store/relval/CMSSW_7_0_0_pre2/RelValH130GGgluonfusion/GEN-SIM-DIGI-RECO/PRE_ST62_V8_FastSim-v1/00000/2EB245F1-A30F-E311-80ED-0025905938A4.root'
))
from DQMOffline.EGamma.photonAnalyzer_cfi import *
photonAnalysis.Verbosity = cms.untracked.int32(0)
photonAnalysis.useTriggerFiltering = cms.bool(False)
#photonAnalysis.standAlone = cms.bool(True)
from DQMOffline.EGamma.photonOfflineClient_cfi import *
photonOfflineClient.standAlone = cms.bool(True)
#from DQMServices.Components.DQMStoreStats_cfi import *
#dqmStoreStats.runOnEndRun = cms.untracked.bool(False)
#dqmStoreStats.runOnEndJob = cms.untracked.bool(True)
#process.p1 = cms.Path(process.photonAnalysis)
#process.p1 = cms.Path(process.photonAnalysis*process.dqmStoreStats)
process.p1 = cms.Path(process.photonAnalysis*process.zmumugammaAnalysis*process.photonOfflineClient*process.dqmStoreStats)
#process.p1 = cms.Path(process.photonAnalysis*process.photonOfflineClient*process.dqmStoreStats)
process.schedule = cms.Schedule(process.p1)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.MessageLogger = cms.Service("MessageLogger")
| 37.672727
| 147
| 0.821911
|
b5272ac48a746c06f1094ab3cf87b436aee5d60d
| 4,006
|
py
|
Python
|
configs/ttfnext/ttfnext_r18_search_gumbel_lr5e4_3e4wd_nons_0_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/ttfnext/ttfnext_r18_search_gumbel_lr5e4_3e4wd_nons_0_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/ttfnext/ttfnext_r18_search_gumbel_lr5e4_3e4wd_nons_0_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
# model settings
model = dict(
type='TTFNeXt',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFXHead',
search_k=4,
search_op_gumbel_softmax=True,
search_edge_gumbel_softmax=True,
inplanes=(64, 128, 256, 512),
planes=(64, 128, 256, 512),
ops_list=('skip_connect', 'conv_1x1', 'conv_3x3', 'sep_conv_3x3',
'sep_conv_5x5', 'sep_conv_7x7', 'dil_conv_3x3', 'dil_conv_5x5',
'mdcn_3x3'),
head_conv=128,
wh_conv=64,
hm_head_conv_num=2,
wh_head_conv_num=1,
num_classes=81,
wh_offset_base=16,
wh_gaussian=True,
alpha=0.54,
hm_weight=1.,
wh_weight=5.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
search=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
search_config = dict(
tune_epoch_start=1,
tune_epoch_end=8,
search_optimizer=dict(
type='Adam',
lr=5e-4,
betas=(0.5, 0.999),
weight_decay=3e-4))
# learning policy
lr_config = dict(
policy='fixed',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5)
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_in_n_epoch=[1])
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 8
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'ttfnext18_search_gumbel_nons_wd3e4_lr5e4_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
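# Illustrative sketch, for reference only: configs like this one are plain
# Python modules that mmdetection normally loads through mmcv's Config helper.
# The guarded block below assumes mmcv is installed and that this file sits at
# the repository path used in the string; it never runs when the config is
# imported by the training tools themselves.
if __name__ == '__main__':
    from mmcv import Config
    cfg = Config.fromfile(
        'configs/ttfnext/ttfnext_r18_search_gumbel_lr5e4_3e4wd_nons_0_1x.py')
    print(cfg.model['bbox_head']['type'], cfg.total_epochs, cfg.work_dir)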
| 31.054264
| 84
| 0.6333
|
13664f3045b40e53dde1fc8cbce7cabbd55df01a
| 4,296
|
py
|
Python
|
chartpy_examples/vispy_demo.py
|
Joukahainen/chartpy
|
410f9e4553cb07be7d11823cad404f10da079ada
|
[
"Apache-2.0"
] | 519
|
2016-08-17T10:38:58.000Z
|
2022-03-30T19:30:15.000Z
|
chartpy_examples/vispy_demo.py
|
Joukahainen/chartpy
|
410f9e4553cb07be7d11823cad404f10da079ada
|
[
"Apache-2.0"
] | 5
|
2016-08-21T22:16:17.000Z
|
2019-12-06T06:17:13.000Z
|
chartpy_examples/vispy_demo.py
|
Joukahainen/chartpy
|
410f9e4553cb07be7d11823cad404f10da079ada
|
[
"Apache-2.0"
] | 108
|
2016-08-21T12:01:10.000Z
|
2022-03-25T06:38:58.000Z
|
__author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
This example generates several Brownian paths with millions of time steps. It then plots these using two different backends
- VisPy (GPU accelerated) backend
- matplotlib backend
For this number of points, VisPy will tend to be much quicker when manipulating the plot and zooming. Note that VisPy
support in chartpy is still very limited at this stage and does not yet support date labels, for example.
"""
from chartpy import Chart, Style
import numpy
import pandas
from math import sqrt
from scipy.stats import norm
import numpy as np
## from SciPy cookbook http://scipy.github.io/old-wiki/pages/Cookbook/BrownianMotion
def brownian(x0, n, dt, delta, out=None):
"""
Generate an instance of Brownian motion (i.e. the Wiener process):
X(t) = X(0) + N(0, delta**2 * t; 0, t)
where N(a,b; t0, t1) is a normally distributed random variable with mean a and
variance b. The parameters t0 and t1 make explicit the statistical
independence of N on different time intervals; that is, if [t0, t1) and
[t2, t3) are disjoint intervals, then N(a, b; t0, t1) and N(a, b; t2, t3)
are independent.
Written as an iteration scheme,
X(t + dt) = X(t) + N(0, delta**2 * dt; t, t+dt)
If `x0` is an array (or array-like), each value in `x0` is treated as
an initial condition, and the value returned is a numpy array with one
more dimension than `x0`.
Arguments
---------
x0 : float or numpy array (or something that can be converted to a numpy array
using numpy.asarray(x0)).
The initial condition(s) (i.e. position(s)) of the Brownian motion.
n : int
The number of steps to take.
dt : float
The time step.
delta : float
delta determines the "speed" of the Brownian motion. The random variable
of the position at time t, X(t), has a normal distribution whose mean is
the position at time t=0 and whose variance is delta**2*t.
out : numpy array or None
If `out` is not None, it specifies the array in which to put the
result. If `out` is None, a new numpy array is created and returned.
Returns
-------
A numpy array of floats with shape `x0.shape + (n,)`.
Note that the initial value `x0` is not included in the returned array.
"""
x0 = np.asarray(x0)
# For each element of x0, generate a sample of n numbers from a
# normal distribution.
r = norm.rvs(size=x0.shape + (n,), scale=delta * sqrt(dt))
# If `out` was not given, create an output array.
if out is None:
out = np.empty(r.shape)
# This computes the Brownian motion by forming the cumulative sum of
# the random samples.
np.cumsum(r, axis=-1, out=out)
# Add the initial condition.
out += np.expand_dims(x0, axis=-1)
return out
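# Illustrative sketch, for reference only: the vectorised cumsum above is
# equivalent to the iteration scheme quoted in the docstring,
# X(t + dt) = X(t) + N(0, delta**2 * dt). The helper below spells that loop
# out for a single scalar path; it is not used by the demo further down.
def _brownian_scalar_loop(x0, n, dt, delta, seed=0):
    rng = np.random.RandomState(seed)
    path = np.empty(n)
    x = x0
    for i in range(n):
        x = x + rng.normal(scale=delta * sqrt(dt))
        path[i] = x
    return path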
if __name__ == '__main__':
print('Generate paths')
delta = 2 # The Wiener process parameter.
T = 10.0 # Total time.
N = 10 * 1000000 # Number of steps.
dt = T/N # Time step size
m = 5 # Number of realizations to generate.
x = numpy.empty((m,N+1)) # Create an empty array to store the realizations.
x[:, 0] = 50 # Initial values of x.
brownian(x[:,0], N, dt, delta, out=x[:,1:])
t = numpy.linspace(0.0, N*dt, N+1)
df = pandas.DataFrame(index=t, data=x.T)
style = Style(save_fig=True)
print('About to plot vispy...')
# try vispy, which will work (uses GPU)
Chart().plot(df, engine='vispy', style=style)
print('About to plot matplotlib...')
# try matplotlib, which will likely be very slow or crash...
Chart().plot(df, engine='matplotlib', style=style)
| 33.5625
| 123
| 0.664106
|
3ad9916f6a11ba65ce034d0e04b889b382ec5368
| 1,836
|
py
|
Python
|
tests/app/db/test_redis.py
|
renovate-tests/pol
|
dca9aa4ce34273575d69a140dc3bb1d2ac14ecbf
|
[
"MIT"
] | 5
|
2019-05-11T05:14:44.000Z
|
2019-09-07T10:22:53.000Z
|
tests/app/db/test_redis.py
|
renovate-tests/pol
|
dca9aa4ce34273575d69a140dc3bb1d2ac14ecbf
|
[
"MIT"
] | 161
|
2019-09-09T07:30:25.000Z
|
2022-03-14T19:52:43.000Z
|
tests/app/db/test_redis.py
|
renovate-tests/pol
|
dca9aa4ce34273575d69a140dc3bb1d2ac14ecbf
|
[
"MIT"
] | 3
|
2019-09-07T13:15:05.000Z
|
2020-05-06T04:30:46.000Z
|
import pickle
import random
import string
import pytest
from app.core import config
from app.db.redis import PickleRedis, setup_redis_pool
def random_str(length: int = 8):
all_char = string.ascii_letters + string.digits + string.punctuation
return "".join(random.choice(all_char) for i in range(length))
async def close(r: PickleRedis):
r.close()
await r.wait_closed()
@pytest.mark.asyncio
async def test_setup_redis_pool():
pool = await setup_redis_pool()
assert isinstance(pool, PickleRedis)
await close(pool)
@pytest.mark.asyncio
async def test_set_key(redis_client):
pool = await setup_redis_pool()
key = random_str()
value = random_str()
await pool.set(key, value)
    assert redis_client.get(key) == pickle.dumps(value), "redis value is not pickled"
redis_client.delete(key)
await close(pool)
@pytest.mark.asyncio
async def test_get_key(redis_client):
pool = await setup_redis_pool()
key = random_str()
value = random_str()
redis_client.set(key, pickle.dumps(value))
    assert value == await pool.get(key), "redis value is not unpickled"
redis_client.delete(key)
await close(pool)
@pytest.mark.asyncio
async def test_decode_error(redis_client):
pool = await setup_redis_pool()
key = "unpickle-able key"
value = "23333-123s"
redis_client.set(key, value)
value = await pool.get(key)
assert value is None, "unpickle-able key should return `None`"
assert not await pool.exists(key), "unpickle-able key should be deleted"
redis_client.delete(key)
await close(pool)
@pytest.mark.asyncio
async def test_redis_script_lua_name():
pool = await setup_redis_pool()
for lua_fs in (config.PROJ_ROOT / "app" / "lua").glob("*.lua"):
assert lua_fs.name in pool.script_hash
await close(pool)
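# Illustrative sketch, for reference only: the assertions above rely on values
# being stored as pickle bytes and transparently unpickled on read. This
# stdlib-only helper demonstrates that round trip; its underscore-prefixed
# name keeps it out of pytest collection.
def _pickle_round_trip_demo(value="some-value"):
    raw = pickle.dumps(value)  # what a pickling client writes to Redis
    assert isinstance(raw, bytes)
    assert pickle.loads(raw) == value  # what a pickling read returns
    return raw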
| 24.157895
| 86
| 0.71024
|
f0b890d71402b4ebef2a654badf45c09cea48335
| 462
|
py
|
Python
|
PracticeData/PracticeScript_SB.py
|
everymind/SurprisingMindsOnYoutube
|
d6f4c5173e14ec2fa9e1e6156133ca4d9e09a562
|
[
"MIT"
] | null | null | null |
PracticeData/PracticeScript_SB.py
|
everymind/SurprisingMindsOnYoutube
|
d6f4c5173e14ec2fa9e1e6156133ca4d9e09a562
|
[
"MIT"
] | null | null | null |
PracticeData/PracticeScript_SB.py
|
everymind/SurprisingMindsOnYoutube
|
d6f4c5173e14ec2fa9e1e6156133ca4d9e09a562
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import numpy as np
x, y = np.loadtxt('2018-08-15T15_06_26-pupilproper.txt', delimiter=',', unpack=True)
plt.plot(y,x, label='Trial1')
x, y = np.loadtxt('pupilarea2.txt', delimiter=',', unpack=True,)
plt.plot(y,x, color='green', label='Trial2')
plt.xlabel('Time')
plt.ylabel('Area of Pupil')
plt.title('Area of Pupil Against Time')
plt.legend()
plt.savefig("GraphOneSB.png")
plt.show()
| 25.666667
| 84
| 0.720779
|
48320d6a3d4212dbf52aa697d1635725144eefd5
| 8,130
|
py
|
Python
|
get_pulse.py
|
JediRhymeTrix/webcam-pulse-detector
|
5b00810ef637acfb4dc6d6ca011c32e53df606d7
|
[
"Apache-2.0"
] | null | null | null |
get_pulse.py
|
JediRhymeTrix/webcam-pulse-detector
|
5b00810ef637acfb4dc6d6ca011c32e53df606d7
|
[
"Apache-2.0"
] | null | null | null |
get_pulse.py
|
JediRhymeTrix/webcam-pulse-detector
|
5b00810ef637acfb4dc6d6ca011c32e53df606d7
|
[
"Apache-2.0"
] | null | null | null |
from lib.device import Camera
from lib.processors_noopenmdao import findFaceGetPulse
from lib.interface import plotXY, imshow, waitKey, destroyWindow
from cv2 import moveWindow
import argparse
import numpy as np
import datetime
#TODO: work on serial port comms, if anyone asks for it
#from serial import Serial
import socket
import sys
class getPulseApp(object):
"""
Python application that finds a face in a webcam stream, then isolates the
forehead.
Then the average green-light intensity in the forehead region is gathered
over time, and the detected person's pulse is estimated.
"""
def __init__(self, args):
# Imaging device - must be a connected camera (not an ip camera or mjpeg
# stream)
serial = args.serial
baud = args.baud
self.send_serial = False
self.send_udp = False
if serial:
self.send_serial = True
if not baud:
baud = 9600
else:
baud = int(baud)
            from serial import Serial  # requires pyserial; imported lazily since serial output is optional
            self.serial = Serial(port=serial, baudrate=baud)
udp = args.udp
if udp:
self.send_udp = True
if ":" not in udp:
ip = udp
port = 5005
else:
ip, port = udp.split(":")
port = int(port)
self.udp = (ip, port)
self.sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
self.cameras = []
self.selected_cam = 0
for i in range(3):
camera = Camera(camera=i) # first camera by default
if camera.valid or not len(self.cameras):
self.cameras.append(camera)
else:
break
self.w, self.h = 0, 0
self.pressed = 0
        # Containerized analysis of received image frames (an openMDAO assembly)
# is defined next.
# This assembly is designed to handle all image & signal analysis,
# such as face detection, forehead isolation, time series collection,
# heart-beat detection, etc.
# Basically, everything that isn't communication
# to the camera device or part of the GUI
self.processor = findFaceGetPulse(bpm_limits=[50, 160],
data_spike_limit=2500.,
face_detector_smoothness=10.)
# Init parameters for the cardiac data plot
self.bpm_plot = False
self.plot_title = "Data display - raw signal (top) and PSD (bottom)"
# Maps keystrokes to specified methods
#(A GUI window must have focus for these to work)
self.key_controls = {"s": self.toggle_search,
"d": self.toggle_display_plot,
"c": self.toggle_cam,
"f": self.write_csv}
# timing data for the main loop
self.frame_count = 0
self.avg_throughput = 0.0
self.avg_latency = 0.0
def toggle_cam(self):
if len(self.cameras) > 1:
self.processor.find_faces = True
self.bpm_plot = False
destroyWindow(self.plot_title)
self.selected_cam += 1
self.selected_cam = self.selected_cam % len(self.cameras)
def write_csv(self):
"""
Writes current data to a csv file
"""
fn = "Webcam-pulse" + str(datetime.datetime.now())
fn = fn.replace(":", "_").replace(".", "_")
data = np.vstack((self.processor.times, self.processor.samples)).T
np.savetxt(fn + ".csv", data, delimiter=',')
print("Writing csv")
def toggle_search(self):
"""
Toggles a motion lock on the processor's face detection component.
Locking the forehead location in place significantly improves
        data quality, once a forehead has been successfully isolated.
"""
#state = self.processor.find_faces.toggle()
state = self.processor.find_faces_toggle()
print("face detection lock =", not state)
def toggle_display_plot(self):
"""
Toggles the data display.
"""
if self.bpm_plot:
print("bpm plot disabled")
self.bpm_plot = False
destroyWindow(self.plot_title)
else:
print("bpm plot enabled")
if self.processor.find_faces:
self.toggle_search()
self.bpm_plot = True
self.make_bpm_plot()
moveWindow(self.plot_title, self.w, 0)
def make_bpm_plot(self):
"""
Creates and/or updates the data display
"""
plotXY([[self.processor.times,
self.processor.samples],
[self.processor.freqs,
self.processor.fft]],
labels=[False, True],
showmax=[False, "bpm"],
label_ndigits=[0, 0],
showmax_digits=[0, 1],
skip=[3, 3],
name=self.plot_title,
bg=self.processor.slices[0])
def key_handler(self):
"""
Handle keystrokes, as set at the bottom of __init__()
A plotting or camera frame window must have focus for keypresses to be
detected.
"""
self.pressed = waitKey(10) & 255 # wait for keypress for 10 ms
if self.pressed == 27: # exit program on 'esc'
print("Exiting")
for cam in self.cameras:
cam.cam.release()
if self.send_serial:
self.serial.close()
sys.exit()
for key in self.key_controls.keys():
if chr(self.pressed) == key:
self.key_controls[key]()
def main_loop(self):
"""
Single iteration of the application's main loop.
"""
# Get current image frame from the camera
frame = self.cameras[self.selected_cam].get_frame()
self.frame_count += 1
start_time = start = datetime.datetime.now()
self.h, self.w, _c = frame.shape
# display unaltered frame
# imshow("Original",frame)
# set current image frame to the processor's input
self.processor.frame_in = frame
# process the image frame to perform all needed analysis
self.processor.run(self.selected_cam)
# collect the output frame for display
output_frame = self.processor.frame_out
# show the processed/annotated output frame
imshow("Processed", output_frame)
end = datetime.datetime.now()
time_taken = (end - start).total_seconds()
self.avg_latency = ((self.avg_latency * (self.frame_count - 1) + time_taken)
/ self.frame_count)
throughput = 1 / time_taken if time_taken else 0
self.avg_throughput = (self.avg_throughput * (self.frame_count - 1) +
throughput) / self.frame_count
# Check if a key was pressed
self.key_handler()
# create and/or update the raw data display if needed
if self.bpm_plot:
self.make_bpm_plot()
        if self.send_serial:
            self.serial.write((str(self.processor.bpm) + "\r\n").encode())
        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm).encode(), self.udp)
# handle any key presses
self.key_handler()
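# Illustrative sketch, for reference only: main_loop() keeps running averages
# of latency and throughput with the incremental-mean update
#   avg_n = (avg_(n-1) * (n - 1) + x_n) / n
# The helper below shows that update on its own with plain Python; it is not
# called anywhere in this script.
def _running_mean_sketch(samples):
    avg = 0.0
    for n, x in enumerate(samples, start=1):
        avg = (avg * (n - 1) + x) / n
    return avg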
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Webcam pulse detector.')
parser.add_argument('--serial', default=None,
help='serial port destination for bpm data')
parser.add_argument('--baud', default=None,
help='Baud rate for serial transmission')
parser.add_argument('--udp', default=None,
help='udp address:port destination for bpm data')
args = parser.parse_args()
App = getPulseApp(args)
try:
while True:
App.main_loop()
finally:
print('avg_throughput: ', App.avg_throughput)
print('avg_latency: ', App.avg_latency)
| 34.303797
| 84
| 0.570726
|
3f073861c8bfa4bea65ff9ccb43ce4d1de4dca6b
| 270
|
py
|
Python
|
tests/artificial/transf_Quantization/trend_MovingMedian/cycle_5/ar_12/test_artificial_32_Quantization_MovingMedian_5_12_20.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/artificial/transf_Quantization/trend_MovingMedian/cycle_5/ar_12/test_artificial_32_Quantization_MovingMedian_5_12_20.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/artificial/transf_Quantization/trend_MovingMedian/cycle_5/ar_12/test_artificial_32_Quantization_MovingMedian_5_12_20.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 12);
| 38.571429
| 170
| 0.737037
|
aee341bd88b05cb9502b7beb427a8ae9cf3f6e4a
| 64,244
|
py
|
Python
|
sklearn/decomposition/_dict_learning.py
|
MaiRajborirug/scikit-learn
|
c18d015372f7041099d19c215cd4c36ffd6fe5c5
|
[
"BSD-3-Clause"
] | 199
|
2020-08-27T09:03:21.000Z
|
2021-11-09T11:21:07.000Z
|
sklearn/decomposition/_dict_learning.py
|
MaiRajborirug/scikit-learn
|
c18d015372f7041099d19c215cd4c36ffd6fe5c5
|
[
"BSD-3-Clause"
] | 4
|
2021-10-07T08:40:50.000Z
|
2021-11-18T19:11:40.000Z
|
sklearn/decomposition/_dict_learning.py
|
MaiRajborirug/scikit-learn
|
c18d015372f7041099d19c215cd4c36ffd6fe5c5
|
[
"BSD-3-Clause"
] | 2
|
2018-10-02T03:13:40.000Z
|
2020-04-27T07:29:18.000Z
|
""" Dictionary learning.
"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
import warnings
from math import ceil
import numpy as np
from scipy import linalg
from joblib import Parallel, effective_n_jobs
from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin
from ..utils import deprecated
from ..utils import check_array, check_random_state, gen_even_slices, gen_batches
from ..utils.extmath import randomized_svd, row_norms, svd_flip
from ..utils.validation import check_is_fitted
from ..utils.fixes import delayed
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _check_positive_coding(method, positive):
if positive and method in ["omp", "lars"]:
raise ValueError(
"Positive constraint not supported for '{}' coding method.".format(method)
)
def _sparse_encode(
X,
dictionary,
gram,
cov=None,
algorithm="lasso_lars",
regularization=None,
copy_cov=True,
init=None,
max_iter=1000,
check_input=True,
verbose=0,
positive=False,
):
"""Generic sparse coding.
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
dictionary : ndarray of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram : ndarray of shape (n_components, n_components) or None
Precomputed Gram matrix, `dictionary * dictionary'`
gram can be `None` if method is 'threshold'.
cov : ndarray of shape (n_components, n_samples), default=None
Precomputed covariance, `dictionary * X'`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
regularization : int or float, default=None
The regularization parameter. It corresponds to alpha when
algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
Otherwise it corresponds to `n_nonzero_coefs`.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
check_input : bool, default=True
If `False`, the input arrays `X` and dictionary will not be checked.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
positive: bool, default=False
Whether to enforce a positivity constraint on the sparse code.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_components, n_features)
The sparse codes.
See Also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if dictionary.shape[1] != X.shape[1]:
raise ValueError(
"Dictionary and X have different numbers of features:"
"dictionary.shape: {} X.shape{}".format(dictionary.shape, X.shape)
)
if cov is None and algorithm != "lasso_cd":
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
_check_positive_coding(algorithm, positive)
if algorithm == "lasso_lars":
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all="ignore")
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(
alpha=alpha,
fit_intercept=False,
verbose=verbose,
normalize=False,
precompute=gram,
fit_path=False,
positive=positive,
max_iter=max_iter,
)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == "lasso_cd":
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(
alpha=alpha,
fit_intercept=False,
normalize="deprecated", # as it was False by default
precompute=gram,
max_iter=max_iter,
warm_start=True,
positive=positive,
)
if init is not None:
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == "lars":
try:
err_mgt = np.seterr(all="ignore")
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(
fit_intercept=False,
verbose=verbose,
normalize=False,
precompute=gram,
n_nonzero_coefs=int(regularization),
fit_path=False,
)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == "threshold":
new_code = (np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T
if positive:
np.clip(new_code, 0, None, out=new_code)
elif algorithm == "omp":
new_code = orthogonal_mp_gram(
Gram=gram,
Xy=cov,
n_nonzero_coefs=int(regularization),
tol=None,
norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov,
).T
else:
raise ValueError(
            'Sparse coding method must be "lasso_lars", '
            '"lasso_cd", "lars", "threshold" or "omp", got %s.' % algorithm
)
if new_code.ndim != 2:
return new_code.reshape(n_samples, n_components)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(
X,
dictionary,
*,
gram=None,
cov=None,
algorithm="lasso_lars",
n_nonzero_coefs=None,
alpha=None,
copy_cov=True,
init=None,
max_iter=1000,
n_jobs=None,
check_input=True,
verbose=0,
positive=False,
):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
dictionary : ndarray of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram : ndarray of shape (n_components, n_components), default=None
Precomputed Gram matrix, `dictionary * dictionary'`.
cov : ndarray of shape (n_components, n_samples), default=None
        Precomputed covariance, `dictionary * X'`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`n_nonzero_coefs=int(n_features / 10)`.
alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
check_input : bool, default=True
If `False`, the input arrays X and dictionary will not be checked.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
positive : bool, default=False
Whether to enforce positivity when finding the encoding.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse codes
See Also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
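    Examples
    --------
    A minimal, illustrative call; only the output shape is checked so the
    example stays stable across solver details.
    >>> import numpy as np
    >>> from sklearn.decomposition import sparse_encode
    >>> X = np.random.RandomState(0).randn(4, 8)
    >>> dictionary = np.random.RandomState(1).randn(3, 8)
    >>> sparse_encode(X, dictionary, algorithm="omp", n_nonzero_coefs=2).shape
    (4, 3)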
"""
if check_input:
if algorithm == "lasso_cd":
dictionary = check_array(dictionary, order="C", dtype="float64")
X = check_array(X, order="C", dtype="float64")
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != "threshold":
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != "lasso_cd":
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ("lars", "omp"):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.0
if effective_n_jobs(n_jobs) == 1 or algorithm == "threshold":
code = _sparse_encode(
X,
dictionary,
gram,
cov=cov,
algorithm=algorithm,
regularization=regularization,
copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose,
positive=positive,
)
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice],
dictionary,
gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization,
copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False,
verbose=verbose,
positive=positive,
)
for this_slice in slices
)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
def _update_dict(
dictionary,
Y,
code,
A=None,
B=None,
verbose=False,
random_state=None,
positive=False,
):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
Value of the dictionary at the previous iteration.
Y : ndarray of shape (n_samples, n_features)
Data matrix.
code : ndarray of shape (n_samples, n_components)
Sparse coding of the data against which to optimize the dictionary.
A : ndarray of shape (n_components, n_components), default=None
Together with `B`, sufficient stats of the online model to update the
dictionary.
B : ndarray of shape (n_features, n_components), default=None
Together with `A`, sufficient stats of the online model to update the
dictionary.
verbose: bool, default=False
Degree of output the procedure will print.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
positive : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
"""
n_samples, n_components = code.shape
random_state = check_random_state(random_state)
if A is None:
A = code.T @ code
if B is None:
B = Y.T @ code
n_unused = 0
for k in range(n_components):
if A[k, k] > 1e-6:
# 1e-6 is arbitrary but consistent with the spams implementation
dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
else:
# kth atom is almost never used -> sample a new one from the data
newd = Y[random_state.choice(n_samples)]
# add small noise to avoid making the sparse coding ill conditioned
noise_level = 0.01 * (newd.std() or 1) # avoid 0 std
noise = random_state.normal(0, noise_level, size=len(newd))
dictionary[k] = newd + noise
code[:, k] = 0
n_unused += 1
if positive:
np.clip(dictionary[k], 0, None, out=dictionary[k])
# Projection on the constraint set ||V_k|| <= 1
dictionary[k] /= max(linalg.norm(dictionary[k]), 1)
if verbose and n_unused > 0:
print(f"{n_unused} unused atoms resampled.")
def dict_learning(
X,
n_components,
*,
alpha,
max_iter=100,
tol=1e-8,
method="lars",
n_jobs=None,
dict_init=None,
code_init=None,
callback=None,
verbose=False,
random_state=None,
return_n_iter=False,
positive_dict=False,
positive_code=False,
method_max_iter=1000,
):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. ||.||_Fro stands for
the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
which is the sum of the absolute values of all the entries in the matrix.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
n_components : int
Number of dictionary atoms to extract.
alpha : int
Sparsity controlling parameter.
max_iter : int, default=100
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for the stopping condition.
method : {'lars', 'cd'}, default='lars'
The method used:
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value for the dictionary for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
code_init : ndarray of shape (n_samples, n_components), default=None
Initial value for the sparse code for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
callback : callable, default=None
        Callable that gets invoked every five iterations.
verbose : bool, default=False
To control the verbosity of the procedure.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform.
.. versionadded:: 0.22
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : ndarray of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See Also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
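    Examples
    --------
    A minimal, illustrative call; only the factor shapes are checked.
    >>> import numpy as np
    >>> from sklearn.decomposition import dict_learning
    >>> X = np.random.RandomState(0).randn(6, 4)
    >>> code, dictionary, errors = dict_learning(
    ...     X, n_components=2, alpha=1, random_state=0)
    >>> code.shape, dictionary.shape
    ((6, 2), (2, 4))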
"""
if method not in ("lars", "cd"):
raise ValueError("Coding method %r not supported as a fit algorithm." % method)
_check_positive_coding(method, positive_code)
method = "lasso_" + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order="F")
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
# flip the initial code's sign to enforce deterministic output
code, dictionary = svd_flip(code, dictionary)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[
dictionary, np.zeros((n_components - r, dictionary.shape[1]))
]
# Fortran-order dict better suited for the sparse coding which is the
# bottleneck of this algorithm.
dictionary = np.asfortranarray(dictionary)
errors = []
current_cost = np.nan
if verbose == 1:
print("[dict_learning]", end=" ")
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = time.time() - t0
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print(
"Iteration % 3i (elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost)
)
# Update code
code = sparse_encode(
X,
dictionary,
algorithm=method,
alpha=alpha,
init=code,
n_jobs=n_jobs,
positive=positive_code,
max_iter=method_max_iter,
verbose=verbose,
)
# Update dictionary in place
_update_dict(
dictionary,
X,
code,
verbose=verbose,
random_state=random_state,
positive=positive_dict,
)
# Cost function
current_cost = 0.5 * np.sum((X - code @ dictionary) ** 2) + alpha * np.sum(
np.abs(code)
)
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
def dict_learning_online(
X,
n_components=2,
*,
alpha=1,
n_iter=100,
return_code=True,
dict_init=None,
callback=None,
batch_size=3,
verbose=False,
shuffle=True,
n_jobs=None,
method="lars",
iter_offset=0,
random_state=None,
return_inner_stats=False,
inner_stats=None,
return_n_iter=False,
positive_dict=False,
positive_code=False,
method_max_iter=1000,
):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. ||.||_Fro stands for
the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
which is the sum of the absolute values of all the entries in the matrix.
This is accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
n_components : int or None, default=2
Number of dictionary atoms to extract. If None, then ``n_components``
is set to ``n_features``.
alpha : float, default=1
Sparsity controlling parameter.
n_iter : int, default=100
Number of mini-batch iterations to perform.
return_code : bool, default=True
Whether to also return the code U or just the dictionary `V`.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value for the dictionary for warm restart scenarios.
callback : callable, default=None
        Callable that gets invoked every five iterations.
batch_size : int, default=3
The number of samples to take in each batch.
verbose : bool, default=False
To control the verbosity of the procedure.
shuffle : bool, default=True
Whether to shuffle the data before splitting it in batches.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
method : {'lars', 'cd'}, default='lars'
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default=0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
return_inner_stats : bool, default=False
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If `return_inner_stats` is `True`, `return_code` is
ignored.
inner_stats : tuple of (A, B) ndarrays, default=None
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
`A` `(n_components, n_components)` is the dictionary covariance matrix.
`B` `(n_features, n_components)` is the data approximation matrix.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform when solving the lasso problem.
.. versionadded:: 0.22
Returns
-------
code : ndarray of shape (n_samples, n_components),
The sparse code (only returned if `return_code=True`).
dictionary : ndarray of shape (n_components, n_features),
The solutions to the dictionary learning problem.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See Also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
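    Examples
    --------
    A minimal, illustrative call; only the factor shapes are checked.
    >>> import numpy as np
    >>> from sklearn.decomposition import dict_learning_online
    >>> X = np.random.RandomState(0).randn(10, 6)
    >>> code, dictionary = dict_learning_online(
    ...     X, n_components=3, alpha=1, random_state=0, n_iter=10)
    >>> code.shape, dictionary.shape
    ((10, 3), (3, 6))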
"""
if n_components is None:
n_components = X.shape[1]
if method not in ("lars", "cd"):
raise ValueError("Coding method not supported as a fit algorithm.")
_check_positive_coding(method, positive_code)
method = "lasso_" + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components, random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[
dictionary, np.zeros((n_components - r, dictionary.shape[1]))
]
if verbose == 1:
print("[dict_learning]", end=" ")
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
# Fortran-order dict better suited for the sparse coding which is the
# bottleneck of this algorithm.
dictionary = check_array(dictionary, order="F", dtype=np.float64, copy=False)
dictionary = np.require(dictionary, requirements="W")
X_train = check_array(X_train, order="C", dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = time.time() - t0
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100.0 / verbose) == 0:
print(
"Iteration % 3i (elapsed time: % 3is, % 4.1fmn)" % (ii, dt, dt / 60)
)
this_code = sparse_encode(
this_X,
dictionary,
algorithm=method,
alpha=alpha,
n_jobs=n_jobs,
check_input=False,
positive=positive_code,
max_iter=method_max_iter,
verbose=verbose,
)
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code.T, this_code)
B *= beta
B += np.dot(this_X.T, this_code)
# Update dictionary in place
_update_dict(
dictionary,
this_X,
this_code,
A,
B,
verbose=verbose,
random_state=random_state,
positive=positive_dict,
)
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary, (A, B), ii - iter_offset + 1
else:
return dictionary, (A, B)
if return_code:
if verbose > 1:
print("Learning code...", end=" ")
elif verbose == 1:
print("|", end=" ")
code = sparse_encode(
X,
dictionary,
algorithm=method,
alpha=alpha,
n_jobs=n_jobs,
check_input=False,
positive=positive_code,
max_iter=method_max_iter,
verbose=verbose,
)
if verbose > 1:
dt = time.time() - t0
print("done (total time: % 3is, % 4.1fmn)" % (dt, dt / 60))
if return_n_iter:
return code, dictionary, ii - iter_offset + 1
else:
return code, dictionary
if return_n_iter:
return dictionary, ii - iter_offset + 1
else:
return dictionary
class _BaseSparseCoding(_ClassNamePrefixFeaturesOutMixin, TransformerMixin):
"""Base class from SparseCoder and DictionaryLearning algorithms."""
def __init__(
self,
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha,
split_sign,
n_jobs,
positive_code,
transform_max_iter,
):
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.transform_max_iter = transform_max_iter
self.split_sign = split_sign
self.n_jobs = n_jobs
self.positive_code = positive_code
def _transform(self, X, dictionary):
"""Private method allowing to accommodate both DictionaryLearning and
SparseCoder."""
X = self._validate_data(X, reset=False)
# transform_alpha has to be changed in _transform
# this is done for consistency with the value of alpha
if (
hasattr(self, "alpha")
and self.alpha != 1.0
and self.transform_alpha is None
):
warnings.warn(
"By default transform_alpha will be equal to"
"alpha instead of 1.0 starting from version 1.2",
FutureWarning,
)
transform_alpha = 1.0 # TODO change to self.alpha in 1.2
else:
transform_alpha = self.transform_alpha
code = sparse_encode(
X,
dictionary,
algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=transform_alpha,
max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
positive=self.positive_code,
)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
def transform(self, X):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
return self._transform(X, self.components_)
class SparseCoder(_BaseSparseCoding, BaseEstimator):
"""Sparse coding.
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}, default='omp'
Algorithm used to transform the data:
- `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
- `'lasso_lars'`: uses Lars to compute the Lasso solution;
- `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if
the estimated components are sparse;
- `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
- `'threshold'`: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``.
transform_n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`transform_n_nonzero_coefs=int(n_features / 10)`.
transform_alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.
split_sign : bool, default=False
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
transform_max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`lasso_lars`.
.. versionadded:: 0.22
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The unchanged dictionary atoms.
.. deprecated:: 0.24
This attribute is deprecated in 0.24 and will be removed in
1.1 (renaming of 0.26). Use `dictionary` instead.
n_components_ : int
Number of atoms.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchDictionaryLearning : A faster, less accurate, version of the
dictionary learning algorithm.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
    SparsePCA : Sparse Principal Components Analysis.
sparse_encode : Sparse coding where each row of the result is the solution
to a sparse coding problem.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import SparseCoder
>>> X = np.array([[-1, -1, -1], [0, 0, 3]])
>>> dictionary = np.array(
... [[0, 1, 0],
... [-1, -1, 2],
... [1, 1, 1],
... [0, 1, 1],
... [0, 2, 1]],
... dtype=np.float64
... )
>>> coder = SparseCoder(
... dictionary=dictionary, transform_algorithm='lasso_lars',
... transform_alpha=1e-10,
... )
>>> coder.transform(X)
array([[ 0., 0., -1., 0., 0.],
[ 0., 1., 1., 0., 0.]])
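    For illustration (a minimal sketch reusing the data above), setting
    ``split_sign=True`` splits each code vector into its positive and
    negative parts, doubling the number of output features:
    >>> coder_split = SparseCoder(
    ...     dictionary=dictionary, transform_algorithm='lasso_lars',
    ...     transform_alpha=1e-10, split_sign=True,
    ... )
    >>> coder_split.transform(X).shape
    (2, 10)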
"""
_required_parameters = ["dictionary"]
def __init__(
self,
dictionary,
*,
transform_algorithm="omp",
transform_n_nonzero_coefs=None,
transform_alpha=None,
split_sign=False,
n_jobs=None,
positive_code=False,
transform_max_iter=1000,
):
super().__init__(
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha,
split_sign,
n_jobs,
positive_code,
transform_max_iter,
)
self.dictionary = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : Ignored
Not used, present for API consistency by convention.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
return self
@deprecated( # type: ignore
"The attribute `components_` is deprecated "
"in 0.24 and will be removed in 1.1 (renaming of 0.26). Use the "
"`dictionary` instead."
)
@property
def components_(self):
return self.dictionary
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
return super()._transform(X, self.dictionary)
def _more_tags(self):
return {"requires_fit": False}
@property
def n_components_(self):
"""Number of atoms."""
return self.dictionary.shape[0]
@property
def n_features_in_(self):
"""Number of features seen during `fit`."""
return self.dictionary.shape[1]
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.n_components_
class DictionaryLearning(_BaseSparseCoding, BaseEstimator):
"""Dictionary learning.
Finds a dictionary (a set of atoms) that performs well at sparsely
encoding the fitted data.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
(U,V)
with || V_k ||_2 <= 1 for all 0 <= k < n_components
||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
the entry-wise matrix norm which is the sum of the absolute values
of all the entries in the matrix.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int, default=None
Number of dictionary elements to extract. If None, then ``n_components``
is set to ``n_features``.
alpha : float, default=1.0
Sparsity controlling parameter.
max_iter : int, default=1000
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for numerical error.
fit_algorithm : {'lars', 'cd'}, default='lars'
* `'lars'`: uses the least angle regression method to solve the lasso
problem (:func:`~sklearn.linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be
faster if the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}, default='omp'
Algorithm used to transform the data:
- `'lars'`: uses the least angle regression method
(:func:`~sklearn.linear_model.lars_path`);
- `'lasso_lars'`: uses Lars to compute the Lasso solution.
- `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'`
will be faster if the estimated components are sparse.
- `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution.
- `'threshold'`: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``.
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and
`algorithm='omp'`. If `None`, then
`transform_n_nonzero_coefs=int(n_features / 10)`.
transform_alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `None`, defaults to `alpha`.
n_jobs : int or None, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
code_init : ndarray of shape (n_samples, n_components), default=None
Initial value for the code, for warm restart. Only used if `code_init`
and `dict_init` are not None.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial values for the dictionary, for warm restart. Only used if
`code_init` and `dict_init` are not None.
verbose : bool, default=False
To control the verbosity of the procedure.
split_sign : bool, default=False
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
transform_max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
.. versionadded:: 0.22
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
        Dictionary atoms extracted from the data.
    error_ : array
        Vector of errors at each iteration.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations run.
See Also
--------
MiniBatchDictionaryLearning: A faster, less accurate, version of the
dictionary learning algorithm.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
SparseCoder : Find a sparse representation of data from a fixed,
precomputed dictionary.
SparsePCA : Sparse Principal Components Analysis.
References
----------
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import DictionaryLearning
>>> X, dictionary, code = make_sparse_coded_signal(
... n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42,
... )
>>> dict_learner = DictionaryLearning(
... n_components=15, transform_algorithm='lasso_lars', random_state=42,
... )
>>> X_transformed = dict_learner.fit_transform(X)
We can check the level of sparsity of `X_transformed`:
>>> np.mean(X_transformed == 0)
0.87...
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = X_transformed @ dict_learner.components_
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
0.08...
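    New samples can be encoded against the learned dictionary with
    `transform` (a quick illustrative sketch on a slice of the training data):
    >>> dict_learner.transform(X[:5]).shape[0]
    5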
"""
def __init__(
self,
n_components=None,
*,
alpha=1,
max_iter=1000,
tol=1e-8,
fit_algorithm="lars",
transform_algorithm="omp",
transform_n_nonzero_coefs=None,
transform_alpha=None,
n_jobs=None,
code_init=None,
dict_init=None,
verbose=False,
split_sign=False,
random_state=None,
positive_code=False,
positive_dict=False,
transform_max_iter=1000,
):
super().__init__(
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha,
split_sign,
n_jobs,
positive_code,
transform_max_iter,
)
self.n_components = n_components
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
self.positive_dict = positive_dict
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = self._validate_data(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X,
n_components,
alpha=self.alpha,
tol=self.tol,
max_iter=self.max_iter,
method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True,
positive_dict=self.positive_dict,
positive_code=self.positive_code,
)
self.components_ = U
self.error_ = E
return self
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
class MiniBatchDictionaryLearning(_BaseSparseCoding, BaseEstimator):
"""Mini-batch dictionary learning.
Finds a dictionary (a set of atoms) that performs well at sparsely
encoding the fitted data.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
(U,V)
with || V_k ||_2 <= 1 for all 0 <= k < n_components
||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
the entry-wise matrix norm which is the sum of the absolute values
of all the entries in the matrix.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int, default=None
Number of dictionary elements to extract.
alpha : float, default=1
Sparsity controlling parameter.
n_iter : int, default=1000
Total number of iterations to perform.
fit_algorithm : {'lars', 'cd'}, default='lars'
The algorithm used:
- `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`)
- `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
batch_size : int, default=3
Number of samples in each mini-batch.
shuffle : bool, default=True
Whether to shuffle the samples before forming batches.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value of the dictionary for warm restart scenarios.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}, default='omp'
Algorithm used to transform the data:
- `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
- `'lasso_lars'`: uses Lars to compute the Lasso solution.
- `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster
if the estimated components are sparse.
- `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution.
- `'threshold'`: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``.
transform_n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and
`algorithm='omp'`. If `None`, then
`transform_n_nonzero_coefs=int(n_features / 10)`.
transform_alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `None`, defaults to `alpha`.
verbose : bool, default=False
To control the verbosity of the procedure.
split_sign : bool, default=False
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
transform_max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
.. versionadded:: 0.22
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Components extracted from the data.
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
`A` `(n_components, n_components)` is the dictionary covariance matrix.
`B` `(n_features, n_components)` is the data approximation matrix.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations run.
iter_offset_ : int
        The number of iterations on data batches that have been
        performed before.
random_state_ : RandomState instance
RandomState instance that is generated either from a seed, the random
        number generator or by `np.random`.
See Also
--------
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
SparseCoder : Find a sparse representation of data from a fixed,
precomputed dictionary.
SparsePCA : Sparse Principal Components Analysis.
References
----------
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (https://www.di.ens.fr/sierra/pdfs/icml09.pdf)
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import MiniBatchDictionaryLearning
>>> X, dictionary, code = make_sparse_coded_signal(
... n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42)
>>> dict_learner = MiniBatchDictionaryLearning(
... n_components=15, transform_algorithm='lasso_lars', random_state=42,
... )
>>> X_transformed = dict_learner.fit_transform(X)
We can check the level of sparsity of `X_transformed`:
>>> np.mean(X_transformed == 0)
0.86...
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = X_transformed @ dict_learner.components_
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
0.07...
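    The estimator can also be trained incrementally; as an illustrative
    sketch, a single `partial_fit` call on the same data yields a dictionary
    of the same shape:
    >>> dict_learner_pf = MiniBatchDictionaryLearning(
    ...     n_components=15, transform_algorithm='lasso_lars', random_state=42,
    ... )
    >>> dict_learner_pf = dict_learner_pf.partial_fit(X)
    >>> dict_learner_pf.components_.shape == dict_learner.components_.shape
    True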
"""
def __init__(
self,
n_components=None,
*,
alpha=1,
n_iter=1000,
fit_algorithm="lars",
n_jobs=None,
batch_size=3,
shuffle=True,
dict_init=None,
transform_algorithm="omp",
transform_n_nonzero_coefs=None,
transform_alpha=None,
verbose=False,
split_sign=False,
random_state=None,
positive_code=False,
positive_dict=False,
transform_max_iter=1000,
):
super().__init__(
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha,
split_sign,
n_jobs,
positive_code,
transform_max_iter,
)
self.n_components = n_components
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
self.positive_dict = positive_dict
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = self._validate_data(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X,
self.n_components,
alpha=self.alpha,
n_iter=self.n_iter,
return_code=False,
method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
dict_init=self.dict_init,
batch_size=self.batch_size,
shuffle=self.shuffle,
verbose=self.verbose,
random_state=random_state,
return_inner_stats=True,
return_n_iter=True,
positive_dict=self.positive_dict,
positive_code=self.positive_code,
)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
self.random_state_ = random_state
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Update the model using the data in X as a mini-batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
iter_offset : int, default=None
            The number of iterations on data batches that have been
            performed before this call to `partial_fit`. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
if hasattr(self, "components_"):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, "inner_stats_", None)
if iter_offset is None:
iter_offset = getattr(self, "iter_offset_", 0)
X = self._validate_data(X, reset=(iter_offset == 0))
U, (A, B) = dict_learning_online(
X,
self.n_components,
alpha=self.alpha,
n_iter=1,
method=self.fit_algorithm,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
dict_init=dict_init,
batch_size=len(X),
shuffle=False,
verbose=self.verbose,
return_code=False,
iter_offset=iter_offset,
random_state=self.random_state_,
return_inner_stats=True,
inner_stats=inner_stats,
positive_dict=self.positive_dict,
positive_code=self.positive_code,
)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + 1
return self
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
| 33.047325
| 88
| 0.617552
|
d9a3fca4565fc06defc14f87c3871da9cec62b50
| 8,338
|
py
|
Python
|
cdpcli/configloader.py
|
anuragpatro/cdpcli
|
fe2b78308e4f8c09aa9609a43a646f314fa20327
|
[
"Apache-2.0"
] | 7
|
2020-10-01T14:03:57.000Z
|
2022-02-23T17:47:08.000Z
|
cdpcli/configloader.py
|
anuragpatro/cdpcli
|
fe2b78308e4f8c09aa9609a43a646f314fa20327
|
[
"Apache-2.0"
] | 1
|
2020-11-30T08:00:50.000Z
|
2020-12-01T08:00:20.000Z
|
cdpcli/configloader.py
|
anuragpatro/cdpcli
|
fe2b78308e4f8c09aa9609a43a646f314fa20327
|
[
"Apache-2.0"
] | 7
|
2020-11-07T20:55:47.000Z
|
2021-11-09T18:45:33.000Z
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import configparser
import copy
import os
import shlex
from cdpcli.exceptions import ConfigNotFound, ConfigParseError
def multi_file_load_config(*filenames):
"""Load and combine multiple INI configs with profiles.
    This function will take a list of filenames and return
a single dictionary that represents the merging of the loaded
config files.
If any of the provided filenames does not exist, then that file
is ignored. It is therefore ok to provide a list of filenames,
some of which may not exist.
Configuration files are **not** deep merged, only the top level
keys are merged. The filenames should be passed in order of
precedence. The first config file has precedence over the
second config file, which has precedence over the third config file,
etc. The only exception to this is that the "profiles" key is
merged to combine profiles from multiple config files into a
single profiles mapping. However, if a profile is defined in
multiple config files, then the config file with the highest
precedence is used. Profile values themselves are not merged.
For example::
FileA FileB FileC
[foo] [foo] [bar]
a=1 a=2 a=3
b=2
[bar] [baz] [profile a]
a=2 a=3 region=e
[profile a] [profile b] [profile c]
region=c region=d region=f
The final result of ``multi_file_load_config(FileA, FileB, FileC)``
would be::
{"foo": {"a": 1}, "bar": {"a": 2}, "baz": {"a": 3},
"profiles": {"a": {"region": "c"}}, {"b": {"region": d"}},
{"c": {"region": "f"}}}
Note that the "foo" key comes from A, even though it's defined in both
FileA and FileB. Because "foo" was defined in FileA first, then the values
for "foo" from FileA are used and the values for "foo" from FileB are
ignored. Also note where the profiles originate from. Profile "a"
    comes from FileA, profile "b" comes from FileB, and profile "c" comes
from FileC.
"""
configs = []
profiles = []
for filename in filenames:
try:
loaded = load_config(filename)
except ConfigNotFound:
continue
profiles.append(loaded.pop('profiles'))
configs.append(loaded)
merged_config = _merge_list_of_dicts(configs)
merged_profiles = _merge_list_of_dicts(profiles)
merged_config['profiles'] = merged_profiles
return merged_config
def _merge_list_of_dicts(list_of_dicts):
merged_dicts = {}
for single_dict in list_of_dicts:
for key, value in single_dict.items():
if key not in merged_dicts:
merged_dicts[key] = value
return merged_dicts
def load_config(config_filename):
"""Parse a INI config with profiles.
This will parse an INI config file and map top level profiles
into a top level "profile" key.
If you want to parse an INI file and map all section names to
top level keys, use ``raw_config_parse`` instead.
"""
parsed = raw_config_parse(config_filename)
return build_profile_map(parsed)
def raw_config_parse(config_filename):
"""Returns the parsed INI config contents.
Each section name is a top level key.
:returns: A dict with keys for each profile found in the config
file and the value of each key being a dict containing name
value pairs found in that profile.
:raises: ConfigNotFound, ConfigParseError
"""
config = {}
path = config_filename
if path is not None:
path = os.path.expandvars(path)
path = os.path.expanduser(path)
if not os.path.isfile(path):
raise ConfigNotFound(path=path)
cp = configparser.RawConfigParser()
try:
cp.read(path)
except configparser.Error:
raise ConfigParseError(path=path)
else:
for section in cp.sections():
config[section] = {}
for option in cp.options(section):
config_value = cp.get(section, option)
if config_value.startswith('\n'):
# Then we need to parse the inner contents as
# hierarchical. We support a single level
# of nesting for now.
try:
config_value = _parse_nested(config_value)
except ValueError:
raise ConfigParseError(path=path)
config[section][option] = config_value
return config
def _parse_nested(config_value):
# Given a value like this:
# \n
# foo = bar
# bar = baz
# We need to parse this into
    # {'foo': 'bar', 'bar': 'baz'}
parsed = {}
for line in config_value.splitlines():
line = line.strip()
if not line:
continue
# The caller will catch ValueError
# and raise an appropriate error
# if this fails.
key, value = line.split('=', 1)
parsed[key.strip()] = value.strip()
return parsed
def build_profile_map(parsed_ini_config):
"""Convert the parsed INI config into a profile map.
    The config file format requires that every profile except the
    default be prepended with "profile", e.g.::
[profile test]
cdp_... = foo
cdp_... = bar
[profile bar]
cdp_... = foo
cdp_... = bar
# This is *not* a profile
[preview]
otherstuff = 1
# Neither is this
[foobar]
morestuff = 2
The build_profile_map will take a parsed INI config file where each top
level key represents a section name, and convert into a format where all
the profiles are under a single top level "profiles" key, and each key in
the sub dictionary is a profile name. For example, the above config file
would be converted from::
{"profile test": {"cdp_...": "foo", "cdp_...": "bar"},
"profile bar": {"cdp_...": "foo", "cdp_...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
into::
{"profiles": {"test": {"cdp_...": "foo", "cdp_...": "bar"},
"bar": {"cdp_...": "foo", "cdp_...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
If there are no profiles in the provided parsed INI contents, then
an empty dict will be the value associated with the ``profiles`` key.
.. note::
This will not mutate the passed in parsed_ini_config. Instead it will
make a deepcopy and return that value.
"""
parsed_config = copy.deepcopy(parsed_ini_config)
profiles = {}
final_config = {}
for key, values in parsed_config.items():
if key.startswith("profile"):
try:
parts = shlex.split(key)
except ValueError:
continue
if len(parts) == 2:
profiles[parts[1]] = values
elif key == 'default':
# default section is special and is considered a profile
# name but we don't require you use 'profile "default"'
# as a section.
profiles[key] = values
else:
final_config[key] = values
final_config['profiles'] = profiles
return final_config
| 34.312757
| 79
| 0.600024
|
c744a751ac829b8df6e6ea43ded8308d051da6ea
| 2,783
|
py
|
Python
|
_unittests/ut_asv_benchmark/test_create_asv_benchmark_rf.py
|
henrywu2019/mlprodict
|
4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad
|
[
"MIT"
] | 1
|
2020-12-18T03:49:53.000Z
|
2020-12-18T03:49:53.000Z
|
_unittests/ut_asv_benchmark/test_create_asv_benchmark_rf.py
|
henrywu2019/mlprodict
|
4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad
|
[
"MIT"
] | null | null | null |
_unittests/ut_asv_benchmark/test_create_asv_benchmark_rf.py
|
henrywu2019/mlprodict
|
4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad
|
[
"MIT"
] | null | null | null |
"""
@brief test log(time=6s)
"""
import os
import unittest
import re
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase, get_temp_folder
from pyquickhelper.loghelper.run_cmd import run_script
from pyquickhelper.texthelper.version_helper import compare_module_version
import sklearn
from mlprodict.asv_benchmark import create_asv_benchmark
import mlprodict
class TestCreateAsvBenchmarkRF(ExtTestCase):
def test_create_asv_benchmark_rf(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
self.assertNotEmpty(mlprodict)
temp = get_temp_folder(__file__, "temp_create_asv_benchmark_rf")
created = create_asv_benchmark(
location=temp, verbose=1, fLOG=fLOG,
runtime=('scikit-learn', 'python', 'onnxruntime1'),
exc=False, execute=True, models={'RandomForestRegressor'})
self.assertNotEmpty(created)
reg = re.compile("class ([a-zA-Z0-9_]+)[(]")
verif = False
allnames = []
for path, _, files in os.walk(os.path.join(temp, 'benches')):
for zoo in files:
if '__init__' in zoo:
continue
fLOG("process '{}'".format(zoo))
fullname = os.path.join(path, zoo)
with open(fullname, 'r', encoding='utf-8') as f:
content = f.read()
names = reg.findall(content)
name = names[0]
content += "\n\ncl = %s()\ncl.setup_cache()\n" % name
allnames.append(fullname)
with open(fullname, 'w', encoding='utf-8') as f:
f.write(content)
__, err = run_script(fullname, wait=True)
lines = [_ for _ in err.split('\n') if _ and _[0] != ' ']
lines = [_ for _ in lines if "Warning" not in _]
lines = [
_ for _ in lines if "No module named 'mlprodict'" not in _]
lines = [_ for _ in lines if "Traceback " not in _]
err = "\n".join(lines).strip(' \n\r')
if len(err) > 0:
raise RuntimeError(
"Issue with '{}'\n{}".format(fullname, err))
if (zoo.endswith("bench_RandomForestReg_default_b_reg_nest100.py") and
compare_module_version(sklearn.__version__, "0.21") >= 0):
if "random_state=42" not in content:
raise AssertionError(content)
else:
verif = True
if not verif:
raise AssertionError("Visited files\n{}".format(
"\n".join(allnames)))
if __name__ == "__main__":
unittest.main()
| 40.926471
| 86
| 0.561265
|
d723ff88b21bebb486f6659fac645d737bcce281
| 22,221
|
py
|
Python
|
mmtbx/refinement/adp_refinement.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/refinement/adp_refinement.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/refinement/adp_refinement.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
import sys
import mmtbx.refinement.group
from mmtbx.refinement import minimization
from mmtbx.refinement import print_statistics
from mmtbx import utils
from mmtbx.tls import tools
import iotbx.phil
from cctbx import adptbx
from cctbx.array_family import flex
import scitbx.lbfgs
from libtbx.test_utils import approx_equal
from libtbx import adopt_init_args, Auto
from libtbx.utils import user_plus_sys_time
time_adp_refinement_py = 0.0
def show_times(out = None):
if(out is None): out = sys.stdout
total = time_adp_refinement_py
if(total > 0.01):
print >> out, "ADP refinement:"
print >> out, " time spent in adp_refinement.py = %-7.2f" % time_adp_refinement_py
return total
group_adp_master_params = iotbx.phil.parse("""\
number_of_macro_cycles = 3
.type = int
.expert_level = 1
max_number_of_iterations = 25
.type = int
.expert_level = 1
convergence_test = False
.type = bool
.expert_level = 3
run_finite_differences_test = False
.type = bool
.expert_level = 3
use_restraints = True
.type = bool
.expert_level = 0
restraints_weight = None
.type = float
.expert_level = 0
""")
tls_master_params = iotbx.phil.parse("""\
one_residue_one_group = None
.type = bool
.style = tribool
refine_T = True
.type = bool
refine_L = True
.type = bool
refine_S = True
.type = bool
number_of_macro_cycles = 2
.type = int
max_number_of_iterations = 25
.type = int
start_tls_value = None
.type = float
run_finite_differences_test = False
.type = bool
.help = Test with finite differences instead of gradients. FOR \
DEVELOPMENT PURPOSES ONLY.
.expert_level = 3
eps = 1.e-6
.type = float
.help = Finite difference setting.
.expert_level = 3
min_tls_group_size = 5
.type = int
.help = min number of atoms allowed per TLS group
verbose = True
.type = bool
""")
individual_adp_master_params = iotbx.phil.parse("""\
iso {
max_number_of_iterations = 25
.type = int
scaling {
scale_max = 3.0
.type = float
scale_min = 10.0
.type = float
}
}
""")
adp_restraints_master_params = iotbx.phil.parse("""\
iso {
use_u_local_only = False
.type = bool
sphere_radius = 5.0
.type = float
distance_power = 1.69
.type = float
average_power = 1.03
.type = float
wilson_b_weight_auto = False
.type = bool
wilson_b_weight = None
.type = float
plain_pairs_radius = 5.0
.type = float
refine_ap_and_dp = False
.type = bool
}
""")
class manager(object):
def __init__(
self,
fmodels,
model,
all_params,
group_adp_selections = None,
group_adp_selections_h = None,
group_adp_params = None,
tls_selections = None,
tls_params = tls_master_params.extract(),
individual_adp_params = individual_adp_master_params.extract(),
adp_restraints_params = adp_restraints_master_params.extract(),
refine_adp_individual = None,
refine_adp_group = None,
refine_tls = None,
tan_b_iso_max = None,
restraints_manager = None,
target_weights = None,
macro_cycle = None,
log = None,
h_params = None,
nproc = None):
global time_adp_refinement_py
if(group_adp_params is None):
group_adp_params = group_adp_master_params.extract()
scatterers = fmodels.fmodel_xray().xray_structure.scatterers()
timer = user_plus_sys_time()
if(log is None): log = sys.stdout
tan_u_iso = False
param = 0
if(tan_b_iso_max > 0.0):
tan_u_iso = True
param = int(tan_b_iso_max)
if(macro_cycle == 1):
offset = True
else:
offset = False
if(refine_tls):
print_statistics.make_sub_header(text = "TLS refinement",
out = log)
tls_sel_st = flex.size_t()
for ts in tls_selections:
tls_sel_st.extend(ts)
tls_sel_bool = flex.bool(scatterers.size(), flex.size_t(tls_sel_st))
### totally ad hoc fix
tmp_site_t = flex.size_t()
for gs in group_adp_selections:
for gs_ in gs:
tmp_site_t.append(gs_)
###
if(macro_cycle == 1 or tmp_site_t.size() != scatterers.size()):
gbr_selections = []
for s in tls_selections:
gbr_selections.append(s)
else:
gbr_selections = []
for gs in group_adp_selections:
gbr_selection = flex.size_t()
for gs_ in gs:
if(tls_sel_bool[gs_]):
gbr_selection.append(gs_)
if(gbr_selection.size() > 0):
gbr_selections.append(gbr_selection)
gbr_selections_one_arr = flex.size_t()
for gbs in gbr_selections:
gbr_selections_one_arr.extend(gbs)
scatterers = fmodels.fmodel_xray().xray_structure.scatterers()
for gbr_selection in gbr_selections_one_arr:
scatterers[gbr_selection].flags.set_use_u_iso(True)
group_b_manager = mmtbx.refinement.group.manager(
fmodel = fmodels.fmodel_xray(),
selections = gbr_selections,
convergence_test = group_adp_params.convergence_test,
max_number_of_iterations = 50,
number_of_macro_cycles = 1,
refine_adp = True,
use_restraints = False, #XXX do not use in TLS refinement for now
log = log)
scatterers = fmodels.fmodel_xray().xray_structure.scatterers()
for tls_selection_ in tls_selections:
for tls_selection__ in tls_selection_:
scatterers[tls_selection__].flags.set_use_u_aniso(True)
model.show_groups(tls = True, out = log)
current_target_name = fmodels.fmodel_xray().target_name
fmodels.fmodel_xray().update(target_name = "ls_wunit_k1")
tools.split_u(fmodels.fmodel_xray().xray_structure, tls_selections, offset)
self.tls_refinement_manager = tools.tls_refinement(
fmodel = fmodels.fmodel_xray(),
model = model,
selections = tls_selections,
selections_1d = tls_sel_st,
refine_T = tls_params.refine_T,
refine_L = tls_params.refine_L,
refine_S = tls_params.refine_S,
number_of_macro_cycles = tls_params.number_of_macro_cycles,
max_number_of_iterations = tls_params.max_number_of_iterations,
start_tls_value = tls_params.start_tls_value,
run_finite_differences_test = tls_params.run_finite_differences_test,
eps = tls_params.eps,
out = log,
macro_cycle = macro_cycle,
verbose = tls_params.verbose)
fmodels.fmodel_xray().update(target_name = current_target_name)
fmodels.update_xray_structure(
xray_structure = self.tls_refinement_manager.fmodel.xray_structure,
update_f_calc = True)
model.set_xray_structure(fmodels.fmodel_xray().xray_structure)
if(refine_adp_individual):
refine_adp(
model = model,
fmodels = fmodels,
target_weights = target_weights,
individual_adp_params = individual_adp_params,
adp_restraints_params = adp_restraints_params,
h_params = h_params,
log = log,
all_params = all_params,
nproc = nproc)
if(refine_adp_group):
print_statistics.make_sub_header(
text= "group isotropic ADP refinement", out = log)
group_b_manager = mmtbx.refinement.group.manager(
fmodel = fmodels.fmodel_xray(),
selections = group_adp_selections,
convergence_test = group_adp_params.convergence_test,
max_number_of_iterations = group_adp_params.max_number_of_iterations,
number_of_macro_cycles = group_adp_params.number_of_macro_cycles,
run_finite_differences_test = group_adp_params.run_finite_differences_test,
use_restraints = group_adp_params.use_restraints,
restraints_weight = group_adp_params.restraints_weight,
refine_adp = True,
log = log)
time_adp_refinement_py += timer.elapsed()
class refine_adp(object):
def __init__(
self,
model,
fmodels,
target_weights,
individual_adp_params,
adp_restraints_params,
h_params,
log,
all_params,
nproc=None):
adopt_init_args(self, locals())
d_min = fmodels.fmodel_xray().f_obs().d_min()
# initialize with defaults...
if(target_weights is not None):
import mmtbx.refinement.weights_params
wcp = mmtbx.refinement.weights_params.tw_customizations_params.extract()
for w_s_c in wcp.weight_selection_criteria:
if(d_min >= w_s_c.d_min and d_min < w_s_c.d_max):
r_free_range_width = w_s_c.r_free_range_width
r_free_r_work_gap = w_s_c.r_free_minus_r_work
mean_diff_b_iso_bonded_fraction = w_s_c.mean_diff_b_iso_bonded_fraction
min_diff_b_iso_bonded = w_s_c.min_diff_b_iso_bonded
break
# ...then customize
wsc = all_params.target_weights.weight_selection_criteria
if(wsc.r_free_minus_r_work is not None):
r_free_r_work_gap = wsc.r_free_minus_r_work
if(wsc.r_free_range_width is not None):
r_free_range_width = wsc.r_free_range_width
if(wsc.mean_diff_b_iso_bonded_fraction is not None):
mean_diff_b_iso_bonded_fraction = wsc.mean_diff_b_iso_bonded_fraction
if(wsc.min_diff_b_iso_bonded is not None):
min_diff_b_iso_bonded = wsc.min_diff_b_iso_bonded
#
print_statistics.make_sub_header(text="Individual ADP refinement", out = log)
assert fmodels.fmodel_xray().xray_structure is model.get_xray_structure()
#
fmodels.create_target_functors()
assert approx_equal(self.fmodels.fmodel_xray().target_w(),
self.fmodels.target_functor_result_xray(
compute_gradients=False).target_work())
rw = flex.double()
rf = flex.double()
rfrw = flex.double()
deltab = flex.double()
w = flex.double()
if(self.target_weights is not None):
fmth =" R-FACTORS <Bi-Bj> <B> WEIGHT TARGETS"
print >> self.log, fmth
print >> self.log, " work free delta data restr"
else:
print >> self.log, "Unresrained refinement..."
self.save_scatterers = self.fmodels.fmodel_xray().xray_structure.\
deep_copy_scatterers().scatterers()
if(self.target_weights is not None):
default_weight = self.target_weights.adp_weights_result.wx*\
self.target_weights.adp_weights_result.wx_scale
if(self.target_weights.twp.optimize_adp_weight):
wx_scale = [0.03,0.125,0.5,1.,1.5,2.,2.5,3.,3.5,4.,4.5,5.]
trial_weights = list( flex.double(wx_scale)*self.target_weights.adp_weights_result.wx )
self.wx_scale = 1
else:
trial_weights = [self.target_weights.adp_weights_result.wx]
self.wx_scale = self.target_weights.adp_weights_result.wx_scale
else:
default_weight = 1
trial_weights = [1]
self.wx_scale = 1
self.show(weight=default_weight)
trial_results = []
if nproc is None:
nproc = all_params.main.nproc
parallel = False
if (len(trial_weights) > 1) and ((nproc is Auto) or (nproc > 1)):
parallel = True
from libtbx import easy_mp
stdout_and_results = easy_mp.pool_map(
processes=nproc,
fixed_func=self.try_weight,
args=trial_weights,
func_wrapper="buffer_stdout_stderr") # XXX safer for phenix GUI
trial_results = [ r for so, r in stdout_and_results ]
else :
for weight in trial_weights:
result = self.try_weight(weight, print_stats=True)
trial_results.append(result)
for result in trial_results :
if(result is not None) and (result.r_work is not None):
if (parallel):
result.show(out=self.log)
rw .append(result.r_work)
rf .append(result.r_free)
rfrw .append(result.r_gap)
deltab .append(result.delta_b)
w .append(result.weight)
#
if(len(trial_weights)>1):
# filter by rfree-rwork
rw,rf,rfrw,deltab,w = self.score(rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w,
score_target=rfrw,score_target_value=r_free_r_work_gap,
secondary_target=deltab)
# filter by rfree
rw,rf,rfrw,deltab,w = self.score(rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w,
score_target=rf,score_target_value=flex.min(rf)+r_free_range_width)
# filter by <Bi-Bj>
delta_b_target = max(min_diff_b_iso_bonded, flex.mean(self.fmodels.
fmodel_xray().xray_structure.extract_u_iso_or_u_equiv()*
adptbx.u_as_b(1))*mean_diff_b_iso_bonded_fraction)
print >> log, " max suggested <Bi-Bj> for this run: %7.2f"%delta_b_target
print >> log, " max allowed Rfree-Rwork gap: %5.1f"%r_free_r_work_gap
print >> log, " range of equivalent Rfree: %5.1f"%r_free_range_width
rw,rf,rfrw,deltab,w = self.score(rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w,
score_target=deltab,score_target_value=delta_b_target)
# select the result with lowest rfree
sel = flex.sort_permutation(rf)
rw,rf,rfrw,deltab,w= self.select(
rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w,sel=sel)
#
w_best = w[0]
rw_best = rw[0]
print >> self.log, "Best ADP weight: %8.3f"%w_best
#
self.target_weights.adp_weights_result.wx = w_best
self.target_weights.adp_weights_result.wx_scale = 1
best_u_star = None
best_u_iso = None
for result in trial_results :
if(abs(result.weight-w_best)<=1.e-8):
best_u_star = result.u_star
best_u_iso = result.u_iso
break
if(best_u_iso is None) : # XXX this probably shouldn't happen...
self.fmodels.fmodel_xray().xray_structure.replace_scatterers(
self.save_scatterers.deep_copy())
else :
assert (best_u_star is not None)
xrs = self.fmodels.fmodel_xray().xray_structure
xrs.set_u_iso(values=best_u_iso)
xrs.scatterers().set_u_star(best_u_star)
new_u_iso = xrs.scatterers().extract_u_iso()
assert (new_u_iso.all_eq(best_u_iso))
self.fmodels.update_xray_structure(
xray_structure = self.fmodels.fmodel_xray().xray_structure,
update_f_calc = True)
print >> self.log, "Accepted refinement result:"
# reset alpha/beta parameters - if this is not done, the assertion
# below will fail
fmodels.create_target_functors()
if(self.fmodels.fmodel_neutron() is None):
assert approx_equal(self.fmodels.fmodel_xray().r_work()*100, rw_best,
eps=0.001)
    # this needs to be done again, just in case
fmodels.create_target_functors()
self.show(weight=w_best)
self.fmodels.fmodel_xray().xray_structure.tidy_us()
self.fmodels.update_xray_structure(
xray_structure = self.fmodels.fmodel_xray().xray_structure,
update_f_calc = True)
fmodels.create_target_functors()
assert approx_equal(self.fmodels.fmodel_xray().target_w(),
self.fmodels.target_functor_result_xray(
compute_gradients=False).target_work())
self.model.set_xray_structure(self.fmodels.fmodel_xray().xray_structure)
# XXX parallelized
def try_weight(self, weight, print_stats=False):
if(self.target_weights is not None):
self.fmodels.fmodel_xray().xray_structure.replace_scatterers(
self.save_scatterers.deep_copy())
self.fmodels.update_xray_structure(
xray_structure = self.fmodels.fmodel_xray().xray_structure,
update_f_calc = True)
self.target_weights.adp_weights_result.wx = weight
self.target_weights.adp_weights_result.wx_scale = self.wx_scale
minimized = self.minimize()
wt = weight*self.wx_scale
result = self.show(weight=wt, print_stats=print_stats)
return result
def show(self, weight = None, prefix = "", show_neutron=True,
print_stats=True):
deltab = self.model.rms_b_iso_or_b_equiv_bonded()
r_work = self.fmodels.fmodel_xray().r_work()*100.
r_free = self.fmodels.fmodel_xray().r_free()*100.
mean_b = flex.mean(
self.model.get_xray_structure().extract_u_iso_or_u_equiv())*adptbx.u_as_b(1)
if(deltab is None):
print >> self.log, " r_work=%5.2f r_free=%5.2f"%(r_work, r_free)
return None
neutron_r_work = neutron_r_free = None
if (show_neutron) and (self.fmodels.fmodel_neutron() is not None):
neutron_r_work = self.fmodels.fmodel_neutron().r_work()*100.
neutron_r_free = self.fmodels.fmodel_neutron().r_free()*100.
xrs = self.fmodels.fmodel_xray().xray_structure
result = weight_result(
r_work=r_work,
r_free=r_free,
delta_b=deltab,
mean_b=mean_b,
weight=weight,
xray_target=self.fmodels.fmodel_xray().target_w(),
neutron_r_work=neutron_r_work,
neutron_r_free=neutron_r_free,
u_star=xrs.scatterers().extract_u_star(),
u_iso=xrs.scatterers().extract_u_iso())
if (print_stats):
result.show(out=self.log)
return result
def score(self, rw, rf, rfrw, deltab, w, score_target, score_target_value,
secondary_target=None):
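    # Keep trials with 0 < score_target < score_target_value; if none pass,
    # fall back to ranking all trials by score_target (or by secondary_target
    # when given). In either case only the first surviving trial is kept.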
sel = score_target < score_target_value
sel &= score_target > 0
if(sel.count(True)>0):
rw,rf,rfrw,deltab,w = self.select(
rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w, sel=sel)
else:
if(secondary_target is None):
sel = flex.sort_permutation(score_target)
else:
sel = flex.sort_permutation(secondary_target)
rw,rf,rfrw,deltab,w = self.select(
rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w, sel=sel)
#
rw = flex.double([rw [0]])
rf = flex.double([rf [0]])
rfrw = flex.double([rfrw [0]])
deltab = flex.double([deltab[0]])
w = flex.double([w [0]])
return rw, rf, rfrw, deltab, w
def select(self, rw, rf, rfrw, deltab, w, sel):
rw = rw .select(sel)
rf = rf .select(sel)
rfrw = rfrw .select(sel)
deltab = deltab.select(sel)
w = w .select(sel)
return rw, rf, rfrw, deltab, w
def minimize(self):
utils.assert_xray_structures_equal(
x1 = self.fmodels.fmodel_xray().xray_structure,
x2 = self.model.get_xray_structure())
self.model.set_refine_individual_adp()
self.run_lbfgs()
self.model.set_xray_structure(self.fmodels.fmodel_xray().xray_structure)
#assert minimized.xray_structure is self.model.get_xray_structure()
#utils.assert_xray_structures_equal(
# x1 = minimized.xray_structure,
# x2 = self.model.get_xray_structure())
#return minimized
def run_lbfgs(self):
if(self.model.get_ncs_groups() is None or
not self.all_params.ncs.constraints.apply_to_adp):
lbfgs_termination_params = scitbx.lbfgs.termination_parameters(
max_iterations = self.individual_adp_params.iso.max_number_of_iterations)
is_neutron_scat_table = False
if(self.all_params.main.scattering_table == "neutron"):
is_neutron_scat_table = True
minimized = minimization.lbfgs(
restraints_manager = self.model.restraints_manager,
fmodels = self.fmodels,
model = self.model,
refine_adp = True,
is_neutron_scat_table = is_neutron_scat_table,
lbfgs_termination_params = lbfgs_termination_params,
iso_restraints = self.adp_restraints_params.iso,
verbose = 0,
target_weights = self.target_weights,
h_params = self.h_params)
elif(self.all_params.ncs.constraints.apply_to_coordinates):
fmodel = self.fmodels.fmodel_xray()
# update NCS groups
import mmtbx.ncs.ncs_utils as nu
nu.get_list_of_best_ncs_copy_map_correlation(
ncs_groups = self.model.get_ncs_groups(),
fmodel = fmodel)
assert "individual_adp" in self.all_params.refine.strategy
minimized = minimization.run_constrained(
model = self.model,
fmodel = fmodel,
target_weight = self.target_weights.xyz_weights_result.wx,
log = self.log,
params = self.all_params,
refine_u_iso = True,
prefix = "NCS constrained ADP refinement").minimized
self.model.set_xray_structure(fmodel.xray_structure)
else: raise RuntimeError("Bad ncs options.")
class weight_result(object):
def __init__(self, r_work, r_free, delta_b, mean_b, weight, xray_target,
neutron_r_work, neutron_r_free, u_star, u_iso):
adopt_init_args(self, locals())
self.r_gap = r_free - r_work
def show(self, out, prefix=""):
if (out is None) : return
if(len(prefix.strip())>0): prefix += " "
format = prefix+"%5.2f %5.2f %6.2f %6.3f %6.3f %6.3f %6.3f"
print >> out, format % (self.r_work, self.r_free, self.r_gap, self.delta_b,
self.mean_b, self.weight, self.xray_target)
if (self.neutron_r_work is not None):
print >> out, ""
print >> out, "Neutron data: r_work=%5.2f r_free=%5.2f"%(
self.neutron_r_work, self.neutron_r_free)
| 39.751342
| 97
| 0.634895
|
e9b64ffc3a51400ff4d8c3b45ab360de3f969bc3
| 31,104
|
py
|
Python
|
Src/Hosts/Silverlight/Tests/pylib/unittest.py
|
jdhardy/dlr
|
dca078fbf9d103fad4dcabda76795a23d82106bc
|
[
"Apache-2.0"
] | null | null | null |
Src/Hosts/Silverlight/Tests/pylib/unittest.py
|
jdhardy/dlr
|
dca078fbf9d103fad4dcabda76795a23d82106bc
|
[
"Apache-2.0"
] | null | null | null |
Src/Hosts/Silverlight/Tests/pylib/unittest.py
|
jdhardy/dlr
|
dca078fbf9d103fad4dcabda76795a23d82106bc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmenticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEquals((1 + 2), 3)
self.assertEquals(0 + 1, 1)
def testMultiply(self):
self.assertEquals((0 * 10), 0)
self.assertEquals((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/lib/module-unittest.html
Copyright (c) 1999-2003 Steve Purcell
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
'''
__author__ = "Steve Purcell"
__email__ = "stephen_purcell at yahoo dot com"
__version__ = "#Revision: 1.63 $"[11:-2]
import time
import sys
import traceback
import os
import types
##############################################################################
# Exported classes and functions
##############################################################################
__all__ = ['TestResult', 'TestCase', 'TestSuite', 'TextTestRunner',
'TestLoader', 'FunctionTestCase', 'main', 'defaultTestLoader']
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
##############################################################################
# Backward compatibility
##############################################################################
if sys.version_info[:2] < (2, 2):
def isinstance(obj, clsinfo):
import __builtin__
if type(clsinfo) in (tuple, list):
for cls in clsinfo:
if cls is type: cls = types.ClassType
if __builtin__.isinstance(obj, cls):
return 1
return 0
else: return __builtin__.isinstance(obj, clsinfo)
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
##############################################################################
# Test framework core
##############################################################################
# All classes defined herein are 'new-style' classes, allowing use of 'super()'
__metaclass__ = type
def _strclass(cls):
return "%s.%s" % (cls.__module__, cls.__name__)
__unittest = 1
class TestResult:
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
def __init__(self):
self.failures = []
self.errors = []
self.testsRun = 0
self.shouldStop = False
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun = self.testsRun + 1
def stopTest(self, test):
"Called when the given test has been run"
pass
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
return ''.join(traceback.format_exception(exctype, value, tb, length))
return ''.join(traceback.format_exception(exctype, value, tb))
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return "<%s run=%i errors=%i failures=%i>" % \
(_strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures))
class TestCase:
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
try:
self._testMethodName = methodName
testMethod = getattr(self, methodName)
self._testMethodDoc = testMethod.__doc__
except AttributeError:
raise ValueError, "no such test method in %s: %s" % \
(self.__class__, methodName)
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
def countTestCases(self):
return 1
def defaultTestResult(self):
return TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (_strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return False
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, _strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(_strclass(self.__class__), self._testMethodName)
def run(self, result=None):
if result is None: result = self.defaultTestResult()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
try:
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
return
ok = False
try:
testMethod()
ok = True
except self.failureException:
result.addFailure(self, self._exc_info())
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
ok = False
if ok: result.addSuccess(self)
finally:
result.stopTest(self)
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
def _exc_info(self):
"""Return a version of sys.exc_info() with the traceback frame
minimised; usually the top level of the traceback frame is not
needed.
"""
return sys.exc_info()
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException, msg
def failIf(self, expr, msg=None):
"Fail the test if the expression is true."
if expr: raise self.failureException, msg
def failUnless(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr: raise self.failureException, msg
def failUnlessRaises(self, excClass, callableObj, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
"""
try:
callableObj(*args, **kwargs)
except excClass:
return
else:
if hasattr(excClass,'__name__'): excName = excClass.__name__
else: excName = str(excClass)
raise self.failureException, "%s not raised" % excName
def failUnlessEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
if not first == second:
raise self.failureException, \
(msg or '%r != %r' % (first, second))
def failIfEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if first == second:
raise self.failureException, \
(msg or '%r == %r' % (first, second))
def failUnlessAlmostEqual(self, first, second, places=7, msg=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero.
Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
"""
if round(abs(second-first), places) != 0:
raise self.failureException, \
(msg or '%r != %r within %r places' % (first, second, places))
def failIfAlmostEqual(self, first, second, places=7, msg=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero.
Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
"""
if round(abs(second-first), places) == 0:
raise self.failureException, \
(msg or '%r == %r within %r places' % (first, second, places))
# Synonyms for assertion methods
assertEqual = assertEquals = failUnlessEqual
assertNotEqual = assertNotEquals = failIfEqual
assertAlmostEqual = assertAlmostEquals = failUnlessAlmostEqual
assertNotAlmostEqual = assertNotAlmostEquals = failIfAlmostEqual
assertRaises = failUnlessRaises
assert_ = assertTrue = failUnless
assertFalse = failIf
class TestSuite:
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def __init__(self, tests=()):
self._tests = []
self.addTests(tests)
def __repr__(self):
return "<%s tests=%s>" % (_strclass(self.__class__), self._tests)
__str__ = __repr__
def __eq__(self, other):
if type(self) is not type(other):
return False
return self._tests == other._tests
def __ne__(self, other):
return not self == other
# Can't guarantee hash invariant, so flag as unhashable
__hash__ = None
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = 0
for test in self._tests:
cases += test.countTestCases()
return cases
def addTest(self, test):
# sanity checks
if not hasattr(test, '__call__'):
raise TypeError("the test to add must be callable")
if (isinstance(test, (type, types.ClassType)) and
issubclass(test, (TestCase, TestSuite))):
raise TypeError("TestCases and TestSuites must be instantiated "
"before passing them to addTest()")
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, basestring):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def run(self, result):
for test in self._tests:
if result.shouldStop:
break
test(result)
return result
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self._tests: test.debug()
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None,
description=None):
TestCase.__init__(self)
self.__setUpFunc = setUp
self.__tearDownFunc = tearDown
self.__testFunc = testFunc
self.__description = description
def setUp(self):
if self.__setUpFunc is not None:
self.__setUpFunc()
def tearDown(self):
if self.__tearDownFunc is not None:
self.__tearDownFunc()
def runTest(self):
self.__testFunc()
def id(self):
return self.__testFunc.__name__
def __eq__(self, other):
if type(self) is not type(other):
return False
return self.__setUpFunc == other.__setUpFunc and \
self.__tearDownFunc == other.__tearDownFunc and \
self.__testFunc == other.__testFunc and \
self.__description == other.__description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self.__setUpFunc, self.__tearDownFunc,
self.__testFunc, self.__description))
def __str__(self):
return "%s (%s)" % (_strclass(self.__class__), self.__testFunc.__name__)
def __repr__(self):
return "<%s testFunc=%s>" % (_strclass(self.__class__), self.__testFunc)
def shortDescription(self):
if self.__description is not None: return self.__description
doc = self.__testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
##############################################################################
# Locating and loading tests
##############################################################################
class TestLoader:
"""This class is responsible for loading tests according to various
criteria and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = cmp
suiteClass = TestSuite
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, TestSuite):
raise TypeError("Test cases should not be derived from TestSuite. Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
return self.suiteClass(map(testCaseClass, testCaseNames))
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, (type, types.ClassType)) and
issubclass(obj, TestCase)):
tests.append(self.loadTestsFromTestCase(obj))
return self.suiteClass(tests)
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy: raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if type(obj) == types.ModuleType:
return self.loadTestsFromModule(obj)
elif (isinstance(obj, (type, types.ClassType)) and
issubclass(obj, TestCase)):
return self.loadTestsFromTestCase(obj)
elif (type(obj) == types.UnboundMethodType and
isinstance(parent, (type, types.ClassType)) and
issubclass(parent, TestCase)):
return TestSuite([parent(obj.__name__)])
elif isinstance(obj, TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, TestSuite):
return test
elif isinstance(test, TestCase):
return TestSuite([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
defaultTestLoader = TestLoader()
##############################################################################
# Patches for old functions: these functions should be considered obsolete
##############################################################################
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass: loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp, suiteClass=TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp, suiteClass=TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
##############################################################################
# Text UI
##############################################################################
class _WritelnDecorator:
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg: self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class _TextTestResult(TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
TestResult.__init__(self)
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
if self.descriptions:
return test.shortDescription() or str(test)
else:
return str(test)
def startTest(self, test):
TestResult.startTest(self, test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
TestResult.addSuccess(self, test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
TestResult.addError(self, test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
TestResult.addFailure(self, test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
class TextTestRunner:
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
def _makeResult(self):
return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
##############################################################################
# Facilities for running tests from the command line
##############################################################################
class TestProgram:
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def __init__(self, module='__main__', defaultTest=None,
argv=None, testRunner=TextTestRunner,
testLoader=defaultTestLoader):
if type(module) == type(''):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.verbosity = 1
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg: print msg
print self.USAGE % self.__dict__
sys.exit(2)
def parseArgs(self, argv):
import getopt
try:
options, args = getopt.getopt(argv[1:], 'hHvq',
['help','verbose','quiet'])
for opt, value in options:
if opt in ('-h','-H','--help'):
self.usageExit()
if opt in ('-q','--quiet'):
self.verbosity = 0
if opt in ('-v','--verbose'):
self.verbosity = 2
if len(args) == 0 and self.defaultTest is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
return
if len(args) > 0:
self.testNames = args
else:
self.testNames = (self.defaultTest,)
self.createTests()
except getopt.error, msg:
self.usageExit(msg)
def createTests(self):
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def runTests(self):
if isinstance(self.testRunner, (type, types.ClassType)):
try:
testRunner = self.testRunner(verbosity=self.verbosity)
except TypeError:
# didn't accept the verbosity argument
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
result = testRunner.run(self.test)
return not result.wasSuccessful()
main = TestProgram
##############################################################################
# Executing this module from the command line
##############################################################################
if __name__ == "__main__":
main(module=None)
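# Illustrative usage sketch (an addition for clarity, not part of the original module):
# a suite can also be built and run programmatically with the classes defined above,
# bypassing TestProgram/main() entirely.  Assuming MyTestCase is a TestCase subclass:
#
#     suite = TestLoader().loadTestsFromTestCase(MyTestCase)
#     TextTestRunner(verbosity=2).run(suite)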
| 35.628866
| 120
| 0.59539
|
46ae6866889e3c0f9da59c6f973da7a158d84e1f
| 13,257
|
py
|
Python
|
epydoc/markup/doctest.py
|
AnjaneyuluBatta505/epydoc
|
e074483d519912218b1fb1d4eacb492076a1ed73
|
[
"MIT"
] | 1
|
2022-01-29T08:19:34.000Z
|
2022-01-29T08:19:34.000Z
|
epydoc/markup/doctest.py
|
AnjaneyuluBatta505/epydoc
|
e074483d519912218b1fb1d4eacb492076a1ed73
|
[
"MIT"
] | null | null | null |
epydoc/markup/doctest.py
|
AnjaneyuluBatta505/epydoc
|
e074483d519912218b1fb1d4eacb492076a1ed73
|
[
"MIT"
] | null | null | null |
#
# doctest.py: Syntax Highlighting for doctest blocks
# Edward Loper
#
# Created [06/28/03 02:52 AM]
# $Id: restructuredtext.py 1210 2006-04-10 13:25:50Z edloper $
#
"""
Syntax highlighting for doctest blocks. This module defines two
functions, L{doctest_to_html()} and L{doctest_to_latex()}, which can
be used to perform syntax highlighting on doctest blocks. It also
defines the more general C{colorize_doctest()}, which could be used to
do syntax highlighting on doctest blocks with other output formats.
(Both C{doctest_to_html()} and C{doctest_to_latex()} are defined using
C{colorize_doctest()}.)
"""
from __future__ import absolute_import
__docformat__ = 'epytext en'
import re
from epydoc.util import plaintext_to_html, plaintext_to_latex
__all__ = ['doctest_to_html', 'doctest_to_latex',
'DoctestColorizer', 'XMLDoctestColorizer',
'HTMLDoctestColorizer', 'LaTeXDoctestColorizer']
def doctest_to_html(s):
"""
Perform syntax highlighting on the given doctest string, and
return the resulting HTML code. This code consists of a C{<pre>}
block with class=py-doctest. Syntax highlighting is performed
using the following css classes:
- C{py-prompt} -- the Python PS1 prompt (>>>)
- C{py-more} -- the Python PS2 prompt (...)
- C{py-keyword} -- a Python keyword (for, if, etc.)
- C{py-builtin} -- a Python builtin name (abs, dir, etc.)
- C{py-string} -- a string literal
- C{py-comment} -- a comment
- C{py-except} -- an exception traceback (up to the next >>>)
- C{py-output} -- the output from a doctest block.
- C{py-defname} -- the name of a function or class defined by
a C{def} or C{class} statement.
"""
return HTMLDoctestColorizer().colorize_doctest(s)
def doctest_to_latex(s):
"""
Perform syntax highlighting on the given doctest string, and
return the resulting LaTeX code. This code consists of an
C{alltt} environment. Syntax highlighting is performed using
the following new latex commands, which must be defined externally:
- C{\pysrcprompt} -- the Python PS1 prompt (>>>)
- C{\pysrcmore} -- the Python PS2 prompt (...)
- C{\pysrckeyword} -- a Python keyword (for, if, etc.)
- C{\pysrcbuiltin} -- a Python builtin name (abs, dir, etc.)
- C{\pysrcstring} -- a string literal
- C{\pysrccomment} -- a comment
- C{\pysrcexcept} -- an exception traceback (up to the next >>>)
- C{\pysrcoutput} -- the output from a doctest block.
- C{\pysrcdefname} -- the name of a function or class defined by
a C{def} or C{class} statement.
"""
return LaTeXDoctestColorizer().colorize_doctest(s)
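# Usage sketch (an illustrative addition, not part of the original module): the LaTeX
# returned by doctest_to_latex() only renders if the \pysrc* commands listed above are
# defined in the document preamble, e.g. something along the lines of
#   \newcommand{\pysrcprompt}[1]{\textbf{#1}}
# for each command; the exact styling is left to the caller.
#
#     latex_body = doctest_to_latex(">>> print 1 + 1\n2\n")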
class DoctestColorizer:
"""
An abstract base class for performing syntax highlighting on
doctest blocks and other bits of Python code. Subclasses should
provide definitions for:
- The L{markup()} method, which takes a substring and a tag, and
returns a colorized version of the substring.
- The L{PREFIX} and L{SUFFIX} variables, which will be added
to the beginning and end of the strings returned by
L{colorize_codeblock} and L{colorize_doctest}.
"""
#: A string that is added to the beginning of the strings
#: returned by L{colorize_codeblock} and L{colorize_doctest}.
#: Typically, this string begins a preformatted area.
PREFIX = None
#: A string that is added to the end of the strings
#: returned by L{colorize_codeblock} and L{colorize_doctest}.
#: Typically, this string ends a preformatted area.
SUFFIX = None
#: The string used to divide lines
NEWLINE = '\n'
#: A list of the names of all Python keywords. ('as' is included
#: even though it is technically not a keyword.)
_KEYWORDS = ("and del for is raise"
"assert elif from lambda return"
"break else global not try"
"class except if or while"
"continue exec import pass yield"
"def finally in print as").split()
#: A list of all Python builtins.
_BUILTINS = [_BI for _BI in dir(__builtins__)
if not _BI.startswith('__')]
#: A regexp group that matches keywords.
_KEYWORD_GRP = '|'.join([r'\b%s\b' % _KW for _KW in _KEYWORDS])
#: A regexp group that matches Python builtins.
_BUILTIN_GRP = (r'(?<!\.)(?:%s)' % '|'.join([r'\b%s\b' % _BI
for _BI in _BUILTINS]))
#: A regexp group that matches Python strings.
_STRING_GRP = '|'.join(
[r'("""("""|.*?((?!").)"""))', r'("("|.*?((?!").)"))',
r"('''('''|.*?[^\\']'''))", r"('('|.*?[^\\']'))"])
#: A regexp group that matches Python comments.
_COMMENT_GRP = '(#.*?$)'
#: A regexp group that matches Python ">>>" prompts.
_PROMPT1_GRP = r'^[ \t]*>>>(?:[ \t]|$)'
#: A regexp group that matches Python "..." prompts.
_PROMPT2_GRP = r'^[ \t]*\.\.\.(?:[ \t]|$)'
#: A regexp group that matches function and class definitions.
_DEFINE_GRP = r'\b(?:def|class)[ \t]+\w+'
#: A regexp that matches Python prompts
PROMPT_RE = re.compile('(%s|%s)' % (_PROMPT1_GRP, _PROMPT2_GRP),
re.MULTILINE | re.DOTALL)
#: A regexp that matches Python "..." prompts.
PROMPT2_RE = re.compile('(%s)' % _PROMPT2_GRP,
re.MULTILINE | re.DOTALL)
#: A regexp that matches doctest exception blocks.
EXCEPT_RE = re.compile(r'^[ \t]*Traceback \(most recent call last\):.*',
re.DOTALL | re.MULTILINE)
#: A regexp that matches doctest directives.
DOCTEST_DIRECTIVE_RE = re.compile(r'#[ \t]*doctest:.*')
#: A regexp that matches all of the regions of a doctest block
#: that should be colored.
DOCTEST_RE = re.compile(
r'(.*?)((?P<STRING>%s)|(?P<COMMENT>%s)|(?P<DEFINE>%s)|'
r'(?P<KEYWORD>%s)|(?P<BUILTIN>%s)|'
r'(?P<PROMPT1>%s)|(?P<PROMPT2>%s)|(?P<EOS>\Z))' % (
_STRING_GRP, _COMMENT_GRP, _DEFINE_GRP, _KEYWORD_GRP, _BUILTIN_GRP,
_PROMPT1_GRP, _PROMPT2_GRP), re.MULTILINE | re.DOTALL)
#: This regular expression is used to find doctest examples in a
#: string. This is copied from the standard Python doctest.py
#: module (after the refactoring in Python 2.4+).
DOCTEST_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)* # PS2 lines
\n?)
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
def colorize_inline(self, s):
"""
Colorize a string containing Python code. Do not add the
L{PREFIX} and L{SUFFIX} strings to the returned value. This
method is intended for generating syntax-highlighted strings
that are appropriate for inclusion as inline expressions.
"""
return self.DOCTEST_RE.sub(self.subfunc, s)
def colorize_codeblock(self, s):
"""
Colorize a string containing only Python code. This method
differs from L{colorize_doctest} in that it will not search
for doctest prompts when deciding how to colorize the string.
"""
body = self.DOCTEST_RE.sub(self.subfunc, s)
return self.PREFIX + body + self.SUFFIX
def colorize_doctest(self, s, strip_directives=False):
"""
Colorize a string containing one or more doctest examples.
"""
output = []
charno = 0
for m in self.DOCTEST_EXAMPLE_RE.finditer(s):
# Parse the doctest example:
pysrc, want = m.group('source', 'want')
# Pre-example text:
output.append(self.NEWLINE.join(s[charno:m.start()].split('\n')))
# Example source code:
output.append(self.DOCTEST_RE.sub(self.subfunc, pysrc))
# Example output:
            if want:
                if self.EXCEPT_RE.match(want):
                    output.append(self.NEWLINE.join([self.markup(line, 'except')
                                                     for line in want.split('\n')]))
                else:
                    output.append(self.NEWLINE.join([self.markup(line, 'output')
                                                     for line in want.split('\n')]))
# Update charno
charno = m.end()
# Add any remaining post-example text.
output.append(self.NEWLINE.join(s[charno:].split('\n')))
return self.PREFIX + ''.join(output) + self.SUFFIX
def subfunc(self, match):
other, text = match.group(1, 2)
#print('M %20r %20r' % (other, text)) # <- for debugging
if other:
other = self.NEWLINE.join([self.markup(line, 'other')
for line in other.split('\n')])
if match.group('PROMPT1'):
return other + self.markup(text, 'prompt')
elif match.group('PROMPT2'):
return other + self.markup(text, 'more')
elif match.group('KEYWORD'):
return other + self.markup(text, 'keyword')
elif match.group('BUILTIN'):
return other + self.markup(text, 'builtin')
elif match.group('COMMENT'):
return other + self.markup(text, 'comment')
elif match.group('STRING') and '\n' not in text:
return other + self.markup(text, 'string')
elif match.group('STRING'):
# It's a multiline string; colorize the string & prompt
# portion of each line.
pieces = []
for line in text.split('\n'):
if self.PROMPT2_RE.match(line):
if len(line) > 4:
pieces.append(self.markup(line[:4], 'more') +
self.markup(line[4:], 'string'))
else:
pieces.append(self.markup(line[:4], 'more'))
elif line:
pieces.append(self.markup(line, 'string'))
else:
pieces.append('')
return other + self.NEWLINE.join(pieces)
elif match.group('DEFINE'):
m = re.match('(?P<def>\w+)(?P<space>\s+)(?P<name>\w+)', text)
return other + (self.markup(m.group('def'), 'keyword') +
self.markup(m.group('space'), 'other') +
self.markup(m.group('name'), 'defname'))
elif match.group('EOS') is not None:
return other
else:
assert 0, 'Unexpected match!'
def markup(self, s, tag):
"""
Apply syntax highlighting to a single substring from a doctest
block. C{s} is the substring, and C{tag} is the tag that
should be applied to the substring. C{tag} will be one of the
following strings:
- C{prompt} -- the Python PS1 prompt (>>>)
- C{more} -- the Python PS2 prompt (...)
- C{keyword} -- a Python keyword (for, if, etc.)
- C{builtin} -- a Python builtin name (abs, dir, etc.)
- C{string} -- a string literal
- C{comment} -- a comment
- C{except} -- an exception traceback (up to the next >>>)
- C{output} -- the output from a doctest block.
- C{defname} -- the name of a function or class defined by
a C{def} or C{class} statement.
- C{other} -- anything else (does *not* include output.)
"""
raise AssertionError("Abstract method")
class XMLDoctestColorizer(DoctestColorizer):
"""
A subclass of DoctestColorizer that generates XML-like output.
This class is mainly intended to be used for testing purposes.
"""
PREFIX = '<colorized>\n'
SUFFIX = '</colorized>\n'
def markup(self, s, tag):
s = s.replace('&', '&').replace('<', '<').replace('>', '>')
if tag == 'other': return s
else: return '<%s>%s</%s>' % (tag, s, tag)
class HTMLDoctestColorizer(DoctestColorizer):
"""A subclass of DoctestColorizer that generates HTML output."""
PREFIX = '<pre class="py-doctest">\n'
SUFFIX = '</pre>\n'
def markup(self, s, tag):
if tag == 'other':
return plaintext_to_html(s)
else:
return ('<span class="py-%s">%s</span>' %
(tag, plaintext_to_html(s)))
class LaTeXDoctestColorizer(DoctestColorizer):
"""A subclass of DoctestColorizer that generates LaTeX output."""
PREFIX = ('\\begin{alltt}')
SUFFIX = '\\end{alltt}\n'
NEWLINE = '\\\\'
def markup(self, s, tag):
if tag == 'other':
return plaintext_to_latex(s)
else:
return '\\pysrc%s{%s}' % (tag, plaintext_to_latex(s))
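# Minimal usage sketch (an illustrative addition, not part of the original module).
# It exercises the public entry points on a tiny doctest string; the exact markup
# produced depends on plaintext_to_html/plaintext_to_latex from epydoc.util.
if __name__ == '__main__':
    example = '>>> print 1 + 1\n2\n'
    # HTML: a <pre class="py-doctest"> block with py-prompt/py-output spans.
    print doctest_to_html(example)
    # LaTeX: an alltt environment using the \pysrc* commands.
    print doctest_to_latex(example)
    # XML-ish output, mainly useful for eyeballing how the colorizer tokenizes.
    print XMLDoctestColorizer().colorize_doctest(example)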
| 41.557994
| 79
| 0.569058
|
8d012bd572a9939e549404f28fab9e887e13ef51
| 3,766
|
py
|
Python
|
python/test/function/test_binary_connect_convolution.py
|
syoyo/nnabla
|
b776b68dcdffe894cac1233dfd07c301415cc0fb
|
[
"Apache-2.0"
] | 1
|
2020-08-03T12:49:19.000Z
|
2020-08-03T12:49:19.000Z
|
python/test/function/test_binary_connect_convolution.py
|
langbin2014/nnabla
|
e94bac5bed65337010e2ac07a5937fb862ab2dd8
|
[
"Apache-2.0"
] | null | null | null |
python/test/function/test_binary_connect_convolution.py
|
langbin2014/nnabla
|
e94bac5bed65337010e2ac07a5937fb862ab2dd8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import refs
from nbla_test_utils import list_context
ctxs = list_context('BinaryConnectConvolution')
def binarize(x, quantize_zero_to):
y = np.sign(x)
y[y == 0] = quantize_zero_to
return y
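# Illustrative note (an addition, not part of the original test module): binarize maps
# every weight to {-1, +1} by sign and sends exact zeros to `quantize_zero_to`, e.g.
#   binarize(np.array([-0.3, 0.0, 2.5]), quantize_zero_to=1.0) -> array([-1.,  1.,  1.])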
def ref_convolution(x, w, b, base_axis, pad, stride, dilation, group, quantize_zero_to):
y = []
for xx in x.reshape((-1,) + x.shape[base_axis:]):
y += [refs.convolution_2d(xx, w, b, pad, stride,
dilation, group)[np.newaxis]]
y = np.vstack(y)
return y.reshape(x.shape[:base_axis] + y.shape[1:])
def ref_binary_connect_convolution(x, w, wb, b, base_axis, pad, stride, dilation, group, quantize_zero_to):
return ref_convolution(x, binarize(w, quantize_zero_to), b, base_axis, pad, stride, dilation, group, quantize_zero_to)
def ref_grad_binary_connect_convolution(x, w, wb, b, dy, base_axis, pad, stride, dilation, group, quantize_zero_to):
# Set variables
vx = nn.Variable(x.shape, need_grad=True)
vx.d = x
vx.grad.zero()
vw = nn.Variable(w.shape, need_grad=True)
vw.d = binarize(w, quantize_zero_to)
vw.grad.zero()
vb = None
if b is not None:
vb = nn.Variable(b.shape, need_grad=True)
vb.d = b
vb.grad.zero()
# Execute binarized forward and back prop.
with nn.auto_forward():
vy = F.convolution(vx, vw, vb, base_axis, pad, stride, dilation, group)
vy.backward(dy)
# Return grads
if b is None:
return np.concatenate([vx.g.flat, vw.g.flat])
return np.concatenate([vx.g.flat, vw.g.flat, vb.g.flat])
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("inshape, kernel, outmaps, pad, stride, dilation",
[((2, 2, 10, 10), (3, 2), 4, (3, 0), (1, 2), (2, 1)), ])
@pytest.mark.parametrize("group", [1, 2])
@pytest.mark.parametrize("with_bias", [True, False])
@pytest.mark.parametrize("quantize_zero_to", [0.0, -1.0, 1.0])
def test_convolution_2d_forward_backward(inshape, kernel, outmaps, pad, stride,
dilation, group, with_bias, quantize_zero_to, seed, ctx,
func_name):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
i = rng.randn(*inshape).astype(np.float32)
inmaps = inshape[-3]
kshape = (outmaps,) + (inmaps // group,) + kernel
k = rng.randn(*kshape).astype(np.float32)
base_axis = len(inshape) - 3
b = None
if with_bias:
b = rng.randn(outmaps).astype(np.float32)
inputs = [i, k, np.zeros_like(k), b]
function_tester(rng, F.binary_connect_convolution, ref_binary_connect_convolution, inputs,
func_args=[base_axis, pad, stride,
dilation, group, quantize_zero_to],
atol_f=1e-4, atol_b=3e-3, atol_accum=1e-5, dstep=1e-2, backward=[True, True, False, True],
ctx=ctx, func_name=func_name, ref_grad=ref_grad_binary_connect_convolution)
| 38.824742
| 122
| 0.656665
|
3be486a81cabe9c7ed16b4a19fa3609712e023ba
| 15,738
|
py
|
Python
|
mitmproxy/net/quic/tproxy.py
|
Meitinger/mitmproxy
|
f5d872d8fc96b97256f48b55aa137e9d81cb8edf
|
[
"MIT"
] | 7
|
2020-11-29T11:42:44.000Z
|
2022-03-28T15:31:38.000Z
|
mitmproxy/net/quic/tproxy.py
|
Meitinger/mitmproxy
|
f5d872d8fc96b97256f48b55aa137e9d81cb8edf
|
[
"MIT"
] | null | null | null |
mitmproxy/net/quic/tproxy.py
|
Meitinger/mitmproxy
|
f5d872d8fc96b97256f48b55aa137e9d81cb8edf
|
[
"MIT"
] | 1
|
2021-08-31T05:02:59.000Z
|
2021-08-31T05:02:59.000Z
|
import asyncio
import asyncio.selector_events
import collections
import ipaddress
import socket
import struct
from typing import cast, Any, Callable, Dict, Optional, Tuple
from aioquic.asyncio import QuicConnectionProtocol
from aioquic.asyncio.protocol import QuicStreamHandler
from aioquic.asyncio.server import QuicServer
from aioquic.quic.configuration import QuicConfiguration
from aioquic.tls import SessionTicketFetcher, SessionTicketHandler
IP_PKTINFO = getattr(socket, "IP_PKTINFO", 8)
IP_RECVORIGDSTADDR = getattr(socket, "IP_RECVORIGDSTADDR", 20)
sockaddr = tuple
def _native_sockaddr_to_python(sockaddr_in: bytes) -> sockaddr:
# see makesockaddr in socketmodule.c
if len(sockaddr_in) < 2:
raise ValueError("sockaddr_in too short")
(family,) = struct.unpack("h", sockaddr_in[:2])
if family == socket.AF_INET:
if len(sockaddr_in) < 16:
raise ValueError("sockaddr_in too short for IPv4")
port, in_addr, _ = struct.unpack("!H4s8s", sockaddr_in[2:16])
addr = (str(ipaddress.IPv4Address(in_addr)), port)
elif family == socket.AF_INET6:
if len(sockaddr_in) < 28:
raise ValueError("sockaddr_in too short for IPv6")
        port, flowinfo, in6_addr, scope_id = struct.unpack("!HL16sL", sockaddr_in[2:28])
addr = (
str(ipaddress.IPv6Address(in6_addr)),
port,
flowinfo,
scope_id,
)
else:
raise NotImplementedError
return addr
def _calculate_udp_checksum(data: bytes) -> int:
size = len(data)
# sum up all full words
checksum = 0
for i in range(0, size - (size % 2), 2):
checksum += (data[i] << 8) | data[i + 1]
# pad to multiple of words
if size % 2 != 0:
checksum += data[size - 1] << 8
# add the word carryover
while (checksum & ~0xFFFF) != 0:
checksum = (checksum >> 16) + (checksum & 0xFFFF)
# invert the sum and ensure zero is not returned
checksum = ~checksum & 0xFFFF
return checksum if checksum != 0 else 0xFFFF
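# Worked example (an illustrative addition, not part of the original module): the
# function computes the standard Internet one's-complement checksum, e.g. for
# b"\x00\x01\xf2\x03" the 16-bit words sum to 0x0001 + 0xf203 = 0xf204, whose
# one's complement is 0x0dfb, so _calculate_udp_checksum(b"\x00\x01\xf2\x03") == 0x0dfb.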
def _build_ipv4_udp_payload_and_pktinfo(
src: sockaddr, dst: sockaddr, data: bytes
) -> Tuple[bytes, bytes]:
src_ip = socket.inet_pton(socket.AF_INET, src[0])
dst_ip = socket.inet_pton(socket.AF_INET, dst[0])
proto = 17 # UDP
udp_length = 8 + len(data)
checksum = _calculate_udp_checksum(
struct.pack(
"!4s4s2B5H",
src_ip,
dst_ip,
0,
proto,
udp_length,
src[1],
dst[1],
udp_length,
0,
)
+ data
)
udp_header = struct.pack("!4H", src[1], dst[1], udp_length, checksum)
return udp_header + data, struct.pack("!I4s4s", 0, src_ip, dst_ip)
def _build_ipv6_udp_payload_and_pktinfo(
    src: sockaddr, dst: sockaddr, data: bytes
) -> Tuple[bytes, bytes]:
src_ip = socket.inet_pton(socket.AF_INET6, src[0])
dst_ip = socket.inet_pton(socket.AF_INET6, dst[0])
    payload_length = 8 + len(data)  # also udp_length
next_header = 17 # UDP
checksum = _calculate_udp_checksum(
struct.pack(
"!16s16sIH2B4H",
src_ip,
dst_ip,
payload_length,
0,
0,
next_header,
src[1],
dst[1],
payload_length,
0,
)
+ data
)
udp_header = struct.pack("!4H", src[1], dst[1], payload_length, checksum)
return udp_header + data, struct.pack("!16sI", src_ip, 0)
def _create_raw_socket(family: int, level: int) -> socket.socket:
sock = None
try:
sock = socket.socket(family, socket.SOCK_RAW, socket.IPPROTO_UDP)
sock.setblocking(False)
sock.setsockopt(level, socket.IP_TRANSPARENT, 1)
return sock
except:
if sock is not None:
sock.close()
raise
class TProxyProtocol(asyncio.BaseProtocol):
def received_from(self, data: bytes, src: sockaddr, dst: sockaddr,) -> None:
pass
def error_received(self, exc: OSError) -> None:
pass
class TProxyTransport(asyncio.BaseTransport):
def send_to(self, data: bytes, src: sockaddr, dst: sockaddr,) -> None:
raise NotImplementedError
def abort(self) -> None:
raise NotImplementedError
async def create_tproxy_endpoint(
loop: asyncio.SelectorEventLoop,
protocol_factory: Callable[[], TProxyProtocol],
local_addr: Tuple[str, int],
) -> Tuple[TProxyTransport, TProxyProtocol]:
host, port = local_addr
infos = await loop.getaddrinfo(host, port)
if not infos:
raise OSError("getaddrinfo() returned empty list")
sock_family, _, _, _, sock_addr = infos[0]
sock = None
try:
# Create a non-blocking, transparent (any IP) socket, that returns the original destination.
sock = socket.socket(sock_family, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setblocking(False)
sock.setsockopt(socket.SOL_IP, socket.IP_TRANSPARENT, 1)
sock.setsockopt(socket.SOL_IP, IP_RECVORIGDSTADDR, 1)
sock.bind(sock_addr)
except:
if sock is not None:
sock.close()
raise
protocol = protocol_factory()
waiter = loop.create_future()
transport = _TProxyTransport(loop, sock, sock_addr, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
class _TProxyTransport(asyncio.selector_events._SelectorTransport, TProxyTransport):
_buffer_factory = collections.deque
def __init__(
self,
loop: asyncio.SelectorEventLoop,
sock: socket.socket,
sock_addr: sockaddr,
protocol: TProxyProtocol,
waiter: asyncio.Future,
) -> None:
super().__init__(loop, sock, protocol)
self._sock_addr: sockaddr = sock_addr
self._send_sock_v4: socket.socket
self._send_sock_v6: Optional[socket.socket] = None
# we support dual stacks, so always create an IPv4 send socket
if sock.family == socket.AF_INET:
pass
elif sock.family == socket.AF_INET6:
self._send_sock_v6 = _create_raw_socket(
socket.AF_INET6, socket.IPPROTO_IPV6
)
else:
raise NotImplementedError(f"Address family {sock.family} not supported")
self._send_sock_v4 = _create_raw_socket(socket.AF_INET, socket.IPPROTO_IP)
# notify the protocol, start reading and signal complete
self._loop.call_soon(self._protocol.connection_made, self)
self._loop.call_soon(self._add_reader, self._sock_fd, self._read_ready)
self._loop.call_soon(asyncio.futures._set_result_unless_cancelled, waiter, None)
# override
def _call_connection_lost(self, exc):
try:
super()._call_connection_lost(exc)
finally:
self._send_sock_v4.close()
self._send_sock_v4 = None
if self._send_sock_v6 is not None:
self._send_sock_v6.close()
self._send_sock_v6 = None
def _check_and_unmap_ip_address(self, addr: sockaddr, name: str) -> sockaddr:
if not isinstance(addr, tuple) or len(addr) not in [2, 4]:
raise ValueError(f"{name} is not a valid socket address")
try:
ip = ipaddress.ip_address(addr[0])
except ValueError:
raise ValueError(f"{name} contains an invalid IP address")
if ip.version == 4:
if len(addr) == 4:
raise ValueError(f"{name} has too many components for an IPv4 address")
elif ip.version == 6:
if ip.ipv4_mapped is not None:
addr = (str(ip.ipv4_mapped), addr[1])
else:
if len(addr) == 2:
addr = addr + (0, 0)
if self._send_sock_v6 is None:
raise ValueError(
f"{name} contains an IPv6 address, but the listen socket is IPv4"
)
else:
raise ValueError(
f"{name} contains an IPv{ip.version} address which is not supported"
)
return addr
def _internal_send(self, data: bytes, src: sockaddr, dst: sockaddr) -> None:
assert len(src) == len(dst) and len(src) in [2, 4]
# generate the UDP payload and send it
if len(src) == 2:
payload, in_pktinfo = _build_ipv4_udp_payload_and_pktinfo(src, dst, data)
ancdata = [(socket.IPPROTO_IP, IP_PKTINFO, in_pktinfo)]
self._send_sock_v4.sendmsg([payload], ancdata, 0, (dst[0], 0))
else:
payload, in6_pktinfo = _build_ipv6_udp_payload_and_pktinfo(src, dst, data)
ancdata = [(socket.IPPROTO_IPV6, socket.IPV6_PKTINFO, in6_pktinfo)]
self._send_sock_v6.sendmsg([payload], ancdata, 0, (dst[0], 0, 0, 0))
# callback
def _read_ready(self):
if self._conn_lost:
return
try:
# 50 bytes is larger than sockaddr_in or sockaddr_in6
data, ancdata, _, src = self._sock.recvmsg(
self.max_size, socket.CMSG_LEN(50)
)
dst = self._sock_addr
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level == socket.SOL_IP and cmsg_type == IP_RECVORIGDSTADDR:
dst = _native_sockaddr_to_python(cmsg_data)
# on a dual stack, receive from IPv4 is possible, return mapped address like src
if self._send_sock_v6 is not None and len(dst) == 2:
dst = ("::ffff:" + dst[0], dst[1], 0, 0)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
self._protocol.error_received(exc)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._fatal_error(exc, "Fatal read error on datagram transport")
else:
self._protocol.received_from(data, src, dst)
# callback
def _write_ready(self):
while self._buffer:
data, src, dst = self._buffer.popleft()
try:
self._internal_send(data, src, dst)
except (BlockingIOError, InterruptedError):
self._buffer.appendleft((data, src, dst)) # try again later
break
except OSError as exc:
self._protocol.error_received(exc)
return
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._fatal_error(exc, "Fatal write error on datagram transport")
return
self._maybe_resume_protocol()
if not self._buffer:
self._loop._remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
def send_to(self, data: bytes, src: sockaddr, dst: sockaddr) -> None:
# check the input
src = self._check_and_unmap_ip_address(src, "src")
dst = self._check_and_unmap_ip_address(dst, "dst")
if len(src) != len(dst):
raise ValueError("src and dst are different IP versions")
if not data:
return
if not self._buffer:
try:
self._internal_send(data, src, dst)
return
except (BlockingIOError, InterruptedError):
self._loop._add_writer(self._sock_fd, self._write_ready)
except OSError as exc:
self._protocol.error_received(exc)
return
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._fatal_error(exc, "Fatal write error on datagram transport")
return
self._buffer.append((bytes(data), src, dst)) # make a copy of data
self._maybe_pause_protocol()
class QuicTransparentProxy(TProxyProtocol):
def __init__(
self,
*,
configuration: QuicConfiguration,
create_protocol: Callable = QuicConnectionProtocol,
session_ticket_fetcher: Optional[SessionTicketFetcher] = None,
session_ticket_handler: Optional[SessionTicketHandler] = None,
stateless_retry: bool = False,
stream_handler: Optional[QuicStreamHandler] = None,
) -> None:
self._configuration = configuration
self._create_protocol = create_protocol
self._session_ticket_fetcher = session_ticket_fetcher
self._session_ticket_handler = session_ticket_handler
self._stateless_retry = stateless_retry
self._stream_handler = stream_handler
self._transport: Optional[TProxyTransport] = None
self._servers: Dict[sockaddr, QuicServer] = {}
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self._transport = cast(TProxyTransport, transport)
def received_from(self, data: bytes, src: sockaddr, dst: sockaddr,) -> None:
server: QuicServer
if dst not in self._servers:
server = QuicServer(
configuration=self._configuration,
create_protocol=self._create_protocol,
session_ticket_fetcher=self._session_ticket_fetcher,
session_ticket_handler=self._session_ticket_handler,
stateless_retry=self._stateless_retry,
stream_handler=self._stream_handler,
)
self._servers[dst] = server
server.connection_made(QuicTransport(proxy=self, addr=dst, server=server))
else:
server = self._servers[dst]
server.datagram_received(data, src)
class QuicTransport(asyncio.DatagramTransport):
def __init__(
self, *, proxy: QuicTransparentProxy, addr: sockaddr, server: QuicServer
) -> None:
self._proxy = proxy
self._addr = addr
self._server = server
def abort(self) -> None:
        self._proxy._transport.abort()
self._proxy._servers.clear()
def close(self) -> None:
if not self.is_closing():
self._proxy._servers.pop(self._addr)
def get_extra_info(self, name: str, default: Any = None) -> Any:
return (
self._addr
if name == "sockname"
else self._proxy._transport.get_extra_info(name, default)
)
def get_protocol(self) -> asyncio.BaseProtocol:
return self._server
def is_closing(self) -> bool:
return self._proxy._servers.get(self._addr) is not self._server
def sendto(self, data: bytes, addr: sockaddr = None) -> None:
if not self.is_closing():
self._proxy._transport.send_to(data, self._addr, addr)
async def transparent_serve(
host: str,
port: int,
*,
configuration: QuicConfiguration,
create_protocol: Callable = QuicConnectionProtocol,
session_ticket_fetcher: Optional[SessionTicketFetcher] = None,
session_ticket_handler: Optional[SessionTicketHandler] = None,
stateless_retry: bool = False,
stream_handler: QuicStreamHandler = None,
) -> QuicTransparentProxy:
loop = asyncio.get_event_loop()
_, protocol = await create_tproxy_endpoint(
loop=loop,
protocol_factory=lambda: QuicTransparentProxy(
configuration=configuration,
create_protocol=create_protocol,
session_ticket_fetcher=session_ticket_fetcher,
session_ticket_handler=session_ticket_handler,
stateless_retry=stateless_retry,
stream_handler=stream_handler,
),
local_addr=(host, port),
)
return cast(QuicTransparentProxy, protocol)
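# Usage sketch (an illustrative addition, not part of the original module; the
# certificate paths and listen address below are placeholder assumptions).  TPROXY
# additionally needs root privileges and matching iptables/ip6tables TPROXY rules.
#
#     config = QuicConfiguration(is_client=False)
#     config.load_cert_chain("cert.pem", "key.pem")
#     loop = asyncio.get_event_loop()
#     loop.run_until_complete(transparent_serve("0.0.0.0", 443, configuration=config))
#     loop.run_forever()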
| 35.606335
| 100
| 0.620536
|
6b3994db5445ce1a3aa29e8b918790d090e8d726
| 77
|
py
|
Python
|
frostbyte/lang/__init__.py
|
frostbyte-lang/frostbyte-lang
|
1dbc2e130abb755a1afeca6e24a3084d4f60bc3f
|
[
"MIT"
] | null | null | null |
frostbyte/lang/__init__.py
|
frostbyte-lang/frostbyte-lang
|
1dbc2e130abb755a1afeca6e24a3084d4f60bc3f
|
[
"MIT"
] | null | null | null |
frostbyte/lang/__init__.py
|
frostbyte-lang/frostbyte-lang
|
1dbc2e130abb755a1afeca6e24a3084d4f60bc3f
|
[
"MIT"
] | null | null | null |
"""
Reference implementation for the Frostbyte programming language.
"""
| 19.25
| 68
| 0.74026
|
33edb589be03e92586f7f566256c11a51ece8be1
| 3,931
|
py
|
Python
|
arch_x86.py
|
mytbk/r2dumpbin
|
151d2dc32c16c62dca024bbd1bcbab36d1e08843
|
[
"MIT"
] | 20
|
2018-12-18T17:23:58.000Z
|
2021-12-19T09:55:28.000Z
|
arch_x86.py
|
mytbk/dumpbin
|
151d2dc32c16c62dca024bbd1bcbab36d1e08843
|
[
"MIT"
] | 2
|
2019-10-28T06:43:55.000Z
|
2020-11-01T12:51:42.000Z
|
arch_x86.py
|
mytbk/dumpbin
|
151d2dc32c16c62dca024bbd1bcbab36d1e08843
|
[
"MIT"
] | 5
|
2019-10-28T06:06:22.000Z
|
2022-03-27T02:19:50.000Z
|
# Copyright (C) 2021 Iru Cai <mytbk920423@gmail.com>
# SPDX-License-Identifier: MIT
import re
segmem_expr = re.compile(r'(cs|ds|es|ss|fs|gs):\[(.*)\]')
def asmfixup(dumper, insn):
orig_insn = insn["opcode"]
final_insn = orig_insn
comment = ""
# correct the r2 assembly to the NASM one
if insn["type"] == "lea":
# nasm doesn't like "lea r32, dword ..."
final_insn = orig_insn.replace("dword ", "")
elif insn["bytes"] == "f2a5":
# capstone 4.0.2 gets wrong here
final_insn = "repne movsd"
elif ("movsb" in orig_insn or "movsw" in orig_insn or "movsd" in orig_insn \
or "lods" in orig_insn or "stos" in orig_insn) \
and "66" in [insn["bytes"][0:2], insn["bytes"][2:4]]:
# capstone also seems to be wrong here
comment = orig_insn
ibytes = dumper.readBytes(insn["offset"], insn["size"])
final_insn = "db " + ", ".join(["0x{:02x}".format(i) for i in ibytes])
elif orig_insn[0:4] == "rep ":
# rep XXXsX
comment = orig_insn
final_insn = orig_insn[0:9]
elif orig_insn[0:5] == "repe ":
# repe XXXsX
comment = orig_insn
final_insn = orig_insn[0:10]
elif orig_insn[0:6] == "repne ":
# repne XXXsX
comment = orig_insn
final_insn = orig_insn[0:11]
elif orig_insn[0:6] in ["movsb ","movsw ","movsd ",
"stosb ", "stosw ", "stosd ",
"lodsb ", "lodsw ", "lodsd ",
"cmpsb ", "cmpsw ", "compsd"]:
comment = orig_insn
final_insn = orig_insn[0:5]
elif orig_insn[0:6] == "pushal":
final_insn = "pushad"
elif orig_insn[0:5] == "popal":
final_insn = "popad"
elif orig_insn[0:12] == "clflush byte":
# "clflush byte" -> "clflush"
final_insn = "clflush " + orig_insn[12:]
elif insn["type"] in ["jmp", "cjmp", "call"]:
prefix = ""
if "jecxz" in orig_insn or "loop" in orig_insn:
pass
elif insn["type"] != "call":
if insn["size"] == 2:
prefix = "short "
elif insn["size"] >= 5:
prefix = "near "
tgt = insn["jump"]
if tgt in dumper.functions:
prefix += "fcn_"
else:
prefix += "loc_"
final_insn = re.sub(
"0x.*", prefix + "{:08x}".format(tgt), orig_insn)
comment = orig_insn
elif orig_insn[0:5] in ["fcom ", "fsub ", "fxch ", "fstp ", "fdiv "] or \
orig_insn[0:6] in ["fmulp ", "fdivp ", "faddp ", "fsubp ", "fdivr "] or \
orig_insn[0:4] in ["fld "] or \
orig_insn[0:7] in ["fdivrp "]:
final_insn = orig_insn.replace("xword", "tword") # 80-bit "ten word"
        final_insn = re.sub(r"st\(([0-9])\)", r"st\1", final_insn)
comment = orig_insn
elif orig_insn[0:7] in ["fnstsw ", "fnsave ", "frstor "]:
final_insn = orig_insn.replace(" dword", "")
elif insn["type"] in ["cmp", "add", "sub"] and insn["size"] >= 5 and \
'[' not in orig_insn:
val = insn.get("val", 0xffffffff)
ibytes = dumper.readBytes(insn["offset"], insn["size"])
if val < 0x80 and ibytes[0] != 0x66:
# nasm emits short instructions when immediate can fit in one byte
fixup = True
if val in dumper.solved or val in dumper.non_function_labels or val in dumper.label_adjust:
if not dumper.HasReloc or dumper.isRelocInsn(insn["offset"], insn["size"]):
fixup = False
if fixup:
final_insn = "db " + ", ".join(["0x{:02x}".format(i) for i in ibytes])
comment = orig_insn
# fix addressing expressions with a segment selector
final_insn = segmem_expr.sub('[\\1:\\2]', final_insn)
if final_insn == orig_insn:
comment = ""
return final_insn, comment
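# Illustrative check (an addition, not part of the original module): the final segment
# fixup rewrites r2-style "seg:[expr]" operands into NASM-style "[seg:expr]" operands.
if __name__ == '__main__':
    sample = "mov eax, dword ds:[0x1234]"
    assert segmem_expr.sub('[\\1:\\2]', sample) == "mov eax, dword [ds:0x1234]"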
| 38.539216
| 103
| 0.532689
|
fd9720a79a291befe413351daf2aadd566eaa53b
| 97
|
py
|
Python
|
test/tests/list_eq_contains.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:28:45.000Z
|
2020-02-06T14:28:45.000Z
|
test/tests/list_eq_contains.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
test/tests/list_eq_contains.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:29:00.000Z
|
2020-02-06T14:29:00.000Z
|
nan = float('nan')
print nan == nan
print nan in [nan]
print [nan] == [nan]
print [nan] in [nan]
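# Expected output, for reference (an explanatory addition, not part of the original
# test): nan compares unequal to itself, but CPython's `in` operator and list `==`
# check identity before equality, so the four prints yield False, True, True, False.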
| 16.166667
| 20
| 0.608247
|
a77d23e8b3d5095b37e4ec71207b31f9d1f87e1e
| 3,721
|
py
|
Python
|
mars/tensor/arithmetic/divide.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | 2
|
2019-03-29T04:11:10.000Z
|
2020-07-08T10:19:54.000Z
|
mars/tensor/arithmetic/divide.py
|
JeffroMF/mars
|
2805241ac55b50c4f6319baa41113fbf8c723832
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/arithmetic/divide.py
|
JeffroMF/mars
|
2805241ac55b50c4f6319baa41113fbf8c723832
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..utils import infer_dtype
from .core import TensorBinOp
from .utils import arithmetic_operand
@arithmetic_operand
class TensorDivide(TensorBinOp):
_op_type_ = OperandDef.DIV
_func_name = 'divide'
@classmethod
def _is_sparse(cls, x1, x2):
if not np.isscalar(x1) and not np.isscalar(x2):
return False
if hasattr(x1, 'issparse') and x1.issparse():
if x2 != 0:
return True
else:
                raise ZeroDivisionError('float division by zero')
        return False
@infer_dtype(np.divide)
def divide(x1, x2, out=None, where=None, **kwargs):
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend tensor.
x2 : array_like
Divisor tensor.
out : Tensor, None, or tuple of Tensor and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
**kwargs
Returns
-------
out : Tensor
The quotient `x1/x2`, element-wise. Returns a scalar if both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
In Python 2, when both `x1` and `x2` are of an integer type, `divide` will behave like `floor_divide`.
In Python 3, it behaves like `true_divide`.
Examples
--------
>>> import mars.tensor as mt
>>> mt.divide(2.0, 4.0).execute()
0.5
>>> x1 = mt.arange(9.0).reshape((3, 3))
>>> x2 = mt.arange(3.0)
>>> mt.divide(x1, x2).execute()
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> mt.divide(2, 4).execute()
0
>>> mt.divide(2, 4.).execute()
0.5
Division by zero always yields zero in integer arithmetic (again, Python 2 only),
and does not raise an exception or a warning:
>>> mt.divide(mt.array([0, 1], dtype=int), mt.array([0, 0], dtype=int)).execute()
array([0, 0])
Division by zero can, however, be caught using seterr:
>>> old_err_state = mt.seterr(divide='raise')
>>> mt.divide(1, 0).execute()
Traceback (most recent call last):
...
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = mt.seterr(**old_err_state)
>>> mt.divide(1, 0).execute()
0
"""
op = TensorDivide(**kwargs)
return op(x1, x2, out=out, where=where)
@infer_dtype(np.divide, reverse=True)
def rdivide(x1, x2, **kwargs):
op = TensorDivide(**kwargs)
return op.rcall(x1, x2)
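# Usage sketch, for reference (assumes a working mars runtime; names are as
# exposed by mars.tensor): divide() backs the forward `x1 / x2` path, while
# rdivide() is the reflected form used when the right-hand tensor must handle
# `x2 / x1`, e.g.
#
#   import mars.tensor as mt
#   (mt.tensor([2.0, 4.0]) / 2).execute()    # forward path   -> [1.0, 2.0]
#   (2 / mt.tensor([2.0, 4.0])).execute()    # reflected path -> [1.0, 0.5]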
| 32.356522
| 106
| 0.640688
|
bf4ba63c10ebe8f1b4d0dee39eed5fe6a9f728e5
| 3,243
|
py
|
Python
|
kidmondo_spider/settings.py
|
plar/kidmondo_export
|
b3719072e26f210e5951fd01aa5005332b65be5a
|
[
"Apache-2.0"
] | null | null | null |
kidmondo_spider/settings.py
|
plar/kidmondo_export
|
b3719072e26f210e5951fd01aa5005332b65be5a
|
[
"Apache-2.0"
] | null | null | null |
kidmondo_spider/settings.py
|
plar/kidmondo_export
|
b3719072e26f210e5951fd01aa5005332b65be5a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Scrapy settings for amazon project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'kidmondo_spider'
SPIDER_MODULES = ['kidmondo_spider.spiders']
NEWSPIDER_MODULE = 'kidmondo_spider.spiders'
FEED_EXPORTERS = {
'json': 'kidmondo_spider.spiders.kidmondo.UnicodeJsonLinesItemExporter'
}
IMAGES_STORE='/home/pavel/workspace/kidmondo_export/target'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'amazon (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'amazon.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'amazon.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'amazon.pipelines.SomePipeline': 300,
#}
ITEM_PIPELINES = {
'scrapy.pipelines.images.ImagesPipeline': 1
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
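# Note on FEED_EXPORTERS above: UnicodeJsonLinesItemExporter lives in
# kidmondo_spider.spiders.kidmondo and is not shown here. A minimal sketch of
# what such an exporter typically looks like (an assumption, not this
# project's actual code):
#
# from scrapy.exporters import JsonLinesItemExporter
#
# class UnicodeJsonLinesItemExporter(JsonLinesItemExporter):
#     def __init__(self, file, **kwargs):
#         # keep non-ASCII output readable instead of \uXXXX escapes
#         kwargs.setdefault('ensure_ascii', False)
#         super(UnicodeJsonLinesItemExporter, self).__init__(file, **kwargs)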
| 33.78125
| 109
| 0.785076
|
78cb6c6ff6db9ac0c462628a4d4d4bc10e588470
| 2,811
|
py
|
Python
|
sample_datasource/lacrime/lacrime_pub.py
|
rbpandya/I3-SDK
|
80d99b9dec25da9a2f55ababc3c814ff883e695b
|
[
"BSD-3-Clause"
] | 2
|
2020-04-15T23:05:09.000Z
|
2020-06-09T16:58:45.000Z
|
sample_datasource/lacrime/lacrime_pub.py
|
rbpandya/I3-SDK
|
80d99b9dec25da9a2f55ababc3c814ff883e695b
|
[
"BSD-3-Clause"
] | 5
|
2019-09-24T21:33:04.000Z
|
2019-10-09T21:05:48.000Z
|
sample_datasource/lacrime/lacrime_pub.py
|
rbpandya/I3-SDK
|
80d99b9dec25da9a2f55ababc3c814ff883e695b
|
[
"BSD-3-Clause"
] | 5
|
2019-10-23T17:46:14.000Z
|
2020-06-09T16:58:53.000Z
|
#!/usr/bin/python
"""
lacrime_pub.py is an example of a publisher that fetches LA crime data and publishes it to a topic
"""
import paho.mqtt.client as mqtt
import datetime
from datetime import timedelta
import requests
import json
import os
import time
def on_connect(client, userdata, flags, rc):
"""print out result code when connecting with the broker
Args:
client: publisher
userdata:
flags:
rc: result code
Returns:
"""
m="Connected flags"+str(flags)+"result code "\
+str(rc)+"client1_id "+str(client)
print(m)
def on_message(client1, userdata, message):
    """print out received message
Args:
client1: publisher
userdata:
        message: received data
Returns:
"""
print("message received " ,str(message.payload.decode("utf-8")))
if __name__ == '__main__':
#TODO: modify topic from email message
# account : username created on I3 instance
# pw : system generated password on the notification bell
# topic : the product that is bought
# clientid : this must be unique else the connection would be lost
clientId = 'Default'
account = 'Default'
topic = ['Default']
pw = 'Default'
port = 1883
host = 'Default'
try:
if os.path.exists('config.ini'):
fread = open('config.ini', 'r')
host = str(fread.read()).split("=")[1]
print "Host :", host
fread.close()
if host == 'Default' or port == 'Default' or topic == 'Default' or account == 'Default' or clientId == 'Default':
print "ERROR: Check host, topic, subscriber and password values"
print "The subscriber is the username that was used to purchase the product"
print "The topic is the product which is purchased from the I3 Data market place"
print "The password is the system generated password when the product is purchased"
raise Exception(" Default values not changed ")
pub_client = mqtt.Client(clientId)
pub_client.on_connect = on_connect
pub_client.on_message = on_message
pub_client.username_pw_set(account, pw)
pub_client.connect(host, port) #connect to broker
except Exception as e:
print "Exception" + str(e)
exit(0)
#pub_client.subscribe(topic)
#pub_client.loop_start()
url='https://data.lacity.org/resource/7fvc-faax.json?$where=date_rptd>"'
url = url + (datetime.datetime.now() - timedelta(days=7)).isoformat() + '"'
json_data=requests.get(url,verify=False).json() #Gets data from SODA API
count = 0
while count < 2:
count += 1
pub_client.publish(topic[0], json.dumps(json_data))
## pub_client.publish(topic[1], 'Hello World')
time.sleep(2)
pub_client.disconnect()
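# A minimal sketch of the matching subscriber side, assuming the same broker
# settings as above (host/port/account/pw/topic are placeholders, exactly like
# the 'Default' values in this script):
#
# import paho.mqtt.client as mqtt
#
# def on_crime_message(client, userdata, message):
#     print(message.topic, message.payload.decode("utf-8"))
#
# sub_client = mqtt.Client("lacrime-subscriber")
# sub_client.username_pw_set(account, pw)
# sub_client.on_message = on_crime_message
# sub_client.connect(host, port)
# sub_client.subscribe(topic[0])
# sub_client.loop_forever()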
| 28.393939
| 121
| 0.630736
|
995fe8fb1860a8b9bff4cebcc81850cf275f8406
| 2,591
|
py
|
Python
|
setup.py
|
adriaan-vd-graaf/genome_integration
|
345eae53aab9a23c080ae10f2ee3d8305f75a5c6
|
[
"MIT"
] | 13
|
2019-08-18T15:48:35.000Z
|
2022-03-28T07:48:49.000Z
|
setup.py
|
adriaan-vd-graaf/genome_integration
|
345eae53aab9a23c080ae10f2ee3d8305f75a5c6
|
[
"MIT"
] | 1
|
2021-09-10T06:58:24.000Z
|
2021-09-10T07:50:45.000Z
|
setup.py
|
adriaan-vd-graaf/genome_integration
|
345eae53aab9a23c080ae10f2ee3d8305f75a5c6
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from setuptools import find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
name = 'genome_integration'
version = '1.0'
release = '1.0'
setup(
name=name,
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
    python_requires='!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, >=3.6, <4',
description='Genome integration, a personal library',
long_description=long_description,
# The project's main homepage.
url='none',
# Author details
author='Adriaan van der Graaf',
author_email='adriaan.vd.graaf@gmail.com',
# Choose your license
license='Copyright 2019 Adriaan van der Graaf',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Investigators interested in causal inference',
'Topic :: Genome analysis :: multiomics integration',
# Pick your license as you wish (should match "license" above)
'License :: MIT licence 2020',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: >=3.7',
'Programming Language :: Python :: 3.7'
],
# What does your project relate to?
keywords='multiomics',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=['numpy', 'scipy', 'sklearn', 'statsmodels', 'plinkio', 'requests', 'bitarray', 'bgen_reader'],
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
setup_requires=['pytest-runner'],
tests_require=['pytest'],
entry_points={
'console_scripts': [
'genome_integration=genome_integration:main',
],
},
include_package_data=True,
)
| 29.443182
| 116
| 0.648398
|
47df330ae7f9dd82f9163f6dbc94864d34b2baa7
| 5,076
|
py
|
Python
|
setup.py
|
ageron/salt-cloud
|
2f71d170e5e6bff8aafa8459227073400e875602
|
[
"Apache-2.0"
] | 2
|
2019-03-30T02:12:57.000Z
|
2021-03-08T18:59:52.000Z
|
setup.py
|
ageron/salt-cloud
|
2f71d170e5e6bff8aafa8459227073400e875602
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
ageron/salt-cloud
|
2f71d170e5e6bff8aafa8459227073400e875602
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
The setup script for salt
'''
import os
import urllib2
from distutils import log
from distutils.core import setup
from distutils.command.sdist import sdist as original_sdist
setup_kwargs = {}
USE_SETUPTOOLS = False
SALTCLOUD_SOURCE_DIR = os.path.abspath(os.path.dirname(__file__))
BOOTSTRAP_SCRIPT_DISTRIBUTED_VERSION = os.environ.get(
# The user can provide a different bootstrap-script version.
# ATTENTION: A tag for that version MUST exist
'BOOTSTRAP_SCRIPT_VERSION',
# If no bootstrap-script version was provided from the environment, let's
# provide the one we define.
'v1.5.5'
)
if 'USE_SETUPTOOLS' in os.environ:
try:
from setuptools import setup
from setuptools.command.sdist import sdist as original_sdist
USE_SETUPTOOLS = True
saltcloud_reqs = os.path.join(SALTCLOUD_SOURCE_DIR, 'requirements.txt')
requirements = ''
with open(saltcloud_reqs) as f:
requirements = f.read()
setup_kwargs['install_requires'] = requirements
except:
USE_SETUPTOOLS = False
if USE_SETUPTOOLS is False:
from distutils.core import setup
exec(
compile(
open('saltcloud/version.py').read(), 'saltcloud/version.py', 'exec'
)
)
class sdist(original_sdist):
user_options = original_sdist.user_options + [
('skip-bootstrap-download', None,
'Skip downloading the bootstrap-salt.sh script. This can also be '
'triggered by having `SKIP_BOOTSTRAP_DOWNLOAD=1` as an environment '
'variable.')
]
boolean_options = original_sdist.boolean_options + [
'skip-bootstrap-download'
]
def initialize_options(self):
original_sdist.initialize_options(self)
self.skip_bootstrap_download = False
def finalize_options(self):
original_sdist.finalize_options(self)
if 'SKIP_BOOTSTRAP_DOWNLOAD' in os.environ:
skip_bootstrap_download = os.environ.get(
'SKIP_BOOTSTRAP_DOWNLOAD', '0'
)
self.skip_bootstrap_download = skip_bootstrap_download == '1'
def run(self):
if self.skip_bootstrap_download is False:
# Let's update the bootstrap-script to the version defined to be
# distributed. See BOOTSTRAP_SCRIPT_DISTRIBUTED_VERSION above.
url = (
'https://github.com/saltstack/salt-bootstrap/raw/{0}'
'/bootstrap-salt.sh'.format(
BOOTSTRAP_SCRIPT_DISTRIBUTED_VERSION
)
)
req = urllib2.urlopen(url)
deploy_path = os.path.join(
SALTCLOUD_SOURCE_DIR,
'saltcloud',
'deploy',
'bootstrap-salt.sh'
)
if req.getcode() == 200:
try:
log.info(
'Updating bootstrap-salt.sh.'
'\n\tSource: {0}'
'\n\tDestination: {1}'.format(
url,
deploy_path
)
)
with open(deploy_path, 'w') as fp_:
fp_.write(req.read())
except (OSError, IOError), err:
log.error(
'Failed to write the updated script: {0}'.format(err)
)
else:
log.error(
'Failed to update the bootstrap-salt.sh script. HTTP '
'Error code: {0}'.format(
req.getcode()
)
)
        # Let's run the rest of the build command
original_sdist.run(self)
NAME = 'salt-cloud'
VER = __version__
DESC = ('Generic cloud provisioning system with built-in functions')
setup(name=NAME,
version=VER,
description=DESC,
author='Thomas S Hatch',
author_email='thatch@saltstack.com',
url='http://saltstack.org',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Distributed Computing',
],
packages=['saltcloud',
'saltcloud/utils',
'saltcloud/clouds',
],
package_data={
'saltcloud': ['deploy/*.sh'],
},
data_files=[('share/man/man1', ['doc/man/salt-cloud.1']),
('share/man/man7', ['doc/man/salt-cloud.7'])
],
scripts=['scripts/salt-cloud'],
cmdclass={
'sdist': sdist
},
**setup_kwargs
)
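# Usage note (illustration only, not part of the original file): the custom
# sdist above skips the bootstrap-salt.sh download either via its own flag or
# via the environment variable it checks, e.g.
#
#   python setup.py sdist --skip-bootstrap-download
#   SKIP_BOOTSTRAP_DOWNLOAD=1 python setup.py sdist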
| 32.33121
| 79
| 0.559693
|
13fae6e5f1f12d58b1595d382e946a95f7d4b28a
| 4,461
|
py
|
Python
|
custom_components/hacs/repositories/integration.py
|
Mofeywalker/hacs
|
4176b50896f0d51e260704e0aab45a6d60676213
|
[
"MIT"
] | null | null | null |
custom_components/hacs/repositories/integration.py
|
Mofeywalker/hacs
|
4176b50896f0d51e260704e0aab45a6d60676213
|
[
"MIT"
] | null | null | null |
custom_components/hacs/repositories/integration.py
|
Mofeywalker/hacs
|
4176b50896f0d51e260704e0aab45a6d60676213
|
[
"MIT"
] | null | null | null |
"""Class for integrations in HACS."""
import json
from homeassistant.loader import async_get_custom_components
from .repository import HacsRepository, register_repository_class
@register_repository_class
class HacsIntegration(HacsRepository):
"""Integrations in HACS."""
category = "integration"
def __init__(self, full_name):
"""Initialize."""
super().__init__()
self.information.full_name = full_name
self.information.category = self.category
self.manifest = None
self.domain = None
self.content.path.local = self.localpath
@property
def localpath(self):
"""Return localpath."""
return f"{self.system.config_path}/custom_components/{self.domain}"
@property
def config_flow(self):
"""Return bool if integration has config_flow."""
if self.manifest is not None:
if self.information.full_name == "custom-components/hacs":
return False
return self.manifest.get("config_flow", False)
return False
async def validate_repository(self):
"""Validate."""
await self.common_validate()
# Attach repository
if self.repository_object is None:
self.repository_object = await self.github.get_repo(
self.information.full_name
)
# Custom step 1: Validate content.
ccdir = await self.repository_object.get_contents("custom_components", self.ref)
if not isinstance(ccdir, list):
            self.validate.errors.append("Repository structure not compliant")
self.content.path.remote = ccdir[0].path
self.content.objects = await self.repository_object.get_contents(
self.content.path.remote, self.ref
)
self.content.files = []
for filename in self.content.objects:
self.content.files.append(filename.name)
if not await self.get_manifest():
self.validate.errors.append("Missing manifest file.")
# Handle potential errors
if self.validate.errors:
for error in self.validate.errors:
if not self.system.status.startup:
self.logger.error(error)
return self.validate.success
async def registration(self):
"""Registration."""
if not await self.validate_repository():
return False
# Run common registration steps.
await self.common_registration()
# Get the content of the manifest file.
await self.get_manifest()
# Set local path
self.content.path.local = self.localpath
async def update_repository(self):
"""Update."""
await self.common_update()
# Get integration objects.
ccdir = await self.repository_object.get_contents("custom_components", self.ref)
self.content.path.remote = ccdir[0].path
self.content.objects = await self.repository_object.get_contents(
self.content.path.remote, self.ref
)
self.content.files = []
for filename in self.content.objects:
self.content.files.append(filename.name)
await self.get_manifest()
# Set local path
self.content.path.local = self.localpath
    async def reload_custom_components(self):
        """Reload custom_components (and config flows) in HA."""
self.logger.info("Reloading custom_component cache")
del self.hass.data["custom_components"]
await async_get_custom_components(self.hass)
async def get_manifest(self):
"""Get info from the manifest file."""
manifest_path = f"{self.content.path.remote}/manifest.json"
manifest = None
if "manifest.json" not in self.content.files:
return False
manifest = await self.repository_object.get_contents(manifest_path, self.ref)
manifest = json.loads(manifest.content)
if manifest:
self.manifest = manifest
self.information.authors = manifest["codeowners"]
self.domain = manifest["domain"]
self.information.name = manifest["name"]
self.information.homeassistant_version = manifest.get("homeassistant")
return True
else:
return False
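# For reference, get_manifest() above only relies on a handful of keys from
# the integration's manifest.json; a minimal document shaped like what it
# parses (all values illustrative) looks like this:
_EXAMPLE_MANIFEST = {
    "domain": "example_integration",
    "name": "Example integration",
    "codeowners": ["@someone"],
    "homeassistant": "0.99.0",
    "config_flow": True,
}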
| 34.315385
| 89
| 0.618471
|
19e3c97377b2f34caf432ca51c10e95eb238b34b
| 4,139
|
py
|
Python
|
sentiment/train.py
|
terrifyzhao/nlp
|
2dd15e6b584bd0620bd09aa1408d1a956a936390
|
[
"Apache-2.0"
] | 1
|
2019-08-02T05:55:10.000Z
|
2019-08-02T05:55:10.000Z
|
sentiment/train.py
|
terrifyzhao/nlp
|
2dd15e6b584bd0620bd09aa1408d1a956a936390
|
[
"Apache-2.0"
] | null | null | null |
sentiment/train.py
|
terrifyzhao/nlp
|
2dd15e6b584bd0620bd09aa1408d1a956a936390
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import json
from tqdm import tqdm
import pandas as pd
from data_utils import *
from keras.callbacks import Callback
from keras_bert import Tokenizer
import codecs
import tensorflow as tf
from sentiment.sentiment_model import sentiment_model
dict_path = '../chinese_L-12_H-768_A-12/vocab.txt'
max_len = 315
token_dict = {}
additional_chars = set()
global graph
graph = tf.get_default_graph()
with codecs.open(dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token_dict[token] = len(token_dict)
class OurTokenizer(Tokenizer):
def _tokenize(self, text):
R = []
for c in text:
if c in self._token_dict:
R.append(c)
elif self._is_space(c):
                R.append('[unused1]')  # represent spaces with the untrained [unused1] token
            else:
                R.append('[UNK]')  # map every remaining character to [UNK]
return R
tokenizer = OurTokenizer(token_dict)
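# Rough behaviour of OurTokenizer, for reference: each character present in
# vocab.txt stays as its own token, spaces become '[unused1]' and anything out
# of vocabulary becomes '[UNK]'; keras_bert then wraps the sequence with
# '[CLS]' ... '[SEP]' when encode()/tokenize() is called.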
length = 100
def read_data():
df_train = pd.read_csv('../data/sentiment/train.csv', header=None).values[1:]
df_eval = pd.read_csv('../data/sentiment/eval.csv', header=None).values[1:]
df_test = pd.read_csv('../data/sentiment/test.csv', header=None).values[1:]
df_train = shuffle(df_train)
df_eval = shuffle(df_eval)
df_test = shuffle(df_test)
return df_train[0:length][0], df_eval[0:length][0], df_test[0:length][0]
def list_find(list1, list2):
    """Search list1 for the sublist list2; return the index of the first
    match, or -1 if it is not found.
    """
n_list2 = len(list2)
for i in range(len(list1)):
if list1[i: i + n_list2] == list2:
return i
return -1
def data_generator(data, batch_size):
while True:
X1, X2, Y = [], [], []
for i, d in enumerate(data):
x = d[0].strip()
y = int(d[1])
x = tokenizer.encode(first=x)
X1.append(x[0])
X2.append(x[1])
Y.append(y)
if len(X1) == batch_size or i == len(data) - 1:
X1 = pad_sequences(X1, maxlen=max_len)
X2 = pad_sequences(X2, maxlen=max_len)
Y = one_hot(Y, 4)
yield [X1, X2], Y
X1, X2, Y = [], [], []
def extract_entity(text_in, model):
    """Decoding helper; add more rules as needed to guarantee that the decoded
    result is a company name.
    """
text_in = text_in[:max_len]
_tokens = tokenizer.tokenize(text_in)
_x1, _x2 = tokenizer.encode(first=text_in)
_x1, _x2 = np.array([_x1]), np.array([_x2])
with graph.as_default():
_ps1 = model.predict([_x1, _x2])
start = _ps1.argmax()
return start, _ps1
class Evaluate(Callback):
def __init__(self, data, model):
self.ACC = []
self.best = 0.
self.dev_data = data
self.model = model
def on_epoch_end(self, epoch, logs=None):
acc = self.evaluate()
self.ACC.append(acc)
if acc > self.best:
self.best = acc
self.model.save_weights('../output/sentiment_model.weights')
print('acc: %.4f, best acc: %.4f\n' % (acc, self.best))
def evaluate(self):
A = 1e-10
for d in tqdm(iter(self.dev_data)):
            R, _ = extract_entity(d[0], self.model)
            if R == d[1]:
A += 1
return A / len(self.dev_data)
def test(test_data, model):
    """Note: the official page says the data is tab-separated, but it is
    actually comma-separated.
    """
with open('../result.txt', 'w', encoding='utf-8')as file:
for d in tqdm(iter(test_data)):
            s = str(d[0]) + ',' + str(extract_entity(d[1].replace('\t', ''), model)[0])
file.write(s + '\n')
if __name__ == '__main__':
batch_size = 8
learning_rate = 1e-5
is_test = True
train_data, dev_data, test_data = read_data()
model = sentiment_model(learning_rate=learning_rate)
# model.load_weights('../output/best_model2.weights')
evaluator = Evaluate(dev_data, model)
X = data_generator(train_data, batch_size)
steps = int((len(train_data) + batch_size - 1) / batch_size)
model.fit_generator(X, steps_per_epoch=steps, epochs=120, callbacks=[evaluator])
| 26.703226
| 84
| 0.590722
|
307ff989cdc20ce8ab9cc180823164458236e7cf
| 930
|
py
|
Python
|
example/dj/apps/issue_tracker/tests/factories.py
|
druids/django-fperms-iscore
|
8e919cdc70ed57e0eb6407469de9ef2441ae06ad
|
[
"MIT"
] | 1
|
2019-10-07T12:40:38.000Z
|
2019-10-07T12:40:38.000Z
|
example/dj/apps/issue_tracker/tests/factories.py
|
druids/django-fperms-iscore
|
8e919cdc70ed57e0eb6407469de9ef2441ae06ad
|
[
"MIT"
] | 3
|
2019-08-09T14:10:21.000Z
|
2022-02-01T13:48:01.000Z
|
example/dj/apps/issue_tracker/tests/factories.py
|
druids/django-fperms-iscore
|
8e919cdc70ed57e0eb6407469de9ef2441ae06ad
|
[
"MIT"
] | null | null | null |
import factory
from factory import fuzzy
from factory.django import DjangoModelFactory
from django.contrib.auth.models import User
from issue_tracker.models import Issue
class UserFactory(DjangoModelFactory):
username = factory.Faker('user_name')
password = fuzzy.FuzzyText(length=10)
email = 'user@test.cz'
@classmethod
def _create(cls, model_class, *args, **kwargs):
"""Override the default ``_create`` with our custom call."""
manager = cls._get_manager(model_class)
# The default would use ``manager.create(*args, **kwargs)``
return manager._create_user(*args, **kwargs)
class Meta:
model = User
class IssueFactory(DjangoModelFactory):
name = fuzzy.FuzzyText(length=10)
created_by = factory.SubFactory(UserFactory)
solver = factory.SubFactory(UserFactory)
leader = factory.SubFactory(UserFactory)
class Meta:
model = Issue
| 25.833333
| 68
| 0.704301
|
7154a6174361a0e06ea3ad26e2db4b55f9ac5a26
| 6,146
|
py
|
Python
|
permanentclock/src/plugin.py
|
FoxyRabbit67/enigma2-plugins
|
f6b94012726931fdf28e80a26226aec612b350de
|
[
"Linux-OpenIB"
] | 41
|
2016-01-21T17:54:44.000Z
|
2021-06-26T05:54:41.000Z
|
permanentclock/src/plugin.py
|
FoxyRabbit67/enigma2-plugins
|
f6b94012726931fdf28e80a26226aec612b350de
|
[
"Linux-OpenIB"
] | 22
|
2016-11-16T11:25:26.000Z
|
2021-12-13T09:13:06.000Z
|
permanentclock/src/plugin.py
|
FoxyRabbit67/enigma2-plugins
|
f6b94012726931fdf28e80a26226aec612b350de
|
[
"Linux-OpenIB"
] | 62
|
2016-02-05T22:55:48.000Z
|
2022-03-12T21:48:22.000Z
|
##
## Permanent Clock
## by AliAbdul
##
from Components.ActionMap import ActionMap
from Components.config import config, ConfigInteger, ConfigSubsection, ConfigYesNo
from Components.MenuList import MenuList
from enigma import ePoint, eTimer, getDesktop
from os import environ
from Plugins.Plugin import PluginDescriptor
from Screens.Screen import Screen
##############################################################################
config.plugins.PermanentClock = ConfigSubsection()
config.plugins.PermanentClock.enabled = ConfigYesNo(default=False)
config.plugins.PermanentClock.position_x = ConfigInteger(default=590)
config.plugins.PermanentClock.position_y = ConfigInteger(default=35)
##############################################################################
SKIN = """
<screen position="0,0" size="120,30" zPosition="10" backgroundColor="#ff000000" title="%s" flags="wfNoBorder">
<widget source="global.CurrentTime" render="Label" position="1,1" size="120,30" font="Regular;26" valign="center" halign="center" backgroundColor="#ff000000" transparent="1">
<convert type="ClockToText">Default</convert>
</widget>
</screen>""" % _("Permanent Clock")
##############################################################################
class PermanentClockScreen(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.skin = SKIN
self.onShow.append(self.movePosition)
def movePosition(self):
if self.instance:
self.instance.move(ePoint(config.plugins.PermanentClock.position_x.value, config.plugins.PermanentClock.position_y.value))
##############################################################################
class PermanentClock():
def __init__(self):
self.dialog = None
def gotSession(self, session):
self.dialog = session.instantiateDialog(PermanentClockScreen, zPosition=1000)
self.dialog.movePosition()
self.showHide()
def changeVisibility(self):
if config.plugins.PermanentClock.enabled.value:
config.plugins.PermanentClock.enabled.value = False
else:
config.plugins.PermanentClock.enabled.value = True
config.plugins.PermanentClock.enabled.save()
self.showHide()
def showHide(self):
if config.plugins.PermanentClock.enabled.value:
self.dialog.show()
else:
self.dialog.hide()
pClock = PermanentClock()
##############################################################################
class PermanentClockPositioner(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.skin = SKIN
self["actions"] = ActionMap(["WizardActions"],
{
"left": self.left,
"up": self.up,
"right": self.right,
"down": self.down,
"ok": self.ok,
"back": self.exit
}, -1)
desktop = getDesktop(0)
self.desktopWidth = desktop.size().width()
self.desktopHeight = desktop.size().height()
self.moveTimer = eTimer()
self.moveTimer_conn = self.moveTimer.timeout.connect(self.movePosition)
self.onExecBegin.append(self.movePosition)
def movePosition(self):
self.instance.move(ePoint(config.plugins.PermanentClock.position_x.value, config.plugins.PermanentClock.position_y.value))
self.moveTimer.start(50, 1)
def left(self):
value = config.plugins.PermanentClock.position_x.value
value -= 1
if value < 0:
value = 0
config.plugins.PermanentClock.position_x.value = value
def up(self):
value = config.plugins.PermanentClock.position_y.value
value -= 1
if value < 0:
value = 0
config.plugins.PermanentClock.position_y.value = value
def right(self):
value = config.plugins.PermanentClock.position_x.value
value += 1
if value > self.desktopWidth:
value = self.desktopWidth
config.plugins.PermanentClock.position_x.value = value
def down(self):
value = config.plugins.PermanentClock.position_y.value
value += 1
if value > self.desktopHeight:
value = self.desktopHeight
config.plugins.PermanentClock.position_y.value = value
def ok(self):
config.plugins.PermanentClock.position_x.save()
config.plugins.PermanentClock.position_y.save()
self.close()
def exit(self):
config.plugins.PermanentClock.position_x.cancel()
config.plugins.PermanentClock.position_y.cancel()
self.close()
##############################################################################
class PermanentClockMenu(Screen):
skin = """
<screen position="center,center" size="420,105" title="%s">
<widget name="list" position="10,10" size="400,85" />
</screen>""" % _("Permanent Clock")
def __init__(self, session):
Screen.__init__(self, session)
self.session = session
self["list"] = MenuList([])
self["actions"] = ActionMap(["OkCancelActions"], {"ok": self.okClicked, "cancel": self.close}, -1)
self.onLayoutFinish.append(self.showMenu)
def showMenu(self):
list = []
if config.plugins.PermanentClock.enabled.value:
list.append(_("Deactivate permanent clock"))
else:
list.append(_("Activate permanent clock"))
list.append(_("Change permanent clock position"))
self["list"].setList(list)
def okClicked(self):
sel = self["list"].getCurrent()
if pClock.dialog is None:
pClock.gotSession(self.session)
if sel == _("Deactivate permanent clock") or sel == _("Activate permanent clock"):
pClock.changeVisibility()
self.showMenu()
else:
pClock.dialog.hide()
self.session.openWithCallback(self.positionerCallback, PermanentClockPositioner)
def positionerCallback(self, callback=None):
pClock.showHide()
##############################################################################
def sessionstart(reason, **kwargs):
if reason == 0:
pClock.gotSession(kwargs["session"])
def startConfig(session, **kwargs):
session.open(PermanentClockMenu)
def main(menuid):
if menuid != "system":
return [ ]
return [(_("Permanent Clock"), startConfig, "permanent_clock", None)]
##############################################################################
def Plugins(**kwargs):
return [
PluginDescriptor(where=[PluginDescriptor.WHERE_SESSIONSTART], fnc=sessionstart),
		PluginDescriptor(name=_("Permanent Clock"), description=_("Shows the clock permanently on the screen"), where=PluginDescriptor.WHERE_MENU, fnc=main)]
| 31.84456
| 176
| 0.664823
|
0b357bb07ada26c3ba997f3da50652cc4f841b95
| 2,584
|
py
|
Python
|
gamestonk_terminal/stocks/discovery/yahoofinance_model.py
|
Aerex/GamestonkTerminal
|
680e0cd278f0d8e45031cdc9d51f247e9aa90ce1
|
[
"MIT"
] | 1,835
|
2021-05-09T02:55:06.000Z
|
2022-03-29T12:37:05.000Z
|
gamestonk_terminal/stocks/discovery/yahoofinance_model.py
|
Aerex/GamestonkTerminal
|
680e0cd278f0d8e45031cdc9d51f247e9aa90ce1
|
[
"MIT"
] | 569
|
2021-05-09T15:59:41.000Z
|
2022-03-29T12:25:16.000Z
|
gamestonk_terminal/stocks/discovery/yahoofinance_model.py
|
Aerex/GamestonkTerminal
|
680e0cd278f0d8e45031cdc9d51f247e9aa90ce1
|
[
"MIT"
] | 268
|
2021-05-10T21:46:50.000Z
|
2022-03-28T09:18:38.000Z
|
""" Yahoo Finance Model """
__docformat__ = "numpy"
import pandas as pd
import requests
def get_gainers() -> pd.DataFrame:
"""Get top gainers. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Stock Gainers
"""
url_gainers = "https://finance.yahoo.com/screener/predefined/day_gainers"
return pd.read_html(requests.get(url_gainers).text)[0]
def get_losers() -> pd.DataFrame:
"""Get top losers. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Stock Losers
"""
url_losers = "https://finance.yahoo.com/screener/predefined/day_losers"
return pd.read_html(requests.get(url_losers).text)[0]
def get_ugs() -> pd.DataFrame:
"""Get stocks with earnings growth rates better than 25% and relatively low PE and PEG ratios. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Undervalued stocks
"""
url = "https://finance.yahoo.com/screener/predefined/undervalued_growth_stocks"
data = pd.read_html(requests.get(url).text)[0]
data = data.iloc[:, :-1]
return data
def get_gtech() -> pd.DataFrame:
"""Get technology stocks with revenue and earnings growth in excess of 25%. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Growth technology stocks
"""
url = "https://finance.yahoo.com/screener/predefined/growth_technology_stocks"
data = pd.read_html(requests.get(url).text)[0]
data = data.iloc[:, :-1]
return data
def get_active() -> pd.DataFrame:
"""Get stocks ordered in descending order by intraday trade volume. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Most active stocks
"""
url = "https://finance.yahoo.com/screener/predefined/most_actives"
data = pd.read_html(requests.get(url).text)[0]
return data
def get_ulc() -> pd.DataFrame:
"""Get Yahoo Finance potentially undervalued large cap stocks. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Most undervalued large cap stocks
"""
url = "https://finance.yahoo.com/screener/predefined/undervalued_large_caps"
data = pd.read_html(requests.get(url).text)[0]
return data
def get_asc() -> pd.DataFrame:
"""Get Yahoo Finance small cap stocks with earnings growth rates better than 25%. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Most aggressive small cap stocks
"""
url = "https://finance.yahoo.com/screener/predefined/aggressive_small_caps"
data = pd.read_html(requests.get(url).text)[0]
return data
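# Usage sketch (requires network access; the exact columns come from Yahoo's
# predefined screener tables and may change over time):
#
#   df = get_gainers()
#   print(df.head())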
| 24.846154
| 122
| 0.657121
|
2f90a926e7c8ae9d47cca77c84ca5477595de9d3
| 759
|
py
|
Python
|
hero_slider/tests/views_tests.py
|
bitlabstudio/django-hero-slider
|
8153b3eece76c47210a266c2edb660725c34a56e
|
[
"MIT"
] | 3
|
2019-12-24T18:22:25.000Z
|
2021-07-21T19:37:30.000Z
|
hero_slider/tests/views_tests.py
|
bitmazk/django-hero-slider
|
8153b3eece76c47210a266c2edb660725c34a56e
|
[
"MIT"
] | 5
|
2015-01-01T02:54:30.000Z
|
2016-08-08T12:48:08.000Z
|
hero_slider/tests/views_tests.py
|
bitmazk/django-hero-slider
|
8153b3eece76c47210a266c2edb660725c34a56e
|
[
"MIT"
] | 3
|
2015-01-01T14:51:48.000Z
|
2016-08-05T19:46:24.000Z
|
"""Tests for the views of the ``hero_slider`` app."""
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django_libs.tests.mixins import ViewRequestFactoryTestMixin
from ..views import GetCTypeDetails
class GetCTypeDetailsTestCase(ViewRequestFactoryTestMixin, TestCase):
"""Tests for the ``GetCTypeDetails`` view class."""
view_class = GetCTypeDetails
def test_view(self):
resp = self.is_callable(data={'pk': 1, })
ctype = ContentType.objects.get(pk=1)
self.assertIn(ctype.app_label, resp.content.decode('utf-8'), msg=(
'Should return a JSON string containing app_label and model'))
def test_bad_pk(self):
self.is_not_callable(data={'pk': 999, })
| 34.5
| 74
| 0.716733
|
a0f182b1ac469b1738f5dff68ff208a4f11047ac
| 215
|
py
|
Python
|
shortcuts/actions/calculation.py
|
maximejf42/python-shortcuts
|
addde7732e88d250aaea75c228241ed182fca850
|
[
"MIT"
] | 1
|
2021-12-07T09:07:10.000Z
|
2021-12-07T09:07:10.000Z
|
shortcuts/actions/calculation.py
|
yy/python-shortcuts
|
025c54be965600fbe7ff6ec96f594efea503cbc3
|
[
"MIT"
] | null | null | null |
shortcuts/actions/calculation.py
|
yy/python-shortcuts
|
025c54be965600fbe7ff6ec96f594efea503cbc3
|
[
"MIT"
] | null | null | null |
from shortcuts.actions.base import BaseAction, Field
class CountAction(BaseAction):
'''Count'''
itype = 'is.workflow.actions.count'
keyword = 'count'
count = Field('WFCountType', capitalize=True)
| 21.5
| 52
| 0.697674
|
061b79de324b6c5068393ad5672024bc5341a144
| 1,813
|
py
|
Python
|
src/bindings/rmr-python/examples/receive.py
|
cachengo/ric-plt-lib-rmr
|
a1575dacc478b945ea63f5d0cc3db3d66dcb5983
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
src/bindings/rmr-python/examples/receive.py
|
cachengo/ric-plt-lib-rmr
|
a1575dacc478b945ea63f5d0cc3db3d66dcb5983
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
src/bindings/rmr-python/examples/receive.py
|
cachengo/ric-plt-lib-rmr
|
a1575dacc478b945ea63f5d0cc3db3d66dcb5983
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
# ==================================================================================
# Copyright (c) 2019 Nokia
# Copyright (c) 2018-2019 AT&T Intellectual Property.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================
from setuptools import setup, find_packages
from rmr import rmr
import time
import sys
import signal
# Demonstrate NNG cleanup
def signal_handler(sig, frame):
print('SIGINT received! Cleaning up rmr')
rmr.rmr_close(mrc)
print("Byeee")
sys.exit(0)
# init rmr
mrc = rmr.rmr_init("4560".encode('utf-8'), rmr.RMR_MAX_RCV_BYTES, 0x00)
while rmr.rmr_ready(mrc) == 0:
time.sleep(1)
print("not yet ready")
rmr.rmr_set_stimeout(mrc, 2)
# capture ctrl-c
signal.signal(signal.SIGINT, signal_handler)
sbuf = None
while True:
print("Waiting for a message, will timeout after 2000ms")
sbuf = rmr.rmr_torcv_msg(mrc, sbuf, 2000)
summary = rmr.message_summary(sbuf)
    if summary['message state'] == 12:  # 12 == RMR_ERR_TIMEOUT: nothing arrived in time
print("Nothing received =(")
else:
print("Message received!: {}".format(summary))
        val = b"message received OK yall!"
rmr.set_payload_and_length(val, sbuf)
sbuf = rmr.rmr_rts_msg(mrc, sbuf)
time.sleep(1)
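# A rough sketch of the sending side, assuming the same rmr binding
# (rmr_alloc_msg/rmr_send_msg are taken from that binding; message-type and
# routing details depend on the local route table):
#
# sbuf = rmr.rmr_alloc_msg(mrc, 256)
# rmr.set_payload_and_length(b"hello", sbuf)
# sbuf.contents.mtype = 0
# sbuf = rmr.rmr_send_msg(mrc, sbuf)
# print(rmr.message_summary(sbuf))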
| 31.807018
| 84
| 0.63155
|
b08ada0660beee3ab49f49762bae69f432a84b0d
| 766
|
py
|
Python
|
nonbonded/tests/library/plotting/plotly/test_benchmark.py
|
SimonBoothroyd/nonbonded
|
3efbb7d943d936b47248975f9ad0d8a006ea8684
|
[
"MIT"
] | 5
|
2020-05-11T18:25:00.000Z
|
2022-01-27T10:55:09.000Z
|
nonbonded/tests/library/plotting/plotly/test_benchmark.py
|
SimonBoothroyd/nonbonded
|
3efbb7d943d936b47248975f9ad0d8a006ea8684
|
[
"MIT"
] | 88
|
2020-06-02T14:40:05.000Z
|
2022-03-02T09:20:39.000Z
|
nonbonded/tests/library/plotting/plotly/test_benchmark.py
|
SimonBoothroyd/nonbonded
|
3efbb7d943d936b47248975f9ad0d8a006ea8684
|
[
"MIT"
] | null | null | null |
from nonbonded.library.plotting.plotly.benchmark import (
plot_overall_statistics,
plot_scatter_results,
)
from nonbonded.library.statistics.statistics import StatisticType
def test_plot_overall_statistics(benchmarks_and_results, tmpdir):
benchmarks, results, _ = benchmarks_and_results
figure = plot_overall_statistics(benchmarks, results, StatisticType.RMSE)
assert figure is not None
assert figure.to_plotly() is not None
def test_plot_scatter_results(benchmarks_and_results, tmpdir):
benchmarks, results, data_sets = benchmarks_and_results
figures = plot_scatter_results(benchmarks, results, data_sets)
for figure in figures.values():
assert figure is not None
assert figure.to_plotly() is not None
| 27.357143
| 77
| 0.778068
|
741cf7ff78759a87b0918152eb88caa74c40ae1c
| 9,940
|
py
|
Python
|
tensorforce/models/distribution_model.py
|
youlei202/tensorforce-lei
|
871ef7f5c41d496aa8ad674854792ebd52ce1546
|
[
"Apache-2.0"
] | null | null | null |
tensorforce/models/distribution_model.py
|
youlei202/tensorforce-lei
|
871ef7f5c41d496aa8ad674854792ebd52ce1546
|
[
"Apache-2.0"
] | null | null | null |
tensorforce/models/distribution_model.py
|
youlei202/tensorforce-lei
|
871ef7f5c41d496aa8ad674854792ebd52ce1546
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce import util
from tensorforce.core.networks import Network
from tensorforce.core.distributions import Distribution, Bernoulli, Categorical, Gaussian, Beta
from tensorforce.models import Model
class DistributionModel(Model):
"""
Base class for models using distributions parametrized by a neural network.
"""
def __init__(
self,
states_spec,
actions_spec,
network_spec,
device,
session_config,
scope,
saver_spec,
summary_spec,
distributed_spec,
optimizer,
discount,
variable_noise,
states_preprocessing_spec,
explorations_spec,
reward_preprocessing_spec,
distributions_spec,
entropy_regularization
):
self.network_spec = network_spec
self.distributions_spec = distributions_spec
self.distributions = None
self.fn_kl_divergence = None
# Entropy regularization
assert entropy_regularization is None or entropy_regularization >= 0.0
self.entropy_regularization = entropy_regularization
super(DistributionModel, self).__init__(
states_spec=states_spec,
actions_spec=actions_spec,
device=device,
session_config=session_config,
scope=scope,
saver_spec=saver_spec,
summary_spec=summary_spec,
distributed_spec=distributed_spec,
optimizer=optimizer,
discount=discount,
variable_noise=variable_noise,
states_preprocessing_spec=states_preprocessing_spec,
explorations_spec=explorations_spec,
reward_preprocessing_spec=reward_preprocessing_spec
)
def initialize(self, custom_getter):
super(DistributionModel, self).initialize(custom_getter)
# Network
self.network = Network.from_spec(
spec=self.network_spec,
kwargs=dict(summary_labels=self.summary_labels)
)
# Distributions
self.distributions = self.create_distributions()
# Network internals
self.internals_input.extend(self.network.internals_input())
self.internals_init.extend(self.network.internals_init())
# KL divergence function
self.fn_kl_divergence = tf.make_template(
name_=(self.scope + '/kl-divergence'),
func_=self.tf_kl_divergence,
custom_getter_=custom_getter
)
def create_distributions(self):
distributions = dict()
for name, action in self.actions_spec.items():
if self.distributions_spec is not None and name in self.distributions_spec:
kwargs = dict(action)
kwargs['summary_labels'] = self.summary_labels
distributions[name] = Distribution.from_spec(
spec=self.distributions_spec[name],
kwargs=kwargs
)
elif action['type'] == 'bool':
distributions[name] = Bernoulli(
shape=action['shape'],
summary_labels=self.summary_labels
)
elif action['type'] == 'int':
distributions[name] = Categorical(
shape=action['shape'],
num_actions=action['num_actions'],
summary_labels=self.summary_labels
)
elif action['type'] == 'float':
if 'min_value' in action:
distributions[name] = Beta(
shape=action['shape'],
min_value=action['min_value'],
max_value=action['max_value'],
summary_labels=self.summary_labels
)
else:
distributions[name] = Gaussian(
shape=action['shape'],
summary_labels=self.summary_labels
)
return distributions
def tf_actions_and_internals(self, states, internals, update, deterministic):
embedding, internals = self.network.apply(x=states, internals=internals, update=update, return_internals=True)
actions = dict()
for name, distribution in self.distributions.items():
distr_params = distribution.parameterize(x=embedding)
actions[name] = distribution.sample(distr_params=distr_params, deterministic=deterministic)
return actions, internals
def tf_kl_divergence(self, states, internals, update):
embedding = self.network.apply(x=states, internals=internals, update=update)
kl_divergences = list()
for name, distribution in self.distributions.items():
distr_params = distribution.parameterize(x=embedding)
fixed_distr_params = tuple(tf.stop_gradient(input=value) for value in distr_params)
kl_divergence = distribution.kl_divergence(distr_params1=fixed_distr_params, distr_params2=distr_params)
collapsed_size = util.prod(util.shape(kl_divergence)[1:])
kl_divergence = tf.reshape(tensor=kl_divergence, shape=(-1, collapsed_size))
kl_divergences.append(kl_divergence)
kl_divergence_per_instance = tf.reduce_mean(input_tensor=tf.concat(values=kl_divergences, axis=1), axis=1)
return tf.reduce_mean(input_tensor=kl_divergence_per_instance, axis=0)
def get_optimizer_kwargs(self, states, internals, actions, terminal, reward, update):
kwargs = super(DistributionModel, self).get_optimizer_kwargs(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
update=update
)
kwargs['fn_kl_divergence'] = (
lambda: self.fn_kl_divergence(
states=states,
internals=internals,
update=update
)
)
return kwargs
def tf_regularization_losses(self, states, internals, update):
losses = super(DistributionModel, self).tf_regularization_losses(
states=states,
internals=internals,
update=update
)
network_loss = self.network.regularization_loss()
if network_loss is not None:
losses['network'] = network_loss
for distribution in self.distributions.values():
regularization_loss = distribution.regularization_loss()
if regularization_loss is not None:
if 'distributions' in losses:
losses['distributions'] += regularization_loss
else:
losses['distributions'] = regularization_loss
if self.entropy_regularization is not None and self.entropy_regularization > 0.0:
entropies = list()
embedding = self.network.apply(x=states, internals=internals, update=update)
for name, distribution in self.distributions.items():
distr_params = distribution.parameterize(x=embedding)
entropy = distribution.entropy(distr_params=distr_params)
collapsed_size = util.prod(util.shape(entropy)[1:])
entropy = tf.reshape(tensor=entropy, shape=(-1, collapsed_size))
entropies.append(entropy)
entropy_per_instance = tf.reduce_mean(input_tensor=tf.concat(values=entropies, axis=1), axis=1)
entropy = tf.reduce_mean(input_tensor=entropy_per_instance, axis=0)
losses['entropy'] = -self.entropy_regularization * entropy
return losses
def get_variables(self, include_non_trainable=False):
model_variables = super(DistributionModel, self).get_variables(include_non_trainable=include_non_trainable)
network_variables = self.network.get_variables(include_non_trainable=include_non_trainable)
distribution_variables = self.get_distributions_variables(self.distributions, include_non_trainable=include_non_trainable)
return model_variables + network_variables + distribution_variables
def get_summaries(self):
model_summaries = super(DistributionModel, self).get_summaries()
network_summaries = self.network.get_summaries()
distribution_summaries = self.get_distributions_summaries(self.distributions)
return model_summaries + network_summaries + distribution_summaries
@staticmethod
def get_distributions_variables(distributions, include_non_trainable=False):
distribution_variables = [
variable for name in sorted(distributions)
for variable in distributions[name].get_variables(include_non_trainable=include_non_trainable)
]
return distribution_variables
@staticmethod
def get_distributions_summaries(distributions):
distribution_summaries = [
summary for name in sorted(distributions)
for summary in distributions[name].get_summaries()
]
return distribution_summaries
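# For reference, create_distributions() above maps action specs to
# distribution classes as follows (spec values are illustrative):
#
#   {'type': 'bool', 'shape': ()}                                    -> Bernoulli
#   {'type': 'int', 'shape': (), 'num_actions': 4}                   -> Categorical
#   {'type': 'float', 'shape': ()}                                   -> Gaussian
#   {'type': 'float', 'shape': (), 'min_value': 0., 'max_value': 1.} -> Beta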
| 39.919679
| 130
| 0.645272
|
d849d729c9e3c4164be9ee3af875f5f69a7f991f
| 1,500
|
py
|
Python
|
tests/test_responseparser.py
|
qyanu-pull-requests/python-sdk
|
ae27a7b680637025529e3421016d8931cbc28306
|
[
"MIT"
] | null | null | null |
tests/test_responseparser.py
|
qyanu-pull-requests/python-sdk
|
ae27a7b680637025529e3421016d8931cbc28306
|
[
"MIT"
] | null | null | null |
tests/test_responseparser.py
|
qyanu-pull-requests/python-sdk
|
ae27a7b680637025529e3421016d8931cbc28306
|
[
"MIT"
] | null | null | null |
import re
import hexonet.apiconnector.responseparser as RP
from hexonet.apiconnector.responsetemplatemanager import ResponseTemplateManager as RTM
def test_rpmethods():
rtm = RTM()
rtm.addTemplate(
'OK',
rtm.generateTemplate('200', 'Command completed successfully')
)
# #.serialize()
# [w/ PROPERTY]
r = rtm.getTemplate('OK').getHash()
r["PROPERTY"] = {
"DOMAIN": ['mydomain1.com', 'mydomain2.com', 'mydomain3.com'],
"RATING": ['1', '2', '3'],
"SUM": [3]
}
assert RP.serialize(r) == (
'[RESPONSE]\r\nPROPERTY[DOMAIN][0]=mydomain1.com\r\nPROPERTY[DOMAIN' +
'][1]=mydomain2.com\r\nPROPERTY[DOMAIN][2]=mydomain3.com\r\nPROPERT' +
'Y[RATING][0]=1\r\nPROPERTY[RATING][1]=2\r\nPROPERTY[RATING][2]=3\r' +
'\nPROPERTY[SUM][0]=3\r\nCODE=200\r\nDESCRIPTION=Command completed ' +
'successfully\r\nEOF\r\n'
)
# [w/o PROPERTY]
tpl = rtm.getTemplate('OK')
assert RP.serialize(tpl.getHash()) == tpl.getPlain()
# [w/o CODE, w/o DESCRIPTION]
h = rtm.getTemplate('OK').getHash()
h.pop('CODE')
h.pop('DESCRIPTION')
assert RP.serialize(h) == '[RESPONSE]\r\nEOF\r\n'
# [w/ QUEUETIME, w/ RUNTIME]
h = rtm.getTemplate('OK').getHash()
h["QUEUETIME"] = '0'
h["RUNTIME"] = '0.12'
assert RP.serialize(h) == (
'[RESPONSE]\r\nCODE=200\r\nDESCRIPTION=Command completed successful' +
'ly\r\nQUEUETIME=0\r\nRUNTIME=0.12\r\nEOF\r\n'
)
| 31.914894
| 87
| 0.601333
|
4e88375f3bf62fe33ed8e2369b8de0709ad57242
| 777
|
py
|
Python
|
Python3/0658-Find-K-Closest-Elements/soln-1.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0658-Find-K-Closest-Elements/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0658-Find-K-Closest-Elements/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
import bisect
class Solution:
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
lefts, rights = [], []
idx = bisect.bisect_left(arr, x)
l, r = idx - 1, idx # this is important
count = k
while count:
if l < 0:
rights.extend(arr[r:r+count])
break
if r >= len(arr):
lefts.extend(arr[l-count+1:l+1][::-1])
break
if x - arr[l] <= arr[r] - x:
lefts.append(arr[l])
l -= 1
else:
rights.append(arr[r])
r += 1
count -= 1
return lefts[::-1] + rights
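# Worked example: arr = [1, 2, 3, 4, 5], k = 4, x = 3. bisect_left lands on
# index 2, so l/r start at 1/2; the loop picks 3, then 2 (a tie goes left),
# then 4, then 1 (another tie), and the result is [1, 2, 3, 4].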
| 28.777778
| 54
| 0.392535
|
8f698edff44070d2e8a076a598cf3cbec6465680
| 5,153
|
py
|
Python
|
qmpy/data/meta_data.py
|
WalterjhShen/qmpy
|
686e18cecbb82a6bb523249ac1779a99fb865350
|
[
"MIT"
] | 2
|
2019-04-25T17:49:16.000Z
|
2019-06-01T01:36:04.000Z
|
qmpy/data/meta_data.py
|
WalterjhShen/qmpy
|
686e18cecbb82a6bb523249ac1779a99fb865350
|
[
"MIT"
] | 10
|
2018-07-05T03:19:58.000Z
|
2019-03-24T13:05:14.000Z
|
qmpy/data/meta_data.py
|
WalterjhShen/qmpy
|
686e18cecbb82a6bb523249ac1779a99fb865350
|
[
"MIT"
] | 1
|
2020-04-30T14:08:45.000Z
|
2020-04-30T14:08:45.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
import logging
from datetime import datetime
logger = logging.getLogger(__name__)
class MetaData(models.Model):
"""
Base class for variable typed model tagging.
Model for arbitrary meta-data descriptors for various qmpy objects.
Generally accessed by properties and methods added by the "add_label"
descriptor. See "add_label" for a more detailed description of its use
Relationships
| :mod:`~qmpy.Calculation` via calculation_set
| :mod:`~qmpy.Composition` via composition_set
| :mod:`~qmpy.DOS` via dos_set
| :mod:`~qmpy.Entry` via entry_set
| :mod:`~qmpy.Structure` via structure_set
Attributes:
| id: Autoincrementing primary key
| type: Label for the kind of meta data, e.g. "hold", "keyword"
| value: Content of the meta data. e.g. "repeated failure", "known
| anti-ferromagnetic"
Examples::
>>> MetaData.get('Keyword', 'ICSD')
<Keyword: ICSD>
"""
type = models.CharField(max_length=15)
value = models.TextField()
class Meta:
app_label = 'qmpy'
db_table = 'meta_data'
def __str__(self):
return self.value
def __repr__(self):
return '<%s: %s>' % (self.type.title(), self.value)
@classmethod
def get(cls, type, value):
md, new = MetaData.objects.get_or_create(type=type, value=value)
if new:
md.save()
return md
class GlobalWarning(object):
@staticmethod
def set(warning):
md = MetaData.get('global_warning', warning)
md.save()
return md
@staticmethod
def clear(warning):
md = MetaData.get('global_warning', warning)
md.delete()
@staticmethod
def list():
return list(MetaData.objects.filter(type='global_warning'))
class GlobalInfo(object):
@staticmethod
def set(warning):
md = MetaData.get('global_info', warning)
md.save()
return md
@staticmethod
def clear(warning):
md = MetaData.get('global_info', warning)
md.delete()
@staticmethod
def list():
return list(MetaData.objects.filter(type='global_info'))
class DatabaseUpdate(object):
@staticmethod
def value():
return MetaData.objects.get(type='database_update').value
@staticmethod
def set():
MetaData.objects.filter(type='database_update').update(
value=str(datetime.date(datetime.now())))
def add_meta_data(label, plural=None, cache=None, description=''):
"""
Decorator for adding managed attributes for MetaData types to other models.
Requires that the class being decorated has a many_to_many field with
MetaData.
Example::
>>> @add_label("keywords")
>>> class NewModel(models.Model):
>>> meta_data = models.ManyToManyField('MetaData')
>>>
>>> instance = NewModel()
>>> instance.keywords
[]
>>> instance.add_keyword('decorated!')
>>> instance.keywords
[<Keyword: decorated!>]
>>> instance.remove_keyword('decorated!')
>>> instance.keywords
[]
>>> instance.keywords = ['add', 'in', 'bulk']
>>> instance.keywords
[<Keyword: add>, <Keyword: in>, <Keyword: bulk>]
"""
if plural is None:
plural = '%ss' % label
if cache is None:
cache = '_%s' % plural
label = label.lower()
plural = plural.lower()
cache = cache.lower()
def func(cls):
setattr(cls, cache, None)
def getter(self):
if getattr(self, cache) is None:
if getattr(self, 'pk') is None:
setattr(self, cache, [])
else:
data = self.meta_data.filter(type=label)
data = data.values_list('value', flat=True)
setattr(self, cache, list(data))
return getattr(self, cache)
def setter(cls, values):
setattr(cls, cache, values)
setattr(cls, plural, property(getter,
setter,
None,
description))
def adder(cls, value):
'Helper function to add %s to list of %s.' % (label, plural)
existing = getattr(cls, plural)
if not value in existing:
existing.append(value)
setattr(cls, 'add_%s' % label, adder)
def remover(cls, value):
'Helper function to remove a %s from list of %s.' % (label, plural)
existing = getattr(cls, plural)
if value in existing:
existing.remove(value)
setattr(cls, 'remove_%s' % label, remover)
doc = 'Return list of %s (MetaData objects of type %s)' % (plural,
label)
def obj_getter(self):
return [ MetaData.get(label, v) for v in getattr(self, plural) ]
setattr(cls, '%s_objects' % label, property(obj_getter, None, None, doc))
return cls
return func
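# Hedged usage sketch (appended for illustration; not part of qmpy). It shows how
# the decorator above is wired onto a model and what the generated helpers look
# like. Running it needs a configured Django environment, so it is left as a
# comment sketch; the model name "Example" is hypothetical.
#
#   @add_meta_data('keyword')
#   @add_meta_data('hold')
#   class Example(models.Model):
#       meta_data = models.ManyToManyField('MetaData')
#
#   obj = Example()
#   obj.add_keyword('ICSD')           # generated add_<label> helper
#   obj.keywords                      # ['ICSD']
#   obj.keyword_objects               # [<Keyword: ICSD>]
#   GlobalWarning.set('maintenance window tonight')
#   GlobalWarning.list()              # [<Global_Warning: maintenance window tonight>]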
| 29.786127
| 81
| 0.571512
|
10b3c22ab6f568a5fcf4c18b475ef2fb3dbaa092
| 4,613
|
py
|
Python
|
reviewboard/webapi/decorators.py
|
vigneshsrinivasan/reviewboard
|
4775130c1c1022f81edc11928e02b1b6c069f6ed
|
[
"MIT"
] | 1
|
2020-02-11T07:09:14.000Z
|
2020-02-11T07:09:14.000Z
|
reviewboard/webapi/decorators.py
|
vigneshsrinivasan/reviewboard
|
4775130c1c1022f81edc11928e02b1b6c069f6ed
|
[
"MIT"
] | null | null | null |
reviewboard/webapi/decorators.py
|
vigneshsrinivasan/reviewboard
|
4775130c1c1022f81edc11928e02b1b6c069f6ed
|
[
"MIT"
] | null | null | null |
from django.http import HttpRequest
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.decorators import simple_decorator
from djblets.webapi.core import WebAPIResponse, WebAPIResponseError
from djblets.webapi.decorators import webapi_login_required, \
webapi_response_errors, \
_find_httprequest
from djblets.webapi.encoders import BasicAPIEncoder
from djblets.webapi.errors import DOES_NOT_EXIST, NOT_LOGGED_IN, \
PERMISSION_DENIED
from reviewboard.site.models import LocalSite
@webapi_response_errors(NOT_LOGGED_IN)
@simple_decorator
def webapi_check_login_required(view_func):
"""
A decorator that checks whether login is required on this installation
and, if so, checks if the user is logged in. If login is required and
the user is not logged in, they'll get a NOT_LOGGED_IN error.
"""
def _check(*args, **kwargs):
siteconfig = SiteConfiguration.objects.get_current()
request = _find_httprequest(args)
if (siteconfig.get("auth_require_sitewide_login") or
(request.user.is_anonymous() and
'HTTP_AUTHORIZATION' in request.META)):
return webapi_login_required(view_func)(*args, **kwargs)
else:
return view_func(*args, **kwargs)
view_func.checks_login_required = True
return _check
def webapi_deprecated(deprecated_in, force_error_http_status=None,
default_api_format=None, encoders=[]):
"""Marks an API handler as deprecated.
``deprecated_in`` specifies the version that first deprecates this call.
``force_error_http_status`` forces errors to use the specified HTTP
status code.
``default_api_format`` specifies the default api format (json or xml)
if one isn't provided.
"""
def _dec(view_func):
def _view(*args, **kwargs):
if default_api_format:
request = args[0]
assert isinstance(request, HttpRequest)
method_args = getattr(request, request.method, None)
if method_args and 'api_format' not in method_args:
method_args = method_args.copy()
method_args['api_format'] = default_api_format
setattr(request, request.method, method_args)
response = view_func(*args, **kwargs)
if isinstance(response, WebAPIResponse):
response.encoders = encoders
if isinstance(response, WebAPIResponseError):
response.api_data['deprecated'] = {
'in_version': deprecated_in,
}
if (force_error_http_status and
isinstance(response, WebAPIResponseError)):
response.status_code = force_error_http_status
return response
return _view
return _dec
_deprecated_api_encoders = []
def webapi_deprecated_in_1_5(view_func):
from reviewboard.webapi.encoder import DeprecatedReviewBoardAPIEncoder
global _deprecated_api_encoders
if not _deprecated_api_encoders:
_deprecated_api_encoders = [
DeprecatedReviewBoardAPIEncoder(),
BasicAPIEncoder(),
]
return webapi_deprecated(
deprecated_in='1.5',
force_error_http_status=200,
default_api_format='json',
encoders=_deprecated_api_encoders)(view_func)
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@simple_decorator
def webapi_check_local_site(view_func):
"""Checks whether a user has access to a local site given in the URL.
This decorator can be added to get/get_list methods to check whether or
not a user should be able to view them given the local site name in the URL.
"""
def _check(*args, **kwargs):
request = _find_httprequest(args)
local_site_name = kwargs.get('local_site_name', None)
if local_site_name:
try:
local_site = LocalSite.objects.get(name=local_site_name)
if not local_site.is_accessible_by(request.user):
if request.user.is_authenticated():
return WebAPIResponseError(request, PERMISSION_DENIED)
else:
return WebAPIResponseError(request, NOT_LOGGED_IN)
except LocalSite.DoesNotExist:
return WebAPIResponseError(request, DOES_NOT_EXIST)
return view_func(*args, **kwargs)
return _check
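# Hedged usage sketch (appended for illustration; not part of Review Board).
# One plausible way the decorators above are stacked on a legacy webapi view;
# the view name and its body are hypothetical.
#
#   @webapi_check_local_site
#   @webapi_check_login_required
#   @webapi_deprecated_in_1_5
#   def example_view(request, local_site_name=None, *args, **kwargs):
#       ...  # build and return a WebAPIResponse here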
| 35.484615
| 80
| 0.656189
|
2c6f506132ef71a1a22f72694c74270c008387fd
| 28,769
|
py
|
Python
|
tests/unit/transport/test_zeromq.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/transport/test_zeromq.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/transport/test_zeromq.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
:codeauthor: Thomas Jackson <jacksontj.89@gmail.com>
"""
from __future__ import absolute_import, print_function, unicode_literals
import ctypes
import multiprocessing
import os
import threading
import time
from concurrent.futures.thread import ThreadPoolExecutor
import salt.config
import salt.exceptions
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.log.setup
import salt.transport.client
import salt.transport.server
import salt.utils.platform
import salt.utils.process
import zmq.eventloop.ioloop
from salt.ext import six
from salt.ext.six.moves import range
from salt.ext.tornado.testing import AsyncTestCase
from salt.transport.zeromq import AsyncReqMessageClientPool
# Import test support libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import flaky, get_unused_localhost_port, not_runs_on
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, patch
from tests.unit.transport.mixins import (
PubChannelMixin,
ReqChannelMixin,
run_loop_in_thread,
)
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, "ZMQIOLoop"):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
class BaseZMQReqCase(TestCase, AdaptedConfigurationTestCaseMixin):
"""
Test the req server/client pair
"""
@classmethod
def setUpClass(cls):
if not hasattr(cls, "_handle_payload"):
return
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "zeromq",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
}
)
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "zeromq",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"auth_timeout": 5,
"auth_tries": 1,
"master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
}
)
cls.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager"
)
cls.server_channel = salt.transport.server.ReqServerChannel.factory(
cls.master_config
)
cls.server_channel.pre_fork(cls.process_manager)
cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
cls.evt = threading.Event()
cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
cls.server_thread = threading.Thread(
target=run_loop_in_thread, args=(cls.io_loop, cls.evt)
)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
if not hasattr(cls, "_handle_payload"):
return
# Attempting to kill the children hangs the test suite.
# Let the test suite handle this instead.
cls.process_manager.stop_restarting()
cls.process_manager.kill_children()
cls.evt.set()
cls.server_thread.join()
time.sleep(
2
) # Give the procs a chance to fully close before we stop the io_loop
cls.server_channel.close()
del cls.server_channel
del cls.io_loop
del cls.process_manager
del cls.server_thread
del cls.master_config
del cls.minion_config
@classmethod
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
return payload, {"fun": "send_clear"}
class ClearReqTestCases(BaseZMQReqCase, ReqChannelMixin):
"""
Test all of the clear msg stuff
"""
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(
self.minion_config, crypt="clear"
)
def tearDown(self):
self.channel.close()
del self.channel
@classmethod
@salt.ext.tornado.gen.coroutine
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
raise salt.ext.tornado.gen.Return((payload, {"fun": "send_clear"}))
@skipIf(True, "SLOWTEST skip")
def test_master_uri_override(self):
"""
ensure master_uri kwarg is respected
"""
# minion_config should be 127.0.0.1, we want a different uri that still connects
uri = "tcp://{master_ip}:{master_port}".format(
master_ip="localhost", master_port=self.minion_config["master_port"]
)
channel = salt.transport.Channel.factory(self.minion_config, master_uri=uri)
self.assertIn("localhost", channel.master_uri)
del channel
@flaky
@not_runs_on(
kernel="linux",
    os_family="Suse",
reason="Skipping until https://github.com/saltstack/salt/issues/32902 gets fixed",
)
class AESReqTestCases(BaseZMQReqCase, ReqChannelMixin):
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_config)
def tearDown(self):
self.channel.close()
del self.channel
@classmethod
@salt.ext.tornado.gen.coroutine
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
raise salt.ext.tornado.gen.Return((payload, {"fun": "send"}))
# TODO: make failed returns have a specific framing so we can raise the same exception
# on encrypted channels
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# WARNING: This test will fail randomly on any system with > 1 CPU core!!!
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@skipIf(True, "SLOWTEST skip")
def test_badload(self):
"""
Test a variety of bad requests, make sure that we get some sort of error
"""
# TODO: This test should be re-enabled when Jenkins moves to C7.
# Once the version of salt-testing is increased to something newer than the September
# release of salt-testing, the @flaky decorator should be applied to this test.
msgs = ["", [], tuple()]
for msg in msgs:
with self.assertRaises(salt.exceptions.AuthenticationError):
ret = self.channel.send(msg, timeout=5)
class BaseZMQPubCase(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
"""
Test the req server/client pair
"""
@classmethod
def setUpClass(cls):
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "zeromq",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
}
)
        cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "zeromq",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
}
)
cls.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager"
)
cls.server_channel = salt.transport.server.PubServerChannel.factory(
cls.master_config
)
cls.server_channel.pre_fork(cls.process_manager)
# we also require req server for auth
cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(
cls.master_config
)
cls.req_server_channel.pre_fork(cls.process_manager)
cls._server_io_loop = salt.ext.tornado.ioloop.IOLoop()
cls.evt = threading.Event()
cls.req_server_channel.post_fork(
cls._handle_payload, io_loop=cls._server_io_loop
)
cls.server_thread = threading.Thread(
target=run_loop_in_thread, args=(cls._server_io_loop, cls.evt)
)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.process_manager.kill_children()
cls.process_manager.stop_restarting()
time.sleep(
2
) # Give the procs a chance to fully close before we stop the io_loop
cls.evt.set()
cls.server_thread.join()
cls.req_server_channel.close()
cls.server_channel.close()
cls._server_io_loop.stop()
del cls.server_channel
del cls._server_io_loop
del cls.process_manager
del cls.server_thread
del cls.master_config
del cls.minion_config
@classmethod
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
return payload, {"fun": "send_clear"}
def setUp(self):
super(BaseZMQPubCase, self).setUp()
self._start_handlers = dict(self.io_loop._handlers)
def tearDown(self):
super(BaseZMQPubCase, self).tearDown()
failures = []
for k, v in six.iteritems(self.io_loop._handlers):
if self._start_handlers.get(k) != v:
failures.append((k, v))
del self._start_handlers
if failures:
raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
@skipIf(True, "Skip until we can devote time to fix this test")
class AsyncPubChannelTest(BaseZMQPubCase, PubChannelMixin):
"""
Tests around the publish system
"""
def get_new_ioloop(self):
return salt.ext.tornado.ioloop.IOLoop()
class AsyncReqMessageClientPoolTest(TestCase):
def setUp(self):
super(AsyncReqMessageClientPoolTest, self).setUp()
sock_pool_size = 5
with patch(
"salt.transport.zeromq.AsyncReqMessageClient.__init__",
MagicMock(return_value=None),
):
self.message_client_pool = AsyncReqMessageClientPool(
{"sock_pool_size": sock_pool_size}, args=({}, "")
)
self.original_message_clients = self.message_client_pool.message_clients
self.message_client_pool.message_clients = [
MagicMock() for _ in range(sock_pool_size)
]
def tearDown(self):
with patch(
"salt.transport.zeromq.AsyncReqMessageClient.destroy",
MagicMock(return_value=None),
):
del self.original_message_clients
super(AsyncReqMessageClientPoolTest, self).tearDown()
def test_send(self):
for message_client_mock in self.message_client_pool.message_clients:
message_client_mock.send_queue = [0, 0, 0]
message_client_mock.send.return_value = []
self.assertEqual([], self.message_client_pool.send())
self.message_client_pool.message_clients[2].send_queue = [0]
self.message_client_pool.message_clients[2].send.return_value = [1]
self.assertEqual([1], self.message_client_pool.send())
def test_destroy(self):
self.message_client_pool.destroy()
self.assertEqual([], self.message_client_pool.message_clients)
class ZMQConfigTest(TestCase):
def test_master_uri(self):
"""
test _get_master_uri method
"""
m_ip = "127.0.0.1"
m_port = 4505
s_ip = "111.1.0.1"
s_port = 4058
m_ip6 = "1234:5678::9abc"
s_ip6 = "1234:5678::1:9abc"
with patch("salt.transport.zeromq.LIBZMQ_VERSION_INFO", (4, 1, 6)), patch(
"salt.transport.zeromq.ZMQ_VERSION_INFO", (16, 0, 1)
):
# pass in both source_ip and source_port
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port, source_ip=s_ip, source_port=s_port
) == "tcp://{0}:{1};{2}:{3}".format(s_ip, s_port, m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip6, master_port=m_port, source_ip=s_ip6, source_port=s_port
) == "tcp://[{0}]:{1};[{2}]:{3}".format(s_ip6, s_port, m_ip6, m_port)
# source ip and source_port empty
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port
) == "tcp://{0}:{1}".format(m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip6, master_port=m_port
) == "tcp://[{0}]:{1}".format(m_ip6, m_port)
# pass in only source_ip
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port, source_ip=s_ip
) == "tcp://{0}:0;{1}:{2}".format(s_ip, m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip6, master_port=m_port, source_ip=s_ip6
) == "tcp://[{0}]:0;[{1}]:{2}".format(s_ip6, m_ip6, m_port)
# pass in only source_port
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port, source_port=s_port
) == "tcp://0.0.0.0:{0};{1}:{2}".format(s_port, m_ip, m_port)
class PubServerChannel(TestCase, AdaptedConfigurationTestCaseMixin):
@classmethod
def setUpClass(cls):
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "zeromq",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
"sign_pub_messages": False,
}
)
salt.master.SMaster.secrets["aes"] = {
"secret": multiprocessing.Array(
ctypes.c_char, six.b(salt.crypt.Crypticle.generate_key_string()),
),
}
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "zeromq",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"auth_timeout": 5,
"auth_tries": 1,
"master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
}
)
@classmethod
def tearDownClass(cls):
del cls.minion_config
del cls.master_config
def setUp(self):
# Start the event loop, even though we don't directly use this with
# ZeroMQPubServerChannel, having it running seems to increase the
        # likelihood of dropped messages.
self.io_loop = salt.ext.tornado.ioloop.IOLoop()
self.io_loop.make_current()
self.io_loop_thread = threading.Thread(target=self.io_loop.start)
self.io_loop_thread.start()
self.process_manager = salt.utils.process.ProcessManager(
name="PubServer_ProcessManager"
)
def tearDown(self):
self.io_loop.add_callback(self.io_loop.stop)
self.io_loop_thread.join()
self.process_manager.stop_restarting()
self.process_manager.kill_children()
del self.io_loop
del self.io_loop_thread
del self.process_manager
@staticmethod
def _gather_results(opts, pub_uri, results, timeout=120, messages=None):
"""
        Gather results until the number of seconds specified by timeout passes
        without receiving a message
"""
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.setsockopt(zmq.LINGER, -1)
sock.setsockopt(zmq.SUBSCRIBE, b"")
sock.connect(pub_uri)
last_msg = time.time()
serial = salt.payload.Serial(opts)
crypticle = salt.crypt.Crypticle(
opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
while time.time() - last_msg < timeout:
try:
payload = sock.recv(zmq.NOBLOCK)
except zmq.ZMQError:
time.sleep(0.01)
else:
if messages:
if messages != 1:
messages -= 1
continue
payload = crypticle.loads(serial.loads(payload)["load"])
if "stop" in payload:
break
last_msg = time.time()
results.append(payload["jid"])
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows OS")
@skipIf(True, "SLOWTEST skip")
def test_publish_to_pubserv_ipc(self):
"""
        Test sending 10K messages to ZeroMQPubServerChannel using IPC transport
ZMQ's ipc transport not supported on Windows
"""
opts = dict(self.master_config, ipc_mode="ipc", pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
pub_uri = "tcp://{interface}:{publish_port}".format(**server_channel.opts)
send_num = 10000
expect = []
results = []
gather = threading.Thread(
target=self._gather_results, args=(self.minion_config, pub_uri, results,)
)
gather.start()
# Allow time for server channel to start, especially on windows
time.sleep(2)
for i in range(send_num):
expect.append(i)
load = {"tgt_type": "glob", "tgt": "*", "jid": i}
server_channel.publish(load)
server_channel.publish({"tgt_type": "glob", "tgt": "*", "stop": True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@skipIf(True, "SLOWTEST skip")
def test_zeromq_publish_port(self):
"""
        Test that when connecting we use the publish_port
        set in opts when it is not 4506.
"""
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
recon_randomize=False,
publish_port=455505,
recon_default=1,
recon_max=2,
master_ip="127.0.0.1",
acceptance_wait_time=5,
acceptance_wait_time_max=5,
)
opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
patch_socket = MagicMock(return_value=True)
patch_auth = MagicMock(return_value=True)
with patch.object(channel, "_socket", patch_socket), patch.object(
channel, "auth", patch_auth
):
channel.connect()
assert str(opts["publish_port"]) in patch_socket.mock_calls[0][1][0]
def test_zeromq_zeromq_filtering_decode_message_no_match(self):
"""
test AsyncZeroMQPubChannel _decode_messages when
zmq_filtering enabled and minion does not match
"""
message = [
b"4f26aeafdb2367620a393c973eddbe8f8b846eb",
b"\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf"
b"\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2"
b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
b"\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d"
b"\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>"
b"\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D",
]
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
zmq_filtering=True,
recon_randomize=False,
recon_default=1,
recon_max=2,
master_ip="127.0.0.1",
acceptance_wait_time=5,
acceptance_wait_time_max=5,
)
opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
server_channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
with patch(
"salt.crypt.AsyncAuth.crypticle",
MagicMock(return_value={"tgt_type": "glob", "tgt": "*", "jid": 1}),
) as mock_test:
res = server_channel._decode_messages(message)
assert res.result() is None
def test_zeromq_zeromq_filtering_decode_message(self):
"""
test AsyncZeroMQPubChannel _decode_messages
when zmq_filtered enabled
"""
message = [
b"4f26aeafdb2367620a393c973eddbe8f8b846ebd",
b"\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf"
b"\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2"
b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
b"\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d"
b"\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>"
b"\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D",
]
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
zmq_filtering=True,
recon_randomize=False,
recon_default=1,
recon_max=2,
master_ip="127.0.0.1",
acceptance_wait_time=5,
acceptance_wait_time_max=5,
)
opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
server_channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
with patch(
"salt.crypt.AsyncAuth.crypticle",
MagicMock(return_value={"tgt_type": "glob", "tgt": "*", "jid": 1}),
) as mock_test:
res = server_channel._decode_messages(message)
assert res.result()["enc"] == "aes"
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows OS")
@skipIf(True, "SLOWTEST skip")
def test_zeromq_filtering(self):
"""
        Test sending messages to the publisher
with zeromq_filtering enabled
"""
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
zmq_filtering=True,
acceptance_wait_time=5,
)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
pub_uri = "tcp://{interface}:{publish_port}".format(**server_channel.opts)
send_num = 1
expect = []
results = []
gather = threading.Thread(
target=self._gather_results,
args=(self.minion_config, pub_uri, results,),
kwargs={"messages": 2},
)
gather.start()
# Allow time for server channel to start, especially on windows
time.sleep(2)
expect.append(send_num)
load = {"tgt_type": "glob", "tgt": "*", "jid": send_num}
with patch(
"salt.utils.minions.CkMinions.check_minions",
MagicMock(
return_value={
"minions": ["minion"],
"missing": [],
"ssh_minions": False,
}
),
):
server_channel.publish(load)
server_channel.publish({"tgt_type": "glob", "tgt": "*", "stop": True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@skipIf(True, "SLOWTEST skip")
def test_publish_to_pubserv_tcp(self):
"""
        Test sending 10K messages to ZeroMQPubServerChannel using TCP transport
"""
opts = dict(self.master_config, ipc_mode="tcp", pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
pub_uri = "tcp://{interface}:{publish_port}".format(**server_channel.opts)
send_num = 10000
expect = []
results = []
gather = threading.Thread(
target=self._gather_results, args=(self.minion_config, pub_uri, results,)
)
gather.start()
# Allow time for server channel to start, especially on windows
time.sleep(2)
for i in range(send_num):
expect.append(i)
load = {"tgt_type": "glob", "tgt": "*", "jid": i}
server_channel.publish(load)
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@staticmethod
def _send_small(opts, sid, num=10):
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
for i in range(num):
load = {"tgt_type": "glob", "tgt": "*", "jid": "{}-{}".format(sid, i)}
server_channel.publish(load)
server_channel.close()
@staticmethod
def _send_large(opts, sid, num=10, size=250000 * 3):
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
for i in range(num):
load = {
"tgt_type": "glob",
"tgt": "*",
"jid": "{}-{}".format(sid, i),
"xdata": "0" * size,
}
server_channel.publish(load)
server_channel.close()
@skipIf(True, "SLOWTEST skip")
def test_issue_36469_tcp(self):
"""
        Test sending both large and small messages to the publisher using TCP
https://github.com/saltstack/salt/issues/36469
"""
opts = dict(self.master_config, ipc_mode="tcp", pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
send_num = 10 * 4
expect = []
results = []
pub_uri = "tcp://{interface}:{publish_port}".format(**opts)
# Allow time for server channel to start, especially on windows
time.sleep(2)
gather = threading.Thread(
target=self._gather_results, args=(self.minion_config, pub_uri, results,)
)
gather.start()
with ThreadPoolExecutor(max_workers=4) as executor:
executor.submit(self._send_small, opts, 1)
executor.submit(self._send_small, opts, 2)
executor.submit(self._send_small, opts, 3)
executor.submit(self._send_large, opts, 4)
expect = ["{}-{}".format(a, b) for a in range(10) for b in (1, 2, 3, 4)]
time.sleep(0.1)
server_channel.publish({"tgt_type": "glob", "tgt": "*", "stop": True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
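# Minimal sketch (appended for illustration; not part of the original module):
# the bare subscriber pattern used by PubServerChannel._gather_results above --
# connect a zmq SUB socket to the publish URI and drain it non-blockingly until
# nothing arrives for `timeout` seconds. The default URI below is hypothetical.
def example_subscriber_sketch(pub_uri="tcp://127.0.0.1:4505", timeout=5):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.SUB)
    sock.setsockopt(zmq.SUBSCRIBE, b"")  # subscribe to every topic
    sock.connect(pub_uri)
    payloads = []
    last_msg = time.time()
    while time.time() - last_msg < timeout:
        try:
            payloads.append(sock.recv(zmq.NOBLOCK))  # raw serialized frame
            last_msg = time.time()
        except zmq.ZMQError:
            time.sleep(0.01)  # nothing queued yet; retry shortly
    sock.close()
    ctx.term()
    return payloads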
| 36.416456
| 104
| 0.60155
|
37e95b964f4c28cb7587823696f33cc402f0d4a3
| 3,213
|
py
|
Python
|
speakeasy/smirnoff.py
|
jaimergp/speakeasy
|
aba633b0d03e2dd8e2e1712a032f587244a02350
|
[
"MIT"
] | 1
|
2021-03-17T18:57:27.000Z
|
2021-03-17T18:57:27.000Z
|
speakeasy/smirnoff.py
|
jaimergp/speakeasy
|
aba633b0d03e2dd8e2e1712a032f587244a02350
|
[
"MIT"
] | null | null | null |
speakeasy/smirnoff.py
|
jaimergp/speakeasy
|
aba633b0d03e2dd8e2e1712a032f587244a02350
|
[
"MIT"
] | null | null | null |
import parmed as pmd
from openforcefield.typing.engines.smirnoff import ForceField, unit
from openeye.oechem import (
oemolistream, oemolostream, OEIFlavor_MOL2_Forcefield,
OEIFlavor_Generic_Default, OEIFlavor_PDB_Default, OEIFlavor_PDB_ALL,
OEFormat_MOL2, OEFormat_MOL2H, OEWriteMolecule, OETriposAtomNames, OEMol,
OEFormat_PDB, OESmilesToMol, OEAddExplicitHydrogens, OEHasAtomIdx,
OEAtomGetResidue)
import aimtools
import sys
class Conversion(object):
def __init__(self):
self.mol2_file = None
self.output_prefix = 'mol'
self.mol2_topo = None
self.off_system = None
self.pmd_system = None
self.parm = None
self.molecules = []
self.labels = None
def convert(self):
# Set OEMol
ifs = oemolistream()
ifs.SetFlavor(OEFormat_MOL2, OEIFlavor_MOL2_Forcefield)
ifs.open(self.mol2_file)
# Read in molecules
for i,mol in enumerate(ifs.GetOEMols()):
if i > 0:
raise Exception('Only single residue molecules are currently supported')
OETriposAtomNames(mol)
self.molecules.append(OEMol(mol))
# Set topology
self.mol2_topo = pmd.load_file(self.mol2_file, structure=True)
# Parameterize
ff = ForceField('forcefield/smirnoff99Frosst.offxml')
self.labels = ff.labelMolecules(self.molecules, verbose=False)
self.off_system = ff.createSystem(
self.mol2_topo.topology,
self.molecules,
nonbondedCutoff=1.1 * unit.nanometer,
ewaldErrorTolerance=1e-4)
# Load into Parmed
self.pmd_system = pmd.openmm.topsystem.load_topology(
self.mol2_topo.topology,
self.off_system,
self.mol2_topo.positions)
# Convert to AmberParm
self.parm = pmd.amber.AmberParm.from_structure(self.pmd_system)
# HACKY PART!!
# Amber specifies that the third atom in an improper is the central
# atom, but smirnoff currently specifies the second atom. A check for
# impropers was conducted during pmd.openmm.topsystem.load_topology(),
# but that looked at the third atom, so we'll recheck the second atom.
        for dihedral in self.parm.dihedrals:
a1 = dihedral.atom1
a2 = dihedral.atom2
a3 = dihedral.atom3
a4 = dihedral.atom4
if a1 in a2.bond_partners and a3 in a2.bond_partners and a4 in a2.bond_partners:
(dihedral.atom1, dihedral.atom2, dihedral.atom3, dihedral.atom4) = (a3,a4,a2,a1)
dihedral.improper = True
# Create unique atom types
unique_types = aimtools.unique_types.create_unique_type_list(self.parm)
# Write AMBER mol2 and frcmod
aimtools.unique_types.write_unique_frcmod_mol2s(self.parm,unique_types,names=self.output_prefix)
if __name__ == '__main__':
if len(sys.argv) < 2:
raise Exception('You must provide a MOL2 file as argument')
cnvs = Conversion()
cnvs.mol2_file = sys.argv[1]
if len(sys.argv) == 3:
cnvs.output_prefix = sys.argv[2]
cnvs.convert()
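# Hedged usage sketch (appended for illustration; not part of the script): the
# same conversion driven programmatically rather than through sys.argv. The
# mol2 path and output prefix are hypothetical, and an OpenEye license plus the
# smirnoff99Frosst force field files are assumed to be available.
#
#   conv = Conversion()
#   conv.mol2_file = 'ligand.mol2'
#   conv.output_prefix = 'ligand'
#   conv.convert()   # writes AMBER mol2/frcmod files with unique atom types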
| 36.511364
| 104
| 0.654217
|
9c21c35cf61aa68e5ff72da845550c6b3d688bbc
| 6,279
|
py
|
Python
|
sushy/resources/system/processor.py
|
yrobla/sushy
|
74be09c798ac3422335a4e0e30b778639ff5a122
|
[
"Apache-2.0"
] | null | null | null |
sushy/resources/system/processor.py
|
yrobla/sushy
|
74be09c798ac3422335a4e0e30b778639ff5a122
|
[
"Apache-2.0"
] | 4
|
2020-07-08T10:53:30.000Z
|
2020-07-30T11:56:20.000Z
|
sushy/resources/system/processor.py
|
yrobla/sushy
|
74be09c798ac3422335a4e0e30b778639ff5a122
|
[
"Apache-2.0"
] | 3
|
2020-05-19T12:24:50.000Z
|
2020-07-08T10:26:51.000Z
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is referred from Redfish standard schema.
# https://redfish.dmtf.org/schemas/Processor.v1_3_0.json
import collections
import logging
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from sushy.resources.system import mappings as sys_maps
from sushy import utils
# Representation of Summary of Processor information
ProcessorSummary = collections.namedtuple('ProcessorSummary',
['count', 'architecture'])
LOG = logging.getLogger(__name__)
class ProcessorIdField(base.CompositeField):
effective_family = base.Field('EffectiveFamily')
"""The processor effective family"""
effective_model = base.Field('EffectiveModel')
"""The processor effective model"""
identification_registers = base.Field('IdentificationRegisters')
"""The processor identification registers"""
microcode_info = base.Field('MicrocodeInfo')
"""The processor microcode info"""
step = base.Field('Step')
"""The processor stepping"""
vendor_id = base.Field('VendorID')
"""The processor vendor id"""
class Processor(base.ResourceBase):
identity = base.Field('Id', required=True)
"""The processor identity string"""
socket = base.Field('Socket')
"""The socket or location of the processor"""
processor_type = base.MappedField(
'ProcessorType', sys_maps.PROCESSOR_TYPE_VALUE_MAP)
"""The type of processor"""
processor_architecture = base.MappedField(
'ProcessorArchitecture', sys_maps.PROCESSOR_ARCH_VALUE_MAP)
"""The architecture of the processor"""
instruction_set = base.MappedField(
'InstructionSet', sys_maps.PROCESSOR_INSTRUCTIONSET_VALUE_MAP)
"""The instruction set of the processor"""
manufacturer = base.Field('Manufacturer')
"""The processor manufacturer"""
model = base.Field('Model')
"""The product model number of this device"""
max_speed_mhz = base.Field('MaxSpeedMHz', adapter=utils.int_or_none)
"""The maximum clock speed of the processor in MHz."""
processor_id = ProcessorIdField('ProcessorId')
"""The processor id"""
status = common.StatusField('Status')
"""The processor status"""
total_cores = base.Field('TotalCores', adapter=utils.int_or_none)
"""The total number of cores contained in this processor"""
total_threads = base.Field('TotalThreads', adapter=utils.int_or_none)
"""The total number of execution threads supported by this processor"""
def __init__(self, connector, identity, redfish_version=None,
registries=None):
"""A class representing a Processor
:param connector: A Connector instance
:param identity: The identity of the processor
:param redfish_version: The version of RedFish. Used to construct
the object according to schema of the given version.
:param registries: Dict of Redfish Message Registry objects to be
used in any resource that needs registries to parse messages
"""
super(Processor, self).__init__(
connector, identity, redfish_version, registries)
def _get_processor_collection_path(self):
"""Helper function to find the ProcessorCollection path"""
pro_col = self.json.get('ProcessorCollection')
if not pro_col:
raise exceptions.MissingAttributeError(
attribute='ProcessorCollection', resource=self._path)
return pro_col.get('@odata.id')
@property
@utils.cache_it
def sub_processors(self):
"""A reference to the collection of Sub-Processors"""
return ProcessorCollection(
            self.conn, self._get_processor_collection_path(),
redfish_version=self.redfish_version)
class ProcessorCollection(base.ResourceCollectionBase):
@property
def _resource_type(self):
return Processor
@property
@utils.cache_it
def summary(self):
"""Property to provide ProcessorSummary info
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
:returns: A namedtuple containing the ``count`` of processors
in regards to logical CPUs, and their ``architecture``.
"""
count, architecture = 0, None
for proc in self.get_members():
# Note(deray): It attempts to detect the number of CPU cores.
# It returns the number of logical CPUs.
if proc.total_threads is not None:
count += proc.total_threads
# Note(deray): Bail out of checking the architecture info
# if you have already got hold of any one of the processors'
# architecture information.
if (architecture is None
and proc.processor_architecture is not None):
architecture = proc.processor_architecture
return ProcessorSummary(count=count, architecture=architecture)
def __init__(self, connector, path, redfish_version=None, registries=None):
"""A class representing a ProcessorCollection
:param connector: A Connector instance
:param path: The canonical path to the Processor collection resource
:param redfish_version: The version of RedFish. Used to construct
the object according to schema of the given version.
:param registries: Dict of Redfish Message Registry objects to be
used in any resource that needs registries to parse messages
"""
super(ProcessorCollection, self).__init__(
connector, path, redfish_version, registries)
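# Hedged usage sketch (appended for illustration; not part of sushy). The
# ``summary`` property above hands back a ProcessorSummary namedtuple; the
# helper below just shows how its fields are read. The literal values are
# made up.
def _example_processor_summary():
    summary = ProcessorSummary(count=16, architecture='x86')
    return 'logical CPUs: %d, architecture: %s' % (summary.count,
                                                   summary.architecture)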
| 36.719298
| 79
| 0.687211
|
3f8f802b2ef4a81bf5a9d50a62df080f94bd5e12
| 1,116
|
py
|
Python
|
chapter3/dbscan.py
|
DCtheTall/introduction-to-machine-learning
|
636ec4733a504fe6798098d28337cf2088dcfce5
|
[
"MIT"
] | 4
|
2018-09-21T17:15:19.000Z
|
2022-03-17T23:25:18.000Z
|
chapter3/dbscan.py
|
DCtheTall/introduction-to-machine-learning
|
636ec4733a504fe6798098d28337cf2088dcfce5
|
[
"MIT"
] | null | null | null |
chapter3/dbscan.py
|
DCtheTall/introduction-to-machine-learning
|
636ec4733a504fe6798098d28337cf2088dcfce5
|
[
"MIT"
] | null | null | null |
"""
DBSCAN
------
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mglearn
from IPython.display import display
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_blobs, make_moons
from sklearn.preprocessing import StandardScaler
X, y = make_blobs(random_state=0, n_samples=12)
dbscan = DBSCAN()
clusters = dbscan.fit_predict(X)
print('Cluster memberships:\n{}'.format(clusters))
# Running DBSCAN on this small dataset without modifying min_samples or eps
# results in it labeling all points as noise
mglearn.plots.plot_dbscan()
plt.show()
# Plots examples of DBSCAN
# white points are noise
# Core samples are the larger marks
# Boundaries are smaller
X, y = make_moons(n_samples=200, noise=.05, random_state=0)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
dbscan = DBSCAN()
clusters = dbscan.fit_predict(X_scaled)
plt.scatter(
X_scaled[:,0], X_scaled[:,1], c=clusters, cmap=mglearn.cm2, s=60)
plt.xlabel('Feature 0')
plt.ylabel('Feature 1')
plt.show()
# Plots how DBSCAN with default settings
# perfectly classifies the two moons dataset
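# Hedged sketch (added for illustration, not in the original script): on the
# tiny 12-point blob set from the top of the file, the default eps/min_samples
# mark every point as noise (-1); loosening them lets DBSCAN start assigning
# cluster labels. The parameter values below are illustrative, not tuned.
X_small, _ = make_blobs(random_state=0, n_samples=12)
clusters_small = DBSCAN(min_samples=2, eps=1.5).fit_predict(X_small)
print('Memberships with min_samples=2, eps=1.5:\n{}'.format(clusters_small))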
| 24.26087
| 75
| 0.767921
|
25acb8a2a50f4e794bf3c24f429d6871ec82acfe
| 7,911
|
py
|
Python
|
sap/cli/atc.py
|
gtreskas/sapcli
|
ff82ddb998834e1c83dff799134f026df93b3175
|
[
"Apache-2.0"
] | null | null | null |
sap/cli/atc.py
|
gtreskas/sapcli
|
ff82ddb998834e1c83dff799134f026df93b3175
|
[
"Apache-2.0"
] | null | null | null |
sap/cli/atc.py
|
gtreskas/sapcli
|
ff82ddb998834e1c83dff799134f026df93b3175
|
[
"Apache-2.0"
] | null | null | null |
"""ATC proxy for ABAP Unit"""
import json
import os
import re
import sys
from xml.sax.saxutils import escape, quoteattr
from sap import get_logger
import sap.adt
import sap.adt.atc
from sap.cli.core import printout
from sap.errors import SAPCliError
CHECKSTYLE_VERSION = '8.36'
ERROR = 'error'
WARNING = 'warning'
INFO = 'info'
SEVERITY_MAPPING = {
'1': ERROR,
'2': ERROR,
'3': WARNING,
'4': WARNING,
'5': INFO
}
def mod_log():
"""Module logger"""
return get_logger()
class CommandGroup(sap.cli.core.CommandGroup):
"""Adapter converting command line parameters to sap.adt.Class methods
calls.
"""
def __init__(self):
super().__init__('atc')
def print_worklists_to_stream(all_results, stream, error_level=99):
"""Print results to stream"""
pad = ''
ret = 0
for run_results in all_results:
for obj in run_results.objects:
stream.write(f'{obj.object_type_id}/{obj.name}\n')
            finding_pad = pad + ' '
            for finding in obj.findings:
                if int(finding.priority) <= error_level:
                    ret += 1
                stream.write(f'*{finding_pad}{finding.priority} :: {finding.check_title} :: {finding.message_title}\n')
return 0 if ret < 1 else 1
# pylint: disable=invalid-name
def print_worklists_as_html_to_stream(all_results, stream, error_level=99):
"""Print results as html table to stream"""
ret = 0
stream.write('<table>\n')
for run_results in all_results:
for obj in run_results.objects:
stream.write('<tr><th>Object type ID</th>\n'
'<th>Name</th></tr>\n')
stream.write(f'<tr><td>{escape(obj.object_type_id)}</td>\n'
f'<td>{escape(obj.name)}</td></tr>\n')
stream.write('<tr><th>Priority</th>\n'
'<th>Check title</th>\n'
'<th>Message title</th></tr>\n')
for finding in obj.findings:
if int(finding.priority) <= error_level:
ret += 1
stream.write(f'<tr><td>{escape(str(finding.priority))}</td>\n'
f'<td>{escape(finding.check_title)}</td>\n'
f'<td>{escape(finding.message_title)}</td></tr>\n')
stream.write('</table>\n')
return 0 if ret < 1 else 1
def replace_slash(name):
"""Replaces slash with division slash symbol for CheckStyle Jenkins plugin"""
DIVISION_SLASH = '\u2215'
return (name or '').replace('/', DIVISION_SLASH)
def get_line_and_column(location):
"""Finds line and column in location"""
START_PATTERN = r'(start=)(?P<line>\d+)(,(?P<column>\d+))?'
search_result = re.search(START_PATTERN, location or '')
line = column = '0'
if search_result:
line = search_result.group('line')
column = search_result.group('column') or '0'
return line, column
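# Illustrative check (appended; not part of sapcli): what the location parser
# above returns for typical ADT location fragments.
def _example_get_line_and_column():
    assert get_line_and_column('start=42,7') == ('42', '7')
    assert get_line_and_column('start=42') == ('42', '0')  # column defaults to '0'
    assert get_line_and_column(None) == ('0', '0')         # no location at all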
# pylint: disable=invalid-name
def print_worklists_as_checkstyle_xml_to_stream(all_results, stream, error_level=99, severity_mapping=None):
"""Print results as checkstyle xml to stream for all worklists"""
if not severity_mapping:
severity_mapping = SEVERITY_MAPPING
stream.write('<?xml version="1.0" encoding="UTF-8"?>\n')
stream.write(f'<checkstyle version="{CHECKSTYLE_VERSION}">\n')
ret = 0
for run_results in all_results:
for obj in run_results.objects:
package_name = replace_slash(obj.typ)
name = replace_slash(f'{obj.package_name}/{obj.name}')
filename = f'{package_name}/{name}'
stream.write(f'<file name={quoteattr(filename)}>\n')
for finding in obj.findings:
if int(finding.priority) <= error_level:
ret += 1
severity = severity_mapping.get(str(finding.priority), INFO)
line, column = get_line_and_column(finding.location)
stream.write(f'<error '
f'line={quoteattr(line)} '
f'column={quoteattr(column)} '
f'severity={quoteattr(severity)} '
f'message={quoteattr(finding.message_title)} '
f'source={quoteattr(finding.check_title)}'
f'/>\n')
stream.write('</file>\n')
stream.write('</checkstyle>\n')
return 0 if ret < 1 else 1
@CommandGroup.command()
def customizing(connection, _):
"""Retrieves ATC customizing"""
settings = sap.adt.atc.fetch_customizing(connection)
printout('System Check Variant:', settings.system_check_variant)
@CommandGroup.argument('-m', '--max-verdicts', default=100, type=int,
help='Maximum number of findings; default == 100')
@CommandGroup.argument('-r', '--variant', default=None, type=str,
help='Executed Check Variant; default: the system variant')
@CommandGroup.argument('-e', '--error-level', default=2, type=int,
help='Exit with non zero if a finding with this or higher prio returned')
@CommandGroup.argument('name', nargs='+', type=str)
@CommandGroup.argument('type', choices=['program', 'class', 'package'])
@CommandGroup.argument('-o', '--output', default='human', choices=['human', 'html', 'checkstyle'],
help='Output format in which checks will be printed')
@CommandGroup.argument('-s', '--severity-mapping', default=None, type=str,
help='Severity mapping between error levels and Checkstyle severities')
@CommandGroup.command()
def run(connection, args):
"""Prints it out based on command line configuration.
Exceptions:
- SAPCliError:
- when the given type does not belong to the type white list
- when severity_maping argument has invalid format
"""
types = {'program': sap.adt.Program, 'class': sap.adt.Class, 'package': sap.adt.Package}
try:
typ = types[args.type]
except KeyError as ex:
raise SAPCliError(f'Unknown type: {args.type}') from ex
printer_format_mapping = {
'human': print_worklists_to_stream,
'html': print_worklists_as_html_to_stream,
'checkstyle': print_worklists_as_checkstyle_xml_to_stream
}
try:
printer = printer_format_mapping[args.output]
except KeyError as ex:
raise SAPCliError(f'Unknown format: {args.output}') from ex
severity_mapping = None
if args.output == 'checkstyle':
severity_mapping = args.severity_mapping or os.environ.get('SEVERITY_MAPPING')
if severity_mapping:
try:
severity_mapping = dict(json.loads(severity_mapping))
except (json.decoder.JSONDecodeError, TypeError) as ex:
raise SAPCliError('Severity mapping has incorrect format') from ex
if args.variant is None:
settings = sap.adt.atc.fetch_customizing(connection)
args.variant = settings.system_check_variant
results = []
if args.name:
arr_objects = []
# Make sure it can be used as a sequence of strings or as an array of strings
for objectArr in args.name:
arr_objects += objectArr.split()
for objname in arr_objects:
checks = sap.adt.atc.ChecksRunner(connection, args.variant)
objects = sap.adt.objects.ADTObjectSets()
objects.include_object(typ(connection, objname))
atcResult = checks.run_for(objects, max_verdicts=args.max_verdicts)
results.append(atcResult.worklist)
if args.output == 'checkstyle':
result = printer(results, sys.stdout, error_level=args.error_level, severity_mapping=severity_mapping)
else:
result = printer(results, sys.stdout, error_level=args.error_level)
return result
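# Hedged CLI sketch (appended for illustration; not part of the module): how
# the ``run`` command above is typically invoked. The package name is
# hypothetical; the flags mirror the argparse declarations above.
#
#   sapcli atc customizing
#   sapcli atc run package '$MY_PACKAGE' --output checkstyle --error-level 2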
| 35.635135
| 120
| 0.61484
|
99d64ebd01c9a3a0c9fcbb1d55aaf05bb27bab92
| 9,447
|
py
|
Python
|
tests/helpers/runif.py
|
mathemusician/pytorch-lightning
|
15fa5389387b3a220bc044dd30eb0be1e8f64944
|
[
"Apache-2.0"
] | null | null | null |
tests/helpers/runif.py
|
mathemusician/pytorch-lightning
|
15fa5389387b3a220bc044dd30eb0be1e8f64944
|
[
"Apache-2.0"
] | null | null | null |
tests/helpers/runif.py
|
mathemusician/pytorch-lightning
|
15fa5389387b3a220bc044dd30eb0be1e8f64944
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import Optional
import pytest
import torch
from packaging.version import Version
from pkg_resources import get_distribution
from pytorch_lightning.utilities import (
_APEX_AVAILABLE,
_BAGUA_AVAILABLE,
_DEEPSPEED_AVAILABLE,
_FAIRSCALE_AVAILABLE,
_FAIRSCALE_FULLY_SHARDED_AVAILABLE,
_HIVEMIND_AVAILABLE,
_HOROVOD_AVAILABLE,
_HPU_AVAILABLE,
_IPU_AVAILABLE,
_OMEGACONF_AVAILABLE,
_RICH_AVAILABLE,
_TORCH_GREATER_EQUAL_1_10,
_TORCH_QUANTIZE_AVAILABLE,
_TPU_AVAILABLE,
)
_HOROVOD_NCCL_AVAILABLE = False
if _HOROVOD_AVAILABLE:
import horovod
try:
# `nccl_built` returns an integer
_HOROVOD_NCCL_AVAILABLE = bool(horovod.torch.nccl_built())
except AttributeError:
# AttributeError can be raised if MPI is not available:
# https://github.com/horovod/horovod/blob/v0.23.0/horovod/torch/__init__.py#L33-L34
pass
class RunIf:
"""RunIf wrapper for simple marking specific cases, fully compatible with pytest.mark::
@RunIf(min_torch="0.0")
@pytest.mark.parametrize("arg1", [1, 2.0])
def test_wrapper(arg1):
assert arg1 > 0.0
"""
def __new__(
self,
*args,
min_gpus: int = 0,
min_torch: Optional[str] = None,
max_torch: Optional[str] = None,
min_python: Optional[str] = None,
quantization: bool = False,
amp_apex: bool = False,
bf16_cuda: bool = False,
tpu: bool = False,
ipu: bool = False,
hpu: bool = False,
horovod: bool = False,
horovod_nccl: bool = False,
skip_windows: bool = False,
standalone: bool = False,
fairscale: bool = False,
fairscale_fully_sharded: bool = False,
deepspeed: bool = False,
rich: bool = False,
skip_hanging_spawn: bool = False,
omegaconf: bool = False,
slow: bool = False,
bagua: bool = False,
hivemind: bool = False,
**kwargs,
):
"""
Args:
*args: Any :class:`pytest.mark.skipif` arguments.
min_gpus: Require this number of gpus.
min_torch: Require that PyTorch is greater or equal than this version.
max_torch: Require that PyTorch is less than this version.
min_python: Require that Python is greater or equal than this version.
quantization: Require that `torch.quantization` is available.
amp_apex: Require that NVIDIA/apex is installed.
bf16_cuda: Require that CUDA device supports bf16.
tpu: Require that TPU is available.
ipu: Require that IPU is available.
hpu: Require that HPU is available.
horovod: Require that Horovod is installed.
horovod_nccl: Require that Horovod is installed with NCCL support.
skip_windows: Skip for Windows platform.
standalone: Mark the test as standalone, our CI will run it in a separate process.
fairscale: Require that facebookresearch/fairscale is installed.
fairscale_fully_sharded: Require that `fairscale` fully sharded support is available.
deepspeed: Require that microsoft/DeepSpeed is installed.
rich: Require that willmcgugan/rich is installed.
skip_hanging_spawn: Skip the test as it's impacted by hanging loggers on spawn.
omegaconf: Require that omry/omegaconf is installed.
slow: Mark the test as slow, our CI will run it in a separate job.
bagua: Require that BaguaSys/bagua is installed.
hivemind: Require that Hivemind is installed.
**kwargs: Any :class:`pytest.mark.skipif` keyword arguments.
"""
conditions = []
reasons = []
if min_gpus:
conditions.append(torch.cuda.device_count() < min_gpus)
reasons.append(f"GPUs>={min_gpus}")
if min_torch:
torch_version = get_distribution("torch").version
conditions.append(Version(torch_version) < Version(min_torch))
reasons.append(f"torch>={min_torch}")
if max_torch:
torch_version = get_distribution("torch").version
conditions.append(Version(torch_version) >= Version(max_torch))
reasons.append(f"torch<{max_torch}")
if min_python:
py_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
conditions.append(Version(py_version) < Version(min_python))
reasons.append(f"python>={min_python}")
if quantization:
_miss_default = "fbgemm" not in torch.backends.quantized.supported_engines
conditions.append(not _TORCH_QUANTIZE_AVAILABLE or _miss_default)
reasons.append("PyTorch quantization")
if amp_apex:
conditions.append(not _APEX_AVAILABLE)
reasons.append("NVIDIA Apex")
if bf16_cuda:
try:
cond = not (torch.cuda.is_available() and _TORCH_GREATER_EQUAL_1_10 and torch.cuda.is_bf16_supported())
except (AssertionError, RuntimeError) as e:
# AssertionError: Torch not compiled with CUDA enabled
# RuntimeError: Found no NVIDIA driver on your system.
is_unrelated = "Found no NVIDIA driver" not in str(e) or "Torch not compiled with CUDA" not in str(e)
if is_unrelated:
raise e
cond = True
conditions.append(cond)
reasons.append("CUDA device bf16")
if skip_windows:
conditions.append(sys.platform == "win32")
reasons.append("unimplemented on Windows")
if tpu:
conditions.append(not _TPU_AVAILABLE)
reasons.append("TPU")
if ipu:
env_flag = os.getenv("PL_RUN_IPU_TESTS", "0")
conditions.append(env_flag != "1" or not _IPU_AVAILABLE)
reasons.append("IPU")
kwargs["ipu"] = True
if hpu:
conditions.append(not _HPU_AVAILABLE)
reasons.append("HPU")
if horovod:
conditions.append(not _HOROVOD_AVAILABLE)
reasons.append("Horovod")
if horovod_nccl:
conditions.append(not _HOROVOD_NCCL_AVAILABLE)
reasons.append("Horovod with NCCL")
if standalone:
env_flag = os.getenv("PL_RUN_STANDALONE_TESTS", "0")
conditions.append(env_flag != "1")
reasons.append("Standalone execution")
# used in tests/conftest.py::pytest_collection_modifyitems
kwargs["standalone"] = True
if fairscale:
conditions.append(not _FAIRSCALE_AVAILABLE)
reasons.append("Fairscale")
if fairscale_fully_sharded:
conditions.append(not _FAIRSCALE_FULLY_SHARDED_AVAILABLE)
reasons.append("Fairscale Fully Sharded")
if deepspeed:
conditions.append(not _DEEPSPEED_AVAILABLE)
reasons.append("Deepspeed")
if rich:
conditions.append(not _RICH_AVAILABLE)
reasons.append("Rich")
if skip_hanging_spawn:
# strategy=ddp_spawn, accelerator=cpu, python>=3.8, torch<1.9 does not work
py_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
ge_3_8 = Version(py_version) >= Version("3.8")
torch_version = get_distribution("torch").version
old_torch = Version(torch_version) < Version("1.9")
conditions.append(ge_3_8 and old_torch)
reasons.append("Impacted by hanging DDP spawn")
if omegaconf:
conditions.append(not _OMEGACONF_AVAILABLE)
reasons.append("omegaconf")
if slow:
env_flag = os.getenv("PL_RUN_SLOW_TESTS", "0")
conditions.append(env_flag != "1")
reasons.append("Slow test")
# used in tests/conftest.py::pytest_collection_modifyitems
kwargs["slow"] = True
if bagua:
conditions.append(not _BAGUA_AVAILABLE or sys.platform in ("win32", "darwin"))
reasons.append("Bagua")
if hivemind:
conditions.append(not _HIVEMIND_AVAILABLE or sys.platform in ("win32", "darwin"))
reasons.append("Hivemind")
reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
return pytest.mark.skipif(
*args, condition=any(conditions), reason=f"Requires: [{' + '.join(reasons)}]", **kwargs
)
@RunIf(min_torch="99")
def test_always_skip():
exit(1)
@pytest.mark.parametrize("arg1", [0.5, 1.0, 2.0])
@RunIf(min_torch="0.0")
def test_wrapper(arg1: float):
assert arg1 > 0.0
| 36.902344
| 119
| 0.631523
|
2d08abfed8158e3cac1a151d92803c02e988bf3d
| 6,332
|
py
|
Python
|
chainer_chemistry/links/update/megnet_update.py
|
pfnet/chainerchem
|
efe323aa21f63a815130d673781e7cca1ccb72d2
|
[
"MIT"
] | 184
|
2019-11-27T12:59:01.000Z
|
2022-03-29T19:18:54.000Z
|
chainer_chemistry/links/update/megnet_update.py
|
pfnet/chainerchem
|
efe323aa21f63a815130d673781e7cca1ccb72d2
|
[
"MIT"
] | 21
|
2019-12-08T01:53:33.000Z
|
2020-10-23T01:19:56.000Z
|
chainer_chemistry/links/update/megnet_update.py
|
pfnet/chainerchem
|
efe323aa21f63a815130d673781e7cca1ccb72d2
|
[
"MIT"
] | 45
|
2019-11-28T09:59:54.000Z
|
2022-02-07T02:42:46.000Z
|
import chainer
from chainer import functions, links # NOQA
from chainer_chemistry.functions import megnet_softplus
class DenseLayer(chainer.Chain):
def __init__(self, hidden_dim=[64, 32], activation=megnet_softplus):
super(DenseLayer, self).__init__()
self.n_layers = len(hidden_dim)
self.activation = activation
with self.init_scope():
self.update_layer = chainer.ChainList(
*[links.Linear(None, hidden_dim[i])
for i in range(self.n_layers)])
def __call__(self, v):
for i in range(self.n_layers):
v = self.activation(self.update_layer[i](v))
return v
class UpdateLayer(chainer.Chain):
def __init__(self, hidden_dim=[64, 64, 32], activation=megnet_softplus):
super(UpdateLayer, self).__init__()
self.n_layers = len(hidden_dim)
self.activation = activation
with self.init_scope():
self.update_layer = chainer.ChainList(
*[links.Linear(None, hidden_dim[i])
for i in range(self.n_layers)])
def __call__(self, v):
for i in range(self.n_layers):
v = self.update_layer[i](v)
# doesn't pass the activation at the last layer
if i != (self.n_layers-1):
v = self.activation(v)
return v
def get_mean_feat(feat, idx, out_shape, xp):
"""Return mean node or edge feature in each graph.
This method is the same as average pooling
about node or edge feature in each graph.
"""
zero = xp.zeros(out_shape, dtype=xp.float32)
sum_vec = functions.scatter_add(zero, idx, feat)
one = xp.ones(feat.shape, dtype=xp.float32)
degree = functions.scatter_add(zero, idx, one)
return sum_vec / degree
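# Illustrative numeric sketch (appended; not part of chainer-chemistry): two
# graphs, three nodes, where nodes 0 and 1 belong to graph 0 and node 2 to
# graph 1. Averaging per graph with the helper above would give:
#
#   feat = numpy.array([[1., 1.], [3., 3.], [5., 5.]], dtype=numpy.float32)
#   idx = numpy.array([0, 0, 1])
#   get_mean_feat(feat, idx, (2, 2), numpy)   # -> [[2., 2.], [5., 5.]]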
class MEGNetUpdate(chainer.Chain):
"""Update submodule for MEGNet
Args:
dim_for_dense (list): dimension list of dense layer
dim_for_update (list): dimension list of update layer
dropout_ratio (float): ratio of dropout
activation (~chainer.Function or ~chainer.FunctionNode):
activate function for megnet model
`megnet_softplus` was used in original paper.
skip_intermediate (bool): When `True`, intermediate feature after dense
calculation is used for skip connection. When `False`, input
feature is used for skip connection.
It is `True` for first layer, and `False` for other layer in the
original paper.
"""
def __init__(self, dim_for_dense=[64, 32], dim_for_update=[64, 64, 32],
dropout_ratio=-1, activation=megnet_softplus,
skip_intermediate=True):
super(MEGNetUpdate, self).__init__()
if len(dim_for_dense) != 2:
raise ValueError('dim_for_dense must have 2 elements')
if len(dim_for_update) != 3:
raise ValueError('dim_for_update must have 3 elements')
self.dropout_ratio = dropout_ratio
with self.init_scope():
# for dense layer
self.dense_for_atom = DenseLayer(dim_for_dense, activation)
self.dense_for_pair = DenseLayer(dim_for_dense, activation)
self.dense_for_global = DenseLayer(dim_for_dense, activation)
# for update layer
self.update_for_atom = UpdateLayer(dim_for_update, activation)
self.update_for_pair = UpdateLayer(dim_for_update, activation)
self.update_for_global = UpdateLayer(dim_for_update, activation)
self.skip_intermediate = skip_intermediate
def __call__(self, atoms_feat, pair_feat, global_feat,
atom_idx, pair_idx, start_idx, end_idx):
# 1) Pass the Dense layer
a_f_d = self.dense_for_atom(atoms_feat)
p_f_d = self.dense_for_pair(pair_feat)
g_f_d = self.dense_for_global(global_feat)
# 2) Update the edge vector
start_node = a_f_d[start_idx]
end_node = a_f_d[end_idx]
g_f_extend_with_pair_idx = g_f_d[pair_idx]
concat_p_v = functions.concat((p_f_d, start_node, end_node,
g_f_extend_with_pair_idx))
update_p = self.update_for_pair(concat_p_v)
# 3) Update the node vector
# 1. get sum edge feature of all nodes using scatter_add method
zero = self.xp.zeros(a_f_d.shape, dtype=self.xp.float32)
        sum_edge_vec = functions.scatter_add(zero, start_idx, update_p) + \
functions.scatter_add(zero, end_idx, update_p)
# 2. get degree of all nodes using scatter_add method
one = self.xp.ones(p_f_d.shape, dtype=self.xp.float32)
degree = functions.scatter_add(zero, start_idx, one) + \
functions.scatter_add(zero, end_idx, one)
# 3. get mean edge feature of all nodes
        mean_edge_vec = sum_edge_vec / degree
        # 4. concatenating
g_f_extend_with_atom_idx = g_f_d[atom_idx]
concat_a_v = functions.concat((a_f_d, mean_edge_vec,
g_f_extend_with_atom_idx))
update_a = self.update_for_atom(concat_a_v)
# 4) Update the global vector
out_shape = g_f_d.shape
ave_p = get_mean_feat(update_p, pair_idx, out_shape, self.xp)
ave_a = get_mean_feat(update_a, atom_idx, out_shape, self.xp)
concat_g_v = functions.concat((ave_a, ave_p, g_f_d), axis=1)
update_g = self.update_for_global(concat_g_v)
# 5) Skip connection
if self.skip_intermediate:
            # Skip connection from the intermediate feature; used for the first layer.
new_a_f = update_a + a_f_d
new_p_f = update_p + p_f_d
new_g_f = update_g + g_f_d
else:
            # Skip connection from the input feature; used for all layers except the first.
            # The input feature must have the same dimension as the updated feature.
new_a_f = update_a + atoms_feat
new_p_f = update_p + pair_feat
new_g_f = update_g + global_feat
# 6) dropout
if self.dropout_ratio > 0.0:
new_a_f = functions.dropout(new_a_f, ratio=self.dropout_ratio)
new_p_f = functions.dropout(new_p_f, ratio=self.dropout_ratio)
new_g_f = functions.dropout(new_g_f, ratio=self.dropout_ratio)
return new_a_f, new_p_f, new_g_f
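# --- Editor's hedged sketch (not part of chainer-chemistry) ---
# A minimal, self-contained check of get_mean_feat above on a toy batch of
# two graphs (two nodes in graph 0, one node in graph 1). The arrays and the
# underscore-prefixed names are illustrative assumptions; running this block
# requires chainer (imported at the top of this module) and numpy.
if __name__ == '__main__':
    import numpy as _np
    _feat = _np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=_np.float32)
    _idx = _np.array([0, 0, 1], dtype=_np.int32)  # graph id of each node
    _mean = get_mean_feat(_feat, _idx, out_shape=(2, 2), xp=_np)
    print(_mean.array)  # expected: [[2. 3.] [5. 6.]] -- per-graph averages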
| 41.116883
| 79
| 0.638029
|
72a58db347e5578c54d7cceecb497f1714327fce
| 731
|
py
|
Python
|
codeit/closest_pair.py
|
love-adela/algorithm
|
4ccd02173c96f8369962f1fd4e5166a221690fa2
|
[
"MIT"
] | 3
|
2019-03-09T05:19:23.000Z
|
2019-04-06T09:26:36.000Z
|
codeit/closest_pair.py
|
love-adela/algorithm
|
4ccd02173c96f8369962f1fd4e5166a221690fa2
|
[
"MIT"
] | 1
|
2020-02-23T10:38:04.000Z
|
2020-02-23T10:38:04.000Z
|
codeit/closest_pair.py
|
love-adela/algorithm
|
4ccd02173c96f8369962f1fd4e5166a221690fa2
|
[
"MIT"
] | 1
|
2019-05-22T13:47:53.000Z
|
2019-05-22T13:47:53.000Z
|
#!/usr/bin/python
'''
Codeit - Brute Force
6. Find the closest pair of stores
'''
# sqrt function for computing square roots
from math import sqrt
# Function that computes the straight-line distance between two stores
def distance(store1, store2):
return sqrt((store1[0] - store2[0]) ** 2 + (store1[1] - store2[1]) ** 2)
# Function that finds the two closest stores
def closest_pair(coordinates):
pair = [coordinates[0], coordinates[1]]
for i in range(len(coordinates)):
for j in range(i+1, len(coordinates)):
store1, store2 = coordinates[i], coordinates[j]
if distance(pair[0], pair[1]) > distance(store1, store2):
pair = [store1, store2]
return pair
# Test
test_coordinates = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print(closest_pair(test_coordinates))
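# Editor's hedged aside (not part of the original exercise): the loop above
# recomputes distance(pair[0], pair[1]) on every comparison. Caching the
# current minimum keeps the same O(n^2) brute-force search while avoiding
# the redundant sqrt calls.
def closest_pair_cached(coordinates):
    best = [coordinates[0], coordinates[1]]
    best_dist = distance(best[0], best[1])
    for i in range(len(coordinates)):
        for j in range(i + 1, len(coordinates)):
            d = distance(coordinates[i], coordinates[j])
            if d < best_dist:
                best = [coordinates[i], coordinates[j]]
                best_dist = d
    return best
print(closest_pair_cached(test_coordinates))  # same result as above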
| 23.580645
| 76
| 0.604651
|
bd2f739973f340d52ce9112d649e76b784cf9d92
| 21,801
|
py
|
Python
|
deploy-agent/tests/unit/deploy/server/test_agent.py
|
aagxxi/teletraan
|
93af2abfd72e99258e80f978a80343656de2172f
|
[
"Apache-2.0"
] | null | null | null |
deploy-agent/tests/unit/deploy/server/test_agent.py
|
aagxxi/teletraan
|
93af2abfd72e99258e80f978a80343656de2172f
|
[
"Apache-2.0"
] | null | null | null |
deploy-agent/tests/unit/deploy/server/test_agent.py
|
aagxxi/teletraan
|
93af2abfd72e99258e80f978a80343656de2172f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import mock
import tests
from deployd.agent import DeployAgent
from deployd.common.utils import ensure_dirs
from deployd.common.types import DeployReport, DeployStatus, OpCode, DeployStage, AgentStatus, PingStatus
from deployd.types.deploy_goal import DeployGoal
from deployd.types.ping_report import PingReport
from deployd.types.ping_response import PingResponse
class TestDeployAgent(tests.TestCase):
@classmethod
def setUpClass(cls):
cls.estatus = mock.Mock()
cls.estatus.load_envs = mock.Mock(return_value=None)
cls.config = mock.Mock()
cls.config.load_env_and_configs = mock.Mock()
cls.config.get_var = mock.Mock(return_value='')
        cls.config.get_intvar = mock.Mock(return_value=1)
cls.config.get_target = mock.Mock(return_value='/tmp/tests')
cls.config.get_config_filename = mock.Mock(return_value='/etc/deployagent.conf')
cls.config.get_agent_directory = mock.Mock(return_value='/tmp/deployd/')
cls.config.get_builds_directory = mock.Mock(return_value='/tmp/deployd/builds/')
cls.config.get_log_directory = mock.Mock(return_value='/tmp/logs/')
ensure_dirs(cls.config)
cls.executor = mock.Mock()
cls.executor.execute_command = \
mock.Mock(return_value=(DeployReport(AgentStatus.SUCCEEDED)))
cls.executor.run_cmd = mock.Mock(return_value=(DeployReport(AgentStatus.SUCCEEDED)))
cls.helper = mock.Mock()
cls.helper.get_stale_builds = mock.Mock(return_value=[])
build = {}
build['id'] = '123'
build['name'] = 'abc'
build['commitShort'] = '345'
build['artifactUrl'] = 'https://test'
envvar = {}
envvar['id'] = 'abc'
envvar['url'] = 'https://test'
cls.deploy_goal1 = {}
cls.deploy_goal1['deployId'] = '123'
cls.deploy_goal1['envName'] = 'abc'
cls.deploy_goal1['envId'] = 'def'
cls.deploy_goal1['stageName'] = 'beta'
cls.deploy_goal1['deployStage'] = DeployStage.PRE_DOWNLOAD
cls.deploy_goal1['scriptVariables'] = envvar
cls.deploy_goal2 = {}
cls.deploy_goal2['deployId'] = '123'
cls.deploy_goal2['envName'] = 'abc'
cls.deploy_goal2['envId'] = 'def'
cls.deploy_goal2['stageName'] = 'beta'
cls.deploy_goal2['deployStage'] = DeployStage.DOWNLOADING
cls.deploy_goal2['build'] = build
cls.deploy_goal3 = {}
cls.deploy_goal3['deployId'] = '123'
cls.deploy_goal3['envName'] = 'abc'
cls.deploy_goal3['envId'] = 'def'
cls.deploy_goal3['stageName'] = 'beta'
cls.deploy_goal3['deployStage'] = DeployStage.STAGING
cls.deploy_goal4 = {}
cls.deploy_goal4['deployId'] = '123'
cls.deploy_goal4['envName'] = 'abc'
cls.deploy_goal4['envId'] = 'def'
cls.deploy_goal4['stageName'] = 'beta'
cls.deploy_goal4['deployStage'] = DeployStage.PRE_RESTART
cls.deploy_goal5 = {}
cls.deploy_goal5['deployId'] = '123'
cls.deploy_goal5['envName'] = 'abc'
cls.deploy_goal5['envId'] = 'def'
cls.deploy_goal5['stageName'] = 'beta'
cls.deploy_goal5['deployId'] = '234'
cls.deploy_goal5['deployStage'] = DeployStage.PRE_DOWNLOAD
cls.deploy_goal5['build'] = build
cls.deploy_goal6 = {}
cls.deploy_goal6['deployId'] = '123'
cls.deploy_goal6['envName'] = 'abc'
cls.deploy_goal6['envId'] = 'def'
cls.deploy_goal6['stageName'] = 'beta'
cls.deploy_goal6['deployId'] = '234'
cls.deploy_goal6['deployStage'] = DeployStage.SERVING_BUILD
cls.ping_response1 = {'deployGoal': cls.deploy_goal1, 'opCode': OpCode.DEPLOY}
cls.ping_response2 = {'deployGoal': cls.deploy_goal2, 'opCode': OpCode.DEPLOY}
cls.ping_response3 = {'deployGoal': cls.deploy_goal3, 'opCode': OpCode.DEPLOY}
cls.ping_response4 = {'deployGoal': cls.deploy_goal4, 'opCode': OpCode.DEPLOY}
cls.ping_response5 = {'deployGoal': cls.deploy_goal5, 'opCode': OpCode.DELETE}
cls.ping_response6 = {'deployGoal': cls.deploy_goal6, 'opCode': OpCode.DELETE}
cls.ping_noop_response = {'deployGoal': None, 'opCode': OpCode.NOOP}
def test_agent_first_run(self):
# first run
ping_response_list = [PingResponse(jsonValue=self.ping_response1),
None,
PingResponse(jsonValue=self.ping_response1)]
client = mock.Mock()
client.send_reports = mock.Mock(side_effect=ping_response_list)
d = DeployAgent(client=client, estatus=self.estatus, conf=self.config,
executor=self.executor, helper=self.helper)
self.assertTrue(d.first_run)
# first run stickiness
d._envs = {'data': 'data'}
self.assertTrue(d.first_run)
# subsequent run
client.send_reports = mock.Mock(side_effect=ping_response_list)
d = DeployAgent(client=client, estatus=self.estatus, conf=self.config,
executor=self.executor, helper=self.helper)
d._envs = {'data': 'data'}
self.assertFalse(d.first_run)
def test_agent_status_on_ping_failure(self):
ping_response_list = [PingResponse(jsonValue=self.ping_response1), None, PingResponse(jsonValue=self.ping_response1)]
client = mock.Mock()
client.send_reports = mock.Mock(side_effect=ping_response_list)
d = DeployAgent(client=client, estatus=self.estatus, conf=self.config,
executor=self.executor, helper=self.helper)
self.assertEqual(PingStatus.PLAN_CHANGED, d.update_deploy_status(DeployReport(status_code=AgentStatus.SUCCEEDED)))
self.assertEqual(PingStatus.PING_FAILED, d.update_deploy_status(DeployReport(status_code=AgentStatus.SUCCEEDED)))
self.assertEqual(PingStatus.PLAN_NO_CHANGE, d.update_deploy_status(DeployReport(status_code=AgentStatus.SUCCEEDED)))
def test_agent_with_switch_command(self):
ping_response_list = [
PingResponse(jsonValue=self.ping_response1),
PingResponse(jsonValue=self.ping_response2),
PingResponse(jsonValue=self.ping_response3),
PingResponse(jsonValue=self.ping_response4),
PingResponse(jsonValue=self.ping_response5),
PingResponse(jsonValue=self.ping_response6),
PingResponse(jsonValue=self.ping_noop_response)]
client = mock.Mock()
client.send_reports = mock.Mock(side_effect=ping_response_list)
d = DeployAgent(client=client, estatus=self.estatus, conf=self.config,
executor=self.executor, helper=self.helper)
d.serve_build()
calls = [mock.call(['deploy-downloader', '-f', '/etc/deployagent.conf', '-v',
'123', '-u', 'https://test', '-e', 'abc']),
mock.call(['deploy-stager', '-f', '/etc/deployagent.conf', '-v',
'123', '-t', '/tmp/tests', '-e', 'abc'])]
self.executor.run_cmd.assert_has_calls(calls)
self.assertEqual(len(d._envs), 0)
def test_agent_with_switch_goal(self):
build = {}
build['id'] = '123'
build['name'] = 'bar'
build['commitShort'] = '345'
build['commit'] = 'abcd'
build['artifactUrl'] = 'https://test'
build2 = {}
build2['id'] = '123'
build2['name'] = 'fool'
build2['commit'] = 'abcd'
build2['commitShort'] = '345'
build2['artifactUrl'] = 'https://test2'
envvar = {}
envvar['id'] = 'abc'
envvar['url'] = 'https://test'
envvar2 = {}
envvar2['id'] = 'bcd'
envvar2['url'] = 'https://test2'
ping_response1 = self.ping_response1
ping_response1['deployGoal']['scriptVariables'] = envvar
ping_response2 = self.ping_response2
ping_response2['deployGoal']['build'] = build
ping_response3 = self.ping_response3
ping_response4 = self.ping_response4
deploy_goal5 = {}
deploy_goal5['deployId'] = '234'
deploy_goal5['envName'] = 'bcd'
deploy_goal5['envId'] = 'efg'
deploy_goal5['stageName'] = 'prod'
deploy_goal5['deployStage'] = DeployStage.PRE_DOWNLOAD
deploy_goal5['scriptVariables'] = envvar2
deploy_goal6 = {}
deploy_goal6['deployId'] = '234'
deploy_goal6['envName'] = 'bcd'
deploy_goal6['envId'] = 'efg'
deploy_goal6['stageName'] = 'prod'
deploy_goal6['deployStage'] = DeployStage.DOWNLOADING
deploy_goal6['build'] = build2
deploy_goal7 = {}
deploy_goal7['deployId'] = '234'
deploy_goal7['envName'] = 'bcd'
deploy_goal7['envId'] = 'efg'
deploy_goal7['stageName'] = 'prod'
deploy_goal7['deployStage'] = DeployStage.STAGING
deploy_goal8 = {}
deploy_goal8['deployId'] = '234'
deploy_goal8['envName'] = 'bcd'
deploy_goal8['envId'] = 'efg'
deploy_goal8['stageName'] = 'prod'
deploy_goal8['deployStage'] = DeployStage.RESTARTING
deploy_goal9 = {}
deploy_goal9['deployId'] = '234'
deploy_goal9['envName'] = 'bcd'
deploy_goal9['envId'] = 'efg'
deploy_goal9['stageName'] = 'prod'
deploy_goal9['deployStage'] = DeployStage.POST_RESTART
deploy_goal10 = {}
deploy_goal10['deployId'] = '234'
deploy_goal10['envName'] = 'bcd'
deploy_goal10['envId'] = 'efg'
deploy_goal10['stageName'] = 'prod'
deploy_goal10['deployStage'] = DeployStage.SERVING_BUILD
ping_response5 = {'deployGoal': deploy_goal5, 'opCode': OpCode.DEPLOY}
ping_response6 = {'deployGoal': deploy_goal6, 'opCode': OpCode.DEPLOY}
ping_response7 = {'deployGoal': deploy_goal7, 'opCode': OpCode.DEPLOY}
ping_response8 = {'deployGoal': deploy_goal8, 'opCode': OpCode.DEPLOY}
ping_response9 = {'deployGoal': deploy_goal9, 'opCode': OpCode.DEPLOY}
ping_response10 = {'deployGoal': deploy_goal10, 'opCode': OpCode.DEPLOY}
ping_response_list = [
PingResponse(jsonValue=ping_response1),
PingResponse(jsonValue=ping_response2),
PingResponse(jsonValue=ping_response3),
PingResponse(jsonValue=ping_response4),
PingResponse(jsonValue=ping_response5),
PingResponse(jsonValue=ping_response6),
PingResponse(jsonValue=ping_response7),
PingResponse(jsonValue=ping_response8),
PingResponse(jsonValue=ping_response9),
PingResponse(jsonValue=ping_response10),
PingResponse(jsonValue=self.ping_noop_response)
]
client = mock.Mock()
client.send_reports = mock.Mock(side_effect=ping_response_list)
d = DeployAgent(client=client, estatus=self.estatus, conf=self.config,
executor=self.executor, helper=self.helper)
d.serve_build()
calls = [mock.call(['deploy-downloader', '-f', '/etc/deployagent.conf', '-v',
'123', '-u', 'https://test', '-e', 'abc']),
mock.call(['deploy-stager', '-f', '/etc/deployagent.conf', '-v',
'123', '-t', '/tmp/tests', '-e', 'abc']),
mock.call(['deploy-downloader', '-f', '/etc/deployagent.conf', '-v',
'123', '-u', 'https://test2', '-e', 'bcd']),
mock.call(['deploy-stager', '-f', '/etc/deployagent.conf', '-v',
'123', '-t', '/tmp/tests', '-e', 'bcd'])]
self.executor.run_cmd.assert_has_calls(calls)
self.assertEqual(len(d._envs), 2)
self.assertEqual(d._envs['abc'].report.deployStage, DeployStage.PRE_RESTART)
self.assertEqual(d._envs['abc'].report.deployId, '123')
self.assertEqual(d._envs['abc'].report.envId, 'def')
self.assertEqual(d._envs['abc'].report.status, AgentStatus.SUCCEEDED)
self.assertEqual(d._envs['abc'].build_info.build_commit, 'abcd')
self.assertEqual(d._envs['abc'].build_info.build_url, 'https://test')
self.assertEqual(d._envs['bcd'].report.deployStage, DeployStage.SERVING_BUILD)
self.assertEqual(d._envs['bcd'].report.deployId, '234')
self.assertEqual(d._envs['bcd'].report.envId, 'efg')
self.assertEqual(d._envs['bcd'].report.status, AgentStatus.SUCCEEDED)
self.assertEqual(d._envs['bcd'].build_info.build_commit, 'abcd')
self.assertEqual(d._envs['bcd'].build_info.build_url, 'https://test2')
def test_delete_report(self):
status = DeployStatus()
ping_report = {}
ping_report['deployId'] = '123'
ping_report['envId'] = '234'
ping_report['envName'] = 'abc'
ping_report['stageName'] = 'beta'
ping_report['deployStage'] = DeployStage.SERVING_BUILD
ping_report['status'] = AgentStatus.SUCCEEDED
status.report = PingReport(jsonValue=ping_report)
envs = {'abc': status}
client = mock.Mock()
estatus = mock.Mock()
estatus.load_envs = mock.Mock(return_value=envs)
deploy_goal = {}
deploy_goal['deployId'] = '123'
deploy_goal['envId'] = '234'
deploy_goal['envName'] = 'abc'
deploy_goal['stageName'] = 'beta'
ping_response = {'deployGoal': deploy_goal, 'opCode': OpCode.DELETE}
responses = [
PingResponse(jsonValue=ping_response),
PingResponse(jsonValue=self.ping_noop_response)
]
client.send_reports = mock.Mock(side_effect=responses)
agent = DeployAgent(client=client, estatus=estatus, conf=self.config,
executor=self.executor, helper=self.helper)
agent.serve_build()
calls = [mock.call(envs), mock.call({})]
client.send_reports.assert_has_calls(calls)
self.assertIsNone(agent._curr_report)
self.assertEqual(agent._envs, {})
def test_init_report(self):
if os.path.exists('/tmp/env_status'):
os.remove('/tmp/env_status')
client = mock.Mock()
client.send_reports = \
mock.Mock(return_value=(PingResponse(jsonValue=self.ping_noop_response)))
d = DeployAgent(client=client, estatus=self.estatus,
conf=self.config, executor=self.executor, helper=self.helper)
d.serve_build()
client.send_reports.assert_called_once_with({})
def test_report_health(self):
status = DeployStatus()
ping_report = {}
ping_report['deployId'] = '123'
ping_report['envId'] = '234'
ping_report['envName'] = 'abc'
ping_report['stageName'] = 'beta'
ping_report['deployStage'] = DeployStage.SERVING_BUILD
ping_report['status'] = AgentStatus.SUCCEEDED
status.report = PingReport(jsonValue=ping_report)
envs = {'234': status}
client = mock.Mock()
estatus = mock.Mock()
estatus.load_envs = mock.Mock(return_value=envs)
client.send_reports = \
mock.Mock(return_value=PingResponse(jsonValue=self.ping_noop_response))
agent = DeployAgent(client=client, estatus=estatus, conf=self.config,
executor=self.executor, helper=self.helper)
agent.serve_build()
client.send_reports.assert_called_once_with(envs)
self.assertEqual(agent._curr_report.report.envId, '234')
self.assertEqual(agent._curr_report.report.deployStage, DeployStage.SERVING_BUILD)
self.assertEqual(agent._curr_report.report.status, AgentStatus.SUCCEEDED)
def test_report_with_deploy_goal(self):
if os.path.exists('/tmp/env_status'):
os.remove('/tmp/env_status')
build = {}
build['id'] = '123'
build['url'] = 'https://test'
client = mock.Mock()
deploy_goal = {}
deploy_goal['deployId'] = '123'
deploy_goal['envName'] = '456'
deploy_goal['envId'] = '789'
deploy_goal['stageName'] = 'beta'
deploy_goal['deployStage'] = DeployStage.PRE_DOWNLOAD
deploy_goal['scriptVariables'] = build
ping_response = {'deployGoal': deploy_goal, 'opCode': OpCode.DEPLOY}
responses = [
PingResponse(jsonValue=ping_response),
PingResponse(jsonValue=self.ping_noop_response)
]
client.send_reports = mock.Mock(side_effect=responses)
agent = DeployAgent(client=client, estatus=self.estatus, conf=self.config,
executor=self.executor, helper=self.helper)
agent.process_deploy = mock.Mock(return_value=(DeployReport(AgentStatus.SUCCEEDED)))
agent.serve_build()
self.assertEqual(agent._curr_report.report.envId, '789')
self.assertEqual(agent._curr_report.report.deployStage, DeployStage.PRE_DOWNLOAD)
self.assertEqual(len(agent._envs), 1)
def test_set_and_get_deploy_status(self):
envvar = {}
envvar['id'] = 'bar'
envvar['url'] = 'https://abc-123.tar.gz'
build = {}
build['id'] = '123'
build['name'] = 'bar'
build['commitShort'] = '234'
build['artifactUrl'] = 'https://abc-123.tar.gz'
ping_response1 = self.ping_response1
ping_response1['deployGoal']['scriptVariables'] = envvar
ping_response2 = self.ping_response2
ping_response2['deployGoal']['build'] = build
deploy_goal5 = {}
deploy_goal5['deployId'] = '123'
deploy_goal5['envName'] = 'abc'
deploy_goal5['envId'] = 'def'
deploy_goal5['stageName'] = 'beta'
deploy_goal5['deployStage'] = DeployStage.RESTARTING
deploy_goal6 = {}
deploy_goal6['deployId'] = '123'
deploy_goal6['envName'] = 'abc'
deploy_goal6['envId'] = 'def'
deploy_goal6['stageName'] = 'beta'
deploy_goal6['deployStage'] = DeployStage.POST_RESTART
deploy_goal7 = {}
deploy_goal7['deployId'] = '123'
deploy_goal7['envName'] = 'abc'
deploy_goal7['envId'] = 'def'
deploy_goal7['stageName'] = 'beta'
deploy_goal7['deployStage'] = DeployStage.SERVING_BUILD
ping_response5 = {'deployGoal': deploy_goal5, 'opCode': OpCode.DEPLOY}
ping_response6 = {'deployGoal': deploy_goal6, 'opCode': OpCode.DEPLOY}
ping_response7 = {'deployGoal': deploy_goal7, 'opCode': OpCode.DEPLOY}
ping_response_list = [
PingResponse(jsonValue=ping_response1),
PingResponse(jsonValue=ping_response2),
PingResponse(jsonValue=self.ping_response3),
PingResponse(jsonValue=self.ping_response4),
PingResponse(jsonValue=ping_response5),
PingResponse(jsonValue=ping_response6),
PingResponse(jsonValue=ping_response7),
PingResponse(jsonValue=self.ping_noop_response)
]
client = mock.Mock()
client.send_reports = mock.Mock(side_effect=ping_response_list)
d = DeployAgent(client=client, estatus=self.estatus, conf=self.config,
executor=self.executor, helper=self.helper)
d.serve_build()
calls = [mock.call(stage)
for stage in ['PRE_DOWNLOAD', 'PRE_RESTART', 'RESTARTING', 'POST_RESTART']]
self.executor.execute_command.assert_has_calls(calls)
self.assertEqual(len(d._envs), 1)
self.assertEqual(d._curr_report.report.envId, 'def')
self.assertEqual(d._curr_report.report.envName, 'abc')
self.assertEqual(d._curr_report.report.deployId, '123')
self.assertEqual(d._curr_report.report.stageName, 'beta')
self.assertEqual(d._curr_report.report.deployStage, DeployStage.SERVING_BUILD)
def test_plan_change(self):
old_response = None
new_response = None
self.assertFalse(DeployAgent.plan_changed(old_response, new_response))
new_response = PingResponse()
self.assertTrue(DeployAgent.plan_changed(old_response, new_response))
old_response = PingResponse()
old_response.opCode = OpCode.DEPLOY
new_response.opCode = OpCode.NOOP
self.assertTrue(DeployAgent.plan_changed(old_response, new_response))
new_response.opCode = OpCode.DEPLOY
self.assertFalse(DeployAgent.plan_changed(old_response, new_response))
deploy_goal = {}
deploy_goal['deployId'] = '123'
deploy_goal2 = {}
deploy_goal2['deployId'] = '234'
old_response.deployGoal = DeployGoal(jsonValue=deploy_goal)
new_response.deployGoal = DeployGoal(jsonValue=deploy_goal2)
self.assertTrue(DeployAgent.plan_changed(old_response, new_response))
new_response.deployGoal.deployId = '123'
new_response.deployGoal.deployStage = DeployStage.PRE_RESTART
old_response.deployGoal.deployStage = DeployStage.PRE_RESTART
self.assertFalse(DeployAgent.plan_changed(old_response, new_response))
def test_switch_goal_download_variable_failed(self):
pass
if __name__ == '__main__':
unittest.main()
| 43.777108
| 125
| 0.634237
|
9760e75d6f5f857820bc35a7d267dc6323eb9be3
| 2,055
|
py
|
Python
|
helper/refactor_command.py
|
Bhanditz/JavaScriptEnhancements
|
f87ff0ae9dba99bab69bf4fe4e73ca29d198f81e
|
[
"MIT"
] | null | null | null |
helper/refactor_command.py
|
Bhanditz/JavaScriptEnhancements
|
f87ff0ae9dba99bab69bf4fe4e73ca29d198f81e
|
[
"MIT"
] | null | null | null |
helper/refactor_command.py
|
Bhanditz/JavaScriptEnhancements
|
f87ff0ae9dba99bab69bf4fe4e73ca29d198f81e
|
[
"MIT"
] | null | null | null |
# import sublime, sublime_plugin
# import re
# { "caption": "-" },
# {
# "caption": "Refactor (Working on it ...)",
# "id": "refactor",
# "children": [
# {
# "caption": "Extract",
# "children": [
# {
# "caption": "Method",
# "command": "refactor",
# "args": {"case": "extract_method"}
# }
# ]
# }
# ]
# }
# class refactorCommand(sublime_plugin.TextCommand):
# def run(self, edit, **args):
# view = self.view
# case = args.get("case")
# if not "answer" in args :
# caption = ""
# initial_text = ""
# if case == "extract_method" :
# caption = "Method:"
# initial_text = "func ()"
# view.window().show_input_panel(caption, initial_text, lambda answer: view.run_command('refactor', args={"case": case, "answer": answer}), None, None)
# else :
# answer = args.get("answer").strip()
# scope = view.scope_name(view.sel()[0].begin())
# space = Util.get_whitespace_from_line_begin(view, view.sel()[0])
# if case == "extract_method" :
# new_text = Util.replace_with_tab(view, view.sel()[0], "\t\n\t"+answer+" {\n\t", "\n\t}\n")
# view.replace(edit, view.sel()[0], "this."+(re.sub('\s+\(', '(', answer)) )
# region_class = Util.get_region_scope_first_match(view, scope, view.sel()[0], 'meta.class.js')["region"]
# view.insert(edit, region_class.end()-1, new_text)
# def is_enabled(self, **args) :
# view = self.view
# if not Util.selection_in_js_scope(view) :
# return False
# selections = view.sel()
# for selection in selections :
# if view.substr(selection).strip() != "" :
# return True
# return False
# def is_visible(self, **args) :
# view = self.view
# if not Util.selection_in_js_scope(view) :
# return False
# selections = view.sel()
# for selection in selections :
# if view.substr(selection).strip() != "" :
# return True
# return False
| 32.109375
| 157
| 0.543552
|
c7be26e6e05d19190651c72af1d0b82d3cb8ae56
| 430
|
py
|
Python
|
turtle-lib-tests/circle.py
|
actiago/estudos.py
|
c0aeca05dfc80b6f22e977f26bae3af9af3cbe1a
|
[
"MIT"
] | 2
|
2020-10-06T12:15:39.000Z
|
2020-10-06T12:39:03.000Z
|
turtle-lib-tests/circle.py
|
actiago/estudos.py
|
c0aeca05dfc80b6f22e977f26bae3af9af3cbe1a
|
[
"MIT"
] | null | null | null |
turtle-lib-tests/circle.py
|
actiago/estudos.py
|
c0aeca05dfc80b6f22e977f26bae3af9af3cbe1a
|
[
"MIT"
] | null | null | null |
import turtle
ninja = turtle.Turtle()
colors = [ 'red', 'purple', 'yellow', 'blue', 'green', 'orange' ]
ninja.speed(10)
turtle.bgcolor('black')
for i in range(180):
ninja.pencolor(colors[i%6])
ninja.forward(100)
ninja.right(30)
ninja.forward(20)
ninja.left(60)
ninja.forward(50)
ninja.right(30)
ninja.penup()
ninja.setposition(0, 0)
ninja.pendown()
ninja.right(2)
turtle.done()
| 15.925926
| 65
| 0.625581
|
bb67fb634ce051c9b028f45b081eb2294b82db8d
| 4,800
|
py
|
Python
|
test/nonregression/iotools/test_run_converters.py
|
ravih18/clinica
|
07dfe5ba3bab5852a220dba2c88ab0c5132ef26e
|
[
"MIT"
] | null | null | null |
test/nonregression/iotools/test_run_converters.py
|
ravih18/clinica
|
07dfe5ba3bab5852a220dba2c88ab0c5132ef26e
|
[
"MIT"
] | null | null | null |
test/nonregression/iotools/test_run_converters.py
|
ravih18/clinica
|
07dfe5ba3bab5852a220dba2c88ab0c5132ef26e
|
[
"MIT"
] | null | null | null |
# coding: utf8
"""
This file contains a set of functional tests designed to check the correct execution of the pipeline and the
different functions available in Clinica
"""
import warnings
from os import PathLike, fspath
from pathlib import Path
from test.nonregression.testing_tools import (
clean_folder,
compare_folders,
compare_folders_structures,
compare_folders_with_hashes,
create_list_hashes,
identical_subject_list,
same_missing_modality_tsv,
)
import pytest
# Determine location for working_directory
warnings.filterwarnings("ignore")
@pytest.fixture(
params=[
"Nifd2Bids",
"Oasis2Bids",
"Oasis3ToBids",
"Adni2Bids",
"Aibl2Bids",
]
)
def test_name(request):
return request.param
def run_nifd2bids(input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike) -> None:
import shutil
from clinica.iotools.converters.nifd_to_bids.nifd_to_bids import convert_images
# Arrange
shutil.copytree(
input_dir / "clinical_data",
output_dir / "clinical_data",
copy_function=shutil.copy,
)
# Arrange - Data location
clinical_data_directory = output_dir / "clinical_data"
    # Act - Conversion
to_convert = convert_images(
input_dir / "unorganized", output_dir / "bids", clinical_data_directory
)
# Assert
compare_folders_structures(
fspath(output_dir / "bids"), fspath(ref_dir / "hashes_nifd.p")
)
def run_oasis2bids(
input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike
) -> None:
from clinica.iotools.converters.oasis_to_bids.oasis_to_bids import OasisToBids
# Arrange
clinical_data_directory = input_dir / "clinical_data"
# Act
oasis_to_bids = OasisToBids()
oasis_to_bids.convert_images(input_dir / "unorganized", output_dir / "bids")
oasis_to_bids.convert_clinical_data(clinical_data_directory, output_dir / "bids")
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir)
def run_oasis3tobids(
input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike
) -> None:
from clinica.iotools.converters.oasis3_to_bids.oasis3_to_bids import convert_images
# Arrange
clinical_data_directory = input_dir / "clinical_data"
# Act
convert_images(
input_dir / "unorganized", output_dir / "bids", clinical_data_directory
)
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir)
def run_adni2bids(input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike) -> None:
from clinica.iotools.converters.adni_to_bids.adni_to_bids import AdniToBids
# Arrange
clinical_data_directory = input_dir / "clinical_data"
dataset_directory = input_dir / "unorganized_data"
subjects_list = input_dir / "subjects.txt"
modalities = ["T1", "PET_FDG", "PET_AMYLOID", "PET_TAU", "DWI", "FLAIR", "fMRI"]
# Act
adni_to_bids = AdniToBids()
adni_to_bids.check_adni_dependencies()
adni_to_bids.convert_images(
dataset_directory,
clinical_data_directory,
output_dir / "bids",
subjects_list,
modalities,
)
adni_to_bids.convert_clinical_data(clinical_data_directory, output_dir / "bids")
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir)
def run_aibl2bids(input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike) -> None:
from clinica.iotools.converters.aibl_to_bids.aibl_to_bids import (
convert_clinical_data,
convert_images,
)
# Arrange
clinical_data_directory = input_dir / "Data_extract_3.2.5"
dataset_directory = input_dir / "unorganized_data"
# Act
convert_images(
dataset_directory,
clinical_data_directory,
output_dir / "bids",
)
convert_clinical_data(output_dir / "bids", clinical_data_directory)
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir)
def test_run_convertors(cmdopt, tmp_path, test_name):
import shutil
base_dir = Path(cmdopt["input"])
input_dir = base_dir / test_name / "in"
ref_dir = base_dir / test_name / "ref"
tmp_out_dir = tmp_path / test_name / "out"
tmp_out_dir.mkdir(parents=True)
if test_name == "Nifd2Bids":
run_nifd2bids(input_dir, tmp_out_dir, ref_dir)
elif test_name == "Oasis2Bids":
run_oasis2bids(input_dir, tmp_out_dir, ref_dir)
elif test_name == "Oasis3ToBids":
run_oasis3tobids(input_dir, tmp_out_dir, ref_dir)
elif test_name == "Adni2Bids":
run_adni2bids(input_dir, tmp_out_dir, ref_dir)
elif test_name == "Aibl2Bids":
run_aibl2bids(input_dir, tmp_out_dir, ref_dir)
else:
print(f"Test {test_name} not available.")
assert 0
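# Editor's hedged sketch (not part of Clinica): the if/elif dispatch above
# could equivalently keep the parametrised test names and their runner
# functions together in a single mapping, e.g.
#
#     _RUNNERS = {
#         "Nifd2Bids": run_nifd2bids,
#         "Oasis2Bids": run_oasis2bids,
#         "Oasis3ToBids": run_oasis3tobids,
#         "Adni2Bids": run_adni2bids,
#         "Aibl2Bids": run_aibl2bids,
#     }
#     _RUNNERS[test_name](input_dir, tmp_out_dir, ref_dir)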
| 29.268293
| 108
| 0.704792
|
cfbbb1eadd4c0cb360bcc94d64424ed73967f1cd
| 11,593
|
py
|
Python
|
DESHIMA/use_desim.py
|
EsmeeHuijten/DESHIMAmodel
|
6e0e3f68276ce3802f233ee999c0a341f79fa120
|
[
"MIT"
] | 1
|
2020-02-04T12:46:25.000Z
|
2020-02-04T12:46:25.000Z
|
DESHIMA/use_desim.py
|
EsmeeHuijten/DESHIMAmodel
|
6e0e3f68276ce3802f233ee999c0a341f79fa120
|
[
"MIT"
] | null | null | null |
DESHIMA/use_desim.py
|
EsmeeHuijten/DESHIMAmodel
|
6e0e3f68276ce3802f233ee999c0a341f79fa120
|
[
"MIT"
] | 2
|
2020-03-12T06:46:54.000Z
|
2020-09-02T09:00:49.000Z
|
import numpy as np
import pandas as pd
import time
import math
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from scipy import integrate
import sys
sys.path.append('./DESHIMA/desim/')
sys.path.append('./desim/')
sys.path.append('../desim/')
import minidesim as dsm
sys.path.append('./DESHIMA/')
# cython
# import pyximport; pyximport.install()
# import Lorentzian
class use_desim(object):
h = 6.62607004 * 10**-34
k = 1.38064852 * 10**-23
e = 1.60217662 * 10**-19 # electron charge
c = 299792458.
Delta_Al = 188 * 10**-6 * e # gap energy of Al
eta_pb = 0.4
def __init__(self):
self.instrument_properties_D1 = {
'eta_M1_spill' : 0.99,
'eta_M2_spill' : 0.90,
'n_wo_mirrors' : 2.,
'window_AR' : False,
'eta_co' : 0.65, # product of co spillover, qo filter
'eta_lens_antenna_rad' : 0.81, # D2_2V3.pdf, p14: front-to-back ratio 0.93 * reflection efficiency 0.9 * matching 0.98 * antenna spillover 0.993
'eta_IBF' : 0.6,
'KID_excess_noise_factor' : 1.0,
'Tb_cmb' : 2.725,
'Tp_amb' : 273.,
'Tp_cabin' : 290.,
'Tp_co' : 4.,
'Tp_chip' : 0.12,
}
self.instrument_properties_D2 = {
'eta_M1_spill' : 0.99,
'eta_M2_spill' : 0.90,
'n_wo_mirrors' : 4.,
'window_AR' : True,
'eta_co' : 0.65, # product of co spillover, qo filter
'eta_lens_antenna_rad' : 0.81, # D2_2V3.pdf, p14: front-to-back ratio 0.93 * reflection efficiency 0.9 * matching 0.98 * antenna spillover 0.993
'eta_IBF' : 0.6,
'KID_excess_noise_factor' : 1.1,
'Tb_cmb' : 2.725,
'Tp_amb' : 273.,
'Tp_cabin' : 290.,
'Tp_co' : 4.,
'Tp_chip' : 0.12,
}
    @staticmethod
    def calcLorentzian(F_bins_Lor_mesh, F_filters_mesh, R):
# F_bins_Lor_mesh, F_filters_mesh = np.meshgrid(F_bins_Lor, F_filters)
FWHM = F_filters_mesh/R
y_array = 1/math.pi * 1/2 * FWHM / ((F_bins_Lor_mesh-F_filters_mesh)**2 + (1/2 * FWHM)**2)
return y_array
    @staticmethod
    def D2HPBW(F):
HPBW = 29.*240./(F/1e9) * np.pi / 180. / 60. / 60.
return HPBW
def eta_mb_ruze(self, F, LFlimit, sigma):
        '''F in Hz, LFlimit is the eta_mb in the limit F -> 0 Hz, sigma in m'''
eta_mb = LFlimit* np.exp(- (4.*np.pi* sigma * F/self.c)**2. )
return eta_mb
def transmit_through_DESHIMA(self, signal_instance, pwv_value):
F_min = signal_instance.F_min
F_max = signal_instance.F_max
num_filters = signal_instance.num_filters
num_bins_Lor = signal_instance.num_bins
R = signal_instance.spec_res
eta_atm_df = signal_instance.eta_atm_df
F_highres = signal_instance.F_highres
eta_atm_func_zenith = signal_instance.eta_atm_func_zenith
psd_gal = signal_instance.psd_gal
EL = signal_instance.EL
D1 = signal_instance.D1
pwv_values_no_gal = np.array([pwv_value[0], pwv_value[2], pwv_value[3], pwv_value[4]])
pwv_value_gal = np.array([pwv_value[0], pwv_value[1]])
F_filters = signal_instance.filters
margin = 10e9
# F_bins_Lor = np.logspace(np.log10(F_min-margin), np.log10(F_max + margin), num_bins_Lor)
F_bins_Lor = np.linspace(F_min-margin, F_max + margin, num_bins_Lor)
if D1:
instrument_properties = self.instrument_properties_D1
theta_maj = 31.4*np.pi/180./60./60.
theta_min = 22.8*np.pi/180./60./60.
eta_mb = 0.34
eta_filter_peak = 0.35 * 0.1
else:
instrument_properties = self.instrument_properties_D2
HPBW = use_desim.D2HPBW(F_bins_Lor)
eta_mb = self.eta_mb_ruze(F=F_bins_Lor,LFlimit=0.8,sigma=37e-6) * 0.9 # see specs, 0.9 is from EM, ruze is from ASTE
theta_maj = HPBW
theta_min = HPBW
eta_filter_peak = 0.4
Desim_input_params ={
'eta_atm_df': eta_atm_df,
'F_highres': F_highres,
'eta_atm_func_zenith': eta_atm_func_zenith,
'F' : F_bins_Lor,
'pwv':pwv_values_no_gal,
'EL':EL,
# 'R' : R,
'theta_maj' : theta_maj,
'theta_min' : theta_min,
'eta_mb' : eta_mb,
'psd_gal': psd_gal,
'inclGal': 0
}
Desim_input = dict(instrument_properties, **Desim_input_params)
DESHIMA_transmitted_no_gal = dsm.spectrometer_sensitivity(**Desim_input) # takes a lot of time
Desim_input_params['pwv'] = pwv_value_gal
Desim_input_params['inclGal'] = 1
Desim_input = dict(instrument_properties, **Desim_input_params)
DESHIMA_transmitted_gal = dsm.spectrometer_sensitivity(**Desim_input) # takes a lot of time
psd_co_no_gal = DESHIMA_transmitted_no_gal['psd_co'] #vector because of F
psd_co_gal = DESHIMA_transmitted_gal['psd_co']
psd_co = np.zeros([num_bins_Lor, 5])
for i in range(0, 4):
if i == 0:
psd_co[:, 0] = psd_co_no_gal[:, 0]
else:
psd_co[:, i + 1] = psd_co_no_gal[:, i]
psd_co[:, 1] = psd_co_gal[:, 1]
psd_jn_chip = DESHIMA_transmitted_no_gal['psd_jn_chip']
F_bins_Lor_mesh, F_filters_mesh = np.meshgrid(F_bins_Lor, F_filters)
eta_circuit = use_desim.calcLorentzian(F_bins_Lor_mesh, F_filters_mesh, R) * math.pi * F_filters_mesh/(2 * R) * eta_filter_peak
eta_chip = instrument_properties['eta_lens_antenna_rad'] * eta_circuit
# calculate psd_KID with different values for pwv
psd_medium = np.transpose((1-eta_chip)*np.transpose(np.array(psd_jn_chip)))
psd_KID = np.zeros([psd_co.shape[1], num_filters, num_bins_Lor])
for i in range(num_bins_Lor):
psd_co_i = psd_co[i, :].reshape(psd_co[i, :].shape[0], 1)
eta_chip_i = eta_chip[:, i].reshape(1, eta_chip[:, i].shape[0])
psd_KID_in_i = eta_chip_i*psd_co_i
result = psd_KID_in_i + psd_medium[i, :]
psd_KID[:, :, i] = result
return DESHIMA_transmitted_no_gal, F_bins_Lor, psd_KID, F_filters
##------------------------------------------------------------------------------
## Everything under this is not used in the model, only for making the interpolation curves and plotting
##------------------------------------------------------------------------------
def obt_data(self, input, D1):
F = input['F']
data_names = input['data_names']
# del(input['data_names'])
if D1:
instrument_properties = self.instrument_properties_D1
else:
instrument_properties = self.instrument_properties_D2
sensitivity_input = dict(instrument_properties, **input)
del(sensitivity_input['data_names'])
D2goal = dsm.spectrometer_sensitivity(**sensitivity_input) # takes a lot of time
data = []
for el in data_names:
data.append(np.array(D2goal[el]))
return data
def calcT_psd_P(self, eta_atm_df, F_highres, eta_atm_func_zenith, F_filter, EL_vector, num_filters, pwv = 0.1, R = 500, num_bins = 1500, progressbar = None, D1 = 0):
length_EL_vector = len(EL_vector)
margin = 10e9
# F_bins = np.logspace(np.log10(F_filter[0]-margin), np.log10(F_filter[-1] + margin), num_bins) #to calculate the Lorentzian
F_bins = np.linspace(F_filter[0] - margin, F_filter[-1] + margin, num_bins)
if D1:
instrument_properties = self.instrument_properties_D1
theta_maj = 31.4*np.pi/180./60./60. * np.ones(num_bins)
theta_min = 22.8*np.pi/180./60./60. * np.ones(num_bins)
eta_mb = 0.34 * np.ones(num_bins)
eta_filter_peak = 0.35 * 0.1
else:
instrument_properties = self.instrument_properties_D2
eta_mb = self.eta_mb_ruze(F=F_bins,LFlimit=0.8,sigma=37e-6) * 0.9 # see specs, 0.9 is from EM, ruze is from ASTE
HPBW = use_desim.D2HPBW(F_bins)
theta_maj = HPBW
theta_min = HPBW
eta_filter_peak = 0.4
# Initializing variables
psd_KID = np.zeros([num_filters, num_bins, length_EL_vector])
Tb_sky = np.zeros([num_filters, length_EL_vector])
Pkid = np.zeros([num_filters, length_EL_vector])
psd_co = np.zeros([num_bins, length_EL_vector])
psd_jn_chip = np.zeros([num_bins, length_EL_vector])
eta_circuit = np.zeros(num_bins)
# Obtain psd_co and psd_jn_chip from desim
for j in range(0, num_bins):
input = {
'F': F_bins[j],
'pwv': pwv,
'EL': EL_vector,
'data_names': ['psd_co', 'psd_jn_chip'],
'eta_atm_df': eta_atm_df,
'F_highres': F_highres,
'eta_atm_func_zenith': eta_atm_func_zenith,
'theta_maj' : theta_maj[j],
'theta_min' : theta_min[j],
'eta_mb' : eta_mb[j]
}
[psd_co[j, :], psd_jn_chip[j, :]] = self.obt_data(input, D1)
if progressbar:
progressbar.next()
# Obtain psd_kid
for i in range(0, num_filters):
            # Put a Lorentzian curve with peak height eta_filter_peak and center frequency F_filter[i] in eta_circuit
eta_circuit = use_desim.calcLorentzian(F_bins, F_filter[i], R) * math.pi * F_filter[i]/(2 * R) * eta_filter_peak
eta_chip = instrument_properties['eta_lens_antenna_rad'] * eta_circuit
eta_chip_matrix = np.tile(eta_chip.reshape(len(eta_chip), 1), (1, length_EL_vector))
psd_KID[i, :, :] = dsm.rad_trans(psd_co, psd_jn_chip, eta_chip_matrix)
if progressbar:
progressbar.next()
delta_F = F_bins[1] - F_bins[0]
numerators = np.zeros([EL_vector.shape[0], num_filters])
denominators = np.zeros(num_filters)
for k in range(0, num_filters):
transmission = use_desim.calcLorentzian(F_bins, F_filter[k], R)
transmission = transmission.reshape([transmission.shape[0], 1])
numerators[:, k] = delta_F * np.sum(transmission \
* dsm.eta_atm_func(F=F_bins, pwv=pwv, EL=EL_vector, eta_atm_df=eta_atm_df, F_highres=F_highres, eta_atm_func_zenith=eta_atm_func_zenith), axis = 0)
denominators[k] = delta_F * np.sum(transmission) # delta_F taken out of sum because it is the same for each bin
eta_atm_matrix = np.transpose(numerators/denominators)
if D1 == 0:
eta_mb = self.eta_mb_ruze(F=F_filter,LFlimit=0.8,sigma=37e-6) * 0.9 # see specs, 0.9 is from EM, ruze is from ASTE
HPBW = use_desim.D2HPBW(F_filter)
theta_maj = HPBW
theta_min = HPBW
# Obtain Tb_sky
for l in range(0, num_filters):
input = {
'F': F_filter[l],
'pwv': pwv,
'EL': EL_vector,
'data_names': ['Tb_sky'],
'eta_atm_df': eta_atm_df,
'F_highres': F_highres,
'eta_atm_func_zenith': eta_atm_func_zenith,
'eta_atm_smeared': eta_atm_matrix[l, :],
'theta_maj' : theta_maj[l],
'theta_min' : theta_min[l],
'eta_mb' : eta_mb[l]
}
Tb_sky[l, :] = self.obt_data(input, D1)[0]
if progressbar:
progressbar.next()
return Tb_sky, psd_KID, F_bins
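# --- Editor's hedged sketch (not part of the DESHIMA model) ---
# Quick numerical sanity checks of the helper formulas above, with
# illustrative values only: the bare Lorentzian integrates to ~1 over
# frequency, so scaling it by pi*F0/(2R)*eta_filter_peak gives a filter curve
# whose peak is ~eta_filter_peak, and eta_mb_ruze gives the Ruze-degraded
# main-beam efficiency. Running this block requires the same dependencies as
# the module itself (numpy, the desim path hacks, etc.).
if __name__ == '__main__':
    F0, R0 = 350e9, 500.
    F_grid = np.linspace(F0 - 50 * F0 / R0, F0 + 50 * F0 / R0, 20001)
    profile = use_desim.calcLorentzian(F_grid, F0, R0)
    print('Lorentzian integral ~', profile.sum() * (F_grid[1] - F_grid[0]))            # ~1.0
    print('peak filter eta ~', (profile * math.pi * F0 / (2 * R0) * 0.4).max())        # ~0.4
    print('eta_mb (Ruze) ~', use_desim().eta_mb_ruze(F=F0, LFlimit=0.8, sigma=37e-6))  # ~0.6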
| 43.912879
| 169
| 0.590788
|
1f09f67f0ba06ebfb87a12348e24aff80c25be96
| 3,499
|
py
|
Python
|
concerns/dmoj.py
|
JunyuanChen/SonnyBot-ng
|
9f06b12c0985c77fb154a4c89b975948c9e4953c
|
[
"CC0-1.0"
] | null | null | null |
concerns/dmoj.py
|
JunyuanChen/SonnyBot-ng
|
9f06b12c0985c77fb154a4c89b975948c9e4953c
|
[
"CC0-1.0"
] | null | null | null |
concerns/dmoj.py
|
JunyuanChen/SonnyBot-ng
|
9f06b12c0985c77fb154a4c89b975948c9e4953c
|
[
"CC0-1.0"
] | null | null | null |
# coding: utf-8
""" DMOJ web scraping. """
import json
import requests
import lxml.html
from concerns import (
calc_exp,
calc_coins
)
with open("assets/ccc.json", "r", encoding="utf-8") as f:
CCC_PROBLEMS = json.load(f)
def ccc_difficulty(problem):
if problem in CCC_PROBLEMS:
return CCC_PROBLEMS[problem]["difficulty"]
return 0
def api_for(username):
quoted = requests.utils.quote(username)
return f"https://dmoj.ca/user/{quoted}/solved"
CCC_DATA_XPATH = """
//div[@class="user-problem-group"][contains(., "CCC")]/table/tbody/tr
""".strip()
def extract_ccc(content):
result = {}
rows = lxml.html.document_fromstring(content).xpath(CCC_DATA_XPATH)
for row in rows:
problem_url = row.xpath('td[@class="problem-name"]/a/@href')[0]
score_str = row.xpath('td[@class="problem-score"]/a/text()')[0]
score, total = map(float, score_str.split("/"))
percentage = round(score / total * 100)
result[problem_url] = percentage
return result
def fetch_ccc(username):
with requests.get(api_for(username)) as resp:
return extract_ccc(resp.content)
def connect(user, username):
ccc = fetch_ccc(username)
if ccc:
user.dmoj_username = username
return update_ccc(user, ccc)
return None
def update(user):
if user.dmoj_username is None:
raise ValueError("DMOJ Account not connected")
ccc = fetch_ccc(user.dmoj_username)
return update_ccc(user, ccc)
def reward(total_reward, percentage):
"""
Reward for a problem with total_reward done to percentage.
    Since the first few test cases are generally very easy, a linear
    approach would be unfair. Thus, the reward is given in a 20:80 ratio.
The first 50% gives 20% of the reward, and the last 50% gives 80%
of the reward.
Thus, if percentage <= 0.5, then the percentage of reward given is:
0.2 * (percentage / 0.5) = 0.4 * percentage
And if percentage >= 0.5, then the weighed percentage is:
        0.2 + 0.8 * ((percentage - 0.5) / 0.5) = 1.6 * percentage - 0.6
"""
percentage /= 100
if percentage <= 0.5:
weighed_percentage = 0.4 * percentage
else:
weighed_percentage = 1.6 * percentage - 0.6
return round(total_reward * weighed_percentage)
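# Editor's hedged worked example (not part of the original bot): with a
# hypothetical total reward of 100, the 20:80 weighting above gives
#   reward(100, 25)  -> 10   (0.4 * 0.25       = 0.10)
#   reward(100, 50)  -> 20   (0.4 * 0.50       = 0.20)
#   reward(100, 75)  -> 60   (1.6 * 0.75 - 0.6 = 0.60)
#   reward(100, 100) -> 100  (1.6 * 1.00 - 0.6 = 1.00)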
def new_reward(total_reward, old_percentage, new_percentage):
"""
New reward eligible after completing the problem to new_percentage.
    The user has already received some rewards. Now that they have finished
    the problem to a higher percentage, they become eligible for some
    new rewards.
"""
old = reward(total_reward, old_percentage)
new = reward(total_reward, new_percentage)
return new - old
def update_ccc(user, ccc):
exp_reward = 0
coin_reward = 0
for problem, percentage in ccc.items():
if problem in user.ccc_progress:
old_percentage = user.ccc_progress[problem]
else:
old_percentage = 0
if old_percentage < percentage:
user.ccc_progress[problem] = percentage
difficulty = ccc_difficulty(problem)
total_exp = calc_exp.ccc_reward(difficulty)
total_coins = calc_coins.ccc_reward(difficulty)
exp_reward += new_reward(total_exp, old_percentage, percentage)
coin_reward += new_reward(total_coins, old_percentage, percentage)
return exp_reward, coin_reward
RequestException = requests.exceptions.RequestException
| 28.680328
| 78
| 0.66962
|
85cd924f705995d96e2e6733975058b4af7400a0
| 8,965
|
py
|
Python
|
plotly/validators/_heatmap.py
|
omridanan/plotly.py
|
a8d26670cba49ce15ce9b7639ae0f55a6088a825
|
[
"MIT"
] | null | null | null |
plotly/validators/_heatmap.py
|
omridanan/plotly.py
|
a8d26670cba49ce15ce9b7639ae0f55a6088a825
|
[
"MIT"
] | null | null | null |
plotly/validators/_heatmap.py
|
omridanan/plotly.py
|
a8d26670cba49ce15ce9b7639ae0f55a6088a825
|
[
"MIT"
] | 1
|
2019-02-18T04:12:56.000Z
|
2019-02-18T04:12:56.000Z
|
import _plotly_utils.basevalidators
class HeatmapValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name='heatmap', parent_name='', **kwargs):
super(HeatmapValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Heatmap',
data_docs="""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `colorscale`. In case
`colorscale` is unspecified or `autocolorscale`
is true, the default palette will be chosen
according to whether numbers in the `color`
array are all positive, all negative or mixed.
colorbar
plotly.graph_objs.heatmap.ColorBar instance or
dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an
array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named
color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required.
            For example, `[[0, 'rgb(0,0,255)'], [1,
            'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`zmin` and
`zmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Grey
s,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,
Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth
,Electric,Viridis,Cividis.
connectgaps
Determines whether or not gaps (i.e. {nan} or
missing values) in the `z` data are filled in.
customdata
            Assigns extra data to each datum. This may be
useful when listening to hover, click and
            selection events. Note that *scatter* traces
            also append customdata items to the markers'
            DOM elements.
customdatasrc
Sets the source reference on plot.ly for
customdata .
dx
Sets the x coordinate step. See `x0` for more
info.
dy
Sets the y coordinate step. See `y0` for more
info.
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on plot.ly for
hoverinfo .
hoverlabel
plotly.graph_objs.heatmap.Hoverlabel instance
or dict with compatible properties
ids
            Assigns id labels to each datum. These ids are
            used for object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
name
            Sets the trace name. The trace name appears as
the legend item and on hover.
opacity
Sets the opacity of the trace.
reversescale
Reverses the color mapping if true. If true,
`zmin` will correspond to the last color in the
array and `zmax` will correspond to the first
color.
selectedpoints
Array containing integer indices of selected
points. Has an effect only for traces that
support selections. Note that an empty array
means an empty selection where the `unselected`
are turned on for all points, whereas, any
other non-array values means no selection all
where the `selected` and `unselected` styles
have no effect.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
showscale
Determines whether or not a colorbar is
displayed for this trace.
stream
plotly.graph_objs.heatmap.Stream instance or
dict with compatible properties
text
Sets the text elements associated with each z
value.
textsrc
Sets the source reference on plot.ly for text
.
transpose
Transposes the z data.
uid
visible
Determines whether or not this trace is
visible. If *legendonly*, the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the
starting coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x
coordinates and a 2D cartesian x axis. If *x*
(the default value), the x coordinates refer to
`layout.xaxis`. If *x2*, the x coordinates
refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date
data.
xgap
Sets the horizontal gap (in pixels) between
bricks.
xsrc
Sets the source reference on plot.ly for x .
xtype
If *array*, the heatmap's x coordinates are
given by *x* (the default behavior when `x` is
provided). If *scaled*, the heatmap's x
coordinates are given by *x0* and *dx* (the
default behavior when `x` is not provided).
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the
starting coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y
coordinates and a 2D cartesian y axis. If *y*
(the default value), the y coordinates refer to
`layout.yaxis`. If *y2*, the y coordinates
refer to `layout.yaxis2`, and so on.
ycalendar
Sets the calendar system to use with `y` date
data.
ygap
Sets the vertical gap (in pixels) between
bricks.
ysrc
Sets the source reference on plot.ly for y .
ytype
If *array*, the heatmap's y coordinates are
given by *y* (the default behavior when `y` is
            provided). If *scaled*, the heatmap's y
            coordinates are given by *y0* and *dy* (the
            default behavior when `y` is not provided).
z
Sets the z data.
zauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `z`) or the bounds set in `zmin` and `zmax`
Defaults to `false` when `zmin` and `zmax` are
set by the user.
zhoverformat
Sets the hover text formatting rule using d3
formatting mini-languages which are very
similar to those in Python. See: https://github
.com/d3/d3-format/blob/master/README.md#locale_
format
zmax
Sets the upper bound of the color domain. Value
should have the same units as in `z` and if
set, `zmin` must be set as well.
zmin
Sets the lower bound of the color domain. Value
should have the same units as in `z` and if
set, `zmax` must be set as well.
zsmooth
Picks a smoothing algorithm use to smooth `z`
data.
zsrc
Sets the source reference on plot.ly for z .""",
**kwargs
)
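# --- Editor's hedged usage sketch (not part of the generated validator) ---
# The validator above backs plotly.graph_objs.Heatmap; a construction it
# would validate looks like the following. Values are illustrative only, and
# running this block requires plotly to be installed.
if __name__ == '__main__':
    import plotly.graph_objs as go
    trace = go.Heatmap(z=[[1, 2], [3, 4]], colorscale='Viridis', zsmooth='best')
    print(trace)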
| 44.381188
| 72
| 0.52783
|
43363f8a9082a67675bee3e0e79c5b6694a956da
| 13,226
|
py
|
Python
|
sdk/python/pulumi_azure_native/kubernetesconfiguration/get_source_control_configuration.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/kubernetesconfiguration/get_source_control_configuration.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/kubernetesconfiguration/get_source_control_configuration.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetSourceControlConfigurationResult',
'AwaitableGetSourceControlConfigurationResult',
'get_source_control_configuration',
]
@pulumi.output_type
class GetSourceControlConfigurationResult:
"""
The SourceControl Configuration object returned in Get & Put response.
"""
def __init__(__self__, compliance_status=None, configuration_protected_settings=None, enable_helm_operator=None, helm_operator_properties=None, id=None, name=None, operator_instance_name=None, operator_namespace=None, operator_params=None, operator_scope=None, operator_type=None, provisioning_state=None, repository_public_key=None, repository_url=None, ssh_known_hosts_contents=None, system_data=None, type=None):
if compliance_status and not isinstance(compliance_status, dict):
raise TypeError("Expected argument 'compliance_status' to be a dict")
pulumi.set(__self__, "compliance_status", compliance_status)
if configuration_protected_settings and not isinstance(configuration_protected_settings, dict):
raise TypeError("Expected argument 'configuration_protected_settings' to be a dict")
pulumi.set(__self__, "configuration_protected_settings", configuration_protected_settings)
if enable_helm_operator and not isinstance(enable_helm_operator, bool):
raise TypeError("Expected argument 'enable_helm_operator' to be a bool")
pulumi.set(__self__, "enable_helm_operator", enable_helm_operator)
if helm_operator_properties and not isinstance(helm_operator_properties, dict):
raise TypeError("Expected argument 'helm_operator_properties' to be a dict")
pulumi.set(__self__, "helm_operator_properties", helm_operator_properties)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if operator_instance_name and not isinstance(operator_instance_name, str):
raise TypeError("Expected argument 'operator_instance_name' to be a str")
pulumi.set(__self__, "operator_instance_name", operator_instance_name)
if operator_namespace and not isinstance(operator_namespace, str):
raise TypeError("Expected argument 'operator_namespace' to be a str")
pulumi.set(__self__, "operator_namespace", operator_namespace)
if operator_params and not isinstance(operator_params, str):
raise TypeError("Expected argument 'operator_params' to be a str")
pulumi.set(__self__, "operator_params", operator_params)
if operator_scope and not isinstance(operator_scope, str):
raise TypeError("Expected argument 'operator_scope' to be a str")
pulumi.set(__self__, "operator_scope", operator_scope)
if operator_type and not isinstance(operator_type, str):
raise TypeError("Expected argument 'operator_type' to be a str")
pulumi.set(__self__, "operator_type", operator_type)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if repository_public_key and not isinstance(repository_public_key, str):
raise TypeError("Expected argument 'repository_public_key' to be a str")
pulumi.set(__self__, "repository_public_key", repository_public_key)
if repository_url and not isinstance(repository_url, str):
raise TypeError("Expected argument 'repository_url' to be a str")
pulumi.set(__self__, "repository_url", repository_url)
if ssh_known_hosts_contents and not isinstance(ssh_known_hosts_contents, str):
raise TypeError("Expected argument 'ssh_known_hosts_contents' to be a str")
pulumi.set(__self__, "ssh_known_hosts_contents", ssh_known_hosts_contents)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="complianceStatus")
def compliance_status(self) -> 'outputs.ComplianceStatusResponse':
"""
Compliance Status of the Configuration
"""
return pulumi.get(self, "compliance_status")
@property
@pulumi.getter(name="configurationProtectedSettings")
def configuration_protected_settings(self) -> Optional[Mapping[str, str]]:
"""
Name-value pairs of protected configuration settings for the configuration
"""
return pulumi.get(self, "configuration_protected_settings")
@property
@pulumi.getter(name="enableHelmOperator")
def enable_helm_operator(self) -> Optional[bool]:
"""
Option to enable Helm Operator for this git configuration.
"""
return pulumi.get(self, "enable_helm_operator")
@property
@pulumi.getter(name="helmOperatorProperties")
def helm_operator_properties(self) -> Optional['outputs.HelmOperatorPropertiesResponse']:
"""
Properties for Helm operator.
"""
return pulumi.get(self, "helm_operator_properties")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="operatorInstanceName")
def operator_instance_name(self) -> Optional[str]:
"""
Instance name of the operator - identifying the specific configuration.
"""
return pulumi.get(self, "operator_instance_name")
@property
@pulumi.getter(name="operatorNamespace")
def operator_namespace(self) -> Optional[str]:
"""
The namespace to which this operator is installed to. Maximum of 253 lower case alphanumeric characters, hyphen and period only.
"""
return pulumi.get(self, "operator_namespace")
@property
@pulumi.getter(name="operatorParams")
def operator_params(self) -> Optional[str]:
"""
Any Parameters for the Operator instance in string format.
"""
return pulumi.get(self, "operator_params")
@property
@pulumi.getter(name="operatorScope")
def operator_scope(self) -> Optional[str]:
"""
Scope at which the operator will be installed.
"""
return pulumi.get(self, "operator_scope")
@property
@pulumi.getter(name="operatorType")
def operator_type(self) -> Optional[str]:
"""
Type of the operator
"""
return pulumi.get(self, "operator_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource provider.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="repositoryPublicKey")
def repository_public_key(self) -> str:
"""
Public Key associated with this SourceControl configuration (either generated within the cluster or provided by the user).
"""
return pulumi.get(self, "repository_public_key")
@property
@pulumi.getter(name="repositoryUrl")
def repository_url(self) -> Optional[str]:
"""
Url of the SourceControl Repository.
"""
return pulumi.get(self, "repository_url")
@property
@pulumi.getter(name="sshKnownHostsContents")
def ssh_known_hosts_contents(self) -> Optional[str]:
"""
Base64-encoded known_hosts contents containing public SSH keys required to access private Git instances
"""
return pulumi.get(self, "ssh_known_hosts_contents")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Top level metadata https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSourceControlConfigurationResult(GetSourceControlConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSourceControlConfigurationResult(
compliance_status=self.compliance_status,
configuration_protected_settings=self.configuration_protected_settings,
enable_helm_operator=self.enable_helm_operator,
helm_operator_properties=self.helm_operator_properties,
id=self.id,
name=self.name,
operator_instance_name=self.operator_instance_name,
operator_namespace=self.operator_namespace,
operator_params=self.operator_params,
operator_scope=self.operator_scope,
operator_type=self.operator_type,
provisioning_state=self.provisioning_state,
repository_public_key=self.repository_public_key,
repository_url=self.repository_url,
ssh_known_hosts_contents=self.ssh_known_hosts_contents,
system_data=self.system_data,
type=self.type)
def get_source_control_configuration(cluster_name: Optional[str] = None,
cluster_resource_name: Optional[str] = None,
cluster_rp: Optional[str] = None,
resource_group_name: Optional[str] = None,
source_control_configuration_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSourceControlConfigurationResult:
"""
The SourceControl Configuration object returned in Get & Put response.
API Version: 2021-03-01.
:param str cluster_name: The name of the kubernetes cluster.
:param str cluster_resource_name: The Kubernetes cluster resource name - either managedClusters (for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:param str cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:param str resource_group_name: The name of the resource group.
:param str source_control_configuration_name: Name of the Source Control Configuration.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['clusterResourceName'] = cluster_resource_name
__args__['clusterRp'] = cluster_rp
__args__['resourceGroupName'] = resource_group_name
__args__['sourceControlConfigurationName'] = source_control_configuration_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:kubernetesconfiguration:getSourceControlConfiguration', __args__, opts=opts, typ=GetSourceControlConfigurationResult).value
return AwaitableGetSourceControlConfigurationResult(
compliance_status=__ret__.compliance_status,
configuration_protected_settings=__ret__.configuration_protected_settings,
enable_helm_operator=__ret__.enable_helm_operator,
helm_operator_properties=__ret__.helm_operator_properties,
id=__ret__.id,
name=__ret__.name,
operator_instance_name=__ret__.operator_instance_name,
operator_namespace=__ret__.operator_namespace,
operator_params=__ret__.operator_params,
operator_scope=__ret__.operator_scope,
operator_type=__ret__.operator_type,
provisioning_state=__ret__.provisioning_state,
repository_public_key=__ret__.repository_public_key,
repository_url=__ret__.repository_url,
ssh_known_hosts_contents=__ret__.ssh_known_hosts_contents,
system_data=__ret__.system_data,
type=__ret__.type)
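# Illustrative usage sketch (not part of the generated SDK): how the lookup above
# might be called from a Pulumi program. Every resource name below is a
# hypothetical placeholder, not a value taken from this module.
def _example_lookup_source_control_configuration():
    result = get_source_control_configuration(
        cluster_name="example-aks-cluster",                   # hypothetical
        cluster_resource_name="managedClusters",              # AKS clusters
        cluster_rp="Microsoft.ContainerService",              # AKS resource provider
        resource_group_name="example-resource-group",         # hypothetical
        source_control_configuration_name="example-config")   # hypothetical
    # the awaited result exposes the read-only properties defined above
    return result.repository_public_key, result.provisioning_state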
| 46.407018
| 419
| 0.6987
|
c71e307552cce160effb2e619ccca3cfaec5443c
| 7,821
|
py
|
Python
|
core/main.py
|
weijianxing/APITestMaid
|
48649cf2199ab90708a4441d6fdc58a235cb4a28
|
[
"MIT"
] | 1
|
2021-03-01T03:09:20.000Z
|
2021-03-01T03:09:20.000Z
|
core/main.py
|
weijianxing/APITestMaid
|
48649cf2199ab90708a4441d6fdc58a235cb4a28
|
[
"MIT"
] | null | null | null |
core/main.py
|
weijianxing/APITestMaid
|
48649cf2199ab90708a4441d6fdc58a235cb4a28
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
# ------ wuage.com testing team ---------
# __author__ : weijx.cpp@gmail.com
import json
import os
import sys
import traceback
from abc import ABCMeta
import fire
from com.wuage.testing.helper.chatbotHelper import DingtalkChatbot
from core.MetaCases import MetaCases, APIRequest
from util import logger
from util.generatorReport.Generator import Generator
from util.loadConfig import load_folder_files, load_file
def log_assertFail():
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
filename, line, func, text = tb_info[-1]
return 'Assert fail on line {} in statement {}'.format(line, text)
def print_exception():
import linecache
linecache.checkcache()
efile = sys.stderr
typ, val, tb = excinfo = sys.exc_info()
sys.last_type, sys.last_value, sys.last_traceback = excinfo
tbe = traceback.extract_tb(tb)
    print('\nTraceback (most recent call last):', file=efile)
traceback.print_list(tbe, file=efile)
lines = traceback.format_exception_only(typ, val)
for line in lines:
        print(line, file=efile)
class Main(metaclass = ABCMeta):
def __init__(self,caseDir="/",configFileName = "/"):
"""
:param caseDir: execution cases dir
:param configFileName: configuration file dir
"""
if caseDir == "/":
self.metacasedir = os.getcwd() + caseDir + "/metacase"
self.testcasedir = os.getcwd() + caseDir + "/testcase"
self.testscenariodir = os.getcwd() + caseDir + "testscenario"
else:
self.metacasedir = os.getcwd() + caseDir
self.testcasedir = os.getcwd() + caseDir
self.testscenariodir = os.getcwd() + caseDir
if configFileName == "/":
self.confdir = os.getcwd()+configFileName + "/config/alter.yaml"
else:
self.confdir = os.getcwd() + configFileName
logger.log_info("load matacases from : " + self.metacasedir)
logger.log_info("load testcases from : " + self.testcasedir)
logger.log_info("load testscenarios from : " + self.testscenariodir)
self._metacases = {}
self.resultdatafileName = 'util/Data/reportData.json'
self.exec_result = list()
self.htmlReportfileName = ""
def __del__(self):
#todo filter execution result.
with open(self.resultdatafileName, 'w', encoding='utf-8') as f:
json.dump(self.exec_result, f, ensure_ascii=False, indent=4)
f.close()
pass
def load_metaCases(self):
self.filelist = load_folder_files(self.metacasedir)
for casefile in self.filelist:
logger.log_info("begin loading matacase: " + casefile)
meta = MetaCases(casefile)
cases = meta.getCases()
logger.log_info("meta cases : " + str(meta))
# dict key is file: value is matacases.
self._metacases[casefile] = cases
def execute_metacases(self):
"""
        Check and filter the test cases to run, then execute them.
:return:
"""
for filename , metacases in self._metacases.items():
logger.log_info("begin execute metacases : " + filename)
for metacase in metacases:
result = {}
dirs = str(filename).split(os.sep)
result["Application"] = dirs[-1]
result["Environment"] = ""
result["Id"] = metacase.caseName
result["Test-Case"] = metacase.caseName
result["APIType"] = metacase.apiType
result["Error"] = []
try:
#todo deal with type cases [skipped,]
metacase.execute_verify()
result["Result"] = "Passed"
# except AssertionError:
except Exception :
# _, _, tb = sys.exc_info()
# traceback.print_tb(tb) # Fixed format
formatted_lines = traceback.format_exc().splitlines()
# print(str(formatted_lines))
# exc_type, exc_value, exc_traceback = sys.exc_info()
# print("format execption.")
# print(repr(traceback.format_exception(exc_type, exc_value,
# exc_traceback)))
# tb_info = traceback.extract_tb(tb)
# filename, line, func, text = tb_info[-1]
msg = 'Assert fail : {0}'.format(str(formatted_lines))
logger.log_error("execute fail: " + msg)
result["Result"] = "Failed"
# msg = log_assertFail()
# logger.log_error("asert fail : "+ msg)
errorInfos = []
infos = {}
infos["Description"] = msg
infos["Assert"] = "xxx"
infos["Screenshot"] = {
"Name": "Screenshot-1.png",
"Url": "#"
}
errorInfos.append(infos)
result["Error"] = errorInfos
finally:
self.exec_result.append(result)
                # self._metacases.append(metacase)
def execute_testcases(self):
pass
def execute_testscenarios(self):
pass
def genarate_testReport(self):
"""
        Generate the test report.
:return:
"""
a = Generator(appName="search_report",
templatePath="util/Templates/",
reportPath="",
dataPath=self.resultdatafileName)
a.generate_html()
pass
def getFailInfo(self):
"""
        Check the execution results; when a failed case is found, return its model name and error info.
:return:
"""
for result in self.exec_result:
if result["Result"] == "Failed":
failmodel = result["Application"]
error = str(result["Error"])
return failmodel, error
logger.log_info("execute success.")
return None,None
def send_failMsg(self,failLink):
"""
        Check the execution results and send failure details to the DingTalk group.
        Internal method; when driven from the CLI, the data should be read from a file.
:return:
"""
failmodel, errorInfo = self.getFailInfo()
print("send fail info: " + str(failmodel))
print("send error info : " + str(errorInfo))
if failmodel is not None:
#loading sending detail.
conf = load_file(self.confdir)
alter = conf["sendMsgWhenFail"]
if str(alter) == 'False':
                logger.log_info("alerting on failure is not configured; skip sending the alert message.")
return
token = conf["dingToken"]
ding = DingtalkChatbot(webhook=token)
subTitle = "API model {0} execute fail:".format(failmodel)
print(subTitle)
at_mobiles = conf["atowners"]
print(at_mobiles)
ding.send_failInfo(title="API testing.",
subTitle=subTitle
, owner=conf["owner"]
, at_mobiles=at_mobiles
,reporturl=failLink
,caseInfo= errorInfo)
else:
logger.log_info("skip sending msg.")
def test_meta(caseDir = "/"):
"""
    Execute all standalone metacases.
    :param caseDir: directory holding the cases; defaults to the current path. A custom path is only supported as a sub-directory of the current working directory.
:return:
"""
main = Main(caseDir)
main.load_metaCases()
main.execute_metacases()
def main2():
fire.Fire()
# if __name__ == '__main__':
#
# fire.Fire()
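# Illustrative usage sketch (not part of the original module): running the whole
# pipeline programmatically instead of through fire. The "/cases" directory and
# the report link are hypothetical placeholders.
def _example_run_pipeline(case_dir="/cases", report_link="#"):
    runner = Main(case_dir)
    runner.load_metaCases()
    runner.execute_metacases()
    runner.genarate_testReport()
    runner.send_failMsg(report_link)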
| 34.76
| 86
| 0.540979
|
8926c843d46f6f5407d279062dc97b65be11c62d
| 11,144
|
py
|
Python
|
leetcode_python/Array/employee-free-time.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
leetcode_python/Array/employee-free-time.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
leetcode_python/Array/employee-free-time.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
"""
LC 759
We are given a list schedule of employees, which represents the working time for each employee.
Each employee has a list of non-overlapping Intervals, and these intervals are in sorted order.
Return the list of finite intervals representing common, positive-length free time for all employees, also in sorted order.
Example 1:
Input: schedule = [[[1,2],[5,6]],[[1,3]],[[4,10]]]
Output: [[3,4]]
Explanation:
There are a total of three employees, and all common
free time intervals would be [-inf, 1], [3, 4], [10, inf].
We discard any intervals that contain inf as they aren't finite.
Example 2:
Input: schedule = [[[1,3],[6,7]],[[2,4]],[[2,5],[9,12]]]
Output: [[5,6],[7,9]]
(Even though we are representing Intervals in the form [x, y], the objects inside are Intervals, not lists or arrays. For example, schedule[0][0].start = 1, schedule[0][0].end = 2, and schedule[0][0][0] is not defined.)
Also, we wouldn't include intervals like [5, 5] in our answer, as they have zero length.
Note:
schedule and schedule[i] are lists with lengths in range [1, 50].
0 <= schedule[i].start < schedule[i].end <= 10^8.
"""
# V0
# V1
# IDEA : heapq
# https://leetcode.com/problems/employee-free-time/discuss/1805842/Python-Solution
class Solution:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
pq = []
for i, s in enumerate(schedule):
heapq.heappush(pq, (s[0].start, i, 0))
prev = pq[0][0]
res = []
while pq:
s, i, j = heapq.heappop(pq)
if s > prev:
res.append(Interval(prev, s))
prev = max(prev, schedule[i][j].end)
if j + 1 < len(schedule[i]):
heapq.heappush(pq, (schedule[i][j + 1].start, i, j + 1))
return res
# V1'
# IDEA : SORT by start
# https://leetcode.com/problems/employee-free-time/discuss/408522/Python-sort-by-start
class Solution:
def employeeFreeTime(self, schedule: 'list<list<Interval>>') -> 'list<Interval>':
# sort all intervals by start time
intervals = []
for timeSlots in schedule:
for timeSlot in timeSlots:
intervals.append(timeSlot)
intervals.sort(key= lambda x: x.start)
# Maintain left and right pointer pointing to the interval
left = right = 0
commonFree = []
# iterate through intervals with left pointer
# if left.start > right.end, append the common free time to the ans
        # else, assign right to the latest-ended interval we have visited so far.
while left < len(intervals) - 1:
left += 1
if intervals[left].start > intervals[right].end:
commonFree.append(Interval(intervals[right].end,intervals[left].start))
right = left
else:
if intervals[left].end > intervals[right].end:
right = left
return commonFree
# V1''
# https://leetcode.com/problems/employee-free-time/discuss/877358/Python-O(N-log-K)-heap-solution
# IDEA :
# I merge the intervals while keeping the heap size less than or equal to K. It is only less than K when I've already popped out all the elements from a certain employee. My solution treats each employee's intervals as a queue. pop(0) in Python is O(n), so if you wanted you could convert each interval list to a deque so that pop(0) is O(1) and it functions as an actual queue, but I just pretended pop(0) was O(1) for demonstration purposes.
from heapq import heappush, heappop

class Solution:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
k = len(schedule)
intervals = []
heap = []
for i in range(k):
val = schedule[i].pop(0)
heappush(heap, ([val.start, val.end], i))
elem = [heap[0][0][0], heap[0][0][1]]
while heap:
val = heappop(heap)
start, end, idx = val[0][0], val[0][1], val[1]
if start > elem[1]:
intervals.append(elem)
elem = [start, end]
else:
elem = [min(elem[0], start), max(elem[1], end)]
if schedule[idx]:
item = schedule[idx].pop(0)
heappush(heap, ([item.start, item.end], idx))
intervals.append(elem)
out = []
for i in range(1, len(intervals)):
out.append(Interval(intervals[i-1][1], intervals[i][0]))
return out
# V1'''
# IDEA : SORT
# https://leetcode.com/problems/employee-free-time/discuss/1039353/pythonjava-solution
class Solution:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
listSchedule = []
for i in schedule:
for j in i:
listSchedule.append([j.start, j.end])
listSchedule.sort(key = lambda x: (x[0], x[1]))
minStart = listSchedule[0][0]
maxEnd = listSchedule[0][1]
res = []
for curStart, curEnd in listSchedule:
if curStart <= maxEnd:
maxEnd = max(maxEnd, curEnd)
else:
res.append(Interval(maxEnd, curStart))
minStart = curStart
maxEnd = curEnd
return res
# V1''''
# IDEA : bisect
# https://leetcode.com/problems/employee-free-time/discuss/113142/Python-with-bisect
from bisect import *
class Solution(object):
def employeeFreeTime(self, avails):
time=[-float('inf'),float('inf')]
for p in avails:
for itv in p:
s=itv.start
e=itv.end
l=bisect_right(time,s)
r=bisect_left(time,e)
if l%2:
if r%2:
time=time[:l]+[s,e]+time[r:]
else:
time=time[:l]+[s]+time[r:]
else:
if r%2:
time=time[:l]+[e]+time[r:]
else:
time=time[:l]+time[r:]
ans=[]
for i in range(3,len(time)-2,2):
if time[i-1]<time[i]:
ans.append(Interval(time[i-1],time[i]))
return ans
# V1'''''
# https://zxi.mytechroad.com/blog/geometry/leetcode-759-employee-free-time/
# https://www.youtube.com/watch?v=4XiZ-mVxvbk
# C++
# // Author: Huahua
# // Running time: 81 ms
# class Solution {
# public:
# vector<Interval> employeeFreeTime(vector<vector<Interval>>& schedule) {
# vector<Interval> all;
# for (const auto intervals : schedule)
# all.insert(all.end(), intervals.begin(), intervals.end());
# std::sort(all.begin(), all.end(),
# [](const Interval& a, const Interval& b){
# return a.start < b.start;
# });
# vector<Interval> ans;
# int end = all.front().end;
# for (const Interval& busy : all) {
# if (busy.start > end)
# ans.emplace_back(end, busy.start);
# end = max(end, busy.end);
# }
# return ans;
# }
# };
# V1''''''
# https://www.acwing.com/file_system/file/content/whole/index/content/2808852/
class Solution:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
#TC: O(n log n)
#SC: O(n) n = len(schedule)
#Combine the problem of LC56 and LC986
allInterval = []
for schedule_per_em in schedule:
for interval in schedule_per_em:
allInterval.append(interval)
allInterval.sort(key = lambda x: x.start)
Start = allInterval[0].start
End = allInterval[0].end
res = []
for i in range(1, len(allInterval)):
if End >= allInterval[i].start:
End = max(End, allInterval[i].end)
else:
Start = allInterval[i].start
res.append(Interval(End, Start))
End = allInterval[i].end
return res
# V1'''''''
# https://github.com/xiaoningning/LeetCode-Python/blob/master/759%20Employee%20Free%20Time.py
# https://github.com/DataStudySquad/LeetCode-5/blob/master/759%20Employee%20Free%20Time.py
from typing import List
import heapq
S = 0
E = 1
class Solution:
def employeeFreeTime(self, schedule: List[List[List[int]]]) -> List[List[int]]:
"""
        Method 1
        Look at the head of each employee's list through an index.
        Merge the head intervals: they need to be processed in sorted order, so use a heap.
        No full merge is actually needed: track the max end time seen so far and
        compare it to the next min start time to find the open (free) intervals.
        Method 2
        A cleaner way to find the open intervals:
        for [s, e], think of this as two events: balance++ when time = s, and
        balance-- when time = e. We want to know the regions where balance == 0.
        Similar to Meeting Rooms II.
"""
cur_max_end = min(
itv[E]
for itvs in schedule
for itv in itvs
)
q = []
for i, itvs in enumerate(schedule):
# head
j = 0
itv = itvs[j]
heapq.heappush(q, (itv[S], i, j))
ret = []
while q:
_, i, j = heapq.heappop(q)
itv = schedule[i][j]
if cur_max_end < itv[S]:
ret.append([cur_max_end, itv[S]])
cur_max_end = max(cur_max_end, itv[E])
# next
j += 1
if j < len(schedule[i]):
itv = schedule[i][j]
heapq.heappush(q, (itv[S], i, j))
return ret
def employeeFreeTime(self, schedule: List[List[List[int]]]) -> List[List[int]]:
"""
Method 2
"""
# flatten the nested list
lst = []
for itvs in schedule:
for itv in itvs:
lst.append([itv[S], S])
lst.append([itv[E], E])
lst.sort()
count = 0
prev = None
ret = []
for t, flag in lst:
if count == 0 and prev:
ret.append([prev, t])
if flag == S:
count += 1
else:
prev = t
count -= 1
return ret
def employeeFreeTime_error(self, schedule: List[List[List[int]]]) -> List[List[int]]:
"""
Cannot store iterator in the heap to compare
use index instead
"""
schedules = list(map(iter, schedule))
cur_max_end = min(
itv[E]
for emp in schedule
for itv in emp
)
q = []
for emp_iter in schedules:
itv = next(emp_iter, None)
if itv:
heapq.heappush(q, (itv[S], itv, emp_iter))
ret = []
while q:
_, itv, emp_iter = heapq.heappop(q)
if cur_max_end < itv[S]:
ret.append([cur_max_end, itv[S]])
cur_max_end = max(cur_max_end, itv[E])
itv = next(emp_iter, None)
if itv:
heapq.heappush(q, (itv[S], itv, emp_iter))
return ret
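# Illustrative usage sketch (not part of the quoted solutions): exercising the
# plain-list "Method 2" sweep on Example 1 from the problem statement above;
# the expected output [[3, 4]] comes directly from that docstring.
if __name__ == "__main__":
    demo_schedule = [[[1, 2], [5, 6]], [[1, 3]], [[4, 10]]]
    # the most recent Solution.employeeFreeTime definition (Method 2) accepts
    # plain nested lists, so no Interval class is needed here
    print(Solution().employeeFreeTime(demo_schedule))  # -> [[3, 4]]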
# V2
| 33.465465
| 445
| 0.541906
|
865b158109bac8040ef8ca6db6a55589443946b2
| 16,205
|
py
|
Python
|
code/hopenet.py
|
KelvinCPChiu/deep-head-pose
|
125c9085dd12c88fb32dba6eea874020621432c2
|
[
"Apache-2.0"
] | null | null | null |
code/hopenet.py
|
KelvinCPChiu/deep-head-pose
|
125c9085dd12c88fb32dba6eea874020621432c2
|
[
"Apache-2.0"
] | null | null | null |
code/hopenet.py
|
KelvinCPChiu/deep-head-pose
|
125c9085dd12c88fb32dba6eea874020621432c2
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
class Hopenet(nn.Module):
# Hopenet with 3 output layers for yaw, pitch and roll
# Predicts Euler angles by binning and regression with the expected value
def __init__(self, block, layers, num_bins):
self.inplanes = 64
super(Hopenet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc_yaw = nn.Linear(512 * block.expansion, num_bins)
self.fc_pitch = nn.Linear(512 * block.expansion, num_bins)
self.fc_roll = nn.Linear(512 * block.expansion, num_bins)
self.dropout = nn.Dropout(p=0.5)
# Vestigial layer from previous experiments
self.fc_finetune = nn.Linear(512 * block.expansion + 3, 3)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
pre_yaw = self.fc_yaw(x)
pre_pitch = self.fc_pitch(x)
pre_roll = self.fc_roll(x)
return pre_yaw, pre_pitch, pre_roll
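# Illustrative usage sketch: building a Hopenet backbone and decoding the binned
# logits into continuous angles via the expected value. The 3-degree bins offset
# by -99 mirror the convention used in Hopenet_VAE.loss_function below; the
# torchvision Bottleneck block and the [3, 4, 6, 3] (ResNet-50) layout are
# assumptions made for this example only.
def _example_hopenet_inference():
    import torchvision
    model = Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
    model.eval()
    images = torch.randn(2, 3, 224, 224)        # dummy batch of RGB crops
    with torch.no_grad():
        yaw_logits, pitch_logits, roll_logits = model(images)
    bin_idx = torch.arange(66, dtype=torch.float)
    # expected value over the softmaxed bins -> degrees
    yaw_deg = torch.sum(F.softmax(yaw_logits, dim=1) * bin_idx, dim=1) * 3 - 99
    return yaw_deg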
class ResNet(nn.Module):
# ResNet for regression of 3 Euler angles.
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc_angles = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc_angles(x)
return x
class AlexNet(nn.Module):
# AlexNet laid out as a Hopenet - classify Euler angles in bins and
# regress the expected value.
def __init__(self, num_bins):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
)
self.fc_yaw = nn.Linear(4096, num_bins)
self.fc_pitch = nn.Linear(4096, num_bins)
self.fc_roll = nn.Linear(4096, num_bins)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
yaw = self.fc_yaw(x)
pitch = self.fc_pitch(x)
roll = self.fc_roll(x)
return yaw, pitch, roll
class Hopenet_VAE(Hopenet):
def __init__(self, block, layers, num_bins, alpha, beta):
super(Hopenet_VAE, self).__init__(block, layers, num_bins)
super(Hopenet_VAE, self)
self.yaw_mean = nn.Linear(512 * block.expansion, 512)
self.yaw_logvar = nn.Linear(512 * block.expansion, 512)
self.roll_mean = nn.Linear(512 * block.expansion, 512)
self.roll_logvar = nn.Linear(512 * block.expansion, 512)
self.pitch_mean = nn.Linear(512 * block.expansion, 512)
self.pitch_logvar = nn.Linear(512 * block.expansion, 512)
        self.dropout = nn.Dropout(p=0.5)
self.fc_yaw = nn.Linear(512, num_bins)
self.fc_pitch = nn.Linear(512, num_bins)
self.fc_roll = nn.Linear(512, num_bins)
self.CEL = nn.CrossEntropyLoss()
self.MSE = nn.MSELoss()
self.softmax = nn.Softmax(dim=1)
        # register constants as buffers so they follow the module across devices
        self.register_buffer('alpha', torch.tensor(alpha, dtype=torch.float))
        self.register_buffer('beta', torch.tensor(beta, dtype=torch.float))
        idx_tensor = [idx for idx in range(66)]
        self.register_buffer('idx_tensor', torch.tensor(idx_tensor, dtype=torch.float))
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
pre_yaw_ = self.yaw_mean(x)
logvar_yaw = self.yaw_logvar(x)
pre_roll_ = self.roll_mean(x)
logvar_roll = self.roll_logvar(x)
pre_pitch_ = self.pitch_mean(x)
logvar_pitch = self.pitch_logvar(x)
pre_yaw = self.reparameterize(pre_yaw_, logvar_yaw)
pre_pitch = self.reparameterize(pre_pitch_, logvar_pitch)
pre_roll = self.reparameterize(pre_roll_, logvar_roll)
pre_yaw = self.fc_yaw(pre_yaw)
pre_pitch = self.fc_pitch(pre_pitch)
        pre_roll = self.fc_roll(pre_roll)
return (pre_yaw, pre_yaw_, logvar_yaw), (pre_pitch, pre_pitch_, logvar_pitch), (pre_roll, pre_roll_, logvar_roll)
def reparameterize(self, mean, logvar):
if self.training:
std = torch.exp(logvar*0.5)
norm = torch.randn_like(std, requires_grad=False)
return mean + std*norm
else:
return mean
def loss_function(self, mean, logvar, pre, label, label_cont):
        KLD = -0.5 * torch.sum(1 + logvar - mean.pow(2) - torch.exp(logvar))
        # CrossEntropyLoss applies log-softmax internally, so it takes the raw logits;
        # softmax is only needed once, for the expected-value (regression) term.
        CEL = self.CEL(pre, label)
        pre = self.softmax(pre)
        pre = torch.sum(pre * self.idx_tensor, 1) * 3 - 99
        MSE = self.MSE(pre, label_cont)
        return KLD + self.alpha * CEL + self.beta * MSE
class MobileNetV1(nn.Module):
def __init__(self):
super(MobileNetV1, self).__init__()
def conv_sd(in_channels, out_channels, stride, kernel_size):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def conv_dw(in_channels, out_channels, stride):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3,
stride=stride, groups=in_channels, bias=False),
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                          kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
self.model = nn.Sequential(
conv_sd(3, 32, 2, 3),
conv_dw(32, 32, 1),
conv_sd(32, 64, 1, 1),
conv_dw(64, 64, 2),
conv_sd(64, 128, 1, 1),
conv_dw(128, 128, 1),
conv_sd(128, 128, 1, 1),
conv_dw(128, 128, 2),
            conv_sd(128, 128, 1, 1),
conv_dw(128, 128, 1),
conv_sd(128, 128, 1, 1),
conv_dw(128, 128, 2),
nn.AvgPool2d(7),
)
self.fc = nn.Linear(1024, 3)
def forward(self, x):
x = self.model(x)
x = x.view(-1, 1024)
x = self.fc(x)
return x
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
""" https://github.com/tonylins/pytorch-mobilenet-v2/blob/master/MobileNetV2.py """
def __init__(self, n_class=66, input_size=224, width_mult=1.):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = int(32)
last_channel = int(1280)
interverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
assert input_size % 32 == 0
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
self.features = [conv_bn(3, input_channel, 2)]
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
# building last several layers
self.features.append(conv_1x1_bn(input_channel, self.last_channel))
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# building classifier
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(self.last_channel, n_class),
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.mean(3).mean(2)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class MobileNetV2_angle_header(MobileNetV2):
def __init__(self, n_class=66, input_size=224, width_mult=1.):
super(MobileNetV2_angle_header, self).__init__(n_class=n_class, input_size=input_size, width_mult=width_mult)
super(MobileNetV2_angle_header, self)
self._initialize_weights()
self.dropout = nn.Dropout(p=0.5)
self.fc_yaw = nn.Linear(self.last_channel, n_class)
self.fc_pitch = nn.Linear(self.last_channel, n_class)
self.fc_roll = nn.Linear(self.last_channel, n_class)
def forward(self, x):
x = self.features(x)
x = x.mean(3).mean(2)
x = self.dropout(x)
pre_yaw = self.fc_yaw(x)
pre_pitch = self.fc_pitch(x)
pre_roll = self.fc_roll(x)
return pre_yaw, pre_pitch, pre_roll
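# Illustrative usage sketch: the MobileNetV2-based variant above exposes the same
# three-logit (yaw, pitch, roll) interface as Hopenet. The 66-bin, 224x224 setup
# below simply restates the class defaults for clarity.
def _example_mobilenet_angle_header():
    model = MobileNetV2_angle_header(n_class=66, input_size=224, width_mult=1.)
    model.eval()
    with torch.no_grad():
        yaw, pitch, roll = model(torch.randn(1, 3, 224, 224))
    return yaw.shape, pitch.shape, roll.shape   # each is torch.Size([1, 66])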
| 34.849462
| 121
| 0.567417
|
94835202b42f72126971db2aa12a9db8c6614234
| 2,515
|
py
|
Python
|
networkx/algorithms/tests/test_distance_regular.py
|
argriffing/networkx
|
5a3d000e605be2ca567f69a4694afcba3b8acb54
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/tests/test_distance_regular.py
|
argriffing/networkx
|
5a3d000e605be2ca567f69a4694afcba3b8acb54
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/tests/test_distance_regular.py
|
argriffing/networkx
|
5a3d000e605be2ca567f69a4694afcba3b8acb54
|
[
"BSD-3-Clause"
] | null | null | null |
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
import networkx as nx
from networkx import is_strongly_regular
class TestDistanceRegular(object):
def test_is_distance_regular(self):
assert_true(nx.is_distance_regular(nx.icosahedral_graph()))
assert_true(nx.is_distance_regular(nx.petersen_graph()))
assert_true(nx.is_distance_regular(nx.cubical_graph()))
assert_true(nx.is_distance_regular(nx.complete_bipartite_graph(3,3)))
assert_true(nx.is_distance_regular(nx.tetrahedral_graph()))
assert_true(nx.is_distance_regular(nx.dodecahedral_graph()))
assert_true(nx.is_distance_regular(nx.pappus_graph()))
assert_true(nx.is_distance_regular(nx.heawood_graph()))
assert_true(nx.is_distance_regular(nx.cycle_graph(3)))
# no distance regular
assert_false(nx.is_distance_regular(nx.path_graph(4)))
def test_not_connected(self):
G=nx.cycle_graph(4)
G.add_cycle([5,6,7])
assert_false(nx.is_distance_regular(G))
def test_global_parameters(self):
b,c=nx.intersection_array(nx.cycle_graph(5))
g=nx.global_parameters(b,c)
assert_equal(list(g),[(0, 0, 2), (1, 0, 1), (1, 1, 0)])
b,c=nx.intersection_array(nx.cycle_graph(3))
g=nx.global_parameters(b,c)
assert_equal(list(g),[(0, 0, 2), (1, 1, 0)])
def test_intersection_array(self):
b,c=nx.intersection_array(nx.cycle_graph(5))
assert_equal(b,[2, 1])
assert_equal(c,[1, 1])
b,c=nx.intersection_array(nx.dodecahedral_graph())
assert_equal(b,[3, 2, 1, 1, 1])
assert_equal(c,[1, 1, 1, 2, 3])
b,c=nx.intersection_array(nx.icosahedral_graph())
assert_equal(b,[5, 2, 1])
assert_equal(c,[1, 2, 5])
class TestStronglyRegular(object):
"""Unit tests for the :func:`~networkx.is_strongly_regular`
function.
"""
def test_cycle_graph(self):
"""Tests that the cycle graph on five vertices is strongly
regular.
"""
G = nx.cycle_graph(5)
assert_true(is_strongly_regular(G))
def test_petersen_graph(self):
"""Tests that the Petersen graph is strongly regular."""
G = nx.petersen_graph()
assert_true(is_strongly_regular(G))
def test_path_graph(self):
"""Tests that the path graph is not strongly regular."""
G = nx.path_graph(4)
assert_false(is_strongly_regular(G))
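# Illustrative sketch (not part of the test suite): the quantities exercised above
# can be inspected directly; for the Petersen graph the intersection array is
# ([3, 2], [1, 1]) and the graph is strongly regular, matching the assertions.
if __name__ == '__main__':
    b, c = nx.intersection_array(nx.petersen_graph())
    print(b, c)                                      # [3, 2] [1, 1]
    print(is_strongly_regular(nx.petersen_graph()))  # True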
| 34.452055
| 77
| 0.667197
|
6a1ea6963abd61deaab0a384353a6a8f29667965
| 6,662
|
py
|
Python
|
tf_privacy/optimizers/dp_optimizer_vectorized.py
|
frhrdr/MMD-GAN
|
7522093498b658026344541ddd5c248095763fb6
|
[
"Apache-2.0"
] | null | null | null |
tf_privacy/optimizers/dp_optimizer_vectorized.py
|
frhrdr/MMD-GAN
|
7522093498b658026344541ddd5c248095763fb6
|
[
"Apache-2.0"
] | null | null | null |
tf_privacy/optimizers/dp_optimizer_vectorized.py
|
frhrdr/MMD-GAN
|
7522093498b658026344541ddd5c248095763fb6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vectorized differentially private optimizers for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from distutils.version import LooseVersion
import tensorflow as tf
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
nest = tf.contrib.framework.nest
AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
AdamOptimizer = tf.compat.v1.train.AdamOptimizer
GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP # pylint: disable=invalid-name
else:
nest = tf.nest
AdagradOptimizer = tf.optimizers.Adagrad
AdamOptimizer = tf.optimizers.Adam
GradientDescentOptimizer = tf.optimizers.SGD # pylint: disable=invalid-name
parent_code = tf.optimizers.Optimizer._compute_gradients.__code__ # pylint: disable=protected-access
GATE_OP = None # pylint: disable=invalid-name
def make_vectorized_optimizer_class(cls):
"""Constructs a vectorized DP optimizer class from an existing one."""
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
child_code = cls.compute_gradients.__code__
else:
child_code = cls._compute_gradients.__code__ # pylint: disable=protected-access
if child_code is not parent_code:
tf.logging.warning(
'WARNING: Calling make_optimizer_class() on class %s that overrides '
'method compute_gradients(). Check to ensure that '
'make_optimizer_class() does not interfere with overridden version.',
cls.__name__)
class DPOptimizerClass(cls):
"""Differentially private subclass of given class cls."""
def __init__(
self,
l2_norm_clip,
noise_multiplier,
num_microbatches=None,
*args, # pylint: disable=keyword-arg-before-vararg, g-doc-args
**kwargs):
"""Initialize the DPOptimizerClass.
Args:
l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)
noise_multiplier: Ratio of the standard deviation to the clipping norm
num_microbatches: How many microbatches into which the minibatch is
split. If None, will default to the size of the minibatch, and
per-example gradients will be computed.
"""
super(DPOptimizerClass, self).__init__(*args, **kwargs)
self._l2_norm_clip = l2_norm_clip
self._noise_multiplier = noise_multiplier
self._num_microbatches = num_microbatches
def compute_gradients(self,
loss,
var_list,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None,
gradient_tape=None):
if callable(loss):
# TF is running in Eager mode
raise NotImplementedError('Vectorized optimizer unavailable for TF2.')
else:
# TF is running in graph mode, check we did not receive a gradient tape.
if gradient_tape:
raise ValueError('When in graph mode, a tape should not be passed.')
batch_size = tf.shape(loss)[0]
if self._num_microbatches is None:
self._num_microbatches = batch_size
# Note: it would be closer to the correct i.i.d. sampling of records if
# we sampled each microbatch from the appropriate binomial distribution,
# although that still wouldn't be quite correct because it would be
# sampling from the dataset without replacement.
microbatch_losses = tf.reshape(loss, [self._num_microbatches, -1])
if var_list is None:
var_list = (
tf.trainable_variables() + tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
def process_microbatch(microbatch_loss):
"""Compute clipped grads for one microbatch."""
microbatch_loss = tf.reduce_mean(microbatch_loss)
grads, _ = zip(*super(DPOptimizerClass, self).compute_gradients(
microbatch_loss,
var_list,
gate_gradients,
aggregation_method,
colocate_gradients_with_ops,
grad_loss))
grads_list = [
g if g is not None else tf.zeros_like(v)
for (g, v) in zip(list(grads), var_list)
]
# Clip gradients to have L2 norm of l2_norm_clip.
# Here, we use TF primitives rather than the built-in
# tf.clip_by_global_norm() so that operations can be vectorized
# across microbatches.
grads_flat = nest.flatten(grads_list)
squared_l2_norms = [tf.reduce_sum(tf.square(g)) for g in grads_flat]
global_norm = tf.sqrt(tf.add_n(squared_l2_norms))
div = tf.maximum(global_norm / self._l2_norm_clip, 1.)
clipped_flat = [g / div for g in grads_flat]
clipped_grads = nest.pack_sequence_as(grads_list, clipped_flat)
return clipped_grads
clipped_grads = tf.vectorized_map(process_microbatch, microbatch_losses)
def reduce_noise_normalize_batch(stacked_grads):
summed_grads = tf.reduce_sum(stacked_grads, axis=0)
noise_stddev = self._l2_norm_clip * self._noise_multiplier
noise = tf.random.normal(tf.shape(summed_grads),
stddev=noise_stddev)
noised_grads = summed_grads + noise
return noised_grads / tf.cast(self._num_microbatches, tf.float32)
final_grads = nest.map_structure(reduce_noise_normalize_batch,
clipped_grads)
return list(zip(final_grads, var_list))
return DPOptimizerClass
VectorizedDPAdagrad = make_vectorized_optimizer_class(AdagradOptimizer)
VectorizedDPAdam = make_vectorized_optimizer_class(AdamOptimizer)
VectorizedDPSGD = make_vectorized_optimizer_class(GradientDescentOptimizer)
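# Illustrative usage sketch (TF1 graph mode): wiring VectorizedDPSGD into a train
# op. The per-example `vector_loss` tensor and the model variables are assumed to
# be defined by the surrounding training code; the clipping norm, noise
# multiplier, microbatch count and learning rate below are example values only.
def _example_dp_train_op(vector_loss, learning_rate=0.15):
  """Builds a DP-SGD train op from a per-example loss tensor of shape [batch]."""
  optimizer = VectorizedDPSGD(
      l2_norm_clip=1.0,       # max L2 norm of each microbatch gradient
      noise_multiplier=1.1,   # noise stddev = noise_multiplier * l2_norm_clip
      num_microbatches=32,    # minibatch is split into 32 microbatches
      learning_rate=learning_rate)
  # compute_gradients expects the vector of per-example losses (not a mean) so
  # that it can reshape them into microbatches internally.
  grads_and_vars = optimizer.compute_gradients(vector_loss, var_list=None)
  return optimizer.apply_gradients(grads_and_vars)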
| 43.25974
| 103
| 0.683278
|
674b1db0c1d0a94e66cf3ea228dc437ce27feec0
| 1,220
|
py
|
Python
|
catnado/testing/testcase.py
|
tylertrussell/gae-catnado
|
91a73e9108bb724fb780cc8dcfca4da579313cb9
|
[
"Apache-2.0"
] | null | null | null |
catnado/testing/testcase.py
|
tylertrussell/gae-catnado
|
91a73e9108bb724fb780cc8dcfca4da579313cb9
|
[
"Apache-2.0"
] | null | null | null |
catnado/testing/testcase.py
|
tylertrussell/gae-catnado
|
91a73e9108bb724fb780cc8dcfca4da579313cb9
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from google.appengine.ext import testbed
import mock
class SimpleAppEngineTestCase(unittest.TestCase):
"""A very simple AppEngine test case which sets up basic stubs.
By default, stubs for Datastore and Memcache are created.
"""
def setUp(self):
"""Override setUp to set up stubs."""
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_urlfetch_stub()
self.addCleanup(mock.patch.stopall)
def tearDown(self):
"""Override tearDown to set up stubs."""
self.testbed.deactivate()
class ServiceAPITestCase(SimpleAppEngineTestCase):
"""Testcase for Service APIs.
Circumvents the X-AppEngine header check that verifies requests are coming
from within the application. That functionality is tested in
catnado.handlers.test.test_csrf_protected_handler.
"""
def setUp(self):
"""Override setUp to patch over request validation."""
super(ServiceAPITestCase, self).setUp()
self._csrf_token_patch = mock.patch(
'catnado.handlers.service_api_handler.validate_api_request'
)
self._csrf_token_mock = self._csrf_token_patch.start()
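# Illustrative usage sketch (not part of the library): a test building on
# SimpleAppEngineTestCase to exercise the Datastore stub. The ExampleEntity model
# below is made up purely for this example.
from google.appengine.ext import ndb


class ExampleEntity(ndb.Model):
  """Hypothetical model used only to demonstrate the datastore stub."""
  name = ndb.StringProperty()


class ExampleDatastoreTest(SimpleAppEngineTestCase):

  def test_put_and_get(self):
    key = ExampleEntity(name='demo').put()
    self.assertEqual('demo', key.get().name)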
| 27.727273
| 76
| 0.745082
|