Column schema for the records below:

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 - 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 4 - 209 |
| max_stars_repo_name | string | length 5 - 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 - 10 |
| max_stars_count | int64 | 1 - 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 - 209 |
| max_issues_repo_name | string | length 5 - 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 - 10 |
| max_issues_count | int64 | 1 - 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 - 209 |
| max_forks_repo_name | string | length 5 - 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 - 10 |
| max_forks_count | int64 | 1 - 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 - 1.02M |
| avg_line_length | float64 | 1.07 - 66.1k |
| max_line_length | int64 | 4 - 266k |
| alphanum_fraction | float64 | 0.01 - 1 |
hexsha: dd8d432133ff661f7c88705c713b928319cb35ba | size: 2,153 | ext: py | lang: Python
max_stars: path=couchbase/encryption/crypto_manager.py | repo=couchbase/couchbase-python-client | head=99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | licenses=["Apache-2.0"] | count=189 | event_min=2015-01-07T18:34:31.000Z | event_max=2022-03-21T17:41:56.000Z
max_issues: path=couchbase/encryption/crypto_manager.py | repo=couchbase/couchbase-python-client | head=99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | licenses=["Apache-2.0"] | count=24 | event_min=2015-05-19T14:00:16.000Z | event_max=2022-03-16T22:01:30.000Z
max_forks: path=couchbase/encryption/crypto_manager.py | repo=couchbase/couchbase-python-client | head=99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | licenses=["Apache-2.0"] | count=60 | event_min=2015-03-10T22:12:50.000Z | event_max=2022-03-07T21:57:40.000Z
content:
from abc import ABC, abstractmethod
from typing import Union
class CryptoManager(ABC):
"""Interface a CryptoManager must implement
"""
_DEFAULT_ENCRYPTER_ALIAS = "__DEFAULT__"
@abstractmethod
def encrypt(
self, # type: CryptoManager
plaintext, # type: Union[str, bytes, bytearray]
encrypter_alias=None, # type: str
) -> dict:
"""Encrypts the given plaintext using the given encrypter alias
:param plaintext: Input to be encrypted
:param encrypter_alias: Alias of encrypter to use, if None, default alias is used
:return: A :class:`couchbase.encryption.EncryptionResult` as a dict
:raises :class:`couchbase.exceptions.EncryptionFailureException`
"""
pass
@abstractmethod
def decrypt(
self, # type: CryptoManager
encrypted, # type: dict
) -> bytes:
"""Decrypts the given encrypted result based on the 'alg' key
in the encrypted result
:param encrypted: dict containing encryption information, must have an 'alg' key
:return: A decrypted result based on the given encrypted input
:raises :class:`couchbase.exceptions.DecryptionFailureException`
"""
pass
@abstractmethod
def mangle(
self, # type: CryptoManager
field_name, # type: str
) -> str:
"""Mangles provided JSON field name
:param field_name: JSON field name to be mangled
:return: mangled field name
"""
pass
@abstractmethod
def demangle(
self, # type: CryptoManager
field_name, # type: str
) -> str:
"""Demangles provided JSON field name
:param field_name: JSON field name to be demangled
:return: demangled field name
"""
pass
@abstractmethod
def is_mangled(
self, # type: CryptoManager
field_name, # type: str
) -> bool:
"""Checks if provided JSON field name has been mangled
:param field_name: JSON field name to check
:return: `True` if mangled, `False` otherwise
"""
pass
avg_line_length: 25.939759 | max_line_length: 90 | alphanum_fraction: 0.618672
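The record above only ships the abstract `CryptoManager` interface; the encryption, decryption and field-mangling behaviour has to come from a concrete subclass. The sketch below is purely illustrative and is not part of the couchbase SDK: the `InsecureCryptoManager` name, the Base64 "cipher" and the `__crypt_` prefix are assumptions chosen to keep the example self-contained.

```python
# Minimal, hypothetical CryptoManager subclass for illustration only.
# Base64 is NOT encryption; a real provider would wrap an authenticated cipher.
import base64

from couchbase.encryption.crypto_manager import CryptoManager  # module path taken from the record above


class InsecureCryptoManager(CryptoManager):
    _PREFIX = "__crypt_"  # assumed mangling prefix, not necessarily the SDK default

    def encrypt(self, plaintext, encrypter_alias=None):
        data = plaintext.encode() if isinstance(plaintext, str) else bytes(plaintext)
        return {
            "alg": encrypter_alias or self._DEFAULT_ENCRYPTER_ALIAS,
            "ciphertext": base64.b64encode(data).decode(),
        }

    def decrypt(self, encrypted):
        return base64.b64decode(encrypted["ciphertext"])

    def mangle(self, field_name):
        return self._PREFIX + field_name

    def demangle(self, field_name):
        return field_name[len(self._PREFIX):]

    def is_mangled(self, field_name):
        return field_name.startswith(self._PREFIX)
```

A caller would then round-trip a value with `mgr.decrypt(mgr.encrypt("secret"))` and rename document keys with `mangle`/`demangle` before serialisation.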
hexsha: 414159c2a0fbf58e7da5814d437c45c6e8d7e09c | size: 1,911 | ext: py | lang: Python
max_stars: path=training/checkpointing.py | repo=chenw23/open_lth | head=2ce732fe48abd5a80c10a153c45d397b048e980c | licenses=["MIT"] | count=509 | event_min=2020-05-07T16:45:46.000Z | event_max=2022-03-28T13:41:36.000Z
max_issues: path=training/checkpointing.py | repo=chenw23/open_lth | head=2ce732fe48abd5a80c10a153c45d397b048e980c | licenses=["MIT"] | count=12 | event_min=2020-06-10T10:07:09.000Z | event_max=2022-02-03T01:57:32.000Z
max_forks: path=training/checkpointing.py | repo=chenw23/open_lth | head=2ce732fe48abd5a80c10a153c45d397b048e980c | licenses=["MIT"] | count=103 | event_min=2020-05-07T21:40:06.000Z | event_max=2022-03-11T19:07:55.000Z
content:
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from foundations import paths
from foundations.step import Step
from platforms.platform import get_platform
from training.metric_logger import MetricLogger
def save_checkpoint_callback(output_location, step, model, optimizer, logger):
if get_platform().is_primary_process:
get_platform().save_model({
'ep': step.ep,
'it': step.it,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'logger': str(logger),
}, paths.checkpoint(output_location))
get_platform().barrier()
def restore_checkpoint(output_location, model, optimizer, iterations_per_epoch):
checkpoint_location = paths.checkpoint(output_location)
if not get_platform().exists(checkpoint_location):
return None, None
checkpoint = get_platform().load_model(checkpoint_location, map_location=torch.device('cpu'))
# Handle DataParallel.
module_in_name = get_platform().is_parallel
if module_in_name and not all(k.startswith('module.') for k in checkpoint['model_state_dict']):
checkpoint['model_state_dict'] = {'module.' + k: v for k, v in checkpoint['model_state_dict'].items()}
elif all(k.startswith('module.') for k in checkpoint['model_state_dict']) and not module_in_name:
checkpoint['model_state_dict'] = {k[len('module.'):]: v for k, v in checkpoint['model_state_dict'].items()}
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
step = Step.from_epoch(checkpoint['ep'], checkpoint['it'], iterations_per_epoch)
logger = MetricLogger.create_from_string(checkpoint['logger'])
return step, logger
avg_line_length: 42.466667 | max_line_length: 115 | alphanum_fraction: 0.725275
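A plausible way to wire the two helpers above into a training loop is sketched below. In open_lth itself the training Runner drives these calls, so the resume-or-start function, the `output_dir` value and the no-argument `MetricLogger()` constructor are assumptions made for illustration.

```python
# Hypothetical resume-or-start logic around the helpers above (illustration only).
from foundations.step import Step
from training.checkpointing import restore_checkpoint, save_checkpoint_callback
from training.metric_logger import MetricLogger


def resume_or_start(model, optimizer, output_dir, iterations_per_epoch):
    # Pick up from an existing checkpoint in output_dir if there is one.
    step, logger = restore_checkpoint(output_dir, model, optimizer, iterations_per_epoch)
    if step is None:
        step = Step.from_epoch(0, 0, iterations_per_epoch)  # same signature restore_checkpoint uses above
        logger = MetricLogger()  # assumed no-argument constructor
    return step, logger


# At any save interval the current state would then be persisted with:
# save_checkpoint_callback(output_dir, step, model, optimizer, logger)
```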
hexsha: 5e149e499cb149f1d1d09390271663ec8f9beed3 | size: 569 | ext: py | lang: Python
max_stars: path=api/v2/views/image_tag.py | repo=xuhang57/atmosphere | head=f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | licenses=["BSD-3-Clause"] | count=null | event_min=null | event_max=null
max_issues: path=api/v2/views/image_tag.py | repo=xuhang57/atmosphere | head=f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | licenses=["BSD-3-Clause"] | count=null | event_min=null | event_max=null
max_forks: path=api/v2/views/image_tag.py | repo=xuhang57/atmosphere | head=f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | licenses=["BSD-3-Clause"] | count=null | event_min=null | event_max=null
content:
from core.models import ApplicationTag as ImageTag
from api.v2.serializers.details import ImageTagSerializer
from api.v2.views.base import AuthModelViewSet
class ImageTagViewSet(AuthModelViewSet):
"""
API endpoint that allows image tags to be viewed
"""
queryset = ImageTag.objects.all()
serializer_class = ImageTagSerializer
filter_fields = ('application__id',)
def get_queryset(self):
"""
Filter out tags for end-dated (deleted) applications
"""
return ImageTag.objects.filter(application__end_date__isnull=True)
avg_line_length: 25.863636 | max_line_length: 74 | alphanum_fraction: 0.725835
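For context, a viewset like the one above is normally exposed through a Django REST Framework router. The sketch below shows one plausible wiring; the `image_tags` prefix and this standalone `urls.py` layout are assumptions, not atmosphere's actual routing (older DRF versions spell the keyword `base_name` rather than `basename`).

```python
# Hypothetical urls.py wiring for the viewset above (illustrative only).
from rest_framework.routers import DefaultRouter

from api.v2.views.image_tag import ImageTagViewSet

router = DefaultRouter()
router.register(r'image_tags', ImageTagViewSet, basename='imagetag')

urlpatterns = router.urls
```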
hexsha: 77859af9a3a1eccb4409f7d07c473816eb412e53 | size: 18,691 | ext: py | lang: Python
max_stars: path=tests/test_bzutil.py | repo=locriandev/elliott | head=4f0553b1cb64da15384d3dc87e353211b16ba96a | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_issues: path=tests/test_bzutil.py | repo=locriandev/elliott | head=4f0553b1cb64da15384d3dc87e353211b16ba96a | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_forks: path=tests/test_bzutil.py | repo=locriandev/elliott | head=4f0553b1cb64da15384d3dc87e353211b16ba96a | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
content:
from datetime import datetime, timezone
import logging
import unittest
import xmlrpc.client
from flexmock import flexmock
import mock
from elliottlib.bzutil import JIRABugTracker, BugzillaBugTracker
from elliottlib import bzutil, constants
hostname = "bugzilla.redhat.com"
class TestJIRABugTracker(unittest.TestCase):
def test_get_config(self):
config = {'foo': 1, 'jira_config': {'bar': 2}}
runtime = flexmock(
gitdata=flexmock(load_data=flexmock(data=config)),
get_major_minor=lambda: (4, 9)
)
actual = JIRABugTracker.get_config(runtime)
expected = {'foo': 1, 'bar': 2}
self.assertEqual(actual, expected)
class TestBugzillaBugTracker(unittest.TestCase):
def test_get_config(self):
config = {'foo': 1, 'bugzilla_config': {'bar': 2}}
runtime = flexmock(
gitdata=flexmock(load_data=flexmock(data=config)),
get_major_minor=lambda: (4, 9)
)
actual = BugzillaBugTracker.get_config(runtime)
expected = {'foo': 1, 'bar': 2}
self.assertEqual(actual, expected)
class TestBZUtil(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_is_flaw_bug(self):
bug = mock.MagicMock(product="Security Response", component="vulnerability")
self.assertTrue(bzutil.is_flaw_bug(bug))
bug = mock.MagicMock(product="foo", component="bar")
self.assertFalse(bzutil.is_flaw_bug(bug))
def test_get_whiteboard_component(self):
bug = mock.MagicMock(whiteboard="foo")
self.assertFalse(bzutil.get_whiteboard_component(bug))
bug = mock.MagicMock(whiteboard="component: ")
self.assertFalse(bzutil.get_whiteboard_component(bug))
for expected in ["something", "openvswitch2.15", "trailing_blank "]:
bug = mock.MagicMock(whiteboard=f"component: {expected}")
expected = expected.strip()
actual = bzutil.get_whiteboard_component(bug)
self.assertEqual(actual, expected.strip())
# def test_get_bugs(self):
# bug_ids = [1, 2]
# expected = {
# 1: mock.MagicMock(id=1),
# 2: mock.MagicMock(id=2),
# }
# bzapi = mock.MagicMock()
# bzapi.getbugs.return_value = [expected[bug_id] for bug_id in bug_ids]
# actual = bzutil.get_bugs(bzapi, bug_ids)
# self.assertEqual(expected, actual)
@mock.patch.object(bzutil.BugzillaBugTracker, 'get_bugs_map', autospec=True)
@mock.patch.object(bzutil.BugzillaBugTracker, 'login', return_value=None, autospec=True)
def test_get_tracker_flaws_map(self, login_mock: mock.MagicMock, get_bugs_mock: mock.MagicMock):
trackers = {
1: mock.MagicMock(id=1, blocks=[11, 12]),
2: mock.MagicMock(id=2, blocks=[21, 22]),
}
flaws_ids = [11, 12, 21, 22]
flaws = {
flaw_id: mock.MagicMock(id=flaw_id, product="Security Response", component="vulnerability")
for flaw_id in flaws_ids
}
expected = {
1: [flaws[11], flaws[12]],
2: [flaws[21], flaws[22]],
}
mock_bug_tracker = bzutil.BugzillaBugTracker({})
get_bugs_mock.return_value = flaws
actual = bzutil.get_tracker_flaws_map(mock_bug_tracker, trackers.values())
self.assertEqual(expected, actual)
def test_is_viable_bug(self):
bug = mock.MagicMock()
bug.status = "MODIFIED"
self.assertTrue(bzutil.is_viable_bug(bug))
bug.status = "ASSIGNED"
self.assertFalse(bzutil.is_viable_bug(bug))
def test_is_cve_tracker(self):
bug = mock.MagicMock(keywords=[])
self.assertFalse(bzutil.is_cve_tracker(bug))
bug.keywords.append("Security")
self.assertFalse(bzutil.is_cve_tracker(bug))
bug.keywords.append("SecurityTracking")
self.assertTrue(bzutil.is_cve_tracker(bug))
def test_to_timestamp(self):
dt = xmlrpc.client.DateTime("20210615T18:23:22")
actual = bzutil.to_timestamp(dt)
self.assertEqual(actual, 1623781402.0)
def test_filter_bugs_by_cutoff_event(self):
bzapi = mock.MagicMock()
with mock.patch("elliottlib.bzutil.BugzillaBugTracker.login") as mock_login:
mock_login.return_value = bzapi
bug_tracker = bzutil.BugzillaBugTracker({})
desired_statuses = ["MODIFIED", "ON_QA", "VERIFIED"]
sweep_cutoff_timestamp = datetime(2021, 6, 30, 12, 30, 00, 0, tzinfo=timezone.utc).timestamp()
bugs = [
mock.MagicMock(id=1, status="ON_QA", creation_time=xmlrpc.client.DateTime("20210630T12:29:00")),
mock.MagicMock(id=2, status="ON_QA", creation_time=xmlrpc.client.DateTime("20210630T12:30:00")),
mock.MagicMock(id=3, status="ON_QA", creation_time=xmlrpc.client.DateTime("20210630T12:31:00")),
mock.MagicMock(id=4, status="ON_QA", creation_time=xmlrpc.client.DateTime("20210630T00:00:00")),
mock.MagicMock(id=5, status="ON_QA", creation_time=xmlrpc.client.DateTime("20210630T00:00:00")),
mock.MagicMock(id=6, status="ON_QA", creation_time=xmlrpc.client.DateTime("20210630T00:00:00")),
mock.MagicMock(id=7, status="ON_QA", creation_time=xmlrpc.client.DateTime("20210630T00:00:00")),
mock.MagicMock(id=8, status="ON_QA", creation_time=xmlrpc.client.DateTime("20210630T00:00:00")),
mock.MagicMock(id=9, status="ON_QA", creation_time=xmlrpc.client.DateTime("20210630T00:00:00")),
]
bzapi.bugs_history_raw.return_value = {
"bugs": [
{
"id": 1,
"history": [],
},
{
"id": 2,
"history": [],
},
{
"id": 4,
"history": [
{
"when": xmlrpc.client.DateTime("20210630T01:00:00"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
]
},
{
"when": xmlrpc.client.DateTime("20210630T23:59:59"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
]
},
],
},
{
"id": 5,
"history": [
{
"when": xmlrpc.client.DateTime("20210630T01:00:00"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "NEW", "added": "MODIFIED"},
]
},
{
"when": xmlrpc.client.DateTime("20210630T23:59:59"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "MODIFIED", "added": "ON_QA"},
]
},
],
},
{
"id": 6,
"history": [
{
"when": xmlrpc.client.DateTime("20210630T01:00:00"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "NEW", "added": "ASSIGNED"},
]
},
{
"when": xmlrpc.client.DateTime("20210630T23:59:59"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "ASSIGNED", "added": "ON_QA"},
]
},
],
},
{
"id": 7,
"history": [
{
"when": xmlrpc.client.DateTime("20210630T01:00:00"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "NEW", "added": "MODIFIED"},
]
},
{
"when": xmlrpc.client.DateTime("20210630T23:59:59"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "MODIFIED", "added": "ON_QA"},
]
},
],
},
{
"id": 8,
"history": [
{
"when": xmlrpc.client.DateTime("20210630T01:00:00"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "NEW", "added": "MODIFIED"},
]
},
{
"when": xmlrpc.client.DateTime("20210630T13:00:00"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "MODIFIED", "added": "ON_QA"},
]
},
{
"when": xmlrpc.client.DateTime("20210630T23:59:59"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "ON_QA", "added": "VERIFIED"},
]
},
],
},
{
"id": 9,
"history": [
{
"when": xmlrpc.client.DateTime("20210630T01:00:00"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "NEW", "added": "MODIFIED"},
]
},
{
"when": xmlrpc.client.DateTime("20210630T13:00:00"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "MODIFIED", "added": "ON_QA"},
]
},
{
"when": xmlrpc.client.DateTime("20210630T23:59:59"),
"changes": [
{"field_name": "irelevant1", "removed": "foo", "added": "bar"},
{"field_name": "irelevant2", "removed": "bar", "added": "foo"},
{"field_name": "status", "removed": "ON_QA", "added": "ASSIGNED"},
]
},
],
},
]
}
actual = bug_tracker.filter_bugs_by_cutoff_event(bugs, desired_statuses, sweep_cutoff_timestamp)
self.assertListEqual([1, 2, 4, 5, 7, 8], [bug.id for bug in actual])
def test_approximate_cutoff_timestamp(self):
koji_api = mock.MagicMock()
koji_api.getEvent.return_value = {"ts": datetime(2021, 7, 3, 0, 0, 0, 0, tzinfo=timezone.utc).timestamp()}
metas = [
mock.MagicMock(),
mock.MagicMock(),
mock.MagicMock(),
]
metas[0].get_latest_build.return_value = {"nvr": "a-4.9.0-202107020000.p0"}
metas[1].get_latest_build.return_value = {"nvr": "b-4.9.0-202107020100.p0"}
metas[2].get_latest_build.return_value = {"nvr": "c-4.9.0-202107020200.p0"}
actual = bzutil.approximate_cutoff_timestamp(mock.ANY, koji_api, metas)
self.assertEqual(datetime(2021, 7, 2, 2, 0, 0, 0, tzinfo=timezone.utc).timestamp(), actual)
koji_api.getEvent.return_value = {"ts": datetime(2021, 7, 1, 0, 0, 0, 0, tzinfo=timezone.utc).timestamp()}
actual = bzutil.approximate_cutoff_timestamp(mock.ANY, koji_api, metas)
self.assertEqual(datetime(2021, 7, 1, 0, 0, 0, 0, tzinfo=timezone.utc).timestamp(), actual)
koji_api.getEvent.return_value = {"ts": datetime(2021, 7, 4, 0, 0, 0, 0, tzinfo=timezone.utc).timestamp()}
actual = bzutil.approximate_cutoff_timestamp(mock.ANY, koji_api, [])
self.assertEqual(datetime(2021, 7, 4, 0, 0, 0, 0, tzinfo=timezone.utc).timestamp(), actual)
class TestSearchFilter(unittest.TestCase):
def test_search_filter(self):
"""Verify the bugzilla SearchFilter works as expected"""
field_name = "component"
operator = "notequals"
value = "RFE"
expected = "&f1=component&o1=notequals&v1=RFE"
sf = bzutil.SearchFilter(field_name, operator, value)
self.assertEqual(sf.tostring(1), expected)
class TestGetHigestImpact(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_lowest_to_highest_impact(self):
trackers = [flexmock(id=index, severity=severity)
for index, severity in enumerate(constants.BUG_SEVERITY_NUMBER_MAP.keys())]
tracker_flaws_map = {
tracker.id: [] for tracker in trackers
}
impact = bzutil.get_highest_impact(trackers, tracker_flaws_map)
self.assertEqual(impact, constants.SECURITY_IMPACT[4])
def test_single_impact(self):
bugs = []
severity = "high"
bugs.append(flexmock(severity=severity))
impact = bzutil.get_highest_impact(bugs, None)
self.assertEqual(impact, constants.SECURITY_IMPACT[constants.BUG_SEVERITY_NUMBER_MAP[severity]])
def test_impact_for_tracker_with_unspecified_severity(self):
bugs = []
severity = "unspecified"
bugs.append(flexmock(id=123, severity=severity))
tracker_flaws_map = {
123: [flexmock(id=123, severity="medium")],
}
impact = bzutil.get_highest_impact(bugs, tracker_flaws_map)
self.assertEqual(impact, "Moderate")
tracker_flaws_map = {
123: [flexmock(id=123, severity="unspecified")],
}
impact = bzutil.get_highest_impact(bugs, tracker_flaws_map)
self.assertEqual(impact, "Low")
class TestGetFlawBugs(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_get_flaw_bugs(self):
t1 = flexmock(id='1', blocks=['b1', 'b2'])
t2 = flexmock(id='2', blocks=['b3'])
t3 = flexmock(id='3', blocks=[])
flaw_bugs = bzutil.get_flaw_bugs([t1, t2, t3])
for flaw in ['b1', 'b2', 'b3']:
self.assertTrue(flaw in flaw_bugs)
class TestGetFlawAliases(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_get_flaw_aliases(self):
CVE01 = flexmock(
id='1',
product='Security Response',
component='vulnerability',
alias=['CVE-0001-0001']
)
multiAlias = flexmock(
id='2',
product='Security Response',
component='vulnerability',
alias=['CVE-0001-0002', 'someOtherAlias']
)
multiAlias2 = flexmock(
id='3',
product='Security Response',
component='vulnerability',
alias=['someOtherAlias', 'CVE-0001-0003']
)
noAlias = flexmock(
id='4',
product='Security Response',
component='vulnerability',
alias=[]
)
nonFlaw = flexmock(
id='5',
product='Some Product',
component='security',
alias=['CVE-0001-0001', 'someOtherAlias']
)
flaws = [CVE01, multiAlias, multiAlias2, noAlias, nonFlaw]
flaw_cve_map = bzutil.get_flaw_aliases(flaws)
self.assertEqual(len(flaw_cve_map.keys()), 4)
self.assertEqual(flaw_cve_map['4'], "")
if __name__ == "__main__":
unittest.main()
avg_line_length: 43.772834 | max_line_length: 114 | alphanum_fraction: 0.495479
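One of the simpler behaviours exercised above is `test_to_timestamp`, which expects an XML-RPC `DateTime` to be interpreted as UTC and converted to a POSIX timestamp. The snippet below reproduces that arithmetic with the standard library alone; it is an illustrative equivalent, not elliott's implementation of `bzutil.to_timestamp`.

```python
# Standalone check of the conversion asserted by test_to_timestamp above.
import xmlrpc.client
from datetime import datetime, timezone

dt = xmlrpc.client.DateTime("20210615T18:23:22")
parsed = datetime.strptime(dt.value, "%Y%m%dT%H:%M:%S").replace(tzinfo=timezone.utc)
assert parsed.timestamp() == 1623781402.0  # same value the test expects from bzutil.to_timestamp(dt)
```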
hexsha: b607bfaecb9d2e76d8a55510fce5ad3933300b45 | size: 5,373 | ext: py | lang: Python
max_stars: path=Day20/Day20-2.py | repo=remonedo/AdventOfCode2019 | head=0d4f47ce05d7500fd16058bb28e34c581a902d32 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=Day20/Day20-2.py | repo=remonedo/AdventOfCode2019 | head=0d4f47ce05d7500fd16058bb28e34c581a902d32 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=Day20/Day20-2.py | repo=remonedo/AdventOfCode2019 | head=0d4f47ce05d7500fd16058bb28e34c581a902d32 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
import operator
import numpy
def mark_portals(area):
width, height = area.shape
portals = {}
for y in range(0, height):
for x in range(0, width):
if area[x, y] is None:
continue
if not ('A' <= area[x, y][0] <= 'Z'):
continue
portal_type = 1
if x < 3 or y < 3 or x > (width-4) or y > (height-4):
portal_type = -1
if 'A' <= area[x, y+1][0] <= 'Z':
# vertical portal
portal = area[x, y][0] + area[x, y+1][0]
if portal not in portals:
portals[portal] = []
if area[x, y-1][0] == '.':
area[x, y] = (portal, None)
area[x, y+1] = ('#', None)
portals[portal].append(((x, y), (x, y-1), portal_type, portal))
else:
area[x, y] = ('#', None)
area[x, y+1] = (portal, None)
portals[portal].append(((x, y+1), (x, y+2), portal_type, portal))
elif 'A' <= area[x+1, y][0] <= 'Z':
# horizontal portal
portal = area[x, y][0] + area[x+1, y][0]
if portal not in portals:
portals[portal] = []
if area[x-1, y][0] == '.':
area[x, y] = (portal, None)
area[x+1, y] = ('#', None)
portals[portal].append(((x, y), (x-1, y), portal_type, portal))
else:
area[x, y] = ('#', None)
area[x+1, y] = (portal, None)
portals[portal].append(((x+1, y), (x+2, y), portal_type, portal))
pass
return portals
def load_map():
lines = [line.rstrip() for line in open("Day20.txt")]
width = max([len(line) for line in lines])
height = len(lines)
area = numpy.empty([width, height], dtype=tuple)
for y in range(0, height):
for x in range(0, width):
area[x, y] = ('#', None)
for y in range(0, height):
for x in range(0, len(lines[y])):
if lines[y][x] != ' ':
area[x, y] = (lines[y][x], None)
return area
level_down_portals = {}
level_up_portals = {}
def traverse_map(original_area, portal, portals, level, depth, steps):
global level_down_portals, level_up_portals
if depth > 128 or level > 32:
return None
area = original_area.copy()
source_portal = portal[3]
if source_portal not in level_down_portals:
level_down_portals[source_portal] = []
level_up_portals[source_portal] = []
location = portal[0]
x, y = location[0], location[1]
area[x, y] = ('#', steps)
location = portal[1]
x, y = location[0], location[1]
area[x, y] = ('#', steps)
directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]
positions = [((x, y), 0)]
start = 0
end = 1
while start < end:
steps += 1
for i in range(start, end):
position = positions[i]
for direction in directions:
new_location = (position[0][0]+direction[0], position[0][1]+direction[1])
x, y = new_location
if area[x, y][0] == '#':
continue
if area[x, y][0] == '.':
area[x, y] = ('#', steps)
positions.append((new_location, steps))
continue
if area[x, y][0] == 'AA':
continue
if level > 0:
if area[x, y][0] == 'ZZ':
continue
else:
if area[x, y][0] == 'ZZ':
return steps-1
portal = portals[area[x, y][0]][0]
if portal[0] != new_location:
portal = portals[area[x, y][0]][1]
if portal[2] == -1:
continue
portal = portals[area[x, y][0]][0]
if portal[0] == new_location:
portal = portals[area[x, y][0]][1]
if portal[2] == -1:
if (portal, level) in level_up_portals[source_portal]:
continue
else:
level_up_portals[source_portal].append((portal, level))
else:
if (portal, level) in level_down_portals[source_portal]:
continue
else:
level_down_portals[source_portal].append((portal, level))
result = traverse_map(original_area, portal, portals, level - portal[2], depth+1, steps)
area[x, y] = ('#', steps)
if portal[2] == -1:
level_up_portals[source_portal].remove((portal, level))
else:
level_down_portals[source_portal].remove((portal, level))
if result is not None:
return result
positions.sort(key=operator.itemgetter(1))
start = end
end = len(positions)
return None
def main():
area = load_map()
portals = mark_portals(area)
print(traverse_map(area, portals['AA'][0], portals, 0, 0, 0))
main()
avg_line_length: 30.01676 | max_line_length: 104 | alphanum_fraction: 0.445003
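The solver above builds its `portals` mapping as `name -> [(label_position, walkable_position, type, name), ...]`, with `type == -1` for outer-edge portals and `1` for inner ones, and then searches recursively across levels. A small inspection sketch follows; it assumes the code above has been saved as an importable module (for example `day20_part2.py`) and that the `Day20.txt` puzzle input is present in the working directory.

```python
# Illustrative inspection of the portal table built by mark_portals (assumes the
# file above is saved as day20_part2.py and Day20.txt exists alongside it).
from day20_part2 import load_map, mark_portals

area = load_map()
portals = mark_portals(area)
for name, entries in sorted(portals.items()):
    for label_pos, walk_pos, portal_type, _ in entries:
        side = "outer" if portal_type == -1 else "inner"
        print(f"{name}: label at {label_pos}, enter at {walk_pos}, {side}")
```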
hexsha: bcfb4f4ca1b96990cfb5580bf59b0f5a8e42b273 | size: 1,237 | ext: py | lang: Python
max_stars: path=iwaves/utils/tools.py | repo=mrayson/iwaves | head=ddb6acc017a22896484fcd4c1058210e6223fde0 | licenses=["BSD-2-Clause"] | count=null | event_min=null | event_max=null
max_issues: path=iwaves/utils/tools.py | repo=mrayson/iwaves | head=ddb6acc017a22896484fcd4c1058210e6223fde0 | licenses=["BSD-2-Clause"] | count=3 | event_min=2020-08-31T02:50:39.000Z | event_max=2020-08-31T03:26:33.000Z
max_forks: path=iwaves/utils/tools.py | repo=iosonobert/iwaves | head=143563bc9075d1e42e486a064f1fefa67ed84702 | licenses=["BSD-2-Clause"] | count=5 | event_min=2020-08-31T02:04:41.000Z | event_max=2022-02-27T06:38:00.000Z
content:
# Functions for 2D arrays
import numpy as np
import pdb
def grad_z(y, z, axis=0):
"""
Compute the vertical gradient.
"z" can be an array the same size as y, or a vector along the first axis of "y".
Takes the derivative along the dimension specified by axis (default 0).
"""
Nz = z.shape[0]
# Reshape the y variable
y = y.swapaxes(0, axis)
#assert y.shape[0] == Nz
z = z.swapaxes(0, axis)
assert z.shape == (Nz,) or z.shape == y.shape
dy_dz = np.zeros_like(y)
# Second-order accurate for mid-points
ymid = 0.5*(y[1:,...]+y[0:-1,...])
zmid = 0.5*(z[1:,...]+z[0:-1,...])
dzmid = zmid[1:,...] - zmid[0:-1,...]
dzmidi = 1./dzmid
dy_dz[1:-1, ...] = (ymid[1:,...] - ymid[0:-1,...])*\
dzmidi[:,...]
# First-order accurate for top and bottom cells
dy_dz[0,...] = (y[1,...] - y[0,...])*dzmidi[0,...]
dy_dz[-1,...] = (y[-1,...] - y[-2,...])*dzmidi[-1,...]
return dy_dz.swapaxes(axis, 0)
def quadinterp(x, x0, x1, x2, y0, y1, y2):
"""
Quadratic interpolation
"""
L0 = (x-x1)*(x-x2) / ( (x0-x1)*(x0-x2) )
L1 = (x-x0)*(x-x2) / ( (x1-x0)*(x1-x2) )
L2 = (x-x0)*(x-x1) / ( (x2-x0)*(x2-x1) )
return y0*L0 + y1*L1 + y2*L2
avg_line_length: 23.339623 | max_line_length: 77 | alphanum_fraction: 0.503638
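Both helpers above lend themselves to quick numerical checks: `grad_z` of a linear profile should return the constant slope everywhere, and `quadinterp` should reproduce any quadratic exactly. The sketch below assumes the `iwaves` package is importable; otherwise the two functions can be pasted in directly.

```python
# Quick sanity checks for grad_z and quadinterp (illustrative only).
import numpy as np

from iwaves.utils.tools import grad_z, quadinterp  # assumes iwaves is on the path

z = np.linspace(0.0, -100.0, 51)   # uniformly spaced vertical coordinate
y = 2.0 * z + 5.0                  # linear profile, so dy/dz == 2 everywhere
assert np.allclose(grad_z(y, z), 2.0)

def f(x):
    return 3.0 * x**2 - x + 1.0    # any quadratic is interpolated exactly

assert np.isclose(quadinterp(0.4, 0.0, 0.5, 1.0, f(0.0), f(0.5), f(1.0)), f(0.4))
```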
hexsha: cf312690a337ef59335bea280df574721cc978c0 | size: 50,685 | ext: py | lang: Python
max_stars: path=bin/Python27/Lib/site-packages/sympy/integrals/integrals.py | repo=lefevre-fraser/openmeta-mms | head=08f3115e76498df1f8d70641d71f5c52cab4ce5f | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=bin/Python27/Lib/site-packages/sympy/integrals/integrals.py | repo=lefevre-fraser/openmeta-mms | head=08f3115e76498df1f8d70641d71f5c52cab4ce5f | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=bin/Python27/Lib/site-packages/sympy/integrals/integrals.py | repo=lefevre-fraser/openmeta-mms | head=08f3115e76498df1f8d70641d71f5c52cab4ce5f | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
from sympy.core import (Basic, Expr, S, C, Symbol, Wild, Add, sympify, diff,
oo, Tuple, Interval)
from sympy.core.symbol import Dummy
from sympy.core.compatibility import is_sequence
from sympy.integrals.trigonometry import trigintegrate
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.integrals.rationaltools import ratint
from sympy.integrals.risch import heurisch
from sympy.integrals.meijerint import meijerint_definite, meijerint_indefinite
from sympy.utilities import xthreaded, flatten
from sympy.utilities.misc import filldedent
from sympy.polys import Poly, PolynomialError
from sympy.solvers.solvers import solve, posify
from sympy.functions import Piecewise, sqrt, sign
from sympy.geometry import Curve
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.series import limit
def _process_limits(*symbols):
"""Convert the symbols-related limits into proper limits,
storing them as Tuple(symbol, lower, upper). The sign of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the sign is changed.
"""
limits = []
sign = 1
for V in symbols:
if isinstance(V, Symbol):
limits.append(Tuple(V))
continue
elif is_sequence(V, Tuple):
V = sympify(flatten(V))
if V[0].is_Symbol:
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval):
V[1:] = [V[1].start, V[1].end]
if len(V) == 3:
if V[1] is None and V[2] is not None:
nlim = [V[2]]
elif V[1] is not None and V[2] is None:
sign *= -1
nlim = [V[1]]
elif V[1] is None and V[2] is None:
nlim = []
else:
nlim = V[1:]
limits.append(Tuple(newsymbol, *nlim ))
continue
elif len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, sign
class Integral(Expr):
"""Represents unevaluated integral."""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
"""Create an unevaluated integral.
Arguments are an integrand followed by one or more limits.
If no limits are given and there is only one free symbol in the
expression, that symbol will be used, otherwise an error will be
raised.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x)
Integral(x, x)
>>> Integral(y)
Integral(y, y)
When limits are provided, they are interpreted as follows (using
``x`` as though it were the variable of integration):
(x,) or x - indefinite integral
(x, a) - "evaluate at" integral
(x, a, b) - definite integral
Although the same integral will be obtained from an indefinite
integral and an "evaluate at" integral when ``a == x``, they
respond differently to substitution:
>>> i = Integral(x, x)
>>> at = Integral(x, (x, x))
>>> i.doit() == at.doit()
True
>>> i.subs(x, 1)
Integral(1, x)
>>> at.subs(x, 1)
Integral(x, (x, 1))
The ``as_dummy`` method can be used to see which symbols cannot be
targeted by subs: those with a prepended underscore cannot be
changed with ``subs``. (Also, the integration variables themselves --
the first element of a limit -- can never be changed by subs.)
>>> i.as_dummy()
Integral(x, x)
>>> at.as_dummy()
Integral(_x, (_x, x))
"""
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = piecewise_fold(sympify(function))
if function is S.NaN:
return S.NaN
if symbols:
limits, sign = _process_limits(*symbols)
else:
# no symbols provided -- let's compute full anti-derivative
free = function.free_symbols
if len(free) != 1:
raise ValueError("specify variables of integration for %s" % function)
limits, sign = [Tuple(s) for s in free], 1
while isinstance(function, Integral):
# denest the integrand
limits = list(function.limits) + limits
function = function.function
obj = Expr.__new__(cls, **assumptions)
arglist = [sign*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
def __getnewargs__(self):
return (self.function,) + tuple([tuple(xab) for xab in self.limits])
@property
def function(self):
"""Return the function to be integrated.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def limits(self):
"""Return the limits of integration.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the integration variables.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Replace integration variables with dummy ones
transform : Perform mapping on the integration variable
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the
integral is evaluated. This is useful if one is trying to
determine whether an integral depends on a certain
symbol or not.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, y, 1)).free_symbols
set([y])
See Also
========
function, limits, variables
"""
function, limits = self.function, self.limits
if function.is_zero:
return set()
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
if len(xab) == 3 and xab[1] == xab[2]:
# if two limits are the same the integral is 0
# and there are no symbols
return set()
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
@property
def is_zero(self):
"""Since Integral doesn't autosimplify it is useful to see if
it would simplify to zero or not in a trivial manner, i.e. when
the function is 0 or two limits of a definite integral are the same.
This is a very naive and quick test, not intended to check for special
patterns like Integral(sin(m*x)*cos(n*x), (x, 0, 2*pi)) == 0.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y, z
>>> Integral(1, (x, 1, 1)).is_zero
True
>>> Integral(0, (x, y, z)).is_zero
True
>>> Integral(1, (x, 1, 2)).is_zero
False
See Also
========
is_number
"""
if (self.function.is_zero or
any(len(xab) == 3 and xab[1] == xab[2] for xab in self.limits)):
return True
if not self.free_symbols and self.function.is_number:
# the integrand is a number and the limits are numerical
return False
@property
def is_number(self):
"""
Return True if the Integral will result in a number, else False.
sympy considers anything that will result in a number to have
is_number == True.
>>> from sympy import log
>>> log(2).is_number
True
Integrals are a special case since they contain symbols that can
be replaced with numbers. Whether the integral can be done or not is
another issue. But answering whether the final result is a number is
not difficult.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x).is_number
False
>>> Integral(x, y).is_number
False
>>> Integral(x, (y, 1, x)).is_number
False
>>> Integral(x, (y, 1, 2)).is_number
False
>>> Integral(x, (y, 1, 1)).is_number
True
>>> Integral(x, (x, 1, 2)).is_number
True
>>> Integral(x*y, (x, 1, 2), (y, 1, 3)).is_number
True
>>> Integral(1, x, (x, 1, 2)).is_number
True
See Also
========
is_zero
"""
integrand, limits = self.function, self.limits
isyms = integrand.atoms(Symbol)
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue # it may be removed later
elif len(xab) == 3 and xab[1] == xab[2]: # XXX naive equality test
return True # integral collapsed
if xab[0] in isyms:
# take it out of the symbols since it will be replaced
# with whatever the limits of the integral are
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
# if there are no surviving symbols then the result is a number
return len(isyms) == 0
def as_dummy(self):
"""
Replace instances of the integration variables with their dummy
counterparts to make clear what are dummy variables and what
are real-world symbols in an Integral.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
Integral(_x, (_x, x, _y), (_y, x, y))
The "integral at" limit that has a length of 1 is not treated as
though the integration symbol is a dummy, but the explicit form
of length 2 does treat the integration variable as a dummy.
>>> Integral(x, x).as_dummy()
Integral(x, x)
>>> Integral(x, (x, x)).as_dummy()
Integral(_x, (_x, x))
If there were no dummies in the original expression, then the
output of this function will show which symbols cannot be
changed by subs(), those with an underscore prefix.
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the integration variable
"""
reps = {}
f = self.function
limits = list(self.limits)
for i in xrange(-1, -len(limits) - 1, -1):
xab = list(limits[i])
if len(xab) == 1:
continue
x = xab[0]
xab[0] = x.as_dummy()
for j in range(1, len(xab)):
xab[j] = xab[j].subs(reps)
reps[x] = xab[0]
limits[i] = xab
f = f.subs(reps)
return Integral(f, *limits)
def transform(self, x, u, inverse=False):
r"""
Performs a change of variables from `x` to `u` using the relationship
given by `x` and `u` which will define the transformations `f` and `F`
(which are inverses of each other) as follows:
1) If `x` is a Symbol (which is a variable of integration) then `u`
will be interpreted as some function, f(u), with inverse F(u).
This, in effect, just makes the substitution of x with f(x).
2) If `u` is a Symbol then `x` will be interpreted as some function,
F(x), with inverse f(u). This is commonly referred to as
u-substitution.
The `inverse` option will reverse `x` and `u`. It is a deprecated option
since `x` and `u` can just be passed in reverse order.
Once f and F have been identified, the transformation is made as
follows:
.. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x)
\frac{\mathrm{d}}{\mathrm{d}x}
where `F(x)` is the inverse of `f(x)` and the limits and integrand have
been corrected so as to retain the same value after integration.
Notes
=====
The mappings, F(x) or f(u), must lead to a unique integral. Linear
or rational linear expression, `2*x`, `1/x` and `sqrt(x)`, will
always work; quadratic expressions like `x**2 - 1` are acceptable
as long as the resulting integrand does not depend on the sign of
the solutions (see examples).
The integral will be returned unchanged if `x` is not a variable of
integration.
`x` must be (or contain) only one of the integration variables. If
`u` has more than one free symbol then it should be sent as a tuple
(`u`, `uvar`) where `uvar` identifies which variable is replacing
the integration variable.
XXX can it contain another integration variable?
Examples
========
>>> from sympy.abc import a, b, c, d, x, u, y
>>> from sympy import Integral, S, cos, sqrt
>>> i = Integral(x*cos(x**2 - 1), (x, 0, 1))
transform can change the variable of integration
>>> i.transform(x, u)
Integral(u*cos(u**2 - 1), (u, 0, 1))
transform can perform u-substitution as long as a unique
integrand is obtained:
>>> i.transform(x**2 - 1, u)
Integral(cos(u)/2, (u, -1, 0))
This attempt fails because x = +/-sqrt(u + 1) and the
sign does not cancel out of the integrand:
>>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u)
Traceback (most recent call last):
...
ValueError:
The mapping between F(x) and f(u) did not give a unique integrand.
transform can do a substitution. Here, the previous
result is transformed back into the original expression
using "u-substitution":
>>> ui = _
>>> _.transform(sqrt(u + 1), x) == i
True
We can accomplish the same with a regular substitution:
>>> ui.transform(u, x**2 - 1) == i
True
If the `x` does not contain a symbol of integration then
the integral will be returned unchanged. Integral `i` does
not have an integration variable `a` so no change is made:
>>> i.transform(a, x) == i
True
When `u` has more than one free symbol the symbol that is
replacing `x` must be identified by passing `u` as a tuple:
>>> Integral(x, (x, 0, 1)).transform(x, (u + a, u))
Integral(a + u, (u, -a, -a + 1))
>>> Integral(x, (x, 0, 1)).transform(x, (u + a, a))
Integral(a + u, (a, -u, -u + 1))
See Also
========
variables : Lists the integration variables
as_dummy : Replace integration variables with dummy ones
"""
if inverse:
# when this is removed, update the docstring
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(
feature="transform(x, f(x), inverse=True)",
useinstead="transform(f(x), x)",
issue=3380, deprecated_since_version="0.7.2",
).warn()
# in the old style x and u contained the same variable so
# don't worry about using the old-style feature with the
# new style input...but it will still work:
# i.transform(x, u).transform(x, u, inverse=True) -> i
x, u = u, x
d = Dummy('d')
xfree = x.free_symbols.intersection(self.variables)
if len(xfree) > 1:
raise ValueError('F(x) can only contain one of: %s' % self.variables)
xvar = xfree.pop() if xfree else d
if xvar not in self.variables:
return self
u = sympify(u)
if isinstance(u, Expr):
ufree = u.free_symbols
if len(ufree) != 1:
raise ValueError(filldedent('''
When f(u) has more than one free symbol, the one replacing x
must be identified: pass f(u) as (f(u), u)'''))
uvar = ufree.pop()
else:
u, uvar = u
if uvar not in u.free_symbols:
raise ValueError(filldedent('''
Expecting a tuple (expr, symbol) where symbol identified
a free symbol in expr, but symbol is not in expr's free
symbols.'''))
if not isinstance(uvar, Symbol):
raise ValueError(filldedent('''
Expecting a tuple (expr, symbol) but didn't get
a symbol; got %s''' % uvar))
if x.is_Symbol and u.is_Symbol:
return self.xreplace({x: u})
if not x.is_Symbol and not u.is_Symbol:
raise ValueError('either x or u must be a symbol')
if uvar == xvar:
return self.transform(x, u.subs(uvar, d)).xreplace({d: uvar})
if uvar in self.limits:
raise ValueError(filldedent('''
u must contain the same variable as in x
or a variable that is not already an integration variable'''))
if not x.is_Symbol:
F = [x.subs(xvar, d)]
soln = solve(u - x, xvar, check=False)
if not soln:
raise ValueError('no solution for solve(F(x) - f(u), x)')
f = [fi.subs(uvar, d) for fi in soln]
else:
f = [u.subs(uvar, d)]
pdiff, reps = posify(u - x)
puvar = uvar.subs([(v, k) for k, v in reps.iteritems()])
soln = [s.subs(reps) for s in solve(pdiff, puvar)]
if not soln:
raise ValueError('no solution for solve(F(x) - f(u), u)')
F = [fi.subs(xvar, d) for fi in soln]
newfuncs = set([(self.function.subs(xvar, fi)*fi.diff(d)
).subs(d, uvar) for fi in f])
if len(newfuncs) > 1:
raise ValueError(filldedent('''
The mapping between F(x) and f(u) did not give
a unique integrand.'''))
newfunc = newfuncs.pop()
def _calc_limit_1(F, a, b):
"""
replace d with a, using subs if possible, otherwise limit
where sign of b is considered
"""
wok = F.subs(d, a)
if wok is S.NaN or wok.is_bounded is False and a.is_bounded:
return limit(sign(b)*F, d, a)
return wok
def _calc_limit(a, b):
"""
replace d with a, using subs if possible, otherwise limit
where sign of b is considered
"""
avals = list(set([_calc_limit_1(Fi, a, b) for Fi in F]))
if len(avals) > 1:
raise ValueError(filldedent('''
The mapping between F(x) and f(u) did not
give a unique limit.'''))
return avals[0]
newlimits = []
for xab in self.limits:
sym = xab[0]
if sym == xvar:
if len(xab) == 3:
a, b = xab[1:]
a, b = _calc_limit(a, b), _calc_limit(b, a)
if a > b:
a, b = b, a
newfunc = -newfunc
newlimits.append((uvar, a, b))
elif len(xab) == 2:
a = _calc_limit(xab[1], 1)
newlimits.append((uvar, a))
else:
newlimits.append(uvar)
else:
newlimits.append(xab)
return Integral(newfunc, *newlimits)
def doit(self, **hints):
"""
Perform the integration using any hints given.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).doit()
x**3/log(x) - x/log(x)
See Also
========
sympy.integrals.trigonometry.trigintegrate
sympy.integrals.risch.heurisch
sympy.integrals.rationaltools.ratint
as_sum : Approximate the integral using a sum
"""
if not hints.get('integrals', True):
return self
deep = hints.get('deep', True)
meijerg = hints.get('meijerg', None)
conds = hints.get('conds', 'piecewise')
if conds not in ['separate', 'piecewise', 'none']:
raise ValueError('conds must be one of "separate", "piecewise", ' \
'"none", got: %s' % conds)
# check for the trivial case of equal upper and lower limits
if self.is_zero:
return S.Zero
# now compute and check the function
function = self.function
if deep:
function = function.doit(**hints)
if function.is_zero:
return S.Zero
# There is no trivial answer, so continue
undone_limits = []
ulj = set() # free symbols of any undone limits' upper and lower limits
for xab in self.limits:
# compute uli, the free symbols in the
# Upper and Lower limits of limit I
if len(xab) == 1:
uli = set(xab[:1])
elif len(xab) == 2:
uli = xab[1].free_symbols
elif len(xab) == 3:
uli = xab[1].free_symbols.union(xab[2].free_symbols)
# this integral can be done as long as there is no blocking
# limit that has been undone. An undone limit is blocking if
# it contains an integration variable that is in this limit's
# upper or lower free symbols or vice versa
if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
undone_limits.append(xab)
ulj.update(uli)
continue
# There are a number of tradeoffs in using the meijer g method.
# It can sometimes be a lot faster than other methods, and
# sometimes slower. And there are certain types of integrals for
# which it is more likely to work than others.
# These heuristics are incorporated in deciding what integration
# methods to try, in what order.
# See the integrate() docstring for details.
def try_meijerg(function, xab):
ret = None
if len(xab) == 3 and meijerg is not False:
x, a, b = xab
try:
res = meijerint_definite(function, x, a, b)
except NotImplementedError:
from sympy.integrals.meijerint import _debug
_debug('NotImplementedError from meijerint_definite')
res = None
if res is not None:
f, cond = res
if conds == 'piecewise':
ret = Piecewise((f, cond),
(Integral(function, (x, a, b)), True))
elif conds == 'separate':
if len(self.limits) != 1:
raise ValueError('conds=separate not supported in ' \
'multiple integrals')
ret = f, cond
else:
ret = f
return ret
meijerg1 = meijerg
if len(xab) == 3 and xab[1].is_real and xab[2].is_real \
and not function.is_Poly and \
(xab[1].has(oo, -oo) or xab[2].has(oo, -oo)):
ret = try_meijerg(function, xab)
if ret is not None:
function = ret
continue
else:
meijerg1 = False
# If the special meijerg code did not succeed finding a definite
# integral, then the code using meijerint_indefinite will not either
# (it might find an antiderivative, but the answer is likely to be
# nonsensical).
# Thus if we are requested to only use meijer g-function methods,
# we give up at this stage. Otherwise we just disable g-function
# methods.
if meijerg1 is False and meijerg is True:
antideriv = None
else:
antideriv = self._eval_integral(function, xab[0], meijerg1)
if antideriv is None and meijerg1 is True:
ret = try_meijerg(function, xab)
if ret is not None:
function = ret
continue
if antideriv is None:
undone_limits.append(xab)
else:
if len(xab) == 1:
function = antideriv
else:
if len(xab) == 3:
x, a, b = xab
if len(xab) == 2:
x, b = xab
a = None
if deep:
if isinstance(a, Basic):
a = a.doit(**hints)
if isinstance(b, Basic):
b = b.doit(**hints)
if antideriv.is_Poly:
gens = list(antideriv.gens)
gens.remove(x)
antideriv = antideriv.as_expr()
function = antideriv._eval_interval(x, a, b)
function = Poly(function, *gens)
else:
try:
function = antideriv._eval_interval(x, a, b)
except NotImplementedError:
# This can happen if _eval_interval depends in a
# complicated way on limits that cannot be computed
undone_limits.append(xab)
if undone_limits:
return self.func(*([function] + undone_limits))
return function
def _eval_derivative(self, sym):
"""Evaluate the derivative of the current Integral object by
differentiating under the integral sign [1], using the Fundamental
Theorem of Calculus [2] when possible.
Whenever an Integral is encountered that is equivalent to zero or
has an integrand that is independent of the variable of integration
those integrals are performed. All others are returned as Integral
instances which can be resolved with doit() (provided they are integrable).
References:
[1] http://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
[2] http://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> i = Integral(x + y, y, (y, 1, x))
>>> i.diff(x)
Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x))
>>> i.doit().diff(x) == i.diff(x).doit()
True
>>> i.diff(y)
0
The previous must be true since there is no y in the evaluated integral:
>>> i.free_symbols
set([x])
>>> i.doit()
2*x**3/3 - x/2 - 1/6
"""
# differentiate under the integral sign; we do not
# check for regularity conditions (TODO), see issue 1116
# get limits and the function
f, limits = self.function, list(self.limits)
# the order matters if variables of integration appear in the limits
# so work our way in from the outside to the inside.
limit = limits.pop(-1)
if len(limit) == 3:
x, a, b = limit
elif len(limit) == 2:
x, b = limit
a = None
else:
a = b = None
x = limit[0]
if limits: # f is the argument to an integral
f = Integral(f, *tuple(limits))
# assemble the pieces
def _do(f, ab):
dab_dsym = diff(ab, sym)
if not dab_dsym:
return S.Zero
if isinstance(f, Integral):
limits = [(x, x) if (len(l) == 1 and l[0] == x) else l
for l in f.limits]
f = Integral(f.function, *limits)
return f.subs(x, ab)*dab_dsym
rv = 0
if b is not None:
rv += _do(f, b)
if a is not None:
rv -= _do(f, a)
if len(limit) == 1 and sym == x:
# the dummy variable *is* also the real-world variable
arg = f
rv += arg
else:
# the dummy variable might match sym but it's
# only a dummy and the actual variable is determined
# by the limits, so mask off the variable of integration
# while differentiating
u = Dummy('u')
arg = f.subs(x, u).diff(sym).subs(u, x)
rv += Integral(arg, Tuple(x, a, b))
return rv
def _eval_integral(self, f, x, meijerg=None):
"""Calculate the anti-derivative to the function f(x).
This is a powerful function that should in theory be able to integrate
everything that can be integrated. If you find something that it
cannot integrate, it is easy to add support for it.
(1) Simple heuristics (based on pattern matching and integral table):
- most frequently used functions (e.g. polynomials)
- functions non-integrable by any of the following algorithms (e.g.
exp(-x**2))
(2) Integration of rational functions:
(a) using apart() - apart() is full partial fraction decomposition
procedure based on Bronstein-Salvy algorithm. It gives formal
decomposition with no polynomial factorization at all (so it's
fast and gives the most general results). However it needs an
implementation of the RootsOf class.
(b) using Trager's algorithm - possibly faster than (a) but needs
implementation :)
(3) Whichever implementation of pmInt (Mateusz, Kirill's or a
combination of both).
- this way we can handle efficiently huge class of elementary and
special functions
(4) Recursive Risch algorithm as described in Bronstein's integration
tutorial.
- this way we can handle those integrable functions for which (3)
fails
(5) Powerful heuristics based mostly on user defined rules.
- handle complicated, rarely used cases
"""
# if it is a poly(x) then let the polynomial integrate itself (fast)
#
# It is important to make this check first, otherwise the other code
# will return a sympy expression instead of a Polynomial.
#
# see Polynomial for details.
if isinstance(f, Poly) and not meijerg:
return f.integrate(x)
# Piecewise antiderivatives need to call special integrate.
if f.func is Piecewise:
return f._eval_integral(x)
# let's cut it short if `f` does not depend on `x`
if not f.has(x):
return f*x
# try to convert to poly(x) and then integrate if successful (fast)
poly = f.as_poly(x)
if poly is not None and not meijerg:
return poly.integrate().as_expr()
# since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
# we are going to handle Add terms separately,
# if `f` is not Add -- we only have one term
parts = []
args = Add.make_args(f)
for g in args:
coeff, g = g.as_independent(x)
# g(x) = const
if g is S.One and not meijerg:
parts.append(coeff*x)
continue
# g(x) = expr + O(x**n)
order_term = g.getO()
if order_term is not None:
h = self._eval_integral(g.removeO(), x)
if h is not None:
h_order_expr = self._eval_integral(order_term.expr, x)
if h_order_expr is not None:
h_order_term = order_term.func(h_order_expr, *order_term.variables)
parts.append(coeff*(h + h_order_term))
continue
# NOTE: if there is O(x**n) and we fail to integrate then there is
# no point in trying other methods because they will fail anyway.
return None
# c
# g(x) = (a*x+b)
if g.is_Pow and not g.exp.has(x) and not meijerg:
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
M = g.base.match(a*x + b)
if M is not None:
if g.exp == -1:
h = C.log(g.base)
else:
h = g.base**(g.exp + 1) / (g.exp + 1)
parts.append(coeff * h / M[a])
continue
# poly(x)
# g(x) = -------
# poly(x)
if g.is_rational_function(x) and not meijerg:
parts.append(coeff * ratint(g, x))
continue
if not meijerg:
# g(x) = Mul(trig)
h = trigintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# g(x) has at least a DiracDelta term
h = deltaintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
if not meijerg:
# fall back to the more general algorithm
try:
h = heurisch(g, x, hints=[])
except PolynomialError:
# XXX: this exception means there is a bug in the
# implementation of heuristic Risch integration
# algorithm.
h = None
else:
h = None
if meijerg is not False and h is None:
# rewrite using G functions
try:
h = meijerint_indefinite(g, x)
except NotImplementedError:
from sympy.integrals.meijerint import _debug
_debug('NotImplementedError from meijerint_indefinite')
h = None
if h is not None:
parts.append(coeff * h)
continue
# if we failed maybe it was because we had
# a product that could have been expanded,
# so let's try an expansion of the whole
# thing before giving up; we don't try this
# out the outset because there are things
# that cannot be solved unless they are
# NOT expanded e.g., x**x*(1+log(x)). There
# should probably be a checker somewhere in this
# routine to look for such cases and try to do
# collection on the expressions if they are already
# in an expanded form
if not h and len(args) == 1:
f = f.expand(mul=True, deep=False)
if f.is_Add:
return self._eval_integral(f, x, meijerg)
if h is not None:
parts.append(coeff * h)
else:
return None
return Add(*parts)
def _eval_lseries(self, x):
for term in self.function.lseries(x):
yield integrate(term, *self.limits)
def _eval_nseries(self, x, n, logx):
terms, order = self.function.nseries(x, n=n, logx=logx).as_coeff_add(C.Order)
return integrate(terms, *self.limits) + Add(*order)*x
def _eval_subs(self, old, new):
"""
Substitute old with new in the integrand and the limits, but don't
change anything that is (or corresponds to) a dummy variable of
integration.
The normal substitution semantics -- traversing all arguments looking
for matching patterns -- should not be applied to the Integrals since
changing the integration variables should also entail a change in the
integration limits (which should be done with the transform method). So
this method just makes changes in the integrand and the limits.
Not all instances of a given variable are conceptually the same: the
first argument of the limit tuple with length greater than 1 and any
corresponding variable in the integrand are dummy variables while
every other symbol is a symbol that will be unchanged when the integral
is evaluated. For example, the dummy variables for ``i`` can be seen
as symbols with a prepended underscore:
>>> from sympy import Integral
>>> from sympy.abc import a, b, c, x, y
>>> i = Integral(a + x, (a, a, b))
>>> i.as_dummy()
Integral(_a + x, (_a, a, b))
If you want to change the lower limit to 1 there is no reason to
prohibit this since it is not conceptually related to the integration
variable, _a. Nor is there reason to disallow changing the b to 1.
If a second limit were added, however, as in:
>>> i = Integral(x + a, (a, a, b), (b, 1, 2))
the dummy variables become:
>>> i.as_dummy()
Integral(_a + x, (_a, a, _b), (_b, 1, 2))
Note that the ``b`` of the first limit is now a dummy variable since
``b`` is a dummy variable in the second limit.
The "evaluate at" form of an integral allows some flexibility in how
the integral will be treated by subs: if there is no second argument,
none of the symbols matching the integration symbol are considered to
be dummy variables, but if an explicit expression is given for a limit
then the usual interpretation of the integration symbol as a dummy
symbol applies:
>>> Integral(x).as_dummy() # implicit integration wrt x
Integral(x, x)
>>> Integral(x, x).as_dummy()
Integral(x, x)
>>> _.subs(x, 1)
Integral(1, x)
>>> i = Integral(x, (x, x))
>>> i.as_dummy()
Integral(_x, (_x, x))
>>> i.subs(x, 1)
Integral(x, (x, 1))
Summary: no variable of the integrand or limit can be the target of
substitution if it appears as a variable of integration in a limit
positioned to the right of it. The only exception is for a variable
that defines an indefinite integral limit (a single symbol): that
symbol *can* be replaced in the integrand.
>>> i = Integral(a + x, (a, a, 3), (b, x, c))
>>> i.free_symbols # only these can be changed
set([a, c, x])
>>> i.subs(a, c) # note that the variable of integration is unchanged
Integral(a + x, (a, c, 3), (b, x, c))
>>> i.subs(a + x, b) == i # there is no x + a, only x + <a>
True
>>> i.subs(x, y - c)
Integral(a - c + y, (a, a, 3), (b, -c + y, c))
"""
integrand, limits = self.function, self.limits
old_atoms = old.free_symbols
limits = list(limits)
dummies = set()
for i in xrange(-1, -len(limits) - 1, -1):
xab = limits[i]
if len(xab) == 1:
continue
if not dummies.intersection(old_atoms):
limits[i] = Tuple(xab[0],
*[l._subs(old, new) for l in xab[1:]])
dummies.add(xab[0])
if not dummies.intersection(old_atoms):
integrand = integrand.subs(old, new)
return Integral(integrand, *limits)
def as_sum(self, n, method="midpoint"):
"""
Approximates the integral by a sum.
method ... one of: left, right, midpoint
This is basically just the rectangle method [1]; the only difference is
where the function value is taken in each interval.
[1] http://en.wikipedia.org/wiki/Rectangle_method
**method = midpoint**:
Uses the midpoint rule with n subintervals to evaluate the integral.
The midpoint rule approximates the given area (e.g. the definite
integral) by rectangles whose heights are the function values at the
point exactly in the middle of each subinterval (hence the name
midpoint method). See [1] for more information.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> from sympy.integrals import Integral
>>> e = Integral(sqrt(x**3+1), (x, 2, 10))
>>> e
Integral(sqrt(x**3 + 1), (x, 2, 10))
>>> e.as_sum(4, method="midpoint")
4*sqrt(7) + 6*sqrt(14) + 4*sqrt(86) + 2*sqrt(730)
>>> e.as_sum(4, method="midpoint").n()
124.164447891310
>>> e.n()
124.616199194723
**method=left**:
Uses the rectangle rule with n subintervals to evaluate the integral;
in each subinterval the function value is taken at the left-hand
endpoint of the interval.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> e = Integral(sqrt(x**3+1), (x, 2, 10))
>>> e
Integral(sqrt(x**3 + 1), (x, 2, 10))
>>> e.as_sum(4, method="left")
6 + 2*sqrt(65) + 2*sqrt(217) + 6*sqrt(57)
>>> e.as_sum(4, method="left").n()
96.8853618335341
>>> e.n()
124.616199194723
See Also
========
Integral.doit : Perform the integration using any hints
"""
limits = self.limits
if len(limits) > 1:
raise NotImplementedError("Multidimensional midpoint rule not implemented yet")
else:
limit = limits[0]
if n <= 0:
raise ValueError("n must be > 0")
if n == oo:
raise NotImplementedError("Infinite summation not yet implemented")
sym, lower_limit, upper_limit = limit
dx = (upper_limit - lower_limit)/n
result = 0.
for i in range(n):
if method == "midpoint":
xi = lower_limit + i*dx + dx/2
elif method == "left":
xi = lower_limit + i*dx
elif method == "right":
xi = lower_limit + i*dx + dx
else:
raise NotImplementedError("Unknown method %s" % method)
result += self.function.subs(sym, xi)
return result*dx
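# Illustrative arithmetic only (editor's sketch, not part of the original
# SymPy source) of the midpoint rule implemented above: for f(x) = x**2 on
# [0, 2] with n = 4, as_sum uses dx = 1/2 and the midpoints 1/4, 3/4, 5/4,
# 7/4, giving
#     (1/16 + 9/16 + 25/16 + 49/16) * 1/2 = 21/8 = 2.625,
# compared with the exact value Integral(x**2, (x, 0, 2)).doit() = 8/3.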
@xthreaded
def integrate(*args, **kwargs):
"""integrate(f, var, ...)
Compute a definite or indefinite integral with respect to one or more
variables, using the Risch-Norman algorithm and table lookup. This
procedure is able to handle elementary algebraic and transcendental
functions and also a huge class of special functions, including the
Airy, Bessel, Whittaker and Lambert functions.
var can be:
- a symbol -- indefinite integration
- a tuple (symbol, a) -- indefinite integration with result
given with `a` replacing `symbol`
- a tuple (symbol, a, b) -- definite integration
Several variables can be specified, in which case the result is
multiple integration. (If var is omitted and the integrand is
univariate, the indefinite integral in that variable will be performed.)
Indefinite integrals are returned without terms that are independent
of the integration variables. (see examples)
Definite improper integrals often entail delicate convergence
conditions. Pass conds='piecewise', 'separate' or 'none' to have
these returned, respectively, as a Piecewise function, as a separate
result (i.e. result will be a tuple), or not at all (default is
'piecewise').
**Strategy**
SymPy uses various approaches to integration. One method is to find
an antiderivative for the integrand, and then use the fundamental
theorem of calculus. Various functions are implemented to integrate
polynomial, rational and trigonometric functions, and integrands
containing DiracDelta terms. There is also a (very successful,
albeit somewhat slow) general implementation of the heuristic Risch
algorithm. See the docstring of Integral._eval_integral() for more
details on computing the antiderivative using algebraic methods.
Another family of strategies comes from re-writing the integrand in
terms of so-called Meijer G-functions. Indefinite integrals of a
single G-function can always be computed, and the definite integral
of a product of two G-functions can be computed from zero to
infinity. Various strategies are implemented to rewrite integrands
as G-functions, and use this information to compute integrals (see
the ``meijerint`` module).
In general, the algebraic methods work best for computing
antiderivatives of (possibly complicated) combinations of elementary
functions. The G-function methods work best for computing definite
integrals from zero to infinity of moderately complicated
combinations of special functions, or indefinite integrals of very
simple combinations of special functions.
The strategy employed by the integration code is as follows:
- If computing a definite integral, and both limits are real,
and at least one limit is +- oo, try the G-function method of
definite integration first.
- Try to find an antiderivative, using all available methods, ordered
by performance (that is try fastest method first, slowest last; in
particular polynomial integration is tried first, Meijer
G-functions second to last, and heuristic Risch last).
- If still not successful, try G-functions irrespective of the
limits.
The option meijerg=True, False, None can be used to, respectively:
always use G-function methods and no others, never use G-function
methods, or use all available methods (in order as described above).
It defaults to None.
Examples
========
>>> from sympy import integrate, log, exp, oo
>>> from sympy.abc import a, x, y
>>> integrate(x*y, x)
x**2*y/2
>>> integrate(log(x), x)
x*log(x) - x
>>> integrate(log(x), (x, 1, a))
a*log(a) - a + 1
>>> integrate(x)
x**2/2
Terms that are independent of x are dropped by indefinite integration:
>>> from sympy import sqrt
>>> integrate(sqrt(1 + x), (x, 0, x))
2*(x + 1)**(3/2)/3 - 2/3
>>> integrate(sqrt(1 + x), x)
2*(x + 1)**(3/2)/3
>>> integrate(x*y)
Traceback (most recent call last):
...
ValueError: specify integration variables to integrate x*y
Note that ``integrate(x)`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
>>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'
Piecewise((gamma(a + 1), -re(a) < 1), (Integral(x**a*exp(-x), (x, 0, oo)), True))
>>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
gamma(a + 1)
>>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
(gamma(a + 1), -re(a) < 1)
See Also
========
Integral, Integral.doit
"""
meijerg = kwargs.pop('meijerg', None)
conds = kwargs.pop('conds', 'piecewise')
integral = Integral(*args, **kwargs)
if isinstance(integral, Integral):
return integral.doit(deep=False, meijerg=meijerg, conds=conds)
else:
return integral
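# Editor's sketch (illustrative, not part of the original module) of the
# keyword arguments documented above; ``integrate`` and ``meijerg`` are the
# public SymPy API, the specific integrand is just an example input.
#
#     >>> from sympy import integrate, exp, oo, Symbol
#     >>> from sympy.abc import x
#     >>> s = Symbol('s', positive=True)
#     >>> integrate(exp(-s*x), (x, 0, oo), meijerg=True)   # force the G-function path
#     1/s
#     >>> integrate(exp(-s*x), (x, 0, oo), meijerg=False)  # forbid G-functions
#     1/s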
@xthreaded
def line_integrate(field, curve, vars):
"""line_integrate(field, Curve, variables)
Compute the line integral.
Examples
========
>>> from sympy import Curve, line_integrate, E, ln
>>> from sympy.abc import x, y, t
>>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
>>> line_integrate(x + y, C, [x, y])
3*sqrt(2)
See Also
========
integrate, Integral
"""
F = sympify(field)
if not F:
raise ValueError("Expecting function specifying field as first argument.")
if not isinstance(curve, Curve):
raise ValueError("Expecting Curve entity as second argument.")
if not is_sequence(vars):
raise ValueError("Expecting ordered iterable for variables.")
if len(curve.functions) != len(vars):
raise ValueError("Field variable size does not match curve dimension.")
if curve.parameter in vars:
raise ValueError("Curve parameter clashes with field parameters.")
# Calculate derivatives for line parameter functions
# F(r) -> F(r(t)) and finally F(r(t)*r'(t))
Ft = F
dldt = 0
for i, var in enumerate(vars):
_f = curve.functions[i]
_dn = diff(_f, curve.parameter)
# ...arc length
dldt = dldt + (_dn * _dn)
Ft = Ft.subs(var, _f)
Ft = Ft * sqrt(dldt)
integral = Integral(Ft, curve.limits).doit(deep=False)
return integral
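# Sketch of the substitution performed above (editor's comment, not original
# code): for a curve r(t) = (f_1(t), ..., f_n(t)) with parameter range
# (t, t0, t1), the scalar line integral of F is rewritten as
#
#     Integral(F(f_1(t), ..., f_n(t)) * sqrt(f_1'(t)**2 + ... + f_n'(t)**2),
#              (t, t0, t1))
#
# which is exactly the Integral(Ft, curve.limits) evaluated by doit() above.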
| 35.921332 | 91 | 0.548525 |
3bd5af70804af8d53afcdcd6b8596262649f5bed | 75,940 | py | Python | holoviews/plotting/plot.py | wuyuanyi135/holoviews | f8e776b15ac3c08d0d8090eb53466aa4130977ee | ["BSD-3-Clause"] | null | null | null | holoviews/plotting/plot.py | wuyuanyi135/holoviews | f8e776b15ac3c08d0d8090eb53466aa4130977ee | ["BSD-3-Clause"] | null | null | null | holoviews/plotting/plot.py | wuyuanyi135/holoviews | f8e776b15ac3c08d0d8090eb53466aa4130977ee | ["BSD-3-Clause"] | null | null | null |
"""
Public API for all plots supported by HoloViews, regardless of
plotting package or backend. Every plotting class must be a subclass
of this Plot baseclass.
"""
from __future__ import absolute_import
import threading
import uuid
import warnings
from collections import Counter, defaultdict
from functools import partial
from itertools import groupby, product
import numpy as np
import param
from panel.config import config
from panel.io.notebook import push
from panel.io.state import state
from pyviz_comms import JupyterComm
from ..selection import NoOpSelectionDisplay
from ..core import OrderedDict
from ..core import util, traversal
from ..core.element import Element, Element3D
from ..core.overlay import Overlay, CompositeOverlay
from ..core.layout import Empty, NdLayout, Layout
from ..core.options import Store, Compositor, SkipRendering, lookup_options
from ..core.overlay import NdOverlay
from ..core.spaces import HoloMap, DynamicMap
from ..core.util import stream_parameters, isfinite
from ..element import Table, Graph, Contours
from ..streams import Stream, RangeXY, RangeX, RangeY
from ..util.transform import dim
from .util import (get_dynamic_mode, initialize_unbounded, dim_axis_label,
attach_streams, traverse_setter, get_nested_streams,
compute_overlayable_zorders, get_nested_plot_frame,
split_dmap_overlay, get_axis_padding, get_range,
get_minimum_span, get_plot_frame, scale_fontsize)
class Plot(param.Parameterized):
"""
Base class of all Plot classes in HoloViews, designed to be
general enough to use any plotting package or backend.
"""
backend = None
# A list of style options that may be supplied to the plotting
# call
style_opts = []
# Sometimes matplotlib doesn't support the common aliases.
# Use this list to disable any invalid style options
_disabled_opts = []
def __init__(self, renderer=None, root=None, **params):
params = {k: v for k, v in params.items()
if k in self.param}
super(Plot, self).__init__(**params)
self.renderer = renderer if renderer else Store.renderers[self.backend].instance()
self._force = False
self._comm = None
self._document = None
self._root = None
self._pane = None
self._triggering = []
self._trigger = []
self.set_root(root)
@property
def state(self):
"""
The plotting state that gets updated via the update method and
used by the renderer to generate output.
"""
raise NotImplementedError
def set_root(self, root):
"""
Sets the root model on all subplots.
"""
if root is None:
return
for plot in self.traverse(lambda x: x):
plot._root = root
@property
def root(self):
if self._root:
return self._root
elif 'plot' in self.handles and self.top_level:
return self.state
else:
return None
@property
def document(self):
return self._document
@document.setter
def document(self, doc):
if (doc and hasattr(doc, 'on_session_destroyed') and
self.root is self.handles.get('plot') and
not isinstance(self, GenericAdjointLayoutPlot)):
doc.on_session_destroyed(self._session_destroy)
if self._document:
if isinstance(self._document._session_destroyed_callbacks, set):
self._document._session_destroyed_callbacks.discard(self._session_destroy)
else:
self._document._session_destroyed_callbacks.pop(self._session_destroy, None)
self._document = doc
if self.subplots:
for plot in self.subplots.values():
if plot is not None:
plot.document = doc
@property
def pane(self):
return self._pane
@pane.setter
def pane(self, pane):
if (config.console_output != 'disable' and self.root and
self.root.ref['id'] not in state._handles and
isinstance(self.comm, JupyterComm)):
from IPython.display import display
handle = display(display_id=uuid.uuid4().hex)
state._handles[self.root.ref['id']] = (handle, [])
self._pane = pane
if self.subplots:
for plot in self.subplots.values():
if plot is not None:
plot.pane = pane
if not plot.root:
continue
for cb in getattr(plot, 'callbacks', []):
if hasattr(pane, '_on_error') and getattr(cb, 'comm', None):
cb.comm._on_error = partial(pane._on_error, plot.root.ref['id'])
elif self.root:
for cb in getattr(self, 'callbacks', []):
if hasattr(pane, '_on_error') and getattr(cb, 'comm', None):
cb.comm._on_error = partial(pane._on_error, self.root.ref['id'])
@property
def comm(self):
return self._comm
@comm.setter
def comm(self, comm):
self._comm = comm
if self.subplots:
for plot in self.subplots.values():
if plot is not None:
plot.comm = comm
def initialize_plot(self, ranges=None):
"""
Initialize the matplotlib figure.
"""
raise NotImplementedError
def update(self, key):
"""
Update the internal state of the Plot to represent the given
key tuple (where integers represent frames). Returns this
state.
"""
return self.state
def cleanup(self):
"""
Cleans up references to the plot on the attached Stream
subscribers.
"""
plots = self.traverse(lambda x: x, [Plot])
for plot in plots:
if not isinstance(plot, (GenericCompositePlot, GenericElementPlot, GenericOverlayPlot)):
continue
for stream in set(plot.streams):
stream._subscribers = [
(p, subscriber) for p, subscriber in stream._subscribers
if not util.is_param_method(subscriber) or
util.get_method_owner(subscriber) not in plots
]
def _session_destroy(self, session_context):
self.cleanup()
def refresh(self, **kwargs):
"""
Refreshes the plot by rerendering it and then pushing
the updated data if the plot has an associated Comm.
"""
if self.renderer.mode == 'server':
from bokeh.io import curdoc
thread = threading.current_thread()
thread_id = thread.ident if thread else None
if (curdoc() is not self.document or (state._thread_id is not None and
thread_id != state._thread_id)):
# If we do not have the Document lock, schedule refresh as callback
self._triggering += [s for p in self.traverse(lambda x: x, [Plot])
for s in getattr(p, 'streams', []) if s._triggering]
if self.document.session_context:
self.document.add_next_tick_callback(self.refresh)
return
# Ensure that server based tick callbacks maintain stream triggering state
for s in self._triggering:
s._triggering = True
try:
traverse_setter(self, '_force', True)
key = self.current_key if self.current_key else self.keys[0]
dim_streams = [stream for stream in self.streams
if any(c in self.dimensions for c in stream.contents)]
stream_params = stream_parameters(dim_streams)
key = tuple(None if d in stream_params else k
for d, k in zip(self.dimensions, key))
stream_key = util.wrap_tuple_streams(key, self.dimensions, self.streams)
self._trigger_refresh(stream_key)
if self.top_level:
self.push()
except Exception as e:
raise e
finally:
# Reset triggering state
for s in self._triggering:
s._triggering = False
self._triggering = []
def _trigger_refresh(self, key):
"Triggers update to a plot on a refresh event"
# Update if not top-level, batched or an ElementPlot
if not self.top_level or isinstance(self, GenericElementPlot):
self.update(key)
def push(self):
"""
Pushes plot updates to the frontend.
"""
root = self._root
if (root and self.pane is not None and
root.ref['id'] in self.pane._plots):
child_pane = self.pane._plots[root.ref['id']][1]
else:
child_pane = None
if self.renderer.backend != 'bokeh' and child_pane is not None:
child_pane.object = self.renderer.get_plot_state(self)
elif (self.renderer.mode != 'server' and root and
'embedded' not in root.tags and self.document and self.comm):
push(self.document, self.comm)
@property
def id(self):
return self.comm.id if self.comm else id(self.state)
def __len__(self):
"""
Returns the total number of available frames.
"""
raise NotImplementedError
@classmethod
def lookup_options(cls, obj, group):
return lookup_options(obj, group, cls.backend)
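# Editor's sketch (hypothetical, not part of HoloViews) of the minimal surface
# a concrete backend subclass is expected to provide on top of this baseclass:
# a ``state`` property returning the backend figure and an ``initialize_plot``
# that builds it. ``MyBackendPlot`` and ``make_backend_figure`` are invented
# names for illustration only.
#
#     class MyBackendPlot(Plot):
#         backend = 'mybackend'   # must match a renderer registered in Store
#
#         def __init__(self, obj, renderer=None, **params):
#             super(MyBackendPlot, self).__init__(renderer=renderer, **params)
#             self._obj, self._fig = obj, None
#
#         @property
#         def state(self):
#             return self._fig
#
#         def initialize_plot(self, ranges=None):
#             self._fig = make_backend_figure(self._obj)  # hypothetical helper
#             return self._fig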
class PlotSelector(object):
"""
Proxy that allows dynamic selection of a plotting class based on a
function of the plotted object. Behaves like a Plot class and
presents the same parameterized interface.
"""
_disabled_opts = []
def __init__(self, selector, plot_classes, allow_mismatch=False):
"""
The selector function accepts a component instance and returns
the appropriate key to index plot_classes dictionary.
"""
self.selector = selector
self.plot_classes = OrderedDict(plot_classes)
interface = self._define_interface(self.plot_classes.values(), allow_mismatch)
self.style_opts, self.plot_options = interface
def selection_display(self, obj):
plt_class = self.get_plot_class(obj)
return getattr(plt_class, 'selection_display', None)
def _define_interface(self, plots, allow_mismatch):
parameters = [{k:v.precedence for k,v in plot.param.params().items()
if ((v.precedence is None) or (v.precedence >= 0))}
for plot in plots]
param_sets = [set(params.keys()) for params in parameters]
if not allow_mismatch and not all(pset == param_sets[0] for pset in param_sets):
raise Exception("All selectable plot classes must have identical plot options.")
styles = [plot.style_opts for plot in plots]
if not allow_mismatch and not all(style == styles[0] for style in styles):
raise Exception("All selectable plot classes must have identical style options.")
plot_params = {p: v for params in parameters for p, v in params.items()}
return [s for style in styles for s in style], plot_params
def __call__(self, obj, **kwargs):
plot_class = self.get_plot_class(obj)
return plot_class(obj, **kwargs)
def get_plot_class(self, obj):
key = self.selector(obj)
if key not in self.plot_classes:
msg = "Key %s returned by selector not in set: %s"
raise Exception(msg % (key, ', '.join(self.plot_classes.keys())))
return self.plot_classes[key]
def __setattr__(self, label, value):
try:
return super(PlotSelector, self).__setattr__(label, value)
except:
raise Exception("Please set class parameters directly on classes %s"
% ', '.join(str(cls) for cls in self.__dict__['plot_classes'].values()))
def params(self):
return self.plot_options
@property
def param(self):
return self.plot_options
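# Illustrative use of PlotSelector (editor's sketch; ``CurvePlot`` and
# ``RasterizedCurvePlot`` are hypothetical stand-ins for real backend classes):
# the selector function maps the plotted object to a key, and the key picks
# the concrete plotting class to instantiate.
#
#     def pick_plot_class(obj):                     # hypothetical selector
#         return 'rasterized' if len(obj) > 10000 else 'standard'
#
#     selector = PlotSelector(pick_plot_class,
#                             [('standard', CurvePlot),
#                              ('rasterized', RasterizedCurvePlot)],
#                             allow_mismatch=True)
#     plot = selector(curve)   # dispatches to the class chosen by the selector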
class DimensionedPlot(Plot):
"""
DimensionedPlot implements a number of useful methods
to compute dimension ranges and titles containing the
dimension values.
"""
fontsize = param.Parameter(default=None, allow_None=True, doc="""
Specifies various font sizes of the displayed text.
Finer control is available by supplying a dictionary where any
unmentioned keys revert to the default sizes, e.g:
{'ticks':20, 'title':15,
'ylabel':5, 'xlabel':5, 'zlabel':5,
'legend':8, 'legend_title':13}
You can set the font size of 'zlabel', 'ylabel' and 'xlabel'
together using the 'labels' key.""")
fontscale = param.Number(default=None, doc="""
Scales the size of all fonts.""")
#Allowed fontsize keys
_fontsize_keys = ['xlabel','ylabel', 'zlabel', 'clabel', 'labels',
'xticks', 'yticks', 'zticks', 'cticks', 'ticks',
'minor_xticks', 'minor_yticks', 'minor_ticks',
'title', 'legend', 'legend_title',
]
show_title = param.Boolean(default=True, doc="""
Whether to display the plot title.""")
title = param.String(default="{label} {group}\n{dimensions}", doc="""
The formatting string for the title of this plot, allows defining
a label group separator and dimension labels.""")
title_format = param.String(default=None, doc="Alias for title.")
normalize = param.Boolean(default=True, doc="""
Whether to compute ranges across all Elements at this level
of plotting. Allows selecting normalization at different levels
for nested data containers.""")
projection = param.Parameter(default=None, doc="""
Allows supplying a custom projection to transform the axis
coordinates during display. Example projections include '3d'
and 'polar' projections supported by some backends. Depending
on the backend, custom projection objects may be supplied.""")
def __init__(self, keys=None, dimensions=None, layout_dimensions=None,
uniform=True, subplot=False, adjoined=None, layout_num=0,
style=None, subplots=None, dynamic=False, **params):
self.subplots = subplots
self.adjoined = adjoined
self.dimensions = dimensions
self.layout_num = layout_num
self.layout_dimensions = layout_dimensions
self.subplot = subplot
self.keys = keys if keys is None else list(keys)
self.uniform = uniform
self.dynamic = dynamic
self.drawn = False
self.handles = {}
self.group = None
self.label = None
self.current_frame = None
self.current_key = None
self.ranges = {}
self._updated = False # Whether the plot should be marked as updated
super(DimensionedPlot, self).__init__(**params)
def __getitem__(self, frame):
"""
Get the state of the Plot for a given frame number.
"""
if isinstance(frame, int) and frame > len(self):
self.param.warning("Showing last frame available: %d" % len(self))
if not self.drawn: self.handles['fig'] = self.initialize_plot()
if not isinstance(frame, tuple):
frame = self.keys[frame]
self.update_frame(frame)
return self.state
def _get_frame(self, key):
"""
Required on each plotting class to extract the data corresponding
just to the current frame from the object.
"""
pass
def matches(self, spec):
"""
Matches a specification against the current Plot.
"""
if callable(spec) and not isinstance(spec, type): return spec(self)
elif isinstance(spec, type): return isinstance(self, spec)
else:
raise ValueError("Matching specs have to be either a type or a callable.")
def traverse(self, fn=None, specs=None, full_breadth=True):
"""
Traverses any nested DimensionedPlot returning a list
of all plots that match the specs. The specs should
be supplied as a list of either Plot types or callables,
which should return a boolean given the plot class.
"""
accumulator = []
matches = specs is None
if not matches:
for spec in specs:
matches = self.matches(spec)
if matches: break
if matches:
accumulator.append(fn(self) if fn else self)
# Assumes composite objects are iterables
if hasattr(self, 'subplots') and self.subplots:
for el in self.subplots.values():
if el is None:
continue
accumulator += el.traverse(fn, specs, full_breadth)
if not full_breadth: break
return accumulator
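# Example traversal (editor's sketch; ``layout_plot`` is an assumed existing
# composite plot instance): collect all element-level subplots, or apply a
# function to each matching plot.
#
#     element_plots = layout_plot.traverse(specs=[GenericElementPlot])
#     plot_ids = layout_plot.traverse(fn=id, specs=[GenericElementPlot])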
def _frame_title(self, key, group_size=2, separator='\n'):
"""
Returns the formatted dimension group strings
for a particular frame.
"""
if self.layout_dimensions is not None:
dimensions, key = zip(*self.layout_dimensions.items())
elif not self.dynamic and (not self.uniform or len(self) == 1) or self.subplot:
return ''
else:
key = key if isinstance(key, tuple) else (key,)
dimensions = self.dimensions
dimension_labels = [dim.pprint_value_string(k) for dim, k in
zip(dimensions, key)]
groups = [', '.join(dimension_labels[i*group_size:(i+1)*group_size])
for i in range(len(dimension_labels))]
return util.bytes_to_unicode(separator.join(g for g in groups if g))
def _format_title(self, key, dimensions=True, separator='\n'):
if self.title_format:
self.param.warning('title_format is deprecated. Please use title instead')
label, group, type_name, dim_title = self._format_title_components(
key, dimensions=True, separator='\n'
)
custom_title = (self.title != self.param['title'].default)
if custom_title and self.title_format:
self.param.warning('Both title and title_format set. Using title')
title_str = (
self.title if custom_title or self.title_format is None
else self.title_format
)
title = util.bytes_to_unicode(title_str).format(
label=util.bytes_to_unicode(label),
group=util.bytes_to_unicode(group),
type=type_name,
dimensions=dim_title
)
return title.strip(' \n')
def _format_title_components(self, key, dimensions=True, separator='\n'):
"""
Determine components of title as used by _format_title method.
To be overridden in child classes.
Return signature: (label, group, type_name, dim_title)
"""
return (self.label, self.group, type(self).__name__, '')
def _get_fontsize_defaults(self):
"""
Should return the default fontsize for the following keywords:
* ticks
* minor_ticks
* label
* title
* legend
* legend_title
However, it may also provide more specific defaults for a
particular axis label or set of ticks, e.g. clabel or xticks.
"""
return {}
def _fontsize(self, key, label='fontsize', common=True):
if not self.fontsize and not self.fontscale:
return {}
elif not isinstance(self.fontsize, dict) and self.fontsize is not None and common:
return {label: scale_fontsize(self.fontsize, self.fontscale)}
fontsize = self.fontsize if isinstance(self.fontsize, dict) else {}
unknown_keys = set(fontsize.keys()) - set(self._fontsize_keys)
if unknown_keys:
msg = "Popping unknown keys %r from fontsize dictionary.\nValid keys: %r"
self.param.warning(msg % (list(unknown_keys), self._fontsize_keys))
for key in unknown_keys: fontsize.pop(key, None)
defaults = self._get_fontsize_defaults()
size = None
if key in fontsize:
size = fontsize[key]
elif key in ['zlabel', 'ylabel', 'xlabel', 'clabel']:
size = fontsize.get('labels', defaults.get(key, defaults.get('label')))
elif key in ['xticks', 'yticks', 'zticks', 'cticks']:
size = fontsize.get('ticks', defaults.get(key, defaults.get('ticks')))
elif key in ['minor_xticks', 'minor_yticks']:
size = fontsize.get('minor_ticks', defaults.get(key, defaults.get('minor_ticks')))
elif key in ('legend', 'legend_title', 'title'):
size = defaults.get(key)
if size is None:
return {}
return {label: scale_fontsize(size, self.fontscale)}
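# Rough resolution order implemented above (editor's comment): explicit keys
# win, the grouped 'labels'/'ticks' keys fill in related entries, and unknown
# keys are dropped with a warning. For example, with
#
#     plot.fontsize = {'title': 18, 'labels': 12, 'xticks': 8}
#
# _fontsize('title') yields {'fontsize': 18}, _fontsize('xlabel') falls back
# to the 'labels' entry ({'fontsize': 12}) and _fontsize('xticks') uses the
# explicit value ({'fontsize': 8}).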
def compute_ranges(self, obj, key, ranges):
"""
Given an object, a specific key, and the normalization options,
this method will find the specified normalization options on
the appropriate OptionTree, group the elements according to
the selected normalization option (i.e. either per frame or
over the whole animation) and finally compute the dimension
ranges in each group. The new set of ranges is returned.
"""
all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element]))
if obj is None or not self.normalize or all_table:
return OrderedDict()
# Get inherited ranges
ranges = self.ranges if ranges is None else {k: dict(v) for k, v in ranges.items()}
# Get element identifiers from current object and resolve
# with selected normalization options
norm_opts = self._get_norm_opts(obj)
# Traverse displayed object if normalization applies
# at this level, and ranges for the group have not
# been supplied from a composite plot
return_fn = lambda x: x if isinstance(x, Element) else None
for group, (axiswise, framewise) in norm_opts.items():
axiswise = (not getattr(self, 'shared_axes', True)) or (axiswise)
elements = []
# Skip if ranges are cached or already computed by a
# higher-level container object.
framewise = framewise or self.dynamic or len(elements) == 1
if not framewise: # Traverse to get all elements
elements = obj.traverse(return_fn, [group])
elif key is not None: # Traverse to get elements for each frame
frame = self._get_frame(key)
elements = [] if frame is None else frame.traverse(return_fn, [group])
# Only compute ranges if not axiswise on a composite plot
# or not framewise on an Overlay or ElementPlot
if (not (axiswise and not isinstance(obj, HoloMap)) or
(not framewise and isinstance(obj, HoloMap))):
self._compute_group_range(group, elements, ranges, framewise)
self.ranges.update(ranges)
return ranges
def _get_norm_opts(self, obj):
"""
Gets the normalization options for a LabelledData object by
traversing the object to find elements and their ids.
The id is then used to select the appropriate OptionsTree,
accumulating the normalization options into a dictionary.
Returns a dictionary of normalization options for each
element in the tree.
"""
norm_opts = {}
# Get all elements' type.group.label specs and ids
type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False),
util.label_sanitizer(x.label, escape=False))) \
if isinstance(x, Element) else None
element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn)
if idspec is not None}
# Group elements specs by ID and override normalization
# options sequentially
key_fn = lambda x: -1 if x[0] is None else x[0]
id_groups = groupby(sorted(element_specs, key=key_fn), key_fn)
for gid, element_spec_group in id_groups:
gid = None if gid == -1 else gid
group_specs = [el for _, el in element_spec_group]
backend = self.renderer.backend
optstree = Store.custom_options(
backend=backend).get(gid, Store.options(backend=backend))
# Get the normalization options for the current id
# and match against customizable elements
for opts in optstree:
path = tuple(opts.path.split('.')[1:])
applies = any(path == spec[:i] for spec in group_specs
for i in range(1, 4))
if applies and 'norm' in opts.groups:
nopts = opts['norm'].options
if 'axiswise' in nopts or 'framewise' in nopts:
norm_opts.update({path: (nopts.get('axiswise', False),
nopts.get('framewise', False))})
element_specs = [spec for _, spec in element_specs]
norm_opts.update({spec: (False, False) for spec in element_specs
if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))})
return norm_opts
@classmethod
def _compute_group_range(cls, group, elements, ranges, framewise):
# Iterate over all elements in a normalization group
# and accumulate their ranges into the supplied dictionary.
elements = [el for el in elements if el is not None]
group_ranges = OrderedDict()
for el in elements:
if isinstance(el, (Empty, Table)): continue
opts = cls.lookup_options(el, 'style')
plot_opts = cls.lookup_options(el, 'plot')
# Compute normalization for color dim transforms
for k, v in dict(opts.kwargs, **plot_opts.kwargs).items():
if not isinstance(v, dim) or ('color' not in k and k != 'magnitude'):
continue
if isinstance(v, dim) and v.applies(el):
dim_name = repr(v)
if dim_name in ranges.get(group, {}) and not framewise:
continue
values = v.apply(el, expanded=False, all_values=True)
factors = None
if values.dtype.kind == 'M':
drange = values.min(), values.max()
elif util.isscalar(values):
drange = values, values
elif len(values) == 0:
drange = np.NaN, np.NaN
else:
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
drange = (np.nanmin(values), np.nanmax(values))
except:
factors = util.unique_array(values)
if dim_name not in group_ranges:
group_ranges[dim_name] = {'data': [], 'hard': [], 'soft': []}
if factors is not None:
if 'factors' not in group_ranges[dim_name]:
group_ranges[dim_name]['factors'] = []
group_ranges[dim_name]['factors'].append(factors)
else:
group_ranges[dim_name]['data'].append(drange)
# Compute dimension normalization
for el_dim in el.dimensions('ranges'):
dim_name = el_dim.name
if dim_name in ranges.get(group, {}) and not framewise:
continue
if hasattr(el, 'interface'):
if isinstance(el, Graph) and el_dim in el.nodes.dimensions():
dtype = el.nodes.interface.dtype(el.nodes, el_dim)
elif isinstance(el, Contours) and el.level is not None:
dtype = np.array([el.level]).dtype # Remove when deprecating level
else:
dtype = el.interface.dtype(el, el_dim)
else:
dtype = None
if all(util.isfinite(r) for r in el_dim.range):
data_range = (None, None)
elif dtype is not None and dtype.kind in 'SU':
data_range = ('', '')
elif isinstance(el, Graph) and el_dim in el.kdims[:2]:
data_range = el.nodes.range(2, dimension_range=False)
else:
data_range = el.range(el_dim, dimension_range=False)
if dim_name not in group_ranges:
group_ranges[dim_name] = {'data': [], 'hard': [], 'soft': []}
group_ranges[dim_name]['data'].append(data_range)
group_ranges[dim_name]['hard'].append(el_dim.range)
group_ranges[dim_name]['soft'].append(el_dim.soft_range)
if (any(isinstance(r, util.basestring) for r in data_range) or
el_dim.type is not None and issubclass(el_dim.type, util.basestring)):
if 'factors' not in group_ranges[dim_name]:
group_ranges[dim_name]['factors'] = []
if el_dim.values not in ([], None):
values = el_dim.values
elif el_dim in el:
if isinstance(el, Graph) and el_dim in el.kdims[:2]:
# Graph start/end normalization should include all node indices
values = el.nodes.dimension_values(2, expanded=False)
else:
values = el.dimension_values(el_dim, expanded=False)
elif isinstance(el, Graph) and el_dim in el.nodes:
values = el.nodes.dimension_values(el_dim, expanded=False)
if (isinstance(values, np.ndarray) and values.dtype.kind == 'O' and
all(isinstance(v, (np.ndarray)) for v in values)):
values = np.concatenate(values)
factors = util.unique_array(values)
group_ranges[dim_name]['factors'].append(factors)
group_dim_ranges = defaultdict(dict)
for gdim, values in group_ranges.items():
matching = True
for t, rs in values.items():
if t == 'factors':
continue
matching &= (
len({'date' if isinstance(v, util.datetime_types) else 'number'
for rng in rs for v in rng if util.isfinite(v)}) < 2
)
if matching:
group_dim_ranges[gdim] = values
dim_ranges = []
for gdim, values in group_dim_ranges.items():
hard_range = util.max_range(values['hard'], combined=False)
soft_range = util.max_range(values['soft'])
data_range = util.max_range(values['data'])
combined = util.dimension_range(data_range[0], data_range[1],
hard_range, soft_range)
dranges = {'data': data_range, 'hard': hard_range,
'soft': soft_range, 'combined': combined}
if 'factors' in values:
dranges['factors'] = util.unique_array([
v for fctrs in values['factors'] for v in fctrs])
dim_ranges.append((gdim, dranges))
if group not in ranges:
ranges[group] = OrderedDict(dim_ranges)
else:
ranges[group].update(OrderedDict(dim_ranges))
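# Shape of the accumulated structure (editor's sketch with made-up numbers):
# after this classmethod runs, ``ranges`` maps each normalization group to a
# per-dimension record, e.g.
#
#     ranges['Image'] = {
#         'x': {'data': (0, 10), 'hard': (None, None),
#               'soft': (None, None), 'combined': (0, 10)},
#         'z': {'data': (-1, 1), 'hard': (None, None),
#               'soft': (None, None), 'combined': (-1, 1)},
#     }
#
# with an additional 'factors' entry holding the unique categories for
# string/categorical dimensions.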
@classmethod
def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True):
"""
Traverses the supplied object getting all options in opts for
the specified opt_type and specs. Also takes into account the
plotting class defaults for plot options. If a keyfn is
supplied the returned options will be grouped by the returned
keys.
"""
def lookup(x):
"""
Looks up options for object, including plot defaults.
keyfn determines returned key otherwise None key is used.
"""
options = cls.lookup_options(x, opt_type)
selected = {o: options.options[o]
for o in opts if o in options.options}
if opt_type == 'plot' and defaults:
plot = Store.registry[cls.backend].get(type(x))
selected['defaults'] = {o: getattr(plot, o) for o in opts
if o not in selected and hasattr(plot, o)}
key = keyfn(x) if keyfn else None
return (key, selected)
# Traverse object and accumulate options by key
traversed = obj.traverse(lookup, specs)
options = OrderedDict()
default_opts = defaultdict(lambda: defaultdict(list))
for key, opts in traversed:
defaults = opts.pop('defaults', {})
if key not in options:
options[key] = {}
for opt, v in opts.items():
if opt not in options[key]:
options[key][opt] = []
options[key][opt].append(v)
for opt, v in defaults.items():
default_opts[key][opt].append(v)
# Merge defaults into dictionary if not explicitly specified
for key, opts in default_opts.items():
for opt, v in opts.items():
if opt not in options[key]:
options[key][opt] = v
return options if keyfn else options[None]
@classmethod
def _get_projection(cls, obj):
"""
Uses traversal to find the appropriate projection
for a nested object. Respects projections set on
Overlays before considering Element-based settings, and
finally falls back to the default projection on the
plot type. If more than one non-None projection
type is found an exception is raised.
"""
isoverlay = lambda x: isinstance(x, CompositeOverlay)
element3d = obj.traverse(lambda x: x, [Element3D])
if element3d:
return '3d'
opts = cls._traverse_options(obj, 'plot', ['projection'],
[CompositeOverlay, Element],
keyfn=isoverlay)
from_overlay = not all(p is None for p in opts.get(True, {}).get('projection', []))
projections = opts.get(from_overlay, {}).get('projection', [])
custom_projs = [p for p in projections if p is not None]
if len(set(custom_projs)) > 1:
raise Exception("An axis may only be assigned one projection type")
return custom_projs[0] if custom_projs else None
def update(self, key):
if len(self) == 1 and ((key == 0) or (key == self.keys[0])) and not self.drawn:
return self.initialize_plot()
item = self.__getitem__(key)
self.traverse(lambda x: setattr(x, '_updated', True))
return item
def __len__(self):
"""
Returns the total number of available frames.
"""
return len(self.keys)
class CallbackPlot(object):
backend = None
def _construct_callbacks(self):
"""
Initializes any callbacks for streams which have defined
the plotted object as a source.
"""
cb_classes = set()
registry = list(Stream.registry.items())
callbacks = Stream._callbacks[self.backend]
for source in self.link_sources:
streams = [
s for src, streams in registry for s in streams
if src is source or (src._plot_id is not None and
src._plot_id == source._plot_id)]
cb_classes |= {(callbacks[type(stream)], stream) for stream in streams
if type(stream) in callbacks and stream.linked
and stream.source is not None}
cbs = []
sorted_cbs = sorted(cb_classes, key=lambda x: id(x[0]))
for cb, group in groupby(sorted_cbs, lambda x: x[0]):
cb_streams = [s for _, s in group]
cbs.append(cb(self, cb_streams, source))
return cbs
@property
def link_sources(self):
"Returns potential Link or Stream sources."
if isinstance(self, GenericOverlayPlot):
zorders = []
elif self.batched:
zorders = list(range(self.zorder, self.zorder+len(self.hmap.last)))
else:
zorders = [self.zorder]
if isinstance(self, GenericOverlayPlot) and not self.batched:
sources = []
elif not self.static or isinstance(self.hmap, DynamicMap):
sources = [o for i, inputs in self.stream_sources.items()
for o in inputs if i in zorders]
else:
sources = [self.hmap.last]
return sources
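# Editor's sketch (user-level code, not part of this module) of the linkage
# that _construct_callbacks resolves: a linked stream declares an element as
# its source, and the callback class registered for the backend is
# instantiated when that element is plotted. ``hv`` and ``data`` are assumed
# to exist for illustration.
#
#     points = hv.Points(data)
#     range_xy = RangeXY(source=points)   # linked stream on the element
#     # plotting ``points`` with a backend that registers a RangeXY callback
#     # causes _construct_callbacks to attach it to the resulting plot.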
class GenericElementPlot(DimensionedPlot):
"""
Plotting baseclass to render the contents of an Element. Implements
methods to get the correct frame given a HoloMap, as well as axis
labels, extents and titles.
"""
apply_ranges = param.Boolean(default=True, doc="""
Whether to compute the plot bounds from the data itself.""")
apply_extents = param.Boolean(default=True, doc="""
Whether to apply extent overrides on the Elements""")
bgcolor = param.ClassSelector(class_=(str, tuple), default=None, doc="""
If set bgcolor overrides the background color of the axis.""")
default_span = param.ClassSelector(default=2.0, class_=(int, float, tuple), doc="""
Defines the span of an axis if the axis range is zero, i.e. if
the lower and upper end of an axis are equal or no range is
defined at all. For example if there is a single datapoint at
0 a default_span of 2.0 will result in axis ranges spanning
from -1 to 1.""")
hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing a plot. The
hook is passed the plot object and the displayed element, and
other plotting handles can be accessed via plot.handles.""")
invert_axes = param.Boolean(default=False, doc="""
Whether to invert the x- and y-axis""")
invert_xaxis = param.Boolean(default=False, doc="""
Whether to invert the plot x-axis.""")
invert_yaxis = param.Boolean(default=False, doc="""
Whether to invert the plot y-axis.""")
finalize_hooks = param.HookList(default=[], doc="""
Deprecated; use hooks options instead.""")
logx = param.Boolean(default=False, doc="""
Whether the x-axis of the plot will be a log axis.""")
logy = param.Boolean(default=False, doc="""
Whether the y-axis of the plot will be a log axis.""")
padding = param.ClassSelector(default=0.1, class_=(int, float, tuple), doc="""
Fraction by which to increase auto-ranged extents to make
datapoints more visible around borders.
To compute padding, the axis whose screen size is largest is
chosen, and the range of that axis is increased by the
specified fraction along each axis. Other axes are then
padded ensuring that the amount of screen space devoted to
padding is equal for all axes. If specified as a tuple, the
int or float values in the tuple will be used for padding in
each axis, in order (x,y or x,y,z).
For example, for padding=0.2 on a 800x800-pixel plot, an x-axis
with the range [0,10] will be padded by 20% to be [-1,11], while
a y-axis with a range [0,1000] will be padded to be [-100,1100],
which should make the padding be approximately the same number of
pixels. But if the same plot is changed to have a height of only
200, the y-range will then be [-400,1400] so that the y-axis
padding will still match that of the x-axis.
It is also possible to declare non-equal padding value for the
lower and upper bound of an axis by supplying nested tuples,
e.g. padding=(0.1, (0, 0.1)) will pad the x-axis lower and
upper bound as well as the y-axis upper bound by a fraction of
0.1 while the y-axis lower bound is not padded at all.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
show_grid = param.Boolean(default=False, doc="""
Whether to show a Cartesian grid on the plot.""")
xaxis = param.ObjectSelector(default='bottom',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None, True, False], doc="""
Whether and where to display the xaxis.
The "bare" options allow suppressing all axis labels, including ticks and xlabel.
Valid options are 'top', 'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='left',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None, True, False], doc="""
Whether and where to display the yaxis.
The "bare" options allow suppressing all axis labels, including ticks and ylabel.
Valid options are 'left', 'right', 'bare', 'left-bare' and 'right-bare'.""")
xlabel = param.String(default=None, doc="""
An explicit override of the x-axis label, if set takes precedence
over the dimension label.""")
ylabel = param.String(default=None, doc="""
An explicit override of the y-axis label, if set takes precedence
over the dimension label.""")
xlim = param.Tuple(default=(np.nan, np.nan), length=2, doc="""
User-specified x-axis range limits for the plot, as a tuple (low,high).
If specified, takes precedence over data and dimension ranges.""")
ylim = param.Tuple(default=(np.nan, np.nan), length=2, doc="""
User-specified y-axis range limits for the plot, as a tuple (low,high).
If specified, takes precedence over data and dimension ranges.""")
zlim = param.Tuple(default=(np.nan, np.nan), length=2, doc="""
User-specified z-axis range limits for the plot, as a tuple (low,high).
If specified, takes precedence over data and dimension ranges.""")
xrotation = param.Integer(default=None, bounds=(0, 360), doc="""
Rotation angle of the xticks.""")
yrotation = param.Integer(default=None, bounds=(0, 360), doc="""
Rotation angle of the yticks.""")
xticks = param.Parameter(default=None, doc="""
Ticks along x-axis specified as an integer, explicit list of
tick locations, or bokeh Ticker object. If set to None default
bokeh ticking behavior is applied.""")
yticks = param.Parameter(default=None, doc="""
Ticks along y-axis specified as an integer, explicit list of
tick locations, or bokeh Ticker object. If set to None
default bokeh ticking behavior is applied.""")
# A dictionary mapping of the plot methods used to draw the
# glyphs corresponding to the ElementPlot. It can hold two
# keys: a 'single' implementation to draw an individual
# plot and a 'batched' method to draw multiple Elements at once
_plot_methods = {}
# Declares the options that are propagated from sub-elements of the
# plot, mostly useful for inheriting options from individual
# Elements on an OverlayPlot. Enabled by default in v1.7.
_propagate_options = []
v17_option_propagation = True
_selection_display = NoOpSelectionDisplay()
def __init__(self, element, keys=None, ranges=None, dimensions=None,
batched=False, overlaid=0, cyclic_index=0, zorder=0, style=None,
overlay_dims={}, stream_sources=[], streams=None, **params):
self.zorder = zorder
self.cyclic_index = cyclic_index
self.overlaid = overlaid
self.batched = batched
self.overlay_dims = overlay_dims
if not isinstance(element, (HoloMap, DynamicMap)):
self.hmap = HoloMap(initial_items=(0, element),
kdims=['Frame'], id=element.id)
else:
self.hmap = element
if overlaid:
self.stream_sources = stream_sources
else:
self.stream_sources = compute_overlayable_zorders(self.hmap)
plot_element = self.hmap.last
if self.batched and not isinstance(self, GenericOverlayPlot):
plot_element = plot_element.last
dynamic = isinstance(element, DynamicMap) and not element.unbounded
self.top_level = keys is None
if self.top_level:
dimensions = self.hmap.kdims
keys = list(self.hmap.data.keys())
self.style = self.lookup_options(plot_element, 'style') if style is None else style
plot_opts = self.lookup_options(plot_element, 'plot').options
if self.v17_option_propagation:
inherited = self._traverse_options(plot_element, 'plot',
self._propagate_options,
defaults=False)
plot_opts.update(**{k: v[0] for k, v in inherited.items()
if k not in plot_opts})
super(GenericElementPlot, self).__init__(keys=keys, dimensions=dimensions,
dynamic=dynamic,
**dict(params, **plot_opts))
self.streams = get_nested_streams(self.hmap) if streams is None else streams
# Attach streams if not overlaid and not a batched ElementPlot
if not (self.overlaid or (self.batched and not isinstance(self, GenericOverlayPlot))):
attach_streams(self, self.hmap)
# Update plot and style options for batched plots
if self.batched:
self.ordering = util.layer_sort(self.hmap)
overlay_opts = self.lookup_options(self.hmap.last, 'plot').options.items()
opts = {k: v for k, v in overlay_opts if k in self.param}
self.param.set_param(**opts)
self.style = self.lookup_options(plot_element, 'style').max_cycles(len(self.ordering))
else:
self.ordering = []
def get_zorder(self, overlay, key, el):
"""
Computes the z-order of element in the NdOverlay
taking into account possible batching of elements.
"""
spec = util.get_overlay_spec(overlay, key, el)
return self.ordering.index(spec)
def _updated_zorders(self, overlay):
specs = [util.get_overlay_spec(overlay, key, el)
for key, el in overlay.data.items()]
self.ordering = sorted(set(self.ordering+specs))
return [self.ordering.index(spec) for spec in specs]
def _get_frame(self, key):
if isinstance(self.hmap, DynamicMap) and self.overlaid and self.current_frame:
self.current_key = key
return self.current_frame
elif key == self.current_key and not self._force:
return self.current_frame
cached = self.current_key is None and not any(s._triggering for s in self.streams)
key_map = dict(zip([d.name for d in self.dimensions], key))
frame = get_plot_frame(self.hmap, key_map, cached)
traverse_setter(self, '_force', False)
if key not in self.keys and len(key) == self.hmap.ndims and self.dynamic:
self.keys.append(key)
self.current_frame = frame
self.current_key = key
return frame
def _execute_hooks(self, element):
"""
Executes finalize hooks
"""
if self.hooks and self.finalize_hooks:
self.param.warning(
"Supply either hooks or finalize_hooks not both, "
"using hooks and ignoring finalize_hooks.")
elif self.finalize_hooks:
self.param.warning(
"The finalize_hooks option is deprecated, use the "
"hooks option instead.")
hooks = self.hooks or self.finalize_hooks
for hook in hooks:
try:
hook(self, element)
except Exception as e:
self.param.warning("Plotting hook %r could not be "
"applied:\n\n %s" % (hook, e))
def get_aspect(self, xspan, yspan):
"""
Should define the aspect ratio of the plot.
"""
def get_padding(self, obj, extents):
"""
Computes padding along the axes taking into account the plot aspect.
"""
(x0, y0, z0, x1, y1, z1) = extents
padding_opt = self.lookup_options(obj, 'plot').kwargs.get('padding')
if self.overlaid:
padding = 0
elif padding_opt is None:
if self.param.objects('existing')['padding'].default is not self.padding:
padding = self.padding
else:
opts = self._traverse_options(
obj, 'plot', ['padding'], specs=[Element], defaults=True
)
padding = opts.get('padding')
if padding:
padding = padding[0]
else:
padding = self.padding
else:
padding = padding_opt
xpad, ypad, zpad = get_axis_padding(padding)
if not self.overlaid and not self.batched:
xspan = x1-x0 if util.is_number(x0) and util.is_number(x1) else None
yspan = y1-y0 if util.is_number(y0) and util.is_number(y1) else None
aspect = self.get_aspect(xspan, yspan)
if aspect > 1:
xpad = tuple(xp/aspect for xp in xpad) if isinstance(xpad, tuple) else xpad/aspect
else:
ypad = tuple(yp*aspect for yp in ypad) if isinstance(ypad, tuple) else ypad*aspect
return xpad, ypad, zpad
def _get_range_extents(self, element, ranges, range_type, xdim, ydim, zdim):
dims = element.dimensions()
ndims = len(dims)
xdim = xdim or (dims[0] if ndims else None)
ydim = ydim or (dims[1] if ndims > 1 else None)
if self.projection == '3d':
zdim = zdim or (dims[2] if ndims > 2 else None)
else:
zdim = None
(x0, x1), xsrange, xhrange = get_range(element, ranges, xdim)
(y0, y1), ysrange, yhrange = get_range(element, ranges, ydim)
(z0, z1), zsrange, zhrange = get_range(element, ranges, zdim)
if not self.overlaid and not self.batched:
xspan, yspan, zspan = (v/2. for v in get_axis_padding(self.default_span))
mx0, mx1 = get_minimum_span(x0, x1, xspan)
# If auto-padding is enabled ensure RangeXY dependent plots
# are recomputed before initial render
if x0 != mx0 or x1 != mx1:
for stream in self.streams:
if isinstance(stream, (RangeX, RangeXY)):
stream.update(x_range=(mx0, mx1))
if stream not in self._trigger:
self._trigger.append(stream)
x0, x1 = mx0, mx1
my0, my1 = get_minimum_span(y0, y1, yspan)
if y0 != my0 or y1 != my1:
for stream in self.streams:
if isinstance(stream, (RangeY, RangeXY)):
stream.update(y_range=(my0, my1))
if stream not in self._trigger:
self._trigger.append(stream)
y0, y1 = my0, my1
mz0, mz1 = get_minimum_span(z0, z1, zspan)
xpad, ypad, zpad = self.get_padding(element, (x0, y0, z0, x1, y1, z1))
if range_type == 'soft':
x0, x1 = xsrange
elif range_type == 'hard':
x0, x1 = xhrange
elif xdim == 'categorical':
x0, x1 = '', ''
elif range_type == 'combined':
x0, x1 = util.dimension_range(x0, x1, xhrange, xsrange, xpad, self.logx)
if range_type == 'soft':
y0, y1 = ysrange
elif range_type == 'hard':
y0, y1 = yhrange
elif range_type == 'combined':
y0, y1 = util.dimension_range(y0, y1, yhrange, ysrange, ypad, self.logy)
elif ydim == 'categorical':
y0, y1 = '', ''
elif ydim is None:
y0, y1 = np.NaN, np.NaN
if self.projection == '3d':
if range_type == 'soft':
z0, z1 = zsrange
elif range_type == 'hard':
z0, z1 = zhrange
elif range_type=='combined':
z0, z1 = util.dimension_range(z0, z1, zhrange, zsrange, zpad, self.logz)
elif zdim == 'categorical':
z0, z1 = '', ''
elif zdim is None:
z0, z1 = np.NaN, np.NaN
return (x0, y0, z0, x1, y1, z1)
return (x0, y0, x1, y1)
def get_extents(self, element, ranges, range_type='combined', xdim=None, ydim=None, zdim=None):
"""
Gets the extents for the axes from the current Element. The globally
computed ranges can optionally override the extents.
The extents are computed by combining the data ranges, extents
and dimension ranges. Each of these can be obtained individually
by setting the range_type to one of:
* 'data': Just the data ranges
* 'extents': Element.extents
* 'soft': Dimension.soft_range values
* 'hard': Dimension.range values
To obtain the combined range, which includes range padding the
default may be used:
* 'combined': All the range types combined and padding applied
This allows Overlay plots to obtain each range and combine them
appropriately for all the objects in the overlay.
"""
num = 6 if self.projection == '3d' else 4
if self.apply_extents and range_type in ('combined', 'extents'):
norm_opts = self.lookup_options(element, 'norm').options
if norm_opts.get('framewise', False) or self.dynamic:
extents = element.extents
else:
extent_list = self.hmap.traverse(lambda x: x.extents, [Element])
extents = util.max_extents(extent_list, self.projection == '3d')
else:
extents = (np.NaN,) * num
if range_type == 'extents':
return extents
if self.apply_ranges:
range_extents = self._get_range_extents(element, ranges, range_type, xdim, ydim, zdim)
else:
range_extents = (np.NaN,) * num
if getattr(self, 'shared_axes', False) and self.subplot:
combined = util.max_extents([range_extents, extents], self.projection == '3d')
else:
max_extent = []
for l1, l2 in zip(range_extents, extents):
if isfinite(l2):
max_extent.append(l2)
else:
max_extent.append(l1)
combined = tuple(max_extent)
if self.projection == '3d':
x0, y0, z0, x1, y1, z1 = combined
else:
x0, y0, x1, y1 = combined
x0, x1 = util.dimension_range(x0, x1, self.xlim, (None, None))
y0, y1 = util.dimension_range(y0, y1, self.ylim, (None, None))
if self.projection == '3d':
z0, z1 = util.dimension_range(z0, z1, self.zlim, (None, None))
return (x0, y0, z0, x1, y1, z1)
return (x0, y0, x1, y1)
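# Typical call patterns (editor's sketch; ``element`` and ``ranges`` are
# assumed to be a plotted Element and the dict built by compute_ranges):
#
#     plot.get_extents(element, ranges, range_type='data')   # raw data bounds
#     plot.get_extents(element, ranges, range_type='hard')   # Dimension.range
#     plot.get_extents(element, ranges, range_type='soft')   # Dimension.soft_range
#     plot.get_extents(element, ranges)                      # 'combined' + padding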
def _get_axis_labels(self, dimensions, xlabel=None, ylabel=None, zlabel=None):
if self.xlabel is not None:
xlabel = self.xlabel
elif dimensions and xlabel is None:
xdims = dimensions[0]
xlabel = dim_axis_label(xdims) if xdims else ''
if self.ylabel is not None:
ylabel = self.ylabel
elif len(dimensions) >= 2 and ylabel is None:
ydims = dimensions[1]
ylabel = dim_axis_label(ydims) if ydims else ''
if getattr(self, 'zlabel', None) is not None:
zlabel = self.zlabel
elif self.projection == '3d' and len(dimensions) >= 3 and zlabel is None:
zlabel = dim_axis_label(dimensions[2]) if dimensions[2] else ''
return xlabel, ylabel, zlabel
def _format_title_components(self, key, dimensions=True, separator='\n'):
frame = self._get_frame(key)
if frame is None:
return ('', '', '', '')
type_name = type(frame).__name__
group = frame.group if frame.group != type_name else ''
label = frame.label
if self.layout_dimensions or dimensions:
dim_title = self._frame_title(key, separator=separator)
else:
dim_title = ''
return (label, group, type_name, dim_title)
def update_frame(self, key, ranges=None):
"""
Set the plot(s) to the given frame number. Operates by
manipulating the backend-specific objects held in the self.handles
dictionary.
If n is greater than the number of available frames, update
using the last available frame.
"""
class GenericOverlayPlot(GenericElementPlot):
"""
Plotting baseclass to render (Nd)Overlay objects. It implements
methods to handle the creation of ElementPlots, coordinating style
groupings and zorder for all layers across a HoloMap. It also
allows collapsing of layers via the Compositor.
"""
batched = param.Boolean(default=True, doc="""
Whether to plot the Elements in an NdOverlay in a batched plotting
call if possible. If enabled, legends are disabled and zorder may
not be preserved.""")
legend_limit = param.Integer(default=25, doc="""
Number of rendered glyphs before legends are disabled.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
style_grouping = param.Integer(default=2, doc="""
The length of the type.group.label spec that will be used to
group Elements into style groups. A style_grouping value of
1 will group just by type, a value of 2 will group by type and
group, and a value of 3 will group by the full specification.""")
_passed_handles = []
def __init__(self, overlay, ranges=None, batched=True, keys=None, group_counter=None, **params):
if 'projection' not in params:
params['projection'] = self._get_projection(overlay)
super(GenericOverlayPlot, self).__init__(overlay, ranges=ranges, keys=keys,
batched=batched, **params)
# Apply data collapse
self.hmap = self._apply_compositor(self.hmap, ranges, self.keys)
self.map_lengths = Counter()
self.group_counter = Counter() if group_counter is None else group_counter
self.cyclic_index_lookup = {}
self.zoffset = 0
self.subplots = self._create_subplots(ranges)
self.traverse(lambda x: setattr(x, 'comm', self.comm))
self.top_level = keys is None
self.dynamic_subplots = []
if self.top_level:
self.traverse(lambda x: attach_streams(self, x.hmap, 2),
[GenericElementPlot])
def _apply_compositor(self, holomap, ranges=None, keys=None, dimensions=None):
"""
Given a HoloMap compute the appropriate (mapwise or framewise)
ranges in order to apply the Compositor collapse operations in
display mode (data collapse should already have happened).
"""
# Compute framewise normalization
defaultdim = holomap.ndims == 1 and holomap.kdims[0].name != 'Frame'
if keys and ranges and dimensions and not defaultdim:
dim_inds = [dimensions.index(d) for d in holomap.kdims]
sliced_keys = [tuple(k[i] for i in dim_inds) for k in keys]
frame_ranges = OrderedDict([(slckey, self.compute_ranges(holomap, key, ranges[key]))
for key, slckey in zip(keys, sliced_keys) if slckey in holomap.data.keys()])
else:
mapwise_ranges = self.compute_ranges(holomap, None, None)
frame_ranges = OrderedDict([(key, self.compute_ranges(holomap, key, mapwise_ranges))
for key in holomap.data.keys()])
ranges = frame_ranges.values()
return Compositor.collapse(holomap, (ranges, frame_ranges.keys()), mode='display')
def _create_subplots(self, ranges):
# Check if plot should be batched
ordering = util.layer_sort(self.hmap)
batched = self.batched and type(self.hmap.last) is NdOverlay
if batched:
backend = self.renderer.backend
batchedplot = Store.registry[backend].get(self.hmap.last.type)
if (batched and batchedplot and 'batched' in batchedplot._plot_methods and
(not self.show_legend or len(ordering) > self.legend_limit)):
self.batched = True
keys, vmaps = [()], [self.hmap]
else:
self.batched = False
keys, vmaps = self.hmap._split_overlays()
if isinstance(self.hmap, DynamicMap):
dmap_streams = [get_nested_streams(layer) for layer in
split_dmap_overlay(self.hmap)]
else:
dmap_streams = [None]*len(keys)
# Compute global ordering
length = self.style_grouping
group_fn = lambda x: (x.type.__name__, x.last.group, x.last.label)
for m in vmaps:
self.map_lengths[group_fn(m)[:length]] += 1
subplots = OrderedDict()
for (key, vmap, streams) in zip(keys, vmaps, dmap_streams):
subplot = self._create_subplot(key, vmap, streams, ranges)
if subplot is None:
continue
if not isinstance(key, tuple): key = (key,)
subplots[key] = subplot
if isinstance(subplot, GenericOverlayPlot):
self.zoffset += len(subplot.subplots.keys()) - 1
if not subplots:
raise SkipRendering("%s backend could not plot any Elements "
"in the Overlay." % self.renderer.backend)
return subplots
def _create_subplot(self, key, obj, streams, ranges):
registry = Store.registry[self.renderer.backend]
ordering = util.layer_sort(self.hmap)
overlay_type = 1 if self.hmap.type == Overlay else 2
group_fn = lambda x: (x.type.__name__, x.last.group, x.last.label)
opts = {'overlaid': overlay_type}
if self.hmap.type == Overlay:
style_key = (obj.type.__name__,) + key
if self.overlay_dims:
opts['overlay_dims'] = self.overlay_dims
else:
if not isinstance(key, tuple): key = (key,)
style_key = group_fn(obj) + key
opts['overlay_dims'] = OrderedDict(zip(self.hmap.last.kdims, key))
if self.batched:
vtype = type(obj.last.last)
oidx = 0
else:
vtype = type(obj.last)
if style_key not in ordering:
ordering.append(style_key)
oidx = ordering.index(style_key)
plottype = registry.get(vtype, None)
if plottype is None:
self.param.warning(
"No plotting class for %s type and %s backend "
"found. " % (vtype.__name__, self.renderer.backend))
return None
# Get zorder and style counter
length = self.style_grouping
group_key = style_key[:length]
zorder = self.zorder + oidx + self.zoffset
cyclic_index = self.group_counter[group_key]
self.cyclic_index_lookup[style_key] = cyclic_index
self.group_counter[group_key] += 1
group_length = self.map_lengths[group_key]
if not isinstance(plottype, PlotSelector) and issubclass(plottype, GenericOverlayPlot):
opts['group_counter'] = self.group_counter
opts['show_legend'] = self.show_legend
if not any(len(frame) for frame in obj):
self.param.warning('%s is empty and will be skipped '
'during plotting' % obj.last)
return None
elif self.batched and 'batched' in plottype._plot_methods:
param_vals = dict(self.param.get_param_values())
propagate = {opt: param_vals[opt] for opt in self._propagate_options
if opt in param_vals}
opts['batched'] = self.batched
opts['overlaid'] = self.overlaid
opts.update(propagate)
if len(ordering) > self.legend_limit:
opts['show_legend'] = False
style = self.lookup_options(obj.last, 'style').max_cycles(group_length)
passed_handles = {k: v for k, v in self.handles.items()
if k in self._passed_handles}
plotopts = dict(opts, cyclic_index=cyclic_index,
invert_axes=self.invert_axes,
dimensions=self.dimensions, keys=self.keys,
layout_dimensions=self.layout_dimensions,
ranges=ranges, show_title=self.show_title,
style=style, uniform=self.uniform,
fontsize=self.fontsize, streams=streams,
renderer=self.renderer, adjoined=self.adjoined,
stream_sources=self.stream_sources,
projection=self.projection, fontscale=self.fontscale,
zorder=zorder, root=self.root, **passed_handles)
return plottype(obj, **plotopts)
def _create_dynamic_subplots(self, key, items, ranges, **init_kwargs):
"""
Handles the creation of new subplots when a DynamicMap returns
a changing set of elements in an Overlay.
"""
length = self.style_grouping
group_fn = lambda x: (x.type.__name__, x.last.group, x.last.label)
for i, (k, obj) in enumerate(items):
vmap = self.hmap.clone([(key, obj)])
self.map_lengths[group_fn(vmap)[:length]] += 1
subplot = self._create_subplot(k, vmap, [], ranges)
if subplot is None:
continue
self.subplots[k] = subplot
subplot.initialize_plot(ranges, **init_kwargs)
subplot.update_frame(key, ranges, element=obj)
self.dynamic_subplots.append(subplot)
def _update_subplot(self, subplot, spec):
"""
Updates existing subplots when the subplot has been assigned
to plot an element that is not an exact match to the object
it was initially assigned.
"""
# See if the precise spec has already been assigned a cyclic
# index otherwise generate a new one
if spec in self.cyclic_index_lookup:
cyclic_index = self.cyclic_index_lookup[spec]
else:
group_key = spec[:self.style_grouping]
self.group_counter[group_key] += 1
cyclic_index = self.group_counter[group_key]
self.cyclic_index_lookup[spec] = cyclic_index
subplot.cyclic_index = cyclic_index
if subplot.overlay_dims:
odim_key = util.wrap_tuple(spec[-1])
new_dims = zip(subplot.overlay_dims, odim_key)
subplot.overlay_dims = util.OrderedDict(new_dims)
def _get_subplot_extents(self, overlay, ranges, range_type):
"""
Iterates over all subplots and collects the extents of each.
"""
if range_type == 'combined':
extents = {'extents': [], 'soft': [], 'hard': [], 'data': []}
else:
extents = {range_type: []}
items = overlay.items()
if self.batched and self.subplots:
subplot = list(self.subplots.values())[0]
subplots = [(k, subplot) for k in overlay.data.keys()]
else:
subplots = self.subplots.items()
for key, subplot in subplots:
found = False
if subplot is None:
continue
layer = overlay.data.get(key, None)
if isinstance(self.hmap, DynamicMap) and layer is None:
for _, layer in items:
if isinstance(layer, subplot.hmap.type):
found = True
break
if not found:
layer = None
if layer is None or not subplot.apply_ranges:
continue
if isinstance(layer, CompositeOverlay):
sp_ranges = ranges
else:
sp_ranges = util.match_spec(layer, ranges) if ranges else {}
for rt in extents:
extent = subplot.get_extents(layer, sp_ranges, range_type=rt)
extents[rt].append(extent)
return extents
def get_extents(self, overlay, ranges, range_type='combined'):
subplot_extents = self._get_subplot_extents(overlay, ranges, range_type)
zrange = self.projection == '3d'
extents = {k: util.max_extents(rs, zrange) for k, rs in subplot_extents.items()}
if range_type != 'combined':
return extents[range_type]
# Unpack extents
if len(extents['data']) == 6:
x0, y0, z0, x1, y1, z1 = extents['data']
sx0, sy0, sz0, sx1, sy1, sz1 = extents['soft']
hx0, hy0, hz0, hx1, hy1, hz1 = extents['hard']
else:
x0, y0, x1, y1 = extents['data']
sx0, sy0, sx1, sy1 = extents['soft']
hx0, hy0, hx1, hy1 = extents['hard']
z0, z1 = np.NaN, np.NaN
# Apply minimum span
xspan, yspan, zspan = (v/2. for v in get_axis_padding(self.default_span))
x0, x1 = get_minimum_span(x0, x1, xspan)
y0, y1 = get_minimum_span(y0, y1, yspan)
z0, z1 = get_minimum_span(z0, z1, zspan)
# Apply padding
xpad, ypad, zpad = self.get_padding(overlay, (x0, y0, z0, x1, y1, z1))
x0, x1 = util.dimension_range(x0, x1, (hx0, hx1), (sx0, sx1), xpad, self.logx)
y0, y1 = util.dimension_range(y0, y1, (hy0, hy1), (sy0, sy1), ypad, self.logy)
if len(extents['data']) == 6:
z0, z1 = util.dimension_range(z0, z1, (hz0, hz1), (sz0, sz1), zpad, self.logz)
padded = (x0, y0, z0, x1, y1, z1)
else:
padded = (x0, y0, x1, y1)
# Combine with Element.extents
combined = util.max_extents([padded, extents['extents']], zrange)
if self.projection == '3d':
x0, y0, z0, x1, y1, z1 = combined
else:
x0, y0, x1, y1 = combined
# Apply xlim, ylim, zlim plot option
x0, x1 = util.dimension_range(x0, x1, self.xlim, (None, None))
y0, y1 = util.dimension_range(y0, y1, self.ylim, (None, None))
if self.projection == '3d':
z0, z1 = util.dimension_range(z0, z1, getattr(self, 'zlim', (None, None)), (None, None))
return (x0, y0, z0, x1, y1, z1)
return (x0, y0, x1, y1)
class GenericCompositePlot(DimensionedPlot):
def __init__(self, layout, keys=None, dimensions=None, **params):
if 'uniform' not in params:
params['uniform'] = traversal.uniform(layout)
self.top_level = keys is None
if self.top_level:
dimensions, keys = traversal.unique_dimkeys(layout)
dynamic, unbounded = get_dynamic_mode(layout)
if unbounded:
initialize_unbounded(layout, dimensions, keys[0])
self.layout = layout
super(GenericCompositePlot, self).__init__(keys=keys,
dynamic=dynamic,
dimensions=dimensions,
**params)
nested_streams = layout.traverse(lambda x: get_nested_streams(x),
[DynamicMap])
self.streams = list(set([s for streams in nested_streams for s in streams]))
self._link_dimensioned_streams()
def _link_dimensioned_streams(self):
"""
Should perform any linking required to update titles when dimensioned
streams change.
"""
def _get_frame(self, key):
"""
Creates a clone of the Layout with the nth-frame for each
Element.
"""
cached = self.current_key is None
layout_frame = self.layout.clone(shared_data=False)
if key == self.current_key and not self._force:
return self.current_frame
else:
self.current_key = key
key_map = dict(zip([d.name for d in self.dimensions], key))
for path, item in self.layout.items():
frame = get_nested_plot_frame(item, key_map, cached)
if frame is not None:
layout_frame[path] = frame
traverse_setter(self, '_force', False)
self.current_frame = layout_frame
return layout_frame
def _format_title_components(self, key, dimensions=True, separator='\n'):
dim_title = self._frame_title(key, 3, separator) if dimensions else ''
layout = self.layout
type_name = type(self.layout).__name__
group = util.bytes_to_unicode(layout.group if layout.group != type_name else '')
label = util.bytes_to_unicode(layout.label)
return (label, group, type_name, dim_title)
class GenericLayoutPlot(GenericCompositePlot):
"""
A GenericLayoutPlot accepts either a Layout or a NdLayout and
displays the elements in a cartesian grid in scanline order.
"""
transpose = param.Boolean(default=False, doc="""
Whether to transpose the layout when plotting. Switches
from row-based left-to-right and top-to-bottom scanline order
to column-based top-to-bottom and left-to-right order.""")
def __init__(self, layout, **params):
if not isinstance(layout, (NdLayout, Layout)):
raise ValueError("GenericLayoutPlot only accepts Layout objects.")
if len(layout.values()) == 0:
raise SkipRendering(warn=False)
super(GenericLayoutPlot, self).__init__(layout, **params)
self.subplots = {}
self.rows, self.cols = layout.shape[::-1] if self.transpose else layout.shape
self.coords = list(product(range(self.rows),
range(self.cols)))
class GenericAdjointLayoutPlot(Plot):
"""
AdjointLayoutPlot allows placing up to three Views in a number of
predefined and fixed layouts, which are defined by the layout_dict
class attribute. This allows placing subviews next to a main plot
in either a 'top' or 'right' position.
"""
layout_dict = {'Single': {'positions': ['main']},
'Dual': {'positions': ['main', 'right']},
'Triple': {'positions': ['main', 'right', 'top']}}
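# --- Editor's illustration (not part of HoloViews) --------------------------
# A minimal, self-contained sketch of the `style_grouping` idea documented on
# GenericOverlayPlot above: a (type, group, label) spec is truncated to its
# first `style_grouping` entries, and all layers sharing that prefix share a
# style group (and hence cycle through the same style options). The example
# specs in the comment below are made up.
from collections import Counter as _Counter
def _style_groups(specs, style_grouping=2):
    """Count how many layers fall into each style group."""
    groups = _Counter()
    for spec in specs:
        groups[spec[:style_grouping]] += 1
    return groups
# _style_groups([('Curve', 'Sensor', 'A'), ('Curve', 'Sensor', 'B'),
#                ('Scatter', 'Sensor', 'A')])
# -> Counter({('Curve', 'Sensor'): 2, ('Scatter', 'Sensor'): 1})
# -----------------------------------------------------------------------------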
| 41.271739
| 116
| 0.589505
|
25e409d77bd12d53e11b97177208fe3827a255c3
| 1,680
|
py
|
Python
|
freebilly/repository/AbstractWorkLogRepository.py
|
emileten/billy
|
3fd0ab381382f7e64c67bc37b4ea2555e6b09822
|
[
"MIT"
] | null | null | null |
freebilly/repository/AbstractWorkLogRepository.py
|
emileten/billy
|
3fd0ab381382f7e64c67bc37b4ea2555e6b09822
|
[
"MIT"
] | 1
|
2022-01-20T02:36:03.000Z
|
2022-01-20T02:36:03.000Z
|
freebilly/repository/AbstractWorkLogRepository.py
|
emileten/billy
|
3fd0ab381382f7e64c67bc37b4ea2555e6b09822
|
[
"MIT"
] | null | null | null |
import abc
from custom_inherit import DocInheritMeta
from freebilly.domain.AbstractWorkLog import AbstractWorkLog
class AbstractWorkLogRepository(
metaclass=DocInheritMeta(style="numpy", abstract_base_class=True)
):
"""
abstraction for a repository of work logs
"""
@abc.abstractmethod
def exists(self, client: str, project: str) -> bool:
"""
Parameters
----------
client: str
project: str
Returns
-------
bool
True if the representation of the work log associated with `client` and `project` can be fetched
from repo.
"""
raise NotImplementedError
@abc.abstractmethod
def valid(self, client: str, project: str) -> bool:
"""
Parameters
----------
client: str
project: str
Returns
-------
bool
True if the representation of the work log associated with `client` and `project`
is valid. What 'valid' means depends on the particular implementation of `AbstractWorkLogRepository`.
"""
@abc.abstractmethod
def get(self, client: str, project: str) -> AbstractWorkLog:
"""
retrieves work log from repository.
Parameters
----------
client: str
        project: str
Returns
-------
AbstractWorkLog
work log, in-memory
Raises
------
ValueError
            if the specified work log does not exist, or if it exists but its
            representation is invalid as per `self.valid()`.
"""
raise NotImplementedError
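# --- Editor's sketch (not part of the freebilly package) ---------------------
# A minimal in-memory repository following the interface above. The storage
# layout, the validity rule and the use of plain dicts instead of a real
# AbstractWorkLog are assumptions made purely for illustration.
class InMemoryWorkLogRepository:
    def __init__(self):
        self._store = {}  # maps (client, project) -> work log representation
    def exists(self, client: str, project: str) -> bool:
        return (client, project) in self._store
    def valid(self, client: str, project: str) -> bool:
        # illustration only: a representation is "valid" if it is a non-empty dict
        representation = self._store.get((client, project))
        return isinstance(representation, dict) and bool(representation)
    def get(self, client: str, project: str):
        if not self.exists(client, project):
            raise ValueError("work log does not exist")
        if not self.valid(client, project):
            raise ValueError("work log representation is invalid")
        return self._store[(client, project)]
# -----------------------------------------------------------------------------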
| 23.013699
| 113
| 0.567857
|
5267d31d230e0a15c06f732707d662ffa4ffdb1f
| 1,233
|
py
|
Python
|
SiF/SiF/urls.py
|
karendahlaarhus/TDT4140-programvareutvikling
|
f966ff3f2286c4875edbb5c77d1f9073a10b56c6
|
[
"MIT"
] | null | null | null |
SiF/SiF/urls.py
|
karendahlaarhus/TDT4140-programvareutvikling
|
f966ff3f2286c4875edbb5c77d1f9073a10b56c6
|
[
"MIT"
] | null | null | null |
SiF/SiF/urls.py
|
karendahlaarhus/TDT4140-programvareutvikling
|
f966ff3f2286c4875edbb5c77d1f9073a10b56c6
|
[
"MIT"
] | null | null | null |
"""SiF URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from bruker.views import LoginView
from django.views.generic.base import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
path('login/',LoginView.as_view(template_name='bruker/login.html'), name='login'),
path('', TemplateView.as_view(template_name='home.html'), name='home'),
path('vask/', include('vaskelister.urls', namespace='vask')),
path('oversikt/', include('studentby.urls')),
]
| 38.53125
| 86
| 0.714517
|
3420b939fcca78e99c80ffbff060e73d96ae7bfa
| 2,328
|
py
|
Python
|
sahara/api/middleware/auth_valid.py
|
mapr/sahara
|
e08fce3c7e1833b44a47838bbe8de26bb21bff16
|
[
"Apache-2.0"
] | 1
|
2022-02-25T19:14:33.000Z
|
2022-02-25T19:14:33.000Z
|
sahara/api/middleware/auth_valid.py
|
mapr/sahara
|
e08fce3c7e1833b44a47838bbe8de26bb21bff16
|
[
"Apache-2.0"
] | null | null | null |
sahara/api/middleware/auth_valid.py
|
mapr/sahara
|
e08fce3c7e1833b44a47838bbe8de26bb21bff16
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webob.exc as ex
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.openstack.common import log as logging
import sahara.openstack.commons as commons
LOG = logging.getLogger(__name__)
class AuthValidator:
"""Handles token auth results and tenants."""
def __init__(self, app):
self.app = app
def __call__(self, env, start_response):
"""Ensures that tenants in url and token are equal.
        Handles the incoming request by checking the tenant info from the
        headers and the url ({tenant_id} url attribute).
        Passes the request downstream on success.
        Rejects the request if the tenant_id from the headers does not equal
        the tenant_id from the url.
"""
token_tenant = env['HTTP_X_TENANT_ID']
if not token_tenant:
LOG.warn(_LW("Can't get tenant_id from env"))
resp = ex.HTTPServiceUnavailable()
return resp(env, start_response)
path = env['PATH_INFO']
if path != '/':
version, url_tenant, rest = commons.split_path(path, 3, 3, True)
if not version or not url_tenant or not rest:
LOG.info(_LI("Incorrect path: %s"), path)
resp = ex.HTTPNotFound(_("Incorrect path"))
return resp(env, start_response)
if token_tenant != url_tenant:
LOG.debug("Unauthorized: token tenant != requested tenant")
resp = ex.HTTPUnauthorized(
_('Token tenant != requested tenant'))
return resp(env, start_response)
return self.app(env, start_response)
def wrap(app):
"""Wrap wsgi application with auth validator check."""
return AuthValidator(app)
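# --- Editor's usage sketch (not from the sahara code base) -------------------
# Drives AuthValidator with a hand-built WSGI environ. The trivial hello app,
# the tenant id and the status collector below are assumptions made purely for
# illustration; a request to '/' carrying a tenant header is passed through.
def _hello_app(env, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']
def _demo():
    statuses = []
    app = wrap(_hello_app)  # equivalent to AuthValidator(_hello_app)
    env = {'HTTP_X_TENANT_ID': 'tenant-1', 'PATH_INFO': '/'}
    body = app(env, lambda status, headers: statuses.append(status))
    return statuses[0], body  # ('200 OK', [b'ok'])
# -----------------------------------------------------------------------------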
| 32.788732
| 77
| 0.659364
|
69d6fd5fc5ef1fbf65f84616e5e63a6c10bb8f5b
| 1,937
|
py
|
Python
|
python/test_gilded_rose.py
|
josppss96/GildedRose-Refactoring-Kata
|
e07ab1729687df0a5325b3943fceac3dba9f3135
|
[
"MIT"
] | 1
|
2021-10-05T08:20:38.000Z
|
2021-10-05T08:20:38.000Z
|
python/test_gilded_rose.py
|
josppss96/GildedRose-Refactoring-Kata
|
e07ab1729687df0a5325b3943fceac3dba9f3135
|
[
"MIT"
] | null | null | null |
python/test_gilded_rose.py
|
josppss96/GildedRose-Refactoring-Kata
|
e07ab1729687df0a5325b3943fceac3dba9f3135
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from gilded_rose import Item, GildedRose
class GildedRoseTest(unittest.TestCase):
aged_brie = "Aged Brie"
sulfuras = "Sulfuras, Hand of Ragnaros"
elixir = "Elixir of the Mongoose"
conjured = "Conjured Mana Cake"
passes = "Backstage passes to a TAFKAL80ETC concert"
    # Item argument order: Item(name, sell_in, quality)
def test_sulfuras(self):
items = [Item("fixme", 0, 0), Item(self.aged_brie, 2, 2), Item(self.sulfuras, -1, 80)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
        self.assertEqual((self.sulfuras, -1, 80), (items[2].name, items[2].sell_in, items[2].quality))
def test_aged_brie(self):
items = [Item("fixme", 0, 0), Item(self.aged_brie, 2, 2), Item(self.sulfuras, -1, 80)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual((self.aged_brie, 1, 3), (items[1].name, items[1].sell_in, items[1].quality))
def test_backstage_passes(self):
items = [Item(self.passes, 15, 20), Item(self.aged_brie, 2, 2), Item(self.sulfuras, -1, 80)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual((self.passes, 14, 21), (items[0].name, items[0].sell_in, items[0].quality))
def test_elixir(self):
items = [Item(self.elixir, 7, 5), Item(self.aged_brie, 2, 2), Item(self.sulfuras, -1, 80)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual((self.elixir, 6, 4), (items[0].name, items[0].sell_in, items[0].quality))
def test_conjure(self):
items = [Item(self.conjured, 7, 6), Item(self.aged_brie, 2, 2), Item(self.sulfuras, -1, 80)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual((self.conjured, 6, 4), (items[0].name, items[0].sell_in, items[0].quality))
if __name__ == '__main__':
unittest.main()
| 40.354167
| 102
| 0.645328
|
aa522c7ca249f7059913f88674e39b462925bf44
| 5,290
|
py
|
Python
|
autoarray/structures/plot/structure_plotters.py
|
Jammy2211/PyAutoArray
|
1fb9c84ca2a3333abedfbf96d070fc355e2628e4
|
[
"MIT"
] | 5
|
2019-09-26T02:18:25.000Z
|
2021-12-11T16:29:20.000Z
|
autoarray/structures/plot/structure_plotters.py
|
Jammy2211/PyAutoArray
|
1fb9c84ca2a3333abedfbf96d070fc355e2628e4
|
[
"MIT"
] | 3
|
2020-03-30T14:25:57.000Z
|
2021-12-21T17:10:55.000Z
|
autoarray/structures/plot/structure_plotters.py
|
Jammy2211/PyAutoArray
|
1fb9c84ca2a3333abedfbf96d070fc355e2628e4
|
[
"MIT"
] | 4
|
2020-03-03T11:35:41.000Z
|
2022-01-21T17:37:35.000Z
|
import numpy as np
from typing import List, Union
from autoarray.plot.abstract_plotters import AbstractPlotter
from autoarray.plot.mat_wrap.visuals import Visuals1D
from autoarray.plot.mat_wrap.visuals import Visuals2D
from autoarray.plot.mat_wrap.include import Include1D
from autoarray.plot.mat_wrap.include import Include2D
from autoarray.plot.mat_wrap.mat_plot import MatPlot1D
from autoarray.plot.mat_wrap.mat_plot import MatPlot2D
from autoarray.plot.mat_wrap.mat_plot import AutoLabels
from autoarray.structures.arrays.one_d.array_1d import Array1D
from autoarray.structures.arrays.two_d.array_2d import Array2D
from autoarray.structures.grids.two_d.grid_2d import Grid2D
from autoarray.structures.grids.two_d.grid_2d_irregular import Grid2DIrregular
class Array2DPlotter(AbstractPlotter):
def __init__(
self,
array: Array2D,
mat_plot_2d: MatPlot2D = MatPlot2D(),
visuals_2d: Visuals2D = Visuals2D(),
include_2d: Include2D = Include2D(),
):
super().__init__(
visuals_2d=visuals_2d, include_2d=include_2d, mat_plot_2d=mat_plot_2d
)
self.array = array
@property
def visuals_with_include_2d(self) -> Visuals2D:
"""
Extracts from an `Array2D` attributes that can be plotted and returns them in a `Visuals` object.
Only attributes already in `self.visuals_2d` or with `True` entries in the `Include` object are extracted
for plotting.
From an `Array2D` the following attributes can be extracted for plotting:
- origin: the (y,x) origin of the structure's coordinate system.
- mask: the mask of the structure.
- border: the border of the structure's mask.
Parameters
----------
array : Array2D
The array whose attributes are extracted for plotting.
Returns
-------
Visuals2D
The collection of attributes that can be plotted by a `Plotter2D` object.
"""
return self.visuals_2d + self.visuals_2d.__class__(
origin=self.extract_2d("origin", Grid2DIrregular(grid=[self.array.origin])),
mask=self.extract_2d("mask", self.array.mask),
border=self.extract_2d("border", self.array.mask.border_grid_sub_1.binned),
)
def figure_2d(self):
self.mat_plot_2d.plot_array(
array=self.array,
visuals_2d=self.visuals_with_include_2d,
auto_labels=AutoLabels(title="Array2D", filename="array"),
)
class Grid2DPlotter(AbstractPlotter):
def __init__(
self,
grid: Grid2D,
mat_plot_2d: MatPlot2D = MatPlot2D(),
visuals_2d: Visuals2D = Visuals2D(),
include_2d: Include2D = Include2D(),
):
super().__init__(
visuals_2d=visuals_2d, include_2d=include_2d, mat_plot_2d=mat_plot_2d
)
self.grid = grid
@property
def visuals_with_include_2d(self) -> Visuals2D:
"""
        Extracts from a `Grid2D` attributes that can be plotted and returns them in a `Visuals` object.
Only attributes with `True` entries in the `Include` object are extracted for plotting.
From a `Grid2D` the following attributes can be extracted for plotting:
- origin: the (y,x) origin of the grid's coordinate system.
- mask: the mask of the grid.
- border: the border of the grid's mask.
Parameters
----------
grid : abstract_grid_2d.AbstractGrid2D
The grid whose attributes are extracted for plotting.
Returns
-------
Visuals2D
The collection of attributes that can be plotted by a `Plotter2D` object.
"""
if not isinstance(self.grid, Grid2D):
return self.visuals_2d
return self.visuals_2d + self.visuals_2d.__class__(
origin=self.extract_2d("origin", Grid2DIrregular(grid=[self.grid.origin]))
)
def figure_2d(self, color_array: np.ndarray = None):
self.mat_plot_2d.plot_grid(
grid=self.grid,
visuals_2d=self.visuals_with_include_2d,
auto_labels=AutoLabels(title="Grid2D", filename="grid"),
color_array=color_array,
)
class YX1DPlotter(AbstractPlotter):
def __init__(
self,
y: Union[np.ndarray, List, Array1D],
x: Union[np.ndarray, List, Array1D],
mat_plot_1d: MatPlot1D = MatPlot1D(),
visuals_1d: Visuals1D = Visuals1D(),
include_1d: Include1D = Include1D(),
):
super().__init__(
visuals_1d=visuals_1d, include_1d=include_1d, mat_plot_1d=mat_plot_1d
)
self.y = y
self.x = x
@property
def visuals_with_include_1d(self) -> Visuals1D:
return self.visuals_1d + self.visuals_1d.__class__(
origin=self.extract_1d("origin", self.x.origin),
mask=self.extract_1d("mask", self.x.mask),
)
def figure_1d(self):
self.mat_plot_1d.plot_yx(
y=self.y, x=self.x, visuals_1d=self.visuals_1d, auto_labels=AutoLabels()
)
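# --- Editor's usage sketch (not part of autoarray) ---------------------------
# Assuming `array_2d` is an Array2D and `grid_2d` a Grid2D built elsewhere, the
# plotters above are driven as below; the MatPlot2D/Visuals2D/Include2D
# defaults come from the constructor signatures shown in this module.
def _plot_structures(array_2d, grid_2d):
    array_plotter = Array2DPlotter(array=array_2d)
    array_plotter.figure_2d()  # auto-labelled "Array2D", output filename "array"
    grid_plotter = Grid2DPlotter(grid=grid_2d)
    grid_plotter.figure_2d()  # auto-labelled "Grid2D", output filename "grid"
# -----------------------------------------------------------------------------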
| 33.910256
| 114
| 0.633459
|
7ca97e0b22cf5ca509f4ef0e74fcb19f855e8354
| 82
|
py
|
Python
|
fx_findings/reversal_analysis/__init__.py
|
maxoja/betting-simulator
|
812242cec2218ed60aee8819255a4ab0c4ea2263
|
[
"MIT"
] | null | null | null |
fx_findings/reversal_analysis/__init__.py
|
maxoja/betting-simulator
|
812242cec2218ed60aee8819255a4ab0c4ea2263
|
[
"MIT"
] | null | null | null |
fx_findings/reversal_analysis/__init__.py
|
maxoja/betting-simulator
|
812242cec2218ed60aee8819255a4ab0c4ea2263
|
[
"MIT"
] | null | null | null |
# from . import analyse
from .analyse import run
from .wick import run as run_wick
| 27.333333
| 33
| 0.780488
|
a6f43eb4d833b4ba679c38ac11f44b548feb7d7a
| 3,236
|
py
|
Python
|
DDPG/utils.py
|
WoShiDongZhiWu/Reinforcement-learning-Algorithm
|
59fdf29e7feb73048b9ddf3b4755b55f0459efcb
|
[
"Apache-2.0"
] | 1
|
2019-12-23T02:59:13.000Z
|
2019-12-23T02:59:13.000Z
|
DDPG/utils.py
|
WoShiDongZhiWu/reinforcement-learning-algorithm
|
59fdf29e7feb73048b9ddf3b4755b55f0459efcb
|
[
"Apache-2.0"
] | null | null | null |
DDPG/utils.py
|
WoShiDongZhiWu/reinforcement-learning-algorithm
|
59fdf29e7feb73048b9ddf3b4755b55f0459efcb
|
[
"Apache-2.0"
] | null | null | null |
'''
########################################################
# author wudong
# date 20190816
# Helper functions needed by the DDPG algorithm:
# Ornstein-Uhlenbeck process: adds noise to the deterministic actions for exploration
# The two target-network update schemes of DDPG: hard_update (full copy) and soft_update (small incremental blend)
########################################################
'''
import numpy as np
import torch
import shutil
import torch.autograd as Variable
import random
import matplotlib.pyplot as plt
def learning_curve(data, x_index = 0, y1_index = 1, y2_index = None, title = "",
x_name = "", y_name = "",
y1_legend = "", y2_legend=""):
    '''Draws a learning curve from the collected statistics.
    Args:
        data: a tuple of lists of equal length, e.g. ([], [], [])
        x_index: index (within the tuple) of the list used for the x axis
        y1_index, y2_index: indices (within the tuple) of the lists used for the y axis
        title: title of the figure
        x_name: label of the x axis
        y_name: label of the y axis
        y1_legend: legend entry for y1
        y2_legend: legend entry for y2
    Return:
        None (the curve is drawn with matplotlib)
'''
fig, ax = plt.subplots()
x = data[x_index]
y1 = data[y1_index]
ax.plot(x, y1, label = y1_legend)
if y2_index is not None:
ax.plot(x, data[y2_index], label = y2_legend)
ax.grid(True, linestyle='-.')
ax.tick_params(labelcolor='black', labelsize='medium', width=1)
ax.set_xlabel(x_name)
ax.set_ylabel(y_name)
ax.set_title(title)
ax.legend()
#plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
#plt.axis([40, 160, 0, 0.03])
#plt.grid(True)
plt.show()
def soft_update(target, source, tau):
"""
    Softly updates the parameters of the target network (y) towards the
    source network (x):
        y = tau * x + (1 - tau) * y
    Args:
        target: target network (PyTorch)
        source: source network (PyTorch)
Return: None
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau
)
def hard_update(target, source):
"""
    Copies the parameters of the source network (x) into the target network (y): y = x
    Args:
        target: target network (PyTorch)
        source: source network (PyTorch)
Return: None
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def save_training_checkpoint(state, is_best, episode_count):
"""
Saves the models, with all training parameters intact
:param state:
:param is_best:
:param filename:
:return:
"""
filename = str(episode_count) + 'checkpoint.path.rar'
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckActionNoise:
def __init__(self, action_dim, mu = 0, theta = 0.15, sigma = 0.2):
'''
        Adds random noise on top of the generated action so that a bounded
        region around the deterministic action is explored.
        Ornstein-Uhlenbeck process: generates a Gaussian, Markovian random process.
'''
self.action_dim = action_dim
self.mu = mu
self.theta = theta
self.sigma = sigma
self.X = np.ones(self.action_dim) * self.mu
def reset(self):
self.X = np.ones(self.action_dim) * self.mu
def sample(self):
dx = self.theta * (self.mu - self.X)
dx = dx + self.sigma * np.random.randn(len(self.X))
self.X = self.X + dx
return self.X
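# --- Editor's usage sketch (not part of the original module) -----------------
# Two toy linear layers stand in for the online/target networks; tau and the
# action dimension below are arbitrary values chosen for illustration.
def _demo_updates():
    source = torch.nn.Linear(4, 2)
    target = torch.nn.Linear(4, 2)
    hard_update(target, source)            # target now equals source exactly
    soft_update(target, source, tau=0.01)  # target <- 0.01*source + 0.99*target
    noise = OrnsteinUhlenbeckActionNoise(action_dim=2)
    action = source(torch.ones(4)).detach().numpy() + noise.sample()
    return action
# -----------------------------------------------------------------------------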
| 27.896552
| 100
| 0.594252
|
8d863273bf5fba93d47b6d407c93da0a5a2775cd
| 7,047
|
py
|
Python
|
test/functional/p2p_invalid_tx.py
|
Neomnf/NEOM
|
daf60c9ffc3f85d758c114f1e511d246a2fd178d
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_tx.py
|
Neomnf/NEOM
|
daf60c9ffc3f85d758c114f1e511d246a2fd178d
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_tx.py
|
Neomnf/NEOM
|
daf60c9ffc3f85d758c114f1e511d246a2fd178d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid transactions.
In this test we connect to one node over p2p, and test tx requests."""
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import NEOMTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class InvalidTxRequestTest(NEOMTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def bootstrap_p2p(self, *, num_connections=1):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
for _ in range(num_connections):
self.nodes[0].add_p2p_connection(P2PDataStore())
def reconnect_p2p(self, **kwargs):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p(**kwargs)
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
best_block = self.nodes[0].getbestblockhash()
tip = int(best_block, 16)
best_block_time = self.nodes[0].getblock(best_block)['time']
block_time = best_block_time + 1
self.log.info("Create a new block with an anyone-can-spend coinbase.")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
node.p2p.send_blocks_and_test([block], node, success=True)
self.log.info("Mature the block.")
self.nodes[0].generate(100)
# b'\x64' is OP_NOTIF
# Transaction will be rejected with code 16 (REJECT_INVALID)
# and we get disconnected immediately
self.log.info('Test a transaction that is rejected')
tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x64' * 35, amount=25 * COIN - 12000)
node.p2p.send_txs_and_test([tx1], node, success=False, expect_disconnect=True)
# Make two p2p connections to provide the node with orphans
# * p2ps[0] will send valid orphan txs (one with low fee)
# * p2ps[1] will send an invalid orphan tx (and is later disconnected for that)
self.reconnect_p2p(num_connections=2)
self.log.info('Test orphan transaction handling ... ')
        # Create a root transaction that we withhold until all dependent transactions
# are sent out and in the orphan cache
SCRIPT_PUB_KEY_OP_TRUE = b'\x51\x75' * 15 + b'\x51'
tx_withhold = CTransaction()
tx_withhold.vin.append(CTxIn(outpoint=COutPoint(block1.vtx[0].sha256, 0)))
tx_withhold.vout.append(CTxOut(nValue=25 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_withhold.calc_sha256()
# Our first orphan tx with some outputs to create further orphan txs
tx_orphan_1 = CTransaction()
tx_orphan_1.vin.append(CTxIn(outpoint=COutPoint(tx_withhold.sha256, 0)))
tx_orphan_1.vout = [CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)] * 3
tx_orphan_1.calc_sha256()
# A valid transaction with low fee
tx_orphan_2_no_fee = CTransaction()
tx_orphan_2_no_fee.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 0)))
tx_orphan_2_no_fee.vout.append(CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
# A valid transaction with sufficient fee
tx_orphan_2_valid = CTransaction()
tx_orphan_2_valid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 1)))
tx_orphan_2_valid.vout.append(CTxOut(nValue=10 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_orphan_2_valid.calc_sha256()
# An invalid transaction with negative fee
tx_orphan_2_invalid = CTransaction()
tx_orphan_2_invalid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 2)))
tx_orphan_2_invalid.vout.append(CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
self.log.info('Send the orphans ... ')
# Send valid orphan txs from p2ps[0]
node.p2p.send_txs_and_test([tx_orphan_1, tx_orphan_2_no_fee, tx_orphan_2_valid], node, success=False)
# Send invalid tx from p2ps[1]
node.p2ps[1].send_txs_and_test([tx_orphan_2_invalid], node, success=False)
assert_equal(0, node.getmempoolinfo()['size']) # Mempool should be empty
assert_equal(2, len(node.getpeerinfo())) # p2ps[1] is still connected
self.log.info('Send the withhold tx ... ')
node.p2p.send_txs_and_test([tx_withhold], node, success=True)
# Transactions that should end up in the mempool
expected_mempool = {
t.hash
for t in [
tx_withhold, # The transaction that is the root for all orphans
tx_orphan_1, # The orphan transaction that splits the coins
tx_orphan_2_valid, # The valid transaction (with sufficient fee)
]
}
# Transactions that do not end up in the mempool
        # tx_orphan_2_no_fee, because its fee is too low (p2ps[0] is not disconnected for relaying that tx)
        # tx_orphan_2_invalid, because it has a negative fee (p2ps[1] is disconnected for relaying that tx)
wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
assert_equal(expected_mempool, set(node.getrawmempool()))
# restart node with sending BIP61 messages disabled, check that it disconnects without sending the reject message
self.log.info('Test a transaction that is rejected, with BIP61 disabled')
self.restart_node(0, ['-enablebip61=0', '-persistmempool=0'])
self.reconnect_p2p(num_connections=1)
with node.assert_debug_log(expected_msgs=[
"{} from peer=0 was not accepted: mandatory-script-verify-flag-failed (Invalid OP_IF construction) (code 16)".format(tx1.hash),
"disconnecting peer=0",
]):
node.p2p.send_txs_and_test([tx1], node, success=False, expect_disconnect=True)
# send_txs_and_test will have waited for disconnect, so we can safely check that no reject has been received
assert_equal(node.p2p.reject_code_received, None)
if __name__ == '__main__':
InvalidTxRequestTest().main()
| 45.464516
| 143
| 0.686108
|
3a0ebd0abe11eae685e3a48c740f3f9a54b4e347
| 1,066
|
py
|
Python
|
pydarkstar/scrubbing/scrubber.py
|
Demiurge-DSP/pydarkstar
|
47fb57b493aa83740dfebccd84de6957a2992dcc
|
[
"MIT"
] | null | null | null |
pydarkstar/scrubbing/scrubber.py
|
Demiurge-DSP/pydarkstar
|
47fb57b493aa83740dfebccd84de6957a2992dcc
|
[
"MIT"
] | null | null | null |
pydarkstar/scrubbing/scrubber.py
|
Demiurge-DSP/pydarkstar
|
47fb57b493aa83740dfebccd84de6957a2992dcc
|
[
"MIT"
] | 1
|
2019-05-12T14:28:57.000Z
|
2019-05-12T14:28:57.000Z
|
from ..darkobject import DarkObject
from bs4 import BeautifulSoup
import logging
import time
from urllib.request import urlopen
class Scrubber(DarkObject):
def __init__(self):
super(Scrubber, self).__init__()
def scrub(self):
"""
Get item metadata.
"""
return {}
# noinspection PyBroadException
@staticmethod
def soup(url):
"""
Open URL and create tag soup.
:param url: website string
:type url: str
"""
handle = ''
max_tries = 10
for i in range(max_tries):
try:
handle = urlopen(url)
handle = handle.read()
break
except:
logging.exception('urlopen failed (attempt %d)', i + 1)
if i == max_tries - 1:
logging.error('the maximum urlopen attempts have been reached')
raise
time.sleep(1)
s = BeautifulSoup(handle)
return s
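# --- Editor's sketch (not part of pydarkstar) --------------------------------
# A minimal subclass showing how scrub() is meant to turn the tag soup into
# item metadata. The url attribute and the title-based "metadata" are
# assumptions made purely for illustration.
class TitleScrubber(Scrubber):
    def __init__(self, url):
        super(TitleScrubber, self).__init__()
        self.url = url
    def scrub(self):
        soup = self.soup(self.url)
        title = soup.title.string if soup.title else ''
        return {'title': title}
# -----------------------------------------------------------------------------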
if __name__ == '__main__':
pass
| 21.755102
| 83
| 0.525328
|
8bdb8c6bde17d15d2986dc149b62d8d97cea2713
| 2,314
|
py
|
Python
|
clients/python-flask/generated/openapi_server/models/favorite_impllinks.py
|
PankTrue/swaggy-jenkins
|
aca35a7cca6e1fcc08bd399e05148942ac2f514b
|
[
"MIT"
] | 23
|
2017-08-01T12:25:26.000Z
|
2022-01-25T03:44:11.000Z
|
clients/python-flask/generated/openapi_server/models/favorite_impllinks.py
|
PankTrue/swaggy-jenkins
|
aca35a7cca6e1fcc08bd399e05148942ac2f514b
|
[
"MIT"
] | 35
|
2017-06-14T03:28:15.000Z
|
2022-02-14T10:25:54.000Z
|
clients/python-flask/generated/openapi_server/models/favorite_impllinks.py
|
PankTrue/swaggy-jenkins
|
aca35a7cca6e1fcc08bd399e05148942ac2f514b
|
[
"MIT"
] | 11
|
2017-08-31T19:00:20.000Z
|
2021-12-19T12:04:12.000Z
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.link import Link # noqa: F401,E501
from openapi_server import util
class FavoriteImpllinks(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _self: Link=None, _class: str=None): # noqa: E501
"""FavoriteImpllinks - a model defined in OpenAPI
:param _self: The _self of this FavoriteImpllinks. # noqa: E501
:type _self: Link
:param _class: The _class of this FavoriteImpllinks. # noqa: E501
:type _class: str
"""
self.openapi_types = {
'_self': Link,
'_class': str
}
self.attribute_map = {
'_self': 'self',
'_class': '_class'
}
self.__self = _self
self.__class = _class
@classmethod
def from_dict(cls, dikt) -> 'FavoriteImpllinks':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The FavoriteImpllinks of this FavoriteImpllinks. # noqa: E501
:rtype: FavoriteImpllinks
"""
return util.deserialize_model(dikt, cls)
@property
def _self(self) -> Link:
"""Gets the _self of this FavoriteImpllinks.
:return: The _self of this FavoriteImpllinks.
:rtype: Link
"""
return self.__self
@_self.setter
def _self(self, _self: Link):
"""Sets the _self of this FavoriteImpllinks.
:param _self: The _self of this FavoriteImpllinks.
:type _self: Link
"""
self.__self = _self
@property
def _class(self) -> str:
"""Gets the _class of this FavoriteImpllinks.
:return: The _class of this FavoriteImpllinks.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class: str):
"""Sets the _class of this FavoriteImpllinks.
:param _class: The _class of this FavoriteImpllinks.
:type _class: str
"""
self.__class = _class
| 25.152174
| 96
| 0.60847
|
7baf078e1ca61d0392512e61730bead17d63566b
| 8,771
|
py
|
Python
|
matpyxplot/plt.py
|
ajayramak/matpyxplot
|
874b83f75a7640257d53590783ab702098139406
|
[
"Apache-2.0"
] | null | null | null |
matpyxplot/plt.py
|
ajayramak/matpyxplot
|
874b83f75a7640257d53590783ab702098139406
|
[
"Apache-2.0"
] | null | null | null |
matpyxplot/plt.py
|
ajayramak/matpyxplot
|
874b83f75a7640257d53590783ab702098139406
|
[
"Apache-2.0"
] | null | null | null |
# Ajayrama Kumaraswamy, 6th Jan 2015
import pyx
import subprocess
import math
goldenMean = (1 + math.sqrt(5))/2
class Graph(object):
_colMap = {
'b' : pyx.color.rgb.blue,
'r' : pyx.color.rgb.red,
'g' : pyx.color.rgb.green,
'm' : pyx.color.cmyk.Magenta,
'y' : pyx.color.cmyk.Yellow,
'k' : pyx.color.gray.black,
}
_lsMap = {
'-' : pyx.style.linestyle.solid,
'--' : pyx.style.linestyle.dashed,
':' : pyx.style.linestyle.dotted,
'.-' : pyx.style.linestyle.dashdotted,
}
_mMap = {
'o' : pyx.graph.style.symbol.circle,
'x' : pyx.graph.style.symbol.cross,
'^' : pyx.graph.style.symbol.triangle,
'd' : pyx.graph.style.symbol.diamond,
's' : pyx.graph.style.symbol.square,
'+' : pyx.graph.style.symbol.plus,
}
def __init__(self, width=None, height=None, ratio=goldenMean):
'''
        Either width or height must be specified.
        :param width: width of the graph in cm
        :param height: height of the graph in cm
        :param ratio: width/height
:return: graph object
'''
assert width is not None or height is not None, 'Either height or width must be specified'
if width is not None and height is not None:
assert ratio == width / height, 'Ratio is not width/height'
if height is None:
self.width = width
self.height = width / ratio
if width is None:
self.height = height
self.width = height * ratio
self.plots = []
self.xpos = 0
self.ypos = 0
self.plots = []
self.datas = []
self.styles = []
self.xAxis = None
self.yAxis = None
self.drawLegend = False
def setXAxisParams(self, type='linear', label=None, manualTicks=[], min=None, max=None):
'''
:param type: The type of the axis. 'linear' or 'log'. Only 'linear' supported
:param label: axis label
        :param manualTicks: manual axis ticks; will override automatically generated ones.
:param min: axis lower limit
:param max: axis upper limit
:return:
'''
if type == 'linear':
self.xAxis = pyx.graph.axis.axis.linear(
min = min,
max = max,
title = label,
manualticks = manualTicks
)
else:
raise(NotImplementedError())
def setYAxisParams(self, type='linear', label=None, manualTicks=[], min=None, max=None):
'''
:param type: The type of the axis. 'linear' or 'log'. Only 'linear' supported
:param label: axis label
        :param manualTicks: manual axis ticks; will override automatically generated ones.
:param min: axis lower limit
:param max: axis upper limit
:return:
'''
if type == 'linear':
self.yAxis = pyx.graph.axis.axis.linear(
min = min,
max = max,
title = label,
manualticks = manualTicks
)
else:
raise(NotImplementedError())
def showLegend(self, pos='tr'):
'''
Insert a legend
        :param pos: Specify where to place the legend. 'tr' for top right and 'bl' for bottom left
:return:
'''
self.drawLegend = True
self.legendPos = pos
def plot(self, x, y, c='b', ls='-', m='None', mfc='None', title=''):
'''
:param x: iterable containing x axis values of the points
:param y: iterable containing y axis values of the points
        :param c: string specifying color. One of {'r', 'g', 'b', 'k', 'm', 'y'}
        :param ls: string specifying line style. '-'(continuous), ':'(dotted), '--'(dashed), '.-'(dot-dash)
        :param m: string specifying marker. 's'(square), 'o'(circle), 'd'(diamond), 'x'(cross), '+'(plus), '^'(upright triangle)
        :param mfc: string specifying marker face color. Same options as param c
:param title: tag string to use in legend.
:return:
'''
self.datas.append(pyx.graph.data.values(x=x, y=y, title=title))
style = []
if not ls == 'None':
style.append(pyx.graph.style.line([self._lsMap[ls], self._colMap[c]]))
if not m == 'None':
if mfc == 'None':
mfc = c
symbol = pyx.graph.style.symbol(symbol=self._mMap[m],
symbolattrs=[pyx.deco.stroked.clear,
pyx.deco.filled([self._colMap[mfc]])])
style.append(symbol)
self.styles.append(style)
def draw(self):
'''
Draws the graph using pyx
:return:
'''
args = dict(width=self.width,
height=self.height,
xpos=self.xpos,
ypos=self.ypos)
if self.xAxis is not None:
args['x'] = self.xAxis
if self.yAxis is not None:
args['y'] = self.yAxis
if self.drawLegend:
args['key'] = pyx.graph.key.key(pos=self.legendPos)
g = pyx.graph.graphxy(**args)
for data, style in zip(self.datas, self.styles):
g.plot(data, style)
return g
class Canvas(object):
def __init__(self):
self.canvas = pyx.canvas.canvas()
self.graphs = []
self.drawn = False
def addGraph(self, graph, xpos=0, ypos=0):
'''
        Add a graph to the canvas. The graph's lower-left corner is placed at (xpos, ypos) of the canvas
        :param graph: a Graph object.
        :param xpos: x position of the lower-left corner of the graph in the canvas
        :param ypos: y position of the lower-left corner of the graph in the canvas
:return:
'''
graph.xpos = xpos
graph.ypos = ypos
self.graphs.append(graph)
def addGraphBelowOf(self, graphToAdd, refGraph, distance=1):
'''
Add a graph to the canvas below an already added graph
:param graphToAdd: Graph object
:param refGraph: Graph Object
:param distance: vertical distance between graphToAdd and refGraph in cm
:return:
'''
graphToAdd.xpos = refGraph.xpos
graphToAdd.ypos = refGraph.ypos - graphToAdd.height - distance
self.graphs.append(graphToAdd)
def addGraphRightOf(self, graphToAdd, refGraph, distance=1):
'''
Add a graph to the canvas to the right of an already added graph
:param graphToAdd: Graph Object
:param refGraph: Graph Object
:param distance: horizontal distance between graphToAdd and refGraph in cm
:return:
'''
graphToAdd.ypos = refGraph.ypos
graphToAdd.xpos = refGraph.xpos + graphToAdd.width + distance
self.graphs.append(graphToAdd)
def draw(self):
'''
        Draws the canvas and its graphs with pyx
:return:
'''
if len(self.graphs) == 0:
            raise(AttributeError('Please add a few graphs to the canvas using the addGraph...() methods'))
self.drawn = True
for graph in self.graphs:
g = graph.draw()
self.canvas.insert(g)
def writeAndOpen(self, fName):
'''
        Writes a PDF and opens it with evince. Works only on Linux.
:param fName: filename of the output PDF file.
:return:
'''
if not self.drawn:
self.draw()
self.canvas.writePDFfile(fName)
subprocess.Popen(['evince', fName])
def writePDF(self, fName):
'''
Writes PDF.
:param fName: filename of the output PDF file.
:return:
'''
if not self.drawn:
self.draw()
self.canvas.writePDFfile(fName)
def writeEPS(self, fName):
'''
Writes EPS.
:param fName: filename of the output EPS file.
:return:
'''
if not self.drawn:
self.draw()
self.canvas.writeEPSfile(fName)
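# --- Editor's usage sketch (not part of matpyxplot) --------------------------
# The data values and the output file name below are made up; the sketch just
# exercises the Graph/Canvas API documented above.
def _demo_figure():
    xs = [0, 1, 2, 3, 4]
    top = Graph(width=8)
    top.setXAxisParams(label='time (s)')
    top.setYAxisParams(label='amplitude')
    top.plot(xs, [x * x for x in xs], c='b', ls='-', m='o', title='quadratic')
    top.showLegend(pos='tr')
    bottom = Graph(width=8)
    bottom.plot(xs, [2 * x for x in xs], c='r', ls='--')
    canvas = Canvas()
    canvas.addGraph(top)
    canvas.addGraphBelowOf(bottom, top, distance=1.5)
    canvas.writePDF('demo.pdf')
# -----------------------------------------------------------------------------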
| 26.104167
| 130
| 0.5061
|
7a9180a12293b0d956cfbb70d398fdb26925f03d
| 538
|
py
|
Python
|
manage.py
|
AkinWilderman/myPort
|
3ddeea04ccffe3ed7b66d6dba2c1f2dc00c9eb6c
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
AkinWilderman/myPort
|
3ddeea04ccffe3ed7b66d6dba2c1f2dc00c9eb6c
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
AkinWilderman/myPort
|
3ddeea04ccffe3ed7b66d6dba2c1f2dc00c9eb6c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myPort.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.625
| 73
| 0.685874
|
a02a7361573201e19663b4edd144d99cb4beac53
| 5,235
|
py
|
Python
|
fhirclient/r4models/documentreference_tests.py
|
cspears-mitre/CapStatement
|
2390566ed75d420e0615e3a0aacb77e8c030fdcc
|
[
"Apache-2.0"
] | 1
|
2021-12-24T11:14:38.000Z
|
2021-12-24T11:14:38.000Z
|
fhirclient/r4models/documentreference_tests.py
|
cspears-mitre/CapStatement
|
2390566ed75d420e0615e3a0aacb77e8c030fdcc
|
[
"Apache-2.0"
] | null | null | null |
fhirclient/r4models/documentreference_tests.py
|
cspears-mitre/CapStatement
|
2390566ed75d420e0615e3a0aacb77e8c030fdcc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.6.0-bd605d07 on 2018-12-20.
# 2018, SMART Health IT.
import os
import io
import unittest
import json
from . import documentreference
from .fhirdate import FHIRDate
class DocumentReferenceTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("DocumentReference", js["resourceType"])
return documentreference.DocumentReference(js)
def testDocumentReference1(self):
inst = self.instantiate_from("documentreference-example.json")
self.assertIsNotNone(inst, "Must have instantiated a DocumentReference instance")
self.implDocumentReference1(inst)
js = inst.as_json()
self.assertEqual("DocumentReference", js["resourceType"])
inst2 = documentreference.DocumentReference(js)
self.implDocumentReference1(inst2)
def implDocumentReference1(self, inst):
self.assertEqual(inst.category[0].coding[0].code, "History and Physical")
self.assertEqual(inst.category[0].coding[0].display, "History and Physical")
self.assertEqual(inst.category[0].coding[0].system, "http://ihe.net/xds/connectathon/classCodes")
self.assertEqual(inst.contained[0].id, "a2")
self.assertEqual(inst.content[0].attachment.contentType, "application/hl7-v3+xml")
self.assertEqual(inst.content[0].attachment.creation.date, FHIRDate("2005-12-24T09:35:00+11:00").date)
self.assertEqual(inst.content[0].attachment.creation.as_json(), "2005-12-24T09:35:00+11:00")
self.assertEqual(inst.content[0].attachment.hash, "2jmj7l5rSw0yVb/vlWAYkK/YBwk=")
self.assertEqual(inst.content[0].attachment.language, "en-US")
self.assertEqual(inst.content[0].attachment.size, 3654)
self.assertEqual(inst.content[0].attachment.title, "Physical")
self.assertEqual(inst.content[0].attachment.url, "http://example.org/xds/mhd/Binary/07a6483f-732b-461e-86b6-edb665c45510")
self.assertEqual(inst.content[0].format.code, "urn:ihe:pcc:handp:2008")
self.assertEqual(inst.content[0].format.display, "History and Physical Specification")
self.assertEqual(inst.content[0].format.system, "urn:oid:1.3.6.1.4.1.19376.1.2.3")
self.assertEqual(inst.context.event[0].coding[0].code, "T-D8200")
self.assertEqual(inst.context.event[0].coding[0].display, "Arm")
self.assertEqual(inst.context.event[0].coding[0].system, "http://ihe.net/xds/connectathon/eventCodes")
self.assertEqual(inst.context.facilityType.coding[0].code, "Outpatient")
self.assertEqual(inst.context.facilityType.coding[0].display, "Outpatient")
self.assertEqual(inst.context.facilityType.coding[0].system, "http://www.ihe.net/xds/connectathon/healthcareFacilityTypeCodes")
self.assertEqual(inst.context.period.end.date, FHIRDate("2004-12-23T08:01:00+11:00").date)
self.assertEqual(inst.context.period.end.as_json(), "2004-12-23T08:01:00+11:00")
self.assertEqual(inst.context.period.start.date, FHIRDate("2004-12-23T08:00:00+11:00").date)
self.assertEqual(inst.context.period.start.as_json(), "2004-12-23T08:00:00+11:00")
self.assertEqual(inst.context.practiceSetting.coding[0].code, "General Medicine")
self.assertEqual(inst.context.practiceSetting.coding[0].display, "General Medicine")
self.assertEqual(inst.context.practiceSetting.coding[0].system, "http://www.ihe.net/xds/connectathon/practiceSettingCodes")
self.assertEqual(inst.date.date, FHIRDate("2005-12-24T09:43:41+11:00").date)
self.assertEqual(inst.date.as_json(), "2005-12-24T09:43:41+11:00")
self.assertEqual(inst.description, "Physical")
self.assertEqual(inst.docStatus, "preliminary")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
self.assertEqual(inst.identifier[0].value, "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234")
self.assertEqual(inst.masterIdentifier.system, "urn:ietf:rfc:3986")
self.assertEqual(inst.masterIdentifier.value, "urn:oid:1.3.6.1.4.1.21367.2005.3.7")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.relatesTo[0].code, "appends")
self.assertEqual(inst.securityLabel[0].coding[0].code, "V")
self.assertEqual(inst.securityLabel[0].coding[0].display, "very restricted")
self.assertEqual(inst.securityLabel[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-Confidentiality")
self.assertEqual(inst.status, "current")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "34108-1")
self.assertEqual(inst.type.coding[0].display, "Outpatient Note")
self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
| 61.588235
| 135
| 0.701624
|
d3047b6ccbf88110d035b1360384972c67aab91f
| 989
|
py
|
Python
|
examples/plots/plot_cylinder.py
|
alek5k/pytransform3d
|
c6fb10b1d17713bd8a2d6becb928c4f6dcf611f9
|
[
"BSD-3-Clause"
] | 304
|
2019-01-16T15:14:31.000Z
|
2022-03-31T16:14:37.000Z
|
examples/plots/plot_cylinder.py
|
alek5k/pytransform3d
|
c6fb10b1d17713bd8a2d6becb928c4f6dcf611f9
|
[
"BSD-3-Clause"
] | 94
|
2018-12-07T14:54:05.000Z
|
2022-03-19T22:38:20.000Z
|
examples/plots/plot_cylinder.py
|
alek5k/pytransform3d
|
c6fb10b1d17713bd8a2d6becb928c4f6dcf611f9
|
[
"BSD-3-Clause"
] | 37
|
2018-12-09T23:58:40.000Z
|
2022-03-16T02:29:53.000Z
|
"""
==========================
Plot Transformed Cylinders
==========================
Plots surfaces of transformed cylindrical shells.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from pytransform3d.transformations import transform_from, plot_transform
from pytransform3d.rotations import random_axis_angle, matrix_from_axis_angle
from pytransform3d.plot_utils import plot_cylinder, remove_frame
random_state = np.random.RandomState(42)
A2B = transform_from(
R=matrix_from_axis_angle(random_axis_angle(random_state)),
p=random_state.randn(3))
ax = plot_cylinder(length=1.0, radius=0.3, thickness=0.1,
wireframe=False, alpha=0.2)
plot_transform(ax=ax, A2B=np.eye(4), s=0.3, lw=3)
plot_cylinder(ax=ax, length=1.0, radius=0.3, thickness=0.1, A2B=A2B,
wireframe=False, alpha=0.2)
plot_transform(ax=ax, A2B=A2B, s=0.3, lw=3)
remove_frame(ax)
ax.set_xlim((0, 1.5))
ax.set_ylim((-1.5, 0))
ax.set_zlim((-0.8, 0.7))
plt.show()
| 29.969697
| 77
| 0.70273
|
0a02c30043a23e269bc92f334d103d5ee509fe64
| 6,166
|
py
|
Python
|
src/main/python/model/elastic_net.py
|
meowpunch/bobsim-research
|
4411ac6eaf5b760611f689b0a9e290546e2f5435
|
[
"MIT"
] | 2
|
2020-03-01T17:42:44.000Z
|
2020-03-09T06:13:34.000Z
|
src/main/python/model/elastic_net.py
|
meowpunch/bobsim-research
|
4411ac6eaf5b760611f689b0a9e290546e2f5435
|
[
"MIT"
] | 2
|
2020-04-01T16:48:06.000Z
|
2020-04-04T11:04:10.000Z
|
src/main/python/model/elastic_net.py
|
meowpunch/bobsim-research
|
4411ac6eaf5b760611f689b0a9e290546e2f5435
|
[
"MIT"
] | null | null | null |
import tempfile
import pandas as pd
from joblib import dump
import numpy as np
from sklearn.linear_model import ElasticNet
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit
from analysis.hit_ratio_error import hit_ratio_error
from utils.logging import init_logger
from utils.s3_manager.manage import S3Manager
from utils.visualize import draw_hist
class ElasticNetModel:
"""
ElasticNet
"""
def __init__(self, bucket_name: str, x_train, y_train, params=None):
# logger
self.logger = init_logger()
# s3
self.s3_manager = S3Manager(bucket_name=bucket_name)
if params is None:
self.model = ElasticNet()
else:
self.model = ElasticNet(**params)
self.x_train, self.y_train = x_train, y_train
self.error = None
self.metric = None
def fit(self):
self.model.fit(self.x_train, self.y_train)
def predict(self, X):
return self.model.predict(X=X)
def estimate_metric(self, scorer, y, predictions):
self.error = pd.Series(y - predictions, name="error")
self.metric = scorer(y_true=y, y_pred=predictions)
return self.metric
def score(self):
return self.model.score(self.x_train, self.y_train)
@property
def coef_df(self):
"""
:return: pd DataFrame
"""
return pd.Series(
data=np.append(self.model.coef_, self.model.intercept_),
index=self.x_train.columns.tolist() + ["intercept"],
).rename("beta").reset_index().rename(columns={"index": "column"})
def save(self, prefix):
"""
save beta coef, metric, distribution, model
:param prefix: dir
"""
self.save_coef(key="{prefix}/beta.csv".format(prefix=prefix))
self.save_metric(key="{prefix}/metric.pkl".format(prefix=prefix))
self.save_error_distribution(prefix=prefix)
self.save_model(key="{prefix}/model.pkl".format(prefix=prefix))
def save_coef(self, key):
self.logger.info("coef:\n{coef}".format(coef=self.coef_df))
self.s3_manager.save_df_to_csv(self.coef_df, key=key)
def save_metric(self, key):
self.logger.info("customized RMSE is {metric}".format(metric=self.metric))
self.s3_manager.save_dump(x=self.metric, key=key)
def save_model(self, key):
self.s3_manager.save_dump(self.model, key=key)
def save_error_distribution(self, prefix):
draw_hist(self.error)
self.s3_manager.save_plt_to_png(
key="{prefix}/image/error_distribution.png".format(prefix=prefix)
)
ratio = hit_ratio_error(self.error)
self.s3_manager.save_plt_to_png(
key="{prefix}/image/hit_ratio_error.png".format(prefix=prefix)
)
return ratio
class ElasticNetSearcher(GridSearchCV):
"""
for research
"""
def __init__(
self, x_train, y_train, bucket_name,
grid_params=None, score=mean_squared_error
):
if grid_params is None:
grid_params = {
"max_iter": [1, 5, 10],
"alpha": [0, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100],
"l1_ratio": np.arange(0.0, 1.0, 0.1)
}
self.x_train = x_train
self.y_train = y_train
self.scorer = score
self.error = None # pd.Series
self.metric = None
# s3
self.s3_manager = S3Manager(bucket_name=bucket_name)
# logger
self.logger = init_logger()
super().__init__(
estimator=ElasticNet(),
param_grid=grid_params,
scoring=make_scorer(self.scorer, greater_is_better=False),
            # preserve temporal order: each validation fold must come after its training window, hence n_splits=2
cv=TimeSeriesSplit(n_splits=2).split(self.x_train)
)
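        # Illustration (hypothetical 6-row training set): TimeSeriesSplit(n_splits=2)
        # yields the folds train=[0, 1] / test=[2, 3] and train=[0, 1, 2, 3] /
        # test=[4, 5], so every validation window comes strictly after the data
        # it is scored against.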
def fit(self, X=None, y=None, groups=None, **fit_params):
super().fit(X=self.x_train, y=self.y_train)
@property
def coef_df(self):
"""
:return: pd DataFrame
"""
return pd.Series(
data=np.append(self.best_estimator_.coef_, self.best_estimator_.intercept_),
index=self.x_train.columns.tolist() + ["intercept"],
).rename("beta").reset_index().rename(columns={"index": "column"})
def estimate_metric(self, y_true, y_pred):
self.error = pd.Series(y_true - y_pred, name="error")
self.metric = self.scorer(y_true=y_true, y_pred=y_pred)
return self.metric
def save(self, prefix):
"""
save tuned params, beta coef, metric, distribution, model
:param prefix: dir
"""
self.save_params(key="{prefix}/params.pkl".format(prefix=prefix))
self.save_coef(key="{prefix}/beta.pkl".format(prefix=prefix))
self.save_metric(key="{prefix}/metric.pkl".format(prefix=prefix))
self.save_error_distribution(prefix=prefix)
self.save_model(key="{prefix}/model.pkl".format(prefix=prefix))
def save_params(self, key):
self.logger.info("tuned params: {params}".format(params=self.best_params_))
self.s3_manager.save_dump(x=self.best_params_, key=key)
def save_coef(self, key):
self.logger.info("beta_coef:\n{coef}".format(coef=self.coef_df))
self.s3_manager.save_df_to_csv(self.coef_df, key=key)
def save_metric(self, key):
self.logger.info("customized RMSE is {metric}".format(metric=self.metric))
self.s3_manager.save_dump(x=self.metric, key=key)
def save_model(self, key):
# save best elastic net
self.s3_manager.save_dump(self.best_estimator_, key=key)
def save_error_distribution(self, prefix):
draw_hist(self.error)
self.s3_manager.save_plt_to_png(
key="{prefix}/image/error_distribution.png".format(prefix=prefix)
)
ratio = hit_ratio_error(self.error)
self.s3_manager.save_plt_to_png(
key="{prefix}/image/hit_ratio_error.png".format(prefix=prefix)
)
return ratio
| 32.624339
| 89
| 0.631528
|
c9a4a542e62785cc01d93a6597ed6c04cb24e6a3
| 7,763
|
py
|
Python
|
password_validation/test/test_validators.py
|
gutyril/django-password-validation
|
fc721bdb973cafdaca2acc31f370c6ed66290d7b
|
[
"BSD-3-Clause"
] | 33
|
2015-06-11T02:02:36.000Z
|
2018-03-09T12:20:42.000Z
|
password_validation/test/test_validators.py
|
gutyril/django-password-validation
|
fc721bdb973cafdaca2acc31f370c6ed66290d7b
|
[
"BSD-3-Clause"
] | 2
|
2017-03-23T17:05:03.000Z
|
2017-04-25T19:11:33.000Z
|
password_validation/test/test_validators.py
|
gutyril/django-password-validation
|
fc721bdb973cafdaca2acc31f370c6ed66290d7b
|
[
"BSD-3-Clause"
] | 14
|
2015-06-11T07:11:53.000Z
|
2019-12-19T01:22:36.000Z
|
"""This module is backported from Django.
Copied from tests/auth_tests/test_validators.py at commit
9851e54121b3eebd3a7a29de3ed874d82554396b
The only change is to replace `django.contrib.auth.password_validation` with
`password_validation` throughout.
"""
from __future__ import unicode_literals
import os
from django.contrib.auth.models import User
from password_validation import (
CommonPasswordValidator, MinimumLengthValidator, NumericPasswordValidator,
UserAttributeSimilarityValidator, get_default_password_validators,
get_password_validators, password_changed,
password_validators_help_text_html, password_validators_help_texts,
validate_password,
)
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from django.utils._os import upath
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'password_validation.CommonPasswordValidator'},
{'NAME': 'password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
class PasswordValidationTest(TestCase):
def test_get_default_password_validators(self):
validators = get_default_password_validators()
self.assertEqual(len(validators), 2)
self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
self.assertEqual(validators[1].__class__.__name__, 'MinimumLengthValidator')
self.assertEqual(validators[1].min_length, 12)
def test_get_password_validators_custom(self):
validator_config = [{'NAME': 'password_validation.CommonPasswordValidator'}]
validators = get_password_validators(validator_config)
self.assertEqual(len(validators), 1)
self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
self.assertEqual(get_password_validators([]), [])
def test_validate_password(self):
self.assertIsNone(validate_password('sufficiently-long'))
msg_too_short = 'This password is too short. It must contain at least 12 characters.'
        with self.assertRaises(ValidationError) as cm:
validate_password('django4242')
self.assertEqual(cm.exception.messages, [msg_too_short])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_short')
with self.assertRaises(ValidationError) as cm:
validate_password('password')
self.assertEqual(cm.exception.messages, ['This password is too common.', msg_too_short])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
self.assertIsNone(validate_password('password', password_validators=[]))
def test_password_changed(self):
self.assertIsNone(password_changed('password'))
def test_password_validators_help_texts(self):
help_texts = password_validators_help_texts()
self.assertEqual(len(help_texts), 2)
self.assertIn('12 characters', help_texts[1])
self.assertEqual(password_validators_help_texts(password_validators=[]), [])
def test_password_validators_help_text_html(self):
help_text = password_validators_help_text_html()
self.assertEqual(help_text.count('<li>'), 2)
self.assertIn('12 characters', help_text)
class MinimumLengthValidatorTest(TestCase):
def test_validate(self):
expected_error = "This password is too short. It must contain at least %d characters."
self.assertIsNone(MinimumLengthValidator().validate('12345678'))
self.assertIsNone(MinimumLengthValidator(min_length=3).validate('123'))
with self.assertRaises(ValidationError) as cm:
MinimumLengthValidator().validate('1234567')
self.assertEqual(cm.exception.messages, [expected_error % 8])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_short')
with self.assertRaises(ValidationError) as cm:
MinimumLengthValidator(min_length=3).validate('12')
self.assertEqual(cm.exception.messages, [expected_error % 3])
def test_help_text(self):
self.assertEqual(
MinimumLengthValidator().get_help_text(),
"Your password must contain at least 8 characters."
)
class UserAttributeSimilarityValidatorTest(TestCase):
def test_validate(self):
user = User.objects.create(
username='testclient', first_name='Test', last_name='Client', email='testclient@example.com',
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
)
expected_error = "The password is too similar to the %s."
self.assertIsNone(UserAttributeSimilarityValidator().validate('testclient'))
with self.assertRaises(ValidationError) as cm:
            UserAttributeSimilarityValidator().validate('testclient', user=user)
self.assertEqual(cm.exception.messages, [expected_error % "username"])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_similar')
with self.assertRaises(ValidationError) as cm:
            UserAttributeSimilarityValidator().validate('example.com', user=user)
self.assertEqual(cm.exception.messages, [expected_error % "email address"])
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator(
user_attributes=['first_name'],
max_similarity=0.3,
).validate('testclient', user=user)
self.assertEqual(cm.exception.messages, [expected_error % "first name"])
self.assertIsNone(
UserAttributeSimilarityValidator(user_attributes=['first_name']).validate('testclient', user=user)
)
def test_help_text(self):
self.assertEqual(
UserAttributeSimilarityValidator().get_help_text(),
"Your password can't be too similar to your other personal information."
)
class CommonPasswordValidatorTest(TestCase):
def test_validate(self):
expected_error = "This password is too common."
self.assertIsNone(CommonPasswordValidator().validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
CommonPasswordValidator().validate('godzilla')
self.assertEqual(cm.exception.messages, [expected_error])
def test_validate_custom_list(self):
path = os.path.join(os.path.dirname(os.path.realpath(upath(__file__))), 'common-passwords-custom.txt')
validator = CommonPasswordValidator(password_list_path=path)
expected_error = "This password is too common."
self.assertIsNone(validator.validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
validator.validate('from-my-custom-list')
self.assertEqual(cm.exception.messages, [expected_error])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
def test_help_text(self):
self.assertEqual(
CommonPasswordValidator().get_help_text(),
"Your password can't be a commonly used password."
)
class NumericPasswordValidatorTest(TestCase):
def test_validate(self):
expected_error = "This password is entirely numeric."
self.assertIsNone(NumericPasswordValidator().validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
NumericPasswordValidator().validate('42424242')
self.assertEqual(cm.exception.messages, [expected_error])
self.assertEqual(cm.exception.error_list[0].code, 'password_entirely_numeric')
def test_help_text(self):
self.assertEqual(
NumericPasswordValidator().get_help_text(),
"Your password can't be entirely numeric."
)
| 42.653846
| 110
| 0.716604
|
e3ece2691272aa36d8340643146de96ca7b03277
| 15,127
|
py
|
Python
|
zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Scanner/__init__.py
|
kerwinxu/barcodeManager
|
175a7a07babc3da460e94c49c463fd8275ec228b
|
[
"BSD-2-Clause"
] | null | null | null |
zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Scanner/__init__.py
|
kerwinxu/barcodeManager
|
175a7a07babc3da460e94c49c463fd8275ec228b
|
[
"BSD-2-Clause"
] | null | null | null |
zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Scanner/__init__.py
|
kerwinxu/barcodeManager
|
175a7a07babc3da460e94c49c463fd8275ec228b
|
[
"BSD-2-Clause"
] | 1
|
2019-12-24T07:18:01.000Z
|
2019-12-24T07:18:01.000Z
|
"""SCons.Scanner
The Scanner package for the SCons software construction utility.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/__init__.py 5023 2010/06/14 22:05:46 scons"
import re
import SCons.Node.FS
import SCons.Util
import SCons.Warnings  # needed for the DependencyWarning emitted in Classic.scan()
class _Null(object):
pass
# This is used instead of None as a default argument value so None can be
# used as an actual argument value.
_null = _Null
def Scanner(function, *args, **kw):
"""
Public interface factory function for creating different types
of Scanners based on the different types of "functions" that may
be supplied.
TODO: Deprecate this some day. We've moved the functionality
inside the Base class and really don't need this factory function
any more. It was, however, used by some of our Tool modules, so
the call probably ended up in various people's custom modules
patterned on SCons code.
"""
if SCons.Util.is_Dict(function):
return Selector(function, *args, **kw)
else:
return Base(function, *args, **kw)
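# Usage sketch (my_c_scan / my_fortran_scan are hypothetical callables): passing a
# plain function returns a Base scanner, while passing a dict keyed by scanner key
# (typically a suffix) returns a Selector:
#
#     s1 = Scanner(my_c_scan, skeys=['.c', '.h'])
#     s2 = Scanner({'.c': my_c_scan, '.f': my_fortran_scan})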
class FindPathDirs(object):
"""A class to bind a specific *PATH variable name to a function that
will return all of the *path directories."""
def __init__(self, variable):
self.variable = variable
def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env[self.variable]
except KeyError:
return ()
dir = dir or env.fs._cwd
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(dir.Rfindalldirs(path))
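# Sketch (hypothetical variable name): FindPathDirs('CPPPATH') produces a callable
# that substitutes env['CPPPATH'] against the current target/source and returns the
# resulting search directories as a tuple; if the variable is unset it returns ().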
class Base(object):
"""
The base class for dependency scanners. This implements
straightforward, single-pass scanning of a single file.
"""
def __init__(self,
function,
name = "NONE",
argument = _null,
skeys = _null,
path_function = None,
# Node.FS.Base so that, by default, it's okay for a
# scanner to return a Dir, File or Entry.
node_class = SCons.Node.FS.Base,
node_factory = None,
scan_check = None,
recursive = None):
"""
Construct a new scanner object given a scanner function.
'function' - a scanner function taking two or three
arguments and returning a list of strings.
'name' - a name for identifying this scanner object.
'argument' - an optional argument that, if specified, will be
passed to both the scanner function and the path_function.
'skeys' - an optional list argument that can be used to determine
which scanner should be used for a given Node. In the case of File
nodes, for example, the 'skeys' would be file suffixes.
'path_function' - a function that takes four or five arguments
(a construction environment, Node for the directory containing
the SConscript file that defined the primary target, list of
target nodes, list of source nodes, and optional argument for
this instance) and returns a tuple of the directories that can
be searched for implicit dependency files. May also return a
callable() which is called with no args and returns the tuple
(supporting Bindable class).
'node_class' - the class of Nodes which this scan will return.
If node_class is None, then this scanner will not enforce any
Node conversion and will return the raw results from the
underlying scanner function.
'node_factory' - the factory function to be called to translate
the raw results returned by the scanner function into the
expected node_class objects.
'scan_check' - a function to be called to first check whether
this node really needs to be scanned.
'recursive' - specifies that this scanner should be invoked
recursively on all of the implicit dependencies it returns
(the canonical example being #include lines in C source files).
May be a callable, which will be called to filter the list
of nodes found to select a subset for recursive scanning
(the canonical example being only recursively scanning
subdirectories within a directory).
The scanner function's first argument will be a Node that should
be scanned for dependencies, the second argument will be an
Environment object, the third argument will be the tuple of paths
returned by the path_function, and the fourth argument will be
the value passed into 'argument', and the returned list should
contain the Nodes for all the direct dependencies of the file.
Examples:
s = Scanner(my_scanner_function)
s = Scanner(function = my_scanner_function)
s = Scanner(function = my_scanner_function, argument = 'foo')
"""
# Note: this class could easily work with scanner functions that take
# something other than a filename as an argument (e.g. a database
# node) and a dependencies list that aren't file names. All that
# would need to be changed is the documentation.
self.function = function
self.path_function = path_function
self.name = name
self.argument = argument
if skeys is _null:
if SCons.Util.is_Dict(function):
skeys = list(function.keys())
else:
skeys = []
self.skeys = skeys
self.node_class = node_class
self.node_factory = node_factory
self.scan_check = scan_check
if callable(recursive):
self.recurse_nodes = recursive
elif recursive:
self.recurse_nodes = self._recurse_all_nodes
else:
self.recurse_nodes = self._recurse_no_nodes
def path(self, env, dir=None, target=None, source=None):
if not self.path_function:
return ()
if not self.argument is _null:
return self.path_function(env, dir, target, source, self.argument)
else:
return self.path_function(env, dir, target, source)
def __call__(self, node, env, path = ()):
"""
This method scans a single object. 'node' is the node
that will be passed to the scanner function, and 'env' is the
environment that will be passed to the scanner function. A list of
direct dependency nodes for the specified node will be returned.
"""
if self.scan_check and not self.scan_check(node, env):
return []
self = self.select(node)
if not self.argument is _null:
list = self.function(node, env, path, self.argument)
else:
list = self.function(node, env, path)
kw = {}
if hasattr(node, 'dir'):
kw['directory'] = node.dir
node_factory = env.get_factory(self.node_factory)
nodes = []
for l in list:
if self.node_class and not isinstance(l, self.node_class):
l = node_factory(l, **kw)
nodes.append(l)
return nodes
def __cmp__(self, other):
try:
return cmp(self.__dict__, other.__dict__)
except AttributeError:
# other probably doesn't have a __dict__
return cmp(self.__dict__, other)
def __hash__(self):
return id(self)
def __str__(self):
return self.name
def add_skey(self, skey):
"""Add a skey to the list of skeys"""
self.skeys.append(skey)
def get_skeys(self, env=None):
if env and SCons.Util.is_String(self.skeys):
return env.subst_list(self.skeys)[0]
return self.skeys
def select(self, node):
if SCons.Util.is_Dict(self.function):
key = node.scanner_key()
try:
return self.function[key]
except KeyError:
return None
else:
return self
def _recurse_all_nodes(self, nodes):
return nodes
def _recurse_no_nodes(self, nodes):
return []
recurse_nodes = _recurse_no_nodes
def add_scanner(self, skey, scanner):
self.function[skey] = scanner
self.add_skey(skey)
class Selector(Base):
"""
A class for selecting a more specific scanner based on the
scanner_key() (suffix) for a specific Node.
TODO: This functionality has been moved into the inner workings of
the Base class, and this class will be deprecated at some point.
(It was never exposed directly as part of the public interface,
although it is used by the Scanner() factory function that was
used by various Tool modules and therefore was likely a template
for custom modules that may be out there.)
"""
def __init__(self, dict, *args, **kw):
Base.__init__(self, None, *args, **kw)
self.dict = dict
self.skeys = list(dict.keys())
def __call__(self, node, env, path = ()):
return self.select(node)(node, env, path)
def select(self, node):
try:
return self.dict[node.scanner_key()]
except KeyError:
return None
def add_scanner(self, skey, scanner):
self.dict[skey] = scanner
self.add_skey(skey)
class Current(Base):
"""
A class for scanning files that are source files (have no builder)
or are derived files and are current (which implies that they exist,
either locally or in a repository).
"""
def __init__(self, *args, **kw):
def current_check(node, env):
return not node.has_builder() or node.is_up_to_date()
kw['scan_check'] = current_check
Base.__init__(self, *args, **kw)
class Classic(Current):
"""
A Scanner subclass to contain the common logic for classic CPP-style
include scanning, but which can be customized to use different
regular expressions to find the includes.
Note that in order for this to work "out of the box" (without
overriding the find_include() and sort_key() methods), the regular
expression passed to the constructor must return the name of the
include file in group 0.
"""
def __init__(self, name, suffixes, path_variable, regex, *args, **kw):
self.cre = re.compile(regex, re.M)
def _scan(node, env, path=(), self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, path)
kw['function'] = _scan
kw['path_function'] = FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
Current.__init__(self, *args, **kw)
def find_include(self, include, source_dir, path):
n = SCons.Node.FS.find_file(include, (source_dir,) + tuple(path))
return n, include
def sort_key(self, include):
return SCons.Node.FS._my_normcase(include)
def find_include_names(self, node):
return self.cre.findall(node.get_text_contents())
def scan(self, node, path=()):
# cache the includes list in node so we only scan it once:
if node.includes is not None:
includes = node.includes
else:
            includes = self.find_include_names(node)
# Intern the names of the include files. Saves some memory
# if the same header is included many times.
node.includes = list(map(SCons.Util.silent_intern, includes))
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
        # of the file as specified on the #include line (including the
# " or <, since that may affect what file is found), which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for include in includes:
n, i = self.find_include(include, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
else:
nodes.append((self.sort_key(include), n))
return [pair[1] for pair in sorted(nodes)]
class ClassicCPP(Classic):
"""
A Classic Scanner subclass which takes into account the type of
bracketing used to include the file, and uses classic CPP rules
for searching for the files based on the bracketing.
Note that in order for this to work, the regular expression passed
to the constructor must return the leading bracket in group 0, and
the contained filename in group 1.
"""
def find_include(self, include, source_dir, path):
if include[0] == '"':
paths = (source_dir,) + tuple(path)
else:
paths = tuple(path) + (source_dir,)
n = SCons.Node.FS.find_file(include[1], paths)
i = SCons.Util.silent_intern(include[1])
return n, i
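    # Search-order sketch: for an include written as "local.h" (leading bracket
    # group is '"') the directory of the including file is searched before the
    # *PATH directories; for <system.h> the *PATH directories are searched first
    # and the source directory last, mirroring classic CPP behavior.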
def sort_key(self, include):
return SCons.Node.FS._my_normcase(' '.join(include))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 36.538647
| 126
| 0.623455
|
f631dcd682255452eedaadece2188cc4f002118b
| 955
|
py
|
Python
|
app/db/__init__.py
|
Exodo-LS/is219_flask_app
|
15a88d1d23aa64519a9e93d1f6cff04c4f68791e
|
[
"BSD-3-Clause"
] | null | null | null |
app/db/__init__.py
|
Exodo-LS/is219_flask_app
|
15a88d1d23aa64519a9e93d1f6cff04c4f68791e
|
[
"BSD-3-Clause"
] | null | null | null |
app/db/__init__.py
|
Exodo-LS/is219_flask_app
|
15a88d1d23aa64519a9e93d1f6cff04c4f68791e
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import os
from flask import Blueprint, cli
from flask_sqlalchemy import SQLAlchemy
from app import config
db = SQLAlchemy()
database = Blueprint('database', __name__, )
@database.cli.command('create')
def init_db():
db.create_all()
@database.before_app_first_request
def create_db_file_if_does_not_exist():
root = config.Config.BASE_DIR
# set the name of the apps log folder to logs
dbdir = os.path.join(root, '..', config.Config.DB_DIR)
# make a directory if it doesn't exist
if not os.path.exists(dbdir):
os.mkdir(dbdir)
db.create_all()
@database.before_app_first_request
def create_upload_folder():
root = config.Config.BASE_DIR
# set the name of the apps log folder to logs
uploadfolder = os.path.join(root, '..', config.Config.UPLOAD_FOLDER)
# make a directory if it doesn't exist
if not os.path.exists(uploadfolder):
os.mkdir(uploadfolder)
db.create_all()
| 24.487179
| 72
| 0.71623
|
00bd29cd570668e6addce0873b08952e77992564
| 1,968
|
py
|
Python
|
pythonscript/embedded/godot/hazmat/io.py
|
ktksgit/godot-python
|
d50a29203d0744754e59eb9cb13263e907ba7ee3
|
[
"CC-BY-3.0"
] | null | null | null |
pythonscript/embedded/godot/hazmat/io.py
|
ktksgit/godot-python
|
d50a29203d0744754e59eb9cb13263e907ba7ee3
|
[
"CC-BY-3.0"
] | null | null | null |
pythonscript/embedded/godot/hazmat/io.py
|
ktksgit/godot-python
|
d50a29203d0744754e59eb9cb13263e907ba7ee3
|
[
"CC-BY-3.0"
] | null | null | null |
import sys
import pdb
from io import RawIOBase
from pythonscriptcffi import lib
from godot.hazmat.tools import godot_string_from_pyobj
# TODO: this implementation is really not optimized...
class GodotIO(RawIOBase):
def __init__(self, godot_func):
self.buffer = ""
self.godot_func = godot_func
def write(self, b):
self.buffer += b
if "\n" in self.buffer:
*to_print, self.buffer = self.buffer.split("\n")
g_b = godot_string_from_pyobj("\n".join(to_print))
self.godot_func(g_b)
def flush(self):
if self.buffer:
g_b = godot_string_from_pyobj(self.buffer)
self.godot_func(g_b)
self.buffer = ""
godot_stdout_io = GodotIO(lib.godot_print)
# Note: godot_print_error takes 4 args: descr, func, file, line.
# So GodotIO.write/flush would need to call it like that.
# But we don't have func/file/line here.
# Also, python calls write() at fairly random points with substrings of
# the actual message, so trying to structure the output with
# godot_print_error doesn't work well. Just use godot_print for now.
godot_stderr_io = GodotIO(lib.godot_print)
vanilla_Pdb = pdb.Pdb
def enable_capture_io_streams():
sys.stdout.flush()
sys.stderr.flush()
# if pdb.Pdb is not GodotIOStreamCaptureSwitchPdb:
# pdb.Pdb = GodotIOStreamCaptureSwitchPdb
sys.stdout = godot_stdout_io
sys.stderr = godot_stderr_io
# TODO: Godot always end it print with a '\n', which mess the `(pdb)` cursor
# the solution could be to create a special console (with input !) in the Godot
# editor...
def disable_capture_io_streams():
sys.stdout.flush()
sys.stderr.flush()
    # if pdb.Pdb is not vanilla_Pdb:
# pdb.Pdb = vanilla_Pdb
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
class GodotIOStreamCaptureSwitchPdb(pdb.Pdb):
def __init__(self):
super().__init__()
disable_capture_io_streams()
| 26.594595
| 79
| 0.6875
|
6701ad033865780073c71423f3075642a6dc5e4b
| 9,471
|
py
|
Python
|
mmtfPyspark/utils/mmtfDecoder.py
|
pwrose/mmtf-pyspark
|
2d0d9908bca0cb37e4cd8205b905707ce26d237e
|
[
"Apache-2.0"
] | null | null | null |
mmtfPyspark/utils/mmtfDecoder.py
|
pwrose/mmtf-pyspark
|
2d0d9908bca0cb37e4cd8205b905707ce26d237e
|
[
"Apache-2.0"
] | null | null | null |
mmtfPyspark/utils/mmtfDecoder.py
|
pwrose/mmtf-pyspark
|
2d0d9908bca0cb37e4cd8205b905707ce26d237e
|
[
"Apache-2.0"
] | null | null | null |
'''mmtfDecoder.py
Provides efficient methods to decode mmtf structures
'''
__author__ = "Mars (Shih-Cheng) Huang, Peter W Rose"
__maintainer__ = "Peter W Rose"
__email__ = "pwrose.ucsd@gmail.com"
__version__ = "0.3.7"
__status__ = "experimental"
import numpy as np
from numba import jit
from mmtfPyspark.utils import mmtfCodec
USE_NUMBA = False
#
# Byte arrays in message pack are in big endian format, e.g. >i4.
# Convert to little endian as expected by Python.
def get_value(input_data, field_name, required=False):
"""
Return an unencoded value from an MMTF data structure.
:param input_data:
:param field_name:
:param required:
:return:
"""
if field_name in input_data:
return input_data[field_name]
elif required:
raise Exception('ERROR: Invalid MMTF File, field: {} is missing!'.format(field_name))
else:
return None
def decode(input_data, field_name, required=False):
"""Decode MMTF binary data using one of the supported encoding strategies.
See https://github.com/rcsb/mmtf/blob/master/spec.md#codecs.
"""
if field_name in input_data:
encoding = np.frombuffer(input_data[field_name][0:4], '>i4').byteswap().newbyteorder()[0]
# TODO call method by string?
# method_to_call = getattr(, "_decode_type" + str(encoding))
# return method_to_call()
# see: https://jaxenter.com/implement-switch-case-statement-python-138315.html
if encoding == 2:
return _decode_type_2(input_data, field_name)
elif encoding == 4:
return _decode_type_4(input_data, field_name)
elif encoding == 5:
return _decode_type_5(input_data, field_name)
elif encoding == 6:
return _decode_type_6(input_data, field_name)
elif encoding == 8:
return _decode_type_8(input_data, field_name)
elif encoding == 9:
return _decode_type_9(input_data, field_name)
elif encoding == 10:
return _decode_type_10(input_data, field_name)
else:
raise Exception('ERROR: MMTF encoding type not supported : {}!'.format(field_name))
elif required:
raise Exception('ERROR: Invalid MMTF File, field: {} is missing!'.format(field_name))
else:
return []
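# Field layout assumed by the decoders below (per the MMTF codec spec linked
# above): bytes 0-3 hold the codec type, bytes 4-7 the decoded array length,
# bytes 8-11 a codec parameter such as the divisor, and the payload starts at
# offset 12 -- hence the [4:8] / [8:12] slices and offset=12 reads.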
def _decode_type_2(input_data, field_name):
return mmtfCodec.decode_array(input_data[field_name])
# return np.frombuffer(input_data[field_name], '>i1', offset=12).byteswap().newbyteorder()
def _decode_type_4(input_data, field_name):
return mmtfCodec.decode_array(input_data[field_name])
# return np.frombuffer(input_data[field_name], '>i4', offset=12).byteswap().newbyteorder()
def _decode_type_5(input_data, field_name):
return mmtfCodec.decode_array(input_data[field_name])
# return np.frombuffer(input_data[field_name], 'S4', offset=12).astype(str)
def _decode_type_6(input_data, field_name):
return mmtfCodec.decode_array(input_data[field_name])
# length = np.frombuffer(input_data[field_name][4:8], '>i').byteswap().newbyteorder()[0]
# int_array = np.frombuffer(input_data[field_name], '>i4', offset=12).byteswap().newbyteorder()
# return run_length_decoder_ascii(int_array, length)
def _decode_type_8(input_data, field_name):
return mmtfCodec.decode_array(input_data[field_name])
# length = np.frombuffer(input_data[field_name][4:8], '>i').byteswap().newbyteorder()[0]
# int_array = np.frombuffer(input_data[field_name], '>i4', offset=12).byteswap().newbyteorder()
# if USE_NUMBA:
# return np.cumsum(run_length_decoder_jit(int_array, length)).astype(np.int32)
# else:
# return np.cumsum(run_length_decoder(int_array, length)).astype(np.int32)
def _decode_type_9(input_data, field_name):
return mmtfCodec.decode_array(input_data[field_name])
# length = np.frombuffer(input_data[field_name][4:8], '>i').byteswap().newbyteorder()[0]
# buffer = input_data[field_name]
# int_array = np.frombuffer(buffer, '>i4', offset=12).byteswap().newbyteorder()
# divisor = np.frombuffer(buffer[8:12], '>i').byteswap().newbyteorder()[0]
# if USE_NUMBA:
# return (run_length_decoder_jit(int_array, length) / divisor).astype(np.float32)
# else:
# return (run_length_decoder(int_array, length) / divisor).astype(np.float32)
def _decode_type_10(input_data, field_name):
# TODO debugging decoding error
return np.empty(1000, np.float32)
#return mmtfCodec.decode_array(input_data[field_name])
buffer = input_data[field_name]
#int_array = np.frombuffer(buffer[12:], '>i2').byteswap().newbyteorder()
int_array = np.frombuffer(buffer, '>i2', offset=12).byteswap().newbyteorder()
divisor = np.frombuffer(buffer[8:12], '>i').byteswap().newbyteorder()
if USE_NUMBA:
return (recursive_index_decode_jit(int_array, divisor)).astype(np.float32)
else:
return (recursive_index_decode(int_array, divisor)).astype(np.float32)
def run_length_decoder(in_array, n):
"""Decodes a run length encoded array
Parameters
----------
in_array : list
the input list to apply run length decoder on
"""
lengths = np.array(in_array[1::2])
values = np.array(in_array[0::2])
starts = np.insert(np.array([0]), 1, np.cumsum(lengths))[:-1]
ends = starts + lengths
x = np.full(n, np.nan)
for l, h, v in zip(starts, ends, values):
x[l:h] = v
return x
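# Worked example: run_length_decoder([15, 3, 100, 2], 5) expands the
# (value, repeat) pairs into array([15., 15., 15., 100., 100.]).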
@jit(nopython=True)
def run_length_decoder_jit(x, n):
"""Decodes a run length encoded array
Parameters
----------
x : encoded array of integers (value, repeat pairs)
n : number of element in decoded array
"""
y = np.empty(n)
start = 0
for i in range(0, x.shape[0] - 1, 2):
end = x[i + 1] + start
y[start:end] = x[i]
start = end
return y
def recursive_index_decode(int_array, divisor=1000):
"""Unpack an array of integers using recursive indexing.
Parameters
----------
int_array : list
the input array of integers
divisor : int
the number used for decoding [1000]
Returns
-------
numpy.array
return the numpy.array of integers after recursive index decoding
"""
maximum = 32767
minimum = -32768
out_arr = np.cumsum(int_array) / divisor
return out_arr[(int_array != maximum) & (int_array != minimum)]
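# Worked example (divisor=1000): the encoded pair [32767, 100] represents the
# single value 32867 split across the int16 maximum; cumsum gives
# [32767, 32867], the sentinel position is masked out, and the decoded result
# is array([32.867]).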
@jit(nopython=True)
def recursive_index_decode_jit(x, divisor):
"""Unpack an array of integers using recursive indexing.
Parameters
----------
x : list
the input array of integers
divisor : int
the number used for decoding [1000]
Returns
-------
numpy.array
return the numpy.array of integers after recursive index decoding
"""
maximum = 32767
minimum = -32768
y = np.cumsum(x) / divisor
return y[(x != maximum) & (x != minimum)]
def run_length_decoder_ascii(x, n):
"""Decodes a run length encoded array
Parameters
----------
x : encoded array of integers (value, repeat pairs)
n : number of element in decoded array
"""
# TODO initialize as str or np.object_ or default?
y = np.empty(n, dtype=str)
start = 0
for i in range(0, x.shape[0] - 1, 2):
end = x[i + 1] + start
y[start:end] = chr(x[i])
start = end
return y
# def decode_entity_list(input_data):
# """Convert byte strings to strings in the entity list.
#
# Parameters
# ----------
# input_data : list
# the list of entities
#
# Returns
# -------
# list
# decoded entity list
# """
# return [convert_entity(entry) for entry in input_data]
#
# TODO check if these methods are still required
# def decode_group_list(input_data):
# """Convert byte strings to strings in the group map.
#
# Parameters
# ----------
# input_data : list
# the list of groups
#
# Returns
# -------
# list
# decoded group list
# """
# return [convert_group(entry) for entry in input_data]
#
#
# def convert_group(input_group):
# """Convert an individual group from byte strings to regular strings.
#
# Parameters
# ----------
# input_group : list
# the list of input groups
#
# Returns
# -------
# dict
# """
#
# output_group = {}
# for key in input_group:
# if key in [b'elementList', b'atomNameList']:
# output_group[key.decode('ascii')] = [x.decode('ascii')
# for x in input_group[key]]
# elif key in [b'chemCompType', b'groupName', b'singleLetterCode']:
# output_group[key.decode(
# 'ascii')] = input_group[key].decode('ascii')
# else:
# output_group[key.decode('ascii')] = input_group[key]
# return output_group
#
#
# def convert_entity(input_entity):
# """Convert an individual entity from byte strings to regular strings
#
# Parameters
# ----------
# input_entity : list
# entities to decode
#
# Returns
# -------
# dict
# decoded entity
# """
# output_entity = {}
# for key in input_entity:
# if key in [b'description', b'type', b'sequence']:
# output_entity[key.decode('ascii')] = input_entity[key].decode('ascii')
# else:
# output_entity[key.decode('ascii')] = input_entity[key]
# return output_entity
| 31.257426
| 99
| 0.639426
|
956f6a63e6988641a61e334890dcead52a0303b6
| 7,528
|
py
|
Python
|
xtlib/psm/local_psm_client.py
|
microsoft/ExperimentTools
|
ea9c15899dc787d8fbaf87dc4e9849ec4b28aadc
|
[
"MIT"
] | 5
|
2020-06-13T17:44:51.000Z
|
2021-12-21T21:02:36.000Z
|
xtlib/psm/local_psm_client.py
|
microsoft/ExperimentTools
|
ea9c15899dc787d8fbaf87dc4e9849ec4b28aadc
|
[
"MIT"
] | 9
|
2020-06-11T20:56:52.000Z
|
2022-03-12T00:34:45.000Z
|
xtlib/psm/local_psm_client.py
|
microsoft/ExperimentTools
|
ea9c15899dc787d8fbaf87dc4e9849ec4b28aadc
|
[
"MIT"
] | 7
|
2020-06-13T17:44:54.000Z
|
2021-12-21T21:02:52.000Z
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# local_psm_client.py: talk to Pool State Mgr running on local machine
import os
import time
import uuid
import psutil
import shutil
from xtlib import utils
from xtlib import pc_utils
from xtlib import constants
from xtlib import file_utils
from xtlib import process_utils
from xtlib.helpers.xt_config import get_merged_config
CONTROLLER_NAME_PATTERN = "xtlib.controller"
PY_RUN_CONTROLLER = "__run_controller__.py"
PSM_NAME_PATTERN = "psm.py"
class LocalPsmClient():
def __init__(self):
self.box_is_windows = pc_utils.is_windows()
self.psm_queue_path = os.path.expanduser(constants.PSM_QUEUE)
self.psm_log_path = os.path.expanduser(constants.PSM_LOGDIR)
self.cwd_path = os.path.expanduser(constants.CWD)
# ensure our required dirs have been created
if not os.path.exists(self.cwd_path):
os.makedirs(self.cwd_path)
if not os.path.exists(self.psm_queue_path):
os.makedirs(self.psm_queue_path)
def enqueue(self, team, job, run, node, fn_zip):
# copy file to box (with unique name)
#guid = str(uuid.uuid4())
ticks = time.time()
# copy SCRIPT
# TODO: copy to .tmp and then rename to .zip (to avoid partial copy issues)
fn_entry = "{}.{}.{}.{}.{}.zip".format(team, job, run, node, int(10*ticks))
fn_dest = os.path.join(self.psm_queue_path, fn_entry)
shutil.copyfile(fn_zip, fn_dest)
return fn_entry
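    # Entry-name sketch (hypothetical values): enqueue("ms", "job1234", "run1",
    # "node0", fn_zip) at tick 1591234567.8 copies the zip into the queue as
    # "ms.job1234.run1.node0.15912345678.zip".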
def dequeue(self, fn_entry):
# delete file
fn_dest = os.path.join(self.psm_queue_path, fn_entry)
if os.path.exists(fn_dest):
os.remove(fn_dest)
def enum_queue(self):
# list contents of queue
entries = os.listdir(self.psm_queue_path)
# get current entry being processed by controller
current = self.get_running_entry_name()
if not self._get_controller_process():
current = None
return entries, current
def _is_psm_running(self):
processes = psutil.process_iter()
psm_count = 0
for p in processes:
try:
if p.name().lower().startswith("python"):
#print("process name: {}".format(p.name()))
cmd_line = " ".join(p.cmdline())
if constants.PSM in cmd_line:
psm_count += 1
except BaseException as ex:
pass
return psm_count > 0
def _get_controller_process(self):
processes = psutil.process_iter()
controller_process = None
for p in processes:
try:
if p.name().lower().startswith("python"):
#print("process name: {}".format(p.name()))
cmd_line = " ".join(p.cmdline())
if CONTROLLER_NAME_PATTERN in cmd_line or PY_RUN_CONTROLLER in cmd_line:
controller_process = p
break
except BaseException as ex:
pass
return controller_process
def _get_psm_process(self):
processes = psutil.process_iter()
psm_process = None
for p in processes:
try:
if p.name().lower().startswith("python"):
#print("process name: {}".format(p.name()))
cmd_line = " ".join(p.cmdline())
if PSM_NAME_PATTERN in cmd_line:
psm_process = p
break
except BaseException as ex:
pass
return psm_process
def restart_psm_if_needed(self):
'''
processing:
- if PSM is running on old psm.py, kill the process and restart it.
            - if PSM is not running, start it.
'''
kill_needed = False
start_needed = False
fn_src = os.path.join(file_utils.get_my_file_dir(__file__), constants.PSM)
fn_dest = os.path.join(self.cwd_path, constants.PSM)
running = self._is_psm_running()
#print("PSM running=", running)
if running:
# do file contents match?
text_src = file_utils.read_text_file(fn_src)
text_dest = file_utils.read_text_file(fn_dest) if os.path.exists(fn_dest) else None
if text_src != text_dest:
kill_needed = True
else:
start_needed = True
if kill_needed:
p = self._get_psm_process()
try:
p.kill()
except psutil.AccessDenied:
print("AccessDenied in killing python process")
start_needed = True
if start_needed:
# always copy psm.py (for xt dev/debug purposes)
shutil.copyfile(fn_src, fn_dest)
# run psm
fn_log = os.path.join(self.cwd_path, constants.PSMLOG)
if self.box_is_windows:
cmd_parts = ["cmd", "/c", "python -u {} > {}".format(fn_dest, fn_log)]
else:
cmd_parts = ["bash", "-c", "python -u {} > {}".format(fn_dest, fn_log)]
fn_psm_log = os.path.expanduser("~/.xt/cwd/runpsm.log")
process_utils.start_async_run_detached(cmd_parts, self.cwd_path, fn_psm_log)
def get_running_entry_name(self):
text = None
controller_cwd = utils.get_controller_cwd(self.box_is_windows, is_local=True)
fn_current = os.path.join(controller_cwd, constants.CURRENT_RUNNING_ENTRY)
if os.path.exists(fn_current):
text = file_utils.read_text_file(fn_current).strip()
return text
def get_status(self, fn_entry):
status = "completed" # unless below finds different
fn_queue_entry = os.path.join(self.psm_queue_path, fn_entry)
if os.path.exists(fn_queue_entry):
status = "queued"
else:
text = self.get_running_entry_name()
if text == fn_entry:
# entry might be running; is the controller active?
if self._get_controller_process():
status = "running"
return status
def cancel(self, fn_entry):
cancelled = False
status = "completed"
        # don't call get_status here - check details just-in-time to minimize race conditions
fn_queue_entry = os.path.join(self.psm_queue_path, fn_entry)
if os.path.exists(fn_queue_entry):
os.remove(fn_queue_entry)
cancelled = True
else:
text = self.get_running_entry_name()
if text == fn_entry:
# entry might be running; is the controller active?
p = self._get_controller_process()
if p:
p.kill()
cancelled = True
return cancelled, status
def read_log_file(self, fn_entry, start_offset, end_offset):
fn_entry_base = os.path.splitext(fn_entry)[0]
fn_log = os.path.join(self.psm_log_path, fn_entry_base + ".log")
new_bytes = b""
if os.path.exists(fn_log):
with open(fn_log, "rb") as infile:
infile.seek(start_offset)
if end_offset:
new_bytes = infile.read(end_offset-start_offset)
else:
new_bytes = infile.read()
return new_bytes
| 32.034043
| 95
| 0.577179
|
46d1ecaa0e8c9579374e4a0186db06f4fdc3535f
| 6,978
|
py
|
Python
|
src/data/CFD/shallow_cfd.py
|
poly-ai/fluid-surface-estimation
|
b2e310f38c3cce3c13fbf0b8277ee4eb00755d36
|
[
"MIT"
] | 2
|
2022-02-15T21:41:06.000Z
|
2022-02-16T04:54:51.000Z
|
src/data/CFD/shallow_cfd.py
|
poly-ai/fluid-surface-estimation
|
b2e310f38c3cce3c13fbf0b8277ee4eb00755d36
|
[
"MIT"
] | null | null | null |
src/data/CFD/shallow_cfd.py
|
poly-ai/fluid-surface-estimation
|
b2e310f38c3cce3c13fbf0b8277ee4eb00755d36
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from .readgri import readgri
from .flux import FluxFunction
import pandas as pd
# USER SPECIFY
#############################################################
LABEL = "fine" # Label for the simulation
SIM_TIME = 1 # The total simulation time (> 500 frames)
# NOTE dt = 0.001 sec
# Define the Initial Distribution of the Water Height
def initial_height(x,y,x_center,y_center,x_distri,y_distri,height_level,height_delta):
    h0 = height_level + height_delta*np.exp((-x_distri*(x-x_center)**2) - (y_distri*(y-y_center)**2))
#h0 = 1.0+0.3*np.exp((-50*(x-0.5)**2)-(50*(y-0.5)**2));
# This setup will generate an avg height with 1.0
# A single peak 1.3 height at (x=1.3,y=0.9)
    # It is similar to the concept of a Gaussian distribution
# NOTE the value of (0.3) should be positive due to the limit of
# shallow water equation
# Adding h1 and return h0 + h1, it will be a two peaks distribution
# h1 = 1.0+0.3*np.exp((-50*(x-0.25)**2)-(50*(y-0.5)**2));
return h0
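# Quick check with the defaults used by run_cfd below: at (x_center, y_center)
# the exponential term is 1, so the peak height is height_level + height_delta
# (1.0 + 0.3 = 1.3); far from the center the term decays toward 0 and the
# surface settles at height_level.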
##############################################################
def AreaCell(C, V):
x1 = V[C[0], 0]
y1 = V[C[0], 1]
x2 = V[C[1], 0]
y2 = V[C[1], 1]
x3 = V[C[2], 0]
y3 = V[C[2], 1]
Ai = 0.5 * (x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))
return Ai
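# Ai above is the signed triangle area from the shoelace formula; e.g. the unit
# right triangle (0,0), (1,0), (0,1) gives 0.5*(0*(0-1) + 1*(1-0) + 0*(0-0)) = 0.5.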
def dLength(E, V):
index1 = E[0]
index2 = E[1]
x1 = V[index1, 0]
y1 = V[index1, 1]
x2 = V[index2, 0]
y2 = V[index2, 1]
length = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
return length
def WallFlux(R, Lcell, u, n0, dl, E0, Forces):
g = 9.8
Lu = u[Lcell]
h = Lu[0]
R[Lcell, 0] += dl * 0
R[Lcell, 1] += dl * n0[0] * 0.5 * g * h**2
R[Lcell, 2] += dl * n0[1] * 0.5 * g * h**2
index = E0[3]
if index > 0:
Forces[2 * index - 2] += dl * n0[0] * 0.5 * g * h**2
Forces[2 * index - 1] += dl * n0[1] * 0.5 * g * h**2
def findnorm(V, edge):
# normal point from left to right
pA = V[edge[0]]
pB = V[edge[1]]
dl = np.sqrt((pA[0] - pB[0]) ** 2 + (pA[1] - pB[1]) ** 2)
n = np.array([pB[1] - pA[1], pA[0] - pB[0]]) / dl
return n
def Centeroid(V, C, x_center,y_center,x_distri,y_distri,height_level,height_delta):
NC = np.shape(C)[0]
Cent = np.zeros((NC, 2))
u = np.zeros((NC, 3))
for i in range(NC):
C0 = C[i]
x1 = V[C0[0], 0]
y1 = V[C0[0], 1]
x2 = V[C0[1], 0]
y2 = V[C0[1], 1]
x3 = V[C0[2], 0]
y3 = V[C0[2], 1]
x = (x1 + x2 + x3) / 3
y = (y1 + y2 + y3) / 3
Cent[i, :] = np.array([x, y])
#######################################################################
# Calls the initial_height function to determine the
# inital distribution of the water height
h0 = initial_height(x,y,
x_center,y_center,x_distri,y_distri,height_level,height_delta)
#######################################################################
vx0 = 0
vy0 = 0
u[i, :] = np.array([h0, vx0, vy0])
return Cent, u
def WriteFile(label, df1, df2):
print("Writing label: " + label + " .csv files to output/")
tx_h = "data/raw/CFD/Shallow_" + label + ".csv"
tx_t = "data/raw/CFD/SimTime_" + label + ".csv"
print("Height Data\n", df1)
df1.to_csv(tx_h, sep=",", index=None)
print("Simulation Time\n", df2)
df2.to_csv(tx_t, sep=",")
print("Data : " + label + " .csv files to data/raw/CFD")
def plotline(pt1, pt2):
x = [pt1[0], pt2[0]]
y = [pt1[1], pt2[1]]
plt.plot(x, y)
def run_cfd(grifile,x_center=1.3,y_center=0.9,x_distri=50,y_distri=50,height_level=1,height_delta=0.3):
# Use Fine grid: tank1.gri
Mesh = readgri(grifile)
V = Mesh["V"]
C = Mesh["E"]
NC = np.shape(C)[0]
# Centeroid of Cell and Initial States
Cent, u = Centeroid(V, C, x_center,y_center,x_distri,y_distri,height_level,height_delta)
# u = np.ones((NC,3)); #for test
# u[:,1] = 0;
# u[:,2] = 0;
df = pd.DataFrame(u)
df2 = pd.DataFrame(Cent, columns=["x", "y"])
df4 = pd.DataFrame()
Ai = np.zeros(NC)
for i in range(NC):
Ai[i] = AreaCell(C[i], V)
t = np.zeros(1)
df4 = df4.append(pd.DataFrame(t))
it = 0
# Nt = 100
Tsim = 0.5
# rescheck = np.zeros(1)
# df5 = pd.DataFrame();
# df5 = df5.append(pd.DataFrame(rescheck));
# Start Propogating with Time
# while(it<=2):
while t <= Tsim:
# print("\niter-----------------------")
# print(it)
R = np.zeros((NC, 3))
Forces = np.zeros((6, 1))
tisum = np.zeros(NC)
# Get BE normal
BE = Mesh["BE"]
for i in range(np.shape(BE)[0]):
E0 = BE[i]
n0 = findnorm(V, E0)
c0 = 0.5 * (V[E0[0], 0:2] + V[E0[1], 0:2])
dl = dLength(E0, V)
LCell = E0[2]
WallFlux(R, LCell, u, n0, dl, E0, Forces)
# Wall wave speed
u0 = u[LCell]
c0 = np.sqrt(9.8 * u0[0])
vx = u0[1] / u0[0]
vy = u0[2] / u0[0]
wavespeed = np.abs(vx * n0[0] + vy * n0[1]) + c0
tisum[LCell] += wavespeed * dl
# plotline(c0,c0+0.05*n0)
# Get IE normal
IE = Mesh["IE"]
for i in range(np.shape(IE)[0]):
E0 = IE[i]
n0 = findnorm(V, E0)
c0 = 0.5 * (V[E0[0], 0:2] + V[E0[1], 0:2])
dl = dLength(E0, V)
LCell = E0[2]
RCell = E0[3]
uL = u[LCell]
uR = u[RCell]
Flux, smag = FluxFunction(uL, uR, n0)
R[LCell] += dl * Flux
R[RCell] -= dl * Flux
tisum[LCell] += smag * dl
tisum[RCell] += smag * dl
# Residual Done
# print("Resid Check");
# rescheck[0] = LA.norm(R);
# print(rescheck);
# print("Height Check")
# print(np.max(u[:,0]))
# print("Forces")
# print(Forces)
# update states
# dti = np.divide(2*Ai,tisum);
# dtg = np.min(dti)*0.9;
dtg = 0.001
# print("dt global [sec]")
# print(dtg)
for i in range(NC):
u[i, :] = u[i, :] - dtg * R[i, :] / Ai[i]
t[0] += dtg
# Store unknowns
df = pd.concat([df, pd.DataFrame(u)], axis=1)
# Store Forces
# if it == 0:
# print("Got you!!!!")
# df3 = pd.concat([df3,pd.DataFrame(Forces)], axis = 1);
# df3 = pd.concat([df3,pd.DataFrame(Forces)], axis = 1);
# else:
# df3 = pd.concat([df3,pd.DataFrame(Forces)], axis = 1);
# Store Sim. Time
df4 = df4.append(pd.DataFrame(t))
# df5 = df5.append(pd.DataFrame(rescheck));
# print("current time")
# print(t[0])
it += 1
print("\n##### DONE #####\n")
return df.to_numpy(), df2.to_numpy(), df4.to_numpy()
| 28.137097
| 103
| 0.475064
|
73131d6c0fa216e136b44772d195a1f6a4700c1d
| 20,381
|
py
|
Python
|
tensorflow/python/autograph/pyct/static_analysis/activity.py
|
huonw/tensorflow
|
85f47254af7cc230a4a031998dffe770b7edbb9d
|
[
"Apache-2.0"
] | 1
|
2020-10-01T16:52:51.000Z
|
2020-10-01T16:52:51.000Z
|
tensorflow/python/autograph/pyct/static_analysis/activity.py
|
huonw/tensorflow
|
85f47254af7cc230a4a031998dffe770b7edbb9d
|
[
"Apache-2.0"
] | 1
|
2022-02-10T01:08:48.000Z
|
2022-02-10T01:08:48.000Z
|
tensorflow/python/autograph/pyct/static_analysis/activity.py
|
huonw/tensorflow
|
85f47254af7cc230a4a031998dffe770b7edbb9d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Activity analysis.
Requires qualified name annotations (see qual_names.py).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import weakref
import gast
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
class Scope(object):
"""Encloses local symbol definition and usage information.
This can track for instance whether a symbol is modified in the current scope.
Note that scopes do not necessarily align with Python's scopes. For example,
the body of an if statement may be considered a separate scope.
Caution - the AST references held by this object are weak.
Scope objects are mutable during construction only, and must be frozen using
`Scope.finalize()` before use. Furthermore, a scope is consistent only after
all its children have been frozen. While analysing code blocks, scopes are
being gradually built, from the innermost scope outward. Freezing indicates
that the analysis of a code block is complete. Once frozen, mutation is no
longer allowed. `is_final` tracks whether the scope is frozen or not. Certain
properties, like `referenced`, are only accurate when called on frozen scopes.
Attributes:
parent: Optional[Scope], the parent scope, if any.
isolated: bool, whether the scope is a true Python scope (e.g. the scope of
a function), or just a surrogate tracking an ordinary code block. Using
the terminology of the Python 3 reference documentation, True roughly
represents an actual scope, whereas False represents an ordinary code
block.
isolated_names: Set[qual_names.QN], identifiers that are isolated to this
scope (even if the scope is not isolated).
read: Set[qual_names.QN], identifiers read in this scope.
modified: Set[qual_names.QN], identifiers modified in this scope.
deleted: Set[qual_names.QN], identifiers deleted in this scope.
bound: Set[qual_names.QN], names that are bound to this scope. See
https://docs.python.org/3/reference/executionmodel.html#binding-of-names
for a precise definition.
globals: Set[qual_names.QN], names that are explicitly marked as global in
this scope. Note that this doesn't include free read-only vars bound to
global symbols.
free_vars: Set[qual_names.QN], the free variables in this scope. See
https://docs.python.org/3/reference/executionmodel.html for a precise
definition.
params: WeakValueDictionary[qual_names.QN, ast.Node], function arguments
visible in this scope, mapped to the function node that defines them.
enclosing_scope: Scope, the innermost isolated scope that is a transitive
parent of this scope. May be the scope itself.
referenced: Set[qual_names.QN], the totality of the symbols used by this
scope and its parents.
is_final: bool, whether the scope is frozen or not.
Note - simple statements may never delete and modify a symbol at the same
time. However, compound ones like if statements can. In that latter case, it's
undefined whether the symbol is actually modified or deleted upon statement
exit. Certain analyses like reaching definitions need to be careful about
this.
"""
# Note: this mutable-immutable pattern is used because using a builder would
# have taken a lot more boilerplate.
def __init__(self, parent, isolated=True):
"""Create a new scope.
Args:
parent: A Scope or None.
isolated: Whether the scope is isolated, that is, whether variables
modified in this scope should be considered modified in the parent
scope.
"""
self.parent = parent
self.isolated = isolated
self.isolated_names = set()
self.read = set()
self.modified = set()
self.deleted = set()
self.bound = set()
self.globals = set()
self.params = weakref.WeakValueDictionary()
# Certain fields can only be accessed after the scope and all its parent
# scopes have been fully built. This field guards that.
self.is_final = False
@property
def enclosing_scope(self):
assert self.is_final
if self.parent is not None and not self.isolated:
return self.parent
return self
@property
def referenced(self):
if self.parent is not None:
return self.read | self.parent.referenced
return self.read
@property
def free_vars(self):
enclosing_scope = self.enclosing_scope
return enclosing_scope.read - enclosing_scope.bound
def copy_from(self, other):
"""Recursively copies the contents of this scope from another scope."""
assert not self.is_final
if self.parent is not None:
assert other.parent is not None
self.parent.copy_from(other.parent)
self.isolated_names = copy.copy(other.isolated_names)
self.modified = copy.copy(other.modified)
self.read = copy.copy(other.read)
self.deleted = copy.copy(other.deleted)
self.bound = copy.copy(other.bound)
self.params = copy.copy(other.params)
@classmethod
def copy_of(cls, other):
if other.parent is not None:
assert other.parent is not None
parent = cls.copy_of(other.parent)
else:
parent = None
new_copy = cls(parent)
new_copy.copy_from(other)
return new_copy
def merge_from(self, other):
assert not self.is_final
if self.parent is not None:
assert other.parent is not None
self.parent.merge_from(other.parent)
self.isolated_names.update(other.isolated_names)
self.read.update(other.read)
self.modified.update(other.modified)
    self.bound.update(other.bound)
self.params.update(other.params)
def finalize(self):
"""Freezes this scope."""
assert not self.is_final
# TODO(mdan): freeze read, modified, bound.
if self.parent is not None:
assert not self.parent.is_final
if not self.isolated:
self.parent.read.update(self.read - self.isolated_names)
self.parent.modified.update(self.modified - self.isolated_names)
self.parent.bound.update(self.bound - self.isolated_names)
self.parent.globals.update(self.globals)
else:
# TODO(mdan): This is not accurate.
self.parent.read.update(self.read - self.bound)
self.is_final = True
def __repr__(self):
return 'Scope{r=%s, w=%s}' % (tuple(self.read), tuple(self.modified))
def mark_param(self, name, owner):
# Assumption: all AST nodes have the same life span. This lets us use
# a weak reference to mark the connection between a symbol node and the
# function node whose argument that symbol is.
self.params[name] = owner
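# Illustration (hypothetical statement): analyzing `x = y + 1` in a non-isolated
# child scope puts `y` in `read` and `x` in both `modified` and `bound`; when the
# child is finalized, everything except its `isolated_names` is propagated into
# the parent scope's corresponding sets.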
class _Comprehension(object):
no_root = True
def __init__(self):
# TODO(mdan): Consider using an enum.
self.is_list_comp = False
self.targets = set()
class _FunctionOrClass(object):
def __init__(self):
self.node = None
class ActivityAnalyzer(transformer.Base):
"""Annotates nodes with local scope information.
See Scope.
The use of this class requires that qual_names.resolve() has been called on
  the node. This class will ignore nodes that have not been
annotated with their qualified names.
"""
def __init__(self, context, parent_scope=None):
super(ActivityAnalyzer, self).__init__(context)
self.scope = Scope(parent_scope, isolated=True)
# Note: all these flags crucially rely on the respective nodes are
# leaves in the AST, that is, they cannot contain other statements.
self._in_aug_assign = False
@property
def _in_constructor(self):
context = self.state[_FunctionOrClass]
if context.level > 2:
innermost = context.stack[-1].node
parent = context.stack[-2].node
return (isinstance(parent, gast.ClassDef) and
(isinstance(innermost, gast.FunctionDef) and
innermost.name == '__init__'))
return False
def _node_sets_self_attribute(self, node):
if anno.hasanno(node, anno.Basic.QN):
qn = anno.getanno(node, anno.Basic.QN)
# TODO(mdan): The 'self' argument is not guaranteed to be called 'self'.
if qn.has_attr and qn.parent.qn == ('self',):
return True
return False
def _track_symbol(self, node, composite_writes_alter_parent=False):
# A QN may be missing when we have an attribute (or subscript) on a function
# call. Example: a().b
if not anno.hasanno(node, anno.Basic.QN):
return
qn = anno.getanno(node, anno.Basic.QN)
# When inside a comprehension, ignore reads to any of the comprehensions's
# targets. This includes attributes or slices of those arguments.
for l in self.state[_Comprehension]:
if qn in l.targets:
return
if qn.owner_set & set(l.targets):
return
if isinstance(node.ctx, gast.Store):
# In comprehensions, modified symbols are the comprehension targets.
if self.state[_Comprehension].level > 0:
self.state[_Comprehension].targets.add(qn)
# List comprehension targets leak in Python 2.
# For details, see:
# https://stackoverflow.com/questions/4198906/list-comprehension-rebinds-names-even-after-scope-of-comprehension-is-this-righ
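# Illustrative example of that Python 2 leak: after `[x for x in range(3)]`
# runs at function scope, the name `x` stays bound to 2 in that scope.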
if not (six.PY2 and self.state[_Comprehension].is_list_comp):
return
self.scope.modified.add(qn)
self.scope.bound.add(qn)
if qn.is_composite and composite_writes_alter_parent:
self.scope.modified.add(qn.parent)
if self._in_aug_assign:
self.scope.read.add(qn)
elif isinstance(node.ctx, gast.Load):
self.scope.read.add(qn)
elif isinstance(node.ctx, gast.Param):
self.scope.bound.add(qn)
self.scope.mark_param(qn, self.state[_FunctionOrClass].node)
elif isinstance(node.ctx, gast.Del):
# The read matches the Python semantics - attempting to delete an
# undefined symbol is illegal.
self.scope.read.add(qn)
# Targets of del are considered bound:
# https://docs.python.org/3/reference/executionmodel.html#binding-of-names
self.scope.bound.add(qn)
self.scope.deleted.add(qn)
else:
raise ValueError('Unknown context {} for node "{}".'.format(
type(node.ctx), qn))
def _enter_scope(self, isolated):
self.scope = Scope(self.scope, isolated=isolated)
def _exit_scope(self):
exited_scope = self.scope
exited_scope.finalize()
self.scope = exited_scope.parent
return exited_scope
def _exit_and_record_scope(self, node, tag=anno.Static.SCOPE):
node_scope = self._exit_scope()
anno.setanno(node, tag, node_scope)
return node_scope
def _process_statement(self, node):
self._enter_scope(False)
node = self.generic_visit(node)
self._exit_and_record_scope(node)
return node
def visit_Global(self, node):
for name in node.names:
self.scope.globals.add(qual_names.QN(name))
return node
def visit_Expr(self, node):
return self._process_statement(node)
def visit_Raise(self, node):
return self._process_statement(node)
def visit_Return(self, node):
return self._process_statement(node)
def visit_Assign(self, node):
return self._process_statement(node)
def visit_AnnAssign(self, node):
return self._process_statement(node)
def visit_AugAssign(self, node):
# Special rules for AugAssign. Here, the AST only shows the target as
# written, when it is in fact also read.
self._enter_scope(False)
self._in_aug_assign = True
node.target = self.visit(node.target)
self._in_aug_assign = False
node.op = self.visit(node.op)
node.value = self.visit(node.value)
self._exit_and_record_scope(node)
return node
def visit_Delete(self, node):
return self._process_statement(node)
def visit_Name(self, node):
node = self.generic_visit(node)
self._track_symbol(node)
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if self._in_constructor and self._node_sets_self_attribute(node):
self._track_symbol(node, composite_writes_alter_parent=True)
else:
self._track_symbol(node)
return node
def visit_Subscript(self, node):
node = self.generic_visit(node)
# Subscript writes (e.g. a[b] = "value") are considered to modify
# both the element itself (a[b]) and its parent (a).
self._track_symbol(node)
return node
def visit_Print(self, node):
self._enter_scope(False)
node.values = self.visit_block(node.values)
node_scope = self._exit_and_record_scope(node)
anno.setanno(node, NodeAnno.ARGS_SCOPE, node_scope)
return node
def visit_Assert(self, node):
return self._process_statement(node)
def visit_Call(self, node):
self._enter_scope(False)
node.args = self.visit_block(node.args)
node.keywords = self.visit_block(node.keywords)
# TODO(mdan): Account for starargs, kwargs
self._exit_and_record_scope(node, tag=NodeAnno.ARGS_SCOPE)
node.func = self.visit(node.func)
return node
def _process_block_node(self, node, block, scope_name):
self._enter_scope(False)
block = self.visit_block(block)
self._exit_and_record_scope(node, tag=scope_name)
return node
def _process_parallel_blocks(self, parent, children):
# Because the scopes are not isolated, processing any child block
# modifies the parent state causing the other child blocks to be
# processed incorrectly. So we need to checkpoint the parent scope so that
# each child sees the same context.
before_parent = Scope.copy_of(self.scope)
after_children = []
for child, scope_name in children:
self.scope.copy_from(before_parent)
parent = self._process_block_node(parent, child, scope_name)
after_child = Scope.copy_of(self.scope)
after_children.append(after_child)
for after_child in after_children:
self.scope.merge_from(after_child)
return parent
def _process_comprehension(self,
node,
is_list_comp=False,
is_dict_comp=False):
with self.state[_Comprehension] as comprehension_:
comprehension_.is_list_comp = is_list_comp
# Note: it's important to visit the generators first to properly account
# for the variables local to these generators. Example: `x` is local to
# the expression `z for x in y for z in x`.
node.generators = self.visit_block(node.generators)
if is_dict_comp:
node.key = self.visit(node.key)
node.value = self.visit(node.value)
else:
node.elt = self.visit(node.elt)
return node
def visit_comprehension(self, node):
# It is important to visit children in this order so that the reads to
# the target name are appropriately ignored.
node.iter = self.visit(node.iter)
node.target = self.visit(node.target)
return self.generic_visit(node)
def visit_DictComp(self, node):
return self._process_comprehension(node, is_dict_comp=True)
def visit_ListComp(self, node):
return self._process_comprehension(node, is_list_comp=True)
def visit_SetComp(self, node):
return self._process_comprehension(node)
def visit_GeneratorExp(self, node):
return self._process_comprehension(node)
def visit_arguments(self, node):
return self._process_statement(node)
def visit_ClassDef(self, node):
with self.state[_FunctionOrClass] as fn:
fn.node = node
# The ClassDef node itself has a Scope object that tracks the creation
# of its name, along with the usage of any decorator accompanying it.
self._enter_scope(False)
node.decorator_list = self.visit_block(node.decorator_list)
self.scope.modified.add(qual_names.QN(node.name))
self.scope.bound.add(qual_names.QN(node.name))
node.bases = self.visit_block(node.bases)
node.keywords = self.visit_block(node.keywords)
self._exit_and_record_scope(node)
# A separate Scope tracks the actual class definition.
self._enter_scope(True)
node = self.generic_visit(node)
self._exit_scope()
return node
def visit_FunctionDef(self, node):
with self.state[_FunctionOrClass] as fn:
fn.node = node
# The FunctionDef node itself has a Scope object that tracks the creation
# of its name, along with the usage of any decorator accompanying it.
self._enter_scope(False)
node.decorator_list = self.visit_block(node.decorator_list)
function_name = qual_names.QN(node.name)
self.scope.modified.add(function_name)
self.scope.bound.add(function_name)
self._exit_and_record_scope(node)
# A separate Scope tracks the actual function definition.
self._enter_scope(True)
node.args = self.visit(node.args)
# Track the body separately. This is for compatibility reasons, it may not
# be strictly needed.
self._enter_scope(False)
node.body = self.visit_block(node.body)
self._exit_and_record_scope(node, NodeAnno.BODY_SCOPE)
self._exit_scope()
return node
def visit_Lambda(self, node):
# Lambda nodes are treated in roughly the same way as FunctionDef nodes.
with self.state[_FunctionOrClass] as fn:
fn.node = node
self._enter_scope(True)
node = self.generic_visit(node)
self._exit_and_record_scope(node)
return node
def visit_With(self, node):
self._enter_scope(False)
node = self.generic_visit(node)
self._exit_and_record_scope(node, NodeAnno.BODY_SCOPE)
return node
def visit_withitem(self, node):
return self._process_statement(node)
def visit_If(self, node):
self._enter_scope(False)
node.test = self.visit(node.test)
node_scope = self._exit_and_record_scope(node.test)
anno.setanno(node, NodeAnno.COND_SCOPE, node_scope)
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_For(self, node):
self._enter_scope(False)
node.target = self.visit(node.target)
node.iter = self.visit(node.iter)
self._exit_and_record_scope(node.iter)
self._enter_scope(False)
self.visit(node.target)
self._exit_and_record_scope(node, tag=NodeAnno.ITERATE_SCOPE)
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_While(self, node):
self._enter_scope(False)
node.test = self.visit(node.test)
node_scope = self._exit_and_record_scope(node.test)
anno.setanno(node, NodeAnno.COND_SCOPE, node_scope)
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_ExceptHandler(self, node):
self._enter_scope(False)
# try/except oddity: as expected, it leaks any names you defined inside the
# except block, but not the name of the exception variable.
if node.name is not None:
self.scope.isolated_names.add(anno.getanno(node.name, anno.Basic.QN))
node = self.generic_visit(node)
self._exit_scope()
return node
def resolve(node, context, parent_scope=None):
return ActivityAnalyzer(context, parent_scope).visit(node)
| 35.568935
| 133
| 0.701487
|
50277c70a08d8a843203bcb716a5bec99cd1b41c
| 8,023
|
py
|
Python
|
docs/conf.py
|
StabbarN/pytest-services
|
753ccdf2c3be8bca4aced50d96be624064b179c5
|
[
"MIT"
] | 58
|
2015-03-03T13:36:47.000Z
|
2021-11-24T01:00:49.000Z
|
docs/conf.py
|
StabbarN/pytest-services
|
753ccdf2c3be8bca4aced50d96be624064b179c5
|
[
"MIT"
] | 38
|
2015-07-17T09:09:10.000Z
|
2020-10-30T12:13:04.000Z
|
docs/conf.py
|
StabbarN/pytest-services
|
753ccdf2c3be8bca4aced50d96be624064b179c5
|
[
"MIT"
] | 13
|
2015-07-20T15:12:15.000Z
|
2020-12-24T13:31:30.000Z
|
"""Sphinx config."""
# -*- coding: utf-8 -*-
#
# pytest-services documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 7 21:07:56 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import pytest_services
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pytest-services'
AUTHOR = 'Anatoly Bubenkov, Paylogic International and others'
copyright = u'2015, ' + AUTHOR
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pytest_services.__version__
# The full version, including alpha/beta/rc tags.
release = pytest_services.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytest-services-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pytest-services.tex', u'pytest-services Documentation', AUTHOR, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pytest-services', u'pytest-services Documentation',
[AUTHOR], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pytest-services', u'pytest-services Documentation',
AUTHOR, 'pytest-services', 'Services plugin for pytest testing framework.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 32.350806
| 89
| 0.715568
|
d854ee15a847c6444b207eb0ac579510c247986b
| 45,720
|
py
|
Python
|
lib/handling/gromacs.py
|
hernanchavezthielemann/GRO2LAM
|
e60aca80ad441f9b29acd30a6bef4c47d97e5e30
|
[
"MIT"
] | 48
|
2018-05-15T04:50:52.000Z
|
2022-03-22T09:27:08.000Z
|
lib/handling/gromacs.py
|
hernanchavezthielemann/GRO2LAM
|
e60aca80ad441f9b29acd30a6bef4c47d97e5e30
|
[
"MIT"
] | 16
|
2019-01-12T01:41:05.000Z
|
2021-01-27T11:38:12.000Z
|
lib/handling/gromacs.py
|
hernanchavezthielemann/GRO2LAM
|
e60aca80ad441f9b29acd30a6bef4c47d97e5e30
|
[
"MIT"
] | 17
|
2019-03-11T22:21:42.000Z
|
2021-11-11T14:08:12.000Z
|
#!/usr/bin/python
# By Hernan Chavez Thielemann
__author__ = 'Hernan Chavez Thielemann <hchavezthiele at gmail dot com>'
from lib.misc.warn import wrg_1, wrg_3, pop_err_1, pop_wrg_1
from lib.misc.file import check_file, debugger_file, fileseeker
from lib.misc.geometry import rotate, arcos, raiz
from lib.misc.data import isnot_num
from sys import exit
def extract_gromacs_data( _data_files_, _autoload_):
''' data files ---> ['gro file', 'top file', 'forcefield',
'non bonded file', 'bonded file']'''
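# Illustrative call (file names are hypothetical):
# extract_gromacs_data(['system.gro', 'topol.top', 'forcefield.itp',
#                       'ffnonbonded.itp', 'ffbonded.itp'], True)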
# _water_names_
filename_gro = _data_files_[0]
filename_top = _data_files_[1]
filename_ff = _data_files_[2]
filename_nb = _data_files_[3]
filename_bon = _data_files_[4]
data_container = {}
data_container['define'] = {}
#_solvated_f_, _autoload_= _ck_buttons_
print 'Autoload: {}\n'.format( _autoload_)
if not _autoload_:
print filename_ff # or not
_sidemol_f_ = False
###########################################################################
###########################################################################
section = '''--------------- FILE GRO ----------------------'''
#=========================================================================#
ok_flag, gro_pack, b_xyz = get_gro_fixed_line( filename_gro)
if not ok_flag:
pop_err_1('Problem detected in :\n' + section)
return {}, [ ok_flag, _sidemol_f_]
_mol_, _mtype_, _type_, _xyz_, _mtypes_ = gro_pack
################# ------------ BOX DEF ------------- ##################
data_container['box'] = [[],[]]
b_xyz = [ x_y_z*10 for x_y_z in b_xyz ]
angles = []
Ar = [[0,0,0],[0,0,0],[0,0,0]]
for i in range(3):
Ar[i][i] = b_xyz[i]
if sum( b_xyz) < 2.8:
exit('xx/0 Error in .gro file, box dimension 000')
elif len( b_xyz) == 3:
pass
elif len( b_xyz) == 9:
k = 0
for i in range(3):
for j in range(3):
if i != j:
Ar[i][j] = b_xyz[ k + 3]
k += 1
cero = 1e-12
if Ar[1][0] < cero or Ar[2][0] < cero or Ar[2][1] < cero:
print('Your triclinic cell will be rotated to make it!')
# y rotation
a_tor_y = -arcos( (Ar[0][0])/(raiz(Ar[0][0]*Ar[0][0]+Ar[2][0]*Ar[2][0])) )
Ar = rotate( Ar, a_tor_y, 'y')
# z rotation
a_tor_z = arcos( (Ar[0][0])/(raiz(Ar[0][0]*Ar[0][0]+Ar[1][0]*Ar[1][0])) )
Ar = rotate( Ar, a_tor_z, 'z')
a_tor_x = arcos( Ar[1][1]/( raiz( Ar[1][1]*Ar[1][1] + Ar[2][1]*Ar[2][1])) )
Ar = rotate( Ar, a_tor_x)
_xyz_ = rotate( rotate( rotate( _xyz_, a_tor_y, 'y'),
a_tor_z, 'z'), a_tor_x)
else:
exit('xx/0 Error box dimension 001')
_x_, _y_, _z_ = _xyz_
xlo = min( _x_)*10
xhi = xlo + Ar[0][0]
ylo = min( _y_)*10
yhi = ylo + Ar[1][1]
zlo = min( _z_)*10
zhi = zlo + Ar[2][2]
data_container['box'][0] = [ xlo, xhi, ylo, yhi, zlo, zhi]
data_container['box'][1] = [ Ar[0][1], Ar[0][2], Ar[1][2]]
data_container['atomsdata'] = [ _mol_, _mtypes_, _type_, _xyz_, _mtype_]
###########################################################################
###########################################################################
###########################################################################
section = '''---------------- .FILE TOP. ---------------'''
#=========================================================================#
################# Defaults ##################
data_container['defaults'], ok_flag, _a_fff_ = ck_forcefield( filename_ff,
filename_top)
filename_ff = _a_fff_
if not ok_flag:
pop_err_1('Problem detected in :\n' + section.split('.')[1])
return {}, [ ok_flag, _sidemol_f_]
buckorlj = int(data_container['defaults'][0])
############
startstrings = ['[ moleculetype ]', '[ atoms ]', '[ bonds ]', '[ pairs ]',
'[ angles ]', '[ dihedrals ]', '[ system ]',
'[ molecules ]', '']
exclusions_ = ['[ bonds ]', '[ pairs ]', '[ angles ]', '[ dihedrals ]']
# Scheme type????
pure_side_mol_flag = ( ( seek_for_directive( [ filename_top],
'moleculetype') == '') or
( filename_nb == filename_ff and
filename_nb == filename_bon))
if pure_side_mol_flag:
startstrings = startstrings[-3:]
print wrg_3( 'Using pure side molecule scheme')
#n_atoms = 0
#n_bonds = 0
#n_angles = 0
data_container['atoms'] = []
data_container['bonds'] = []
data_container['angles'] = []
data_container['dihedrals'] = []
for ti in range(len(startstrings))[:-1]:
s_str_ = startstrings[ ti][ 2:-2]
''' here it is possible to insert a selector in case pairs and
others can be skipped'''
data_container[ s_str_], ok_flag, _ = get_topitp_line( filename_top,
startstrings[ti]
)
if not ok_flag:
if startstrings[ti] not in exclusions_:
print wrg_3( 'Not ok flag in <extract_gromacs_data> top file' +
'section, in ' + s_str_)
return {}, [ ok_flag, _sidemol_f_]
else:
ok_flag = True
#debugger_file( s_str_, data_container[s_str_])
n_atoms = len( data_container['atoms'])
n_bonds = len( data_container['bonds'])
n_angles = len( data_container['angles'])
###########################################################################
section = '''---------- .SIDE MOLE FILES. -------------'''
#=========================================================================#
#### re-search in topology for new molecules / side molecules
if _autoload_:
data_container, ok_flag, _sidemol_f_ = sidemol_data( filename_top,
data_container)
if not ok_flag:
pop_err_1( 'Problem detected in :\n' + section.split('.')[1])
return {}, [ ok_flag, _sidemol_f_]
###########################################################################
section = '''----------------- .FILE NB. ---------------'''
#=========================================================================#
startstrings = ['[ atomtypes ]', '[ nonbond_params ]']
data_container['atomtypes'], ok_flag, _ = get_topitp_line( filename_nb,
'[ atomtypes ]')
if not ok_flag:
pop_err_1('Problem detected in :\n' + section.split('.')[1])
return {}, [ ok_flag, _sidemol_f_]
n_atomtypes = len( data_container['atomtypes'])
#debugger_file( 'atomtypes',data_container['atomtypes'])
###########################################################################
section = '''---------------- .FILE BON. ---------------'''
#=========================================================================#
startstrings = ['[ bondtypes ]', '[ angletypes ]', '[ dihedraltypes ]', '']
if filename_nb == filename_ff and filename_nb == filename_bon:
for bi in range( len( startstrings))[:-1]:
s_str_ = startstrings[ bi][ 2:-2]
data_container[ s_str_] = []
data_container['define'][s_str_[:-5]] = {}
#data_container['impropers'] = []
#data_container['impropertypes'] = []
startstrings = startstrings[-1]
aux_strings = [ 'bonds', 'angles', 'dihedrals']
for bi in range( len( startstrings))[:-1]:
s_str_ = startstrings[ bi][ 2:-2]
_aux_here_ = get_topitp_line( filename_bon, startstrings[ bi])
data_container[ s_str_], ok_flag, _data_define_ = _aux_here_
################################################################################### flag
# Make a function like dihedral integrity check
if bi == 2:
for di in range( len(data_container[ s_str_])):
# isnot_num returns True if the value is a string
dih_pt_line = data_container[ s_str_][di]
if not isnot_num( dih_pt_line[2]):
pop_wrg_1( 'Dihedral potential problem found!!\nAdopting'
+ ' X-A1-A2-X configuration for: '
+ ' {}-{}'.format( *dih_pt_line[:2]) )
new_row = ['X'] + dih_pt_line[:2] + ['X'] + dih_pt_line[2:]
data_container[ s_str_][di] = new_row
elif not isnot_num( dih_pt_line[3]):
exit('Error 0031 undefined dihedral')
data_container['define'][s_str_[:-5]] = _data_define_
#debugger_file(s_str_, data_container[s_str_])
if not ok_flag:
if data_container[ aux_strings[ bi]] != []:
pop_err_1('Problem detected in :\n' + section.split('.')[1])
return {}, [ ok_flag, _sidemol_f_]
else:
ok_flag = True
###########################################################################
section = '''------------ .#define & Impropers. ------------'''
#=========================================================================#
gromosff_flag = False
data_container[ 'define'][ 'improper'] = {}
aux_here = {}
print( section.split('.')[1])
if filename_nb != filename_ff and filename_nb != filename_bon:
print(" Is it GROMOS there ?? ")
aux_here = get_gromos_define( filename_bon)
else:
print('no gromos check')
for key_ in aux_here.keys():
if aux_here[ key_] != {}:
print ( 'GROMOS ' + key_ + ' kind detected!')
data_container[ 'define'][ key_].update( aux_here[ key_])
gromosff_flag = True
dihe_g_data = data_container[ 'dihedraltypes']
if 'dihedraltypes' == key_+'types' and dihe_g_data != []:
rewrite_flag = False
for gd_ in range( len( dihe_g_data)):
#print dihe_g_data[gd_][2]
if dihe_g_data[gd_][2].isdigit():
if not rewrite_flag:
print('Dihedral with 2 atoms re-formating to 4: ')
rewrite_flag = True
dihe_g_data[gd_] = ( [ 'X',] + dihe_g_data[ gd_][:2]
+ [ 'X',] + dihe_g_data[ gd_][2:])
print (dihe_g_data[ gd_])
if rewrite_flag:
data_container[ 'dihedraltypes'] = dihe_g_data
if gromosff_flag:
for ss_ in startstrings[:-1]:
s_str_ = ss_[ 2:-2]
data_aux = data_container[ s_str_]
cont_k = s_str_[ :-5]
cddd = data_container[ 'define'][ cont_k]
for i in range( len( data_aux)):
if len( data_aux[i][-1].split('.')) < 2:
if not data_aux[i][-1].isdigit():
aux = data_aux[i][:-1] + cddd[ data_aux[i][-1]]
#print aux
data_container[ s_str_][i] = aux
#print data_container['define']['bond']['gb_33']
# Search for impropers in TOP and BON, using crossreference if needed
data_container = split_define_dihe_impr( data_container)
n_dihedrals = len( data_container['dihedrals'])
n_impropers = len( data_container['impropers'])
###########################################################################
'''-------------- "Side Mol" --------------'''
#=========================================================================#
n_atomsnew = len( _type_)
if _sidemol_f_:
### A_02 maths
# "previewing / preallocating" // computing side mol size
sidemol = data_container['sidemol']
side_bonds_n = 0
side_angles_n = 0
side_dihed_n = 0
side_improp_n = 0
for sb in range( len( sidemol['tag'])):
bonds_x_mol = len( sidemol['data'][sb]['bonds'])
angles_x_mol = len( sidemol['data'][sb]['angles'])
dihedr_x_mol = len( sidemol['data'][sb]['dihedrals'])
improp_x_mol = len( sidemol['data'][sb]['impropers'])
sm_quantity = sidemol['num'][sb]
#print(sm_quantity, bonds_x_mol,sm_quantity * bonds_x_mol)
side_bonds_n += sm_quantity * bonds_x_mol
side_angles_n += sm_quantity * angles_x_mol
side_dihed_n += sm_quantity * dihedr_x_mol
side_improp_n += sm_quantity * improp_x_mol
n_bondsnew = n_bonds + side_bonds_n
n_anglesnew = n_angles + side_angles_n
n_dihednew = n_dihedrals + side_dihed_n
n_impropnew = n_impropers + side_improp_n
#print n_bonds, side_bonds_n, n_bonds + side_bonds_n
#print n_angles, side_angles_n, n_angles + side_angles_n
### A_03
# tests whether the value is an assignment to a #define or something like that
contentkey = [ 'bond', 'angle', 'improper', 'dihedral']
for cont_k in contentkey:
# memorandum:
# 'define' stores, per content key, a dictionary mapping each #define key to its value
cddd = data_container[ 'define'][ cont_k]
if cddd.keys() != []:
for sb in range( len( sidemol['tag'])): # in each side mol
datacont = sidemol['data'][sb][cont_k+'s']# in its cont-key
for dc in range( len( datacont)):# lets look their content
if isnot_num( datacont[dc][-1]):#
#print( '{} {} {}'.format( cont_k+'s', dc,
# datacont[dc][-1]))
aux = datacont[dc][:-1] + cddd[ datacont[dc][-1]]
sidemol['data'][sb][cont_k+'s'][dc] = aux
#else:
#print datacont[dc]
#######################################################################
### A_04
# I think that this part is deprecated... however I am not sure
# Regarding the itp format:
# charges index 6 in data-atoms
# opls names in index 1
# atom tags in index 4
_charge_ = {}
_conv_dict_ = {}
for sb in range( len( sidemol['tag'])):
for at in range( len( sidemol['data'][sb]['atoms'])):
a_opls_tag = sidemol['data'][sb]['atoms'][at][1]
a_elem_tag = sidemol['data'][sb]['atoms'][at][4]
a_charge = float( sidemol['data'][sb]['atoms'][at][6])
_charge_[a_opls_tag] = a_charge
_conv_dict_[ a_elem_tag] = a_opls_tag
print '='*45+'\n'+'='*5+' Charges found: '
print _charge_
print _conv_dict_
data_container['S_charge'] = _charge_
data_container['S_translation'] = _conv_dict_
#######################################################################
### A_05
############ Esoteric part ;) ###############
#### ----------- DEFINING BONDED INTERACTIONS ---------- ####
# load the side molecules data if exist
#sidemol = _topodata_['sidemol']
smol_extra_bondtypes = []
smol_extra_angletypes = []
smol_extra_dihedraltypes = []
smol_extra_impropertypes = []
bn_namelist = []
an_namelist = []
di_namelist = []
im_namelist = []
for sb in range( len( sidemol['tag'])):
_smd_ = sidemol['data'][sb]
_at_dic_here = {}
for _at in range( len( _smd_['atoms'])):
_smat_ = _smd_['atoms'][_at]
_at_dic_here[ _smat_[0]] = _smat_[1]
for _bn in range( len( _smd_['bonds'])):
_smbn_ = _smd_['bonds'][_bn]
aux_here = [_at_dic_here[ _smbn_[0]], _at_dic_here[ _smbn_[1]]]
name = '{}-{}'.format(*aux_here)
if name not in bn_namelist and len( _smbn_[2:]) > 1:
bn_namelist.append( name)
smol_extra_bondtypes.append( aux_here + _smbn_[2:])
for _an in range( len( _smd_['angles'])):
_sman_ = _smd_['angles'][_an]
aux_here = [_at_dic_here[ _sman_[0]], _at_dic_here[ _sman_[1]],
_at_dic_here[ _sman_[2]] ]
name = '{}-{}-{}'.format(*aux_here)
if name not in an_namelist and len( _sman_[3:]) > 1:
an_namelist.append( name)
smol_extra_angletypes.append( aux_here + _sman_[3:])
for _dh in range( len( _smd_['dihedrals'])):
_smdh_ = _smd_['dihedrals'][_dh]
aux_here = [_at_dic_here[ _smdh_[0]], _at_dic_here[ _smdh_[1]],
_at_dic_here[ _smdh_[2]], _at_dic_here[ _smdh_[3]]]
name = '{}-{}-{}-{}'.format(*aux_here)
if name not in di_namelist and len( _smdh_[4:]) > 1:
di_namelist.append( name)
smol_extra_dihedraltypes.append( aux_here + _smdh_[4:])
for _im in range( len( _smd_['impropers'])):
_smim_ = _smd_['impropers'][_im]
aux_here = [_at_dic_here[ _smim_[0]], _at_dic_here[ _smim_[1]],
_at_dic_here[ _smim_[2]], _at_dic_here[ _smim_[3]]]
name = '{}-{}-{}-{}'.format(*aux_here)
if name not in im_namelist and len( _smim_[4:]) > 1:
im_namelist.append( name)
smol_extra_impropertypes.append( aux_here + _smim_[4:])
if len( _smd_.keys()) > 5:
print ('Uuupa!! This thing is not implemented yet' +
' as side mol part')
a_key = [ 'atoms', 'bonds', 'angles', 'dihedrals', 'impropers']
for ky in _smd_.keys():
if ky not in a_key:
print ('-- > this key : ' + ky)
# --------- !!!! Update the info !!!!
data_container['bondtypes'] = ( smol_extra_bondtypes +
data_container['bondtypes'] )
data_container['angletypes'] = ( smol_extra_angletypes +
data_container['angletypes'])
data_container['dihedraltypes'] = ( smol_extra_dihedraltypes +
data_container['dihedraltypes'])
data_container['impropertypes'] = ( smol_extra_impropertypes +
data_container['impropertypes'])
#print(data_container['bondtypes'])
else:
n_bondsnew = n_bonds
n_anglesnew = n_angles
n_atomsnew = n_atoms
n_dihednew = n_dihedrals
n_impropnew = n_impropers
###################### work
# marker: 'bond_kinds'angl_kinds'dihe_kinds'impr_kinds'
nice_list = [ 'bondtypes', 'angletypes', 'dihedraltypes','impropertypes']
for it in range( len( nice_list)):
_aux_set_here = set()
poss = it + 2
if poss > 4:
poss = 4
for i in range( len ( data_container[ nice_list[it] ])):
_aux_set_here.add( data_container[ nice_list[it] ][i][ poss ])
#print( nice_list[it][:4])
#print(_aux_set_here)
data_container[ nice_list[it][:4]+'_kinds'] = _aux_set_here
n_bondstypes = len( data_container['bondtypes'])
n_anglestypes = len( data_container['angletypes'])
n_dihedraltypes = len( data_container['dihedraltypes'])
n_impropertypes = len( data_container['impropertypes'])
data_container['numbers']={}
data_container['numbers']['total'] = [n_atomsnew, n_bondsnew,
n_anglesnew, n_dihednew, n_impropnew
]
#print( 'here', data_container['numbers']['total'])
#exit('')
data_container['numbers']['type'] = [n_atomtypes, n_bondstypes,
n_anglestypes, n_dihedraltypes,
n_impropertypes]
print 'Ending gromacs data parsing\n'
return data_container, [ ok_flag, _sidemol_f_]
def sidemol_data( _file_top_, data_container):
''' -- getter of the side molecules data --
Per each side mole returns a dictionary with:
tag : side mole tag
num : number in this instance of this kind of side mol
data : dictionary with topology data
{atoms bonds angles dihedrals impropers}
'''
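# Illustrative shape of the returned 'sidemol' entry (names/counts hypothetical):
# {'tag': ['SOL'], 'num': [128],
#  'data': [{'atoms': [...], 'bonds': [...], 'angles': [...],
#            'dihedrals': [...], 'impropers': [...]}]}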
sidemol = {'tag': [],'num':[], 'data':[] }# Tag # mol_number
sm_flag = False
# all molecules info
_aux_m_ = data_container[ 'molecules']
# names of non side molecules
if 'moleculetype' in data_container.keys():
non_sm = data_container['moleculetype']
non_sm = [non_sm[i][0] for i in range(len(non_sm))]
_buffer_ = ''
else:
# non conventional case // the one in the main top
non_sm = ['']
_buffer_ = '0'
# side molecules info filtering
for i in range( len( _aux_m_)) :
if _aux_m_[i][0] not in non_sm:
sidemol['tag'].append( _aux_m_[i][0])
sidemol['num'].append( int(_aux_m_[i][1]))
sm_flag = True
if sm_flag:
#////////======= Side molecule >>> file <<< search ==========////////
print ('\nLoading side molecule files: ' )
_sm_files_ = []
root_dir = '/'.join( _file_top_.split('/')[:-1]+[''])
ok_flag = False
with open( _file_top_, 'r') as topdata:
if sidemol['tag'] == []:
topdata = []
for k_line in topdata:
if k_line.startswith('#'):
logic_test = ('#if' not in _buffer_ and _buffer_ != '')
if k_line.startswith('#include') and logic_test:
if _sm_files_ == []:
ok_flag = True
try:
new_filename = k_line.split('"')[1].lstrip('.')
except IndexError:
auxt = wrg_1( 'Format error with {}')
print( auxt.format( k_line.split()[-1] ) )
new_filename = new_filename.lstrip('/').split('/')[-1]
po_file = fileseeker( root_dir, new_filename)
if po_file != []:
_sm_files_.append( po_file[0])
print( 'SM_file : {}'.format(_sm_files_[-1]))
ok_flag *= check_file( po_file[0],
content='[ atoms ]')
else:
_buffer_ = k_line
# do it in the same loop or not, that is the question... maybe it is better
# not to, just because the indentation would go to +inf
#////////======= Side molecule >>> data <<< search ==========////////
if ok_flag:
for sm in sidemol['tag']:
aux_data, aux_flag = sidemol_data_gatherer( _sm_files_, sm)
#print aux_data
ok_flag *= aux_flag
sidemol['data'].append( aux_data)
data_container['sidemol'] = sidemol
else:
print ('No side molecule files detected!' )
ok_flag = True
return data_container, ok_flag, sm_flag
def sidemol_data_gatherer( _sm_files_, _sm_):
''' collects all the data related with one kind of side molecule
the data types are specified in startstrings
'''
print( '\nSearching for: {}'.format( _sm_ ))#, ' in: ' ,_sm_files_
_flag_ = True
_file_ = ''
_sm_data_c_ = {}
# is sm in sm_file?? in cases with more than one file
for smfile in _sm_files_:
with open( smfile, 'r') as sm_data:
read_flag = False
i = 0
for j_line in sm_data:
j_line = j_line.split(';')[0].strip()
#print j_line, read_flag, j_line.startswith(sm)
if j_line.startswith('['):
if j_line.startswith('[ moleculetype ]'):
read_flag = True
i = 0
else:
read_flag = False
elif read_flag and j_line.startswith( _sm_):
_file_ = smfile
break
elif read_flag:
i +=1
if i > 3:
read_flag = False
if _file_=='':
pop_err_1('Error!! side molecule {} not found in itp -- '.format( _sm_))
_flag_ = False
else:
print( 'Success!, found in : {}\n'.format( _file_))
tag_str = [ 'atoms', 'bonds', 'angles', 'dihedrals','fin']
_sm_data_c_ = { x:[] for x in tag_str if x != 'fin'}
read_flag = False
iner_flag = False
cd_tag = ''
i = 0
with open( _file_, 'r') as sm_data:
for j_line0 in sm_data:
j_line = j_line0.split(';')[0].split()
if not j_line:
pass
elif read_flag:
if j_line[0][0] == '#':
pass
elif j_line[0][0] == '[':
if j_line[1] != tag_str[i] :
if j_line[1] in tag_str[i+1:]:
i = tag_str.index( j_line[1])
cd_tag = tag_str[i]
iner_flag = True
print( '** Gathering {} data'.format( cd_tag))
elif j_line[1] == 'moleculetype':
break
else:
txt_s = '> {} not considered in {}'
print txt_s.format( j_line[1], _sm_)
iner_flag = False
else :
cd_tag = tag_str[i]
print( '* Gathering {} data'.format( cd_tag))
iner_flag = True
elif iner_flag:
#print j_line
_sm_data_c_[ cd_tag].append( j_line)
elif j_line0.lstrip().startswith( _sm_):
read_flag = True
# todo add a new check in case of empty container
#print _sm_data_c_
######### Split impropers and dihedrals
_sm_data_c_, _ = split_dihedral_improper( _sm_data_c_)
#if _sm_data_c_['impropers'] <>[]:
# print _sm_data_c_['impropers']
return _sm_data_c_, _flag_
def split_dihedral_improper( _data_container_):
''' New function to handle just the split neatly
Seems that in GROMACS, impropers are present as a kind of
dihedral type, so this function is meant to pick the dihedral
data and split it resizing the original container and creating
a new improper-container.
'''
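# Illustrative input row (atom ids are hypothetical): ['5', '6', '7', '8', '2', ...]
# carries funct '2' in column 4 and is therefore routed to the improper list.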
### ////////////////////\\\\\\\\\\\\\\\\\\\\\ ###
dh_dict_kind = {'1':"Proper dihedral", '2':"Improper dihedral",
'3':"Ryckaert-Bellemans dihedral",
'4':"Periodic improper dihedral", '5':"Fourier dihedral",
'8':"Tabulated dihedral", '9':"Proper dihedral (multiple)",
'10':"Restricted dihedral", '11':"Combined bending-torsion"}
_dihedrals_data_ = _data_container_[ 'dihedrals']
_admitted_dihe_ = ['1', '3', '9']#['1', '3', '9']
_admitted_impr_ = ['2', '4']# ['2']
im_data_ = []
dh_data_ = []
define_dihe_extra = []
def_impr_extra = []
dh_bf_err = ""
for i in range( len ( _dihedrals_data_)):
# Dihedral line format in :
# ai aj ak al funct c0 c1 c2 c3 c4 c5
dihe_funct = _dihedrals_data_[i][4]
if dihe_funct in _admitted_dihe_:
dh_data_.append( _dihedrals_data_[i])
if len (_dihedrals_data_[i])>5:
define_dihe_extra.append( _dihedrals_data_[i])
elif dihe_funct in _admitted_impr_:
im_data_.append( _dihedrals_data_[i])
if len (_dihedrals_data_[i])>5:
def_impr_extra.append( _dihedrals_data_[i])
else:
print 'Problem #008 here #split_dihedral_improper'
dihe_err = "#008_" + dh_dict_kind[ dihe_funct]
if dihe_err != dh_bf_err:
if dihe_funct in dh_dict_kind.keys():
pop_err_1( dh_dict_kind[ dihe_funct] +
' not implemented yet')
dh_bf_err = dihe_err
else:
exit( dihe_funct + ' is not a valid dihedral function')
# Save/overwriting point
_data_container_['impropers'] = im_data_
_data_container_['dihedrals'] = dh_data_
return _data_container_, define_dihe_extra
def split_define_dihe_impr( _data_container_, smt_flag = False):
''' This picks the dihedral data and splits it
Creates #define data in dihedrals
Also creates the data regarding kinds of functions. Useful when
'1' and '3' are used in the same file
'''
### ========================================= ###
''' ============= Dihedral TOP data ============= '''
# Save/overwriting point
_data_container_, define_dh_ex = split_dihedral_improper( _data_container_)
_admitted_impr_ = ['2', '4']
#====================================================
''' ======== "Type" Dihedral BONDED data ======= '''
_dihe_type_data_ = _data_container_['dihedraltypes']
im_type_ = []
dh_type_ = []
#print _dihe_type_data_
for i in range( len ( _dihe_type_data_)):
# Dihedral line format:
# ai aj ak al funct c0 c1 c2 c3 c4 c5
try:
if _dihe_type_data_[i][4] in _admitted_impr_:
im_type_.append( _dihe_type_data_[i])
else:
dh_type_.append( _dihe_type_data_[i])
except IndexError as _err:
print( _err)
exit( _dihe_type_data_[i] )
#====================================================
''' ======== Dihedral "#define" data ========== '''
def_dihe_dic = {}
def_impr_dic = {}
# If there are define potentials, I have to process... by types and kind
''' Make it homogeneous - New kind creations '''
# first_clause = ( maybe should be an inner clause
''' Now supposing that just #define exist with a tag in the c0
position...'''
new_dihedraltypes = {}
define_dic = _data_container_['define']['dihedral']
if define_dic != {}:
known_atoms = _data_container_['atoms']
for dh in range( len( define_dh_ex)):
_dhi_ = define_dh_ex[ dh]
a_tag = ['',]*4
for at1 in range( 4): # at1 0 1 2 3
atnum = int( _dhi_[at1])
if ( known_atoms[ atnum - 1][0] == _dhi_[ at1]):
a_tag[at1] = known_atoms[ atnum - 1][1]
# If it is not ordered we are almost out of luck...
else:
# Brute force, until it is found
for at2 in range( len( known_atoms)):
if known_atoms[at2][0] == _dhi_[at1]:
a_tag[at1] = known_atoms[at2][1]
break
if '' == a_tag[at1]:
_string_ = 'Error!! atom number {} not found in .top -- '
pop_err_1( _string_.format( atnum))
#### TODO Flag
## First case with coefs in the top file... c0 c1 c2 c3 c4 c5
if len( _dhi_) > 6:
print ('Coefficients in the top file are not supported yet' +
'... or maybe they are '+ u'\u00AC'*2)
new_dihedraltypes['-'.join(a_tag)] = (a_tag + _dhi_[4:])
## Second case with #define
elif len( _dhi_) == ( 4 + 1 + 1):
dh_kind_, dihedral_tag = _dhi_[4:]
_content_ = a_tag + [dh_kind_] + define_dic[ dihedral_tag]
# with a dictionary instead of a set because sets do not allow
# unhashable lists as items
new_dihedraltypes[ '-'.join( a_tag)] = _content_
for key in new_dihedraltypes.keys():
dh_type_.append( new_dihedraltypes[key])
# Save/overwriting point
_data_container_['impropertypes'] = im_type_
_data_container_['dihedraltypes'] = dh_type_
return _data_container_
def get_gro_fixed_line( _filename_):
''' reading gromacs gro fixed lines'''
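# Fixed columns parsed below (standard .gro atom line, coordinates in nm):
# [0:5] residue number  [5:10] residue name  [10:15] atom name
# [15:20] atom number   [20:28] x  [28:36] y  [36:44] z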
_content_ = []
_mol_ = []
_mtype_ = []
g_names = []
_type_ = []
_xyz_ = []
_x_ = []
_y_ = []
_z_ = []
_corrupt = True
with open(_filename_, 'r') as indata:
read_flag = False
at=0
at_num = 0
_buffer = []
for j_line in indata:
if read_flag:
at+=1
mtype = j_line[5:10].strip(' ')
_mol_.append( j_line[:5].lstrip(' '))
_mtype_.append(mtype)
_type_.append(j_line[10:15].lstrip(' '))
_x_.append( float( j_line[20:28]) )
_y_.append( float( j_line[28:36]) )
_z_.append( float( j_line[36:44]) )
if _buffer==[]:
_buffer = [ mtype, at]
## TODO : Analyze if it is possible to improve here using sets
elif mtype != _buffer[0]:
_buffer += [at-1]
g_names.append( _buffer)
_buffer = [ mtype, at]
if at == at_num:
read_flag = False
g_names.append(_buffer + [at])
elif j_line.startswith(';'):
pass
elif at_num == 0:
j_line = indata.next()
at_num = int( j_line)
read_flag = True
elif at == at_num:
box_xyz_hi = [float(x) for x in j_line.split(';')[0].split()]
if len( box_xyz_hi) in [ 3, 9]:
_corrupt = False
if at_num != len(_type_):
pop_err_1('Atom number mismatch in .gro file')
return False, 0 ,0
elif _corrupt:
pop_err_1('Corrupt .gro file box definition')
return False, 0,0
else:
_xyz_ = [ _x_, _y_, _z_]
return True, [_mol_, _mtype_, _type_, _xyz_, g_names], box_xyz_hi
def top_groups( mtype, _buffer, g_names):
return _buffer, g_names # hook
def get_topitp_line( _filename_, _ss_):
''' reading gromacs content lines
spliting by the space between info
'''
_verbose_ = True
content_line = []
_define_ = {}
# \* TODO: apply a method just in case that
# some _startstrings_ are not there ??
with open(_filename_, 'r') as indata:
read_flag = False
ok_flag = True
tag_not_found = True
if _verbose_:
print _ss_
for j_line in indata:
# I just want to read once the flag is on
j_line_s0 = j_line.split(';')[0].split()
if read_flag and j_line_s0:
#if _verbose_: ### it is better to store and print outside the
# cycle with just one if
#print j_line_s0
_line_ = j_line_s0
# getting out comments and empty lines
if len( _line_) == 0:
pass
elif _line_[0][0] == '#':
if _line_[0] == '#include':
print( wrg_3( _line_[1] + ' skipped this time'))
elif _line_[0] == '#define':
_define_[_line_[1]] = _line_[2:]
else:
print wrg_3( str(_line_) + ' ??')
elif _line_[0][0] == '[':
print( ' '.join(_line_) + 'Checked!')
if ' '.join(_line_) != _ss_ :
read_flag = False
#print 'exit here 424'
elif len( _line_) > 0:
content_line.append( _line_)
else:
print('Ups... please raise an issue at GitHub ;)')
elif j_line.lstrip().startswith( _ss_):
if _verbose_:
print( _ss_+' found!')
read_flag = True
tag_not_found = False
if content_line == [] or tag_not_found:
if '/' in _filename_:
_filename_ = _filename_.split('/')[-1]
pop_err_1( 'The {} section is missing on {} file'.format( _ss_ ,
_filename_)
)
ok_flag = False
return content_line, ok_flag, _define_
def get_gromos_define( _bondedfile_):
''' reading GROMOS define forcefield format
gb_ : bond
ga_ : angle
gi_ : wop - improper
gd_ : dihedral
eg. #define gb_12
'''
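# Illustrative full line (parameter values are hypothetical):
# #define gb_12   0.1530   7.1500e+06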
_dedic_ = {'b':'bond', 'a':'angle', 'i':'improper', 'd':'dihedral'}
_define_ = {}
for k in _dedic_.keys():
_define_[ _dedic_[k]] = {}
with open(_bondedfile_, 'r') as indata:
for j_line in indata:
_line_ = j_line.split(';')[0].split()
if _line_ and _line_[0][0] == '#':
if _line_[0] == '#define':
if _line_[1][0] == 'g':
if _line_[1][2] == '_' and _line_[1][3].isdigit():
aux_dic = { _line_[1] : _line_[2:]}
_define_[ _dedic_[ _line_[1][1]]].update( aux_dic)
else:
print('Whojojoooh...')
elif _line_[0] == '#include':
print(wrg_3( _line_[1] + ' skipped!'))
else:
print(wrg_3( str(_line_) + ' ??'))
return _define_
def get_ffldfiles( _topfile_):
'''
self explanatory... sub routine to get the force field files
if they are stated in the top file.
starts from the top file
'''
ff_file = ''
nonerr_flag = True
with open( _topfile_, 'r') as indata:
for j_line in indata:
if j_line.startswith('#include'):
ff_file = j_line.split('"')[1]
break
elif j_line.startswith('[ moleculetype ]'):
break
root_folder = '/'.join(_topfile_.split('/')[:-1]+[''])
ff_file = ff_file.lstrip('.').lstrip('/')
if ff_file != '':
# if there is at least one itp, lets parse it
# first seek for further includes itp
aux_file_cont = [_topfile_, '']
print '----- Loading :'
i = 1
aux_file_cont[i] = root_folder + ff_file
print aux_file_cont[i]
root_folder = '/'.join( aux_file_cont[i].split('/')[:-1]+[''])
try:
with open( aux_file_cont[i], 'r') as indata2:
for k_line in indata2:
if k_line.startswith('#include'):
i+=1
aux_file_cont.append( root_folder+k_line.split('"')[1])
print aux_file_cont[i]
if i==3:
break
except IOError:
pop_err_1('xx/0 Read error 030, file not found!!.\n')
nonerr_flag *= False
# the first one is [ defaults ]
# second nonbonded atomtypes
# third bonded
_directives_ = ['defaults', 'atomtypes', 'bondtypes']
file_cont = []
for _di_ in _directives_:
file_cont.append( seek_for_directive( aux_file_cont, _di_))
if file_cont[-1] == '':
pop_wrg_1('Directive ' + _di_ + ' not found!')
if _di_ == _directives_[-1]:
file_cont[-1] = file_cont[0]
else:
print ('Using :' +file_cont[-1]+' for ' + _di_)
else:
pop_err_1('Force field files #include not found!')
nonerr_flag *= False
# final check of non error flag
if nonerr_flag and len(file_cont) < 3 :
pop_wrg_1('Your structure seems unfamiliar, just ' +
'{} itp found.'.format( len(file_cont)) +
'\nthe conversion could fail!')
# Meaning that the directive seeker could not find the corresponding one
while len(file_cont) < 3:
file_cont.append( file_cont[-1])
# a file integrity check should be done outside
return file_cont, nonerr_flag
def ck_forcefield( _the_file_, _secondoption_ = None):
'''
only this file could be requested and
from it the information for the other two could be extracted....
'''
_flag_ = False
comb_rule = -1
with open( _the_file_, 'r') as indata:
for j_line in indata:
line_c = j_line.split()
if j_line.startswith('[ defaults ]'):
_flag_ = True
if len(line_c)>1 and 'fudgeQQ'==line_c[-1]:
j_line = indata.next()
comb_rule= j_line.split()
print('---> comb_rule {}'.format(comb_rule[1]))
if not _flag_ and _secondoption_ != None:
comb_rule, _flag_, _the_file_ = ck_forcefield( _secondoption_)
if comb_rule < 0 or not _flag_:
pop_err_1('forcefield.itp file is missing or incomplete')
comb_rule, _flag_, _the_file_ = [ 0, 0, '']
return comb_rule, _flag_, _the_file_
def seek_for_directive( _list_of_files_, _directive_):
''' search for a certain directive in a bunch of files
and then returns the file in which it is, or an empty string
PS: a directive is a word wrapped with []
'''
content_file = ''
for file_ in _list_of_files_:
try:
with open( file_, 'r') as indata:
for j_line in indata:
line_c = j_line.split(';')[0].split(' ]')[0].split('[ ')
if len( line_c) > 1:
if line_c[1] == _directive_:
content_file = file_
break
except IOError:
exit('xx/0 Read error 030, file not found.\n' + file_)
if content_file != '':
break
return content_file
def get_top_groups( _mtype_container_, _group_):
_mtype_ = _mtype_container_
_buffer = []
for mt in range(len( _mtype_)):
mtype = _mtype_[mt].strip(' ')
if _buffer == [] and mtype == _group_:
_buffer = [ mtype, mt+1]
elif _buffer != [] and mtype != _group_:
_buffer += [mt]
break
elif mt==(len(_mtype_)-1):
_buffer += [mt+1]
print''
print 'Group characterized as: {} with ids {} to {}'.format(*_buffer)
return _buffer
if __name__ == '__main__':
pass
# vim:tw=80
| 40.175747
| 96
| 0.460739
|
889ab952492ba4f184815b3a41b87a731d344b8c
| 545
|
py
|
Python
|
adv/sinoa.py.means.py
|
mattkw/dl
|
45bfc28ad9ff827045a3734730deb893a2436c09
|
[
"Apache-2.0"
] | null | null | null |
adv/sinoa.py.means.py
|
mattkw/dl
|
45bfc28ad9ff827045a3734730deb893a2436c09
|
[
"Apache-2.0"
] | null | null | null |
adv/sinoa.py.means.py
|
mattkw/dl
|
45bfc28ad9ff827045a3734730deb893a2436c09
|
[
"Apache-2.0"
] | null | null | null |
import adv_test
import adv
from adv import *
def module():
return Sinoa
class Sinoa(adv.Adv):
a1 = ('a',0.13,'hp100')
a3 = ('bt',0.2)
# conf = {}
# import slot
# conf['slots.a'] = slot.a.Bellathorna()+slot.a.RR()
def s1_proc(this, e):
adv.Teambuff('s1_att',0.25/4,15,'att').on()
adv.Teambuff('s1_crit',0.25/4,10,'crit').on()
if __name__ == '__main__':
conf = {}
conf['acl'] = '''
`s1
`s2
`fs,seq=5
'''
adv_test.test(module(), conf, verbose=-2, mass=0)
| 17.580645
| 55
| 0.519266
|
b6ed225d7138b2b374f78280cdef3d6ce62e5912
| 7,928
|
py
|
Python
|
app/libs/ldap/ldap_wrapper.py
|
regg00/docker-xmedius-adsync
|
738bdb82d37173c9363d69c8148a95c43043aeba
|
[
"MIT"
] | null | null | null |
app/libs/ldap/ldap_wrapper.py
|
regg00/docker-xmedius-adsync
|
738bdb82d37173c9363d69c8148a95c43043aeba
|
[
"MIT"
] | null | null | null |
app/libs/ldap/ldap_wrapper.py
|
regg00/docker-xmedius-adsync
|
738bdb82d37173c9363d69c8148a95c43043aeba
|
[
"MIT"
] | null | null | null |
#////////////////////////////////////////////////////////////////////////////
# Copyright (c) 2012 Sagemcom Canada Permission to use this work
# for any purpose must be obtained in writing from Sagemcom Canada
# 5252 de Maisonneuve Blvd. West, suite 400, Montreal, Quebec H4A 3S5
#////////////////////////////////////////////////////////////////////////////
import base64
import logging
from libs.utils.utility import Utility
from libs.ldap.exclusion_filter import LdapExclusionFilter
import ldap
import ldap.filter as Filter
from ldap.controls import SimplePagedResultsControl
LDAP_SERVER_SHOW_DELETED_OID = '1.2.840.113556.1.4.417'
LDAP_CONTROL_PAGE_OID = '1.2.840.113556.1.4.319'
logger = logging.getLogger(__name__)
#Wrapper around ldap queries
class LDAPWrapper():
#In : None
#Out : None
def __init__(self, ad_infos, page_size):
self._page_size = page_size
self._highest_committed_usn = -1
self.connect_to_ldap_server(ad_infos)
#In : None
#Out : integer (highest committed USN)
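#Note : the value is read from the AD rootDSE (empty base DN, base scope),
# which exposes the highestCommittedUSN attribute.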
def get_highest_usn_comitted(self):
if self._highest_committed_usn == -1:
base_dn = ""
search_scope = ldap.SCOPE_BASE
try:
msgid = self._server_handler.search(base_dn, search_scope)
result_type, result_data = self._server_handler.result(msgid, 0)
if result_type == ldap.RES_SEARCH_ENTRY:
self._highest_committed_usn = int((result_data[0][1]['highestCommittedUSN'][0]))
except:
raise Exception("Could not retrieve highest USN")
return self._highest_committed_usn
#In : integer (min_USN), integer (max_USN), dictionary (search_nodes)
#Out : dictionary (entries), dictionary (deleted entries)
def sync_search(self, min_USN, max_USN, search_nodes):
logger.debug("Syncing entries by usn %s - %s" % (min_USN, max_USN))
not_deleted_entries = {}
deleted_entries = {}
if search_nodes['query_for_users']:
not_deleted_entries = self._find_entries_by_usn(min_USN, max_USN, search_nodes['query_for_users'], [])
if search_nodes['query_for_deleted_users']:
deleted_entries = self._find_entries_by_usn(min_USN, max_USN, search_nodes['query_for_deleted_users'], [LDAP_SERVER_SHOW_DELETED_OID])
return not_deleted_entries, deleted_entries
#In : string (object_id)
#Out : dictionary (AD entry)
def find_entry_by_object_id(self, object_id, search_nodes):
object_id = base64.b16decode(object_id)
object_id = Filter.escape_filter_chars(object_id, escape_mode=0)
ctrl_response = ldap.controls.RequestControl(LDAP_SERVER_SHOW_DELETED_OID, True)
for entry in search_nodes:
msgid = self._server_handler.search_ext(entry['base'], ldap.SCOPE_SUBTREE, 'objectGUID=%s' % object_id, serverctrls=[ctrl_response])
result_type, result_data = self._server_handler.result(msgid)
if result_type == ldap.RES_SEARCH_RESULT:
try:
result_data = Utility.get_first(result_data)
new_item = {'ad_data': result_data[1], 'usn_changed': Utility.get_first(result_data[1]['uSNChanged'])}
new_item['ad_data']['objectGUID'][0] = base64.b16encode(new_item['ad_data']['objectGUID'][0])
return {entry['base']: [new_item]}
except:
pass
#In : None
#Out : None
def connect_to_ldap_server(self, AD_infos):
try:
ldapURI = 'ldap://%s:%s' % (AD_infos['address'], AD_infos['port'])
self._server_handler = ldap.initialize(ldapURI)
self._server_handler.protocol_version = 3
self._server_handler.set_option(ldap.OPT_REFERRALS, 0)
self._server_handler.simple_bind_s(AD_infos['username'], AD_infos['password'])
logger.info("Link established with directory: %s Port : %i." % (AD_infos['address'], AD_infos['port']))
except Exception, e:
logger.exception("Failed to connect to ldap server")
raise Exception("Failed to connect to ldap server : %s" % str(e))
def terminate(self):
Utility.permissive_execute(self._server_handler.unbind_s)
# Filters out exclusions if present.
# Exclusions is a list of attributes (eg mail) and exclusion filters with wildcards (eg *administrators*)
def _filter_exclusions(self, query_params, results):
exclusion_filter = LdapExclusionFilter(query_params)
return exclusion_filter.filter(results)
#In : string (base_dn), string (search_scope), string (search_filter), array (control_codes)
#Out : array (ldap result)
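#Note : results are fetched in pages of at most self._page_size entries; the
# loop stops once the server returns an empty paging cookie.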
def _ldap_page_search(self, base_dn, search_scope, search_filter, control_codes):
results = []
control_args = []
page_handler = SimplePagedResultsControl(True, self._page_size, '')
control_args.append(page_handler)
for code in control_codes:
ctrl_response = ldap.controls.RequestControl(code, True)
control_args.append(ctrl_response)
while True:
msgid = self._server_handler.search_ext(base_dn, search_scope, search_filter, serverctrls=control_args)
result_type, result_data, rmsgid, controls = self._server_handler.result3(msgid)
if not result_data:
break
if result_type == ldap.RES_SEARCH_RESULT:
for r in result_data:
results.append(r)
page_controls = [c for c in controls if c.controlType == LDAP_CONTROL_PAGE_OID]
if page_controls:
page_handler.size, page_handler.cookie = (page_controls[0].size, page_controls[0].cookie)
if not page_handler.cookie:
break
else:
break
return results
#In : string (base_filter), array (additional_filters)
#Out : string (reformatted filter)
def _format_ldap_filter(self, base_filter, additional_filters):
#Remove enclosing parentheses
if base_filter.startswith('(') and base_filter.endswith(')'):
base_filter = base_filter[1:-1]
new_filter = '(' + base_filter + ')'
for fltr in additional_filters:
new_filter += '(' + fltr + ')'
return '(&' + new_filter + ')'
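    # Illustrative result (values are hypothetical): with base_filter '(objectClass=user)'
    # and additional_filters ['uSNChanged>=1000', 'uSNChanged<=2000'], _format_ldap_filter
    # returns '(&(objectClass=user)(uSNChanged>=1000)(uSNChanged<=2000))'.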
#In : integer (min_USN), integer (max_USN), array (search_nodes), array (control_codes)
#Out : dictionary (AD entries)
def _find_entries_by_usn(self, min_USN, max_USN, search_nodes, control_codes):
entries = {}
for entry in search_nodes:
base_dn = entry['base']
if entry['scope'] == 'BASE':
search_scope = ldap.SCOPE_BASE
elif entry['scope'] == 'ONELEVEL':
search_scope = ldap.SCOPE_ONELEVEL
else:
search_scope = ldap.SCOPE_SUBTREE
additional_filters = []
additional_filters.append('uSNChanged>=' + str(min_USN))
additional_filters.append('uSNChanged<=' + str(max_USN))
search_filter = self._format_ldap_filter(entry['filter'], additional_filters)
results = self._ldap_page_search(base_dn, search_scope, search_filter, control_codes)
results = self._filter_exclusions(entry, results)
#Initialize dictionary to empty values
entries[base_dn] = []
for item in results:
if item[0] is None:
logger.debug("Got referrals during search (%s), skipping it" % item[1])
continue
new_item = {'ad_data': item[1], 'usn_changed': item[1]['uSNChanged'][0]}
new_item['ad_data']['objectGUID'][0] = base64.b16encode(new_item['ad_data']['objectGUID'][0])
entries[base_dn].append(new_item)
if not entries[base_dn]:
entries.pop(base_dn, None)
return entries
| 41.726316
| 146
| 0.626387
|
84e3d2bf75b5344fe1ff8e6e1e921c64c4175720
| 3,548
|
py
|
Python
|
CSDGAN/pipeline/data/make_image_dataset.py
|
Atrus619/CSDGAN
|
712be213e59b32a79a4970684d726af63616edaf
|
[
"MIT"
] | 1
|
2019-06-27T12:14:39.000Z
|
2019-06-27T12:14:39.000Z
|
CSDGAN/pipeline/data/make_image_dataset.py
|
Atrus619/CSDGAN
|
712be213e59b32a79a4970684d726af63616edaf
|
[
"MIT"
] | 21
|
2019-08-06T12:44:14.000Z
|
2019-11-03T14:28:46.000Z
|
CSDGAN/pipeline/data/make_image_dataset.py
|
Atrus619/Synthetic_Data_GAN_Capstone
|
712be213e59b32a79a4970684d726af63616edaf
|
[
"MIT"
] | 1
|
2020-07-25T13:14:31.000Z
|
2020-07-25T13:14:31.000Z
|
import CSDGAN.utils.constants as cs
import CSDGAN.utils.db as db
import CSDGAN.utils.utils as cu
import CSDGAN.utils.img_data_loading as cuidl
import logging
import os
import pickle as pkl
def make_image_dataset(run_id, username, title, folder, bs, x_dim=None, splits=None):
"""
    The image data set is required to be a single zip, with all images that share a label stored in a folder named after that label.
    Images should either all be the same size, or a target image size should be provided (all images will be cropped to that size).
    Assumes the file has already been unzipped and checked by the create.py functions and related util functions.
This file accomplishes the following:
1. Accepts a desired image size (optional, else first image dim will be used), batch size, and train/val/test splits
2. Splits data into train/val/test splits via stratified sampling and moves into corresponding folders
3. Deletes original unzipped images
4. Pickles label encoder, one hot encoder, resulting image size, and all three generators
"""
run_id = str(run_id)
db.query_verify_live_run(run_id=run_id)
cu.setup_run_logger(name='dataset_func', username=username, title=title)
logger = logging.getLogger('dataset_func')
try:
db.query_set_status(run_id=run_id, status_id=cs.STATUS_DICT['Preprocessing data'])
# Check existence of run directory
run_dir = os.path.join(cs.RUN_FOLDER, username, title)
assert os.path.exists(run_dir), "Run directory does not exist"
unzipped_path = os.path.join(run_dir, folder)
assert os.path.exists(unzipped_path), "Unzipped path does not exist"
# Load and preprocess data
import_gen = cuidl.import_dataset(path=unzipped_path, bs=bs, shuffle=False, incl_paths=True)
splits = [float(num) for num in splits]
le, ohe, x_dim = cuidl.preprocess_imported_dataset(path=unzipped_path, import_gen=import_gen,
splits=splits, x_dim=x_dim)
logger.info('Data successfully imported and preprocessed. Splitting into train/val/test...')
# Create data loader for each component of data set
train_gen = cuidl.import_dataset(os.path.join(unzipped_path, 'train'), bs=bs, shuffle=True, incl_paths=False)
val_gen = cuidl.import_dataset(os.path.join(unzipped_path, 'val'), bs=bs, shuffle=True, incl_paths=False)
test_gen = cuidl.import_dataset(os.path.join(unzipped_path, 'test'), bs=bs, shuffle=True, incl_paths=False)
logger.info('Data successfully split into train/val/test. Pickling and exiting.')
# Pickle relevant objects
with open(os.path.join(run_dir, "le.pkl"), "wb") as f:
pkl.dump(le, f)
with open(os.path.join(run_dir, "ohe.pkl"), "wb") as f:
pkl.dump(ohe, f)
with open(os.path.join(run_dir, "train_gen.pkl"), "wb") as f:
pkl.dump(train_gen, f)
with open(os.path.join(run_dir, "val_gen.pkl"), "wb") as f:
pkl.dump(val_gen, f)
with open(os.path.join(run_dir, "test_gen.pkl"), "wb") as f:
pkl.dump(test_gen, f)
except Exception as e:
db.query_set_status(run_id=run_id, status_id=cs.STATUS_DICT['Error'])
logger.exception('Error: %s', e)
raise Exception("Intentionally failing process after broadly catching an exception. "
"Logs describing this error can be found in the run's specific logs file.")
| 47.306667
| 138
| 0.683484
|
57a92adf3f1913f1a0989e2c1c86089e618d7607
| 276
|
py
|
Python
|
setup.py
|
kulia/x4m300-parser
|
f37451774daf9d35f225e614c09a26d5b25fce86
|
[
"MIT"
] | null | null | null |
setup.py
|
kulia/x4m300-parser
|
f37451774daf9d35f225e614c09a26d5b25fce86
|
[
"MIT"
] | null | null | null |
setup.py
|
kulia/x4m300-parser
|
f37451774daf9d35f225e614c09a26d5b25fce86
|
[
"MIT"
] | 1
|
2022-02-28T02:01:16.000Z
|
2022-02-28T02:01:16.000Z
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='x4m300parser',
version='0.1',
      description='Parser for x4m300',
author='Geir Kulia',
author_email='geir@kulia.no',
url='http://kulia.no',
packages=['x4m300parser'],
)
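# Typical install step for a distutils-based package like this one:
#   python setup.py install    (or: pip install . from the project root)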
| 21.230769
| 38
| 0.619565
|
0e26fabd7ecd52d8bd4e7b25c0fc56bc0de024b8
| 2,959
|
py
|
Python
|
scripts/ccpp_prebuild_config_cam_kessler.py
|
cacraigucar/ccpp-framework
|
82d22110e8be81b0f1e4a0fffcd1b2f5ed2a6981
|
[
"Apache-2.0"
] | null | null | null |
scripts/ccpp_prebuild_config_cam_kessler.py
|
cacraigucar/ccpp-framework
|
82d22110e8be81b0f1e4a0fffcd1b2f5ed2a6981
|
[
"Apache-2.0"
] | 3
|
2018-06-12T17:29:28.000Z
|
2018-12-13T16:49:00.000Z
|
scripts/ccpp_prebuild_config_cam_kessler.py
|
cacraigucar/ccpp-framework
|
82d22110e8be81b0f1e4a0fffcd1b2f5ed2a6981
|
[
"Apache-2.0"
] | 2
|
2018-06-11T21:12:57.000Z
|
2018-09-05T20:37:44.000Z
|
#!/usr/bin/env python
# CCPP prebuild config for simple CAM physics
###############################################################################
# Definitions #
###############################################################################
# Add all files with metadata tables on the host model side,
# relative to basedir = top-level directory of host model
VARIABLE_DEFINITION_FILES = [
'cam_driver/src/cam_var_defs.f90',
]
# Can be empty, since all physics schemes and their
# dependencies are hardcoded in CMakeLists in
# ccpp-physics - to fix, c.f. FV3 v1
SCHEME_FILES_DEPENDENCIES = []
# Add all physics scheme files relative to basedir
SCHEME_FILES = [
'simple/src/kessler.F90',
]
# Auto-generated makefile/cmakefile snippets that contain all schemes
SCHEMES_MAKEFILE = 'simple/CCPP_SCHEMES.mk'
SCHEMES_CMAKEFILE = 'simple/CCPP_SCHEMES.cmake'
# CCPP host cap in which to insert the ccpp_field_add statements;
# determines the directory to place ccpp_{modules,fields}.inc
TARGET_FILES = [
'cam_driver/src/cam_kessler.f90',
]
# Auto-generated makefile/cmakefile snippets that contain all caps
CAPS_MAKEFILE = 'simple/CCPP_CAPS.mk'
CAPS_CMAKEFILE = 'simple/CCPP_CAPS.cmake'
# Directory where to put all auto-generated physics caps
CAPS_DIR = 'simple/physics'
# Optional arguments - only required for schemes that use
# optional arguments. ccpp_prebuild.py will throw an exception
# if it encounters a scheme subroutine with optional arguments
# if no entry is made here. Possible values are: 'all', 'none',
# or a list of standard_names: [ 'var1', 'var3' ].
OPTIONAL_ARGUMENTS = {
}
# Names of Fortran include files in the host model cap (do not change);
# both files will be written to the directory of each target file
MODULE_INCLUDE_FILE = 'ccpp_modules.inc'
FIELDS_INCLUDE_FILE = 'ccpp_fields.inc'
# HTML document containing the model-defined CCPP variables
HTML_VARTABLE_FILE = 'simple/CCPP_VARIABLES_simple.html'
# LaTeX document containing the provided vs requested CCPP variables
LATEX_VARTABLE_FILE = 'ccpp-framework/doc/DevelopersGuide/CCPP_VARIABLES_simple.tex'
###############################################################################
# Template code to generate include files #
###############################################################################
# Name of the CCPP data structure in the host model cap;
# in the case of simple CAM physics, this is a vector with loop index i
CCPP_DATA_STRUCTURE = 'cdata(i)'
# Modules to load for auto-generated ccpp_field_add code
# in the host model cap (e.g. error handling)
MODULE_USE_TEMPLATE_HOST_CAP = \
'''
use ccpp_errors, only: ccpp_error
'''
# Modules to load for auto-generated ccpp_field_get code
# in the physics scheme cap (e.g. derived data types)
MODULE_USE_TEMPLATE_SCHEME_CAP = \
'''
use machine, only: kind_phys
'''
| 35.22619
| 84
| 0.657655
|
54550ca7492bb9d377b5d91f814976624db34f31
| 4,266
|
py
|
Python
|
benchmark/startQiskit_noisy2590.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy2590.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy2590.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=39
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
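# Hand-traced examples for the two helpers above:
#   bitwise_xor("110", "000") -> "011"   (element-wise XOR, result string reversed)
#   bitwise_dot("111", "101") -> "0"     (1*1 + 1*0 + 1*1 = 2, and 2 % 2 = 0)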
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=19
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.h(input_qubit[3]) # number=24
prog.cz(input_qubit[0],input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=26
prog.cx(input_qubit[0],input_qubit[3]) # number=31
prog.x(input_qubit[3]) # number=32
prog.cx(input_qubit[0],input_qubit[3]) # number=33
prog.cx(input_qubit[0],input_qubit[3]) # number=18
prog.h(input_qubit[3]) # number=36
prog.cz(input_qubit[0],input_qubit[3]) # number=37
prog.h(input_qubit[3]) # number=38
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.y(input_qubit[1]) # number=29
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[1]) # number=30
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.swap(input_qubit[3],input_qubit[0]) # number=22
prog.swap(input_qubit[3],input_qubit[0]) # number=23
prog.swap(input_qubit[1],input_qubit[0]) # number=27
prog.swap(input_qubit[1],input_qubit[0]) # number=28
prog.swap(input_qubit[3],input_qubit[0]) # number=34
prog.swap(input_qubit[3],input_qubit[0]) # number=35
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2590.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.967213
| 140
| 0.655649
|
0fedff97d57e5819344c4a3cfcd214370c3e5a3c
| 52
|
py
|
Python
|
highcliff/singleton/__init__.py
|
sermelo/Highcliff-SDK
|
255dd12b3402361cba8b1ea7a28c506f32a11dae
|
[
"Apache-2.0"
] | null | null | null |
highcliff/singleton/__init__.py
|
sermelo/Highcliff-SDK
|
255dd12b3402361cba8b1ea7a28c506f32a11dae
|
[
"Apache-2.0"
] | null | null | null |
highcliff/singleton/__init__.py
|
sermelo/Highcliff-SDK
|
255dd12b3402361cba8b1ea7a28c506f32a11dae
|
[
"Apache-2.0"
] | null | null | null |
from highcliff.singleton.singleton import Singleton
| 26
| 51
| 0.884615
|
3aa96fc309764b834cc9e496484839a6718e165d
| 1,855
|
py
|
Python
|
3_1_classification.py
|
RyuTake/PythonStudy
|
7726f0cdaed2e65134027540d53ba77dc0ffe4cc
|
[
"MIT"
] | null | null | null |
3_1_classification.py
|
RyuTake/PythonStudy
|
7726f0cdaed2e65134027540d53ba77dc0ffe4cc
|
[
"MIT"
] | null | null | null |
3_1_classification.py
|
RyuTake/PythonStudy
|
7726f0cdaed2e65134027540d53ba77dc0ffe4cc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 14:35:29 2018
@author: rtake
"""
import matplotlib.pyplot as plt
from sklearn import datasets
import numpy as np
# Load the digits dataset
digits = datasets.load_digits()
# Display the first ten images in a 2x5 grid
for label, img in zip(digits.target[:10], digits.images[:10]):
    plt.subplot(2,5,label+1) # place each image at its position in the grid
    plt.axis('off') # hide the axes
    plt.imshow(img, cmap = plt.cm.gray_r, interpolation = 'nearest') # draw the image
    plt.title('Digit:{0}'.format(label)) # show the label as "Digit:<n>"
plt.show()
#%% Build a classifier
# Select the samples whose label is 3 or 8
flag_3_8 = (digits.target == 3) + (digits.target == 8)
# Get the images and labels for 3 and 8
images = digits.images[flag_3_8]
labels = digits.target[flag_3_8]
# Flatten each image matrix into a one-dimensional vector
images = images.reshape(images.shape[0], -1)
#%% Create the classifier
from sklearn import tree
n_samples = len(flag_3_8[flag_3_8]) # number of samples, read from the array length
train_size = int(n_samples * 3 / 5) # use 60% of the samples as training data
classifier = tree.DecisionTreeClassifier() # build the classifier (a decision tree)
classifier.fit(images[:train_size], labels[:train_size]) # train the classifier on the training data
#%% Evaluate the classifier
from sklearn import metrics
expected = labels[train_size:] # use the remaining samples for evaluation
predicted = classifier.predict(images[train_size:]) # predict labels for the evaluation data
print('Accuracy:\n', metrics.accuracy_score(expected, predicted)) # accuracy
print('\nConfusion matrix:\n', metrics.confusion_matrix(expected, predicted)) # confusion matrix (predicted vs. actual)
print('\nPrecision:\n', metrics.precision_score(expected, predicted, pos_label=3)) # precision for label 3
print('\nRecall:\n', metrics.recall_score(expected, predicted, pos_label=3)) # recall for label 3
print('\nF-measure:\n',metrics.f1_score(expected, predicted, pos_label=3)) # F-measure for label 3
| 34.351852
| 99
| 0.669542
|
9b582fd92da5a307d8b54e44b80fc4b8d9427898
| 6,339
|
py
|
Python
|
data/Imagenet.py
|
dumpmemory/Transformer-Explainability
|
1dd5a47caded2613b74058777bf7758597fa92cf
|
[
"MIT"
] | 894
|
2020-12-17T21:03:31.000Z
|
2022-03-31T11:58:31.000Z
|
data/Imagenet.py
|
jeonggunlee/Transformer-Explainability
|
951e112d24c1a642ceefeb0dd03a607040305383
|
[
"MIT"
] | 33
|
2020-12-18T13:23:57.000Z
|
2022-03-31T20:51:45.000Z
|
data/Imagenet.py
|
jeonggunlee/Transformer-Explainability
|
951e112d24c1a642ceefeb0dd03a607040305383
|
[
"MIT"
] | 109
|
2020-12-19T01:39:28.000Z
|
2022-03-31T09:52:39.000Z
|
import os
import torch
import torch.utils.data as data
import numpy as np
import cv2
from torchvision.datasets import ImageNet
from PIL import Image, ImageFilter
import h5py
from glob import glob
class ImageNet_blur(ImageNet):
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
gauss_blur = ImageFilter.GaussianBlur(11)
median_blur = ImageFilter.MedianFilter(11)
blurred_img1 = sample.filter(gauss_blur)
blurred_img2 = sample.filter(median_blur)
blurred_img = Image.blend(blurred_img1, blurred_img2, 0.5)
if self.transform is not None:
sample = self.transform(sample)
blurred_img = self.transform(blurred_img)
if self.target_transform is not None:
target = self.target_transform(target)
return (sample, blurred_img), target
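# Usage sketch for ImageNet_blur (path and transform are placeholders):
#
#   ds = ImageNet_blur('/path/to/imagenet', split='val', transform=some_transform)
#   (img, blurred_img), target = ds[0]   # original sample plus its blended gaussian/median blur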
class Imagenet_Segmentation(data.Dataset):
CLASSES = 2
def __init__(self,
path,
transform=None,
target_transform=None):
self.path = path
self.transform = transform
self.target_transform = target_transform
# self.h5py = h5py.File(path, 'r+')
self.h5py = None
tmp = h5py.File(path, 'r')
self.data_length = len(tmp['/value/img'])
tmp.close()
del tmp
def __getitem__(self, index):
if self.h5py is None:
self.h5py = h5py.File(self.path, 'r')
img = np.array(self.h5py[self.h5py['/value/img'][index, 0]]).transpose((2, 1, 0))
target = np.array(self.h5py[self.h5py[self.h5py['/value/gt'][index, 0]][0, 0]]).transpose((1, 0))
img = Image.fromarray(img).convert('RGB')
target = Image.fromarray(target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = np.array(self.target_transform(target)).astype('int32')
target = torch.from_numpy(target).long()
return img, target
def __len__(self):
# return len(self.h5py['/value/img'])
return self.data_length
class Imagenet_Segmentation_Blur(data.Dataset):
CLASSES = 2
def __init__(self,
path,
transform=None,
target_transform=None):
self.path = path
self.transform = transform
self.target_transform = target_transform
# self.h5py = h5py.File(path, 'r+')
self.h5py = None
tmp = h5py.File(path, 'r')
self.data_length = len(tmp['/value/img'])
tmp.close()
del tmp
def __getitem__(self, index):
if self.h5py is None:
self.h5py = h5py.File(self.path, 'r')
img = np.array(self.h5py[self.h5py['/value/img'][index, 0]]).transpose((2, 1, 0))
target = np.array(self.h5py[self.h5py[self.h5py['/value/gt'][index, 0]][0, 0]]).transpose((1, 0))
img = Image.fromarray(img).convert('RGB')
target = Image.fromarray(target)
gauss_blur = ImageFilter.GaussianBlur(11)
median_blur = ImageFilter.MedianFilter(11)
blurred_img1 = img.filter(gauss_blur)
blurred_img2 = img.filter(median_blur)
blurred_img = Image.blend(blurred_img1, blurred_img2, 0.5)
# blurred_img1 = cv2.GaussianBlur(img, (11, 11), 5)
# blurred_img2 = np.float32(cv2.medianBlur(img, 11))
# blurred_img = (blurred_img1 + blurred_img2) / 2
if self.transform is not None:
img = self.transform(img)
blurred_img = self.transform(blurred_img)
if self.target_transform is not None:
target = np.array(self.target_transform(target)).astype('int32')
target = torch.from_numpy(target).long()
return (img, blurred_img), target
def __len__(self):
# return len(self.h5py['/value/img'])
return self.data_length
class Imagenet_Segmentation_eval_dir(data.Dataset):
CLASSES = 2
def __init__(self,
path,
eval_path,
transform=None,
target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.h5py = h5py.File(path, 'r+')
# 500 each file
self.results = glob(os.path.join(eval_path, '*.npy'))
def __getitem__(self, index):
img = np.array(self.h5py[self.h5py['/value/img'][index, 0]]).transpose((2, 1, 0))
target = np.array(self.h5py[self.h5py[self.h5py['/value/gt'][index, 0]][0, 0]]).transpose((1, 0))
res = np.load(self.results[index])
img = Image.fromarray(img).convert('RGB')
target = Image.fromarray(target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = np.array(self.target_transform(target)).astype('int32')
target = torch.from_numpy(target).long()
return img, target
def __len__(self):
return len(self.h5py['/value/img'])
if __name__ == '__main__':
import torchvision.transforms as transforms
from tqdm import tqdm
from imageio import imsave
import scipy.io as sio
# meta = sio.loadmat('/home/shirgur/ext/Data/Datasets/temp/ILSVRC2012_devkit_t12/data/meta.mat', squeeze_me=True)['synsets']
# Data
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_img_trans = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize,
])
test_lbl_trans = transforms.Compose([
transforms.Resize((224, 224), Image.NEAREST),
])
ds = Imagenet_Segmentation('/home/shirgur/ext/Data/Datasets/imagenet-seg/other/gtsegs_ijcv.mat',
transform=test_img_trans, target_transform=test_lbl_trans)
for i, (img, tgt) in enumerate(tqdm(ds)):
tgt = (tgt.numpy() * 255).astype(np.uint8)
imsave('/home/shirgur/ext/Code/C2S/run/imagenet/gt/{}.png'.format(i), tgt)
print('here')
| 31.226601
| 128
| 0.604354
|
cddc7bf1470b37fefbfdde2847d38ce422742507
| 341
|
py
|
Python
|
flask_app.py
|
kjflyback/June-work
|
73d865d6c19b1734764250f7cb28d80c93b171b4
|
[
"Apache-2.0"
] | null | null | null |
flask_app.py
|
kjflyback/June-work
|
73d865d6c19b1734764250f7cb28d80c93b171b4
|
[
"Apache-2.0"
] | null | null | null |
flask_app.py
|
kjflyback/June-work
|
73d865d6c19b1734764250f7cb28d80c93b171b4
|
[
"Apache-2.0"
] | null | null | null |
import os
from dailywork import app as application
print(application.config)
basedir = os.path.abspath('')
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, "app.db")
application.config['SQLALCHEMY_MIGRATE_REPO'] = os.path.join(basedir, 'db_repository')
application.debug = True
application.run('', 8000)
| 37.888889
| 94
| 0.765396
|
d02982384e8d86d71d25e6d868fd0e9d02dc3174
| 3,237
|
py
|
Python
|
Chapter_5/dictionary_dt.py
|
alenasf/AutomateTheBoringStuff
|
041e56221eb98d9893c24d22497034e6344c0490
|
[
"Apache-2.0"
] | null | null | null |
Chapter_5/dictionary_dt.py
|
alenasf/AutomateTheBoringStuff
|
041e56221eb98d9893c24d22497034e6344c0490
|
[
"Apache-2.0"
] | null | null | null |
Chapter_5/dictionary_dt.py
|
alenasf/AutomateTheBoringStuff
|
041e56221eb98d9893c24d22497034e6344c0490
|
[
"Apache-2.0"
] | null | null | null |
# The Dictionary Data Type
myCat = {'size':'fat', 'color': 'gray', 'disposition': 'loud'}
myCat['size'] # 'fat'
'My cat has ' + myCat['color'] + ' fur.' # 'My cat has gray fur.'
# Dictionaries vs. List
spam = ['cats', 'dogs', 'moose']
bacon = ['dogs', 'moose', 'cats']
spam == bacon # False
eggs = {'name': 'Zophie', 'species': 'cat', 'age': '8'}
ham = {'species': 'cat', 'age': '8', 'name': 'Zophie' }
eggs == ham # True
# birthday.py
birthdays = {'Alice': 'Apr 1', 'Bob': 'Dec 12', 'Carol': 'Mar 4'}
while True:
print('Enter a name: (blank to quit)')
name = input()
if name == '':
break
if name in birthdays:
print(birthdays[name] + 'is the birthday of ' + name)
else:
print('I do not have birthday information for ' + name)
print('What is their birthday?')
bday = input()
birthdays[name] = bday
print('Birthday database updated.')
# Ordered dictionaries in Python 3.7
eggs = {'name': 'Zophie', 'species': 'cat', 'age': '8'}
list(eggs) # ['name', 'species', 'age']
ham = {'species': 'cat', 'age': '8', 'name': 'Zophie' }
list(ham) # ['species', 'age', 'name']
# The keys(), values(), and items() Methods
spam = {'color': 'red', 'age': 42}
for v in spam.values():
print(v)
# red
# 42
for k in spam.keys():
print(k)
#color
#age
for i in spam.items():
print(i)
# ('color', 'red')
# ('age', 42) # Values in the dict_items value returned by the items() method are tuples of the key and value.
# If you want a true list from one of these methods, see this example:
spam = {'color': 'red', 'age': 42}
spam.keys() # dict_keys(['color', 'age'])
list(spam.keys()) #['color', 'age']
spam = {'color': 'red', 'age': 42}
for k, v in spam.items():
print('Key: ' + k + ' Value: ' + str(v))
# Key: color Value: red
# Key: age Value: 42
# Checking Whether a Key or Value Exists in a Dictionary
spam = {'name': 'Zophie', 'age': 7}
'name' in spam.keys() # True
'Zophie' in spam.values() # True
'color' not in spam.keys() # True
'color' in spam # False
# The get() Method
# has 2 arguments: the key of the value to retrieve and a fallback value to return if that key does not exist
picnicItems = {'apples': 5, 'cups': 2}
'I am bringing ' + str(picnicItems.get('cups', 0)) + ' cups.' # 'I am bringing 2 cups.'
'I am bringing ' + str(picnicItems.get('eggs', 0)) + ' eggs.' # 'I am bringing 0 eggs.'
# The setdefault() Method
spam = {'name': 'Pooka', 'age': 5}
if 'color' not in spam:
spam['color'] = 'black'
# same code with setdefault() method:
spam = {'name': 'Pooka', 'age': 5}
spam.setdefault('color', 'black') # 'black'
spam # {'color': 'black', 'name': 'Pooka', 'age': 5}
spam.setdefault('color', 'white') # 'black'
spam # {'color': 'black', 'name': 'Pooka', 'age': 5}
# CharacterCount.py
message = 'It was a bright cold day in April, and the clocks were striking thirteen'
count = {}
for character in message:
count.setdefault(character,0)
count[character] = count[character] + 1
print(count)
# Pretty Print
import pprint
message = 'It was a bright cold day in April, and the clocks were striking thirteen'
count = {}
for character in message:
count.setdefault(character,0)
count[character] = count[character] + 1
pprint.pprint(count)
| 26.975
| 109
| 0.604263
|
90b656f331385aa19d1cd0a85b67cd1dbba2aee2
| 5,121
|
py
|
Python
|
multivis/utils/transform.py
|
brettChapman/cimcb_vis
|
b373ed426b24ece1dcc20febd7c8023921b024d6
|
[
"MIT"
] | 1
|
2021-06-27T23:52:40.000Z
|
2021-06-27T23:52:40.000Z
|
multivis/utils/transform.py
|
brettChapman/cimcb_vis
|
b373ed426b24ece1dcc20febd7c8023921b024d6
|
[
"MIT"
] | null | null | null |
multivis/utils/transform.py
|
brettChapman/cimcb_vis
|
b373ed426b24ece1dcc20febd7c8023921b024d6
|
[
"MIT"
] | 2
|
2021-06-27T23:53:03.000Z
|
2021-07-12T12:59:23.000Z
|
import sys
from .scaler import scaler
from sklearn.preprocessing import OrdinalEncoder
import numpy as np
def transform(data, transform_type, min, max):
"""Scales and transforms data in forward or reverse order based on different transform options
Parameters
----------
data : A 1D numpy array of values
transform_type : The transform type to apply to the data ("linear", "reverse_linear", "log", "reverse_log", "square", "reverse_square", "area", "reverse_area", "volume", "reverse_volume", "ordinal", "reverse_ordinal")
min : The minimum value for scaling
max : The maximum value for scaling
Returns
-------
transformed_data : A scaled and transformed numpy array
"""
data, transform_type, min, max = __checkData(data, transform_type, min, max)
if transform_type != "ordinal":
        # if not ordinal, scale between 1 and 10 first to avoid log-of-zero and divide-by-zero errors while transforming
data = np.array([x for x in list(scaler(data, type="minmax", minimum=1, maximum=10))])
scaled_data = []
if transform_type == 'linear':
scaled_data = [x for x in list(scaler(data, type='minmax', minimum=min, maximum=max))]
    elif transform_type == 'reverse_linear':
data = np.divide(1, data)
scaled_data = [x for x in list(scaler(data, type='minmax', minimum=min, maximum=max))]
elif transform_type == 'log':
data = np.log(data)
scaled_data = [x for x in list(scaler(data, type='minmax', minimum=min, maximum=max))]
elif transform_type == 'reverse_log':
data = np.divide(1, data)
data = np.log(data)
scaled_data = [x for x in list(scaler(data, type='minmax', minimum=min, maximum=max))]
elif transform_type == 'square':
data = np.square(data)
scaled_data = [x for x in list(scaler(data, type='minmax', minimum=min, maximum=max))]
elif transform_type == 'reverse_square':
data = np.divide(1, data)
data = np.square(data)
scaled_data = [x for x in list(scaler(data, type='minmax', minimum=min, maximum=max))]
elif transform_type == 'area':
data = np.square(data)
data = [np.multiply(x, np.pi) for x in list(map(float, data))]
scaled_data = [round(x) for x in list(map(int, scaler(data, type='minmax', minimum=min, maximum=max)))]
elif transform_type == 'reverse_area':
data = np.divide(1, data)
data = np.square(data)
data = [np.multiply(x, np.pi) for x in list(map(float, data))]
scaled_data = [round(x) for x in list(map(int, scaler(data, type='minmax', minimum=min, maximum=max)))]
elif transform_type == 'volume':
data = [np.power(x, 3) for x in list(map(float, data))]
data = [np.multiply(x, np.pi) for x in list(map(float, data))]
data = [np.multiply(x, 4 / 3) for x in list(map(float, data))]
scaled_data = [round(x) for x in list(map(int, scaler(data, type='minmax', minimum=min, maximum=max)))]
elif transform_type == 'reverse_volume':
data = np.divide(1, data)
data = [np.power(x, 3) for x in list(map(float, data))]
data = [np.multiply(x, np.pi) for x in list(map(float, data))]
data = [np.multiply(x, 4 / 3) for x in list(map(float, data))]
scaled_data = [round(x) for x in list(map(int, scaler(data, type='minmax', minimum=min, maximum=max)))]
elif transform_type == 'ordinal':
encoder = OrdinalEncoder()
scaled_data = encoder.fit_transform(data.reshape(-1, 1)).flatten()
scaled_data = np.array([x for x in list(scaler(scaled_data, type="minmax", minimum=min, maximum=max))])
elif transform_type == 'reverse_ordinal':
encoder = OrdinalEncoder()
scaled_data = encoder.fit_transform(data.reshape(-1, 1)).flatten()
scaled_data = np.divide(1, scaled_data)
scaled_data = np.array([x for x in list(scaler(scaled_data, type="minmax", minimum=min, maximum=max))])
return scaled_data
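# Usage sketch (illustrative values):
#
#   node_sizes = transform(np.array([2.0, 8.0, 32.0]), "log", min=10, max=50)
#   # values are first scaled into [1, 10], log-transformed, then min-max
#   # scaled into [10, 50], so the smallest input maps to 10 and the largest to 50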
def __checkData(data, transform, min, max):
if not isinstance(data, np.ndarray):
print("Error: A numpy array was not entered. Please check your data.")
sys.exit()
if transform.lower() not in ["linear", "reverse_linear", "log", "reverse_log", "square", "reverse_square", "area", "reverse_area", "volume", "reverse_volume", "ordinal", "reverse_ordinal"]:
print("Error: The chosen transform type is not valid. Choose either \"linear\", \"reverse_linear\", \"log\", \"reverse_log\", \"square\", \"reverse_square\", \"area\", \"reverse_area\", \"volume\", \"reverse_volume\", \"ordinal\", \"reverse_ordinal\".")
sys.exit()
if not isinstance(min, float):
if not isinstance(min, int):
print("Error: The minimum scaling value is not valid. Choose a float or integer value.")
sys.exit()
if not isinstance(max, float):
if not isinstance(max, int):
print("Error: The maximum scaling value is not valid. Choose a float or integer value.")
sys.exit()
return data, transform, min, max
| 49.240385
| 261
| 0.635618
|
821386fd1f49bd02fa9e355f18f24affabefe91b
| 673
|
py
|
Python
|
Development/Scripts/loadKfoldHistory.py
|
simonsimon006/tensorflow-wavelets
|
21a095bf0048ae2488ca5ae4961d2cbfe94263a9
|
[
"MIT"
] | null | null | null |
Development/Scripts/loadKfoldHistory.py
|
simonsimon006/tensorflow-wavelets
|
21a095bf0048ae2488ca5ae4961d2cbfe94263a9
|
[
"MIT"
] | 1
|
2021-11-11T14:47:43.000Z
|
2021-11-11T14:52:51.000Z
|
Development/Scripts/loadKfoldHistory.py
|
simonsimon006/tensorflow-wavelets
|
21a095bf0048ae2488ca5ae4961d2cbfe94263a9
|
[
"MIT"
] | 1
|
2021-11-11T12:18:21.000Z
|
2021-11-11T12:18:21.000Z
|
import os
import pickle
import matplotlib.pyplot as plt
num_folds = 10
history_file_path = r"../{}_trainHistoryCifar10CNN.txt"
fig, ax = plt.subplots(1, 10)
fig.suptitle('Train/validation loss per fold')
for fold in range(num_folds):
with open(history_file_path.format(fold+1), 'rb') as pickle_file:
history = pickle.load(pickle_file)
# plot train and validation loss
ax[fold].plot(history['loss'])
ax[fold].plot(history['val_loss'])
ax[fold].set_title('model loss fold:'+str(fold+1))
ax[fold].set_ylabel('loss')
ax[fold].set_xlabel('epoch')
ax[fold].legend(['train', 'validation'], loc='upper left')
fig.show()
plt.show()
| 21.709677
| 69
| 0.687964
|
540d94c46211862f96476b28508dc347db8f8c5a
| 3,066
|
py
|
Python
|
plenum/test/restart/test_restart_to_same_view_with_killed_primary.py
|
spivachuk/plenum
|
05123166e8ffa89520541ea3b59b20390aaf92a4
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/restart/test_restart_to_same_view_with_killed_primary.py
|
spivachuk/plenum
|
05123166e8ffa89520541ea3b59b20390aaf92a4
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/restart/test_restart_to_same_view_with_killed_primary.py
|
spivachuk/plenum
|
05123166e8ffa89520541ea3b59b20390aaf92a4
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from plenum.test import waits
from plenum.test.helper import sdk_send_random_and_check, waitForViewChange, view_change_timeout
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.restart.helper import restart_nodes
from plenum.test.test_node import ensureElectionsDone, ensure_node_disconnected
nodeCount = 7
TestRunningTimeLimitSec = 150
VIEW_CHANGE_TIMEOUT = 10
@pytest.fixture(scope="module")
def tconf(tconf):
with view_change_timeout(tconf, VIEW_CHANGE_TIMEOUT):
old_network_3pc_watcher_state = tconf.ENABLE_INCONSISTENCY_WATCHER_NETWORK
tconf.ENABLE_INCONSISTENCY_WATCHER_NETWORK = True
yield tconf
tconf.ENABLE_INCONSISTENCY_WATCHER_NETWORK = old_network_3pc_watcher_state
def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
sdk_pool_handle, sdk_wallet_client):
restart_timeout = tconf.ToleratePrimaryDisconnection + \
waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
primary = txnPoolNodeSet[0]
alive_nodes = txnPoolNodeSet[1:]
minority = alive_nodes[-1:]
majority = alive_nodes[:-1]
# Move to higher view by killing primary
primary.cleanupOnStopping = True
primary.stop()
looper.removeProdable(primary)
ensure_node_disconnected(looper, primary, txnPoolNodeSet)
waitForViewChange(looper, alive_nodes, 1)
ensureElectionsDone(looper, alive_nodes, numInstances=3)
# Add transaction to ledger
sdk_send_random_and_check(looper, alive_nodes, sdk_pool_handle, sdk_wallet_client, 1)
# Restart majority group
majority_before_restart = majority.copy()
restart_nodes(looper, alive_nodes, majority, tconf, tdir, allPluginsPath,
after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
waitForViewChange(looper, majority, 1, customTimeout=2.1 * VIEW_CHANGE_TIMEOUT)
ensureElectionsDone(looper, majority, numInstances=3)
# Check that nodes in minority group are aware that they might have inconsistent 3PC state
for node in minority:
assert node.spylog.count(node.on_inconsistent_3pc_state) == 1
# Check that nodes in majority group didn't think they might have inconsistent 3PC state
for node in majority_before_restart:
assert node.spylog.count(node.on_inconsistent_3pc_state) == 0
# Check that nodes in majority group don't think they might have inconsistent 3PC state
for node in majority:
assert node.spylog.count(node.on_inconsistent_3pc_state) == 0
# Restart minority group
restart_nodes(looper, alive_nodes, minority, tconf, tdir, allPluginsPath,
after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
ensureElectionsDone(looper, alive_nodes, numInstances=3)
# Check that all nodes are still functional
sdk_ensure_pool_functional(looper, alive_nodes, sdk_wallet_client, sdk_pool_handle)
| 43.183099
| 106
| 0.764514
|
0dd6ff80c6258009a79b870e3af647c3de526a1a
| 2,275
|
py
|
Python
|
fastplm/uer/layers/multi_headed_attn.py
|
autoliuweijie/FastPLM
|
fc92a0a5c757b8f3bc7f1af32ade5137d705e031
|
[
"MIT"
] | 2
|
2021-12-30T12:03:12.000Z
|
2022-01-30T10:38:43.000Z
|
fastplm/uer/layers/multi_headed_attn.py
|
autoliuweijie/FastPLM
|
fc92a0a5c757b8f3bc7f1af32ade5137d705e031
|
[
"MIT"
] | null | null | null |
fastplm/uer/layers/multi_headed_attn.py
|
autoliuweijie/FastPLM
|
fc92a0a5c757b8f3bc7f1af32ade5137d705e031
|
[
"MIT"
] | null | null | null |
# -*- encoding:utf-8 -*-
import math
import torch
import torch.nn as nn
class MultiHeadedAttention(nn.Module):
"""
Each head is a self-attention operation.
self-attention refers to https://arxiv.org/pdf/1706.03762.pdf
"""
def __init__(self, hidden_size, heads_num, dropout):
super(MultiHeadedAttention, self).__init__()
self.hidden_size = hidden_size
self.heads_num = heads_num
self.per_head_size = hidden_size // heads_num
self.linear_layers = nn.ModuleList([
nn.Linear(hidden_size, hidden_size) for _ in range(3)
])
self.dropout = nn.Dropout(dropout)
self.final_linear = nn.Linear(hidden_size, hidden_size)
def forward(self, key, value, query, mask):
"""
Args:
key: [batch_size x seq_length x hidden_size]
value: [batch_size x seq_length x hidden_size]
query: [batch_size x seq_length x hidden_size]
mask: [batch_size x 1 x seq_length x seq_length]
Returns:
output: [batch_size x seq_length x hidden_size]
"""
batch_size, seq_length, hidden_size = key.size()
heads_num = self.heads_num
per_head_size = self.per_head_size
def shape(x):
return x. \
contiguous(). \
view(batch_size, seq_length, heads_num, per_head_size). \
transpose(1, 2)
def unshape(x):
return x. \
transpose(1, 2). \
contiguous(). \
view(batch_size, seq_length, hidden_size)
query, key, value = [l(x). \
view(batch_size, -1, heads_num, per_head_size). \
transpose(1, 2) \
for l, x in zip(self.linear_layers, (query, key, value))
]
scores = torch.matmul(query, key.transpose(-2, -1))
scores = scores / math.sqrt(float(per_head_size))
scores = scores + mask
probs = nn.Softmax(dim=-1)(scores)
probs = self.dropout(probs)
output = unshape(torch.matmul(probs, value))
output = self.final_linear(output)
return output
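# Minimal shape check (sizes are illustrative assumptions):
if __name__ == "__main__":
    attn = MultiHeadedAttention(hidden_size=64, heads_num=8, dropout=0.1)
    x = torch.randn(2, 16, 64)        # [batch_size x seq_length x hidden_size]
    mask = torch.zeros(2, 1, 16, 16)  # additive mask; zeros keep every position
    print(attn(x, x, x, mask).shape)  # -> torch.Size([2, 16, 64])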
| 33.955224
| 85
| 0.555165
|
03827a4e99d05eb918f6fa87f3c2a8aded37683d
| 705
|
py
|
Python
|
tools/distrib/python/grpcio_tools/grpc_version.py
|
happybits/grpc
|
669c52b08aeb8503b27fbbd5457e229c2b97fe68
|
[
"Apache-2.0"
] | 4
|
2019-09-10T09:48:57.000Z
|
2021-08-04T09:38:06.000Z
|
tools/distrib/python/grpcio_tools/grpc_version.py
|
happybits/grpc
|
669c52b08aeb8503b27fbbd5457e229c2b97fe68
|
[
"Apache-2.0"
] | null | null | null |
tools/distrib/python/grpcio_tools/grpc_version.py
|
happybits/grpc
|
669c52b08aeb8503b27fbbd5457e229c2b97fe68
|
[
"Apache-2.0"
] | 1
|
2019-09-10T09:48:58.000Z
|
2019-09-10T09:48:58.000Z
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
VERSION = '1.18.0'
| 39.166667
| 106
| 0.763121
|
86b4dc0c37fd64d3825fa089cf4d7bedb4b0d2c3
| 2,458
|
py
|
Python
|
questions/valid-number/Solution.py
|
achow113/LeetCode
|
604d3f25d513df4045e9a3712a5034704a6043a1
|
[
"MIT"
] | 141
|
2017-12-12T21:45:53.000Z
|
2022-03-25T07:03:39.000Z
|
questions/valid-number/Solution.py
|
mithunonline/leetcode-solutions
|
604d3f25d513df4045e9a3712a5034704a6043a1
|
[
"MIT"
] | 32
|
2015-10-05T14:09:52.000Z
|
2021-05-30T10:28:41.000Z
|
questions/valid-number/Solution.py
|
mithunonline/leetcode-solutions
|
604d3f25d513df4045e9a3712a5034704a6043a1
|
[
"MIT"
] | 56
|
2015-09-30T05:23:28.000Z
|
2022-03-08T07:57:11.000Z
|
"""
A valid number can be split up into these components (in order):
A decimal number or an integer.
(Optional) An 'e' or 'E', followed by an integer.
A decimal number can be split up into these components (in order):
(Optional) A sign character (either '+' or '-').
One of the following formats:
At least one digit, followed by a dot '.'.
At least one digit, followed by a dot '.', followed by at least one digit.
A dot '.', followed by at least one digit.
An integer can be split up into these components (in order):
(Optional) A sign character (either '+' or '-').
At least one digit.
For example, all the following are valid numbers: ["2", "0089", "-0.1", "+3.14", "4.", "-.9", "2e10", "-90E3", "3e+7", "+6e-1", "53.5e93", "-123.456e789"], while the following are not valid numbers: ["abc", "1a", "1e", "e3", "99e2.5", "--6", "-+3", "95a54e53"].
Given a string s, return true if s is a valid number.
Example 1:
Input: s = "0"
Output: true
Example 2:
Input: s = "e"
Output: false
Example 3:
Input: s = "."
Output: false
Example 4:
Input: s = ".1"
Output: true
Constraints:
1 <= s.length <= 20
s consists of only English letters (both uppercase and lowercase), digits (0-9), plus '+', minus '-', or dot '.'.
"""
class Solution:
def isNumber(self, s: str) -> bool:
def isPureDigit(s):
if not s:
return False
return s.isnumeric()
def isInt(s):
if not s:
return False
idx = 0
if s[0] == '+' or s[0] == '-':
idx = 1
return isPureDigit(s[idx:])
def isDecimal(s):
if len(s) <= 1:
return False
idx = s.find('.')
if idx == -1:
return False
start = 0
if s[0] == '+' or s[0] == '-':
start = 1
first, second = s[start:idx], s[idx + 1:]
if idx == 0:
return isPureDigit(second)
elif idx == len(s) - 1:
return isPureDigit(first)
else:
return (not first or isPureDigit(first)) and isPureDigit(second)
idx = -1
if 'e' in s:
idx = s.find('e')
elif 'E' in s:
idx = s.find('E')
if idx == -1:
return isInt(s) or isDecimal(s)
return (isInt(s[:idx]) or isDecimal(s[:idx])) and isInt(s[idx + 1:])
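# Sanity checks against the examples in the problem statement above:
#   Solution().isNumber("0")       -> True
#   Solution().isNumber("e")       -> False
#   Solution().isNumber(".")       -> False
#   Solution().isNumber(".1")      -> True
#   Solution().isNumber("53.5e93") -> True, while "95a54e53" -> False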
| 25.604167
| 261
| 0.517901
|
5abf235492964d7b36389cfe246dd09ba03dbb06
| 1,276
|
py
|
Python
|
rastervision/data/label_source/object_detection_label_source.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 3
|
2020-07-05T04:04:18.000Z
|
2021-02-05T16:19:55.000Z
|
rastervision/data/label_source/object_detection_label_source.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
rastervision/data/label_source/object_detection_label_source.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 1
|
2020-04-27T15:21:53.000Z
|
2020-04-27T15:21:53.000Z
|
import rastervision as rv
from rastervision.data.label import ObjectDetectionLabels
from rastervision.data.label_source import LabelSource
class ObjectDetectionLabelSource(LabelSource):
def __init__(self, vector_source, crs_transformer, class_map, extent):
"""Constructor.
Args:
vector_source: (VectorSource or str)
crs_transformer: CRSTransformer to convert from map coords in label
in GeoJSON file to pixel coords.
class_map: ClassMap used to infer class_ids from class_name
(or label) field
extent: Box used to filter the labels by extent
"""
if isinstance(vector_source, str):
provider = rv._registry.get_vector_source_default_provider(
vector_source)
vector_source = provider.construct(vector_source) \
.create_source(
crs_transformer=crs_transformer, extent=extent, class_map=class_map)
self.labels = ObjectDetectionLabels.from_geojson(
vector_source.get_geojson(), extent=extent)
def get_labels(self, window=None):
if window is None:
return self.labels
return ObjectDetectionLabels.get_overlapping(self.labels, window)
| 38.666667
| 88
| 0.67163
|
ecc447d40b146e618440e56c1aca1d467d407615
| 256
|
py
|
Python
|
onetoone/urls.py
|
x9sheikh/djangoDataBaseRelationships
|
b80ec61df2eaa31a34b150f5f185966bd90863be
|
[
"MIT"
] | null | null | null |
onetoone/urls.py
|
x9sheikh/djangoDataBaseRelationships
|
b80ec61df2eaa31a34b150f5f185966bd90863be
|
[
"MIT"
] | null | null | null |
onetoone/urls.py
|
x9sheikh/djangoDataBaseRelationships
|
b80ec61df2eaa31a34b150f5f185966bd90863be
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from django.contrib import admin
from django.urls import path,include
from . import views
urlpatterns = [
path('', views.onetoone, name='onetoone'),
path('detail/<int:college_id>/', views.detail, name='detail')
]
| 23.272727
| 65
| 0.722656
|
6c6dc74a4aeb1581f2da45b16d59d6e6a174f869
| 2,343
|
py
|
Python
|
check-confidences-on-noise.py
|
erwinvanthiel/ASL
|
1b8846919f4bcf7bf65881faf254395cb01f8ae3
|
[
"MIT"
] | null | null | null |
check-confidences-on-noise.py
|
erwinvanthiel/ASL
|
1b8846919f4bcf7bf65881faf254395cb01f8ae3
|
[
"MIT"
] | null | null | null |
check-confidences-on-noise.py
|
erwinvanthiel/ASL
|
1b8846919f4bcf7bf65881faf254395cb01f8ae3
|
[
"MIT"
] | null | null | null |
import os
import torch
from src.helper_functions.helper_functions import parse_args
from src.loss_functions.losses import AsymmetricLoss, AsymmetricLossOptimized
from src.models import create_model
import argparse
import matplotlib
import torchvision.transforms as transforms
from pgd import create_targeted_adversarial_examples
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from src.helper_functions.helper_functions import mAP, CocoDetection, CocoDetectionFiltered, CutoutPIL, ModelEma, add_weight_decay
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # USE GPU
########################## ARGUMENTS #############################################
parser = argparse.ArgumentParser(description='ASL MS-COCO Inference on a single image')
parser.add_argument('data', metavar='DIR', help='path to dataset', default='coco')
parser.add_argument('--model_path', type=str, default='mlc-model-epoch50')
parser.add_argument('--pic_path', type=str, default='./pics/test.jpg')
parser.add_argument('--model_name', type=str, default='tresnet_m')
parser.add_argument('--input_size', type=int, default=224)
parser.add_argument('--dataset_type', type=str, default='MS-COCO')
#IMPORTANT PARAMETER!
parser.add_argument('--th', type=float, default=0.5)
parser.add_argument('-b', '--batch-size', default=16, type=int,
metavar='N', help='mini-batch size (default: 16)')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 16)')
args = parse_args(parser)
########################## SETUP THE MODEL AND LOAD THE DATA #####################
# setup model
print('creating and loading the model...')
# state = torch.load(args.model_path, map_location='cpu')
args.num_classes = 80
model = create_model(args).cuda()
model_state = torch.load(args.model_path, map_location='cpu')
model.load_state_dict(model_state["state_dict"])
model.eval()
input_tensor = torch.rand(32,3,224,224).to(device)
pred = torch.sigmoid(model(input_tensor)).cpu().detach().numpy().sum(axis=0)
print(pred)
plt.bar(range(80), pred)
plt.xlabel("Label index")
plt.ylabel("confidence")
plt.title("Average confidence in negative sample")
# plt.savefig('average-confidences.png')
print(np.argsort(pred * -1))
| 37.190476
| 130
| 0.716603
|
6fff59e5f1fa82d9f2759db55fbe3b307364a4e3
| 2,446
|
py
|
Python
|
cfa_dns/__init__.py
|
migurski/DNS-Service
|
e3e2fa2f50fee5fc843b4067f6c227df42dc37f8
|
[
"0BSD"
] | 2
|
2015-04-24T17:22:26.000Z
|
2015-07-29T22:02:55.000Z
|
cfa_dns/__init__.py
|
migurski/DNS-Service
|
e3e2fa2f50fee5fc843b4067f6c227df42dc37f8
|
[
"0BSD"
] | 2
|
2015-04-27T23:00:08.000Z
|
2015-05-06T18:02:47.000Z
|
cfa_dns/__init__.py
|
migurski/DNS-Service
|
e3e2fa2f50fee5fc843b4067f6c227df42dc37f8
|
[
"0BSD"
] | 4
|
2015-04-25T00:10:39.000Z
|
2021-04-16T10:46:08.000Z
|
from sys import stderr
from csv import DictReader
from urlparse import urlparse
from os.path import dirname, join, basename
from os import environ
from flask import Blueprint, Flask
from .api import format_csv_row, hash_host_records, check_upstream, push_upstream
URL_REDIRECTS = 'URL', 'URL301'
allowed_types = ('A', 'CNAME', 'MX', 'AAAA', 'TXT', 'FRAME', 'NS') + URL_REDIRECTS
allowed_ttls = range(60, 172801)
cfadns = Blueprint('gloss', __name__)
def create_app(environ):
''' Check validity of hosts, push them live, create and return a Flask app.
'''
filename = join(dirname(__file__), '..', 'host-records.csv')
check_file(filename)
dns_api_base, dns_api_key = environ['DNS_API_BASE'], environ['DNS_API_KEY']
check_upstream(dns_api_base, dns_api_key)
with open(filename) as file:
host_records = list(DictReader(file))
push_upstream(dns_api_base, dns_api_key, host_records)
app = Flask(__name__)
app.config['DNS_API_BASE'] = dns_api_base
app.config['DNS_API_KEY'] = dns_api_key
app.register_blueprint(cfadns)
return app
def check_file(filename):
''' Check given file for valid host records collection.
Throw exceptions if a problem is found, otherwise return nothing.
'''
with open(filename) as file:
found_rows = list(DictReader(file))
hosts = []
# Are types and TTLs all as expected?
for (index, row) in enumerate(found_rows):
row.update(dict(source='{} row {}'.format(filename, index+1)))
if row['Type'] not in allowed_types:
raise ValueError('"{Type}" is a bad record type, {source}'.format(**row))
if int(row['TTL']) not in allowed_ttls:
raise ValueError('"{TTL}" is a bad TTL, {source}'.format(**row))
if row['Type'] in URL_REDIRECTS:
scheme = urlparse(row['Value']).scheme
if scheme not in ('http', 'https'):
raise ValueError('"{Value}" is a bad redirect, {source}'.format(**row))
elif row['Type'] == 'CNAME':
if not row['Value'].endswith('.'):
raise ValueError('"{Value}" is missing a closing period, {source}'.format(**row))
hosts.append(format_csv_row(row))
hash = hash_host_records(hosts)
print >> stderr, '{} checks out with hash "{}"'.format(basename(filename), hash)
from . import views
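# Example of a host record that passes check_file (values are hypothetical):
#   Type=CNAME, TTL=3600, Value=example.herokuapp.com.
# CNAME values must end with a period, TTLs must fall within 60..172800 seconds,
# and URL/URL301 redirects must point at an http or https URL.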
| 33.054054
| 97
| 0.636141
|
b73656bc27abac0df545c84c5b062fbdd33f9f53
| 8,522
|
py
|
Python
|
whoosh/filedb/structfile.py
|
archatas/whoosh
|
61822f611cc24572416c3e7601862e63af083428
|
[
"Apache-2.0"
] | 1
|
2017-06-29T07:11:20.000Z
|
2017-06-29T07:11:20.000Z
|
whoosh/filedb/structfile.py
|
archatas/whoosh
|
61822f611cc24572416c3e7601862e63af083428
|
[
"Apache-2.0"
] | null | null | null |
whoosh/filedb/structfile.py
|
archatas/whoosh
|
61822f611cc24572416c3e7601862e63af083428
|
[
"Apache-2.0"
] | 1
|
2020-11-15T14:23:30.000Z
|
2020-11-15T14:23:30.000Z
|
#===============================================================================
# Copyright 2009 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import mmap, os
from cPickle import dump as dump_pickle
from cPickle import load as load_pickle
from whoosh.system import _INT_SIZE, _USHORT_SIZE, _ULONG_SIZE, _FLOAT_SIZE
from whoosh.util import varint, read_varint, float_to_byte, byte_to_float
from whoosh.util.struct2 import calcsize, unpack, Struct
_SIZEMAP = dict((typecode, calcsize(typecode)) for typecode in "bBiIhHlLf")
_ORDERMAP = {"little": "<", "big": ">"}
# Struct functions
_types = (("sbyte", "b"), ("ushort", "H"), ("int", "i"),
("ulong", "L"), ("float", "f"))
_sbyte_struct = Struct("!b")
_ushort_struct = Struct("!H")
_int_struct = Struct("!i")
_uint_struct = Struct("!I")
_ulong_struct = Struct("!L")
_float_struct = Struct("!f")
pack_sbyte = _sbyte_struct.pack
pack_ushort = _ushort_struct.pack
pack_int = _int_struct.pack
pack_uint = _uint_struct.pack
pack_ulong = _ulong_struct.pack
pack_float = _float_struct.pack
unpack_sbyte = _sbyte_struct.unpack
unpack_ushort = _ushort_struct.unpack
unpack_int = _int_struct.unpack
unpack_uint = _uint_struct.unpack
unpack_ulong = _ulong_struct.unpack
unpack_float = _float_struct.unpack
# Main function
class StructFile(object):
"""Returns a "structured file" object that wraps the given file object and
provides numerous additional methods for writing structured data, such as
"write_varint" and "write_ulong".
"""
def __init__(self, fileobj, name=None, onclose=None, mapped=True):
self.file = fileobj
self._name = name
self.onclose = onclose
self.is_closed = False
for attr in ("read", "write", "tell", "seek"):
if hasattr(fileobj, attr):
setattr(self, attr, getattr(fileobj, attr))
# If mapped is True, set the 'map' attribute to a memory-mapped
# representation of the file. Otherwise, the fake 'map' that set up by
# the base class will be used.
if mapped and hasattr(fileobj, "mode") and "r" in fileobj.mode:
fd = fileobj.fileno()
self.size = os.fstat(fd).st_size
try:
self.map = mmap.mmap(fd, self.size, access=mmap.ACCESS_READ)
except OSError:
self._setup_fake_map()
else:
self._setup_fake_map()
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._name)
def flush(self):
"""Flushes the buffer of the wrapped file. This is a no-op if the
wrapped file does not have a flush method.
"""
if hasattr(self.file, "flush"):
self.file.flush()
def close(self):
"""Closes the wrapped file. This is a no-op if the wrapped file does
not have a close method.
"""
del self.map
if self.onclose:
self.onclose(self)
if hasattr(self.file, "close"):
self.file.close()
self.is_closed = True
def _setup_fake_map(self):
_self = self
class fakemap(object):
def __getitem__(self, slice):
if isinstance(slice, (int, long)):
_self.seek(slice)
return _self.read(1)
else:
_self.seek(slice.start)
return _self.read(slice.stop - slice.start)
self.map = fakemap()
def write_string(self, s):
"""Writes a string to the wrapped file. This method writes the length
of the string first, so you can read the string back without having to
know how long it was.
"""
self.write_varint(len(s))
self.file.write(s)
def write_string2(self, s):
self.write(pack_ushort(len(s)) + s)
def read_string(self):
"""Reads a string from the wrapped file.
"""
return self.file.read(self.read_varint())
def read_string2(self):
l = self.read_ushort()
return self.read(l)
def skip_string(self):
l = self.read_varint()
self.seek(l, 1)
def write_varint(self, i):
"""Writes a variable-length integer to the wrapped file.
"""
self.file.write(varint(i))
def read_varint(self):
"""Reads a variable-length encoded integer from the wrapped file.
"""
return read_varint(self.file.read)
def write_byte(self, n):
"""Writes a single byte to the wrapped file, shortcut for
``file.write(chr(n))``.
"""
self.file.write(chr(n))
def read_byte(self):
return ord(self.file.read(1))
def get_byte(self, position):
return ord(self.map[position])
def write_8bitfloat(self, f, mantissabits=5, zeroexp=2):
"""Writes a byte-sized representation of floating point value f to the
wrapped file.
:param mantissabits: the number of bits to use for the mantissa
(with the rest used for the exponent).
:param zeroexp: the zero point for the exponent.
"""
self.write_byte(float_to_byte(f, mantissabits, zeroexp))
def read_8bitfloat(self, mantissabits=5, zeroexp=2):
"""Reads a byte-sized representation of a floating point value.
:param mantissabits: the number of bits to use for the mantissa
(with the rest used for the exponent).
:param zeroexp: the zero point for the exponent.
"""
return byte_to_float(self.read_byte(), mantissabits, zeroexp)
    def write_pickle(self, obj, protocol=-1):
"""Writes a pickled representation of obj to the wrapped file.
"""
dump_pickle(obj, self.file, protocol)
def read_pickle(self):
"""Reads a pickled object from the wrapped file.
"""
return load_pickle(self.file)
def write_sbyte(self, n):
self.file.write(pack_sbyte(n))
def write_int(self, n):
self.file.write(pack_int(n))
def write_uint(self, n):
self.file.write(pack_uint(n))
def write_ushort(self, n):
self.file.write(pack_ushort(n))
def write_ulong(self, n):
self.file.write(pack_ulong(n))
def write_float(self, n):
self.file.write(pack_float(n))
def write_array(self, arry):
a = Struct("!" + arry.typecode * len(arry)).pack(*arry)
self.file.write(a)
def read_sbyte(self):
return unpack_sbyte(self.file.read(1))[0]
def read_int(self):
return unpack_int(self.file.read(_INT_SIZE))[0]
def read_uint(self):
return unpack_uint(self.file.read(_INT_SIZE))[0]
def read_ushort(self):
return unpack_ushort(self.file.read(_USHORT_SIZE))[0]
def read_ulong(self):
return unpack_ulong(self.file.read(_ULONG_SIZE))[0]
def read_float(self):
return unpack_float(self.file.read(_FLOAT_SIZE))[0]
def read_array(self, typecode, length):
packed = self.file.read(_SIZEMAP[typecode] * length)
return Struct("!" + typecode * length).unpack(packed)
def get_sbyte(self, position):
return unpack_sbyte(self.map[position:position + 1])[0]
def get_int(self, position):
return unpack_int(self.map[position:position + _INT_SIZE])[0]
def get_uint(self, position):
return unpack_uint(self.map[position:position + _INT_SIZE])[0]
def get_ushort(self, position):
return unpack_ushort(self.map[position:position + _USHORT_SIZE])[0]
def get_ulong(self, position):
return unpack_ulong(self.map[position:position + _ULONG_SIZE])[0]
def get_float(self, position):
return unpack_float(self.map[position:position + _FLOAT_SIZE])[0]
def get_array(self, position, typecode, length):
return unpack("!" + typecode * length,
self.map[position:position + _SIZEMAP[typecode] * length])
| 33.159533 | 80 | 0.624971 |
0c5b210dda2ccfb9cee17de7bd792e76b30582d3 | 16,927 | py | Python | vylint/vylint.py | sambacha/vylint | a2c6c3d7ce446f0c18e567aa4bf881f8ee2252ef | ["Apache-2.0"] | null | null | null | vylint/vylint.py | sambacha/vylint | a2c6c3d7ce446f0c18e567aa4bf881f8ee2252ef | ["Apache-2.0"] | null | null | null | vylint/vylint.py | sambacha/vylint | a2c6c3d7ce446f0c18e567aa4bf881f8ee2252ef | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import fileinput
import os
import re
import shlex
import subprocess
import sys
import vylint
from vylint import messages
MESSAGES = messages.MESSAGES
def is_continuation(line):
return re.search(r"\\\s*$", line)
def check_for_do(line, report):
if not is_continuation(line):
match = re.match(r"^\s*(for|while|until)\s", line)
if match:
operator = match.group(1).strip()
if operator == "for":
# "for i in ..." and "for ((" is vyper, but
# "for (" is likely from an embedded awk script,
# so skip it
if re.search(r"for \([^\(]", line):
return
if not re.search(r";\s*do$", line):
report.print_error((MESSAGES["E010"].msg % operator), line)
def check_if_then(line, report):
if not is_continuation(line):
if re.search(r"^\s*(el)?if \[", line):
if not re.search(r";\s*then$", line):
report.print_error(MESSAGES["E011"].msg, line)
def check_no_trailing_whitespace(line, report):
if re.search(r"[ \t]+$", line):
report.print_error(MESSAGES["E001"].msg, line)
def check_no_long_lines(line, report, max_line_length):
if len(line.rstrip("\r\n")) > max_line_length:
report.print_error(MESSAGES["E006"].msg, line)
def check_indents(logical_line, report):
# this is rather complex to handle argument offset indenting;
# primarily done by emacs. If there is an argument, it will try
# to line up the following arguments underneath it, e.g.
# foobar_cmd bar baz \
# moo boo
# Thus the offset in this case might not be a strict multiple of 4
# Find the offset of the first argument of the command (if it has
# one)
m = re.search(
r"^(?P<indent>[ \t]+)?(?P<cmd>\S+)(?P<ws>\s+)(?P<arg>\S+)", logical_line[0]
)
arg_offset = None
if m:
arg_offset = len(m.group("indent")) if m.group("indent") else 0
arg_offset += len(m.group("cmd")) + len(m.group("ws"))
# go through each line
for lineno, line in enumerate(logical_line):
m = re.search(r"^(?P<indent>[ \t]+)", line)
if m:
# no tabs, only spaces
if re.search(r"\t", m.group("indent")):
report.print_error(MESSAGES["E002"].msg, line)
offset = len(m.group("indent"))
# the first line and lines without an argument should be
# offset by 4 spaces
if (lineno == 0) or (arg_offset is None):
if (offset % 4) != 0:
report.print_error(MESSAGES["E003"].msg, line)
else:
# other lines are allowed to line up with the first
# argument, or be multiple-of 4 spaces
if offset != arg_offset and (offset % 4) != 0:
report.print_error(MESSAGES["E003"].msg, line)
def check_function_decl(line, report):
failed = False
if line.startswith("function"):
if not re.search(r"^function [\w-]* \{$", line):
failed = True
else:
# catch the case without "function", e.g.
# things like '^foo() {'
if re.search(r"^\s*?\(\)\s*?\{", line):
failed = True
if failed:
report.print_error(MESSAGES["E020"].msg, line)
def starts_heredoc(line):
# note, watch out for <<EOF and <<'EOF' ; quotes in the
# delimiter are part of the syntax
m = re.search(r"[^<]<<\s*([\'\"]?)(?P<token>\w+)([\'\"]?)", line)
return m.group("token") if m else False
def end_of_heredoc(line, token):
return token and re.search(r"^%s\s*$" % token, line)
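# Illustrative sketch (not part of the original vylint source): the two
# helpers above cooperate to find the start token of a heredoc and to detect
# the line that terminates it. The sample lines below are made up.
def _demo_heredoc_detection():
    sample = ["cat <<EOF", "heredoc body line", "EOF"]
    token = starts_heredoc(sample[0])
    assert token == "EOF"
    assert not end_of_heredoc(sample[1], token)
    assert end_of_heredoc(sample[2], token)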
def check_arithmetic(line, report):
if "$[" in line:
report.print_error(MESSAGES["E041"].msg, line)
def check_bare_arithmetic(line, report):
if line.lstrip().startswith("(("):
report.print_error(MESSAGES["E043"].msg, line)
def check_local_subshell(line, report):
# XXX: should we increase the string checking to see if the $( is
# anywhere within a string being set? Risk of false positives?
if line.lstrip().startswith("local ") and any(
s in line for s in ("=$(", "=`", '="$(', '="`')
):
report.print_error(MESSAGES["E042"].msg, line)
def check_hashbang(line, filename, report):
# this check only runs on the first line
# the check is referenced as `hashbang` because `#!/usr/bin/env vyper` could be used
# note that, as you will see below, no real vyper parser is used here;
# these checks are plain regex-based heuristics.
#
if (
not filename.endswith(".vy")
and not line.startswith("# @version")
and not os.path.basename(filename).startswith(".")
):
report.print_error(MESSAGES["E005"].msg, line)
def check_conditional_expression(line, report):
# without a complete vyper syntax parser we fall back on shlex.
# shlex is pretty helpful in getting us a token stream we can walk to
# find this pattern. It does however have issues with
# unterminated quotes on multi-line strings (e.g.)
#
# foo="bar <-- we only see this bit in "line"
# baz"
#
# So we're just going to ignore parser failures here and move on.
# A proper parser may replace this heuristic eventually.
#
try:
toks = shlex.shlex(line)
toks.wordchars = "[]=~"
toks = list(toks)
except ValueError:
return
in_single_bracket = False
for tok in toks:
if tok == "[":
in_single_bracket = True
elif tok in ("=~", "<", ">") and in_single_bracket:
report.print_error(MESSAGES["E044"].msg, line)
elif tok == "]":
in_single_bracket = False
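# Illustrative sketch (not part of the original vylint source): with
# wordchars narrowed to "[]=~", shlex emits "[", "=~" and "]" as their own
# tokens, which is what the bracket/operator walk above relies on.
def _demo_shlex_wordchars():
    toks = shlex.shlex("[ x =~ y ]")
    toks.wordchars = "[]=~"
    toks = list(toks)
    assert "[" in toks and "=~" in toks and "]" in toks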
# next we check character encoding
# only plain ASCII is expected here; UTF-8 is not allowed
def check_syntax(filename, report):
# we have to trick this into being treated as a bash script, so
# we run the file through "bash -n" to catch basic syntax errors and
# other warnings, and hope the output format stays stable enough to parse.
matches = []
# sample lines we want to match:
# foo.vy: line 4: warning: \
# here-document at line 1 delimited by end-of-file (wanted `EOF')
# foo.vy: line 9: syntax error: unexpected end of file
# foo.vy: line 7: syntax error near unexpected token `}'
#
# i.e. consistency with ":"'s isn't constant, so just do our
# best...
r = re.compile("^(?P<file>.*): line (?P<lineno>[0-9]+): (?P<error>.*)")
# we are parsing the error message, so force it to ignore the
# system locale so we don't get messages in another language
bash_environment = os.environ
bash_environment["LC_ALL"] = "C"
proc = subprocess.Popen(
["bash", "-n", filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=bash_environment,
universal_newlines=True,
)
outputs = proc.communicate()
for line in outputs[1].split("\n"):
m = r.match(line)
if m:
matches.append(m)
for m in matches:
if "syntax error" in m.group("error"):
msg = "%s: %s" % (MESSAGES["E040"].msg, m.group("error"))
report.print_error(
msg, filename=filename, filelineno=int(m.group("lineno"))
)
# Matching output from bash warning about here-documents not
# ending.
# FIXME: are there other warnings that might come out
# with "bash -n"? A quick scan of the source code suggests
# no, but there might be other interesting things we could
# catch.
if "warning:" in m.group("error"):
if "delimited by end-of-file" in m.group("error"):
start = re.match("^.*line (?P<start>[0-9]+).*$", m.group("error"))
report.print_error(
MESSAGES["E012"].msg % int(start.group("start")),
filename=filename,
filelineno=int(m.group("lineno")),
)
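# Illustrative sketch (not part of the original vylint source): the named
# groups in the regex used by check_syntax() pull the file, line number and
# message out of a typical "bash -n" diagnostic, mirroring the sample lines
# quoted in the comments above.
def _demo_bash_error_regex():
    r = re.compile("^(?P<file>.*): line (?P<lineno>[0-9]+): (?P<error>.*)")
    m = r.match("foo.vy: line 9: syntax error: unexpected end of file")
    assert m and m.group("lineno") == "9" and "syntax error" in m.group("error")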
class VyperRun(object):
def __init__(self):
self.error_count = 0
self.error_list = None
self.ignore_list = None
self.warning_count = 0
self.warning_list = None
def register_ignores(self, ignores):
if ignores:
self.ignore_list = "^(" + "|".join(ignores.split(",")) + ")"
def register_warnings(self, warnings):
if warnings:
self.warning_list = "^(" + "|".join(warnings.split(",")) + ")"
def register_errors(self, errors):
if errors:
self.error_list = "^(" + "|".join(errors.split(",")) + ")"
def should_ignore(self, error):
return self.ignore_list and re.search(self.ignore_list, error)
def should_warn(self, error):
# if in the errors list, overrides warning level
if self.error_list and re.search(self.error_list, error):
return False
if messages.is_default_warning(error):
return True
return self.warning_list and re.search(self.warning_list, error)
def print_error(self, error, line="", filename=None, filelineno=None):
if self.should_ignore(error):
return
warn = self.should_warn(error)
if not filename:
filename = fileinput.filename()
if not filelineno:
filelineno = fileinput.filelineno()
if warn:
self.warning_count = self.warning_count + 1
else:
self.error_count = self.error_count + 1
self.log_error(error, line, filename, filelineno, warn)
def log_error(self, error, line, filename, filelineno, warn=False):
# following pycodestyle/pep8 default output format
# https://github.com/PyCQA/pycodestyle/blob/master/pycodestyle.py#L108
print(
"%(filename)s:%(filelineno)s:1: %(error)s"
% {
"filename": filename,
"filelineno": filelineno,
"warn": "W" if warn else "E",
"error": error.replace(":", "", 1),
"line": line.rstrip("\n"),
}
)
def check_files(self, files, verbose, max_line_length=79):
logical_line = ""
token = False
# NOTE(mrodden): magic; replace with proper
# report class when necessary
report = self
for fname in files:
# reset world
in_heredoc = False
in_continuation = False
# simple syntax checking, as files can pass style but still cause
# syntax errors when you try to run them.
check_syntax(fname, report)
for line in fileinput.input(fname):
if fileinput.isfirstline():
check_hashbang(line, fileinput.filename(), report)
if verbose:
print("Running vylint on %s" % fileinput.filename())
# Don't run any tests on comment lines (but remember
# inside a heredoc this might be part of the syntax of
# an embedded script, just ignore that)
if line.lstrip().startswith("#") and not in_heredoc:
continue
# Strip trailing comments. From bash:
#
# a word beginning with # causes that word and all
# remaining characters on that line to be ignored.
# ...
# A character that, when unquoted, separates
# words. One of the following: | & ; ( ) < > space
# tab
#
# for simplicity, we strip inline comments by
# matching just '<space>#'.
if not in_heredoc:
ll_split = line.split(" #", 1)
if len(ll_split) > 1:
line = ll_split[0].rstrip()
# see if this starts a heredoc
if not in_heredoc:
token = starts_heredoc(line)
if token:
in_heredoc = True
logical_line = [line]
continue
# see if this starts a continuation
if not in_continuation:
if is_continuation(line):
in_continuation = True
logical_line = [line]
continue
# if we are in a heredoc or continuation, just loop
# back and keep buffering the lines into
# "logical_line" until the end of the
# heredoc/continuation.
if in_heredoc:
logical_line.append(line)
if not end_of_heredoc(line, token):
continue
else:
in_heredoc = False
# FIXME: if we want to do something with
# heredocs in the future, then the whole thing
# is now stored in logical_line. for now,
# skip
continue
elif in_continuation:
logical_line.append(line)
if is_continuation(line):
continue
else:
in_continuation = False
else:
logical_line = [line]
check_indents(logical_line, report)
# at this point, logical_line is an array that holds
# the whole continuation. XXX : historically, we've
# just handled every line in a continuation
# separately. Stick with what works...
for line in logical_line:
check_no_trailing_whitespace(line, report)
check_no_long_lines(line, report, max_line_length)
check_for_do(line, report)
check_if_then(line, report)
check_function_decl(line, report)
check_arithmetic(line, report)
check_local_subshell(line, report)
check_bare_arithmetic(line, report)
check_conditional_expression(line, report)
# finished processing the file
# last line should always end with a newline
if not line.endswith("\n"):
report.print_error(MESSAGES["E004"].msg, line)
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="A Vyper Contract style checker")
parser.add_argument(
"files", metavar="file", nargs="*", help="files to scan for errors"
)
parser.add_argument("-i", "--ignore", help="Rules to ignore")
parser.add_argument("-w", "--warn", help="Rules to always warn (rather than error)")
parser.add_argument(
"-e", "--error", help="Rules to always error (rather than warn)"
)
parser.add_argument(
"--max-line-length", default=79, type=int, help="Max line length"
)
parser.add_argument("-v", "--verbose", action="store_true", default=False)
parser.add_argument(
"--version",
action="store_true",
help="show vylint version number and exit",
default=False,
)
parser.add_argument("-s", "--show", action="store_true", default=False)
opts = parser.parse_args(args)
if opts.version:
print("vylint: %s" % vylint.__version__)
sys.exit(0)
if opts.show:
messages.print_messages()
sys.exit(0)
files = opts.files
if not files:
parser.print_usage()
return 1
run = VyperRun()
run.register_ignores(opts.ignore)
run.register_warnings(opts.warn)
run.register_errors(opts.error)
try:
run.check_files(files, opts.verbose, opts.max_line_length)
except IOError as e:
print("vylint: %s" % e)
return 1
if run.warning_count > 0:
print("%d vylint warning(s) found" % run.warning_count)
if run.error_count > 0:
print("%d vylint error(s) found" % run.error_count)
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
| 34.265182 | 89 | 0.564601 |
61fbc326ad9fe727bd6ce85614f24da014ab40d8 | 237 | py | Python | Test2/PlaceHolder.py | leejw51/BumblebeeNet | fd9bedbfaad9bb94bcb4f1dc44eec8125bb9c690 | ["MIT"] | null | null | null | Test2/PlaceHolder.py | leejw51/BumblebeeNet | fd9bedbfaad9bb94bcb4f1dc44eec8125bb9c690 | ["MIT"] | null | null | null | Test2/PlaceHolder.py | leejw51/BumblebeeNet | fd9bedbfaad9bb94bcb4f1dc44eec8125bb9c690 | ["MIT"] | null | null | null |
import tensorflow as tf
a = tf.placeholder( tf.int32, [3])
b = tf.constant(2)
x2_op = a * b
session = tf.Session()
r1 = session.run(x2_op , feed_dict={ a:[1,2,3]} )
print(r1)
r2 = session.run(x2_op, feed_dict={a:[10,20,10]} )
print(r2)
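# Note (added commentary, not in the original file): this snippet uses
# TensorFlow 1.x graph mode. `a` is a symbolic placeholder with no value until
# run time; each session.run() call substitutes a concrete vector via
# feed_dict, so the same multiply-by-2 graph yields [2, 4, 6] and then
# [20, 40, 20].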
| 21.545455 | 50 | 0.649789 |
b62c2fdb6f8463c47d88d96fd78314067273e682 | 20,668 | py | Python | tests/samples_tests/smoke_tests/common/samples_common_test_clas.py | kurylo/openvino | 4da0941cd2e8f9829875e60df73d3cd01f820b9c | ["Apache-2.0"] | 5 | 2020-04-20T10:05:50.000Z | 2020-04-22T13:08:07.000Z | tests/samples_tests/smoke_tests/common/samples_common_test_clas.py | kurylo/openvino | 4da0941cd2e8f9829875e60df73d3cd01f820b9c | ["Apache-2.0"] | 33 | 2021-09-23T04:14:30.000Z | 2022-01-24T13:21:32.000Z | tests/samples_tests/smoke_tests/common/samples_common_test_clas.py | kurylo/openvino | 4da0941cd2e8f9829875e60df73d3cd01f820b9c | ["Apache-2.0"] | 11 | 2021-11-09T00:51:40.000Z | 2021-11-10T12:04:16.000Z |
"""
Copyright (C) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
import itertools
import shutil
import sys
import csv
import re
import pytest
from glob import iglob
import numpy as np
from pathlib import Path
import requests
import zipfile
import logging as log
from common.common_utils import shell
from distutils import spawn
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
def search_model_path_recursively(config_key, model_name):
search_pattern = config_key + '/**/' + model_name
path_found = list(iglob(search_pattern, recursive=True))
if len(path_found) == 1:
return path_found[0]
elif len(path_found) == 0:
raise FileNotFoundError("File not found for pattern {}".format(search_pattern))
else:
raise ValueError("More than one file with {} name".format(model_name))
class Environment:
"""
Environment used by tests.
:attr env: environment dictionary. populated dynamically from environment
configuration file.
"""
env = {}
@classmethod
def abs_path(cls, env_key, *paths):
"""Construct absolute path by appending paths to environment value.
:param cls: class
:param env_key: Environment.env key used to get the base path
:param paths: paths to be appended to Environment.env value
:return: absolute path string where Environment.env[env_key] is
appended with paths
"""
return str(Path(cls.env[env_key], *paths))
def get_tests(cmd_params, use_device=True, use_batch=False):
# Several keys:
# use_device
# use_batch
# you should specify 'False' when the sample does not require the '-d' or '-b' keys.
# Example: use_device=False for all 'hello_*' samples
# use batch = False for all samples except for hello_shape_infer_ssd
new_cmd_params = []
cmd_keys = list(cmd_params.keys())
devices = os.environ["TEST_DEVICE"].split(';') if os.environ.get("TEST_DEVICE") else ["CPU"]
# You can pass keys (like d, d_lpr ..) via use_device list. And the topology executes only on these devices
# Use this option when a topology isn't supported in some plugin. By default, CPU only.
if isinstance(use_device, list):
for dev_key in use_device:
dev_list = np.array(cmd_params[dev_key])
for _dev in dev_list:
if not _dev in devices and _dev in cmd_params[dev_key]:
cmd_params[dev_key].remove(_dev)
use_device = False
for it in itertools.product(*[cmd_params[key] for key in cmd_params.keys()]):
test = {}
for key in zip(cmd_keys, it):
test.update({key[0]: key[1]})
# Fill array with images according to batch size
if 'batch' in test and use_batch == False:
images = ""
for i in range(test['batch']):
images += test['i'] + " "
images = images.rstrip()
test['i'] = images
# Remove batch attr
del test['batch']
# Delete bitstream param:
if 'd' in test and 'bitstream' in test:
del test['bitstream']
# Add new tests params
new_cmd_params.append(test)
test_args = []
for i in range(len(new_cmd_params)):
# key "use_device" is to run sample with device, exception: helo_classification_sample
if use_device:
for d in devices:
new_d = {}
new_d.update(new_cmd_params[i])
new_d.update({'d': d})
test_args.append(new_d)
else:
test_args.append(new_cmd_params[i])
return test_args
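# Illustrative sketch (hypothetical helper, not part of the original test
# suite): get_tests() above expands a dict of option lists into one flat dict
# per combination using itertools.product. The parameter values are made up.
def _demo_param_expansion():
    params = {'m': ['model_a.xml', 'model_b.xml'], 'i': ['cat.bmp']}
    combos = [dict(zip(params.keys(), values))
              for values in itertools.product(*params.values())]
    assert combos == [{'m': 'model_a.xml', 'i': 'cat.bmp'},
                      {'m': 'model_b.xml', 'i': 'cat.bmp'}]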
def getting_samples_data_zip(url, samples_path, size_of_chunk=128):
if os.path.exists(samples_path) or os.path.exists(samples_path[:-4]):
return
try:
print("\nStart downloading samples_smoke_tests_data.zip...")
samples_request = requests.get(url, stream=True)
with open(samples_path, 'wb') as samples_file:
for elem in samples_request.iter_content(chunk_size=size_of_chunk):
samples_file.write(elem)
print("\nsamples_smoke_tests_data.zip downloaded successfully")
samples_file.close()
print("\nExtracting of samples_smoke_tests_data.zip...")
with zipfile.ZipFile(samples_path, 'r') as samples_zip:
samples_zip.extractall(Environment.env['smoke_tests_path'])
nameFolder = str(Environment.env['samples_data_zip'])[Environment.env['samples_data_zip'].rfind('/')+1:][:-4]
smoke_tests_path = os.path.join(Environment.env['smoke_tests_path'])
if os.path.exists(os.path.join(smoke_tests_path,nameFolder)):
os.rename(os.path.join(smoke_tests_path, nameFolder), os.path.join(smoke_tests_path, 'samples_smoke_tests_data') )
if os.path.exists(samples_path):
print("\nRemoving samples_smoke_tests_data.zip...")
os.remove(samples_path)
except Exception:
print(f"Exception during downloading samples_smoke_tests_data.zip")
class SamplesCommonTestClass():
@classmethod
def made_executable_path(cls, path1, path2, sample_type='C++'):
executable_path = os.path.join(path1, path2, path2) if 'python' in sample_type.lower() \
else os.path.join(path1, path2)
is_windows = sys.platform.startswith('win')
if 'python' in sample_type.lower():
executable_path += '.py'
if is_windows:
executable_path = 'python ' + executable_path
else:
executable_path = 'python3 ' + executable_path
elif 'c' in sample_type.lower() and not 'c++' in sample_type.lower():
executable_path += '_c'
if is_windows and not 'python' in sample_type.lower():
executable_path += '.exe'
# This exception is made for benchmark_app, because it is located in a different place.
if 'benchmark_app' in path2 and 'python' in sample_type.lower():
executable_path = spawn.find_executable(str('benchmark_app'))
# if not hasattr(cls, 'executable_path'):
cls.executable_path = executable_path
@staticmethod
def reset_models_path(model):
pathList = model.split(os.sep)
modelName = pathList[len(pathList)-1]
precision = pathList[len(pathList)-2]
for root, subFolder, files in os.walk(Environment.env['models_path']):
for item in files:
if item.endswith(modelName) :
if precision in root :
model = str(os.path.join(root,item))
else :
model = os.path.join(Environment.env['models_path'], model)
return model
@staticmethod
def join_env_path(param, executable_path, complete_path=True):
gpu_lib_path = os.path.join(os.environ.get('IE_APP_PATH'), 'lib')
if 'i' in param:
# If batch > 1, then concatenate images
if ' ' in param['i']:
param['i'] = param['i'].split(' ')
elif complete_path:
param['i'] = list([param['i']])
for k in param.keys():
if ('i' == k) and complete_path:
param['i'] = [os.path.join(Environment.env['test_data'], e) for e in param['i']]
param['i'] = ' '.join(param['i'])
elif ('ref_m' == k):
param['ref_m'] = SamplesCommonTestClass.reset_models_path(param['ref_m'])
elif ('m' == k):
param['m'] = SamplesCommonTestClass.reset_models_path(param['m'])
elif ('m_ag' == k):
param['m_ag'] = SamplesCommonTestClass.reset_models_path(param['m_ag'])
elif ('m_hp' == k):
param['m_hp'] = SamplesCommonTestClass.reset_models_path(param['m_hp'])
elif ('m_va' == k):
param['m_va'] = SamplesCommonTestClass.reset_models_path(param['m_va'])
elif ('m_lpr' == k):
param['m_lpr'] = SamplesCommonTestClass.reset_models_path(param['m_lpr'])
elif ('m_em' == k):
param['m_em'] = SamplesCommonTestClass.reset_models_path(param['m_em'])
elif ('m_pa' == k):
param['m_pa'] = SamplesCommonTestClass.reset_models_path(param['m_pa'])
elif ('m_reid' == k):
param['m_reid'] = SamplesCommonTestClass.reset_models_path(param['m_reid'])
elif ('m_fd' == k):
param['m_fd'] = SamplesCommonTestClass.reset_models_path(param['m_fd'])
elif ('m_act' == k):
param['m_act'] = SamplesCommonTestClass.reset_models_path(param['m_act'])
elif ('m_lm' == k):
param['m_lm'] = SamplesCommonTestClass.reset_models_path(param['m_lm'])
elif ('m_det' == k):
param['m_det'] = SamplesCommonTestClass.reset_models_path(param['m_det'])
elif ('m_td' == k):
param['m_td'] = SamplesCommonTestClass.reset_models_path(param['m_td'])
elif ('m_tr' == k):
param['m_tr'] = SamplesCommonTestClass.reset_models_path(param['m_tr'])
elif ('m_en' == k):
param['m_en'] = SamplesCommonTestClass.reset_models_path(param['m_en'])
elif ('m_de' == k):
param['m_de'] = SamplesCommonTestClass.reset_models_path(param['m_de'])
elif ('l' == k and 'pascal_voc_classes' in param['l']):
param['l'] = os.path.join(Environment.env['test_data'], param['l'])
elif ('pp' == k):
param['pp'] = gpu_lib_path
elif ('r' == k) and complete_path:
if len(param['r']) > 0:
param['r'] = os.path.join(Environment.env['test_data'], param['r'])
elif ('o' == k) and complete_path:
param['o'] = os.path.join(Environment.env['out_directory'], param['o'])
elif ('wg' == k):
param['wg'] = os.path.join(Environment.env['out_directory'], param['wg'])
elif ('we' == k):
param['we'] = os.path.join(Environment.env['out_directory'], param['we'])
elif ('fg' == k):
param['fg'] = os.path.join(Environment.env['test_data'], param['fg'])
elif ('labels' == k):
label_folder = os.path.dirname(executable_path.split(' ')[-1])
param['labels'] = os.path.join(label_folder, param['labels'])
elif ('lb') == k:
param['lb'] = os.path.join(Environment.env['test_data'], param['lb'])
@staticmethod
def get_cmd_line(param, use_preffix=True, long_hyphen=None):
if long_hyphen is None:
long_hyphen = []
line = ''
for key in sorted(param.keys()):
if use_preffix and any([x for x in long_hyphen if key == x]):
line += '--{} {} '.format(key, param[key])
elif use_preffix and key not in long_hyphen:
line += '-{} {} '.format(key, param[key])
elif not use_preffix:
line += '{} '.format(param[key])
return line
@staticmethod
def check_is_perf(stdout):
# This function checks if FPS appears in stdout. If yes, this sample needs to be run for performance
for line in stdout:
if 'fps' in line.lower():
return True
return False
@staticmethod
def check_has_niter(param):
# Check if niter is already in params, i.e. it was set before
if 'niter' in param:
return True
return False
@staticmethod
def find_fps(stdout):
stdout = stdout.split('\n')
for line in stdout:
if 'fps' in line.lower():
return float(re.findall(r"\d+\.\d+", line)[0])
@staticmethod
def write_csv(sample_name, sample_type, cmd_perf, fps_perf):
csv_path = Environment.env['perf_csv_name']
with open(csv_path, 'a', newline='') as f:
perf_writer = csv.writer(f, delimiter='|', quotechar='|', quoting=csv.QUOTE_MINIMAL)
perf_writer.writerow([sample_name, sample_type, cmd_perf.rstrip(), fps_perf])
@staticmethod
def get_empty_cmd_line(param, use_preffix=True, long_hyphen=None):
line = ''
return line
@staticmethod
def get_hello_cmd_line(param, use_preffix=True, long_hyphen=None):
line = ''
for key in ['m', 'i', 'd']:
if key in param:
if use_preffix:
line += '-{} {} '.format(key, param[key])
else:
line += '{} '.format(param[key])
return line
@staticmethod
def get_hello_shape_cmd_line(param, use_preffix=True, long_hyphen=None):
line = ''
for key in ['m', 'i', 'd', 'batch']:
if key in param:
if use_preffix:
line += '-{} {} '.format(key, param[key])
else:
line += '{} '.format(param[key])
return line
@staticmethod
def get_hello_nv12_cmd_line(param, use_preffix=True, long_hyphen=None):
line = ''
for key in ['m', 'i', 'size', 'd']:
if key in param:
if use_preffix:
line += '-{} {} '.format(key, param[key])
else:
line += '{} '.format(param[key])
return line
@classmethod
def setup_class(cls):
getting_samples_data_zip(Environment.env['samples_data_zip'], Environment.env['samples_path'])
assert os.environ.get('IE_APP_PATH') is not None, "IE_APP_PATH environment variable is not specified!"
assert os.path.exists(Environment.env['models_path']), \
"Path for public models {} is not exist!".format(Environment.env['models_path'])
assert os.path.exists(Environment.env['test_data']), \
"Path for test data {} is not exist!".format(Environment.env['test_data'])
cls.output_dir = Environment.env['out_directory']
def _test(self, param, use_preffix=True, get_cmd_func=None, get_shell_result=False, long_hyphen=None, complete_path=True):
"""
:param param:
:param use_preffix: use it when sample doesn't require keys (i.e. hello_classification <path_to_model> <path_to_image>
instead of hello_classification -m <path_to_model> -i <path_to_image>)
:param get_cmd_func: to use specific cmd concatenate function, again for hello_request_classification sample
:param get_shell_result: to return the result of sample running (retcode, strout, stderr) directly, \
without failing inside _test function. Needed for negative test cases checking \
(e.g. error messages validation)
:param long_hyphen: to concatenate cmd param with '--', instead of '-', example: instance_segmentation_demo --labels
:return:
"""
# Copy param to another variable, because we need to keep the original parameters unchanged
param_cp = dict(param)
sample_type = param_cp.get('sample_type', "C++")
if 'python' in sample_type.lower():
assert os.environ.get('IE_APP_PYTHON_PATH') is not None, \
"IE_APP_PYTHON_PATH environment variable is not specified!"
self.made_executable_path(os.environ.get('IE_APP_PYTHON_PATH'), self.sample_name,
sample_type=sample_type)
else:
self.made_executable_path(os.environ.get('IE_APP_PATH'), self.sample_name, sample_type=sample_type)
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
if 'bitstream' in param_cp:
del param_cp['bitstream']
if 'precision' in param_cp:
del param_cp['precision']
if get_cmd_func is None:
get_cmd_func = self.get_cmd_line
self.join_env_path(param_cp, executable_path=self.executable_path, complete_path=complete_path)
# Updating all attributes in the original dictionary (param), because param_cp was changed by join_env_path
for key in param.keys():
if key in param_cp:
param[key] = param_cp[key]
if 'sample_type' in param_cp:
del param_cp['sample_type']
cmd_line = get_cmd_func(param_cp, use_preffix=use_preffix, long_hyphen=long_hyphen)
log.info("Running command: {} {}".format(self.executable_path, cmd_line))
retcode, stdout, stderr = shell([self.executable_path, cmd_line])
# Execute performance:
if Environment.env['performance'] and retcode == 0:
perf_iter = int(Environment.env['performance'])
# Check if samples are for performance testing: if FPS in output
is_perf = self.check_is_perf(stdout.split('\n'))
is_niter = self.check_has_niter(param_cp)
if not is_perf:
# Skipping all tests for this sample, because none of them are ready for performance.
# Add name of sample to global pytest variable, then skip it in setup method
if 'list_of_skipped_samples' in Environment.env:
Environment.env['list_of_skipped_samples'].append(self.sample_name)
else:
Environment.env.update({'list_of_skipped_samples': [self.sample_name]})
pytest.skip('[INFO] Sample {} not executed for performance'.format(self.executable_path))
else:
log.info('Running performance for {} iterations'.format(perf_iter))
# Perf_iter = 0 when it isn't necessary to add the niter key
if perf_iter > 0:
if is_niter:
log.warning('Changed value of niter param to {}'.format(perf_iter))
param_cp['niter'] = perf_iter
else:
log.warning('Added key: niter to param with value: {}'.format(perf_iter))
param_cp.update({'niter': perf_iter})
cmd_perf = get_cmd_func(param_cp, use_preffix=use_preffix, long_hyphen=long_hyphen)
retcode_perf, stdout_perf, stderr_perf = shell([self.executable_path, cmd_perf])
if (retcode_perf != 0):
log.error(stderr_perf)
assert retcode_perf == 0, "Sample execution for performance failed"
fps_perf = self.find_fps(stdout_perf)
self.write_csv(sample_name=self.sample_name, sample_type=sample_type, cmd_perf=cmd_perf, fps_perf=fps_perf)
log.info('Perf results: {}'.format(fps_perf))
if get_shell_result:
return retcode, stdout, stderr
# Check return code
if (retcode != 0):
log.error(stderr)
assert retcode == 0, "Sample execution failed"
return stdout
def setup_method(self):
"""
Clean up IRs and npy files from self.output_dir if exist
And skip several test for performance
:return: """
if os.path.exists(self.output_dir):
shutil.rmtree(self.output_dir)
filenames = glob.glob('out*.bmp')
[os.remove(fn) for fn in filenames]
# Skip samples that are not for performance:
if Environment.env['performance'] and 'list_of_skipped_samples' in Environment.env and \
self.sample_name in Environment.env['list_of_skipped_samples']:
pytest.skip('[Skip from setup] Sample {} not executed for performance'.format(self.sample_name))
def teardown_method(self):
"""
Clean up IRs and npy files from self.output_dir if exist
:return: """
is_save = getattr(self, 'save', None)
if not is_save and os.path.exists(self.output_dir):
shutil.rmtree(self.output_dir)
filenames = glob.glob('out*.bmp')
[os.remove(fn) for fn in filenames]
| 45.028322 | 126 | 0.600397 |
6c4c9855cf8c880afddcef1bce1f99cab6fc8dc4 | 40,074 | py | Python | tensorflow/python/framework/tensor_util.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | ["Apache-2.0"] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/python/framework/tensor_util.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | ["Apache-2.0"] | 88 | 2020-11-24T08:18:10.000Z | 2022-03-25T20:28:30.000Z | tensorflow/python/framework/tensor_util.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | ["Apache-2.0"] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.types import core
from tensorflow.python.types import internal
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Fallback in case fast_tensor_util is not properly compiled.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
return np.asarray(x, dtype=np.float16).view(np.uint16).item()
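# Illustrative sketch (hypothetical helper, not part of the original
# TensorFlow source): half_val stores raw IEEE-754 half-precision bit
# patterns, so viewing the same 16 bits as uint16 and back is lossless.
def _DemoFloat16BitRoundTrip():
  bits = ExtractBitsFromFloat16(1.5)  # 1.5 encodes as 0x3E00
  assert bits == 0x3E00
  assert np.uint16(bits).view(np.float16).item() == 1.5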
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.half_val.extend(
[ExtractBitsFromFloat16(x) for x in proto_values])
def _MediumAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
# TODO: Remove the conversion if cython supports np.float16_t
fast_tensor_util.AppendFloat16ArrayToTensorProto(
tensor_proto,
np.asarray(proto_values, dtype=np.float16).view(np.uint16))
def ExtractBitsFromBFloat16(x):
return np.asarray(
x, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16).item()
def SlowAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.half_val.extend(
[ExtractBitsFromBFloat16(x) for x in proto_values])
def FastAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
fast_tensor_util.AppendBFloat16ArrayToTensorProto(
tensor_proto, np.asarray(
proto_values, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16))
if _FAST_TENSOR_UTIL_AVAILABLE:
_NP_TO_APPEND_FN = {
dtypes.bfloat16.as_numpy_dtype:
FastAppendBFloat16ArrayToTensorProto,
np.float16:
_MediumAppendFloat16ArrayToTensorProto,
np.float32:
fast_tensor_util.AppendFloat32ArrayToTensorProto,
np.float64:
fast_tensor_util.AppendFloat64ArrayToTensorProto,
np.int32:
fast_tensor_util.AppendInt32ArrayToTensorProto,
np.int64:
fast_tensor_util.AppendInt64ArrayToTensorProto,
np.uint8:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
np.uint16:
fast_tensor_util.AppendUInt16ArrayToTensorProto,
np.uint32:
fast_tensor_util.AppendUInt32ArrayToTensorProto,
np.uint64:
fast_tensor_util.AppendUInt64ArrayToTensorProto,
np.int8:
fast_tensor_util.AppendInt8ArrayToTensorProto,
np.int16:
fast_tensor_util.AppendInt16ArrayToTensorProto,
np.complex64:
fast_tensor_util.AppendComplex64ArrayToTensorProto,
np.complex128:
fast_tensor_util.AppendComplex128ArrayToTensorProto,
np.object:
fast_tensor_util.AppendObjectArrayToTensorProto,
np.bool:
fast_tensor_util.AppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype:
fast_tensor_util.AppendInt8ArrayToTensorProto,
dtypes.quint8.as_numpy_dtype:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
dtypes.qint16.as_numpy_dtype:
fast_tensor_util.AppendInt16ArrayToTensorProto,
dtypes.quint16.as_numpy_dtype:
fast_tensor_util.AppendUInt16ArrayToTensorProto,
dtypes.qint32.as_numpy_dtype:
fast_tensor_util.AppendInt32ArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
else:
def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.float_val.extend([x.item() for x in proto_values])
def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.double_val.extend([x.item() for x in proto_values])
def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int_val.extend([x.item() for x in proto_values])
def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int64_val.extend([x.item() for x in proto_values])
def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int_val.extend([x.item()[0] for x in proto_values])
def SlowAppendUInt32ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.uint32_val.extend([x.item() for x in proto_values])
def SlowAppendUInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.uint64_val.extend([x.item() for x in proto_values])
def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.scomplex_val.extend(
[v.item() for x in proto_values for v in [x.real, x.imag]])
def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.dcomplex_val.extend(
[v.item() for x in proto_values for v in [x.real, x.imag]])
def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.bool_val.extend([x.item() for x in proto_values])
_NP_TO_APPEND_FN = {
dtypes.bfloat16.as_numpy_dtype: SlowAppendBFloat16ArrayToTensorProto,
np.float16: SlowAppendFloat16ArrayToTensorProto,
np.float32: SlowAppendFloat32ArrayToTensorProto,
np.float64: SlowAppendFloat64ArrayToTensorProto,
np.int32: SlowAppendIntArrayToTensorProto,
np.int64: SlowAppendInt64ArrayToTensorProto,
np.uint8: SlowAppendIntArrayToTensorProto,
np.uint16: SlowAppendIntArrayToTensorProto,
np.uint32: SlowAppendUInt32ArrayToTensorProto,
np.uint64: SlowAppendUInt64ArrayToTensorProto,
np.int8: SlowAppendIntArrayToTensorProto,
np.int16: SlowAppendIntArrayToTensorProto,
np.complex64: SlowAppendComplex64ArrayToTensorProto,
np.complex128: SlowAppendComplex128ArrayToTensorProto,
np.object: SlowAppendObjectArrayToTensorProto,
np.bool: SlowAppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
def GetFromNumpyDTypeDict(dtype_dict, dtype):
# NOTE: dtype_dict.get(dtype) always returns None.
for key, val in six.iteritems(dtype_dict):
if key == dtype:
return val
return None
def GetNumpyAppendFn(dtype):
# numpy dtypes for strings are variable length. We can not compare
# dtype with a single constant (np.string does not exist) to decide
# whether dtype is a "string" type. We need to compare the dtype.type to be
# sure it's a string type.
if dtype.type == np.string_ or dtype.type == np.unicode_:
if _FAST_TENSOR_UTIL_AVAILABLE:
return fast_tensor_util.AppendObjectArrayToTensorProto
else:
return SlowAppendObjectArrayToTensorProto
return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
def TensorShapeProtoToList(shape):
"""Convert a TensorShape to a list.
Args:
shape: A TensorShapeProto.
Returns:
List of integers representing the dimensions of the tensor.
"""
return [dim.size for dim in shape.dim]
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
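# Illustrative sketch (hypothetical helper, not part of the original source):
# _GetDenseDimensions walks the first element at each nesting level, so a
# well-formed nested list yields its dense shape.
def _DemoGetDenseDimensions():
  assert _GetDenseDimensions([[1, 2, 3], [4, 5, 6]]) == [2, 3]
  assert _GetDenseDimensions(7) == []
  assert _GetDenseDimensions([]) == [0]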
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, (list, tuple)):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
_TENSOR_CONTENT_TYPES = frozenset([
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8,
dtypes.int16, dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8,
dtypes.qint16, dtypes.quint16, dtypes.qint32, dtypes.uint32, dtypes.uint64
])
# pylint: disable=invalid-name
def _check_failed(v):
# NB. none of the _check_* functions could raise a ValueError, so
# it is safe to use here.
raise ValueError(v)
def _check_quantized(values):
# Cannot rely on `nest` because the leaves are tuples.
if not isinstance(values, (list, tuple)):
_check_failed(values)
if isinstance(values, tuple):
_ = [_check_int(v) for v in values]
else:
_ = [_check_quantized(v) for v in values]
def _generate_isinstance_check(expected_types):
def inner(values):
for v in nest.flatten(values):
if not (isinstance(v, expected_types) or
(isinstance(v, np.ndarray) and
issubclass(v.dtype.type, expected_types))):
_check_failed(v)
return inner
_check_int = _generate_isinstance_check(
(compat.integral_types, tensor_shape.Dimension))
_check_float = _generate_isinstance_check(compat.real_types)
_check_complex = _generate_isinstance_check(compat.complex_types)
_check_str = _generate_isinstance_check(compat.bytes_or_text_types)
_check_bool = _generate_isinstance_check(bool)
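# Illustrative sketch (hypothetical helper, not part of the original source):
# each _check_* callable produced by _generate_isinstance_check silently
# accepts matching leaves and signals a mismatch by raising ValueError, which
# _AssertCompatible below converts into a readable TypeError.
def _DemoTypeCheckers():
  _check_int([1, 2, 3])  # passes silently
  try:
    _check_int(["not", "an", "int"])
    raised = False
  except ValueError:
    raised = True
  assert raised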
def _check_not_tensor(values):
_ = [_check_failed(v) for v in nest.flatten(values)
if isinstance(v, ops.Tensor)]
# pylint: enable=invalid-name
_TF_TO_IS_OK = {
dtypes.bool: _check_bool,
dtypes.complex128: _check_complex,
dtypes.complex64: _check_complex,
dtypes.float16: _check_float,
dtypes.float32: _check_float,
dtypes.float64: _check_float,
dtypes.int16: _check_int,
dtypes.int32: _check_int,
dtypes.int64: _check_int,
dtypes.int8: _check_int,
dtypes.qint16: _check_quantized,
dtypes.qint32: _check_quantized,
dtypes.qint8: _check_quantized,
dtypes.quint16: _check_quantized,
dtypes.quint8: _check_quantized,
dtypes.string: _check_str,
dtypes.uint16: _check_int,
dtypes.uint8: _check_int,
dtypes.uint32: _check_int,
dtypes.uint64: _check_int,
}
def _AssertCompatible(values, dtype):
if dtype is None:
fn = _check_not_tensor
else:
try:
fn = _TF_TO_IS_OK[dtype]
except KeyError:
# There isn't a specific fn, so we try to do the best possible.
if dtype.is_integer:
fn = _check_int
elif dtype.is_floating:
fn = _check_float
elif dtype.is_complex:
fn = _check_complex
elif dtype.is_quantized:
fn = _check_quantized
else:
fn = _check_not_tensor
try:
fn(values)
except ValueError as e:
[mismatch] = e.args
if dtype is None:
raise TypeError("Expected any non-tensor type, got a tensor instead.")
else:
raise TypeError("Expected %s, got %s of type '%s' instead." %
(dtype.name, repr(mismatch), type(mismatch).__name__))
def _is_array_like(obj): # pylint: disable=invalid-name
"""Check if a given object is array-like."""
if isinstance(obj, ops.Tensor) and not isinstance(obj, ops._EagerTensorBase): # pylint: disable=protected-access
# Tensor implements __array__ only so it can inform the user that it is not
# a valid array.
return False
# TODO(slebedev): an object could also implement C-level array interface.
if (callable(getattr(obj, "__array__", None)) or
isinstance(getattr(obj, "__array_interface__", None), dict)):
return True
try:
memoryview(obj)
except TypeError:
return False
else:
return not isinstance(obj, bytes)
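# Illustrative sketch (hypothetical helper, not part of the original source):
# _is_array_like treats anything exposing __array__, an __array_interface__
# dict, or the buffer protocol as array-like, except bytes.
def _DemoIsArrayLike():
  assert _is_array_like(np.zeros(3))       # ndarray exposes __array__
  assert _is_array_like(bytearray(b"ab"))  # supports the buffer protocol
  assert not _is_array_like(b"ab")         # bytes are excluded explicitly
  assert not _is_array_like([1, 2, 3])     # plain lists are handled elsewhere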
# pylint: disable=invalid-name
@tf_export("make_tensor_proto")
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False,
allow_broadcast=False):
"""Create a TensorProto.
In TensorFlow 2.0, representing tensors as protos should no longer be a
common workflow. That said, this utility function is still useful for
generating TF Serving request protos:
```python
request = tensorflow_serving.apis.predict_pb2.PredictRequest()
request.model_spec.name = "my_model"
request.model_spec.signature_name = "serving_default"
request.inputs["images"].CopyFrom(tf.make_tensor_proto(X_new))
```
`make_tensor_proto` accepts "values" of a python scalar, a python list, a
numpy ndarray, or a numpy scalar.
If "values" is a python scalar or a python list, make_tensor_proto
first converts it to a numpy ndarray. If dtype is None, the
conversion tries its best to infer the right numpy data
type. Otherwise, the resulting numpy array has a compatible data
type with the given dtype.
In either case above, the numpy ndarray (either the caller provided
or the auto-converted) must have the compatible type with dtype.
`make_tensor_proto` then converts the numpy array to a tensor proto.
If "shape" is None, the resulting tensor proto represents the numpy
array precisely.
Otherwise, "shape" specifies the tensor's shape and the numpy array
can not have more elements than what "shape" specifies.
Args:
values: Values to put in the TensorProto.
dtype: Optional tensor_pb2 DataType value.
shape: List of integers representing the dimensions of tensor.
verify_shape: Boolean that enables verification of a shape of values.
allow_broadcast: Boolean that enables allowing scalars and 1 length vector
broadcasting. Cannot be true when verify_shape is true.
Returns:
A `TensorProto`. Depending on the type, it may contain data in the
"tensor_content" attribute, which is not directly useful to Python programs.
To access the values you should convert the proto back to a numpy ndarray
with `tf.make_ndarray(proto)`.
If `values` is a `TensorProto`, it is immediately returned; `dtype` and
`shape` are ignored.
Raises:
TypeError: if unsupported types are provided.
ValueError: if arguments have inappropriate values or if verify_shape is
True and the shape of values is not equal to the shape from the argument.
"""
if allow_broadcast and verify_shape:
raise ValueError("allow_broadcast and verify_shape are not both allowed.")
if isinstance(values, tensor_pb2.TensorProto):
return values
if dtype:
dtype = dtypes.as_dtype(dtype)
is_quantized = (
dtype in [
dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,
dtypes.qint32
])
if _is_array_like(values):
values = np.asarray(values)
# We first convert value to a numpy array or scalar.
if isinstance(values, (np.ndarray, np.generic)):
if dtype and dtype.is_numpy_compatible:
nparray = values.astype(dtype.as_numpy_dtype)
else:
nparray = values
else:
if values is None:
raise ValueError("None values not supported.")
# if dtype is provided, forces numpy array to be the type
# provided if possible.
if dtype and dtype.is_numpy_compatible:
np_dt = dtype.as_numpy_dtype
else:
np_dt = None
# If shape is None, numpy.prod returns None when dtype is not set, but
# raises exception when dtype is set to np.int64
if shape is not None and np.prod(shape, dtype=np.int64) == 0:
nparray = np.empty(shape, dtype=np_dt)
else:
_AssertCompatible(values, dtype)
nparray = np.array(values, dtype=np_dt)
# We need to pass in quantized values as tuples, so don't apply the shape
# check to them.
if (list(nparray.shape) != _GetDenseDimensions(values) and
not is_quantized):
raise ValueError("""Argument must be a dense tensor: %s"""
""" - got shape %s, but wanted %s.""" %
(values, list(nparray.shape),
_GetDenseDimensions(values)))
# python/numpy default float type is float64. We prefer float32 instead.
if (nparray.dtype == np.float64) and dtype is None:
nparray = nparray.astype(np.float32)
# python/numpy default int type is int64. We prefer int32 instead.
elif (nparray.dtype == np.int64) and dtype is None:
downcasted_array = nparray.astype(np.int32)
# Do not down cast if it leads to precision loss.
if np.array_equal(downcasted_array, nparray):
nparray = downcasted_array
# if dtype is provided, it must be compatible with what numpy
# conversion says.
numpy_dtype = dtypes.as_dtype(nparray.dtype)
if numpy_dtype is None:
raise TypeError("Unrecognized data type: %s" % nparray.dtype)
# If dtype was specified and is a quantized type, we convert
# numpy_dtype back into the quantized version.
if is_quantized:
numpy_dtype = dtype
if dtype is not None and (not hasattr(dtype, "base_dtype") or
dtype.base_dtype != numpy_dtype.base_dtype):
raise TypeError("Incompatible types: %s vs. %s. Value is %s" %
(dtype, nparray.dtype, values))
# If shape is not given, get the shape from the numpy array.
if shape is None:
shape = nparray.shape
is_same_size = True
shape_size = nparray.size
else:
shape = [int(dim) for dim in shape]
shape_size = np.prod(shape, dtype=np.int64)
is_same_size = shape_size == nparray.size
if allow_broadcast:
if nparray.shape == (1,) or nparray.shape == tuple():
pass
elif nparray.size != shape_size:
raise TypeError("Expected Tensor's shape: %s, got %s." %
(tuple(shape), nparray.shape))
else:
if verify_shape and nparray.shape != tuple(shape):
raise TypeError("Expected Tensor's shape: %s, got %s." %
(tuple(shape), nparray.shape))
if nparray.size > shape_size:
raise ValueError(
"Too many elements provided. Needed at most %d, but received %d" %
(shape_size, nparray.size))
tensor_proto = tensor_pb2.TensorProto(
dtype=numpy_dtype.as_datatype_enum,
tensor_shape=tensor_shape.as_shape(shape).as_proto())
if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
if nparray.size * nparray.itemsize >= (1 << 31):
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
tensor_proto.tensor_content = nparray.tobytes()
return tensor_proto
# If we were not given values as a numpy array, compute the proto_values
# from the given values directly, to avoid numpy trimming nulls from the
# strings. Since values could be a list of strings, or a multi-dimensional
# list of lists that might or might not correspond to the given shape,
# we flatten it conservatively.
if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
proto_values = _FlattenToStrings(values)
# At this point, values may be a list of objects that we could not
# identify a common type for (hence it was inferred as
# np.object/dtypes.string). If we are unable to convert it to a
# string, we raise a more helpful error message.
#
# Ideally, we'd be able to convert the elements of the list to a
# common type, but this type inference requires some thinking and
# so we defer it for now.
try:
str_values = [compat.as_bytes(x) for x in proto_values]
except TypeError:
raise TypeError("Failed to convert object of type %s to Tensor. "
"Contents: %s. Consider casting elements to a "
"supported type." % (type(values), values))
tensor_proto.string_val.extend(str_values)
return tensor_proto
# TensorFlow expects C order (a.k.a., eigen row major).
proto_values = nparray.ravel()
append_fn = GetNumpyAppendFn(proto_values.dtype)
if append_fn is None:
raise TypeError(
"Element type not supported in TensorProto: %s" % numpy_dtype.name)
append_fn(tensor_proto, proto_values)
return tensor_proto
# pylint: enable=invalid-name
@tf_export("make_ndarray")
def MakeNdarray(tensor):
"""Create a numpy ndarray from a tensor.
Create a numpy ndarray with the same shape and data as the tensor.
For example:
```python
# Tensor a has shape (2,3)
a = tf.constant([[1,2,3],[4,5,6]])
proto_tensor = tf.make_tensor_proto(a) # convert `tensor a` to a proto tensor
tf.make_ndarray(proto_tensor) # output: array([[1, 2, 3],
# [4, 5, 6]], dtype=int32)
# output has shape (2,3)
```
Args:
tensor: A TensorProto.
Returns:
A numpy array with the tensor contents.
Raises:
TypeError: if tensor has unsupported type.
"""
shape = [d.size for d in tensor.tensor_shape.dim]
num_elements = np.prod(shape, dtype=np.int64)
tensor_dtype = dtypes.as_dtype(tensor.dtype)
dtype = tensor_dtype.as_numpy_dtype
if tensor.tensor_content:
return (np.frombuffer(tensor.tensor_content,
dtype=dtype).copy().reshape(shape))
if tensor_dtype == dtypes.string:
# np.pad throws on these arrays of type np.object.
values = list(tensor.string_val)
padding = num_elements - len(values)
if padding > 0:
last = values[-1] if values else ""
values.extend([last] * padding)
return np.array(values, dtype=dtype).reshape(shape)
if tensor_dtype == dtypes.float16 or tensor_dtype == dtypes.bfloat16:
# the half_val field of the TensorProto stores the binary representation
# of the fp16: we need to reinterpret this as a proper float16
values = np.fromiter(tensor.half_val, dtype=np.uint16)
values.dtype = tensor_dtype.as_numpy_dtype
elif tensor_dtype == dtypes.float32:
values = np.fromiter(tensor.float_val, dtype=dtype)
elif tensor_dtype == dtypes.float64:
values = np.fromiter(tensor.double_val, dtype=dtype)
elif tensor_dtype in [
dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16, dtypes.int8,
dtypes.qint32, dtypes.quint8, dtypes.qint8, dtypes.qint16, dtypes.quint16
]:
values = np.fromiter(tensor.int_val, dtype=dtype)
elif tensor_dtype == dtypes.int64:
values = np.fromiter(tensor.int64_val, dtype=dtype)
elif tensor_dtype == dtypes.complex64:
it = iter(tensor.scomplex_val)
values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)
elif tensor_dtype == dtypes.complex128:
it = iter(tensor.dcomplex_val)
values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)
elif tensor_dtype == dtypes.bool:
values = np.fromiter(tensor.bool_val, dtype=dtype)
else:
raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
if values.size == 0:
return np.zeros(shape, dtype)
if values.size != num_elements:
values = np.pad(values, (0, num_elements - values.size), "edge")
return values.reshape(shape)
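# Illustrative sketch (hypothetical helper, not part of the original source):
# for ordinary dense numeric arrays, make_tensor_proto and MakeNdarray are
# inverses of each other.
def _DemoTensorProtoRoundTrip():
  original = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
  proto = make_tensor_proto(original)
  recovered = MakeNdarray(proto)
  assert recovered.dtype == np.int32 and np.array_equal(recovered, original)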
def ShapeEquals(tensor_proto, shape):
"""Returns True if "tensor_proto" has the given "shape".
Args:
tensor_proto: A TensorProto.
shape: A tensor shape, expressed as a TensorShape, list, or tuple.
Returns:
True if "tensor_proto" has the given "shape", otherwise False.
Raises:
TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
TensorShape, list, or tuple.
"""
if not isinstance(tensor_proto, tensor_pb2.TensorProto):
raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
shape = [d.size for d in shape.dim]
elif not isinstance(shape, (list, tuple)):
raise TypeError("shape is not a list or tuple")
tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
return all(x == y for x, y in zip(tensor_shape_list, shape))
def _ConstantValue(tensor, partial):
# TODO(touts): Support Variables?
if not isinstance(tensor, ops.Tensor):
raise TypeError("%r is not a Tensor, has type %s" % (tensor, type(tensor)))
if tensor.op.type == "Const":
return MakeNdarray(tensor.op.get_attr("value"))
elif tensor.op.type == "Shape":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.array(
[dim.value for dim in input_shape.dims],
dtype=tensor.dtype.as_numpy_dtype)
else:
return None
elif tensor.op.type == "Size":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
else:
return None
elif tensor.op.type == "Rank":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.ndims is not None:
return np.ndarray(
shape=(),
buffer=np.array([input_shape.ndims], dtype=np.int32),
dtype=np.int32)
else:
return None
elif tensor.op.type == "Range":
start = constant_value(tensor.op.inputs[0])
if start is None:
return None
limit = constant_value(tensor.op.inputs[1])
if limit is None:
return None
delta = constant_value(tensor.op.inputs[2])
if delta is None:
return None
return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
elif tensor.op.type == "Cast":
pre_cast = constant_value(tensor.op.inputs[0])
if pre_cast is None:
return None
cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
return pre_cast.astype(cast_dtype.as_numpy_dtype)
elif tensor.op.type == "Concat":
dim = constant_value(tensor.op.inputs[0])
if dim is None:
return None
values = []
for x in tensor.op.inputs[1:]:
value = constant_value(x)
if value is None:
return None
values.append(value)
return np.concatenate(values, axis=dim)
elif tensor.op.type == "ConcatV2":
dim = constant_value(tensor.op.inputs[-1])
if dim is None:
return None
values = []
for x in tensor.op.inputs[:-1]:
value = constant_value(x)
if value is None:
return None
values.append(value)
return np.concatenate(values, axis=dim)
elif tensor.op.type == "Pack":
values = []
# Some imported GraphDefs have Pack ops with zero inputs. Those are invalid
# and shouldn't be produced, but to deal sensibly with them here we check
# and return None.
if not tensor.op.inputs:
return None
# We can't handle axis != 0 Packs at the moment.
if tensor.op.get_attr("axis") != 0:
return None
for x in tensor.op.inputs:
value = constant_value(x, partial)
if value is None and not partial:
return None
values.append(value)
return np.array(values)
elif tensor.op.type == "Unpack":
# We can't handle axis != 0 Unpacks at the moment.
if tensor.op.get_attr("axis") != 0:
return None
value = constant_value(tensor.op.inputs[0], partial)
if value is None:
return None
return value[tensor.value_index]
elif tensor.op.type == "Split":
dim = constant_value(tensor.op.inputs[0])
value = constant_value(tensor.op.inputs[1], partial)
if value is None or dim is None:
return None
split = np.split(value, tensor.op.get_attr("num_split"), dim)
return split[tensor.value_index]
elif tensor.op.type == "Fill":
fill_shape = tensor.shape
fill_value = constant_value(tensor.op.inputs[1])
if fill_shape.is_fully_defined() and fill_value is not None:
return np.full(fill_shape.as_list(), fill_value, dtype=fill_value.dtype)
else:
return None
elif tensor.op.type == "Equal":
value1 = constant_value(tensor.op.inputs[0])
if value1 is None:
return None
value2 = constant_value(tensor.op.inputs[1])
if value2 is None:
return None
return np.equal(value1, value2)
elif tensor.op.type == "NotEqual":
value1 = constant_value(tensor.op.inputs[0])
if value1 is None:
return None
value2 = constant_value(tensor.op.inputs[1])
if value2 is None:
return None
return np.not_equal(value1, value2)
elif tensor.op.type == "StopGradient":
return constant_value(tensor.op.inputs[0], partial)
elif tensor.op.type in ("CheckNumericsV2", "DebugIdentityV2", "Identity"):
return constant_value(tensor.op.inputs[0], partial)
else:
return None
@tf_export("get_static_value")
def constant_value(tensor, partial=False): # pylint: disable=invalid-name
"""Returns the constant value of the given tensor, if efficiently calculable.
This function attempts to partially evaluate the given tensor, and
returns its value as a numpy ndarray if this succeeds.
Compatibility(V1): If `constant_value(tensor)` returns a non-`None` result, it
will no longer be possible to feed a different value for `tensor`. This allows
the result of this function to influence the graph that is constructed, and
permits static shape optimizations.
Args:
tensor: The Tensor to be evaluated.
partial: If True, the returned numpy array is allowed to have partially
evaluated values. Values that can't be evaluated will be None.
Returns:
A numpy ndarray containing the constant value of the given `tensor`,
or None if it cannot be calculated.
Raises:
TypeError: if tensor is not an ops.Tensor.
"""
if isinstance(tensor, ops.EagerTensor):
try:
return tensor.numpy()
except errors_impl.UnimplementedError:
# Some EagerTensors may not implement .numpy/resolve, e.g. parallel
# tensors with multiple components on different devices.
return None
if not is_tensor(tensor):
return tensor
if not isinstance(tensor, ops.Tensor):
return None
ret = _ConstantValue(tensor, partial)
if ret is not None:
# The caller may now depend on the constant value of `tensor`, so we
# conservatively prevent it from being fed.
tensor.graph.prevent_feeding(tensor)
return ret
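# Editorial sketch (not part of the original module; assumes `import tensorflow as tf`):
# typical use of the public wrapper defined above.
#
#   # t = tf.constant([1, 2, 3])
#   # tf.get_static_value(t)                        # -> array([1, 2, 3], dtype=int32)
#   # tf.get_static_value(tf.Variable([1, 2, 3]))   # -> None (not statically known)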
def constant_value_as_shape(tensor): # pylint: disable=invalid-name
"""A version of `constant_value()` that returns a `TensorShape`.
This version should be used when a constant tensor value is
interpreted as a (possibly partial) shape, e.g. in the shape
function for `tf.reshape()`. By explicitly requesting a
`TensorShape` as the return value, it is possible to represent
unknown dimensions; by contrast, `constant_value()` is
all-or-nothing.
Args:
tensor: The rank-0 or rank-1 Tensor to be evaluated.
Returns:
A `TensorShape` based on the constant value of the given `tensor`.
Raises:
ValueError: If the shape is rank-0 and is not statically known to be -1.
"""
if isinstance(tensor, ops.EagerTensor):
return tensor_shape.TensorShape(
[dim if dim != -1 else None for dim in tensor.numpy()])
if tensor.get_shape().ndims == 0:
value = constant_value(tensor)
if value is None:
raise ValueError(
"Received a scalar with unknown value as shape; require a statically "
"known scalar with value '-1' to describe an unknown shape.")
if value != -1:
raise ValueError(
"Received a scalar value '%s' as shape; require a statically known "
"scalar with value '-1' to describe an unknown shape." % value)
return tensor_shape.unknown_shape()
shape = tensor.get_shape().with_rank(1)
if shape == [0]:
return tensor_shape.TensorShape([])
elif tensor.op.type == "Cast":
pre_cast = constant_value_as_shape(tensor.op.inputs[0])
if pre_cast.dims is None:
# the input to cast has a totally undefined shape; just return that.
return pre_cast
cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
if cast_dtype not in (dtypes.int32, dtypes.int64):
return tensor_shape.unknown_shape(shape.dims[0].value)
dest_dtype_shape_array = np.array(
[x if x is not None else -1 for x in pre_cast.as_list()]).astype(
cast_dtype.as_numpy_dtype)
return tensor_shape.TensorShape([
x if x >= 0 else None
for x in dest_dtype_shape_array])
elif tensor.op.type == "Shape":
return tensor.op.inputs[0].get_shape()
elif tensor.op.type == "Pack":
ret = tensor_shape.TensorShape([]) # Empty list.
# Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
# would not be rank 1.
assert tensor.op.get_attr("axis") == 0
for pack_input in tensor.op.inputs:
# `pack_input` must be a scalar. Attempt to evaluate it, and append it
# to `ret`.
pack_input_val = constant_value(pack_input)
if pack_input_val is None or pack_input_val < 0:
new_dim = tensor_shape.Dimension(None)
else:
new_dim = tensor_shape.Dimension(pack_input_val)
ret = ret.concatenate([new_dim])
return ret
elif tensor.op.type == "Concat":
# We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
# the only legal value when concatenating vectors, and it will
# have been checked by a previous shape function.
ret = tensor_shape.TensorShape([]) # Empty list.
for concat_input in tensor.op.inputs[1:]:
# `concat_input` must be a vector. Attempt to evaluate it as a shape,
# and concatenate it with `ret`.
ret = ret.concatenate(constant_value_as_shape(concat_input))
return ret
elif tensor.op.type == "ConcatV2":
# We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
# the only legal value when concatenating vectors, and it will
# have been checked by a previous shape function.
ret = tensor_shape.TensorShape([]) # Empty list.
for concat_input in tensor.op.inputs[:-1]:
# `concat_input` must be a vector. Attempt to evaluate it as a shape,
# and concatenate it with `ret`.
ret = ret.concatenate(constant_value_as_shape(concat_input))
return ret
elif tensor.op.type == "StridedSlice":
try:
begin = constant_value(tensor.op.inputs[1])
end = constant_value(tensor.op.inputs[2])
strides = constant_value(tensor.op.inputs[3])
if begin is not None and end is not None and strides is not None:
begin = begin[0]
end = end[0]
strides = strides[0]
begin_mask = tensor.op.get_attr("begin_mask")
if begin_mask == 1:
begin = None
end_mask = tensor.op.get_attr("end_mask")
if end_mask == 1:
end = None
ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
new_axis_mask = tensor.op.get_attr("new_axis_mask")
shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
valid_attributes = (not ellipsis_mask and not new_axis_mask and
not shrink_axis_mask and (not begin_mask or
(begin_mask == 1)) and
(not end_mask or (end_mask == 1)))
if valid_attributes: # additional inputs not supported
prev = constant_value_as_shape(tensor.op.inputs[0])
prev = prev[begin:end:strides]
ret = tensor_shape.TensorShape(prev)
return ret
except ValueError: # Could come from get_attr or slicing prev.
pass
except TypeError: # Could come from slicing prev.
pass
elif (tensor.op.type == "Placeholder" and
tensor.op.graph.building_function and
hasattr(tensor.op.graph, "internal_captures")):
# If we are inside a FuncGraph try to lookup the constant value of the
# corresponding external capture. Note that we only look at captures and
# not the fed inputs because those can be fed different values in different
# instantiations of the function call or different iterations of a
# tf.while_loop.
for i, capture in enumerate(tensor.op.graph.internal_captures):
if capture is tensor:
external_capture = tensor.op.graph.external_captures[i]
return constant_value_as_shape(external_capture)
ret = tensor_shape.unknown_shape(shape.dims[0].value)
value = constant_value(tensor)
if value is not None:
ret = ret.merge_with(
tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
return ret
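# Editorial sketch (illustrative, TF1-style graph mode assumed): this is the helper that
# lets shape functions recover a partially known TensorShape from a shape-valued tensor.
#
#   # dim0 = tf.compat.v1.placeholder(tf.int32, [])     # unknown scalar
#   # shape_t = tf.stack([2, dim0, 8])                  # a "Pack" op
#   # constant_value_as_shape(shape_t)                  # -> TensorShape([2, None, 8])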
# TODO(mdan): Deprecate in favor of more static-friendly types.
@tf_export("is_tensor")
def is_tensor(x): # pylint: disable=invalid-name
"""Checks whether `x` is a TF-native type that can be passed to many TF ops.
  Use is_tensor to differentiate types that can be ingested by TensorFlow ops
without any conversion (e.g., `tf.Tensor`, `tf.SparseTensor`, and
`tf.RaggedTensor`) from types that need to be converted into tensors before
they are ingested (e.g., numpy `ndarray` and Python scalars).
For example, in the following code block:
```python
if not tf.is_tensor(t):
t = tf.convert_to_tensor(t)
return t.dtype
```
we check to make sure that `t` is a tensor (and convert it if not) before
accessing its `shape` and `dtype`.
Args:
x: A python object to check.
Returns:
`True` if `x` is a tensor or "tensor-like", `False` if not.
"""
return (isinstance(x, internal.NativeObject) or
isinstance(x, core.Tensor) or
getattr(x, "is_tensor_like", False))
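# Editorial sketch (not part of the original module; assumes `tf` and `np` imports):
#
#   # tf.is_tensor(tf.constant(1.0))                    # True
#   # tf.is_tensor(tf.ragged.constant([[1], [2, 3]]))   # True
#   # tf.is_tensor(np.array([1.0]))                     # False -> convert_to_tensor first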
def shape_tensor(shape): # pylint: disable=invalid-name
"""Convert to an int32 or int64 tensor, defaulting to int32 if empty."""
dtype = None
if isinstance(shape, (tuple, list)):
if not shape:
dtype = dtypes.int32
else:
# If there are Dimension objects in the shape, unwrap them. This can be a
# problem if v1 and v2 TensorShape objects get mixed up in partial
# conversions, leading to shapes such as (1, 2, Dimension(5)), which are
# not convertible to Tensors because of mixed content.
shape = tuple(map(tensor_shape.dimension_value, shape))
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
# DO NOT USE: For testing only.
_ENABLE_MAYBE_SET_STATIC_SHAPE = True
def maybe_set_static_shape(tensor, shape): # pylint: disable=invalid-name
"""Sets the shape of `tensor` to the `shape`'s constant value, if inferrable.
This is a temporary workaround to fix shape inference across functional op
boundaries. E.g.
```python
shape = tf.constant([3])
@tf.function
def f():
u = tf.random_uniform(shape)
return u
```
If we were to rely solely on C++ shape inference, the shape of `u` inside
`f` would be unknown because C++ shape inference is not aware of the outer
graph and all it sees is a Placeholder node when backtracing the captured
tensor for `shape`. `maybe_set_static_shape` computes the static shape value
of `shape` by traversing the `FuncGraph` boundaries and sets the correct
shape.
A longer term solution would be to fix C++ shape inference.
Args:
tensor: A tensor.
shape: A shape tensor.
"""
if (_ENABLE_MAYBE_SET_STATIC_SHAPE and not context.executing_eagerly() and
ops.get_default_graph().building_function and
not tensor.shape.is_fully_defined() and is_tensor(shape)):
shape = shape_tensor(shape)
const_shape = constant_value_as_shape(shape)
tensor.set_shape(const_shape)
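# Editorial sketch (illustrative continuation of the docstring's example): with the
# workaround above, the captured constant `shape` is resolved across the FuncGraph
# boundary, so the result's static shape is known at trace time.
#
#   # shape = tf.constant([3])
#   # @tf.function
#   # def f():
#   #   u = tf.random.uniform(shape)
#   #   print(u.shape)                # prints (3,) at trace time
#   #   return u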
avg_line_length: 37.27814 | max_line_length: 115 | alphanum_fraction: 0.703349

hexsha: c8d1d966e0220885215198b13a6094433d5d4b78 | size: 3,095 | ext: py | lang: Python
max_stars_repo_path: submitit/test_helpers.py | max_stars_repo_name: bichkd/submitit | max_stars_repo_head_hexsha: 163725c67b79e2b0899ddd95914167fd82668f52 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: submitit/test_helpers.py | max_issues_repo_name: bichkd/submitit | max_issues_repo_head_hexsha: 163725c67b79e2b0899ddd95914167fd82668f52 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: submitit/test_helpers.py | max_forks_repo_name: bichkd/submitit | max_forks_repo_head_hexsha: 163725c67b79e2b0899ddd95914167fd82668f52 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import time
from pathlib import Path
import pytest
from . import helpers
from .core import utils
def _three_time(x: int) -> int:
return 3 * x
requires_rsync = pytest.mark.skipif(
not helpers.RsyncSnapshot.available(), reason="Rsync is required for snapshotting"
)
def test_function_sequence_checkpoint(tmp_path: Path) -> None:
file = tmp_path / "test_funcseq.pkl"
fs0 = helpers.FunctionSequence(verbose=True)
fs0.add(_three_time, 4)
fs0.add(_three_time, 5)
assert len(fs0) == 2
assert sum(x.done() for x in fs0) == 0
utils.cloudpickle_dump(fs0, file)
fs1 = utils.pickle_load(file)
assert sum(x.done() for x in fs1) == 0
assert fs1() == [12, 15]
assert sum(x.done() for x in fs1) == 2
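# Editorial sketch (hypothetical usage, not part of the original tests): a
# FunctionSequence is normally pickled and submitted as one checkpointable job,
# e.g. via an AutoExecutor:
#
#   # ex = submitit.AutoExecutor(folder="logs")
#   # fs = helpers.FunctionSequence(verbose=True)
#   # fs.add(_three_time, 4)
#   # job = ex.submit(fs)             # job.result() -> [12]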
def test_as_completed(executor) -> None:
def f(x: float) -> float:
time.sleep(x)
return x
    # slow needs to be > 1.5s, otherwise it might finish before we start polling.
slow, fast = 1.5, 0.1
# One slow job and two fast jobs.
jobs = executor.map_array(f, [slow, fast, fast])
start = time.time()
finished_jobs = []
for n, j in enumerate(helpers.as_completed(jobs, poll_frequency=0.1)):
elapsed = time.time() - start
if n < 2:
# we start getting result before the slow job finished.
assert elapsed < slow
finished_jobs.append(j)
# We get fast job results first, then result of the slow job.
assert [fast, fast, slow] == [j.result() for j in finished_jobs]
assert jobs[0] is finished_jobs[-1]
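# Editorial sketch (hypothetical, outside the test): helpers.as_completed mirrors
# concurrent.futures.as_completed for submitit jobs, yielding each job as it finishes.
#
#   # for job in helpers.as_completed(jobs, poll_frequency=10):
#   #     print(job.job_id, job.result())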
@requires_rsync
def test_snapshot(tmp_path: Path) -> None:
cwd = Path.cwd()
with helpers.RsyncSnapshot(tmp_path):
assert Path.cwd() == tmp_path
assert (tmp_path / "submitit/test_helpers.py").exists()
assert Path.cwd() == cwd
@requires_rsync
def test_snapshot_excludes(tmp_path: Path) -> None:
exclude = ["submitit/test_*"]
with helpers.RsyncSnapshot(snapshot_dir=tmp_path, exclude=exclude):
assert (tmp_path / "submitit/helpers.py").exists()
assert not (tmp_path / "submitit/test_helpers.py").exists()
@requires_rsync
def test_job_use_snapshot_cwd(executor, tmp_path: Path) -> None:
with helpers.RsyncSnapshot(snapshot_dir=tmp_path):
job = executor.submit(os.getcwd)
assert Path(job.result()) == tmp_path
@requires_rsync
def test_job_use_snapshot_modules(executor, tmp_path: Path) -> None:
with helpers.RsyncSnapshot(snapshot_dir=tmp_path):
def submitit_file() -> Path:
# pylint: disable=import-outside-toplevel
import submitit
return Path(submitit.__file__)
job = executor.submit(submitit_file)
# Here we load the normal submitit
assert submitit_file() == Path(__file__).parent / "__init__.py"
# In the job we should import submitit from the snapshot dir
assert job.result() == tmp_path / "submitit/__init__.py"
avg_line_length: 30.643564 | max_line_length: 86 | alphanum_fraction: 0.674637

hexsha: 09cf2bf54073c4486cbbbccc5a288769a6ad1368 | size: 17,456 | ext: py | lang: Python
max_stars_repo_path: fs/osfs.py | max_stars_repo_name: EnjoyLifeFund/macHighSierra-py36-pkgs | max_stars_repo_head_hexsha: 5668b5785296b314ea1321057420bcd077dba9ea | max_stars_repo_licenses: ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: fs/osfs.py | max_issues_repo_name: EnjoyLifeFund/macHighSierra-py36-pkgs | max_issues_repo_head_hexsha: 5668b5785296b314ea1321057420bcd077dba9ea | max_issues_repo_licenses: ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: fs/osfs.py | max_forks_repo_name: EnjoyLifeFund/macHighSierra-py36-pkgs | max_forks_repo_head_hexsha: 5668b5785296b314ea1321057420bcd077dba9ea | max_forks_repo_licenses: ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""Manage the filesystem provided by your OS.
In essence, an `OSFS` is a thin layer over the `io` and `os` modules
of the Python standard library.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import io
import itertools
import logging
import os
import platform
import stat
import sys
import six
try:
from os import scandir
except ImportError:
try:
from scandir import scandir
except ImportError: # pragma: nocover
scandir = None
from . import errors
from .errors import FileExists
from .base import FS
from .enums import ResourceType
from ._fscompat import fsencode, fsdecode, fspath
from .info import Info
from .path import basename
from .permissions import Permissions
from .error_tools import convert_os_errors
from .mode import Mode, validate_open_mode
from .errors import NoURL
log = logging.getLogger('fs.osfs')
_WINDOWS_PLATFORM = platform.system() == 'Windows'
@six.python_2_unicode_compatible
class OSFS(FS):
"""Create an OSFS.
Arguments:
root_path (str or ~os.PathLike): An OS path or path-like object to
the location on your HD you wish to manage.
create (bool, optional): Set to `True` to create the root directory
if it does not already exist, otherwise the directory should exist
prior to creating the ``OSFS`` instance (default `False`).
create_mode (int, optional): The permissions that will be used to
create the directory if ``create`` is `True` and the path doesn't
exist, defaults to ``0o777``.
Raises:
`fs.errors.CreateFailed`: If ``root_path`` does not
exist, or could not be created.
Examples:
>>> current_directory_fs = OSFS('.')
>>> home_fs = OSFS('~/')
>>> windows_system32_fs = OSFS('c://system32')
"""
def __init__(self,
root_path,
create=False,
create_mode=0o777):
"""Create an OSFS instance.
"""
super(OSFS, self).__init__()
root_path = fsdecode(fspath(root_path))
_root_path = os.path.expanduser(os.path.expandvars(root_path))
_root_path = os.path.normpath(os.path.abspath(_root_path))
self.root_path = _root_path
if create:
try:
if not os.path.isdir(_root_path):
os.makedirs(_root_path, mode=create_mode)
except OSError as error:
raise errors.CreateFailed(
'unable to create {} ({})'.format(root_path, error)
)
else:
if not os.path.isdir(_root_path):
raise errors.CreateFailed(
'root path does not exist'
)
_meta = self._meta = {
'case_insensitive': os.path.normcase('Aa') != 'aa',
'network': False,
'read_only': False,
'supports_rename': True,
'thread_safe': True,
'unicode_paths': os.path.supports_unicode_filenames,
'virtual': False,
}
if _WINDOWS_PLATFORM: # pragma: nocover
_meta["invalid_path_chars"] =\
''.join(six.unichr(n) for n in range(31)) + '\\:*?"<>|'
else:
_meta["invalid_path_chars"] = '\0'
if 'PC_PATH_MAX' in os.pathconf_names:
_meta['max_sys_path_length'] = (
os.pathconf(
fsencode(_root_path),
os.pathconf_names['PC_PATH_MAX']
)
)
def __repr__(self):
_fmt = "{}({!r})"
return _fmt.format(self.__class__.__name__,
self.root_path)
def __str__(self):
fmt = "<{} '{}'>"
return fmt.format(self.__class__.__name__.lower(),
self.root_path)
def _to_sys_path(self, path):
"""Convert a FS path to a path on the OS.
"""
sys_path = os.path.join(
self.root_path,
path.lstrip('/').replace('/', os.sep)
)
return sys_path
@classmethod
def _make_details_from_stat(cls, stat_result):
"""Make a *details* info dict from an `os.stat_result` object.
"""
details = {
'_write': ['accessed', 'modified'],
'accessed': stat_result.st_atime,
'modified': stat_result.st_mtime,
'size': stat_result.st_size,
'type': int(cls._get_type_from_stat(stat_result))
}
# On other Unix systems (such as FreeBSD), the following
# attributes may be available (but may be only filled out if
# root tries to use them):
details['created'] = getattr(stat_result, 'st_birthtime', None)
ctime_key = (
'created'
if _WINDOWS_PLATFORM
else 'metadata_changed'
)
details[ctime_key] = stat_result.st_ctime
return details
@classmethod
def _make_access_from_stat(cls, stat_result):
"""Make an *access* info dict from an `os.stat_result` object.
"""
access = {}
access['permissions'] = Permissions(
mode=stat_result.st_mode
).dump()
access['gid'] = stat_result.st_gid
access['uid'] = stat_result.st_uid
if not _WINDOWS_PLATFORM:
import grp
import pwd
try:
access['group'] = grp.getgrgid(access['gid']).gr_name
except KeyError: # pragma: nocover
pass
try:
access['user'] = pwd.getpwuid(access['uid']).pw_name
except KeyError: # pragma: nocover
pass
return access
STAT_TO_RESOURCE_TYPE = {
stat.S_IFDIR: ResourceType.directory,
stat.S_IFCHR: ResourceType.character,
stat.S_IFBLK: ResourceType.block_special_file,
stat.S_IFREG: ResourceType.file,
stat.S_IFIFO: ResourceType.fifo,
stat.S_IFLNK: ResourceType.symlink,
stat.S_IFSOCK: ResourceType.socket
}
@classmethod
def _get_type_from_stat(cls, _stat):
"""Get the resource type from an `os.stat_result` object.
"""
st_mode = _stat.st_mode
st_type = stat.S_IFMT(st_mode)
return cls.STAT_TO_RESOURCE_TYPE.get(st_type, ResourceType.unknown)
# --------------------------------------------------------
# Required Methods
# --------------------------------------------------------
def _gettarget(self, sys_path):
try:
target = os.readlink(sys_path)
except OSError:
return None
else:
return target
def _make_link_info(self, sys_path):
_target = self._gettarget(sys_path)
link = {
'target': _target,
}
return link
def getinfo(self, path, namespaces=None):
self.check()
namespaces = namespaces or ()
_path = self.validatepath(path)
sys_path = self.getsyspath(_path)
_lstat = None
with convert_os_errors('getinfo', path):
_stat = os.stat(sys_path)
if 'lstat' in namespaces:
                _lstat = os.lstat(sys_path)
info = {
'basic': {
'name': basename(_path),
'is_dir': stat.S_ISDIR(_stat.st_mode)
}
}
if 'details' in namespaces:
info['details'] = self._make_details_from_stat(_stat)
if 'stat' in namespaces:
info['stat'] = {
k: getattr(_stat, k)
for k in dir(_stat) if k.startswith('st_')
}
if 'lstat' in namespaces:
info['lstat'] = {
k: getattr(_lstat, k)
for k in dir(_lstat) if k.startswith('st_')
}
if 'link' in namespaces:
info['link'] = self._make_link_info(sys_path)
if 'access' in namespaces:
info['access'] = self._make_access_from_stat(_stat)
return Info(info)
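    # Editorial sketch (illustrative, not part of the library): requesting extra
    # namespaces fills in the corresponding Info sections.
    #
    #   # info = OSFS(".").getinfo("setup.py", namespaces=["details", "access"])
    #   # info.size, info.permissions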
def listdir(self, path):
self.check()
_path = self.validatepath(path)
sys_path = self._to_sys_path(_path)
with convert_os_errors('listdir', path, directory=True):
names = os.listdir(sys_path)
return names
def makedir(self, path, permissions=None, recreate=False):
self.check()
mode = Permissions.get_mode(permissions)
_path = self.validatepath(path)
sys_path = self._to_sys_path(_path)
with convert_os_errors('makedir', path, directory=True):
try:
os.mkdir(sys_path, mode)
except OSError as error:
if error.errno == errno.ENOENT:
raise errors.ResourceNotFound(path)
elif error.errno == errno.EEXIST and recreate:
pass
else:
raise
return self.opendir(_path)
def openbin(self, path, mode="r", buffering=-1, **options):
_mode = Mode(mode)
_mode.validate_bin()
self.check()
_path = self.validatepath(path)
sys_path = self._to_sys_path(_path)
with convert_os_errors('openbin', path):
if six.PY2 and _mode.exclusive and self.exists(path):
raise errors.FileExists(path)
binary_file = io.open(
sys_path,
mode=_mode.to_platform_bin(),
buffering=buffering,
**options
)
return binary_file
def remove(self, path):
self.check()
_path = self.validatepath(path)
sys_path = self._to_sys_path(_path)
with convert_os_errors('remove', path):
try:
os.remove(sys_path)
except OSError as error:
if error.errno == errno.EACCES and sys.platform == "win32":
# sometimes windows says this for attempts to remove a dir
if os.path.isdir(sys_path): # pragma: nocover
raise errors.FileExpected(path)
if error.errno == errno.EPERM and sys.platform == "darwin":
# sometimes OSX says this for attempts to remove a dir
if os.path.isdir(sys_path): # pragma: nocover
raise errors.FileExpected(path)
raise
def removedir(self, path):
self.check()
_path = self.validatepath(path)
if _path == '/':
raise errors.RemoveRootError()
        sys_path = self._to_sys_path(_path)
with convert_os_errors('removedir', path, directory=True):
os.rmdir(sys_path)
# --------------------------------------------------------
# Optional Methods
# --------------------------------------------------------
def getsyspath(self, path):
sys_path = self._to_sys_path(path)
return sys_path
def geturl(self, path, purpose='download'):
if purpose != 'download':
raise NoURL(path, purpose)
return "file://" + self.getsyspath(path)
def gettype(self, path):
self.check()
sys_path = self._to_sys_path(path)
with convert_os_errors('gettype', path):
            _stat = os.stat(sys_path)
        resource_type = self._get_type_from_stat(_stat)
return resource_type
def islink(self, path):
self.check()
_path = self.validatepath(path)
sys_path = self._to_sys_path(_path)
if not self.exists(path):
raise errors.ResourceNotFound(path)
with convert_os_errors('islink', path):
return os.path.islink(sys_path)
def open(self,
path,
mode="r",
buffering=-1,
encoding=None,
errors=None,
newline='',
line_buffering=False,
**options):
_mode = Mode(mode)
validate_open_mode(mode)
self.check()
_path = self.validatepath(path)
sys_path = self._to_sys_path(_path)
with convert_os_errors('open', path):
if six.PY2 and _mode.exclusive and self.exists(path):
raise FileExists(path)
_encoding = encoding or 'utf-8'
return io.open(
sys_path,
mode=_mode.to_platform(),
buffering=buffering,
encoding=None if _mode.binary else _encoding,
errors=errors,
newline=None if _mode.binary else newline,
**options
)
def setinfo(self, path, info):
self.check()
_path = self.validatepath(path)
sys_path = self._to_sys_path(_path)
if not os.path.exists(sys_path):
raise errors.ResourceNotFound(path)
if 'details' in info:
details = info['details']
if 'accessed' in details or 'modified' in details:
accessed = details.get("accessed")
modified = details.get("modified", accessed)
accessed = int(modified if accessed is None else accessed)
modified = int(modified)
if accessed is not None or modified is not None:
with convert_os_errors('setinfo', path):
os.utime(sys_path, (accessed, modified))
if scandir:
def _scandir(self, path, namespaces=None):
self.check()
namespaces = namespaces or ()
_path = self.validatepath(path)
sys_path = self._to_sys_path(_path)
with convert_os_errors('scandir', path, directory=True):
for dir_entry in scandir(sys_path):
info = {
"basic": {
"name": dir_entry.name,
"is_dir": dir_entry.is_dir()
}
}
if 'details' in namespaces:
stat_result = dir_entry.stat()
info['details'] = \
self._make_details_from_stat(stat_result)
if 'stat' in namespaces:
stat_result = dir_entry.stat()
info['stat'] = {
k: getattr(stat_result, k)
for k in dir(stat_result) if k.startswith('st_')
}
if 'lstat' in namespaces:
lstat_result = dir_entry.stat(follow_symlinks=False)
info['lstat'] = {
k: getattr(lstat_result, k)
for k in dir(lstat_result) if k.startswith('st_')
}
if 'link' in namespaces:
info['link'] = self._make_link_info(
os.path.join(sys_path, dir_entry.name)
)
if 'access' in namespaces:
stat_result = dir_entry.stat()
info['access'] =\
self._make_access_from_stat(stat_result)
yield Info(info)
else:
def _scandir(self, path, namespaces=None):
self.check()
namespaces = namespaces or ()
_path = self.validatepath(path)
sys_path = self._to_sys_path(_path)
with convert_os_errors('scandir', path, directory=True):
for entry_name in os.listdir(sys_path):
entry_path = os.path.join(sys_path, entry_name)
stat_result = os.stat(entry_path)
info = {
"basic": {
"name": entry_name,
"is_dir": stat.S_ISDIR(stat_result.st_mode),
}
}
if 'details' in namespaces:
info['details'] = \
self._make_details_from_stat(stat_result)
if 'stat' in namespaces:
info['stat'] = {
k: getattr(stat_result, k)
for k in dir(stat_result) if k.startswith('st_')
}
if 'lstat' in namespaces:
lstat_result = os.lstat(entry_path)
info['lstat'] = {
k: getattr(lstat_result, k)
for k in dir(lstat_result) if k.startswith('st_')
}
if 'link' in namespaces:
info['link'] = self._make_link_info(
os.path.join(sys_path, entry_name)
)
if 'access' in namespaces:
info['access'] =\
self._make_access_from_stat(stat_result)
yield Info(info)
def scandir(self, path, namespaces=None, page=None):
iter_info = self._scandir(path, namespaces=namespaces)
if page is not None:
start, end = page
iter_info = itertools.islice(iter_info, start, end)
return iter_info
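# Editorial sketch (illustrative): the `page` argument slices the scandir iterator,
# which helps with very large directories.
#
#   # home = OSFS("~/")
#   # first_ten = list(home.scandir("/", namespaces=["details"], page=(0, 10)))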
avg_line_length: 35.264646 | max_line_length: 78 | alphanum_fraction: 0.522113

hexsha: 1070d84f0e5849f13d21d4317dbe671c16e317aa | size: 10,473 | ext: py | lang: Python
max_stars_repo_path: release/stubs.min/Autodesk/Revit/DB/__init___parts/Curve.py | max_stars_repo_name: YKato521/ironpython-stubs | max_stars_repo_head_hexsha: b1f7c580de48528490b3ee5791b04898be95a9ae | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: release/stubs.min/Autodesk/Revit/DB/__init___parts/Curve.py | max_issues_repo_name: YKato521/ironpython-stubs | max_issues_repo_head_hexsha: b1f7c580de48528490b3ee5791b04898be95a9ae | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: release/stubs.min/Autodesk/Revit/DB/__init___parts/Curve.py | max_forks_repo_name: YKato521/ironpython-stubs | max_forks_repo_head_hexsha: b1f7c580de48528490b3ee5791b04898be95a9ae | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
class Curve(GeometryObject, IDisposable):
""" A parametric curve. """
def Clone(self):
"""
Clone(self: Curve) -> Curve
Returns a copy of this curve.
Returns: A copy of this curve.
"""
pass
def ComputeDerivatives(self, parameter, normalized):
"""
ComputeDerivatives(self: Curve,parameter: float,normalized: bool) -> Transform
Returns the vectors describing the curve at the specified parameter.
parameter: The parameter to be evaluated.
normalized: If false,param is interpreted as natural parameterization of the curve.
If
true,param is expected to be in [0,1] interval mapped to the bounds of the
curve. Setting to true is valid only if the curve is bound.
Returns: The transformation containing the point on the curve,the tangent vector,
derivative of tangent vector,and bi-normal vector.
"""
pass
def ComputeNormalizedParameter(self, rawParameter):
"""
ComputeNormalizedParameter(self: Curve,rawParameter: float) -> float
Computes the normalized curve parameter from the raw parameter.
rawParameter: The raw parameter.
Returns: The real number equal to the normalized curve parameter.
"""
pass
def ComputeRawParameter(self, normalizedParameter):
"""
ComputeRawParameter(self: Curve,normalizedParameter: float) -> float
Computes the raw parameter from the normalized parameter.
normalizedParameter: The normalized parameter.
Returns: The real number equal to the raw curve parameter.
"""
pass
def CreateOffset(self, offsetDist, referenceVector):
"""
CreateOffset(self: Curve,offsetDist: float,referenceVector: XYZ) -> Curve
Creates a new curve that is an offset of the existing curve.
offsetDist: The signed distance that controls the offset.
referenceVector: A reference vector to define the offset direction.
Returns: The new curve.
"""
pass
def CreateReversed(self):
"""
CreateReversed(self: Curve) -> Curve
Creates a new curve with the opposite orientation of the existing curve.
Returns: The new curve.
"""
pass
def CreateTransformed(self, transform):
"""
CreateTransformed(self: Curve,transform: Transform) -> Curve
        Creates a new instance of a curve as a transformation of this curve.
transform: The transform to apply.
Returns: The new curve.
"""
pass
def Dispose(self):
""" Dispose(self: APIObject,A_0: bool) """
pass
def Distance(self, point):
"""
Distance(self: Curve,point: XYZ) -> float
Returns the shortest distance from the specified point to this curve.
point: The specified point.
Returns: The real number equal to the shortest distance.
"""
pass
def Evaluate(self, parameter, normalized):
"""
Evaluate(self: Curve,parameter: float,normalized: bool) -> XYZ
Evaluates and returns the point that matches a parameter along the curve.
parameter: The parameter to be evaluated.
normalized: If false,param is interpreted as natural parameterization of the curve.
If
true,param is expected to be in [0,1] interval mapped to the bounds of the
curve. Setting to true is valid only if the curve is bound.
Returns: The point evaluated along the curve.
"""
pass
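    # Editorial sketch (hypothetical Revit API usage, not part of the generated stub):
    #
    #   # mid_pt = curve.Evaluate(0.5, True)   # XYZ at the normalized-parameter midpoint
    #   # start  = curve.GetEndPoint(0)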
def GetEndParameter(self, index):
"""
GetEndParameter(self: Curve,index: int) -> float
Returns the raw parameter value at the start or end of this curve.
index: 0 for the start or 1 for end of the curve.
Returns: The parameter.
"""
pass
def GetEndPoint(self, index):
"""
GetEndPoint(self: Curve,index: int) -> XYZ
Returns the 3D point at the start or end of this curve.
index: 0 for the start or 1 for end of the curve.
Returns: The curve endpoint.
"""
pass
def GetEndPointReference(self, index):
"""
GetEndPointReference(self: Curve,index: int) -> Reference
Returns a stable reference to the start point or the end point of the curve.
index: Use 0 for the start point; 1 for the end point.
        Returns: Reference to the point or null if the reference cannot be obtained.
"""
pass
def Intersect(self, curve, resultArray=None):
"""
Intersect(self: Curve,curve: Curve) -> SetComparisonResult
Calculates the intersection of this curve with the specified curve.
curve: The specified curve to intersect with this curve.
Returns: SetComparisonResult.Overlap - One or more intersections were encountered.
SetComparisonResult.Subset - The inputs are parallel lines with only one common
intersection point,or
the curve used to invoke the intersection check is a
line entirely within the unbound line passed as argument
curve.SetComparisonResult.Superset - The input curve is entirely within the
unbound line used to invoke the intersection check.SetComparisonResult.Disjoint
- There is no intersection found between the two
curves.SetComparisonResult.Equal - The two curves are identical.
Intersect(self: Curve,curve: Curve) -> (SetComparisonResult,IntersectionResultArray)
Calculates the intersection of this curve with the specified curve and returns
the intersection results.
curve: The specified curve to intersect with this curve.
Returns: SetComparisonResult.Overlap - One or more intersections were encountered. The
output argument has the details.SetComparisonResult.Subset - The inputs are
parallel lines with only one common intersection point,or
the curve used to
invoke the intersection check is a line entirely within the unbound line passed
as argument curve.
If the former,the output argument has the details of the
intersection point.SetComparisonResult.Superset - The input curve is entirely
within the unbound line used to invoke the intersection
check.SetComparisonResult.Disjoint - There is no intersection found between the
two curves.SetComparisonResult.Equal - The two curves are identical.
"""
pass
def IsInside(self, parameter, end=None):
"""
IsInside(self: Curve,parameter: float) -> bool
Indicates whether the specified parameter value is within this curve's bounds.
parameter: The raw curve parameter to be evaluated.
Returns: True if the parameter is within the bounds,otherwise false.
IsInside(self: Curve,parameter: float) -> (bool,int)
Indicates whether the specified parameter value is within this curve's bounds
and outputs the end index.
parameter: The raw curve parameter to be evaluated.
Returns: True if the parameter is within the curve's bounds,otherwise false.
"""
pass
def MakeBound(self, startParameter, endParameter):
"""
MakeBound(self: Curve,startParameter: float,endParameter: float)
Changes the bounds of this curve to the specified values.
startParameter: The new parameter of the start point.
endParameter: The new parameter of the end point.
"""
pass
def MakeUnbound(self):
"""
MakeUnbound(self: Curve)
Makes this curve unbound.
"""
pass
def Project(self, point):
"""
Project(self: Curve,point: XYZ) -> IntersectionResult
Projects the specified point on this curve.
point: The point to be projected.
Returns: Geometric information if projection is successful.
"""
pass
def ReleaseManagedResources(self, *args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: GeometryObject) """
pass
def SetGraphicsStyleId(self, id):
"""
SetGraphicsStyleId(self: Curve,id: ElementId)
Sets the graphics style id for this curve.
id: The id of the GraphicsStyle element from which to apply the curve properties.
"""
pass
def Tessellate(self):
"""
Tessellate(self: Curve) -> IList[XYZ]
Valid only if the curve is bound. Returns a polyline approximation to the curve.
"""
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
ApproximateLength = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The approximate length of the curve.
Get: ApproximateLength(self: Curve) -> float
"""
IsBound = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Describes whether the parameter of the curve is restricted to a particular interval.
Get: IsBound(self: Curve) -> bool
"""
IsCyclic = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The boolean value that indicates whether this curve is cyclic.
Get: IsCyclic(self: Curve) -> bool
"""
Length = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The exact length of the curve.
Get: Length(self: Curve) -> float
"""
Period = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The period of this curve.
Get: Period(self: Curve) -> float
"""
Reference = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Returns a stable reference to the curve.
Get: Reference(self: Curve) -> Reference
"""
avg_line_length: 22.522581 | max_line_length: 221 | alphanum_fraction: 0.648047

hexsha: 0a057124891471c41a4da9668d67def1ebf87a87 | size: 30,661 | ext: py | lang: Python
max_stars_repo_path: venv/lib/python3.8/site-packages/pandas/tests/frame/methods/test_sort_values.py | max_stars_repo_name: johncollinsai/post-high-frequency-data | max_stars_repo_head_hexsha: 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2018-04-24T13:31:51.000Z | max_stars_repo_stars_event_max_datetime: 2019-07-09T07:31:43.000Z
max_issues_repo_path: venv/lib/python3.8/site-packages/pandas/tests/frame/methods/test_sort_values.py | max_issues_repo_name: johncollinsai/post-high-frequency-data | max_issues_repo_head_hexsha: 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2019-12-14T16:32:46.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-12T00:32:28.000Z
max_forks_repo_path: venv/lib/python3.8/site-packages/pandas/tests/frame/methods/test_sort_values.py | max_forks_repo_name: johncollinsai/post-high-frequency-data | max_forks_repo_head_hexsha: 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2018-04-24T13:31:56.000Z | max_forks_repo_forks_event_max_datetime: 2021-10-21T05:06:23.000Z
content:
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories_raises(self):
df = DataFrame(
{
"c": Categorical(
["A", np.nan, "B", np.nan, "C"],
categories=["A", "B", "C"],
ordered=True,
)
}
)
with pytest.raises(ValueError, match="invalid na_position: bad_position"):
df.sort_values(by="c", ascending=False, na_position="bad_position")
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
True,
[0, 1, 2],
),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
False,
[2, 1, 0],
),
],
)
def test_sort_values_ignore_index(
self, inplace, original_dict, sorted_dict, ignore_index, output_index
):
# GH 30114
df = DataFrame(original_dict)
expected = DataFrame(sorted_dict, index=output_index)
kwargs = {"ignore_index": ignore_index, "inplace": inplace}
if inplace:
result_df = df.copy()
result_df.sort_values("A", ascending=False, **kwargs)
else:
result_df = df.sort_values("A", ascending=False, **kwargs)
tm.assert_frame_equal(result_df, expected)
tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_item_cache(self, using_array_manager):
        # previous behavior incorrectly retained an invalid _item_cache entry
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
if not using_array_manager:
assert len(df._mgr.blocks) == 2
df.sort_values(by="A")
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
tm.assert_frame_equal(df, expected)
class TestDataFrameSortKey: # test key sorting (issue 27237)
def test_sort_values_inplace_key(self, sort_by_key):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key)
assert return_value is None
expected = frame.sort_values(by="A", key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=1, axis=1, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by=1, axis=1, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by="A", ascending=False, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True, key=sort_by_key
)
expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_key(self):
df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan]))
result = df.sort_values(0)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: x + 5)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
result = df.sort_values("a", key=lambda x: -x)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x)
expected = df.iloc[[3, 1, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 1, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key_by_name(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
def key(col):
if col.name == "a":
return -col
else:
return col
result = df.sort_values(by="a", key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by="b", key=key)
expected = df.iloc[[0, 1, 4, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_string(self):
df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
result = df.sort_values(1)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values([0, 1], key=lambda col: col.str.lower())
tm.assert_frame_equal(result, df)
result = df.sort_values(
[0, 1], key=lambda col: col.str.lower(), ascending=False
)
expected = df.sort_values(1, key=lambda col: col.str.lower(), ascending=False)
tm.assert_frame_equal(result, expected)
def test_sort_values_key_empty(self, sort_by_key):
df = DataFrame(np.array([]))
df.sort_values(0, key=sort_by_key)
df.sort_index(key=sort_by_key)
def test_changes_length_raises(self):
df = DataFrame({"A": [1, 2, 3]})
with pytest.raises(ValueError, match="change the shape"):
df.sort_values("A", key=lambda x: x[:1])
def test_sort_values_key_axes(self):
df = DataFrame({0: ["Hello", "goodbye"], 1: [0, 1]})
result = df.sort_values(0, key=lambda col: col.str.lower())
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_dict_axis(self):
df = DataFrame({0: ["Hello", 0], 1: ["goodbye", 1]})
result = df.sort_values(0, key=lambda col: col.str.lower(), axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col, axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_sort_values_key_casts_to_categorical(self, ordered):
# https://github.com/pandas-dev/pandas/issues/36383
categories = ["c", "b", "a"]
df = DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})
def sorter(key):
if key.name == "y":
return pd.Series(
Categorical(key, categories=categories, ordered=ordered)
)
return key
result = df.sort_values(by=["x", "y"], key=sorter)
expected = DataFrame(
{"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_none():
return DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 2, 2, 1, 1],
"A": np.arange(6, 0, -1),
("B", 5): ["one", "one", "two", "two", "one", "one"],
}
)
@pytest.fixture(params=[["outer"], ["outer", "inner"]])
def df_idx(request, df_none):
levels = request.param
return df_none.set_index(levels)
@pytest.fixture(
params=[
"inner", # index level
["outer"], # list of index level
"A", # column
[("B", 5)], # list of column
["inner", "outer"], # two index levels
[("B", 5), "outer"], # index level and column
["A", ("B", 5)], # Two columns
        ["inner", "outer", ("B", 5)],  # two index levels and column
]
)
def sort_names(request):
return request.param
@pytest.fixture(params=[True, False])
def ascending(request):
return request.param
class TestSortValuesLevelAsStr:
def test_sort_index_level_and_column_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get index levels from df_idx
levels = df_idx.index.names
        # Compute expected by sorting on columns and then setting the index
expected = df_none.sort_values(
by=sort_names, ascending=ascending, axis=0
).set_index(levels)
        # Compute result by sorting on a mix of columns and index levels
result = df_idx.sort_values(by=sort_names, ascending=ascending, axis=0)
tm.assert_frame_equal(result, expected)
def test_sort_column_level_and_index_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on axis=0, setting index levels, and then
# transposing. For some cases this will result in a frame with
# multiple column levels
expected = (
df_none.sort_values(by=sort_names, ascending=ascending, axis=0)
.set_index(levels)
.T
)
# Compute result by transposing and sorting on axis=1.
result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)
tm.assert_frame_equal(result, expected)
def test_sort_values_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
df = DataFrame({"a": [1, 2, 3]})
msg = (
r"In a future version of pandas all arguments of DataFrame\.sort_values "
r"except for the argument 'by' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = df.sort_values("a", 0)
expected = DataFrame({"a": [1, 2, 3]})
tm.assert_frame_equal(result, expected)
def test_sort_values_validate_ascending_for_value_error(self):
# GH41634
df = DataFrame({"D": [23, 7, 21]})
msg = 'For argument "ascending" expected type bool, received type str.'
with pytest.raises(ValueError, match=msg):
df.sort_values(by="D", ascending="False")
@pytest.mark.parametrize("ascending", [False, 0, 1, True])
def test_sort_values_validate_ascending_functional(self, ascending):
df = DataFrame({"D": [23, 7, 21]})
indexer = df["D"].argsort().values
if not ascending:
indexer = indexer[::-1]
expected = df.loc[df.index[indexer]]
result = df.sort_values(by="D", ascending=ascending)
tm.assert_frame_equal(result, expected)
| 34.723669
| 88
| 0.545449
|
8853317691c94c0987ca651dbb64906659c10ead
| 1,808
|
py
|
Python
|
djangobbs/install/models.py
|
JuanbingTeam/djangobbs
|
2d52d83b80758e153b0604e71fb0cef4e6528275
|
[
"Apache-2.0"
] | null | null | null |
djangobbs/install/models.py
|
JuanbingTeam/djangobbs
|
2d52d83b80758e153b0604e71fb0cef4e6528275
|
[
"Apache-2.0"
] | null | null | null |
djangobbs/install/models.py
|
JuanbingTeam/djangobbs
|
2d52d83b80758e153b0604e71fb0cef4e6528275
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#coding=utf-8
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django.utils.translation import ugettext as _T
from cPickle import loads, dumps
# Create your models here.
class ConfigTree(models.Model):
    """Tree-structured registry used to store the site configuration."""
    parent = models.ForeignKey('self', null=True, default=None, blank=True)  # Parent node; None means this is a root node
    key = models.CharField(max_length=100, db_index=True)  # Key name
    content = models.TextField(blank=True, default="")  # Content; a pickle-serialized string is recommended
def __unicode__(self):
if self.parent == None:
return unicode(self.key)
else:
return unicode(self.parent) + u"/" + unicode(self.key)
@staticmethod
def get_config(path, default=None):
path = path.split("/")
record = None
try:
for i in path:
if i != "":
record = ConfigTree.objects.get(parent=record, key=i)
return loads(str(record.content))
except ConfigTree.DoesNotExist:
return default
@staticmethod
def put_config(path, data):
path = path.split("/")
record = None
for i in path:
if i != "":
try:
record = ConfigTree.objects.get(parent=record, key=i)
except ConfigTree.DoesNotExist:
result = ConfigTree()
result.parent = record
result.key = i
result.content = dumps(None)
result.save()
record = result
record.content = dumps(data)
record.save()
admin.site.register(ConfigTree)
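# A minimal usage sketch for ConfigTree (hypothetical path and values, not part
# of the original module): put_config pickles the value into `content` along a
# slash-separated key path, and get_config unpickles it back, returning the
# default when any segment of the path is missing.
#     ConfigTree.put_config("site/appearance/theme", {"name": "dark", "columns": 2})
#     theme = ConfigTree.get_config("site/appearance/theme", default={})
#     missing = ConfigTree.get_config("no/such/path", default=None)  # -> None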
| 31.719298
| 94
| 0.545907
|
af278234817cff8810d67db0fd87391fd1b6685b
| 1,125
|
py
|
Python
|
SchemaCollaboration/core/management/commands/create_default_data_package_status.py
|
kant/schema-collaboration
|
9233141dc14aa3dccf41dbdc17c8d25f55e5c640
|
[
"MIT"
] | null | null | null |
SchemaCollaboration/core/management/commands/create_default_data_package_status.py
|
kant/schema-collaboration
|
9233141dc14aa3dccf41dbdc17c8d25f55e5c640
|
[
"MIT"
] | null | null | null |
SchemaCollaboration/core/management/commands/create_default_data_package_status.py
|
kant/schema-collaboration
|
9233141dc14aa3dccf41dbdc17c8d25f55e5c640
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from core.models import DatapackageStatus
class Command(BaseCommand):
help = 'Creates default data package status'
def add_arguments(self, parser):
parser.add_argument('--only-if-no-status', action='store_true')
def handle(self, *args, **options):
if options['only_if_no_status'] and DatapackageStatus.objects.count() > 0:
print('Database already contains status - doing nothing')
return
create_status(['Draft', 'In Progress', 'Completed'])
draft_status = DatapackageStatus.objects.get(name='Draft')
draft_status.behaviour = DatapackageStatus.StatusBehaviour.DEFAULT_ON_DATAPACKAGE_CREATION
draft_status.save()
def create_status(status_names):
for status_name in status_names:
try:
DatapackageStatus.objects.create(name=status_name)
except IntegrityError:
            print(f'Cannot create status name="{status_name}". Does it already exist?')
continue
print(f'Created: {status_name} status')
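# Example invocation (assuming a standard Django project layout; the command
# name is taken from this file's location under management/commands/):
#     python manage.py create_default_data_package_status --only-if-no-status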
| 33.088235
| 98
| 0.696
|
2a262b6ff2ca0186f75ba4eb7a319d12b9194415
| 990
|
py
|
Python
|
custom_components/freehands/entity.py
|
riveccia/homeassistant_freehands
|
66ca8f0f0121a269ca7fa85212eef42c15afbdf3
|
[
"MIT"
] | null | null | null |
custom_components/freehands/entity.py
|
riveccia/homeassistant_freehands
|
66ca8f0f0121a269ca7fa85212eef42c15afbdf3
|
[
"MIT"
] | null | null | null |
custom_components/freehands/entity.py
|
riveccia/homeassistant_freehands
|
66ca8f0f0121a269ca7fa85212eef42c15afbdf3
|
[
"MIT"
] | null | null | null |
"""FreehandsEntity class"""
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ATTRIBUTION
from .const import DOMAIN
from .const import NAME
from .const import VERSION
class FreehandsEntity(CoordinatorEntity):
def __init__(self, coordinator, config_entry):
super().__init__(coordinator)
self.config_entry = config_entry
@property
def unique_id(self):
"""Return a unique ID to use for this entity."""
return self.config_entry.entry_id
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": NAME,
"model": VERSION,
"manufacturer": NAME,
}
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
"attribution": ATTRIBUTION,
"id": str(self.coordinator.data.get("id")),
"integration": DOMAIN,
}
| 26.756757
| 70
| 0.627273
|
01245a3a72739f3ec80c033e782c308fa155d0d0
| 290
|
py
|
Python
|
validadores/deposito.py
|
mayronceccon/olist-python-labs-project
|
217188212158ba2401866e06173d762e346ebeee
|
[
"MIT"
] | null | null | null |
validadores/deposito.py
|
mayronceccon/olist-python-labs-project
|
217188212158ba2401866e06173d762e346ebeee
|
[
"MIT"
] | null | null | null |
validadores/deposito.py
|
mayronceccon/olist-python-labs-project
|
217188212158ba2401866e06173d762e346ebeee
|
[
"MIT"
] | null | null | null |
from validadores.regras.valor import Valor as ValidacaoValor
from validadores.regras.valor_negativo import ValorNegativo as ValidacaoValorNegativo
class Deposito():
def __init__(self, valor=0):
ValidacaoValor(valor).is_valid()
ValidacaoValorNegativo(valor).is_valid()
| 32.222222
| 85
| 0.782759
|
e89d380b5f0f91f9495d566b644621ef685f2d17
| 12,768
|
py
|
Python
|
youtube-8m-ensemble/inference-combine-tfrecords-video.py
|
wangheda/youtube-8m
|
07e54b387ee027cb58b0c14f5eb7c88cfa516d58
|
[
"Apache-2.0"
] | 196
|
2017-06-16T12:06:56.000Z
|
2022-02-18T10:50:43.000Z
|
youtube-8m-ensemble/inference-combine-tfrecords-video.py
|
wangheda/youtube-8m
|
07e54b387ee027cb58b0c14f5eb7c88cfa516d58
|
[
"Apache-2.0"
] | 5
|
2017-08-04T02:37:34.000Z
|
2018-10-27T18:32:38.000Z
|
youtube-8m-ensemble/inference-combine-tfrecords-video.py
|
wangheda/youtube-8m
|
07e54b387ee027cb58b0c14f5eb7c88cfa516d58
|
[
"Apache-2.0"
] | 71
|
2017-06-20T15:04:13.000Z
|
2021-10-06T16:43:32.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary for combine model output and model input into one set of files."""
import os
import time
import numpy
import numpy as np
import tensorflow as tf
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
import utils
import eval_util
import losses
import readers
import ensemble_level_models
FLAGS = flags.FLAGS
if __name__ == '__main__':
flags.DEFINE_string("output_dir", "",
"The file to save the predictions to.")
flags.DEFINE_string(
"input_data_pattern", "",
"File globs defining the input dataset in tensorflow.SequenceExample format.")
flags.DEFINE_string(
"prediction_data_pattern", "",
"File globs defining the output dataset in tensorflow.SequenceExample format.")
flags.DEFINE_string("input_feature_names", "mean_rgb,mean_audio", "Name of the feature "
"to use for training.")
flags.DEFINE_string("input_feature_sizes", "1024,128", "Length of the feature vectors.")
flags.DEFINE_string("prediction_feature_names", "predictions", "Name of the feature "
"to use for training.")
flags.DEFINE_string("prediction_feature_sizes", "4716", "Length of the feature vectors.")
flags.DEFINE_integer("batch_size", 256,
"How many examples to process per batch.")
flags.DEFINE_integer("file_size", 4096,
"Number of frames per batch for DBoF.")
def find_class_by_name(name, modules):
"""Searches the provided modules for the named class and returns it."""
modules = [getattr(module, name, None) for module in modules]
return next(a for a in modules if a)
def get_input_data_tensors(reader,
data_pattern,
batch_size=256):
logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
with tf.name_scope("eval_input"):
files = gfile.Glob(data_pattern)
if not files:
raise IOError("Unable to find the evaluation files.")
logging.info("number of evaluation files: " + str(len(files)))
files.sort()
filename_queue = tf.train.string_input_producer(
files, shuffle=False, num_epochs=1)
eval_data = reader.prepare_reader(filename_queue)
return tf.train.batch(
eval_data,
batch_size=batch_size,
capacity=batch_size,
allow_smaller_final_batch=True,
enqueue_many=True)
def build_graph(input_reader, input_data_pattern,
                prediction_reader, prediction_data_pattern,
                batch_size=256):
  """Creates the Tensorflow graph for joining model inputs with model predictions.
  Args:
    input_reader: Reader for the input dataset. It should inherit from
      BaseReader.
    input_data_pattern: glob path to the input data files.
    prediction_reader: Reader for the prediction dataset. It should inherit
      from BaseReader.
    prediction_data_pattern: glob path to the prediction data files.
    batch_size: How many examples to process at a time.
  """
video_ids_batch, model_inputs_batch, labels_batch, unused_num_frames = (
get_input_data_tensors(
input_reader,
input_data_pattern,
batch_size=batch_size))
video_ids_batch2, model_predictions_batch, labels_batch2, unused_num_frames2 = (
get_input_data_tensors(
prediction_reader,
prediction_data_pattern,
batch_size=batch_size))
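  # Sanity-check tensors: both readers consume sorted, non-shuffled file queues,
  # so the ids and labels coming from the input files and the prediction files
  # are expected to line up row-for-row; these means evaluate to 1.0 when they do.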
video_ids_equal = tf.reduce_mean(tf.cast(tf.equal(video_ids_batch, video_ids_batch2), tf.float32))
labels_equal = tf.reduce_mean(tf.reduce_sum(tf.cast(tf.equal(labels_batch, labels_batch2), tf.float32), axis=1))
tf.add_to_collection("video_ids_equal", video_ids_equal)
tf.add_to_collection("labels_equal", labels_equal)
tf.add_to_collection("video_ids_batch", video_ids_batch)
tf.add_to_collection("labels_batch", tf.cast(labels_batch, tf.float32))
tf.add_to_collection("inputs_batch", model_inputs_batch)
tf.add_to_collection("predictions_batch", model_predictions_batch)
def inference_loop(video_ids_batch, labels_batch, inputs_batch, predictions_batch, video_ids_equal, labels_equal,
output_dir, batch_size):
with tf.Session() as sess:
sess.run([tf.local_variables_initializer()])
# Start the queue runners.
fetches = [video_ids_batch, labels_batch, inputs_batch, predictions_batch, video_ids_equal, labels_equal]
coord = tf.train.Coordinator()
start_time = time.time()
video_ids = []
video_labels = []
video_inputs = []
video_predictions = []
filenum = 0
num_examples_processed = 0
total_num_examples_processed = 0
directory = FLAGS.output_dir
if not os.path.exists(directory):
os.makedirs(directory)
else:
raise IOError("Output path exists! path='" + directory + "'")
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(
sess, coord=coord, daemon=True,
start=True))
while not coord.should_stop():
ids_val = None
ids_val, labels_val, inputs_val, predictions_val, ids_equal_val, labels_equal_val = sess.run(fetches)
print "ids equal = %f" % (ids_equal_val)
print "labels equal = %f" % (labels_equal_val)
video_ids.append(ids_val)
video_labels.append(labels_val)
video_inputs.append(inputs_val)
video_predictions.append(predictions_val)
num_examples_processed += len(ids_val)
ids_shape = ids_val.shape[0]
inputs_shape = inputs_val.shape[0]
predictions_shape = predictions_val.shape[0]
assert ids_shape == inputs_shape == predictions_shape, "tensor ids(%d), inputs(%d) and predictions(%d) should have equal rows" % (ids_shape, inputs_shape, predictions_shape)
ids_val = None
if num_examples_processed >= FLAGS.file_size:
assert num_examples_processed==FLAGS.file_size, "num_examples_processed should be equal to %d"%FLAGS.file_size
video_ids = np.concatenate(video_ids, axis=0)
video_labels = np.concatenate(video_labels, axis=0)
video_inputs = np.concatenate(video_inputs, axis=0)
video_predictions = np.concatenate(video_predictions, axis=0)
write_to_record(video_ids, video_labels, video_inputs, video_predictions, filenum, num_examples_processed)
video_ids = []
video_labels = []
video_inputs = []
video_predictions = []
filenum += 1
total_num_examples_processed += num_examples_processed
now = time.time()
logging.info("num examples processed: " + str(num_examples_processed) + " elapsed seconds: " + "{0:.2f}".format(now-start_time))
num_examples_processed = 0
except tf.errors.OutOfRangeError as e:
if ids_val is not None:
video_ids.append(ids_val)
video_labels.append(labels_val)
video_inputs.append(inputs_val)
video_predictions.append(predictions_val)
num_examples_processed += len(ids_val)
if 0 < num_examples_processed <= FLAGS.file_size:
video_ids = np.concatenate(video_ids, axis=0)
video_labels = np.concatenate(video_labels, axis=0)
video_inputs = np.concatenate(video_inputs, axis=0)
video_predictions = np.concatenate(video_predictions, axis=0)
write_to_record(video_ids, video_labels, video_inputs, video_predictions, filenum, num_examples_processed)
total_num_examples_processed += num_examples_processed
now = time.time()
logging.info("num examples processed: " + str(num_examples_processed) + " elapsed seconds: " + "{0:.2f}".format(now-start_time))
num_examples_processed = 0
logging.info("Done with inference. %d samples was written to %s" % (total_num_examples_processed, FLAGS.output_dir))
except Exception as e: # pylint: disable=broad-except
logging.info("Unexpected exception: " + str(e))
finally:
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def write_to_record(video_ids, video_labels, video_inputs, video_predictions, filenum, num_examples_processed):
writer = tf.python_io.TFRecordWriter(FLAGS.output_dir + '/' + 'predictions-%04d.tfrecord' % filenum)
for i in range(num_examples_processed):
video_id = video_ids[i]
video_label = np.nonzero(video_labels[i,:])[0]
video_input = video_inputs[i,:]
video_prediction = video_predictions[i,:]
example = get_output_feature(video_id, video_label, video_input, video_prediction)
serialized = example.SerializeToString()
writer.write(serialized)
writer.close()
def get_output_feature(video_id, video_label, video_input, video_prediction):
feature_maps = {'video_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[video_id])),
'labels': tf.train.Feature(int64_list=tf.train.Int64List(value=video_label))}
input_feature_names = FLAGS.input_feature_names.split(",")
input_feature_sizes = map(int, FLAGS.input_feature_sizes.split(","))
input_feature_start = 0
for i in range(len(input_feature_names)):
feature_maps[input_feature_names[i]] = tf.train.Feature(
float_list=tf.train.FloatList(
value=video_input[
input_feature_start :
input_feature_start + input_feature_sizes[i]]))
input_feature_start += input_feature_sizes[i]
prediction_feature_names = FLAGS.prediction_feature_names.split(",")
prediction_feature_sizes = map(int, FLAGS.prediction_feature_sizes.split(","))
prediction_feature_start = 0
for i in range(len(prediction_feature_names)):
feature_maps[prediction_feature_names[i]] = tf.train.Feature(
float_list=tf.train.FloatList(
value=video_prediction[
prediction_feature_start :
prediction_feature_start + prediction_feature_sizes[i]]))
prediction_feature_start += prediction_feature_sizes[i]
example = tf.train.Example(features=tf.train.Features(feature=feature_maps))
return example
def main(unused_argv):
logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
    if FLAGS.input_data_pattern == "":
raise IOError("'input_data_pattern' was not specified. " +
"Nothing to evaluate.")
    if FLAGS.prediction_data_pattern == "":
raise IOError("'prediction_data_pattern' was not specified. " +
"Nothing to evaluate.")
# convert feature_names and feature_sizes to lists of values
input_feature_names, input_feature_sizes = utils.GetListOfFeatureNamesAndSizes(
FLAGS.input_feature_names, FLAGS.input_feature_sizes)
input_reader = readers.EnsembleReader(
feature_names=input_feature_names,
feature_sizes=input_feature_sizes)
prediction_feature_names, prediction_feature_sizes = utils.GetListOfFeatureNamesAndSizes(
FLAGS.prediction_feature_names, FLAGS.prediction_feature_sizes)
prediction_reader = readers.EnsembleReader(
feature_names=prediction_feature_names,
feature_sizes=prediction_feature_sizes)
build_graph(
input_reader=input_reader,
prediction_reader=prediction_reader,
input_data_pattern=FLAGS.input_data_pattern,
prediction_data_pattern=FLAGS.prediction_data_pattern,
batch_size=FLAGS.batch_size)
logging.info("built evaluation graph")
video_ids_equal = tf.get_collection("video_ids_equal")[0]
labels_equal = tf.get_collection("labels_equal")[0]
video_ids_batch = tf.get_collection("video_ids_batch")[0]
labels_batch = tf.get_collection("labels_batch")[0]
inputs_batch = tf.get_collection("inputs_batch")[0]
predictions_batch = tf.get_collection("predictions_batch")[0]
inference_loop(video_ids_batch, labels_batch, inputs_batch, predictions_batch, video_ids_equal, labels_equal,
FLAGS.output_dir, FLAGS.batch_size)
if __name__ == "__main__":
app.run()
| 41.589577
| 181
| 0.703869
|
857e6a644427075b538b72919788fb1210aa4180
| 6,091
|
py
|
Python
|
tutorial/mnist.py
|
NumesSanguis/MLTensor
|
bd5b467f0567254843fd9f7729b65decaa672fed
|
[
"Apache-2.0"
] | 1
|
2015-12-18T02:25:20.000Z
|
2015-12-18T02:25:20.000Z
|
tutorial/mnist.py
|
NumesSanguis/MLTensor
|
bd5b467f0567254843fd9f7729b65decaa672fed
|
[
"Apache-2.0"
] | null | null | null |
tutorial/mnist.py
|
NumesSanguis/MLTensor
|
bd5b467f0567254843fd9f7729b65decaa672fed
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the MNIST network.
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as is required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
TensorFlow install instructions:
https://tensorflow.org/get_started/os_setup.html
MNIST tutorial:
https://tensorflow.org/tutorials/mnist/tf/index.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow.python.platform
import tensorflow as tf
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
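# A rough sketch of how the functions below are wired together by the
# "fully_connected_*.py" drivers (the placeholder names and layer sizes are
# illustrative only, not part of this module):
#     logits = inference(images_placeholder, hidden1_units=128, hidden2_units=32)
#     loss_op = loss(logits, labels_placeholder)
#     train_op = training(loss_op, learning_rate=0.01)
#     eval_correct = evaluation(logits, labels_placeholder)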
def inference(images, hidden1_units, hidden2_units):
"""Build the MNIST model up to where it may be used for inference.
Args:
images: Images placeholder, from inputs().
    hidden1_units: Size of the first hidden layer.
    hidden2_units: Size of the second hidden layer.
Returns:
softmax_linear: Output tensor with the computed logits.
"""
# Hidden 1
with tf.name_scope('hidden1') as scope:
weights = tf.Variable(
tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hidden1_units]),
name='biases')
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope('hidden2') as scope:
weights = tf.Variable(
tf.truncated_normal([hidden1_units, hidden2_units],
stddev=1.0 / math.sqrt(float(hidden1_units))),
name='weights')
biases = tf.Variable(tf.zeros([hidden2_units]),
name='biases')
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
# Linear
with tf.name_scope('softmax_linear') as scope:
weights = tf.Variable(
tf.truncated_normal([hidden2_units, NUM_CLASSES],
stddev=1.0 / math.sqrt(float(hidden2_units))),
name='weights')
biases = tf.Variable(tf.zeros([NUM_CLASSES]),
name='biases')
logits = tf.matmul(hidden2, weights) + biases
return logits
def loss(logits, labels):
"""Calculates the loss from the logits and the labels.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size].
Returns:
loss: Loss tensor of type float.
"""
  # Convert from sparse integer labels in the range [0, NUM_CLASSES)
  # to 1-hot dense float vectors (that is, we will have batch_size vectors,
# each with NUM_CLASSES values, all of which are 0.0 except there will
# be a 1.0 in the entry corresponding to the label).
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, NUM_CLASSES]), 1.0, 0.0)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
onehot_labels,
name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
def training(loss, learning_rate):
"""Sets up the training Ops.
Creates a summarizer to track the loss over time in TensorBoard.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train.
Args:
loss: Loss tensor, from loss().
learning_rate: The learning rate to use for gradient descent.
Returns:
train_op: The Op for training.
"""
# Add a scalar summary for the snapshot loss.
tf.scalar_summary(loss.op.name, loss)
# Create the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# Create a variable to track the global step.
global_step = tf.Variable(0, name='global_step', trainable=False)
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
def evaluation(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_CLASSES).
Returns:
A scalar int32 tensor with the number of examples (out of batch_size)
that were predicted correctly.
"""
# For a classifier model, we can use the in_top_k Op.
# It returns a bool tensor with shape [batch_size] that is true for
  # the examples where the label is in the top k (here k=1)
# of all logits for that example.
correct = tf.nn.in_top_k(logits, labels, 1)
# Return the number of true entries.
return tf.reduce_sum(tf.cast(correct, tf.int32))
| 41.719178
| 80
| 0.692497
|
bf09120ad44c0f7f7c4343e8f93baa8718ab7fc2
| 12,432
|
py
|
Python
|
python/federatedml/model_selection/k_fold.py
|
hust-suwb/FATE
|
05395603994150c1fd2b2645d3f6df1965382e79
|
[
"Apache-2.0"
] | 3,787
|
2019-08-30T04:55:10.000Z
|
2022-03-31T23:30:07.000Z
|
fate/confs-10000/shared_dir/federatedml/model_selection/k_fold.py
|
Huangxy-Minel/flare
|
3c091567bfaedfdf0f0d41b00f3e3d501d572515
|
[
"Apache-2.0"
] | 1,439
|
2019-08-29T16:35:52.000Z
|
2022-03-31T11:55:31.000Z
|
fate/confs-10000/shared_dir/federatedml/model_selection/k_fold.py
|
Huangxy-Minel/flare
|
3c091567bfaedfdf0f0d41b00f3e3d501d572515
|
[
"Apache-2.0"
] | 1,179
|
2019-08-29T16:18:32.000Z
|
2022-03-31T12:55:38.000Z
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
from sklearn.model_selection import KFold as sk_KFold
from fate_arch.session import computing_session as session
from federatedml.evaluation.evaluation import Evaluation
from federatedml.model_selection.cross_validate import BaseCrossValidator
from federatedml.model_selection.indices import collect_index
from federatedml.transfer_variable.transfer_class.cross_validation_transfer_variable import \
CrossValidationTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
class KFold(BaseCrossValidator):
def __init__(self):
super(KFold, self).__init__()
self.model_param = None
self.n_splits = 1
self.shuffle = True
self.random_seed = 1
self.fold_history = None
def _init_model(self, param):
self.model_param = param
self.n_splits = param.n_splits
self.mode = param.mode
self.role = param.role
self.shuffle = param.shuffle
self.random_seed = param.random_seed
self.output_fold_history = param.output_fold_history
self.history_value_type = param.history_value_type
# self.evaluate_param = param.evaluate_param
# np.random.seed(self.random_seed)
def split(self, data_inst):
# header = data_inst.schema.get('header')
schema = data_inst.schema
data_sids_iter, data_size = collect_index(data_inst)
data_sids = []
key_type = None
for sid, _ in data_sids_iter:
if key_type is None:
key_type = type(sid)
data_sids.append(sid)
data_sids = np.array(data_sids)
# if self.shuffle:
# np.random.shuffle(data_sids)
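        # scikit-learn's KFold ignores (and in recent versions rejects) a
        # random_state when shuffle=False, so only pass the seed when shuffling.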
random_state = self.random_seed if self.shuffle else None
kf = sk_KFold(n_splits=self.n_splits, shuffle=self.shuffle, random_state=random_state)
n = 0
for train, test in kf.split(data_sids):
train_sids = data_sids[train]
test_sids = data_sids[test]
n += 1
train_sids_table = [(key_type(x), 1) for x in train_sids]
test_sids_table = [(key_type(x), 1) for x in test_sids]
train_table = session.parallelize(train_sids_table,
include_key=True,
partition=data_inst.partitions)
train_data = data_inst.join(train_table, lambda x, y: x)
test_table = session.parallelize(test_sids_table,
include_key=True,
partition=data_inst.partitions)
test_data = data_inst.join(test_table, lambda x, y: x)
train_data.schema = schema
test_data.schema = schema
yield train_data, test_data
@staticmethod
def generate_new_id(id, fold_num, data_type):
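        # e.g. generate_new_id("sid_42", 0, "train") -> "sid_42#fold0#train"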
return f"{id}#fold{fold_num}#{data_type}"
def transform_history_data(self, data, predict_data, fold_num, data_type):
if self.history_value_type == "score":
if predict_data is not None:
history_data = predict_data.map(lambda k, v: (KFold.generate_new_id(k, fold_num, data_type), v))
history_data.schema = copy.deepcopy(predict_data.schema)
else:
history_data = data.map(lambda k, v: (KFold.generate_new_id(k, fold_num, data_type), fold_num))
schema = copy.deepcopy(data.schema)
schema["header"] = ["fold_num"]
history_data.schema = schema
elif self.history_value_type == "instance":
history_data = data.map(lambda k, v: (KFold.generate_new_id(k, fold_num, data_type), v))
history_data.schema = copy.deepcopy(data.schema)
else:
            raise ValueError(f"unknown history value type: {self.history_value_type}")
return history_data
def run(self, component_parameters, data_inst, original_model, host_do_evaluate):
self._init_model(component_parameters)
if data_inst is None:
self._arbiter_run(original_model)
return
total_data_count = data_inst.count()
LOGGER.debug("data_inst count: {}".format(data_inst.count()))
if self.output_fold_history:
if total_data_count * self.n_splits > consts.MAX_SAMPLE_OUTPUT_LIMIT:
LOGGER.warning(f"max sample output limit {consts.MAX_SAMPLE_OUTPUT_LIMIT} exceeded with n_splits ({self.n_splits}) * instance_count ({total_data_count})")
if self.mode == consts.HOMO or self.role == consts.GUEST:
data_generator = self.split(data_inst)
else:
data_generator = [(data_inst, data_inst)] * self.n_splits
fold_num = 0
summary_res = {}
for train_data, test_data in data_generator:
model = copy.deepcopy(original_model)
LOGGER.debug("In CV, set_flowid flowid is : {}".format(fold_num))
model.set_flowid(fold_num)
model.set_cv_fold(fold_num)
LOGGER.info("KFold fold_num is: {}".format(fold_num))
if self.mode == consts.HETERO:
train_data = self._align_data_index(train_data, model.flowid, consts.TRAIN_DATA)
LOGGER.info("Train data Synchronized")
test_data = self._align_data_index(test_data, model.flowid, consts.TEST_DATA)
LOGGER.info("Test data Synchronized")
LOGGER.debug("train_data count: {}".format(train_data.count()))
if train_data.count() + test_data.count() != total_data_count:
                raise EnvironmentError("In cv fold: {}, train count: {}, test count: {}, original data count: {}. "
"Thus, 'train count + test count = total count' condition is not satisfied"
.format(fold_num, train_data.count(), test_data.count(), total_data_count))
this_flowid = 'train.' + str(fold_num)
LOGGER.debug("In CV, set_flowid flowid is : {}".format(this_flowid))
model.set_flowid(this_flowid)
model.fit(train_data, test_data)
this_flowid = 'predict_train.' + str(fold_num)
LOGGER.debug("In CV, set_flowid flowid is : {}".format(this_flowid))
model.set_flowid(this_flowid)
train_pred_res = model.predict(train_data)
# if train_pred_res is not None:
if self.role == consts.GUEST or host_do_evaluate:
fold_name = "_".join(['train', 'fold', str(fold_num)])
train_pred_res = train_pred_res.mapValues(lambda value: value + ['train'])
train_pred_res = model.set_predict_data_schema(train_pred_res, train_data.schema)
# LOGGER.debug(f"train_pred_res schema: {train_pred_res.schema}")
self.evaluate(train_pred_res, fold_name, model)
this_flowid = 'predict_validate.' + str(fold_num)
LOGGER.debug("In CV, set_flowid flowid is : {}".format(this_flowid))
model.set_flowid(this_flowid)
test_pred_res = model.predict(test_data)
# if pred_res is not None:
if self.role == consts.GUEST or host_do_evaluate:
fold_name = "_".join(['validate', 'fold', str(fold_num)])
test_pred_res = test_pred_res.mapValues(lambda value: value + ['validate'])
test_pred_res = model.set_predict_data_schema(test_pred_res, test_data.schema)
# LOGGER.debug(f"train_pred_res schema: {test_pred_res.schema}")
self.evaluate(test_pred_res, fold_name, model)
LOGGER.debug("Finish fold: {}".format(fold_num))
if self.output_fold_history:
LOGGER.debug(f"generating fold history for fold {fold_num}")
fold_train_data = self.transform_history_data(train_data, train_pred_res, fold_num, "train")
fold_validate_data = self.transform_history_data(test_data, test_pred_res, fold_num, "validate")
fold_history_data = fold_train_data.union(fold_validate_data)
fold_history_data.schema = fold_train_data.schema
if self.fold_history is None:
self.fold_history = fold_history_data
else:
new_fold_history = self.fold_history.union(fold_history_data)
new_fold_history.schema = fold_history_data.schema
self.fold_history = new_fold_history
summary_res[f"fold_{fold_num}"] = model.summary()
fold_num += 1
summary_res['fold_num'] = fold_num
LOGGER.debug("Finish all fold running")
original_model.set_summary(summary_res)
if self.output_fold_history:
LOGGER.debug(f"output data schema: {self.fold_history.schema}")
#LOGGER.debug(f"output data: {list(self.fold_history.collect())}")
LOGGER.debug(f"output data is: {self.fold_history}")
return self.fold_history
else:
return data_inst
def _arbiter_run(self, original_model):
for fold_num in range(self.n_splits):
LOGGER.info("KFold flowid is: {}".format(fold_num))
model = copy.deepcopy(original_model)
this_flowid = 'train.' + str(fold_num)
model.set_flowid(this_flowid)
model.set_cv_fold(fold_num)
model.fit(None)
this_flowid = 'predict_train.' + str(fold_num)
model.set_flowid(this_flowid)
model.predict(None)
this_flowid = 'predict_validate.' + str(fold_num)
model.set_flowid(this_flowid)
model.predict(None)
def _align_data_index(self, data_instance, flowid, data_application=None):
schema = data_instance.schema
if data_application is None:
# LOGGER.warning("not data_application!")
# return
raise ValueError("In _align_data_index, data_application should be provided.")
transfer_variable = CrossValidationTransferVariable()
if data_application == consts.TRAIN_DATA:
transfer_id = transfer_variable.train_sid
elif data_application == consts.TEST_DATA:
transfer_id = transfer_variable.test_sid
else:
raise ValueError("In _align_data_index, data_application should be provided.")
if self.role == consts.GUEST:
data_sid = data_instance.mapValues(lambda v: 1)
transfer_id.remote(data_sid,
role=consts.HOST,
idx=-1,
suffix=(flowid,))
LOGGER.info("remote {} to host".format(data_application))
return data_instance
elif self.role == consts.HOST:
data_sid = transfer_id.get(idx=0,
suffix=(flowid,))
LOGGER.info("get {} from guest".format(data_application))
join_data_insts = data_sid.join(data_instance, lambda s, d: d)
join_data_insts.schema = schema
return join_data_insts
def evaluate(self, validate_data, fold_name, model):
if validate_data is None:
return
eval_obj = Evaluation()
# LOGGER.debug("In KFold, evaluate_param is: {}".format(self.evaluate_param.__dict__))
# eval_obj._init_model(self.evaluate_param)
eval_param = model.get_metrics_param()
eval_param.check_single_value_default_metric()
eval_obj._init_model(eval_param)
eval_obj.set_tracker(model.tracker)
validate_data = {fold_name: validate_data}
eval_obj.fit(validate_data)
eval_obj.save_data()
| 45.372263
| 170
| 0.628218
|
66618bddb5c74af6764f62e6c02559ad2b18829f
| 361
|
py
|
Python
|
zaius/reports/__init__.py
|
idio/python-zaius-export
|
839c2b929d14e9f262d3e1a5b7ca85b7ebf8fcfc
|
[
"MIT"
] | 3
|
2020-04-24T15:51:39.000Z
|
2022-02-15T06:57:07.000Z
|
zaius/reports/__init__.py
|
idio/python-zaius-export
|
839c2b929d14e9f262d3e1a5b7ca85b7ebf8fcfc
|
[
"MIT"
] | 2
|
2020-05-06T15:56:09.000Z
|
2021-08-11T05:19:12.000Z
|
zaius/reports/__init__.py
|
idio/python-zaius-export
|
839c2b929d14e9f262d3e1a5b7ca85b7ebf8fcfc
|
[
"MIT"
] | 4
|
2020-06-17T15:53:36.000Z
|
2022-01-05T12:09:14.000Z
|
# -*- coding: utf-8 -*-
"""Pre-baked reports
Set of reports that can be called from code or executed using
the zaius-export command line utility.
Example:
zaius-export demo
"""
from .spec import ReportSpec
from . import demo
from . import product_attribution
from . import lifecycle_progress
from . import email_metrics
SPECS = ReportSpec.specs
| 20.055556
| 61
| 0.739612
|
035694a4800aa77af33e58a5cab080189b432140
| 2,942
|
py
|
Python
|
examples/dataflow-data-generator/data-generator-pipeline/tests/test_fix_record_for_avro.py
|
mrhungr-github/professional-services
|
3163204b24815741e0d4cb6482ae9c4b0b2c2f3c
|
[
"Apache-2.0"
] | null | null | null |
examples/dataflow-data-generator/data-generator-pipeline/tests/test_fix_record_for_avro.py
|
mrhungr-github/professional-services
|
3163204b24815741e0d4cb6482ae9c4b0b2c2f3c
|
[
"Apache-2.0"
] | null | null | null |
examples/dataflow-data-generator/data-generator-pipeline/tests/test_fix_record_for_avro.py
|
mrhungr-github/professional-services
|
3163204b24815741e0d4cb6482ae9c4b0b2c2f3c
|
[
"Apache-2.0"
] | 1
|
2019-04-19T03:16:20.000Z
|
2019-04-19T03:16:20.000Z
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import avro.schema
import json
import logging
import unittest
from data_generator.AvroUtil import fix_record_for_avro
class TestAvroFixer(unittest.TestCase):
def test_fix_record_for_avro(self):
avro_schema = avro.schema.parse(
json.dumps({
u'type': u'record',
u'name': u'AthleticsWorldRecords',
u'fields': [
{u'name': u'birthday',
u'type': [u'null',
{u'logicalType': u'date', u'type': u'int'}]},
{u'name': u'athlete', u'type':'string'},
{u'name': u'race_start_time',
u'type': [u'null',
{u'logicalType': u'time-micros', u'type': u'long'}]},
{u'name':u'race_start_datetime',
u'type': [u'null',
{u'logicalType': u'timestamp-millis', u'type': u'long'}]},
{u'name':u'race_end_timestamp',
u'type': [u'null',
{u'logicalType': u'timestamp-micros', u'type': u'long'}]},
{u'name':u'race_distance_m', u'type':'int'},
{u'name':u'time_seconds', u'type':'float'},
{u'name':u'is_world_record', u'type':'boolean'}
]
})
)
input_record = {
u'birthday': u'1988-12-17',
u'athlete': u'David Rudisha',
u'race_start_time': u'20:20:00.00',
u'race_start_datetime': u'2012-09-08T20:20:00.00',
u'race_end_timestamp': u'2012-09-08T20:21:40.91',
u'race_distance_m': 800,
u'time_seconds': 100.91,
u'is_world_record': True
}
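        # The expected values below follow the Avro logical-type encodings:
        # `date` is days since the Unix epoch, `time-micros` is microseconds
        # since midnight, and `timestamp-millis`/`timestamp-micros` are
        # milliseconds/microseconds since the Unix epoch, respectively.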
expected_output = [{
u'birthday': 6925,
u'athlete': u'David Rudisha',
u'race_start_time': 73200000000L,
u'race_start_datetime': 1347135600000L,
u'race_end_timestamp': 1347135700910000L,
u'race_distance_m': 800,
u'time_seconds': 100.91,
u'is_world_record': True
}]
output_record = fix_record_for_avro(input_record, avro_schema)
self.assertDictEqual(output_record[0], expected_output[0])
if __name__ == '__main__':
unittest.main()
| 38.207792
| 82
| 0.546227
|
7f88630b271075ceb79990a1674d7244f51f0858
| 7,826
|
py
|
Python
|
examples/dmri_group_connectivity_mrtrix.py
|
sebastientourbier/nipype_lts5
|
3b9718d154443574cc6a5d0bbd76ccf7964e6a45
|
[
"BSD-3-Clause"
] | 1
|
2018-09-09T14:47:04.000Z
|
2018-09-09T14:47:04.000Z
|
examples/dmri_group_connectivity_mrtrix.py
|
MarcCote/nipype
|
b4014e1d87509f35242f0547f51d2b8962f83cbe
|
[
"BSD-3-Clause"
] | null | null | null |
examples/dmri_group_connectivity_mrtrix.py
|
MarcCote/nipype
|
b4014e1d87509f35242f0547f51d2b8962f83cbe
|
[
"BSD-3-Clause"
] | 1
|
2020-02-19T13:47:05.000Z
|
2020-02-19T13:47:05.000Z
|
"""
==================================================
dMRI: Group connectivity - MRtrix, FSL, FreeSurfer
==================================================
Introduction
============
This script, dmri_group_connectivity_mrtrix.py, runs group-based connectivity analysis using
the dmri.mrtrix.connectivity_mapping Nipype workflow. Further detail on the processing can be
found in :ref:`dmri_connectivity_advanced`. This tutorial can be run using:
python dmri_group_connectivity_mrtrix.py
We perform this analysis using one healthy subject and two subjects who suffer from Parkinson's disease.
The whole package (960 mb as .tar.gz / 1.3 gb uncompressed) including the Freesurfer directories for these subjects, can be acquired from here:
* http://db.tt/b6F1t0QV
A data package containing the outputs of this pipeline can be obtained from here:
* http://db.tt/elmMnIt1
Along with MRtrix, FSL, and Freesurfer, you must also have the Connectome File Format
library installed as well as the Connectome Mapper (cmp).
* MRtrix: http://www.brain.org.au/software/mrtrix/
* FSL: http://www.fmrib.ox.ac.uk/fsl/
* Freesurfer: http://surfer.nmr.mgh.harvard.edu/
* CTMK: http://www.cmtk.org/
* CFF: sudo apt-get install python-cfflib
Or on github at:
* CFFlib: https://github.com/LTS5/cfflib
* CMP: https://github.com/LTS5/cmp
Output data can be visualized in ConnectomeViewer, TrackVis, Gephi,
the MRtrix Viewer (mrview), and anything that can view Nifti files.
* ConnectomeViewer: https://github.com/LTS5/connectomeviewer
* TrackVis: http://trackvis.org/
* Gephi: http://gephi.org/
The fiber data is available in Numpy arrays, and the connectivity matrix
is also produced as a MATLAB matrix.
Import the workflows
--------------------
First, we import the necessary modules from nipype.
"""
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs # freesurfer
import os.path as op # system functions
import cmp
from nipype.workflows.dmri.mrtrix.group_connectivity import create_group_connectivity_pipeline
from nipype.workflows.dmri.connectivity.group_connectivity import (create_merge_network_results_by_group_workflow, create_merge_group_network_results_workflow, create_average_networks_by_group_workflow)
"""
Set the proper directories
--------------------------
Here we set the subjects and data directories, and configure the Freesurfer and FSL defaults.
"""
subjects_dir = op.abspath('groupcondatapackage/subjects/')
data_dir = op.abspath('groupcondatapackage/data/')
fs.FSCommand.set_default_subjects_dir(subjects_dir)
fsl.FSLCommand.set_default_output_type('NIFTI')
"""
Define the groups
-----------------
Here we define the groups for this study. We would like to search for differences between the healthy subject and the two
patients with Parkinson's disease. The group list is defined as a Python dictionary (see http://docs.python.org/tutorial/datastructures.html),
with group IDs ('controls', 'parkinsons') as keys, and subject/patient names as values. We set the main output directory as 'dmri_group_connectivity_mrtrix'.
"""
group_list = {}
group_list['controls'] = ['cont17']
group_list['parkinsons'] = ['pat10', 'pat20']
"""
The output directory must be named as well.
"""
global output_dir
output_dir = op.abspath('dmri_group_connectivity_mrtrix')
"""
Main processing loop
====================
The title for the final grouped-network connectome file is dependent on the group names. The resulting file for this example
is 'parkinsons-controls.cff'. The following code implements the format a-b-c-...x.cff for an arbitrary number of groups.
.. warning::
The 'info' dictionary below is used to define the input files. In this case, the diffusion weighted image contains the string 'dti'.
The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme.
The workflow is created given the information input about the groups and subjects.
.. seealso::
* nipype/workflows/dmri/mrtrix/group_connectivity.py
* nipype/workflows/dmri/mrtrix/connectivity_mapping.py
* :ref:`dmri_connectivity_advanced`
We set values for absolute threshold used on the fractional anisotropy map. This is done
in order to identify single-fiber voxels. In brains with more damage, however, it may be necessary
to reduce the threshold, since their brains have lower average fractional anisotropy values.
We invert the b-vectors in the encoding file, and set the maximum harmonic order
of the pre-tractography spherical deconvolution step. This is done to show
how to set inputs that will affect both groups.
Next we create and run the second-level pipeline. The purpose of this workflow is simple:
It is used to merge each subject's CFF file into one, so that there is a single file containing
all of the networks for each group. This can be useful for performing Network Brain Statistics
using the NBS plugin in ConnectomeViewer.
.. seealso::
http://www.connectomeviewer.org/documentation/users/tutorials/tut_nbs.html
"""
title = ''
for idx, group_id in enumerate(group_list.keys()):
title += group_id
if not idx == len(group_list.keys()) - 1:
title += '-'
info = dict(dwi=[['subject_id', 'dti']],
bvecs=[['subject_id', 'bvecs']],
bvals=[['subject_id', 'bvals']])
l1pipeline = create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, info)
    # This is used to demonstrate the ease with which different parameters can be set for each group.
if group_id == 'parkinsons':
l1pipeline.inputs.connectivity.mapping.threshold_FA.absolute_threshold_value = 0.5
else:
l1pipeline.inputs.connectivity.mapping.threshold_FA.absolute_threshold_value = 0.7
    # Here we invert the b-vectors in the Y direction and set the maximum harmonic order of the
# spherical deconvolution step
l1pipeline.inputs.connectivity.mapping.fsl2mrtrix.invert_y = True
l1pipeline.inputs.connectivity.mapping.csdeconv.maximum_harmonic_order = 6
# Here we define the parcellation scheme and the number of tracks to produce
parcellation_name = 'scale500'
l1pipeline.inputs.connectivity.mapping.Parcellate.parcellation_name = parcellation_name
cmp_config = cmp.configuration.PipelineConfiguration()
cmp_config.parcellation_scheme = "Lausanne2008"
l1pipeline.inputs.connectivity.mapping.inputnode_within.resolution_network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml']
l1pipeline.inputs.connectivity.mapping.probCSDstreamtrack.desired_number_of_tracks = 100000
l1pipeline.run()
l1pipeline.write_graph(format='eps', graph2use='flat')
# The second-level pipeline is created here
l2pipeline = create_merge_network_results_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir)
l2pipeline.inputs.l2inputnode.network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml']
l2pipeline.run()
l2pipeline.write_graph(format='eps', graph2use='flat')
"""
Now that the for loop is complete there are two grouped CFF files each containing the appropriate subjects.
It is also convenient to have every subject in a single CFF file, so that is what the third-level pipeline does.
"""
l3pipeline = create_merge_group_network_results_workflow(group_list, data_dir, subjects_dir, output_dir, title)
l3pipeline.run()
l3pipeline.write_graph(format='eps', graph2use='flat')
"""
The fourth and final workflow averages the networks and saves them in another CFF file
"""
l4pipeline = create_average_networks_by_group_workflow(group_list, data_dir, subjects_dir, output_dir, title)
l4pipeline.run()
l4pipeline.write_graph(format='eps', graph2use='flat')
| 41.850267
| 202
| 0.753131
|
94c686b8061e3bf0f15751e83dfbbf3eac6ae9db
| 2,101
|
py
|
Python
|
tests/test_mock_serial.py
|
jamesleesaunders/pi-alertme
|
f8e8ea69ebe353e4aadcc54d89bb4fe1f5b473e5
|
[
"MIT"
] | 16
|
2017-04-29T22:20:46.000Z
|
2020-07-30T00:33:33.000Z
|
tests/test_mock_serial.py
|
jamesleesaunders/pi-alertme
|
f8e8ea69ebe353e4aadcc54d89bb4fe1f5b473e5
|
[
"MIT"
] | 36
|
2017-03-11T07:00:37.000Z
|
2018-01-04T08:28:50.000Z
|
tests/test_mock_serial.py
|
jamesleesaunders/pyalertme
|
f8e8ea69ebe353e4aadcc54d89bb4fe1f5b473e5
|
[
"MIT"
] | 4
|
2017-10-20T20:49:34.000Z
|
2018-11-05T09:57:39.000Z
|
#! /usr/bin/python
"""
test_mock_serial.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Tests fake device objects for proper functionality.
"""
import unittest
from mock_serial import Serial
class TestFakeSerialRead(unittest.TestCase):
"""
Fake Serial class should work as intended to emulate reading from a serial port.
"""
def setUp(self):
"""
Create a fake read device for each test.
"""
self.device = Serial()
self.device.set_read_data("test")
def test_read_single_byte(self):
"""
Reading one byte at a time should work as expected.
"""
self.assertEqual(self.device.read(), 't')
self.assertEqual(self.device.read(), 'e')
self.assertEqual(self.device.read(), 's')
self.assertEqual(self.device.read(), 't')
def test_read_multiple_bytes(self):
"""
Reading multiple bytes at a time should work as expected.
"""
self.assertEqual(self.device.read(3), 'tes')
self.assertEqual(self.device.read(), 't')
def test_write(self):
"""
Test serial write function.
"""
self.device.write("Hello World")
self.assertEqual(self.device.get_data_written(), "Hello World")
def test_open(self):
"""
Test open(), close() and isOpen() functions.
"""
self.device.open()
self.assertEqual(self.device.isOpen(), True)
self.device.close()
self.assertEqual(self.device.isOpen(), False)
def test_get_settings_dict(self):
"""
Test getSettingsDict() function returns dictionary of settings.
"""
expected = {
'timeout': 1,
'parity': 'N',
'baudrate': 19200,
'bytesize': 8,
'stopbits': 1,
'xonxoff': 0,
'rtscts': 0
}
self.assertEqual(self.device.getSettingsDict(), expected)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 28.780822
| 85
| 0.559257
|
ebdc9181686683e0f9489b2ac626712accfbe0db
| 4,580
|
py
|
Python
|
tensorflow/contrib/cmake/tools/create_def_file.py
|
AlexxNica/tensorflow
|
f8dce81aeaff40dc78d398741854ad8766806f91
|
[
"Apache-2.0"
] | 1
|
2021-04-20T11:37:41.000Z
|
2021-04-20T11:37:41.000Z
|
tensorflow/contrib/cmake/tools/create_def_file.py
|
AIroot/tensorflow
|
6fecffd7eca64100ce14969a2c3366b89b7cecc2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/cmake/tools/create_def_file.py
|
AIroot/tensorflow
|
6fecffd7eca64100ce14969a2c3366b89b7cecc2
|
[
"Apache-2.0"
] | 1
|
2019-03-16T05:48:07.000Z
|
2019-03-16T05:48:07.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""create_def_file.py - tool to create a windows def file.
The def file can be used to export symbols from the tensorflow dll to enable
tf.load_library().
Because the linker allows only 64K symbols to be exported per dll
we filter the symbols down to the essentials. The regular expressions
we use for this are specific to tensorflow.
TODO: this works fine but there is an issue with exporting
'const char * const' and importing it from a user_ops. The problem is
on the importing end and using __declspec(dllimport) works around it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import io
import os
import re
import subprocess
import sys
import tempfile
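# Example invocation (the file names are illustrative; dumpbin.exe and
# undname.exe must be reachable, e.g. from a Visual Studio developer prompt):
#     python create_def_file.py --input pywrap_tensorflow_internal.lib --output pywrap_tensorflow.def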
# External tools we use that come with visual studio sdk and
# we assume that the caller has the correct PATH to the sdk
UNDNAME = "undname.exe"
DUMPBIN = "dumpbin.exe"
# Exclude if matched
EXCLUDE_RE = re.compile(r"deleting destructor|::internal::")
# Include if matched before exclude
INCLUDEPRE_RE = re.compile(r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::CheckOpMessageBuilder")
# Include if matched after exclude
INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
r"tensorflow::|"
r"functor::|"
r"perftools::gputools")
def get_args():
"""Parse command line."""
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="input library", required=True)
parser.add_argument("--output", help="output deffile", required=True)
args = parser.parse_args()
return args
def main():
"""main."""
args = get_args()
# Pipe dumpbin to extract all linkable symbols from a lib.
# Good symbols are collected in candidates and also written to
# a temp file.
candidates = []
tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", args.input],
stdout=subprocess.PIPE)
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
cols = line.split()
if len(cols) < 2:
continue
sym = cols[1]
tmpfile.file.write(sym + "\n")
candidates.append(sym)
tmpfile.file.close()
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(DUMPBIN, exit_code))
return exit_code
# Run the symbols through undname to get their undecorated name
# so we can filter on something readable.
with open(args.output, "w") as def_fp:
# track dupes
taken = set()
# Header for the def file. Since the tensorflow.dll is actually called
# _pywrap_tensorflow.pyd in the python wheel, hint that in the def file.
def_fp.write("LIBRARY _pywrap_tensorflow_internal.pyd\n")
def_fp.write("EXPORTS\n")
def_fp.write("\t ??1OpDef@tensorflow@@UEAA@XZ\n")
# Each symbols returned by undname matches the same position in candidates.
# We compare on undname but use the decorated name from candidates.
dupes = 0
proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
for idx, line in enumerate(io.TextIOWrapper(proc.stdout, encoding="utf-8")):
decorated = candidates[idx]
if decorated in taken:
# Symbol is already in output, done.
dupes += 1
continue
if not INCLUDEPRE_RE.search(line):
if EXCLUDE_RE.search(line):
continue
if not INCLUDE_RE.search(line):
continue
def_fp.write("\t" + decorated + "\n")
taken.add(decorated)
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(UNDNAME, exit_code))
return exit_code
os.unlink(tmpfile.name)
print("symbols={}, taken={}, dupes={}"
.format(len(candidates), len(taken), dupes))
return 0
if __name__ == "__main__":
sys.exit(main())
| 33.188406
| 80
| 0.680786
|
26dab2dff557058b14411e43a8e48bfad178e601
| 3,382
|
py
|
Python
|
social_core/exceptions.py
|
candlerb/social-core
|
36f25c4218f17c1c74fbb22881716d3c4bdbd379
|
[
"BSD-3-Clause"
] | null | null | null |
social_core/exceptions.py
|
candlerb/social-core
|
36f25c4218f17c1c74fbb22881716d3c4bdbd379
|
[
"BSD-3-Clause"
] | null | null | null |
social_core/exceptions.py
|
candlerb/social-core
|
36f25c4218f17c1c74fbb22881716d3c4bdbd379
|
[
"BSD-3-Clause"
] | null | null | null |
class SocialAuthBaseException(ValueError):
"""Base class for pipeline exceptions."""
pass
class WrongBackend(SocialAuthBaseException):
def __init__(self, backend_name):
self.backend_name = backend_name
def __str__(self):
return 'Incorrect authentication service "{}"'.format(
self.backend_name
)
class MissingBackend(WrongBackend):
def __str__(self):
return f'Missing backend "{self.backend_name}" entry'
class NotAllowedToDisconnect(SocialAuthBaseException):
"""User is not allowed to disconnect it's social account."""
def __str__(self):
return 'This account is not allowed to be disconnected.'
class AuthException(SocialAuthBaseException):
"""Auth process exception."""
def __init__(self, backend, *args, **kwargs):
self.backend = backend
super().__init__(*args, **kwargs)
class AuthFailed(AuthException):
"""Auth process failed for some reason."""
def __str__(self):
msg = super().__str__()
if msg == 'access_denied':
return 'Authentication process was canceled'
return f'Authentication failed: {msg}'
class AuthCanceled(AuthException):
"""Auth process was canceled by user."""
def __init__(self, *args, **kwargs):
self.response = kwargs.pop('response', None)
super().__init__(*args, **kwargs)
def __str__(self):
msg = super().__str__()
if msg:
return f'Authentication process canceled: {msg}'
return 'Authentication process canceled'
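# Illustration only (not part of social-core): a minimal sketch of how calling
# code might surface these exceptions to the user. `do_auth_flow` is a
# hypothetical callable standing in for whatever triggers the auth pipeline.
def _handle_auth_errors(do_auth_flow):
    try:
        return do_auth_flow()
    except AuthCanceled:
        return 'Login canceled by the user.'
    except SocialAuthBaseException as error:
        return str(error)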
class AuthUnknownError(AuthException):
"""Unknown auth process error."""
def __str__(self):
msg = super().__str__()
return f'An unknown error happened while authenticating {msg}'
class AuthTokenError(AuthException):
"""Auth token error."""
def __str__(self):
msg = super().__str__()
return f'Token error: {msg}'
class AuthMissingParameter(AuthException):
"""Missing parameter needed to start or complete the process."""
def __init__(self, backend, parameter, *args, **kwargs):
self.parameter = parameter
super().__init__(backend, *args, **kwargs)
def __str__(self):
return f'Missing needed parameter {self.parameter}'
class AuthStateMissing(AuthException):
"""State parameter is incorrect."""
def __str__(self):
return 'Session value state missing.'
class AuthStateForbidden(AuthException):
"""State parameter is incorrect."""
def __str__(self):
return 'Wrong state parameter given.'
class AuthAlreadyAssociated(AuthException):
"""A different user has already associated the target social account"""
def __str__(self):
return 'This account is already in use.'
class AuthTokenRevoked(AuthException):
"""User revoked the access_token in the provider."""
def __str__(self):
        return 'User revoked access to the token'
class AuthForbidden(AuthException):
"""Authentication for this user is forbidden"""
def __str__(self):
return 'Your credentials aren\'t allowed'
class AuthUnreachableProvider(AuthException):
"""Cannot reach the provider"""
def __str__(self):
return 'The authentication provider could not be reached'
class InvalidEmail(AuthException):
def __str__(self):
return 'Email couldn\'t be validated'
| 28.420168
| 75
| 0.674157
|
44b93f4703e08cba337d6d46d0f3d99fee5cc7e3
| 3,856
|
py
|
Python
|
grade-display/assemble.py
|
chrononyan/cs61a-apps
|
e44b5cf4b305bd5aed7bdd60c3b3b826abcf5a4a
|
[
"MIT"
] | null | null | null |
grade-display/assemble.py
|
chrononyan/cs61a-apps
|
e44b5cf4b305bd5aed7bdd60c3b3b826abcf5a4a
|
[
"MIT"
] | null | null | null |
grade-display/assemble.py
|
chrononyan/cs61a-apps
|
e44b5cf4b305bd5aed7bdd60c3b3b826abcf5a4a
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
import numpy as np
import requests
from common.rpc.howamidoing import upload_grades
from common.rpc.auth import read_spreadsheet
ROSTER = "data/roster.csv"
GRADES = "data/okpy_grades.csv"
MT1 = "data/mt1.csv" # midterm scores from Gradescope
MT2 = "data/mt2.csv" # midterm scores from Gradescope
SECTIONS = "data/sections.csv" # section scores from sections.cs61a.org
# ---------------------------
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# helpful functions
def csv(name):
return pd.read_csv(os.path.join(__location__, name), dtype={"sid": str, "SID": str})
def web_csv(url, sheet):
resp = read_spreadsheet(url=url, sheet_name=sheet)
cols = resp[0]
data = [x[: len(cols)] for x in resp[1:]]
for row in data:
while len(row) < len(cols):
row.append(0)
return pd.DataFrame(data, columns=cols)
# exam recovery calculations
def attendance(row):
return row["Section Attendance (Total)"]
def exam_recovery(your_exam_score, attendance, max_exam_score, cap=10):
if your_exam_score == 0:
return 0
half_score = max_exam_score / 2
max_recovery = max(0, (half_score - your_exam_score) / 2)
recovery_ratio = min(attendance, cap) / cap
return max_recovery * recovery_ratio
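# Worked example (illustration only, hypothetical numbers): a student who scored
# 15/40 on an exam and attended 5 sections gets
#   max(0, (20 - 15) / 2) * (min(5, 10) / 10) = 2.5 * 0.5 = 1.25 recovery points,
# i.e. exam_recovery(15, 5, 40) == 1.25.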
def assemble(gscope=None, recovery=False, adjustments=()):
print("Loading scores data...")
roster = csv(ROSTER).rename(columns={"sid": "SID", "email": "Email"})
grades = csv(GRADES)
if gscope:
for name in gscope:
scores = csv(f"data/{name}.csv")[["SID", "Total Score"]]
scores = scores.fillna(0)
grades = (
pd.merge(grades, scores, how="left", on="SID")
.rename(columns={"Total Score": f"{gscope[name]} (Raw)"})
.fillna(0)
)
if adjustments:
print("Applying adjustments...")
for url, sheet in adjustments:
adj = web_csv(url, sheet)
for col in adj.columns[1:]:
adj[col] = pd.to_numeric(adj[col])
adj = adj.replace("", np.nan).fillna(0)
grades = pd.merge(grades, adj, how="left", on="Email").fillna(0)
print("Adding section attendance...")
sections = csv(SECTIONS).replace("", np.nan).fillna(0)
grades = pd.merge(grades, sections, how="left", on="Email").fillna(0)
# in su21, grant everyone points for discussion 0
grades["Discussion 0"] = 1.0
if recovery:
print("Calculating recovery points...")
if "mt1" in gscope:
grades["Midterm 1 (Recovery)"] = grades.apply(
lambda row: exam_recovery(row["Midterm 1 (Raw)"], attendance(row), 40),
axis=1,
)
if "mt2" in gscope:
grades["Midterm 2 (Recovery)"] = grades.apply(
lambda row: exam_recovery(row["Midterm 2 (Raw)"], attendance(row), 50),
axis=1,
)
if "mt" in gscope:
grades["Midterm (Recovery)"] = grades.apply(
lambda row: exam_recovery(row["Midterm (Raw)"], attendance(row), 55),
)
out = pd.merge(roster, grades, how="left", on="Email")
columns = [*grades.columns, "name"]
out = out.rename(columns={"SID_x": "SID"})
# finalize
out = out[columns]
out["TA"] = ""
out = out.replace("", np.nan)
finalized = out.fillna(0)
finalized = finalized.rename(columns={"name": "Name"})
finalized = finalized.applymap(lambda x: 1 if x == "Yes" else 0 if x == "No" else x)
print("Writing to file...")
finalized.to_csv("data/grades.csv", index=False)
print("Uploading data to Howamidoing...")
upload = finalized.to_csv(index=False)
upload_grades(data=upload)
print("Done.")
if __name__ == "__main__":
assemble()
| 31.606557
| 88
| 0.600104
|
3fbe2c665a2586a1d5ff1d7238473e9f5db2410c
| 4,092
|
py
|
Python
|
src/tests/worker/run_thread_testing.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | 1
|
2019-06-17T17:01:17.000Z
|
2019-06-17T17:01:17.000Z
|
src/tests/worker/run_thread_testing.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | 7
|
2021-02-08T20:46:15.000Z
|
2021-09-08T02:12:59.000Z
|
src/tests/worker/run_thread_testing.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import threading
from worker.task_status import TaskStatus
from worker.worker_status import WorkerStatus
class RunThread(threading.Thread):
"""The thread executing the template method. Same as run_thread, but with additional asserts for testing"""
def __init__(self, task, worker, msg_out):
"""The constructor.
:param task: the initialized Task.
:param worker: the worker class
:param msg_out: the class for outgoing messages"""
self._task = task
self._worker = worker
        self._msg = msg_out
        # watchdog counter used by the busy wait in evaluate_and_run_task()
        self._wait_cnt = 0
        # initialize thread
        threading.Thread.__init__(self)
def run(self, ):
self.evaluate_and_run_task()
def evaluate_and_run_task(self):
"""
Decides what to do next based on task.get_status. In fact this
implements the result driven algorithm for the move command: First
executes copy method, then consistency check, then delete. Updates
the Master after each step via msg_out. If TaskStatus is set to
either exception or error, pauses the worker and sends detailed
report to Master via msg_out. If TaskStatus is set to terminated,
sends detailed report and stops execution of task.
"""
        # We notify the Master once here instead of in every branch below, but we
        # have to skip the notification while busy waiting...
stat = self._task.status
if stat == TaskStatus.COPYING:
# We could enter deadlock here, so we circumvent this with a
# watchdog counter
# ALso, signaling Master every time would be too much... so we
# check this before updating.
            # wait for 60 seconds to see if the status was changed in that time
            time.sleep(60)
self._wait_cnt += 1
# we wait for one hour maximum
if self._wait_cnt < 60:
self.evaluate_and_run_task()
return
# update task and then decide what to do next
self._msg.update_task(self._task)
if stat == TaskStatus.INITIALIZED:
self._task = self._worker.copy(self._task)
assert self._worker.status == WorkerStatus.ACTIVE
self.evaluate_and_run_task()
elif stat == TaskStatus.COPIED:
self._task = self._worker._consistency_check(self._task)
assert self._worker.status == WorkerStatus.ACTIVE
self.evaluate_and_run_task()
elif stat == TaskStatus.CHECKED:
self._task = self._worker.delete(self._task)
elif stat == TaskStatus.DELETED:
# at the moment we do not do anything else here, but one could
# insert another step if needed
self._task.status = TaskStatus.FINISHED
assert self._worker.status == WorkerStatus.WAITING
self.evaluate_and_run_task()
elif stat == TaskStatus.FINISHED:
self._msg.final_informations(self._task)
assert self._worker.status == WorkerStatus.WAITING
self._worker.status = WorkerStatus.WAITING
self._task = None
elif stat == TaskStatus.PAUSED:
# we do not busy wait here as we want to wait for a
# resume_paused_task command
return
elif stat == TaskStatus.TERMINATED:
assert self._worker.status == WorkerStatus.WAITING
self._msg.final_informations(self._task)
self._task = None
elif stat == TaskStatus.ERROR:
# at error we pause and wait for resume_paused_task command
self._msg.raise_error(self._task)
self._worker.status = WorkerStatus.PAUSED
assert self._worker.status == WorkerStatus.PAUSED
elif stat == TaskStatus.EXCEPTION:
# at error we pause and wait for resume_paused_task command
self._msg.raise_exception(self._task)
self._worker.status = WorkerStatus.PAUSED
assert self._worker.status == WorkerStatus.PAUSED
return
| 38.971429
| 111
| 0.632454
|
e672253009bc3ceaf93d402c9d0c10d43153f688
| 18,379
|
py
|
Python
|
django/db/backends/creation.py
|
leereilly/django-1
|
fe43ad5707d116bb1729bc17a24ca16c90ae040d
|
[
"BSD-3-Clause"
] | 1
|
2015-11-11T12:20:45.000Z
|
2015-11-11T12:20:45.000Z
|
django/db/backends/creation.py
|
leereilly/django-1
|
fe43ad5707d116bb1729bc17a24ca16c90ae040d
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/backends/creation.py
|
leereilly/django-1
|
fe43ad5707d116bb1729bc17a24ca16c90ae040d
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import time
from django.conf import settings
from django.db.utils import load_backend
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
def __init__(self, connection):
self.connection = connection
def _digest(self, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
return '%x' % (abs(hash(args)) % 4294967296L) # 2**32
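    # Example (illustrative only; the actual hex token depends on Python's hash()):
    # _digest('tbl_a', 'tbl_b') might return something like '1f3a9c2e', which is
    # appended to constraint and index names below (see sql_for_pending_references
    # and sql_indexes_for_field) to keep them unique within the backend's
    # name-length limit.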
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
opts = model._meta
if not opts.managed or opts.proxy:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
null = f.null
if (f.empty_strings_allowed and not f.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if not null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
tablespace_sql = self.connection.ops.tablespace_sql(
tablespace, inline=True)
if tablespace_sql:
field_output.append(tablespace_sql)
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(
f, known_models, style)
if pending:
pending_references.setdefault(f.rel.to, []).append(
(model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
", ".join(
[style.SQL_FIELD(qn(opts.get_field(f).column))
for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(
' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(
opts.db_tablespace)
if tablespace_sql:
full_statement.append(tablespace_sql)
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary
# keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"""
Return the SQL snippet defining the foreign key reference for a field.
"""
qn = self.connection.ops.quote_name
if field.rel.to in known_models:
output = [style.SQL_KEYWORD('REFERENCES') + ' ' +
style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' +
style.SQL_FIELD(qn(field.rel.to._meta.get_field(
field.rel.field_name).column)) + ')' +
self.connection.ops.deferrable_sql()
]
pending = False
else:
# We haven't yet created the table to which this field
# is related, so save it for later.
output = []
pending = True
return output, pending
def sql_for_pending_references(self, model, style, pending_references):
"""
Returns any ALTER TABLE statements to add constraints after the fact.
"""
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
qn = self.connection.ops.quote_name
final_output = []
opts = model._meta
if model in pending_references:
for rel_class, f in pending_references[model]:
rel_opts = rel_class._meta
r_table = rel_opts.db_table
r_col = f.column
table = opts.db_table
col = opts.get_field(f.rel.field_name).column
# For MySQL, r_name must be unique in the first 64 characters.
# So we are careful with character usage here.
r_name = '%s_refs_%s_%s' % (
r_col, col, self._digest(r_table, table))
final_output.append(style.SQL_KEYWORD('ALTER TABLE') +
' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
(qn(r_table), qn(truncate_name(
r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
del pending_references[model]
return final_output
def sql_indexes_for_model(self, model, style):
"""
Returns the CREATE INDEX SQL statements for a single model.
"""
if not model._meta.managed or model._meta.proxy:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_indexes_for_field(model, f, style))
return output
def sql_indexes_for_field(self, model, f, style):
"""
Return the CREATE INDEX SQL statements for a single model field.
"""
from django.db.backends.util import truncate_name
if f.db_index and not f.unique:
qn = self.connection.ops.quote_name
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
if tablespace_sql:
tablespace_sql = ' ' + tablespace_sql
else:
tablespace_sql = ''
i_name = '%s_%s' % (model._meta.db_table, self._digest(f.column))
output = [style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(
i_name, self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(model._meta.db_table)) + ' ' +
"(%s)" % style.SQL_FIELD(qn(f.column)) +
"%s;" % tablespace_sql]
else:
output = []
return output
def sql_destroy_model(self, model, references_to_delete, style):
"""
        Return the DROP TABLE and constraint-dropping statements for a single
model.
"""
if not model._meta.managed or model._meta.proxy:
return []
# Drop the table now
qn = self.connection.ops.quote_name
output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(model._meta.db_table)))]
if model in references_to_delete:
output.extend(self.sql_remove_table_constraints(
model, references_to_delete, style))
if model._meta.has_auto_field:
ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
if ds:
output.append(ds)
return output
def sql_remove_table_constraints(self, model, references_to_delete, style):
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
output = []
qn = self.connection.ops.quote_name
for rel_class, f in references_to_delete[model]:
table = rel_class._meta.db_table
col = f.column
r_table = model._meta.db_table
r_col = model._meta.get_field(f.rel.field_name).column
r_name = '%s_refs_%s_%s' % (
col, r_col, self._digest(table, r_table))
output.append('%s %s %s %s;' % \
(style.SQL_KEYWORD('ALTER TABLE'),
style.SQL_TABLE(qn(table)),
style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
style.SQL_FIELD(qn(truncate_name(
r_name, self.connection.ops.max_name_length())))))
del references_to_delete[model]
return output
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print "Creating test database for alias '%s'%s..." % (
self.connection.alias, test_db_repr)
self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
# Confirm the feature set of the test database
self.connection.features.confirm()
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
call_command('createcachetable', cache._table,
database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
self.connection.cursor()
return test_database_name
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
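    # Example: with settings NAME = 'myapp_db' (hypothetical) and no TEST_NAME set,
    # the test database name becomes TEST_DATABASE_PREFIX + 'myapp_db' = 'test_myapp_db'.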
def _create_test_db(self, verbosity, autoclobber):
"""
Internal implementation - creates the test db tables.
"""
suffix = self.sql_table_creation_suffix()
test_database_name = self._get_test_db_name()
qn = self.connection.ops.quote_name
# Create the test database and connect to it. We need to autocommit
# if the database supports it because PostgreSQL doesn't allow
# CREATE/DROP DATABASE statements within transactions.
cursor = self.connection.cursor()
self._prepare_for_test_db_ddl()
try:
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception as e:
sys.stderr.write(
"Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input(
"Type 'yes' if you would like to try deleting the test "
"database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print ("Destroying old test database '%s'..."
% self.connection.alias)
cursor.execute(
"DROP DATABASE %s" % qn(test_database_name))
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name),
suffix))
except Exception as e:
sys.stderr.write(
"Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
return test_database_name
def destroy_test_db(self, old_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists.
"""
self.connection.close()
test_database_name = self.connection.settings_dict['NAME']
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print "Destroying test database for alias '%s'%s..." % (
self.connection.alias, test_db_repr)
# Temporarily use a new connection and a copy of the settings dict.
# This prevents the production database from being exposed to potential
# child threads while (or after) the test database is destroyed.
# Refs #10868 and #17786.
settings_dict = self.connection.settings_dict.copy()
settings_dict['NAME'] = old_database_name
backend = load_backend(settings_dict['ENGINE'])
new_connection = backend.DatabaseWrapper(
settings_dict,
alias='__destroy_test_db__',
allow_thread_sharing=False)
new_connection.creation._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
"""
Internal implementation - remove the test db tables.
"""
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
cursor = self.connection.cursor()
self._prepare_for_test_db_ddl()
# Wait to avoid "database is being accessed by other users" errors.
time.sleep(1)
cursor.execute("DROP DATABASE %s"
% self.connection.ops.quote_name(test_database_name))
self.connection.close()
def set_autocommit(self):
"""
Make sure a connection is in autocommit mode. - Deprecated, not used
anymore by Django code. Kept for compatibility with user code that
might use it.
"""
pass
def _prepare_for_test_db_ddl(self):
"""
Internal implementation - Hook for tasks that should be performed
before the ``CREATE DATABASE``/``DROP DATABASE`` clauses used by
        testing code to create/destroy test databases. Needed e.g. in
PostgreSQL to rollback and close any active transaction.
"""
pass
def sql_table_creation_suffix(self):
"""
SQL to append to the end of the test table creation statements.
"""
return ''
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
        according to the particularities of the RDBMS.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME']
)
| 41.961187
| 84
| 0.585124
|
b77adf6325f5d476ad202f1dc4c4a0cc9386f33a
| 199
|
py
|
Python
|
testing/unit_alignment/problem.py
|
pj0620/acca-video-series
|
1b09548014cc899ded5a8fdd1293f7fc121a98bc
|
[
"MIT"
] | null | null | null |
testing/unit_alignment/problem.py
|
pj0620/acca-video-series
|
1b09548014cc899ded5a8fdd1293f7fc121a98bc
|
[
"MIT"
] | 3
|
2020-04-16T09:24:48.000Z
|
2021-03-27T19:27:48.000Z
|
testing/unit_alignment/problem.py
|
pj0620/acca-video-series
|
1b09548014cc899ded5a8fdd1293f7fc121a98bc
|
[
"MIT"
] | 1
|
2020-09-01T05:32:04.000Z
|
2020-09-01T05:32:04.000Z
|
from manimlib.imports import *
class Problem(Scene):
def construct(self):
num = DecimalNumber(
12,
unit="Amps"
)
self.add(num)
self.wait()
| 19.9
| 30
| 0.512563
|
b812893e9a01a9ad9423596483c7b233b5e9f770
| 11,432
|
py
|
Python
|
Pysegreg/segregationMetrics.py
|
sandrofsousa/Resolution
|
70262f08a582850b78fb50f30e23b5f4405d3736
|
[
"MIT"
] | null | null | null |
Pysegreg/segregationMetrics.py
|
sandrofsousa/Resolution
|
70262f08a582850b78fb50f30e23b5f4405d3736
|
[
"MIT"
] | null | null | null |
Pysegreg/segregationMetrics.py
|
sandrofsousa/Resolution
|
70262f08a582850b78fb50f30e23b5f4405d3736
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.spatial.distance import cdist
class Segreg(object):
def __init__(self):
self.attributeMatrix = np.matrix([]) # attributes matrix full size - all columns
self.location = [] # x and y coordinates from tract centroid (2D lists)
self.pop = [] # population of each groups by tract (2D lists)
self.pop_sum = [] # total population of the tract (sum all groups)
self.locality = [] # population intensity by groups by tract
self.n_location = 0 # length of list (n lines) (attributeMatrix.shape[0])
self.n_group = 0 # number of groups (attributeMatrix.shape[1] - 4)
self.costMatrix = [] # scipy cdist distance matrix
self.tract_id = [] # tract ids in string format
def readAttributesFile(self, filepath):
"""
        This function reads the CSV file and populates the class's attributes. Data has to be exactly in the
        following format or results will be wrong:
        area id, x_coord, y_coord, attribute 1, attribute 2, attribute 3, ... attribute n
:param filepath: path with file to be read
:return: attribute Matrix [n,n]
"""
raw_data = np.genfromtxt(filepath, skip_header=1, delimiter=",", filling_values=0, dtype=None)
data = [list(item)[1:] for item in raw_data]
self.attributeMatrix = np.asmatrix(data)
n = self.attributeMatrix.shape[1]
self.location = self.attributeMatrix[:, 0:2]
self.location = self.location.astype('float')
self.pop = self.attributeMatrix[:, 2:n].astype('int')
# self.pop[np.where(self.pop < 0)[0], np.where(self.pop < 0)[1]] = 0
self.n_group = n-2
self.n_location = self.attributeMatrix.shape[0]
self.pop_sum = np.sum(self.pop, axis=1)
self.tract_id = np.asarray([x[0] for x in raw_data]).astype(str)
self.tract_id = self.tract_id.reshape((self.n_location, 1))
return self.attributeMatrix
def getWeight(self, distance, bandwidth, weightmethod=1):
"""
        This function computes the neighborhood weights. The default method is Gaussian (1).
        :param distance: distances in meters to be considered for weighting
        :param bandwidth: bandwidth in meters used to define the neighborhood
        :param weightmethod: method to be used: 1 - gaussian, 2 - bi-square, 3 - moving window
:return: weight array for internal use
"""
distance = np.asarray(distance.T)
if weightmethod == 1:
weight = np.exp((-0.5) * (distance/bandwidth) * (distance/bandwidth))
elif weightmethod == 2:
weight = (1 - (distance/bandwidth)*(distance/bandwidth)) * (1 - (distance/bandwidth)*(distance/bandwidth))
sel = np.where(distance > bandwidth)
weight[sel[0]] = 0
elif weightmethod == 3:
weight = (1 + (distance * 0))
sel = np.where(distance > bandwidth)
weight[sel[0]] = 0
else:
raise Exception('Invalid weight method selected!')
return weight
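    # Worked example (illustration only, hypothetical numbers): with bandwidth = 5000 m
    # and weightmethod = 1 (gaussian), a tract at distance 2500 m gets weight
    # exp(-0.5 * (2500/5000)**2) = exp(-0.125) ~= 0.88, while with weightmethod = 3
    # (moving window) every tract within 5000 m gets weight 1 and the rest get 0.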
def cal_timeMatrix(self, bandwidth, weightmethod, matrix):
"""
        This function calculates the local population intensity for all groups based on a time matrix.
        :param bandwidth: bandwidth for neighborhood in meters
        :param weightmethod: 1 for gaussian, 2 for bi-square, 3 for moving window
:param matrix: path/file for input time matrix
:return: 2d array like with population intensity for all groups
"""
n_local = self.location.shape[0]
n_subgroup = self.pop.shape[1]
locality_temp = np.empty([n_local, n_subgroup])
for index in range(0, n_local):
for index_sub in range(0, n_subgroup):
cost = matrix[index, :].reshape(1, n_local)
weight = self.getWeight(cost, bandwidth, weightmethod)
locality_temp[index, index_sub] = np.sum(weight * np.asarray(self.pop[:, index_sub])) / np.sum(weight)
self.locality = locality_temp
self.locality[np.where(self.locality < 0)[0], np.where(self.locality < 0)[1]] = 0
return locality_temp
def cal_localityMatrix(self, bandwidth=5000, weightmethod=1):
"""
        This function calculates the local population intensity for all groups.
        :param bandwidth: bandwidth for neighborhood in meters
        :param weightmethod: 1 for gaussian, 2 for bi-square, 3 for moving window
:return: 2d array like with population intensity for all groups
"""
n_local = self.location.shape[0]
n_subgroup = self.pop.shape[1]
locality_temp = np.empty([n_local, n_subgroup])
for index in range(0, n_local):
for index_sub in range(0, n_subgroup):
cost = cdist(self.location[index, :], self.location)
weight = self.getWeight(cost, bandwidth, weightmethod)
locality_temp[index, index_sub] = np.sum(weight * np.asarray(self.pop[:, index_sub]))/np.sum(weight)
self.locality = locality_temp
self.locality[np.where(self.locality < 0)[0], np.where(self.locality < 0)[1]] = 0
return locality_temp
def cal_localDissimilarity(self):
"""
Compute local dissimilarity for all groups.
:return: 1d array like with results for all groups, size of localities
"""
if len(self.locality) == 0:
lj = np.ravel(self.pop_sum)
tjm = np.asarray(self.pop) * 1.0 / lj[:, None]
tm = np.sum(self.pop, axis=0) * 1.0 / np.sum(self.pop)
index_i = np.sum(np.asarray(tm) * np.asarray(1 - tm))
pop_total = np.sum(self.pop)
local_diss = np.sum(1.0 * np.array(np.fabs(tjm - tm)) *
np.asarray(self.pop_sum).ravel()[:, None] / (2 * pop_total * index_i), axis=1)
else:
lj = np.asarray(np.sum(self.locality, axis=1))
tjm = self.locality * 1.0 / lj[:, None]
tm = np.sum(self.pop, axis=0) * 1.0 / np.sum(self.pop)
index_i = np.sum(np.asarray(tm) * np.asarray(1 - tm))
pop_total = np.sum(self.pop)
local_diss = np.sum(1.0 * np.array(np.fabs(tjm - tm)) *
np.asarray(self.pop_sum).ravel()[:, None] / (2 * pop_total * index_i), axis=1)
local_diss = np.nan_to_num(local_diss)
return local_diss
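    # Formula implemented above (written out for reference): for locality j and group m,
    #   d_j = sum_m |t_jm - t_m| * L_j / (2 * T * I),
    # where t_jm is the (local) group proportion, t_m the citywide proportion,
    # L_j the tract population, T the total population and I = sum_m t_m * (1 - t_m).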
def cal_globalDissimilarity(self):
"""
This function call local dissimilarity and compute the sum from individual values.
:return: display global value
"""
local_diss = self.cal_localDissimilarity()
global_diss = np.sum(local_diss)
return global_diss
def cal_localExposure(self):
"""
This function computes the local exposure index of group m to group n.
in situations where m=n, then the result is the isolation index.
:return: 2d list with individual indexes
"""
m = self.n_group
j = self.n_location
exposure_rs = np.zeros((j, (m * m)))
if len(self.locality) == 0:
local_expo = np.asarray(self.pop) * 1.0 / np.asarray(np.sum(self.pop, axis=0)).ravel()
locality_rate = np.asarray(self.pop) * 1.0 / np.asarray(np.sum(self.pop, axis=1)).ravel()[:, None]
for i in range(m):
exposure_rs[:, ((i * m) + 0):((i * m) + m)] = np.asarray(locality_rate) * \
np.asarray(local_expo[:, i]).ravel()[:, None]
else:
local_expo = np.asarray(self.pop) * 1.0 / np.asarray(np.sum(self.pop, axis=0)).ravel()
locality_rate = np.asarray(self.locality) * 1.0 / np.asarray(np.sum(self.locality, axis=1)).ravel()[:, None]
for i in range(m):
exposure_rs[:, ((i * m) + 0):((i * m) + m)] = np.asarray(locality_rate) * \
np.asarray(local_expo[:, i]).ravel()[:, None]
exposure_rs[np.isinf(exposure_rs)] = 0
exposure_rs[np.isnan(exposure_rs)] = 0
return exposure_rs
def cal_globalExposure(self):
"""
This function call local exposure function and sum the results for the global index.
:return: displays global number result
"""
m = self.n_group
local_exp = self.cal_localExposure()
global_exp = np.sum(local_exp, axis=0)
global_exp = global_exp.reshape((m, m))
return global_exp
def cal_localEntropy(self):
"""
        This function computes the local entropy score for a unit area Ei (diversity), i.e. a unit within the
        metropolitan area, such as a census tract. If population intensity was previously computed,
the spatial version will be returned, else the non spatial version will be selected (raw data).
:return: 2d array with local indices
"""
if len(self.locality) == 0:
proportion = np.asarray(self.pop / self.pop_sum)
else:
polygon_sum = np.sum(self.locality, axis=1).reshape(self.n_location, 1)
proportion = np.asarray(self.locality / polygon_sum)
entropy = proportion * np.log(1 / proportion)
entropy[np.isnan(entropy)] = 0
entropy[np.isinf(entropy)] = 0
entropy = np.sum(entropy, axis=1)
entropy = entropy.reshape((self.n_location, 1))
return entropy
def cal_globalEntropy(self):
"""
This function computes the global entropy score E (diversity). A metropolitan area's entropy score.
:return: diversity score
"""
group_score = []
pop_total = np.sum(self.pop_sum)
prop = np.asarray(np.sum(self.pop, axis=0))[0]
# loop at sum of each population groups
for group in prop:
group_idx = group / pop_total * np.log(1 / (group / pop_total))
group_score.append(group_idx)
entropy = np.sum(group_score)
return entropy
def cal_localIndexH(self):
"""
This function computes the local entropy index H for all localities. The functions cal_localEntropy() for
local diversity and cal_globalEntropy for global entropy are called as input. If population intensity
was previously computed, the spatial version will be returned, else the non spatial version will be
selected (raw data).
:return: array like with scores for n groups (size groups)
"""
local_entropy = self.cal_localEntropy()
global_entropy = self.cal_globalEntropy()
et = global_entropy * np.sum(self.pop_sum)
eei = np.asarray(global_entropy - local_entropy)
h_local = np.asarray(self.pop_sum) * eei / et
return h_local
def cal_globalIndexH(self):
"""
Function to compute global index H returning the sum of local values. The function cal_localIndexH is
called as input for sum of individual values.
:return: values with global index for each group.
"""
h_local = self.cal_localIndexH()
h_global = np.sum(h_local)
return h_global
| 43.969231
| 120
| 0.598933
|
27ade3a648060d6c9b3e714aa52b27adf78da99b
| 10,757
|
py
|
Python
|
advanced_circuits_algorithms/QPE/utils_qpe.py
|
wkcwells/amazon-braket-examples
|
19a11641d77951e6619d7941cd2488242b18e937
|
[
"Apache-2.0"
] | null | null | null |
advanced_circuits_algorithms/QPE/utils_qpe.py
|
wkcwells/amazon-braket-examples
|
19a11641d77951e6619d7941cd2488242b18e937
|
[
"Apache-2.0"
] | null | null | null |
advanced_circuits_algorithms/QPE/utils_qpe.py
|
wkcwells/amazon-braket-examples
|
19a11641d77951e6619d7941cd2488242b18e937
|
[
"Apache-2.0"
] | null | null | null |
# general imports
import numpy as np
import math
from collections import Counter
from datetime import datetime
import pickle
# AWS imports: Import Braket SDK modules
from braket.circuits import Circuit, circuit
# local imports
from utils_qft import inverse_qft
@circuit.subroutine(register=True)
def controlled_unitary(control, target_qubits, unitary):
"""
Construct a circuit object corresponding to the controlled unitary
Args:
control: The qubit on which to control the gate
target_qubits: List of qubits on which the unitary U acts
unitary: matrix representation of the unitary we wish to implement in a controlled way
"""
# Define projectors onto the computational basis
p0 = np.array([[1., 0.],
[0., 0.]])
p1 = np.array([[0., 0.],
[0., 1.]])
# Instantiate circuit object
circ = Circuit()
# Construct numpy matrix
id_matrix = np.eye(len(unitary))
controlled_matrix = np.kron(p0, id_matrix) + np.kron(p1, unitary)
# Set all target qubits
targets = [control] + target_qubits
# Add controlled unitary
circ.unitary(matrix=controlled_matrix, targets=targets)
return circ
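# Illustration only (not part of the original notebook utilities): the same projector
# construction with U = X reproduces the CNOT matrix, which is a quick sanity check.
_p0_demo = np.array([[1., 0.], [0., 0.]])
_p1_demo = np.array([[0., 0.], [0., 1.]])
_x_demo = np.array([[0., 1.], [1., 0.]])
_cnot_demo = np.kron(_p0_demo, np.eye(2)) + np.kron(_p1_demo, _x_demo)
# _cnot_demo == [[1, 0, 0, 0],
#                [0, 1, 0, 0],
#                [0, 0, 0, 1],
#                [0, 0, 1, 0]]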
@circuit.subroutine(register=True)
def qpe(precision_qubits, query_qubits, unitary, control_unitary=True):
"""
Function to implement the QPE algorithm using two registers for precision (read-out) and query.
Register qubits need not be contiguous.
Args:
precision_qubits: list of qubits defining the precision register
query_qubits: list of qubits defining the query register
unitary: Matrix representation of the unitary whose eigenvalues we wish to estimate
control_unitary: Optional boolean flag for controlled unitaries,
with C-(U^{2^k}) by default (default is True),
or C-U controlled-unitary (2**power) times
"""
qpe_circ = Circuit()
# Get number of qubits
num_precision_qubits = len(precision_qubits)
num_query_qubits = len(query_qubits)
# Apply Hadamard across precision register
qpe_circ.h(precision_qubits)
# Apply controlled unitaries. Start with the last precision_qubit, and end with the first
for ii, qubit in enumerate(reversed(precision_qubits)):
# Set power exponent for unitary
power = ii
        # Alternative 1: Implement C-(U^{2^k})
if control_unitary:
# Define the matrix U^{2^k}
Uexp = np.linalg.matrix_power(unitary,2**power)
# Apply the controlled unitary C-(U^{2^k})
qpe_circ.controlled_unitary(qubit, query_qubits, Uexp)
        # Alternative 2: One can instead apply controlled-unitary (2**power) times to get C-U^{2^power}
else:
for _ in range(2**power):
qpe_circ.controlled_unitary(qubit, query_qubits, unitary)
# Apply inverse qft to the precision_qubits
    qpe_circ.inverse_qft(precision_qubits)
return qpe_circ
# helper function to remove query bits from bitstrings
def substring(key, precision_qubits):
"""
    Helper function to extract, from a measurement key, the substring at the positions given by precision_qubits.
    This function is necessary to allow for arbitrary qubit mappings in the precision and query registers
(i.e., so that the register qubits need not be contiguous.)
Args:
key: string from which we want to extract the substring supported only on the precision qubits
precision_qubits: List of qubits corresponding to precision_qubits.
Currently assumed to be a list of integers corresponding to the indices of the qubits.
"""
short_key = ''
for idx in precision_qubits:
short_key = short_key + key[idx]
return short_key
# helper function to convert binary fractional to decimal
# reference: https://www.geeksforgeeks.org/convert-binary-fraction-decimal/
def binaryToDecimal(binary):
"""
Helper function to convert binary string (example: '01001') to decimal
Args:
binary: string which to convert to decimal fraction
"""
length = len(binary)
fracDecimal = 0
# Convert fractional part of binary to decimal equivalent
twos = 2
for ii in range(length):
        fracDecimal += (ord(binary[ii]) - ord('0')) / twos
twos *= 2.0
# return fractional part
return fracDecimal
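# Worked example (arithmetic only): binaryToDecimal('01001')
#   = 0/2 + 1/4 + 0/8 + 0/16 + 1/32 = 0.28125,
# i.e. the phase fraction encoded by the precision-register bitstring '01001'.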
# helper function for postprocessing based on measurement shots
def get_qpe_phases(measurement_counts, precision_qubits, items_to_keep=1):
"""
Get QPE phase estimate from measurement_counts for given number of precision qubits
Args:
measurement_counts: measurement results from a device run
precision_qubits: List of qubits corresponding to precision_qubits.
Currently assumed to be a list of integers corresponding to the indices of the qubits.
items_to_keep: number of items to return (topmost measurement counts for precision register)
"""
# Aggregate the results (i.e., ignore/trace out the query register qubits):
# First get bitstrings with corresponding counts for precision qubits only
bitstrings_precision_register = [substring(key, precision_qubits) for key in measurement_counts.keys()]
# Then keep only the unique strings
bitstrings_precision_register_set = set(bitstrings_precision_register)
# Cast as a list for later use
bitstrings_precision_register_list = list(bitstrings_precision_register_set)
# Now create a new dict to collect measurement results on the precision_qubits.
# Keys are given by the measurement count substrings on the register qubits. Initialize the counts to zero.
precision_results_dic = {key: 0 for key in bitstrings_precision_register_list}
# Loop over all measurement outcomes
for key in measurement_counts.keys():
# Save the measurement count for this outcome
counts = measurement_counts[key]
# Generate the corresponding shortened key (supported only on the precision_qubits register)
count_key = substring(key, precision_qubits)
# Add these measurement counts to the corresponding key in our new dict
precision_results_dic[count_key] += counts
# Get topmost values only
c = Counter(precision_results_dic)
    topmost = c.most_common(items_to_keep)
# get decimal phases from bitstrings for topmost bitstrings
phases_decimal = [binaryToDecimal(item[0]) for item in topmost]
# Get decimal phases from bitstrings for all bitstrings
# number_precision_qubits = len(precision_qubits)
# Generate binary decimal expansion
# phases_decimal = [int(key, 2)/(2**number_precision_qubits) for key in precision_results_dic]
# phases_decimal = [binaryToDecimal(key) for key in precision_results_dic]
return phases_decimal, precision_results_dic
def run_qpe(unitary, precision_qubits, query_qubits, query_circuit,
device, s3_folder=None, items_to_keep=2, shots=1000, poll_time=1000, save_to_pck=False):
"""
Function to run QPE algorithm end-to-end and return measurement counts.
Args:
precision_qubits: list of qubits defining the precision register
query_qubits: list of qubits defining the query register
unitary: Matrix representation of the unitary whose eigenvalues we wish to estimate
query_circuit: query circuit for state preparation of query register
items_to_keep: (optional) number of items to return (topmost measurement counts for precision register)
device: Braket device backend
shots: (optional) number of measurement shots (default is 1000)
poll_time: (optional) polling time in seconds for device.run(...) call
save_to_pck: (optional) save results to pickle file if True (default is False)
"""
# get size of precision register and total number of qubits
number_precision_qubits = len(precision_qubits)
num_qubits = len(precision_qubits) + len(query_qubits)
    # Define the circuit. Start from the query_circuit, then add the QPE:
circ = query_circuit
circ.qpe(precision_qubits, query_qubits, unitary)
# Add desired results_types
circ.probability()
if shots == 0:
circ.state_vector()
# Run the circuit with all zeros input.
# The query_circuit subcircuit generates the desired input from all zeros.
# The code below executes the correct device.run call depending on whether the backend is local or cloud based
if device.name == 'DefaultSimulator':
task = device.run(circ, shots=shots)
else:
task = device.run(circ, s3_folder, shots=shots, poll_timeout_seconds=poll_time)
# get result for this task
result = task.result()
# get metadata
metadata = result.task_metadata
# get output probabilities (see result_types above)
probs_values = result.values[0]
# get measurement results
measurements = result.measurements
measured_qubits = result.measured_qubits
measurement_counts = result.measurement_counts
measurement_probabilities = result.measurement_probabilities
# bitstrings
format_bitstring = '{0:0' + str(num_qubits) + 'b}'
bitstring_keys = [format_bitstring.format(ii) for ii in range(2**num_qubits)]
# QPE postprocessing
phases_decimal = None
eigenvalues = None
precision_results_dic = None
states = None
if shots != 0:
phases_decimal, precision_results_dic = get_qpe_phases(measurement_counts, precision_qubits, items_to_keep)
eigenvalues = [np.exp(2*np.pi*1j*phase) for phase in phases_decimal]
else:
states = result.values[1]
# aggregate results
out = {'circuit': circ,
'task_metadata': metadata,
'measurements': measurements,
'measured_qubits': measured_qubits,
'measurement_counts': measurement_counts,
'measurement_probabilities': measurement_probabilities,
'probs_values': probs_values,
'bitstring_keys': bitstring_keys,
'precision_results_dic': precision_results_dic,
'phases_decimal': phases_decimal,
'eigenvalues': eigenvalues,
'states': states}
if save_to_pck:
# store results: dump output to pickle with timestamp in filename
time_now = datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')
results_file = 'results-'+time_now+'.pck'
pickle.dump(out, open(results_file, "wb"))
# you can load results as follows
# out = pickle.load(open(results_file, "rb"))
return out
| 36.097315
| 115
| 0.699823
|
803ee16c8fca1d8f736dbf9e3fb9ae10560b016c
| 21,865
|
py
|
Python
|
scripts/generate_fixtures.py
|
oss-spanish-geoserver/observatory-extension
|
6a063ca0eb33043f22e47171fc328aa50a6efc39
|
[
"BSD-3-Clause"
] | 6
|
2017-11-08T23:23:07.000Z
|
2021-03-05T05:39:23.000Z
|
scripts/generate_fixtures.py
|
oss-spanish-geoserver/observatory-extension
|
6a063ca0eb33043f22e47171fc328aa50a6efc39
|
[
"BSD-3-Clause"
] | 178
|
2016-04-08T19:48:37.000Z
|
2020-02-13T14:22:42.000Z
|
scripts/generate_fixtures.py
|
oss-spanish-geoserver/observatory-extension
|
6a063ca0eb33043f22e47171fc328aa50a6efc39
|
[
"BSD-3-Clause"
] | 4
|
2016-05-16T14:47:33.000Z
|
2020-08-30T21:39:22.000Z
|
import os
import psycopg2
import subprocess
PGUSER = os.environ.get('PGUSER', 'postgres')
PGPASSWORD = os.environ.get('PGPASSWORD', '')
PGHOST=os.environ.get('PGHOST', 'localhost')
PGPORT=os.environ.get('PGPORT', '5432')
PGDATABASE=os.environ.get('PGDATABASE', 'postgres')
DB_CONN = psycopg2.connect('postgres://{user}:{password}@{host}:{port}/{database}'.format(
user=PGUSER,
password=PGPASSWORD,
host=PGHOST,
port=PGPORT,
database=PGDATABASE
))
CURSOR = DB_CONN.cursor()
def query(q):
'''
Query the database.
'''
try:
CURSOR.execute(q)
return CURSOR
except:
DB_CONN.rollback()
raise
def commit():
try:
DB_CONN.commit()
except:
DB_CONN.rollback()
raise
def get_tablename_query(column_id, boundary_id, timespan):
"""
    Given a column_id, a boundary_id (e.g. us.census.tiger.block_group), and a timespan,
    return the query that looks up the current table and geometry information from the
    Data Observatory's obs_meta.
"""
return """
SELECT numer_tablename, numer_geomref_colname, numer_tid,
geom_tablename, geom_geomref_colname, geom_tid
FROM observatory.obs_meta
WHERE numer_id = '{numer_id}' AND
geom_id = '{geom_id}' AND
numer_timespan = '{numer_timespan}'
""".format(numer_id=column_id,
geom_id=boundary_id,
numer_timespan=timespan)
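# Illustration only: for the first FIXTURES entry below, the rendered query is
# (up to whitespace)
#   SELECT numer_tablename, numer_geomref_colname, numer_tid,
#          geom_tablename, geom_geomref_colname, geom_tid
#   FROM observatory.obs_meta
#   WHERE numer_id = 'us.census.acs.B01003001_quantile' AND
#         geom_id = 'us.census.tiger.census_tract' AND
#         numer_timespan = '2010 - 2014'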
METADATA_TABLES = ['obs_table', 'obs_column_table', 'obs_column', 'obs_column_tag',
'obs_tag', 'obs_column_to_column', 'obs_dump_version', 'obs_meta',
'obs_table_to_table', 'obs_meta_numer', 'obs_meta_denom',
'obs_meta_geom', 'obs_meta_timespan', 'obs_meta_geom_numer_timespan',
'obs_column_table_tile', 'obs_column_table_tile_simple']
FIXTURES = [
('us.census.acs.B01003001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B01001002_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B01001026_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B01002001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002003_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002004_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002006_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002012_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B05001006_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08006001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08006002_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08301010_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08006009_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08006011_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08006015_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08006017_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B09001001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B11001001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B14001001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B14001002_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B14001005_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B14001006_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B14001007_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B14001008_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003017_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003022_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003023_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B16001001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B16001002_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B16001003_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B17001001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B17001002_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19013001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19083001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19301001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25001001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25002003_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25004002_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25004004_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25058001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25071001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25075001_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25075025_quantile', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B01003001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B01001002', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B01001026', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B01002001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B03002003', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B03002004', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B03002006', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B03002012', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B03002005', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B03002008', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B03002009', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B03002002', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B11001001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B15003001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B15003017', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B15003019', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B15003020', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B15003021', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B15003022', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B15003023', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19013001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19301001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B25001001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B25002003', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B25004002', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B25004004', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B25058001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B25071001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B25075001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B25075025', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B25081002', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B08134001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B08134002', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001002', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001003', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001004', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001005', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001006', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001007', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001008', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001009', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001010', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001011', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001012', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001013', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001014', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001015', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001016', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B19001017', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B01001002', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B01003001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B01001002', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B01001026', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B01002001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002003', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002004', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002006', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002012', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002005', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002008', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002009', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B03002002', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B11001001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003017', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003019', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003020', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003021', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003022', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B15003023', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19013001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19083001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19301001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25001001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25002003', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25004002', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25004004', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25058001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25071001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25075001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25075025', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B25081002', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08134001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08134002', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08134008', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08134008', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B08134010', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001002', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001003', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001004', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001005', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001006', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001007', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001008', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001009', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001010', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001011', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001012', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001013', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001014', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001015', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001016', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.acs.B19001017', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.spielman_singleton_segments.X10', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.spielman_singleton_segments.X55', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.zillow.AllHomes_Zhvi', 'us.census.tiger.zcta5', '2014-01'),
('us.zillow.AllHomes_Zhvi', 'us.census.tiger.zcta5', '2016-06'),
('us.census.acs.B01003001', 'us.census.tiger.zcta5', '2010 - 2014'),
('us.census.acs.B01003001', 'us.census.tiger.block_group', '2010 - 2014'),
('us.census.acs.B01003001', 'us.census.tiger.census_tract', '2010 - 2014'),
('us.census.tiger.place_geoname', 'us.census.tiger.place_clipped', '2015'),
('us.census.tiger.county_geoname', 'us.census.tiger.county_clipped', '2015'),
('us.census.tiger.county_geoname', 'us.census.tiger.county', '2015'),
('us.census.tiger.block_group_geoname', 'us.census.tiger.block_group', '2015'),
]
OUTFILE_PATH = os.path.join(os.path.dirname(__file__), '..',
'src/pg/test/fixtures/load_fixtures.sql')
DROPFILE_PATH = os.path.join(os.path.dirname(__file__), '..',
'src/pg/test/fixtures/drop_fixtures.sql')
def dump(cols, tablename, where=''):
with open(DROPFILE_PATH, 'a') as dropfile:
dropfile.write('DROP TABLE IF EXISTS observatory.{tablename};\n'.format(
tablename=tablename,
))
subprocess.check_call('PGPASSWORD={pgpassword} PGUSER={pguser} PGHOST={pghost} PGDATABASE={pgdb} '
'pg_dump -x --section=pre-data -t observatory.{tablename} '
' | sed "s:SET search_path.*::" '
' | sed "s:ALTER TABLE.*OWNER.*::" '
' | sed "s:SET idle_in_transaction_session_timeout.*::" '
' >> {outfile}'.format(
tablename=tablename,
outfile=OUTFILE_PATH,
pgpassword=PGPASSWORD,
pghost=PGHOST,
pgdb=PGDATABASE,
pguser=PGUSER
), shell=True)
with open(OUTFILE_PATH, 'a') as outfile:
outfile.write('COPY observatory."{}" FROM stdin WITH CSV HEADER;\n'.format(tablename))
subprocess.check_call('''
PGPASSWORD={pgpassword} psql -U {pguser} -d {pgdb} -h {pghost} -c "COPY (SELECT {cols} \
FROM observatory.{tablename} {where}) \
TO STDOUT WITH CSV HEADER" >> {outfile}'''.format(
cols=cols,
tablename=tablename,
where=where,
outfile=OUTFILE_PATH,
pgpassword=PGPASSWORD,
pghost=PGHOST,
pgdb=PGDATABASE,
pguser=PGUSER
), shell=True)
with open(OUTFILE_PATH, 'a') as outfile:
outfile.write('\\.\n\n')
def main():
unique_tables = set()
for f in FIXTURES:
column_id, boundary_id, timespan = f
tablename_query = get_tablename_query(column_id, boundary_id, timespan)
resp = query(tablename_query).fetchone()
if resp:
numer_tablename, numer_colname, numer_table_id = resp[0:3]
geom_tablename, geom_colname, geom_table_id = resp[3:6]
else:
raise Exception("Could not find table for {}, {}, {}".format(
column_id, boundary_id, timespan))
numer = (numer_tablename, numer_colname, numer_table_id, )
geom = (geom_tablename, geom_colname, geom_table_id, )
if numer not in unique_tables:
print(numer)
unique_tables.add(numer)
if geom not in unique_tables:
print(geom)
unique_tables.add(geom)
    print(unique_tables)
with open(OUTFILE_PATH, 'w') as outfile:
outfile.write('SET client_min_messages TO WARNING;\n\\set ECHO none\n')
outfile.write('CREATE SCHEMA IF NOT EXISTS observatory;\n\n')
with open(DROPFILE_PATH, 'w') as dropfile:
dropfile.write('SET client_min_messages TO WARNING;\n\\set ECHO none\n')
for tablename in METADATA_TABLES:
print(tablename)
if tablename == 'obs_meta':
where = "WHERE " + " OR ".join([
"(numer_id, geom_id, numer_timespan) = ('{}', '{}', '{}')".format(
numer_id, geom_id, timespan)
for numer_id, geom_id, timespan in FIXTURES
])
elif tablename == 'obs_meta_numer':
where = "WHERE " + " OR ".join([
"numer_id IN ('{}', '{}')".format(numer_id, geom_id)
for numer_id, geom_id, timespan in FIXTURES
])
elif tablename == 'obs_meta_denom':
where = "WHERE " + " OR ".join([
"denom_id IN ('{}', '{}')".format(numer_id, geom_id)
for numer_id, geom_id, timespan in FIXTURES
])
elif tablename == 'obs_meta_geom':
where = "WHERE " + " OR ".join([
"geom_id IN ('{}', '{}')".format(numer_id, geom_id)
for numer_id, geom_id, timespan in FIXTURES
])
elif tablename == 'obs_meta_timespan':
where = "WHERE " + " OR ".join([
"timespan_id = ('{}')".format(timespan)
for numer_id, geom_id, timespan in FIXTURES
])
elif tablename == 'obs_column':
where = "WHERE " + " OR ".join([
"id IN ('{}', '{}')".format(numer_id, geom_id)
for numer_id, geom_id, timespan in FIXTURES
])
elif tablename == 'obs_column_tag':
where = "WHERE " + " OR ".join([
"column_id IN ('{}', '{}')".format(numer_id, geom_id)
for numer_id, geom_id, timespan in FIXTURES
])
elif tablename in ('obs_column_table', 'obs_column_table_tile',
'obs_column_table_tile_simple'):
where = '''WHERE table_id IN ({table_ids}) AND
(column_id IN ({numer_ids}) OR column_id IN ({geom_ids}))
'''.format(
numer_ids=','.join(["'{}'".format(x) for x, _, _ in FIXTURES]),
geom_ids=','.join(["'{}'".format(x) for _, x, _ in FIXTURES]),
table_ids=','.join(["'{}'".format(x) for _, _, x in unique_tables])
)
elif tablename == 'obs_column_to_column':
where = "WHERE " + " OR ".join([
"source_id IN ('{}', '{}') OR target_id IN ('{}', '{}')".format(
numer_id, geom_id, numer_id, geom_id)
for numer_id, geom_id, timespan in FIXTURES
])
elif tablename == 'obs_table':
where = 'WHERE timespan IN ({timespans}) ' \
'OR id IN ({table_ids}) '.format(
timespans=','.join(["'{}'".format(x) for _, _, x in FIXTURES]),
table_ids=','.join(["'{}'".format(x) for _, _, x in unique_tables])
)
        elif tablename == 'obs_table_to_table':
where = '''WHERE source_id IN ({table_ids})'''.format(
table_ids=','.join(["'{}'".format(x) for _, _, x in unique_tables])
)
else:
where = ''
dump('*', tablename, where)
for tablename, colname, table_id in unique_tables:
if 'zcta5' in table_id or 'zillow_zip' in table_id:
where = '\'11%\''
compare = 'LIKE'
elif 'county' in table_id and 'tiger' in table_id:
where = "('48061', '36047')"
compare = 'IN'
else:
where = '\'36047%\''
compare = 'LIKE'
        print(' '.join(['*', tablename, "WHERE {}::text {} {}".format(colname, compare, where)]))
dump('*', tablename, "WHERE {}::text {} {}".format(colname, compare, where))
if __name__ == '__main__':
main()
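# Illustrative note (derived from the code above, not from an actual run): for the
# 'obs_meta' table the generated filter looks like
#   WHERE (numer_id, geom_id, numer_timespan) =
#         ('us.census.acs.B01003001', 'us.census.tiger.block_group', '2010 - 2014')
#   OR ...
# with one disjunct per FIXTURES entry, while the data tables themselves are trimmed
# geographically (e.g. geoms whose id matches '36047%' or the listed counties).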
| 56.792208
| 102
| 0.618248
|
ae47071fd9aeb5211b403be8f2544af1ffc527f6
| 361
|
py
|
Python
|
examples/servertls_clientauth.py
|
aheck/reflectrpc
|
9602d3b3f06d3d8ee8549788301e43b172a597f6
|
[
"MIT"
] | 33
|
2016-06-12T15:25:07.000Z
|
2021-11-16T18:53:16.000Z
|
examples/servertls_clientauth.py
|
aheck/reflectrpc
|
9602d3b3f06d3d8ee8549788301e43b172a597f6
|
[
"MIT"
] | null | null | null |
examples/servertls_clientauth.py
|
aheck/reflectrpc
|
9602d3b3f06d3d8ee8549788301e43b172a597f6
|
[
"MIT"
] | 3
|
2016-10-10T20:50:41.000Z
|
2021-05-01T23:51:34.000Z
|
#!/usr/bin/env python3
import sys
sys.path.append('..')
import reflectrpc
import reflectrpc.twistedserver
import rpcexample
jsonrpc = rpcexample.build_example_rpcservice()
server = reflectrpc.twistedserver.TwistedJsonRpcServer(jsonrpc, 'localhost', 5500)
server.enable_tls('./certs/server.pem')
server.enable_client_auth('./certs/rootCA.crt')
server.run()
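# Note added for clarity (not part of the original example): enable_tls() turns on
# TLS using the certificate material in ./certs/server.pem, and enable_client_auth()
# additionally requires connecting clients to present a certificate signed by the
# CA in ./certs/rootCA.crt.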
| 21.235294
| 82
| 0.792244
|
c498a8aafd679743c368285073716217ee2b6e2f
| 13,728
|
py
|
Python
|
examples/trials/cifar10_grad_match/cords/selectionstrategies/supervisedlearning/craigstrategy.py
|
savan77/nni
|
510213393d9cae58c5a8cccd21f322f7bba4e0cf
|
[
"MIT"
] | null | null | null |
examples/trials/cifar10_grad_match/cords/selectionstrategies/supervisedlearning/craigstrategy.py
|
savan77/nni
|
510213393d9cae58c5a8cccd21f322f7bba4e0cf
|
[
"MIT"
] | null | null | null |
examples/trials/cifar10_grad_match/cords/selectionstrategies/supervisedlearning/craigstrategy.py
|
savan77/nni
|
510213393d9cae58c5a8cccd21f322f7bba4e0cf
|
[
"MIT"
] | null | null | null |
import apricot
import numpy as np
import torch
import torch.nn.functional as F
from scipy.sparse import csr_matrix
from .dataselectionstrategy import DataSelectionStrategy
from torch.utils.data.sampler import SubsetRandomSampler
import math
class CRAIGStrategy(DataSelectionStrategy):
"""
Implementation of CRAIG Strategy from the paper :footcite:`mirzasoleiman2020coresets` for supervised learning frameworks.
CRAIG strategy tries to solve the optimization problem given below for convex loss functions:
.. math::
\\sum_{i\\in \\mathcal{U}} \\min_{j \\in S, |S| \\leq k} \\| x^i - x^j \\|
In the above equation, :math:`\\mathcal{U}` denotes the training set where :math:`(x^i, y^i)` denotes the :math:`i^{th}` training data point and label respectively,
:math:`L_T` denotes the training loss, :math:`S` denotes the data subset selected at each round, and :math:`k` is the budget for the subset.
Since, the above optimization problem is not dependent on model parameters, we run the subset selection only once right before the start of the training.
CRAIG strategy tries to solve the optimization problem given below for non-convex loss functions:
.. math::
\\sum_{i\\in \\mathcal{U}} \\min_{j \\in S, |S| \\leq k} \\| \\nabla_{\\theta} {L_T}^i(\\theta) - \\nabla_{\\theta} {L_T}^j(\\theta) \\|
In the above equation, :math:`\\mathcal{U}` denotes the training set, :math:`L_T` denotes the training loss, :math:`S` denotes the data subset selected at each round,
    and :math:`k` is the budget for the subset. In this case, CRAIG acts as an adaptive subset selection strategy that selects a new subset every epoch.
    Both the optimization problems given above are instances of the facility location problem, which is a submodular function. Hence, they can be solved near-optimally using greedy selection methods.
Parameters
----------
trainloader: class
Loading the training data using pytorch DataLoader
valloader: class
Loading the validation data using pytorch DataLoader
model: class
Model architecture used for training
loss_type: class
The type of loss criterion
device: str
The device being utilized - cpu | cuda
num_classes: int
The number of target classes in the dataset
linear_layer: bool
Apply linear transformation to the data
if_convex: bool
If convex or not
selection_type: str
Type of selection:
        - 'PerClass': PerClass implementation where the facility location problem is solved for each class separately for speed ups.
        - 'Supervised': Supervised implementation where the facility location problem is solved using a sparse similarity matrix by assigning the similarity of a point with points of a different class to zero.
        - 'PerBatch': PerBatch implementation where per-mini-batch averaged representations are used, so whole batches are selected (see `compute_score` and `select`).
"""
def __init__(self, trainloader, valloader, model, loss_type,
device, num_classes, linear_layer, if_convex, selection_type):
"""
        Constructor method
"""
super().__init__(trainloader, valloader, model, num_classes, linear_layer)
self.loss_type = loss_type # Make sure it has reduction='none' instead of default
self.device = device
self.if_convex = if_convex
self.selection_type = selection_type
def distance(self, x, y, exp=2):
"""
Compute the distance.
Parameters
----------
x: Tensor
First input tensor
y: Tensor
Second input tensor
exp: float, optional
The exponent value (default: 2)
Returns
----------
dist: Tensor
Output tensor
"""
n = x.size(0)
m = y.size(0)
d = x.size(1)
x = x.unsqueeze(1).expand(n, m, d)
y = y.unsqueeze(0).expand(n, m, d)
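        # Shape note (comment added): x is expanded to (n, m, d) and y to
        # (n, m, d), so the line below yields dist of shape (n, m) with
        # dist[i, j] = sum_k (x[i, k] - y[j, k]) ** exp.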
dist = torch.pow(x - y, exp).sum(2)
#dist = torch.exp(-1 * torch.pow(x - y, 2).sum(2))
return dist
def compute_score(self, model_params, idxs):
"""
Compute the score of the indices.
Parameters
----------
model_params: OrderedDict
            Python dictionary object containing the model's parameters
idxs: list
The indices
"""
trainset = self.trainloader.sampler.data_source
subset_loader = torch.utils.data.DataLoader(trainset, batch_size=self.trainloader.batch_size, shuffle=False,
sampler=SubsetRandomSampler(idxs),
pin_memory=True)
self.model.load_state_dict(model_params)
self.N = 0
g_is = []
with torch.no_grad():
if self.if_convex:
for batch_idx, (inputs, targets) in enumerate(subset_loader):
                    # convex case: keep the raw inputs (on CPU) as the per-sample representation
if self.selection_type == 'PerBatch':
self.N += 1
g_is.append(inputs.view(inputs.size()[0], -1).mean(dim=0).view(1, -1))
else:
self.N += inputs.size()[0]
g_is.append(inputs.view(inputs.size()[0], -1))
else:
embDim = self.model.get_embedding_dim()
for batch_idx, (inputs, targets) in enumerate(subset_loader):
inputs, targets = inputs.to(self.device), targets.to(self.device, non_blocking=True)
if self.selection_type == 'PerBatch':
self.N += 1
else:
self.N += inputs.size()[0]
with torch.no_grad():
out, l1 = self.model(inputs, last=True)
data = F.softmax(out, dim=1)
outputs = torch.zeros(len(inputs), self.num_classes).to(self.device)
outputs.scatter_(1, targets.view(-1, 1), 1)
l0_grads = data - outputs
if self.linear_layer:
l0_expand = torch.repeat_interleave(l0_grads, embDim, dim=1)
l1_grads = l0_expand * l1.repeat(1, self.num_classes)
if self.selection_type == 'PerBatch':
g_is.append(torch.cat((l0_grads, l1_grads), dim=1).mean(dim=0).view(1, -1))
else:
g_is.append(torch.cat((l0_grads, l1_grads), dim=1))
else:
if self.selection_type == 'PerBatch':
g_is.append(l0_grads.mean(dim=0).view(1, -1))
else:
g_is.append(l0_grads)
self.dist_mat = torch.zeros([self.N, self.N], dtype=torch.float32)
first_i = True
if self.selection_type == 'PerBatch':
g_is = torch.cat(g_is, dim=0)
self.dist_mat = self.distance(g_is, g_is).cpu()
else:
for i, g_i in enumerate(g_is, 0):
if first_i:
size_b = g_i.size(0)
first_i = False
for j, g_j in enumerate(g_is, 0):
self.dist_mat[i * size_b: i * size_b + g_i.size(0),
j * size_b: j * size_b + g_j.size(0)] = self.distance(g_i, g_j).cpu()
self.const = torch.max(self.dist_mat).item()
self.dist_mat = (self.const - self.dist_mat).numpy()
def compute_gamma(self, idxs):
"""
Compute the gamma values for the indices.
Parameters
----------
idxs: list
The indices
Returns
----------
gamma: list
Gradient values of the input indices
"""
if self.selection_type in ['PerClass', 'PerBatch']:
gamma = [0 for i in range(len(idxs))]
best = self.dist_mat[idxs] # .to(self.device)
rep = np.argmax(best, axis=0)
for i in rep:
gamma[i] += 1
elif self.selection_type == 'Supervised':
gamma = [0 for i in range(len(idxs))]
best = self.dist_mat[idxs] # .to(self.device)
rep = np.argmax(best, axis=0)
for i in range(rep.shape[1]):
gamma[rep[0, i]] += 1
return gamma
def get_similarity_kernel(self):
"""
Obtain the similarity kernel.
Returns
----------
kernel: ndarray
Array of kernel values
"""
for batch_idx, (inputs, targets) in enumerate(self.trainloader):
if batch_idx == 0:
labels = targets
else:
tmp_target_i = targets
labels = torch.cat((labels, tmp_target_i), dim=0)
kernel = np.zeros((labels.shape[0], labels.shape[0]))
for target in np.unique(labels):
x = np.where(labels == target)[0]
# prod = np.transpose([np.tile(x, len(x)), np.repeat(x, len(x))])
for i in x:
kernel[i, x] = 1
return kernel
def select(self, budget, model_params, optimizer):
"""
Data selection method using different submodular optimization
functions.
Parameters
----------
budget: int
The number of data points to be selected
model_params: OrderedDict
Python dictionary object containing models parameters
optimizer: str
The optimization approach for data selection. Must be one of
'random', 'modular', 'naive', 'lazy', 'approximate-lazy', 'two-stage',
'stochastic', 'sample', 'greedi', 'bidirectional'
Returns
----------
total_greedy_list: list
List containing indices of the best datapoints
gammas: list
List containing gradients of datapoints present in greedySet
"""
for batch_idx, (inputs, targets) in enumerate(self.trainloader):
if batch_idx == 0:
labels = targets
else:
tmp_target_i = targets
labels = torch.cat((labels, tmp_target_i), dim=0)
#per_class_bud = int(budget / self.num_classes)
total_greedy_list = []
gammas = []
if self.selection_type == 'PerClass':
for i in range(self.num_classes):
idxs = torch.where(labels == i)[0]
self.compute_score(model_params, idxs)
fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',
n_samples= math.ceil(budget * len(idxs) / self.N_trn), optimizer=optimizer)
sim_sub = fl.fit_transform(self.dist_mat)
greedyList = list(np.argmax(sim_sub, axis=1))
gamma = self.compute_gamma(greedyList)
total_greedy_list.extend(idxs[greedyList])
gammas.extend(gamma)
rand_indices = np.random.permutation(len(total_greedy_list))
            total_greedy_list = list(np.array(total_greedy_list)[rand_indices])
gammas = list(np.array(gammas)[rand_indices])
elif self.selection_type == 'Supervised':
for i in range(self.num_classes):
if i == 0:
idxs = torch.where(labels == i)[0]
N = len(idxs)
self.compute_score(model_params, idxs)
row = idxs.repeat_interleave(N)
col = idxs.repeat(N)
data = self.dist_mat.flatten()
else:
idxs = torch.where(labels == i)[0]
N = len(idxs)
self.compute_score(model_params, idxs)
row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)
col = torch.cat((col, idxs.repeat(N)), dim=0)
data = np.concatenate([data, self.dist_mat.flatten()], axis=0)
sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))
self.dist_mat = sparse_simmat
fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',
n_samples=budget, optimizer=optimizer)
sim_sub = fl.fit_transform(sparse_simmat)
total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))
gammas = self.compute_gamma(total_greedy_list)
elif self.selection_type == 'PerBatch':
idxs = torch.arange(self.N_trn)
N = len(idxs)
self.compute_score(model_params, idxs)
fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',
n_samples=math.ceil(budget/self.trainloader.batch_size), optimizer=optimizer)
sim_sub = fl.fit_transform(self.dist_mat)
temp_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))
gammas_temp = self.compute_gamma(temp_list)
batch_wise_indices = list(self.trainloader.batch_sampler)
for i in range(len(temp_list)):
tmp = batch_wise_indices[temp_list[i]]
total_greedy_list.extend(tmp)
gammas.extend(list(gammas_temp[i] * np.ones(len(tmp))))
return total_greedy_list, gammas
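if __name__ == '__main__':
    # Minimal sketch (not part of the original module): it only illustrates the
    # facility-location subset-selection step that `select` builds on, reusing
    # the same apricot call on a toy precomputed similarity matrix. The random
    # data, the budget of 3 and the 'lazy' optimizer are arbitrary demo choices.
    rng = np.random.RandomState(0)
    feats = rng.rand(20, 5)
    # pairwise squared distances, then convert to similarities as in compute_score
    dist = np.power(feats[:, None, :] - feats[None, :, :], 2).sum(axis=2)
    sim = dist.max() - dist
    fl = apricot.functions.facilityLocation.FacilityLocationSelection(
        random_state=0, metric='precomputed', n_samples=3, optimizer='lazy')
    sim_sub = fl.fit_transform(sim)
    selected = list(np.argmax(sim_sub, axis=1))
    print("Selected representative indices:", selected)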
| 43.305994
| 215
| 0.5566
|
ff2e09b16dda166327615c7f9daa693f62d7e713
| 436
|
py
|
Python
|
examples/rosetta/xyz.py
|
bennn/PyonR
|
16edd14f3950fd5a01f8b0237e023536ef48d17b
|
[
"MIT"
] | 132
|
2015-01-03T23:22:29.000Z
|
2021-11-29T22:17:32.000Z
|
examples/rosetta/xyz.py
|
bennn/PyonR
|
16edd14f3950fd5a01f8b0237e023536ef48d17b
|
[
"MIT"
] | 7
|
2015-06-29T18:51:10.000Z
|
2019-06-07T13:10:18.000Z
|
examples/rosetta/xyz.py
|
bennn/PyonR
|
16edd14f3950fd5a01f8b0237e023536ef48d17b
|
[
"MIT"
] | 15
|
2015-07-14T02:40:21.000Z
|
2021-03-10T11:26:31.000Z
|
#lang python
from '(planet aml/rosetta)' import *
import predicates
class XYZ(object):
x = property(cx)
y = property(cy)
z = property(cz)
rho = property(cyl_rho)
phi = property(cyl_phi)
__add__ = PLUS_c
def __repr__(self):
return "<" + str(cx(self)) + ", " \
+ str(cy(self)) + ", " \
+ str(cz(self)) + ">"
predicates.set_predicate(position_QUERY, XYZ)
| 19.818182
| 45
| 0.541284
|
8a951559c37e6368051149e0d287f06588d57a6f
| 2,590
|
py
|
Python
|
proxstar/starrs.py
|
ajnagashima/proxstar
|
35c7e95dc95607455c41f0c2b152a8af59c14ef1
|
[
"MIT"
] | null | null | null |
proxstar/starrs.py
|
ajnagashima/proxstar
|
35c7e95dc95607455c41f0c2b152a8af59c14ef1
|
[
"MIT"
] | null | null | null |
proxstar/starrs.py
|
ajnagashima/proxstar
|
35c7e95dc95607455c41f0c2b152a8af59c14ef1
|
[
"MIT"
] | null | null | null |
import psycopg2
def get_next_ip(starrs, range_name):
c = starrs.cursor()
try:
c.execute("BEGIN")
c.callproc("api.initialize", ('root', ))
c.callproc("api.get_address_from_range", (range_name, ))
results = c.fetchall()
c.execute("COMMIT")
finally:
c.close()
return results[0][0]
def get_ip_for_mac(starrs, mac):
c = starrs.cursor()
try:
c.execute("BEGIN")
c.callproc("api.initialize", ('root', ))
c.callproc("api.get_system_interface_addresses", (mac.lower(), ))
results = c.fetchall()
c.execute("COMMIT")
finally:
c.close()
if not results:
return 'No IP'
return results[0][3]
def renew_ip(starrs, addr):
c = starrs.cursor()
try:
c.execute("BEGIN")
c.callproc("api.initialize", ('root', ))
c.callproc("api.renew_interface_address", (addr, ))
results = c.fetchall()
c.execute("COMMIT")
finally:
c.close()
return results
def check_hostname(starrs, hostname):
c = starrs.cursor()
try:
c.execute("BEGIN")
c.callproc("api.initialize", ('root', ))
c.callproc("api.validate_name", (hostname, ))
c.execute("COMMIT")
c.execute("BEGIN")
c.callproc("api.initialize", ('root', ))
c.callproc("api.validate_domain", (hostname, 'csh.rit.edu'))
valid = c.fetchall()[0][0]
c.execute("COMMIT")
c.execute("BEGIN")
c.callproc("api.initialize", ('root', ))
c.callproc("api.check_dns_hostname", (hostname, 'csh.rit.edu'))
available = False
if not c.fetchall()[0][0]:
available = True
c.execute("COMMIT")
except (psycopg2.InternalError):
valid = False
available = False
finally:
c.close()
return valid, available
def register_starrs(starrs, name, owner, mac, addr):
c = starrs.cursor()
try:
c.execute("BEGIN")
c.callproc("api.initialize", ('root', ))
c.callproc(
"api.create_system_quick",
(name, owner, 'members', mac, addr, 'csh.rit.edu', 'dhcp', True))
results = c.fetchall()
c.execute("COMMIT")
finally:
c.close()
return results
def delete_starrs(starrs, name):
c = starrs.cursor()
try:
c.execute("BEGIN")
c.callproc("api.initialize", ('root', ))
c.callproc("api.remove_system", (name, ))
results = c.fetchall()
c.execute("COMMIT")
finally:
c.close()
return results
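if __name__ == '__main__':
    # Illustrative sketch only (not in the original module): the helpers above
    # all expect an open psycopg2 connection to a STARRS database. The host,
    # database name, credentials and the 'members' range name are placeholders.
    starrs = psycopg2.connect(host='starrs.example.com', dbname='starrs',
                              user='starrs_user', password='secret')
    print(get_next_ip(starrs, 'members'))
    starrs.close()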
| 26.428571
| 77
| 0.557529
|
4145747d9dd773cf73f2d9dc19ec87c389dd9059
| 29,358
|
py
|
Python
|
theano/sandbox/cuda/__init__.py
|
julianser/Theano
|
56da8ca8775ce610595a61c3984c160aabd6ec7b
|
[
"BSD-3-Clause"
] | 4
|
2017-09-10T00:54:17.000Z
|
2019-10-22T23:28:09.000Z
|
theano/sandbox/cuda/__init__.py
|
julianser/Theano
|
56da8ca8775ce610595a61c3984c160aabd6ec7b
|
[
"BSD-3-Clause"
] | null | null | null |
theano/sandbox/cuda/__init__.py
|
julianser/Theano
|
56da8ca8775ce610595a61c3984c160aabd6ec7b
|
[
"BSD-3-Clause"
] | 2
|
2019-08-12T13:45:09.000Z
|
2020-08-14T10:13:50.000Z
|
from __future__ import absolute_import, print_function, division
import atexit
import errno
import logging
import os
import shutil
import stat
import sys
import textwrap
import warnings
import theano
from theano.compat import get_unbound_function
from theano.compile import optdb
from theano.gof import EquilibriumDB, SequenceDB, TopoOptimizer
from theano.gof.cmodule import get_lib_extension
from theano.gof.compilelock import get_lock, release_lock
from theano import config
from . import nvcc_compiler
from theano.tensor.basic import register_transfer
# ignore_newtrees is to speed the optimization as this is the pattern
# we use for optimization. Otherwise, we can iterate 100s of time on
# the graph and apply only a few optimizations each time.
gpu_optimizer = EquilibriumDB(ignore_newtrees=False)
gpu_seqopt = SequenceDB()
def register_opt(*tags, **kwargs):
if any([not isinstance(t, str) for t in tags]):
raise RuntimeError("Bad call to register_opt."
" All tags must be strings.", tags)
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
gpu_optimizer.register(name, local_opt, 'fast_run', 'fast_compile',
'gpu', *tags, **kwargs)
return local_opt
return f
def register_inplace(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
optdb.register(
name, TopoOptimizer(
local_opt, failure_callback=TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace', 'gpu', *tags)
return local_opt
return f
_logger_name = 'theano.sandbox.cuda'
_logger = logging.getLogger(_logger_name)
# is_nvcc_available called here to initialize global vars in
# nvcc_compiler module
nvcc_compiler.is_nvcc_available()
# Compile cuda_ndarray.cu
# This needs nvcc (part of CUDA) to be installed. If it is not, a warning is
# printed and this module will not work properly (we set `cuda_available`
# to False).
# This variable is True by default, and set to False if nvcc is not
# available or there is no cuda card or something goes wrong when
# trying to initialize cuda.
cuda_available = True
# Global variable to avoid displaying the same warning multiple times.
cuda_warning_is_displayed = False
# This variable is set to True when we enable cuda.(i.e. when use() is called)
cuda_enabled = False
# Code factorized within a function so that it may be called from multiple
# places (which is not currently the case, but may be useful in the future).
def set_cuda_disabled():
"""
Function used to disable cuda.
A warning is displayed, so that the user is aware that cuda-based code is
not going to work.
Note that there is no point calling this function from outside of
`cuda.__init__`, since it has no effect once the module is loaded.
"""
global cuda_available, cuda_warning_is_displayed
cuda_available = False
# cuda_ndarray compile and import
cuda_path = os.path.abspath(os.path.split(__file__)[0])
cuda_ndarray_loc = os.path.join(config.compiledir, 'cuda_ndarray')
cuda_ndarray_so = os.path.join(
cuda_ndarray_loc, 'cuda_ndarray.' + get_lib_extension())
libcuda_ndarray_so = os.path.join(
cuda_ndarray_loc, 'libcuda_ndarray.' + get_lib_extension())
def try_import():
"""
Load the cuda_ndarray module if present and up to date.
Return True if loaded correctly, otherwise return False.
"""
cuda_files = (
'cuda_ndarray.cu',
'cuda_ndarray.cuh',
'conv_full_kernel.cu',
'cnmem.h',
'cnmem.cpp',
'conv_kernel.cu')
stat_times = [os.stat(os.path.join(cuda_path, cuda_file))[stat.ST_MTIME]
for cuda_file in cuda_files]
date = max(stat_times)
if os.path.exists(cuda_ndarray_so):
if date >= os.stat(cuda_ndarray_so)[stat.ST_MTIME]:
return False
try:
# If we load a previously-compiled version, config.compiledir should
# be in sys.path.
sys.path[0:0] = [config.compiledir]
import cuda_ndarray.cuda_ndarray
del sys.path[0]
except ImportError:
return False
return True
if not nvcc_compiler.is_nvcc_available() or not theano.config.cxx:
# It can happen that the file cuda_ndarray.so is already compiled
# but nvcc is not available. In that case we need to disable the CUDA
    # back-end as we won't be able to compile any new op, and we can't use
    # only the already-compiled GPU ops and not the others.
# Also, if cxx is not available, we need to disable all GPU code.
set_cuda_disabled()
compile_cuda_ndarray = False
elif not config.device.startswith('gpu') and config.force_device:
    # We were asked to NEVER use the GPU
set_cuda_disabled()
compile_cuda_ndarray = False
else:
# Add the theano cache directory's cuda_ndarray subdirectory to the
# list of places that are hard-coded into compiled modules' runtime
# library search list. This works in conjunction with
# nvcc_compiler.NVCC_compiler.compile_str which adds this folder during
# compilation with -L and also adds -lcuda_ndarray when compiling
# modules.
nvcc_compiler.add_standard_rpath(cuda_ndarray_loc)
compile_cuda_ndarray = not try_import()
if compile_cuda_ndarray and cuda_available:
get_lock()
try:
# Retry to load again in case someone else compiled it
# while we waited for the lock
if not try_import():
try:
if not nvcc_compiler.is_nvcc_available():
set_cuda_disabled()
if cuda_available:
code = open(os.path.join(cuda_path,
"cuda_ndarray.cu")).read()
if not os.path.exists(cuda_ndarray_loc):
os.makedirs(cuda_ndarray_loc)
# If $TMPDIR is defined, nvopencc wants it to exist
if 'TMPDIR' in os.environ:
tmpdir = os.environ['TMPDIR']
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
compiler = nvcc_compiler.NVCC_compiler()
preargs = ['-O3'] + compiler.compile_args()
compiler.compile_str(
'cuda_ndarray',
code,
location=cuda_ndarray_loc,
include_dirs=[cuda_path],
libs=[config.cublas.lib],
preargs=preargs,
)
from cuda_ndarray.cuda_ndarray import *
except Exception as e:
_logger.error("Failed to compile cuda_ndarray.cu: %s", str(e))
set_cuda_disabled()
finally:
release_lock()
del compile_cuda_ndarray
if cuda_available:
global cuda_initialization_error_message
# The module should be compiled.
from cuda_ndarray.cuda_ndarray import *
# If necessary,
# create a symlink called libcuda_ndarray.so
# which nvcc_compiler.NVCC_compiler uses when linking
# any module except "cuda_ndarray" itself.
def ok():
"""
Check if an existing library exists and can be read.
"""
try:
open(libcuda_ndarray_so).close()
return True
except IOError:
return False
if not ok():
if sys.platform == "win32":
# The Python `os` module does not support symlinks on win32.
shutil.copyfile(cuda_ndarray_so, libcuda_ndarray_so)
else:
try:
os.symlink(cuda_ndarray_so, libcuda_ndarray_so)
except OSError as e:
# This may happen for instance when running multiple
# concurrent jobs, if two of them try to create the
# symlink simultaneously.
# If that happens, we verify that the existing symlink is
# indeed working.
if getattr(e, 'errno', None) != errno.EEXIST or not ok():
raise
try:
# This only test if the cuda driver is available and if there
# is at least one GPU that support cuda. This do not select a
# device.
gpu_init()
cuda_available = True
cuda_initialization_error_message = ""
        # actively closing our gpu session prevents segfault-on-exit on some systems
atexit.register(gpu_shutdown)
except EnvironmentError as e:
cuda_available = False
cuda_initialization_error_message = " ".join(e.args)
else:
cuda_initialization_error_message = 'cuda unavailable'
class GpuOp(theano.gof.Op):
"""
Parent class for all GPU Ops.
This class ensures we verify the GPU is working properly when a GPU Op is
used for the first time.
It is defined in __init__.py so that it exists even when `cuda_available`
is False (this is necessary to avoid breaking the test suite).
"""
def prepare_node(self, node, storage_map, compute_map, impl):
if use.device_number is None:
use("gpu",
force=True,
default_to_move_computation_to_gpu=False,
move_shared_float32_to_gpu=False,
enable_cuda=False)
# We must do those import to be able to create the full doc when
# nvcc is not available
from theano.sandbox.cuda.var import (CudaNdarrayVariable,
CudaNdarrayConstant,
CudaNdarraySharedVariable,
float32_shared_constructor)
from theano.sandbox.cuda.type import CudaNdarrayType
def dnn_available():
if config.dnn.enabled == "False":
dnn_available.avail = False
dnn_available.msg = "Disabled by dnn.enabled flag"
if dnn_available.avail is None and not cuda_available:
dnn_available.msg = "CUDA not available"
dnn_available.avail = False
elif config.dnn.enabled == "no_check":
        raise RuntimeError("The old gpu back-end does not support the flag dnn.enabled=no_check")
elif dnn_available.avail is None:
dev = active_device_number()
if device_properties(dev)['major'] < 3:
dnn_available.msg = "Device not supported"
dnn_available.avail = False
else:
preambule = textwrap.dedent(
"""
#include <stdio.h>
#include <cuda.h>
#include <cudnn.h>
#include <cudnn_helper.h>
""")
body = textwrap.dedent(
"""
cudnnHandle_t _handle = NULL;
cudnnStatus_t err;
if ((err = cudnnCreate(&_handle)) != CUDNN_STATUS_SUCCESS) {
fprintf(stderr, "could not create cuDNN handle: %s",
cudnnGetErrorString(err));
return 1;
}
""")
# to support path that includes spaces, we need to wrap it with double quotes on Windows
path_wrapper = "\"" if os.name =='nt' else ""
params = ["-l", "cudnn"]
params.extend(['-I%s%s%s' % (path_wrapper, os.path.dirname(__file__), path_wrapper)])
if config.dnn.include_path:
params.extend(['-I%s%s%s' % (path_wrapper, config.dnn.include_path, path_wrapper)])
if config.dnn.library_path:
params.extend(['-L%s%s%s' % (path_wrapper, config.dnn.library_path, path_wrapper)])
if config.nvcc.compiler_bindir:
params.extend(['--compiler-bindir',
'%s%s%s' % (path_wrapper, config.nvcc.compiler_bindir, path_wrapper)])
params.extend([flag for flag in config.nvcc.flags.split(' ') if flag])
# Do not run here the test program. It would run on the
# default gpu, not the one selected by the user. If mixed
# GPU are installed or if the GPUs are configured in
# exclusive mode, this cause bad detection.
comp, out, err = nvcc_compiler.NVCC_compiler.try_flags(
flag_list=params, preambule=preambule, body=body,
try_run=False, output=True)
dnn_available.avail = comp
if not dnn_available.avail:
dnn_available.msg = (
"Can not compile with cuDNN. We got this error:\n" +
str(err))
else:
# If we can compile, check that we can import and run.
v = dnn_version()
if isinstance(v, tuple) and v[0] != v[1]:
dnn_available.avail = False
dnn_available.msg = ("Mixed dnn version. The header is"
" from one version, but we link with"
" a different version %s" % str(v))
raise RuntimeError(dnn_available.msg)
if v == -1 or v[0] < 4007:
# 4007 is the final release of cudnn v4
dnn_available.avail = False
dnn_available.msg = "Version is too old. Update to v5, was %d." % v[0]
raise RuntimeError(dnn_available.msg)
else:
dnn_available.avail = comp
if config.dnn.enabled == "True":
if not dnn_available.avail:
raise RuntimeError(
"You enabled cuDNN, but we aren't able to use it: %s" %
dnn_available.msg)
return dnn_available.avail
dnn_available.avail = None
dnn_available.msg = None
class DnnVersion(GpuOp):
def c_compiler(self):
return nvcc_compiler.NVCC_compiler
def c_headers(self):
return ['cudnn.h']
def c_header_dirs(self):
return [config.dnn.include_path]
def c_libraries(self):
return ['cudnn']
def c_lib_dirs(self):
return [config.dnn.library_path]
def c_compile_args(self):
return ['-Wl,-rpath,' + config.dnn.library_path]
def c_support_code(self):
return textwrap.dedent(
"""
#if PY_MAJOR_VERSION >= 3
#define PyInt_FromLong PyLong_FromLong
#endif
""")
def make_node(self):
return theano.gof.Apply(self, [], [theano.gof.Generic()()])
def c_code(self, node, name, inputs, outputs, sub):
o = outputs[0]
return textwrap.dedent(
"""
#if defined(CUDNN_VERSION)
%(o)s = PyTuple_Pack(2, PyInt_FromLong(CUDNN_VERSION), PyInt_FromLong(cudnnGetVersion()));
#else
%(o)s = PyInt_FromLong(-1);
#endif
""") % locals()
def do_constant_folding(self, node):
# Needed as we do not want to cache this information.
return False
def c_code_cache_version(self):
# Not needed, but make it clear that we do not want to cache this.
return None
def dnn_version():
"""Return the current cuDNN version we compile with.
This returns a tuple with the header version and the library
version we link with. For older cudnn version without version
information, we return -1.
"""
if not dnn_available():
raise Exception(
"We can't determine the cudnn version as it is not available",
dnn_available.msg)
if dnn_version.v is None:
f = theano.function([], DnnVersion()(),
theano.Mode(optimizer=None),
profile=False)
dnn_version.v = f()
return dnn_version.v
dnn_version.v = None
if cuda_available:
    # check if there is an old cuda_ndarray that was loaded instead of the one
# we compiled!
import cuda_ndarray.cuda_ndarray
if cuda_ndarray_so != cuda_ndarray.cuda_ndarray.__file__:
_logger.warning("cuda_ndarray was loaded from %s, but Theano expected "
"to load it from %s. This is not expected as theano "
"should compile it automatically for you. Do you have "
"a directory called cuda_ndarray in your "
"LD_LIBRARY_PATH environment variable? If so, please "
"remove it as it is outdated.",
cuda_ndarray.cuda_ndarray.__file__,
cuda_ndarray_so)
shared_constructor = float32_shared_constructor
from . import basic_ops
from .basic_ops import (
GpuFromHost, HostFromGpu, GpuElemwise,
GpuDimShuffle, GpuCAReduce, GpuReshape, GpuContiguous,
GpuSubtensor, GpuIncSubtensor,
GpuAdvancedSubtensor1, GpuAdvancedIncSubtensor1,
gpu_flatten, GpuFlatten, GpuShape, GpuAlloc, GpuAllocEmpty, GpuSplit,
GpuJoin, fscalar, fvector, fmatrix, frow, fcol,
ftensor3, ftensor4,
scalar, vector, matrix, row, col,
tensor3, tensor4)
from .basic_ops import (host_from_gpu, gpu_from_host, as_cuda_array,
as_cuda_ndarray_variable)
import cuda_ndarray
from . import opt, dnn
from .rng_curand import CURAND_RandomStreams
def transfer(x, target):
if target == 'gpu':
return as_cuda_ndarray_variable(x)
register_transfer(transfer)
def use(device,
force=False,
default_to_move_computation_to_gpu=True,
move_shared_float32_to_gpu=True,
enable_cuda=True,
test_driver=True):
"""
    Errors and warnings about CUDA should be displayed only when this
function is called. We need to be able to load this module only
to check if it is available!
Parameters
----------
device : string
"cpu", "gpu", "gpuN" (N is the device number to use).
force
Will always raise an exception if we can't use the gpu.
default_to_move_computation_to_gpu
If gpu init succeeded, enable by default optimizations to move
computations to the gpu.
move_shared_float32_to_gpu
If gpu init succeeded, put new shared variables in float32 on the gpu.
enable_cuda
If the gpu is correctly enabled, set the variable cuda_enabled to True.
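
    Illustrative calls (examples added for clarity, not in the original
    docstring)::

        use('gpu')               # let the driver pick a GPU
        use('gpu1', force=True)  # require device 1; raise if it cannot be used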
"""
global cuda_enabled, cuda_initialization_error_message
_logger.warn("The cuda backend is deprecated and will be removed in "
"the next release (v0.10). Please switch to the gpuarray backend. "
"You can get more information about how to switch at this "
"URL:\n https://github.com/Theano/Theano/wiki/Converting-to-the-new-gpu-back-end%28gpuarray%29\n")
if force and not cuda_available and device.startswith('gpu'):
if not nvcc_compiler.is_nvcc_available():
raise EnvironmentError("You forced the use of gpu device '%s', but"
" nvcc was not found. Set it in your PATH "
"environment variable or set the Theano "
"flags 'cuda.root' to its directory"
"" % device)
else:
raise EnvironmentError("You forced the use of gpu device %s, "
"but CUDA initialization failed "
"with error:\n%s" % (
device,
cuda_initialization_error_message))
elif not nvcc_compiler.is_nvcc_available():
_logger.error("nvcc compiler not found on $PATH. "
"Check your nvcc installation and try again.")
return
elif not cuda_available:
error_addendum = ""
try:
if cuda_initialization_error_message:
error_addendum = (" (error: %s)" %
cuda_initialization_error_message)
except NameError:
# cuda_initialization_error_message is not available b/c compilation failed
pass
_logger.warning("CUDA is installed, but device %s is not available %s",
device, error_addendum)
return
if device == 'gpu':
pass
elif device.startswith('gpu'):
device = int(device[3:])
elif device == 'cpu':
device = -1
else:
raise ValueError("Invalid device identifier", device)
if use.device_number is None:
# No successful call to use() has been made yet
if device != 'gpu' and device < 0:
return
msg = ("Theano flag device=gpu* (old gpu back-end) only support"
" floatX=float32. You have floatX=%s. Use the new gpu"
" back-end with device=cuda* for that value of floatX." %
config.floatX)
if config.floatX == 'float16':
raise RuntimeError(msg)
elif config.floatX == 'float64':
warnings.warn(msg)
# Has PyCUDA already initialized the GPU context
pycuda_init_dev = False
if config.pycuda.init:
import theano.misc.pycuda_init
pycuda_init_dev = theano.misc.pycuda_init.pycuda_available
try:
if pycuda_init_dev:
use.device_number = active_device_number()
# This is needed to initialize the cublas handle.
gpu_init(use.device_number, config.lib.cnmem)
elif(device != 'gpu'):
assert isinstance(device, int)
gpu_init(device, config.lib.cnmem)
use.device_number = device
active_device = active_device_number()
assert active_device == device, (active_device, device)
else:
            # This means the driver should select the GPU. As we
            # need to get the device number now, we force the
            # selection of the GPU by the driver now and then we
            # query the active GPU. If we check the active GPU before
            # the device is initialized, we will always receive 0
            # even if another device is selected later.
if not hasattr(cuda_ndarray.cuda_ndarray, 'select_a_gpu'):
raise Exception(
"Delete your Theano cache. The automatic"
" recompilation did not work.")
cuda_ndarray.cuda_ndarray.select_a_gpu()
use.device_number = active_device_number()
# This is needed to initialize the cublas handle.
gpu_init(use.device_number, config.lib.cnmem)
if test_driver:
import theano.sandbox.cuda.tests.test_driver
theano.sandbox.cuda.tests.test_driver.test_nvidia_driver1()
if device_properties(use.device_number)["warpSize"] != 32:
raise ValueError("Your GPU has a warpSize != 32. Currently"
" we have code that depends on this. Email"
" the Theano mailing list to tell us about"
" this new GPU as we don't know any with"
" this property")
if config.print_active_device:
if config.lib.cnmem:
if config.lib.cnmem > 1:
cnmem_enabled = "enabled with initial size: %d MB" % config.lib.cnmem
else:
cnmem = min(config.lib.cnmem, 0.95) * 100
cnmem_enabled = "enabled with initial size: %.1f%% of memory" % cnmem
else:
cnmem_enabled = "disabled"
cudnn_version = "not available"
warn = None
try:
if dnn_available():
(hdr_v, runtime_v) = dnn_version()
cudnn_version = runtime_v
# 5200 should not print warning with cudnn 5 final.
if cudnn_version >= 5200:
warn = ("Your cuDNN version is more recent than the one"
" Theano officially supports."
" If you see any problems, try updating Theano or"
" downgrading cuDNN to version 5.1.")
except Exception:
cudnn_version = dnn_available.msg
print("Using gpu device %d: %s (CNMeM is %s, cuDNN %s)" % (
active_device_number(),
active_device_name(),
cnmem_enabled,
cudnn_version,),
file=sys.stderr)
if warn:
warnings.warn(warn)
if device_properties(use.device_number)['regsPerBlock'] < 16384:
            # We would try to use too many registers per block in many places
            # when there are only 8k registers per multi-processor.
_logger.warning(
"You are probably using an old GPU, that Theano"
" does not support."
" This means GPU code will most likely be slow AND may"
" crash when we try to use features"
" that your GPU does not support.")
except (EnvironmentError, ValueError, RuntimeError) as e:
_logger.error(("ERROR: Not using GPU."
" Initialisation of device %s failed:\n%s"),
str(device), e)
cuda_enabled = False
if force:
e.args += (("You asked to force this device and it failed."
" No fallback to the cpu or other gpu device."),)
raise
elif use.device_number != device and device != 'gpu':
_logger.warning(("Ignoring call to use(%s), GPU number %i "
"is already in use."),
str(device), use.device_number)
if move_shared_float32_to_gpu:
handle_shared_float32(True)
if enable_cuda:
cuda_enabled = True
if default_to_move_computation_to_gpu:
# Do not add inplace tag here. We do not want to
# enable/disable gpu opt based on the inplace tag.
optdb.add_tags('gpu_opt',
'fast_compile',
'fast_run')
optdb.add_tags('gpu_after_fusion',
'fast_run')
optdb.add_tags('gpu_scanOp_make_inplace',
'fast_run')
if force:
try:
            # in case the device is just 'gpu',
            # we check that the driver initialized it correctly.
cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((5, 5))
except (Exception, NameError) as e:
# NameError when no gpu present as cuda_ndarray is not loaded.
e.args += ("ERROR: GPU forced but failed. ",)
raise
use.device_number = None
def unuse():
"""
    This undoes what was done by the call to
use('gpu[0-9]', default_to_move_computation_to_gpu=True,
move_shared_float32_to_gpu=True,
enable_cuda=True)
This is used in Pylearn2 tests to enable/disable the GPU when needed.
After this call, the rest of Theano think the GPU shouldn't be used by
default.
"""
global cuda_enabled
cuda_enabled = False
handle_shared_float32(False)
optdb.remove_tags('gpu_opt',
'fast_compile',
'fast_run')
optdb.remove_tags('gpu_after_fusion',
'fast_run')
def handle_shared_float32(tf):
"""
Set the default shared type for float32 tensor to CudaNdarrayType.
This function is intended to be called from use(gpu_index), not directly.
"""
if tf:
theano.compile.shared_constructor(float32_shared_constructor)
else:
theano.compile.shared_constructor(float32_shared_constructor, True)
assert (float32_shared_constructor not in
theano.compile.shared.constructors)
# We can't test the driver during import here as this cause circular
# import dependency. So we also test it in the file theano/__init__.py
if config.device.startswith('gpu'):
use(device=config.device, force=config.force_device, test_driver=False)
elif config.init_gpu_device.startswith('gpu'):
assert config.device == "cpu", (
"We can use the Theano flag init_gpu_device"
" only when the Theano flag device=='cpu'")
_logger.warning(("GPU device %s will be initialized, and used if a GPU is "
"needed. However, no computation, nor shared variables, "
"will be implicitly moved to that device. If you want "
"that behavior, use the 'device' flag instead."),
config.init_gpu_device)
use(device=config.init_gpu_device,
force=config.force_device,
default_to_move_computation_to_gpu=False,
move_shared_float32_to_gpu=False,
enable_cuda=False, test_driver=False)
| 39.406711
| 115
| 0.591423
|
c723d2ead137ebd366dd498a9332385e75699d74
| 2,490
|
py
|
Python
|
aiovault/cli.py
|
johnnoone/aiovault
|
03e1bfb6f0404dcf97ce87a98c539027c4e78a37
|
[
"BSD-3-Clause"
] | 1
|
2022-01-31T22:37:57.000Z
|
2022-01-31T22:37:57.000Z
|
aiovault/cli.py
|
johnnoone/aiovault
|
03e1bfb6f0404dcf97ce87a98c539027c4e78a37
|
[
"BSD-3-Clause"
] | null | null | null |
aiovault/cli.py
|
johnnoone/aiovault
|
03e1bfb6f0404dcf97ce87a98c539027c4e78a37
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import os.path
import re
from collections import namedtuple
from subprocess import Popen, PIPE
PATTERN_KEYS = re.compile(r'Key \d+: (?P<key>[a-f0-9]+)')
PATTERN_ROOT = re.compile(r'Initial Root Token: (?P<token>[a-f0-9-]+)')
PATTERN_CRITERIAS = re.compile(r'Vault initialized with (?P<shares>\d+) keys and a key threshold of (?P<threshold>\d+)')  # noqa
Response = namedtuple('Response', 'cmd stdout stderr code')
class CLIError(Exception):
pass
class VaultCLI:
def __init__(self, config):
self.config = config
def initialize(self, shares=None, threshold=None):
cmd = ['vault', 'init']
if shares:
cmd.extend(['-key-shares', str(shares)])
if threshold:
cmd.extend(['-key-threshold', str(threshold)])
response = self(cmd)
contents = response.stdout.decode('utf-8')
keys = PATTERN_KEYS.findall(contents)
        root_token = PATTERN_ROOT.search(contents).group('token')
shares, threshold = PATTERN_CRITERIAS.search(contents).groups()
self.config.update({
'keys': set(keys),
'shares': int(shares),
'threshold': int(threshold),
'root_token': root_token,
})
return True
def unseal(self):
for key in self.config.keys:
cmd = ['vault', 'unseal', key]
response = self(cmd)
return response
def audit_syslog(self):
cmd = ['vault', 'audit-enable', 'syslog']
response = self(cmd)
if response.code:
raise Exception(response.stderr.decode('utf-8'))
def audit_file(self, path):
cmd = ['vault', 'audit-enable', 'file', 'path=%s' % path]
response = self(cmd)
if response.code:
raise Exception(response.stderr.decode('utf-8'))
def __call__(self, cmd):
env = os.environ.copy()
env.setdefault('GOMAXPROCS', '2')
if hasattr(self.config, 'csr'):
env.setdefault('VAULT_CAPATH', self.config.csr)
if hasattr(self.config, 'root_token'):
env.setdefault('VAULT_TOKEN', self.config.root_token)
shell = not isinstance(cmd, (list, tuple))
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env, shell=shell)
stdout, stderr = proc.communicate()
if not proc.returncode:
return Response(cmd, stdout, stderr, proc.returncode)
raise CLIError(stderr.decode('utf-8'), proc.returncode)
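if __name__ == '__main__':
    # Minimal sketch (not part of the original module): shows how the regexes
    # above pick apart `vault init` output. The sample text below is fabricated
    # for illustration only.
    sample = (
        "Key 1: 0123456789abcdef\n"
        "Key 2: fedcba9876543210\n"
        "Initial Root Token: 01234567-89ab-cdef-0123-456789abcdef\n"
        "Vault initialized with 2 keys and a key threshold of 2\n"
    )
    print(PATTERN_KEYS.findall(sample))                # ['0123456789abcdef', 'fedcba9876543210']
    print(PATTERN_ROOT.search(sample).group('token'))  # the root token
    print(PATTERN_CRITERIAS.search(sample).groups())   # ('2', '2')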
| 33.648649
| 127
| 0.603614
|
7c668b8b1ecfdacef66a59dd86e127a75b854386
| 20,371
|
py
|
Python
|
avalanche/evaluation/metrics/forgetting_bwt.py
|
wutong8023/avalanche
|
6fb13545998b9a8c08ad291efd1d1769ec4a5ae8
|
[
"MIT"
] | null | null | null |
avalanche/evaluation/metrics/forgetting_bwt.py
|
wutong8023/avalanche
|
6fb13545998b9a8c08ad291efd1d1769ec4a5ae8
|
[
"MIT"
] | null | null | null |
avalanche/evaluation/metrics/forgetting_bwt.py
|
wutong8023/avalanche
|
6fb13545998b9a8c08ad291efd1d1769ec4a5ae8
|
[
"MIT"
] | null | null | null |
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 14-12-2020 #
# Author(s): Lorenzo Pellegrini #
# E-mail: contact@continualai.org #
# Website: www.continualai.org #
################################################################################
from typing import Dict, TYPE_CHECKING, Union, List
from avalanche.evaluation.metric_definitions import Metric, PluginMetric
from avalanche.evaluation.metric_results import MetricValue, MetricResult
from avalanche.evaluation.metrics import Accuracy, Mean
from avalanche.evaluation.metric_utils import get_metric_name, \
phase_and_task, stream_type
if TYPE_CHECKING:
from avalanche.training import BaseStrategy
class Forgetting(Metric[Union[float, None, Dict[int, float]]]):
"""
The standalone Forgetting metric.
This metric returns the forgetting relative to a specific key.
    Alternatively, this metric returns a dict in which each key is associated
    with its forgetting value.
Forgetting is computed as the difference between the first value recorded
for a specific key and the last value recorded for that key.
    The value associated with a key can be updated with the `update` method.
At initialization, this metric returns an empty dictionary.
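
    A short worked example (added for clarity, not in the original docstring)::

        f = Forgetting()
        f.update(0, 0.90, initial=True)  # value recorded right after training on key 0
        f.update(0, 0.75)                # value for key 0 after later training
        f.result(0)                      # 0.90 - 0.75 = 0.15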
"""
def __init__(self):
"""
Creates an instance of the standalone Forgetting metric
"""
super().__init__()
self.initial: Dict[int, float] = dict()
"""
The initial value for each key.
"""
self.last: Dict[int, float] = dict()
"""
The last value detected for each key
"""
def update_initial(self, k, v):
self.initial[k] = v
def update_last(self, k, v):
self.last[k] = v
def update(self, k, v, initial=False):
if initial:
self.update_initial(k, v)
else:
self.update_last(k, v)
def result(self, k=None) -> Union[float, None, Dict[int, float]]:
"""
Forgetting is returned only for keys encountered twice.
        :param k: the key for which to return forgetting. If k has not been
            updated at least twice, None is returned. If k is None,
forgetting will be returned for all keys encountered at least
twice.
:return: the difference between the first and last value encountered
for k, if k is not None. It returns None if k has not been updated
at least twice. If k is None, returns a dictionary
containing keys whose value has been updated at least twice. The
associated value is the difference between the first and last
value recorded for that key.
"""
forgetting = {}
if k is not None:
if k in self.initial and k in self.last:
return self.initial[k] - self.last[k]
else:
return None
ik = set(self.initial.keys())
both_keys = list(ik.intersection(set(self.last.keys())))
for k in both_keys:
forgetting[k] = self.initial[k] - self.last[k]
return forgetting
def reset_last(self) -> None:
self.last: Dict[int, float] = dict()
def reset(self) -> None:
self.initial: Dict[int, float] = dict()
self.last: Dict[int, float] = dict()
class GenericExperienceForgetting(PluginMetric[Dict[int, float]]):
"""
The GenericExperienceForgetting metric, describing the change in
a metric detected for a certain experience. The user should
subclass this and provide the desired metric.
In particular, the user should override:
    * __init__ by calling `super` and instantiating the `self._current_metric`
    attribute as a valid avalanche metric
* `metric_update`, to update `current_metric`
* `metric_result` to get the result from `current_metric`.
* `__str__` to define the experience forgetting name.
This plugin metric, computed separately for each experience,
is the difference between the metric result obtained after
    first training on an experience and the metric result obtained
on the same experience at the end of successive experiences.
This metric is computed during the eval phase only.
"""
def __init__(self):
"""
Creates an instance of the GenericExperienceForgetting metric.
"""
super().__init__()
self.forgetting = Forgetting()
"""
The general metric to compute forgetting
"""
self._current_metric = None
"""
The metric the user should override
"""
self.eval_exp_id = None
"""
The current evaluation experience id
"""
self.train_exp_id = None
"""
The last encountered training experience id
"""
def reset(self) -> None:
"""
Resets the metric.
Beware that this will also reset the initial metric of each
experience!
:return: None.
"""
self.forgetting.reset()
def reset_last(self) -> None:
"""
Resets the last metric value.
This will preserve the initial metric value of each experience.
To be used at the beginning of each eval experience.
:return: None.
"""
self.forgetting.reset_last()
def update(self, k, v, initial=False):
"""
Update forgetting metric.
See `Forgetting` for more detailed information.
:param k: key to update
:param v: value associated to k
:param initial: update initial value. If False, update
last value.
"""
self.forgetting.update(k, v, initial=initial)
def result(self, k=None) -> Union[float, None, Dict[int, float]]:
"""
See `Forgetting` documentation for more detailed information.
        k: optional key for which to compute forgetting.
"""
return self.forgetting.result(k=k)
def before_training_exp(self, strategy: 'BaseStrategy') -> None:
self.train_exp_id = strategy.experience.current_experience
def before_eval(self, strategy) -> None:
self.reset_last()
def before_eval_exp(self, strategy: 'BaseStrategy') -> None:
self._current_metric.reset()
def after_eval_iteration(self, strategy: 'BaseStrategy') -> None:
super().after_eval_iteration(strategy)
self.eval_exp_id = strategy.experience.current_experience
self.metric_update(strategy)
def after_eval_exp(self, strategy: 'BaseStrategy') \
-> MetricResult:
# update experience on which training just ended
if self.train_exp_id == self.eval_exp_id:
self.update(self.eval_exp_id,
self.metric_result(strategy),
initial=True)
else:
# update other experiences
# if experience has not been encountered in training
# its value will not be considered in forgetting
self.update(self.eval_exp_id,
self.metric_result(strategy))
return self._package_result(strategy)
def _package_result(self, strategy: 'BaseStrategy') \
-> MetricResult:
# this checks if the evaluation experience has been
# already encountered at training time
# before the last training.
# If not, forgetting should not be returned.
forgetting = self.result(k=self.eval_exp_id)
if forgetting is not None:
metric_name = get_metric_name(self, strategy, add_experience=True)
plot_x_position = strategy.clock.train_iterations
metric_values = [MetricValue(
self, metric_name, forgetting, plot_x_position)]
return metric_values
def metric_update(self, strategy):
raise NotImplementedError
def metric_result(self, strategy):
raise NotImplementedError
def __str__(self):
raise NotImplementedError
class ExperienceForgetting(GenericExperienceForgetting):
"""
The ExperienceForgetting metric, describing the accuracy loss
detected for a certain experience.
This plugin metric, computed separately for each experience,
is the difference between the accuracy result obtained after
    first training on an experience and the accuracy result obtained
on the same experience at the end of successive experiences.
This metric is computed during the eval phase only.
"""
def __init__(self):
"""
Creates an instance of the ExperienceForgetting metric.
"""
super().__init__()
self._current_metric = Accuracy()
"""
The average accuracy over the current evaluation experience
"""
def metric_update(self, strategy):
self._current_metric.update(strategy.mb_y,
strategy.mb_output, 0)
def metric_result(self, strategy):
return self._current_metric.result(0)[0]
def __str__(self):
return "ExperienceForgetting"
class GenericStreamForgetting(GenericExperienceForgetting):
"""
The GenericStreamForgetting metric, describing the average evaluation
change in the desired metric detected over all experiences observed
during training.
In particular, the user should override:
* __init__ by calling `super` and instantiating the `self.current_metric`
property as a valid avalanche metric
* `metric_update`, to update `current_metric`
* `metric_result` to get the result from `current_metric`.
* `__str__` to define the experience forgetting name.
This plugin metric, computed over all observed experiences during training,
is the average over the difference between the metric result obtained
    after first training on an experience and the metric result obtained
on the same experience at the end of successive experiences.
This metric is computed during the eval phase only.
"""
def __init__(self):
"""
Creates an instance of the GenericStreamForgetting metric.
"""
super().__init__()
self.stream_forgetting = Mean()
"""
The average forgetting over all experiences
"""
def reset(self) -> None:
"""
Resets the forgetting metrics.
Beware that this will also reset the initial metric value of each
experience!
:return: None.
"""
super().reset()
self.stream_forgetting.reset()
def exp_update(self, k, v, initial=False):
"""
Update forgetting metric.
See `Forgetting` for more detailed information.
:param k: key to update
:param v: value associated to k
:param initial: update initial value. If False, update
last value.
"""
super().update(k, v, initial=initial)
def exp_result(self, k=None) -> Union[float, None, Dict[int, float]]:
"""
Result for experience defined by a key.
See `Forgetting` documentation for more detailed information.
        k: optional key for which to compute forgetting.
"""
return super().result(k)
def result(self, k=None) -> Union[float, None, Dict[int, float]]:
"""
        The average forgetting over all experiences.
        k: ignored, since the average over the whole stream is returned.
"""
return self.stream_forgetting.result()
def before_eval(self, strategy) -> None:
super().before_eval(strategy)
self.stream_forgetting.reset()
def after_eval_exp(self, strategy: 'BaseStrategy') -> None:
# update experience on which training just ended
if self.train_exp_id == self.eval_exp_id:
self.exp_update(self.eval_exp_id,
self.metric_result(strategy),
initial=True)
else:
# update other experiences
# if experience has not been encountered in training
# its value will not be considered in forgetting
self.exp_update(self.eval_exp_id,
self.metric_result(strategy))
# this checks if the evaluation experience has been
# already encountered at training time
# before the last training.
# If not, forgetting should not be returned.
exp_forgetting = self.exp_result(k=self.eval_exp_id)
if exp_forgetting is not None:
self.stream_forgetting.update(exp_forgetting, weight=1)
def after_eval(self, strategy: 'BaseStrategy') -> \
'MetricResult':
return self._package_result(strategy)
def _package_result(self, strategy: 'BaseStrategy') -> \
MetricResult:
metric_value = self.result()
phase_name, _ = phase_and_task(strategy)
stream = stream_type(strategy.experience)
metric_name = '{}/{}_phase/{}_stream' \
.format(str(self),
phase_name,
stream)
plot_x_position = strategy.clock.train_iterations
return [MetricValue(self, metric_name, metric_value, plot_x_position)]
def metric_update(self, strategy):
raise NotImplementedError
def metric_result(self, strategy):
raise NotImplementedError
def __str__(self):
raise NotImplementedError
class StreamForgetting(GenericStreamForgetting):
"""
The StreamForgetting metric, describing the average evaluation accuracy loss
detected over all experiences observed during training.
This plugin metric, computed over all observed experiences during training,
is the average over the difference between the accuracy result obtained
    after first training on an experience and the accuracy result obtained
on the same experience at the end of successive experiences.
This metric is computed during the eval phase only.
"""
def __init__(self):
"""
Creates an instance of the StreamForgetting metric.
"""
super().__init__()
self._current_metric = Accuracy()
"""
The average accuracy over the current evaluation experience
"""
def metric_update(self, strategy):
self._current_metric.update(strategy.mb_y,
strategy.mb_output, 0)
def metric_result(self, strategy):
return self._current_metric.result(0)[0]
def __str__(self):
return "StreamForgetting"
def forgetting_metrics(*, experience=False, stream=False) \
-> List[PluginMetric]:
"""
Helper method that can be used to obtain the desired set of
plugin metrics.
:param experience: If True, will return a metric able to log
the forgetting on each evaluation experience.
:param stream: If True, will return a metric able to log
the forgetting averaged over the evaluation stream experiences,
which have been observed during training.
:return: A list of plugin metrics.
"""
metrics = []
if experience:
metrics.append(ExperienceForgetting())
if stream:
metrics.append(StreamForgetting())
return metrics
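# Illustrative sketch (not part of the original module): requesting both the
# per-experience and the stream-level forgetting plugin metrics defined above.
# >>> metrics = forgetting_metrics(experience=True, stream=True)
# >>> [str(m) for m in metrics]
# ['ExperienceForgetting', 'StreamForgetting']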
def forgetting_to_bwt(f):
"""
Convert forgetting to backward transfer.
BWT = -1 * forgetting
"""
if f is None:
return f
if isinstance(f, dict):
bwt = {k: -1 * v for k, v in f.items()}
elif isinstance(f, float):
bwt = -1 * f
else:
raise ValueError("Forgetting data type not recognized when converting"
"to backward transfer.")
return bwt
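# Illustrative sketch of the sign convention implemented above: positive
# forgetting (a performance drop) maps to negative backward transfer.
# >>> forgetting_to_bwt(0.25)
# -0.25
# >>> forgetting_to_bwt({0: 0.25, 1: -0.5})
# {0: -0.25, 1: 0.5}
# >>> forgetting_to_bwt(None) is None
# True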
class BWT(Forgetting):
"""
The standalone Backward Transfer metric.
This metric returns the backward transfer relative to a specific key.
    Alternatively, this metric returns a dict in which each key is associated
    with its backward transfer.
    Backward transfer is computed as the difference between the last value
    recorded for a specific key and the first value recorded for that key.
    The value associated with a key can be updated with the `update` method.
At initialization, this metric returns an empty dictionary.
"""
def result(self, k=None) -> Union[float, None, Dict[int, float]]:
"""
        Backward Transfer is returned only for keys encountered at least twice.
        Backward Transfer is the negative of forgetting.
        :param k: the key for which to return backward transfer. If k has not
            been updated at least twice, None is returned. If k is None,
            backward transfer is returned for all keys encountered at
            least twice.
:return: the difference between the last value encountered for k
and its first value, if k is not None.
It returns None if k has not been updated
at least twice. If k is None, returns a dictionary
containing keys whose value has been updated at least twice. The
associated value is the difference between the last and first
value recorded for that key.
"""
forgetting = super().result(k)
bwt = forgetting_to_bwt(forgetting)
return bwt
class ExperienceBWT(ExperienceForgetting):
"""
The Experience Backward Transfer metric.
This plugin metric, computed separately for each experience,
is the difference between the last accuracy result obtained on a certain
experience and the accuracy result obtained when first training on that
experience.
This metric is computed during the eval phase only.
"""
def result(self, k=None) -> Union[float, None, Dict[int, float]]:
"""
        See `BWT` documentation for more detailed information.
        k: optional key for which to compute backward transfer.
"""
forgetting = super().result(k)
return forgetting_to_bwt(forgetting)
def __str__(self):
return "ExperienceBWT"
class StreamBWT(StreamForgetting):
"""
The StreamBWT metric, emitting the average BWT across all experiences
encountered during training.
This plugin metric, computed over all observed experiences during training,
is the average over the difference between the last accuracy result
obtained on an experience and the accuracy result obtained when first
training on that experience.
This metric is computed during the eval phase only.
"""
def exp_result(self, k=None) -> Union[float, None, Dict[int, float]]:
"""
Result for experience defined by a key.
See `BWT` documentation for more detailed information.
        k: optional key for which to compute backward transfer.
"""
forgetting = super().exp_result(k)
return forgetting_to_bwt(forgetting)
def __str__(self):
return "StreamBWT"
def bwt_metrics(*, experience=False, stream=False) \
-> List[PluginMetric]:
"""
Helper method that can be used to obtain the desired set of
plugin metrics.
:param experience: If True, will return a metric able to log
the backward transfer on each evaluation experience.
:param stream: If True, will return a metric able to log
the backward transfer averaged over the evaluation stream experiences
which have been observed during training.
:return: A list of plugin metrics.
"""
metrics = []
if experience:
metrics.append(ExperienceBWT())
if stream:
metrics.append(StreamBWT())
return metrics
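# Illustrative sketch (not part of the original module): the BWT helper mirrors
# forgetting_metrics and returns the corresponding BWT plugin metrics.
# >>> [str(m) for m in bwt_metrics(experience=True, stream=True)]
# ['ExperienceBWT', 'StreamBWT']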
__all__ = [
'Forgetting',
'GenericExperienceForgetting',
'GenericStreamForgetting',
'ExperienceForgetting',
'StreamForgetting',
'forgetting_metrics',
'BWT',
'ExperienceBWT',
'StreamBWT',
'bwt_metrics'
]
| 32.909532
| 80
| 0.63129
|
ab367ad170ab2d23eca56a11b6b21b3d535e9387
| 581
|
py
|
Python
|
beautiful_turtle_pattern.py
|
Prannov/Python-Projects
|
dc96756d1a6c2661958c9310e88c3cdaba044d38
|
[
"Unlicense"
] | 1
|
2021-10-01T15:50:15.000Z
|
2021-10-01T15:50:15.000Z
|
beautiful_turtle_pattern.py
|
Prannov/Python-Projects
|
dc96756d1a6c2661958c9310e88c3cdaba044d38
|
[
"Unlicense"
] | 4
|
2021-10-01T14:33:02.000Z
|
2021-11-04T11:50:54.000Z
|
beautiful_turtle_pattern.py
|
Prannov/Python-Projects
|
dc96756d1a6c2661958c9310e88c3cdaba044d38
|
[
"Unlicense"
] | 14
|
2021-10-01T14:10:02.000Z
|
2021-10-29T19:01:20.000Z
|
import turtle
pen = turtle.Turtle()
# initial length of the square-like shapes
size = 50
# Changing shape of the turtle
pen.shape("turtle")
# Changing Background color
turtle.bgcolor("blue")
# Changing color of the pen
pen.color("yellow")
# Setting the pen to the fastest drawing speed (values above 10 map to 0, i.e. no animation delay)
pen.speed(100)
# Running a loop with 300 iterations
for i in range(300):
    # after 200 iterations, switch the pen color from yellow to white
    if i == 200:
pen.color("white")
# moving the pen in forward direction
pen.forward(size)
    # rotating the pen by 89 degrees so the squares slowly precess into a spiral
pen.left(89)
# increasing the length
size+=1
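# Illustrative note (not part of the original script): because each turn is 89
# degrees rather than 90, every "square" ends up slightly rotated relative to
# the previous one, and the growing `size` pushes each side further out, which
# together produce the spiral. A hypothetical refactor sketch making the knobs
# explicit (kept as a comment so the original drawing above is unchanged):
# def draw_spiral(pen, steps=300, angle=89, start_size=50, colors=("yellow", "white")):
#     size = start_size
#     pen.color(colors[0])
#     for i in range(steps):
#         if i == steps * 2 // 3:   # switch color two thirds of the way through
#             pen.color(colors[1])
#         pen.forward(size)
#         pen.left(angle)
#         size += 1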
| 19.366667
| 46
| 0.690189
|
2e36eeea955543e4b53b2b1bffb68d632754901d
| 75
|
py
|
Python
|
allennlp_models/generation/predictors/__init__.py
|
matt-peters/allennlp-models
|
cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e
|
[
"Apache-2.0"
] | 402
|
2020-03-11T22:58:35.000Z
|
2022-03-29T09:05:27.000Z
|
allennlp_models/generation/predictors/__init__.py
|
matt-peters/allennlp-models
|
cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e
|
[
"Apache-2.0"
] | 116
|
2020-03-11T01:26:57.000Z
|
2022-03-25T13:03:56.000Z
|
allennlp_models/generation/predictors/__init__.py
|
matt-peters/allennlp-models
|
cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e
|
[
"Apache-2.0"
] | 140
|
2020-03-11T00:51:35.000Z
|
2022-03-29T09:05:36.000Z
|
from allennlp_models.generation.predictors.seq2seq import Seq2SeqPredictor
| 37.5
| 74
| 0.906667
|
26a46bdb61d468c5f5dc5a59154afd5bcdd294c6
| 1,672
|
py
|
Python
|
tests/cli/commands/test_triggerer_command.py
|
arezamoosavi/airflow
|
c3c81c3144386d1de535c1c5e777270e727bb69e
|
[
"Apache-2.0"
] | 2
|
2016-08-23T14:22:15.000Z
|
2017-09-28T19:45:26.000Z
|
tests/cli/commands/test_triggerer_command.py
|
arezamoosavi/airflow
|
c3c81c3144386d1de535c1c5e777270e727bb69e
|
[
"Apache-2.0"
] | 2
|
2019-02-16T19:00:53.000Z
|
2019-05-09T23:29:14.000Z
|
tests/cli/commands/test_triggerer_command.py
|
arezamoosavi/airflow
|
c3c81c3144386d1de535c1c5e777270e727bb69e
|
[
"Apache-2.0"
] | 6
|
2018-04-09T07:46:05.000Z
|
2019-07-16T00:13:15.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import pytest
from airflow import PY37
from airflow.cli import cli_parser
from airflow.cli.commands import triggerer_command
class TestTriggererCommand(unittest.TestCase):
"""
Tests the CLI interface and that it correctly calls the TriggererJob
"""
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
@pytest.mark.skipif(not PY37, reason="triggerer subcommand only works with Python 3.7+")
@mock.patch("airflow.cli.commands.triggerer_command.TriggererJob")
def test_capacity_argument(
self,
mock_scheduler_job,
):
"""Ensure that the capacity argument is passed correctly"""
args = self.parser.parse_args(['triggerer', '--capacity=42'])
triggerer_command.triggerer(args)
mock_scheduler_job.assert_called_once_with(capacity=42)
| 35.574468
| 92
| 0.746411
|
f7a486df643e22eeeb30144f329cc25e01ddddea
| 1,742
|
py
|
Python
|
spark_auto_mapper_fhir/value_sets/consent_scope_codes.py
|
imranq2/SparkAutoMapper.FHIR
|
dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2
|
[
"Apache-2.0"
] | 1
|
2020-10-31T23:25:07.000Z
|
2020-10-31T23:25:07.000Z
|
spark_auto_mapper_fhir/value_sets/consent_scope_codes.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
spark_auto_mapper_fhir/value_sets/consent_scope_codes.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ConsentScopeCodesCode(GenericTypeCode):
"""
ConsentScopeCodes
From: http://terminology.hl7.org/CodeSystem/consentscope in valuesets.xml
This value set includes the four Consent scope codes.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/consentscope
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/consentscope"
class ConsentScopeCodesCodeValues:
"""
Actions to be taken if they are no longer able to make decisions for
themselves
From: http://terminology.hl7.org/CodeSystem/consentscope in valuesets.xml
"""
AdvancedCareDirective = ConsentScopeCodesCode("adr")
"""
Consent to participate in research protocol and information sharing required
From: http://terminology.hl7.org/CodeSystem/consentscope in valuesets.xml
"""
Research = ConsentScopeCodesCode("research")
"""
Agreement to collect, access, use or disclose (share) information
From: http://terminology.hl7.org/CodeSystem/consentscope in valuesets.xml
"""
PrivacyConsent = ConsentScopeCodesCode("patient-privacy")
"""
Consent to undergo a specific treatment
From: http://terminology.hl7.org/CodeSystem/consentscope in valuesets.xml
"""
Treatment = ConsentScopeCodesCode("treatment")
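# Illustrative sketch (not part of the generated file): the predefined values
# above are ordinary ConsentScopeCodesCode instances built from their
# terminology strings, so ad-hoc codes can be constructed the same way.
# >>> custom = ConsentScopeCodesCode("treatment")   # same code as ConsentScopeCodesCodeValues.Treatment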
| 34.84
| 84
| 0.751435
|
f7e82198f73eb6eea760faf5549e386be4a06f75
| 576
|
py
|
Python
|
app/modules/categories/categories_views.py
|
gurgy11/caffeinated
|
278d09a88162d12409f0af445797b9790a319528
|
[
"MIT"
] | 1
|
2022-02-14T01:02:15.000Z
|
2022-02-14T01:02:15.000Z
|
app/modules/categories/categories_views.py
|
gurgy11/caffeinated
|
278d09a88162d12409f0af445797b9790a319528
|
[
"MIT"
] | null | null | null |
app/modules/categories/categories_views.py
|
gurgy11/caffeinated
|
278d09a88162d12409f0af445797b9790a319528
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, redirect, render_template, url_for, session, jsonify, request
from app.lib.authentication import login_required
bp = Blueprint('categories', __name__, url_prefix='/categories')
@bp.route('/')
@bp.route('/index')
@login_required
def index():
return render_template('categories/index.html', title='Categories')
@bp.route('/create', methods=['GET', 'POST'])
@login_required
def create():
if request.method == 'POST':
return jsonify(request.form)
return render_template('categories/create.html', title='Categories')
| 26.181818
| 90
| 0.717014
|
41165ea7e4c7044b352c1b78a1a2b01c54095e31
| 11,705
|
py
|
Python
|
autotest/pyscripts/test_ogrinfo_py.py
|
kammerer/gdal
|
a981e817543921ddc262f0ea2c137b52dea1e3f2
|
[
"MIT"
] | 1
|
2018-12-19T14:08:20.000Z
|
2018-12-19T14:08:20.000Z
|
autotest/pyscripts/test_ogrinfo_py.py
|
kammerer/gdal
|
a981e817543921ddc262f0ea2c137b52dea1e3f2
|
[
"MIT"
] | null | null | null |
autotest/pyscripts/test_ogrinfo_py.py
|
kammerer/gdal
|
a981e817543921ddc262f0ea2c137b52dea1e3f2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: ogrinfo.py testing
# Author: Even Rouault <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import ogrtest
import test_py_scripts
import pytest
###############################################################################
# Simple test
def test_ogrinfo_py_1():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '../ogr/data/poly.shp')
assert ret.find('ESRI Shapefile') != -1
###############################################################################
# Test -ro option
def test_ogrinfo_py_2():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '-ro ../ogr/data/poly.shp')
assert ret.find('ESRI Shapefile') != -1
###############################################################################
# Test -al option
def test_ogrinfo_py_3():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '-al ../ogr/data/poly.shp')
assert ret.find('Feature Count: 10') != -1
###############################################################################
# Test layer name
def test_ogrinfo_py_4():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '../ogr/data/poly.shp poly')
assert ret.find('Feature Count: 10') != -1
###############################################################################
# Test -sql option
def test_ogrinfo_py_5():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '../ogr/data/poly.shp -sql "select * from poly"')
assert ret.find('Feature Count: 10') != -1
###############################################################################
# Test -geom=NO option
def test_ogrinfo_py_6():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '../ogr/data/poly.shp poly -geom=no')
assert ret.find('Feature Count: 10') != -1
assert ret.find('POLYGON') == -1
###############################################################################
# Test -geom=SUMMARY option
def test_ogrinfo_py_7():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '../ogr/data/poly.shp poly -geom=summary')
assert ret.find('Feature Count: 10') != -1
assert ret.find('POLYGON (') == -1
assert ret.find('POLYGON :') != -1
###############################################################################
# Test -spat option
def test_ogrinfo_py_8():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '../ogr/data/poly.shp poly -spat 479609 4764629 479764 4764817')
if ogrtest.have_geos():
assert ret.find('Feature Count: 4') != -1
return
else:
assert ret.find('Feature Count: 5') != -1
return
###############################################################################
# Test -where option
def test_ogrinfo_py_9():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '../ogr/data/poly.shp poly -where "EAS_ID=171"')
assert ret.find('Feature Count: 1') != -1
###############################################################################
# Test -fid option
def test_ogrinfo_py_10():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '../ogr/data/poly.shp poly -fid 9')
assert ret.find('OGRFeature(poly):9') != -1
###############################################################################
# Test -fields=no option
def test_ogrinfo_py_11():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '../ogr/data/poly.shp poly -fields=no')
assert ret.find('AREA (Real') == -1
assert ret.find('POLYGON (') != -1
###############################################################################
# Test RFC 41 support
def test_ogrinfo_py_22():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
f = open('tmp/test_ogrinfo_22.csv', 'wt')
f.write('_WKTgeom1_EPSG_4326,_WKTgeom2_EPSG_32631\n')
f.write('"POINT(1 2)","POINT(3 4)"\n')
f.close()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', 'tmp/test_ogrinfo_22.csv')
assert '1: test_ogrinfo_22 (Unknown (any), Unknown (any))' in ret
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '-al tmp/test_ogrinfo_22.csv')
expected_ret = """INFO: Open of `tmp/test_ogrinfo_22.csv'
using driver `CSV' successful.
Layer name: test_ogrinfo_22
Geometry (geom__WKTgeom1_EPSG_4326): Unknown (any)
Geometry (geom__WKTgeom2_EPSG_32631): Unknown (any)
Feature Count: 1
Extent (geom__WKTgeom1_EPSG_4326): (1.000000, 2.000000) - (1.000000, 2.000000)
Extent (geom__WKTgeom2_EPSG_32631): (3.000000, 4.000000) - (3.000000, 4.000000)
SRS WKT (geom__WKTgeom1_EPSG_4326):
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
SRS WKT (geom__WKTgeom2_EPSG_32631):
PROJCS["WGS 84 / UTM zone 31N",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",3],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["Easting",EAST],
AXIS["Northing",NORTH],
AUTHORITY["EPSG","32631"]]
Geometry Column 1 = geom__WKTgeom1_EPSG_4326
Geometry Column 2 = geom__WKTgeom2_EPSG_32631
_WKTgeom1_EPSG_4326: String (0.0)
_WKTgeom2_EPSG_32631: String (0.0)
OGRFeature(test_ogrinfo_22):1
_WKTgeom1_EPSG_4326 (String) = POINT(1 2)
_WKTgeom2_EPSG_32631 (String) = POINT(3 4)
geom__WKTgeom1_EPSG_4326 = POINT (1 2)
geom__WKTgeom2_EPSG_32631 = POINT (3 4)
"""
expected_lines = expected_ret.splitlines()
lines = ret.splitlines()
for i, exp_line in enumerate(expected_lines):
assert exp_line == lines[i], ret
os.unlink('tmp/test_ogrinfo_22.csv')
###############################################################################
# Test -geomfield (RFC 41) support
def test_ogrinfo_py_23():
script_path = test_py_scripts.get_py_script('ogrinfo')
if script_path is None:
pytest.skip()
f = open('tmp/test_ogrinfo_23.csv', 'wt')
f.write('_WKTgeom1_EPSG_4326,_WKTgeom2_EPSG_32631\n')
f.write('"POINT(1 2)","POINT(3 4)"\n')
f.write('"POINT(3 4)","POINT(1 2)"\n')
f.close()
ret = test_py_scripts.run_py_script(script_path, 'ogrinfo', '-al tmp/test_ogrinfo_23.csv -spat 1 2 1 2 -geomfield geom__WKTgeom2_EPSG_32631')
expected_ret = """INFO: Open of `tmp/test_ogrinfo_23.csv'
using driver `CSV' successful.
Layer name: test_ogrinfo_23
Geometry (geom__WKTgeom1_EPSG_4326): Unknown (any)
Geometry (geom__WKTgeom2_EPSG_32631): Unknown (any)
Feature Count: 1
Extent (geom__WKTgeom1_EPSG_4326): (3.000000, 4.000000) - (3.000000, 4.000000)
Extent (geom__WKTgeom2_EPSG_32631): (1.000000, 2.000000) - (1.000000, 2.000000)
SRS WKT (geom__WKTgeom1_EPSG_4326):
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
SRS WKT (geom__WKTgeom2_EPSG_32631):
PROJCS["WGS 84 / UTM zone 31N",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",3],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["Easting",EAST],
AXIS["Northing",NORTH],
AUTHORITY["EPSG","32631"]]
Geometry Column 1 = geom__WKTgeom1_EPSG_4326
Geometry Column 2 = geom__WKTgeom2_EPSG_32631
_WKTgeom1_EPSG_4326: String (0.0)
_WKTgeom2_EPSG_32631: String (0.0)
OGRFeature(test_ogrinfo_23):2
_WKTgeom1_EPSG_4326 (String) = POINT(3 4)
_WKTgeom2_EPSG_32631 (String) = POINT(1 2)
geom__WKTgeom1_EPSG_4326 = POINT (3 4)
geom__WKTgeom2_EPSG_32631 = POINT (1 2)
"""
expected_lines = expected_ret.splitlines()
lines = ret.splitlines()
for i, exp_line in enumerate(expected_lines):
assert exp_line == lines[i], ret
os.unlink('tmp/test_ogrinfo_23.csv')
| 34.83631
| 145
| 0.608543
|
8c1ce8d8b2763651a1c0026262ae06e369c9195d
| 3,534
|
py
|
Python
|
nadlogar/config/settings.py
|
LenartBucar/nadlogar
|
2aba693254d56896419d09e066f91551492f8980
|
[
"MIT"
] | null | null | null |
nadlogar/config/settings.py
|
LenartBucar/nadlogar
|
2aba693254d56896419d09e066f91551492f8980
|
[
"MIT"
] | null | null | null |
nadlogar/config/settings.py
|
LenartBucar/nadlogar
|
2aba693254d56896419d09e066f91551492f8980
|
[
"MIT"
] | null | null | null |
"""
Django settings for nadlogar project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "t2-r&t0yj0b%q$b^@ptqya=13mq0rsz1_5&h^ub-=+(ueiqsql"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
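# Illustrative hardening sketch (not part of the original settings): in a real
# deployment the values flagged by the warnings above would typically be read
# from the environment instead of being hard-coded; the variable names below
# are hypothetical.
# import os
# SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", SECRET_KEY)
# DEBUG = os.environ.get("DJANGO_DEBUG", "0") == "1"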
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"utils",
"users",
"students",
"quizzes",
"problems",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [BASE_DIR / "templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"quizzes.context_processors.my_quizzes",
"students.context_processors.my_groups",
],
},
},
]
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "/"
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "sl-si"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_DIRS = [BASE_DIR / "static"]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| 25.42446
| 91
| 0.693548
|
64f3629116fbfe18c3d77532326c2d303acfd114
| 10,061
|
py
|
Python
|
tf_agents/agents/behavioral_cloning/behavioral_cloning_agent_test.py
|
Bhaney44/agents
|
e5fd5b19ba86fbc5980c9f8e173ce959f8b7bb45
|
[
"Apache-2.0"
] | 2
|
2021-02-16T14:20:53.000Z
|
2021-02-16T16:38:03.000Z
|
tf_agents/agents/behavioral_cloning/behavioral_cloning_agent_test.py
|
Bhaney44/agents
|
e5fd5b19ba86fbc5980c9f8e173ce959f8b7bb45
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/agents/behavioral_cloning/behavioral_cloning_agent_test.py
|
Bhaney44/agents
|
e5fd5b19ba86fbc5980c9f8e173ce959f8b7bb45
|
[
"Apache-2.0"
] | 1
|
2020-08-18T13:32:15.000Z
|
2020-08-18T13:32:15.000Z
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for agents.behavioral_cloning.behavioral_cloning_agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tf_agents.agents.behavioral_cloning import behavioral_cloning_agent
from tf_agents.agents.dqn import q_network
from tf_agents.drivers import test_utils as driver_test_utils
from tf_agents.environments import time_step as ts
from tf_agents.environments import trajectory
from tf_agents.environments import trajectory_replay
from tf_agents.networks import network
from tf_agents.networks import q_rnn_network
from tf_agents.specs import tensor_spec
from tf_agents.utils import common
# Number of times to train in test loops.
TRAIN_ITERATIONS = 150
class DummyNet(network.Network):
def __init__(self, unused_observation_spec, action_spec, name=None):
super(DummyNet, self).__init__(
unused_observation_spec, state_spec=(), name=name)
action_spec = tf.nest.flatten(action_spec)[0]
num_actions = action_spec.maximum - action_spec.minimum + 1
self._layers.append(
tf.keras.layers.Dense(
num_actions,
kernel_initializer=tf.compat.v1.initializers.constant([[2, 1],
[1, 1]]),
bias_initializer=tf.compat.v1.initializers.constant([[1], [1]]),
dtype=tf.float32))
def call(self, inputs, unused_step_type=None, network_state=()):
inputs = tf.cast(inputs[0], tf.float32)
for layer in self.layers:
inputs = layer(inputs)
return inputs, network_state
class BehavioralCloningAgentTest(tf.test.TestCase):
def setUp(self):
super(BehavioralCloningAgentTest, self).setUp()
tf.compat.v1.enable_resource_variables()
self._obs_spec = [tensor_spec.TensorSpec([2], tf.float32)]
self._time_step_spec = ts.time_step_spec(self._obs_spec)
self._action_spec = [tensor_spec.BoundedTensorSpec([], tf.int32, 0, 1)]
self._observation_spec = self._time_step_spec.observation
def testCreateAgent(self):
cloning_net = DummyNet(self._observation_spec, self._action_spec)
agent = behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec,
self._action_spec,
cloning_network=cloning_net,
optimizer=None)
self.assertIsNotNone(agent.policy)
def testCreateAgentNestSizeChecks(self):
action_spec = [
tensor_spec.BoundedTensorSpec([], tf.int32, 0, 1),
tensor_spec.BoundedTensorSpec([], tf.int32, 0, 1)
]
cloning_net = DummyNet(self._observation_spec, action_spec)
with self.assertRaisesRegexp(NotImplementedError, '.*Multi-arity.*'):
behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec,
action_spec,
cloning_network=cloning_net,
optimizer=None)
def testCreateAgentDimChecks(self):
action_spec = [tensor_spec.BoundedTensorSpec([1, 2], tf.int32, 0, 1)]
cloning_net = DummyNet(self._observation_spec, action_spec)
with self.assertRaisesRegexp(NotImplementedError, '.*one dimensional.*'):
behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec, action_spec,
cloning_network=cloning_net,
optimizer=None)
# TODO(kbanoop): Add a test where the target network has different values.
def testLoss(self):
with tf.compat.v2.summary.record_if(False):
cloning_net = DummyNet(self._observation_spec, self._action_spec)
agent = behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec,
self._action_spec,
cloning_network=cloning_net,
optimizer=None)
observations = [tf.constant([[1, 2], [3, 4]], dtype=tf.float32)]
actions = [tf.constant([0, 1], dtype=tf.int32)]
rewards = tf.constant([10, 20], dtype=tf.float32)
discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
experience = trajectory.first(
observation=observations,
action=actions,
policy_info=(),
reward=rewards,
discount=discounts)
loss_info = agent._loss(experience)
self.evaluate(tf.compat.v1.initialize_all_variables())
total_loss, _ = self.evaluate(loss_info)
expected_loss = tf.reduce_mean(
input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=cloning_net(observations)[0], labels=actions[0]))
self.assertAllClose(total_loss, expected_loss)
def testTrain(self):
with tf.compat.v2.summary.record_if(False):
# Emits trajectories shaped (batch=1, time=6, ...)
traj, time_step_spec, action_spec = (
driver_test_utils.make_random_trajectory())
# Convert to shapes (batch=6, 1, ...) so this works with a non-RNN model.
traj = tf.nest.map_structure(common.transpose_batch_time, traj)
cloning_net = q_network.QNetwork(
time_step_spec.observation, action_spec)
agent = behavioral_cloning_agent.BehavioralCloningAgent(
time_step_spec,
action_spec,
cloning_network=cloning_net,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.01))
# Disable clipping to make sure we can see the difference in behavior
agent.policy._clip = False
# Remove policy_info, as BehavioralCloningAgent expects none.
traj = traj.replace(policy_info=())
# TODO(b/123883319)
if tf.executing_eagerly():
train_and_loss = lambda: agent.train(traj)
else:
train_and_loss = agent.train(traj)
replay = trajectory_replay.TrajectoryReplay(agent.policy)
self.evaluate(tf.compat.v1.global_variables_initializer())
initial_actions = self.evaluate(replay.run(traj)[0])
for _ in range(TRAIN_ITERATIONS):
self.evaluate(train_and_loss)
post_training_actions = self.evaluate(replay.run(traj)[0])
      # We don't necessarily converge to the same actions as in the trajectory
      # after TRAIN_ITERATIONS steps of an untuned optimizer, but the policy
      # does change.
self.assertFalse(np.all(initial_actions == post_training_actions))
def testTrainWithRNN(self):
with tf.compat.v2.summary.record_if(False):
# Emits trajectories shaped (batch=1, time=6, ...)
traj, time_step_spec, action_spec = (
driver_test_utils.make_random_trajectory())
cloning_net = q_rnn_network.QRnnNetwork(
time_step_spec.observation, action_spec)
agent = behavioral_cloning_agent.BehavioralCloningAgent(
time_step_spec,
action_spec,
cloning_network=cloning_net,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.01))
# Disable clipping to make sure we can see the difference in behavior
agent.policy._clip = False
# Remove policy_info, as BehavioralCloningAgent expects none.
traj = traj.replace(policy_info=())
# TODO(b/123883319)
if tf.executing_eagerly():
train_and_loss = lambda: agent.train(traj)
else:
train_and_loss = agent.train(traj)
replay = trajectory_replay.TrajectoryReplay(agent.policy)
self.evaluate(tf.compat.v1.global_variables_initializer())
initial_actions = self.evaluate(replay.run(traj)[0])
for _ in range(TRAIN_ITERATIONS):
self.evaluate(train_and_loss)
post_training_actions = self.evaluate(replay.run(traj)[0])
      # We don't necessarily converge to the same actions as in the trajectory
      # after TRAIN_ITERATIONS steps of an untuned optimizer, but the policy
      # does change.
self.assertFalse(np.all(initial_actions == post_training_actions))
def testPolicy(self):
cloning_net = DummyNet(self._observation_spec, self._action_spec)
agent = behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec,
self._action_spec,
cloning_network=cloning_net,
optimizer=None)
observations = [tf.constant([[1, 2], [3, 4]], dtype=tf.float32)]
time_steps = ts.restart(observations, batch_size=2)
policy = agent.policy
action_step = policy.action(time_steps)
# Batch size 2.
self.assertAllEqual(
[2] + self._action_spec[0].shape.as_list(),
action_step.action[0].shape,
)
self.evaluate(tf.compat.v1.initialize_all_variables())
actions_ = self.evaluate(action_step.action)
self.assertTrue(all(actions_[0] <= self._action_spec[0].maximum))
self.assertTrue(all(actions_[0] >= self._action_spec[0].minimum))
def testInitializeRestoreAgent(self):
cloning_net = DummyNet(self._observation_spec, self._action_spec)
agent = behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec,
self._action_spec,
cloning_network=cloning_net,
optimizer=None)
observations = [tf.constant([[1, 2], [3, 4]], dtype=tf.float32)]
time_steps = ts.restart(observations, batch_size=2)
policy = agent.policy
action_step = policy.action(time_steps)
self.evaluate(tf.compat.v1.initialize_all_variables())
checkpoint = tf.train.Checkpoint(agent=agent)
latest_checkpoint = tf.train.latest_checkpoint(self.get_temp_dir())
checkpoint_load_status = checkpoint.restore(latest_checkpoint)
with self.cached_session() as sess:
checkpoint_load_status.initialize_or_restore(sess)
self.assertAllEqual(sess.run(action_step.action), [[0, 0]])
if __name__ == '__main__':
tf.test.main()
| 40.732794
| 80
| 0.710069
|
8b59574254957a44bb8f953cae6d260eed99c47b
| 3,868
|
py
|
Python
|
src/tasks/scrape_reddit/task.py
|
xhonino/Youtube-automatic-reddit
|
8490b8cb517fd2421a92dc58471e1339aa6ac4fc
|
[
"MIT"
] | null | null | null |
src/tasks/scrape_reddit/task.py
|
xhonino/Youtube-automatic-reddit
|
8490b8cb517fd2421a92dc58471e1339aa6ac4fc
|
[
"MIT"
] | null | null | null |
src/tasks/scrape_reddit/task.py
|
xhonino/Youtube-automatic-reddit
|
8490b8cb517fd2421a92dc58471e1339aa6ac4fc
|
[
"MIT"
] | null | null | null |
import praw
import os
from time import sleep
from praw.models import MoreComments
from src.tasks.scrape_reddit.post import Post, Comment
from src.tasks.scrape_reddit.profanity_filter import filter
client_id = "iosCZqE9n_yFQw"
client_secret = "Eu_vqISa7HWnPpWQmh6xcsDx36w"
reddit = praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent='YOUTUBE')
def get_hottest_post(context):
video_minutes_limit = context["video_minutes_limit"]
post = reddit.submission(url=context["url"])
comments = []
post.comments.replace_more(limit=1)
    total_characters = 0  # needed for checking the video length
for comment in post.comments:
if comment.body == '[deleted]':
continue
if len(comment.body) > 1500:
continue
if comment.stickied:
continue
if comment.body.find("https:") != -1 or comment.body.find("http:") != -1 or comment.body.find("www.") != -1:
continue
comment_body = filter(comment.body)
if comment_body == "[removed]":
continue
comment_reply = ""
comment.replies.replace_more(limit=1)
if len(comment.replies) > 0:
reply = comment.replies[0]
if isinstance(reply, MoreComments):
continue
comment_reply = filter(reply.body)
comment_output = Comment(comment_body, comment_reply)
if comment.author is not None:
comment_output.author = comment.author.name
else:
comment_output.author = '[deleted]'
comment_output.score = comment.score
comments.append(comment_output)
        ############### Check the video length ##################
total_characters += len(comment_body)
        video_minutes = total_characters / 1000  # rough heuristic: ~1000 characters of text per minute of video
# sleep(5)
if video_minutes >= video_minutes_limit:
break
post.title = filter(post.title)
post_data = Post(post.title, comments)
post_data.score = post.score
post_data.num_comments = post.num_comments
context["post"] = post_data
return
############## OLD CODE ##################
# subreddit_name=context["subreddit"]
# comment_limit=context["comment_limit"]
# nsfw=context["nsfw"]
# subreddit = reddit.subreddit(subreddit_name)
# hot_posts = subreddit.hot()
# for post in hot_posts:
# if not post.stickied and post.over_18 == nsfw:
# title = post.title
# if len(title) >= 100:
# continue # respect youtube limit of 100
# comments = []
# for comment in post.comments:
# if isinstance(comment, MoreComments):
# continue
# if comment.stickied:
# continue
# comment_body = comment.body
# if comment_body == "[removed]":
# continue
# comment_reply = ""
# comment.replies.replace_more(limit=1)
# if len(comment.replies) > 0:
# reply = comment.replies[0]
# if isinstance(reply, MoreComments):
# continue
# comment_reply = reply.body
# comment_output = Comment(comment_body, comment_reply)
# comment_output.author = comment.author.name
# comment_output.score = comment.score
# comments.append(comment_output)
# if len(comments) >= comment_limit:
# break
#
# post_data = Post(title, comments)
# post_data.score = post.score
# post_data.num_comments = post.num_comments
# context["post"] = post_data
# return
############## OLD CODE ##################
if __name__ == '__main__':
url = 'https://www.reddit.com/r/AskReddit/comments/i3ntz9/what_is_the_worst_feeling_emotionally_in_your/'
context= {'url':url, 'video_minutes_limit': 3, "post":None}
get_hottest_post(context)
| 36.490566
| 114
| 0.613754
|
b282f718de53acee0cb5f2a8bd6396f832a4fb37
| 22,565
|
py
|
Python
|
compose/project.py
|
VisionSystemsInc/nvidia-docker-plugin-compose
|
2de246a4648d0905f6f120efcc792dc45b2639c5
|
[
"Apache-2.0"
] | null | null | null |
compose/project.py
|
VisionSystemsInc/nvidia-docker-plugin-compose
|
2de246a4648d0905f6f120efcc792dc45b2639c5
|
[
"Apache-2.0"
] | null | null | null |
compose/project.py
|
VisionSystemsInc/nvidia-docker-plugin-compose
|
2de246a4648d0905f6f120efcc792dc45b2639c5
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import logging
import operator
from functools import reduce
import enum
from docker.errors import APIError
from . import parallel
from .config import ConfigurationError
from .config.config import V1
from .config.sort_services import get_container_name_from_network_mode
from .config.sort_services import get_service_name_from_network_mode
from .const import IMAGE_EVENTS
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .container import Container
from .network import build_networks
from .network import get_networks
from .network import ProjectNetworks
from .service import BuildAction
from .service import ContainerNetworkMode
from .service import ConvergenceStrategy
from .service import NetworkMode
from .service import Service
from .service import ServiceNetworkMode
from .utils import microseconds_from_time_nano
from .volume import ProjectVolumes
log = logging.getLogger(__name__)
@enum.unique
class OneOffFilter(enum.Enum):
include = 0
exclude = 1
only = 2
@classmethod
def update_labels(cls, value, labels):
if value == cls.only:
labels.append('{0}={1}'.format(LABEL_ONE_OFF, "True"))
elif value == cls.exclude:
labels.append('{0}={1}'.format(LABEL_ONE_OFF, "False"))
elif value == cls.include:
pass
else:
raise ValueError("Invalid value for one_off: {}".format(repr(value)))
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client, networks=None, volumes=None, config_version=None):
self.name = name
self.services = services
self.client = client
self.volumes = volumes or ProjectVolumes({})
self.networks = networks or ProjectNetworks({}, False)
self.config_version = config_version
def labels(self, one_off=OneOffFilter.exclude):
labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
OneOffFilter.update_labels(one_off, labels)
return labels
@classmethod
def from_config(cls, name, config_data, client):
"""
Construct a Project from a config.Config object.
"""
use_networking = (config_data.version and config_data.version != V1)
networks = build_networks(name, config_data, client)
project_networks = ProjectNetworks.from_services(
config_data.services,
networks,
use_networking)
volumes = ProjectVolumes.from_config(name, config_data, client)
project = cls(name, [], client, project_networks, volumes, config_data.version)
for service_dict in config_data.services:
service_dict = dict(service_dict)
if use_networking:
service_networks = get_networks(service_dict, networks)
else:
service_networks = {}
service_dict.pop('networks', None)
links = project.get_links(service_dict)
network_mode = project.get_network_mode(
service_dict, list(service_networks.keys())
)
volumes_from = get_volumes_from(project, service_dict)
if config_data.version != V1:
service_dict['volumes'] = [
volumes.namespace_spec(volume_spec)
for volume_spec in service_dict.get('volumes', [])
]
secrets = get_secrets(
service_dict['name'],
service_dict.pop('secrets', None) or [],
config_data.secrets)
project.services.append(
Service(
service_dict.pop('name'),
client=client,
project=name,
use_networking=use_networking,
networks=service_networks,
links=links,
network_mode=network_mode,
volumes_from=volumes_from,
secrets=secrets,
**service_dict)
)
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
service_names = self.service_names
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
uniques = []
[uniques.append(s) for s in services if s not in uniques]
return uniques
def get_services_without_duplicate(self, service_names=None, include_deps=False):
services = self.get_services(service_names, include_deps)
for service in services:
service.remove_duplicate_containers()
return services
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError(
'Service "%s" has a link to service "%s" which does not '
'exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_network_mode(self, service_dict, networks):
network_mode = service_dict.pop('network_mode', None)
if not network_mode:
if self.networks.use_networking:
return NetworkMode(networks[0]) if networks else NetworkMode('none')
return NetworkMode(None)
service_name = get_service_name_from_network_mode(network_mode)
if service_name:
return ServiceNetworkMode(self.get_service(service_name))
container_name = get_container_name_from_network_mode(network_mode)
if container_name:
try:
return ContainerNetworkMode(Container.from_id(self.client, container_name))
except APIError:
raise ConfigurationError(
"Service '{name}' uses the network stack of container '{dep}' which "
"does not exist.".format(name=service_dict['name'], dep=container_name))
return NetworkMode(network_mode)
def start(self, service_names=None, **options):
containers = []
def start_service(service):
service_containers = service.start(quiet=True, **options)
containers.extend(service_containers)
services = self.get_services(service_names)
def get_deps(service):
return {
(self.get_service(dep), config)
for dep, config in service.get_dependency_configs().items()
}
parallel.parallel_execute(
services,
start_service,
operator.attrgetter('name'),
'Starting',
get_deps)
return containers
def stop(self, service_names=None, one_off=OneOffFilter.exclude, **options):
containers = self.containers(service_names, one_off=one_off)
def get_deps(container):
            # actually returning inverted dependencies
return {(other, None) for other in containers
if container.service in
self.get_service(other.service).get_dependency_names()}
parallel.parallel_execute(
containers,
self.build_container_operation_with_timeout_func('stop', options),
operator.attrgetter('name'),
'Stopping',
get_deps)
def pause(self, service_names=None, **options):
containers = self.containers(service_names)
parallel.parallel_pause(reversed(containers), options)
return containers
def unpause(self, service_names=None, **options):
containers = self.containers(service_names)
parallel.parallel_unpause(containers, options)
return containers
def kill(self, service_names=None, **options):
parallel.parallel_kill(self.containers(service_names), options)
def remove_stopped(self, service_names=None, one_off=OneOffFilter.exclude, **options):
parallel.parallel_remove(self.containers(
service_names, stopped=True, one_off=one_off
), options)
def down(self, remove_image_type, include_volumes, remove_orphans=False):
self.stop(one_off=OneOffFilter.include)
self.find_orphan_containers(remove_orphans)
self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
self.networks.remove()
if include_volumes:
self.volumes.remove()
self.remove_images(remove_image_type)
def remove_images(self, remove_image_type):
for service in self.get_services():
service.remove_image(remove_image_type)
def restart(self, service_names=None, **options):
containers = self.containers(service_names, stopped=True)
parallel.parallel_execute(
containers,
self.build_container_operation_with_timeout_func('restart', options),
operator.attrgetter('name'),
'Restarting')
return containers
def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, build_args=None):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache, pull, force_rm, build_args)
else:
log.info('%s uses an image, skipping' % service.name)
def create(
self,
service_names=None,
strategy=ConvergenceStrategy.changed,
do_build=BuildAction.none,
):
services = self.get_services_without_duplicate(service_names, include_deps=True)
for svc in services:
svc.ensure_image_exists(do_build=do_build)
plans = self._get_convergence_plans(services, strategy)
for service in services:
service.execute_convergence_plan(
plans[service.name],
detached=True,
start=False)
def events(self, service_names=None):
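        """Yield a dict describing each container event reported by the
        Docker daemon for this project's services. Image events and events
        without a status field are ignored.
        """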
def build_container_event(event, container):
time = datetime.datetime.fromtimestamp(event['time'])
time = time.replace(
microsecond=microseconds_from_time_nano(event['timeNano']))
return {
'time': time,
'type': 'container',
'action': event['status'],
'id': container.id,
'service': container.service,
'attributes': {
'name': container.name,
'image': event['from'],
},
'container': container,
}
service_names = set(service_names or self.service_names)
for event in self.client.events(
filters={'label': self.labels()},
decode=True
):
# The first part of this condition is a guard against some events
            # broadcast by swarm that don't have a status field.

# See https://github.com/docker/compose/issues/3316
if 'status' not in event or event['status'] in IMAGE_EVENTS:
# We don't receive any image events because labels aren't applied
# to images
continue
            # TODO: get labels from the API v1.22, see github issue 2618
try:
# this can fail if the container has been removed
container = Container.from_id(self.client, event['id'])
except APIError:
continue
if container.service not in service_names:
continue
yield build_container_event(event, container)
def up(self,
service_names=None,
start_deps=True,
strategy=ConvergenceStrategy.changed,
do_build=BuildAction.none,
timeout=None,
detached=False,
remove_orphans=False,
scale_override=None,
rescale=True):
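        """Create and start containers for the given services and, when
        start_deps is True, for their dependencies as well. Services are
        converged in parallel in dependency order and the resulting
        containers are returned.

        Illustrative usage (assumes the project defines a ``web`` service):

            containers = project.up(['web'], detached=True)
        """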
warn_for_swarm_mode(self.client)
self.initialize()
self.find_orphan_containers(remove_orphans)
if scale_override is None:
scale_override = {}
services = self.get_services_without_duplicate(
service_names,
include_deps=start_deps)
for svc in services:
svc.ensure_image_exists(do_build=do_build)
plans = self._get_convergence_plans(services, strategy)
def do(service):
return service.execute_convergence_plan(
plans[service.name],
timeout=timeout,
detached=detached,
scale_override=scale_override.get(service.name),
rescale=rescale
)
def get_deps(service):
return {
(self.get_service(dep), config)
for dep, config in service.get_dependency_configs().items()
}
results, errors = parallel.parallel_execute(
services,
do,
operator.attrgetter('name'),
None,
get_deps
)
if errors:
raise ProjectError(
'Encountered errors while bringing up the project.'
)
return [
container
for svc_containers in results
if svc_containers is not None
for container in svc_containers
]
def initialize(self):
self.networks.initialize()
self.volumes.initialize()
def _get_convergence_plans(self, services, strategy):
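        """Return a dict mapping each service name to its convergence plan,
        forcing recreation of a service when one of its dependencies is
        going to be created or recreated (if the strategy allows recreation).
        """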
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans and
plans[name].action in ('recreate', 'create')
]
if updated_dependencies and strategy.allows_recreate:
log.debug('%s has upstream changes (%s)',
service.name,
", ".join(updated_dependencies))
plan = service.convergence_plan(ConvergenceStrategy.always)
else:
plan = service.convergence_plan(strategy)
plans[service.name] = plan
return plans
def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False):
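        """Pull images for the given services, either sequentially or, when
        parallel_pull is True, in parallel with a concurrency limit of 5.
        """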
services = self.get_services(service_names, include_deps=False)
if parallel_pull:
def pull_service(service):
service.pull(ignore_pull_failures, True)
parallel.parallel_execute(
services,
pull_service,
operator.attrgetter('name'),
'Pulling',
limit=5)
else:
for service in services:
service.pull(ignore_pull_failures)
def push(self, service_names=None, ignore_push_failures=False):
for service in self.get_services(service_names, include_deps=False):
service.push(ignore_push_failures)
def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
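        """Return Container objects for every container carrying this
        project's labels, optionally including stopped containers.
        """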
return list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})])
)
def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude):
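        """Return the project's containers, filtered to the given service
        names when any are provided.
        """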
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = self._labeled_containers(stopped, one_off)
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
return [c for c in containers if matches_service_names(c)]
def find_orphan_containers(self, remove_orphans):
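        """Find containers labeled for this project whose service is no
        longer defined; remove them if remove_orphans is True, otherwise
        log a warning.
        """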
def _find():
containers = self._labeled_containers()
for ctnr in containers:
service_name = ctnr.labels.get(LABEL_SERVICE)
if service_name not in self.service_names:
yield ctnr
orphans = list(_find())
if not orphans:
return
if remove_orphans:
for ctnr in orphans:
log.info('Removing orphan container "{0}"'.format(ctnr.name))
ctnr.kill()
ctnr.remove(force=True)
else:
log.warning(
'Found orphan containers ({0}) for this project. If '
'you removed or renamed this service in your compose '
'file, you can run this command with the '
'--remove-orphans flag to clean it up.'.format(
', '.join(["{}".format(ctnr.name) for ctnr in orphans])
)
)
def _inject_deps(self, acc, service):
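        """Append the given service to acc, preceded by the services it
        depends on (resolved recursively), and return the combined list.
        """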
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
def build_container_operation_with_timeout_func(self, operation, options):
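        """Return a callable that runs the named operation (e.g. 'stop' or
        'restart') on a container, defaulting the timeout to the service's
        stop_timeout when none was given.
        """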
def container_operation_with_timeout(container):
if options.get('timeout') is None:
service = self.get_service(container.service)
options['timeout'] = service.stop_timeout(None)
return getattr(container, operation)(**options)
return container_operation_with_timeout
def get_volumes_from(project, service_dict):
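    """Resolve the service's volumes_from entries to Service or Container
    objects, raising ConfigurationError when a name matches neither.
    """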
volumes_from = service_dict.pop('volumes_from', None)
if not volumes_from:
return []
def build_volume_from(spec):
if spec.type == 'service':
try:
return spec._replace(source=project.get_service(spec.source))
except NoSuchService:
pass
if spec.type == 'container':
try:
container = Container.from_id(project.client, spec.source)
return spec._replace(source=container)
except APIError:
pass
raise ConfigurationError(
"Service \"{}\" mounts volumes from \"{}\", which is not the name "
"of a service or container.".format(
service_dict['name'],
spec.source))
return [build_volume_from(vf) for vf in volumes_from]
def get_secrets(service, service_secrets, secret_defs):
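    """Resolve the secrets requested by a service against the project-level
    secret definitions, skipping external secrets (with a warning) and
    raising ConfigurationError for undefined ones.
    """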
secrets = []
for secret in service_secrets:
secret_def = secret_defs.get(secret.source)
if not secret_def:
raise ConfigurationError(
"Service \"{service}\" uses an undefined secret \"{secret}\" "
.format(service=service, secret=secret.source))
if secret_def.get('external_name'):
log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
"External secrets are not available to containers created by "
"docker-compose.".format(service=service, secret=secret.source))
continue
if secret.uid or secret.gid or secret.mode:
            log.warning(
"Service \"{service}\" uses secret \"{secret}\" with uid, "
"gid, or mode. These fields are not supported by this "
"implementation of the Compose file".format(
service=service, secret=secret.source
)
)
secrets.append({'secret': secret, 'file': secret_def.get('file')})
return secrets
def warn_for_swarm_mode(client):
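    """Warn the user when the Docker Engine is running in swarm mode, since
    Compose schedules all containers on the current node (UCP is exempt).
    """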
info = client.info()
if info.get('Swarm', {}).get('LocalNodeState') == 'active':
if info.get('ServerVersion', '').startswith('ucp'):
# UCP does multi-node scheduling with traditional Compose files.
return
        log.warning(
"The Docker Engine you're using is running in swarm mode.\n\n"
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
"All containers will be scheduled on the current node.\n\n"
"To deploy your application across the swarm, "
"use `docker stack deploy`.\n"
)
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class ProjectError(Exception):
def __init__(self, msg):
self.msg = msg
avg_line_length: 35.202808 | max_line_length: 101 | alphanum_fraction: 0.602792