Dataset schema (one row per source file):

| column | dtype | range / cardinality |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
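
The last three columns (avg_line_length, max_line_length, alphanum_fraction) are per-file statistics derived from `content`. As a rough sketch of how such values can be computed (the helper name `content_stats` is hypothetical, and the dataset's exact conventions for newline handling and rounding are assumptions, not taken from this dump):

```python
def content_stats(content: str) -> dict:
    """Compute avg_line_length, max_line_length and alphanum_fraction for one
    file, mirroring the schema's derived columns. Conventions (e.g. whether
    newline characters count toward line length) are assumptions."""
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]          # per-line character counts
    alnum = sum(ch.isalnum() for ch in content)      # alphanumeric characters
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alnum / len(content) if content else 0.0,
    }
```

Calling it on a record's `content` field gives numbers comparable to, though not necessarily bit-identical with, the stored columns.
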
hexsha: b8cbdf21fb37924f7d67a1b4ea5a61f589e22f36 | size: 301 | ext: py | lang: Python
max_stars_repo: SWLBot/electronic-blackboard @ 8c149919c65a36d0d15fed09f1242a3e7d66c728 | path: dataType.py | licenses: ["Apache-2.0"] | max_stars_count: 2 | stars events: 2017-08-29T02:46:22.000Z to 2017-09-08T17:16:59.000Z
max_issues_repo: SWLBot/electronic-blackboard @ 8c149919c65a36d0d15fed09f1242a3e7d66c728 | path: dataType.py | licenses: ["Apache-2.0"] | max_issues_count: 103 | issues events: 2017-03-02T12:51:57.000Z to 2018-04-11T11:46:01.000Z
max_forks_repo: SWLBot/electronic-blackboard @ 8c149919c65a36d0d15fed09f1242a3e7d66c728 | path: dataType.py | licenses: ["Apache-2.0"] | max_forks_count: 12 | forks events: 2017-04-14T02:42:38.000Z to 2017-08-29T02:46:22.000Z
content:
class DataType:
def __init__(self, type_id=None, type_name=None, type_dir=None):
if not (type_id and type_name and type_dir):
            raise ValueError("Missing argument for DataType")
self.type_id = type_id
self.type_name = type_name
self.type_dir = type_dir
avg_line_length: 33.444444 | max_line_length: 68 | alphanum_fraction: 0.667774

hexsha: 0b4a6ca0658f46385446747ea4872a882ee41424 | size: 38,370 | ext: py | lang: Python
max_stars_repo: devyntk/django-cms @ f889a30e94f268394ae9abf32c032239d0a9be55 | path: cms/tests/test_cache.py | licenses: ["BSD-3-Clause"] | max_stars_count: 5,659 | stars events: 2015-01-01T02:42:30.000Z to 2020-10-07T02:38:29.000Z
max_issues_repo: devyntk/django-cms @ f889a30e94f268394ae9abf32c032239d0a9be55 | path: cms/tests/test_cache.py | licenses: ["BSD-3-Clause"] | max_issues_count: 3,264 | issues events: 2015-01-02T10:11:48.000Z to 2020-10-08T13:15:07.000Z
max_forks_repo: devyntk/django-cms @ f889a30e94f268394ae9abf32c032239d0a9be55 | path: cms/tests/test_cache.py | licenses: ["BSD-3-Clause"] | max_forks_count: 2,132 | forks events: 2015-01-01T11:28:21.000Z to 2020-10-06T09:09:11.000Z
content:
import time
from django.conf import settings
from django.template import Context
from sekizai.context import SekizaiContext
from cms.api import add_plugin, create_page, create_title
from cms.cache import _get_cache_version, invalidate_cms_page_cache
from cms.cache.placeholder import (
_get_placeholder_cache_key, _get_placeholder_cache_version,
_get_placeholder_cache_version_key, _set_placeholder_cache_version,
clear_placeholder_cache, get_placeholder_cache, set_placeholder_cache,
)
from cms.exceptions import PluginAlreadyRegistered
from cms.models import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.project.pluginapp.plugins.caching.cms_plugins import (
DateTimeCacheExpirationPlugin, LegacyCachePlugin, NoCachePlugin,
SekizaiPlugin, TimeDeltaCacheExpirationPlugin, TTLCacheExpirationPlugin,
VaryCacheOnPlugin,
)
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import get_timezone_name
class CacheTestCase(CMSTestCase):
def tearDown(self):
from django.core.cache import cache
super().tearDown()
cache.clear()
def setUp(self):
from django.core.cache import cache
super().setUp()
cache.clear()
def test_cache_placeholder(self):
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
page1_url = page1.get_absolute_url()
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(5, 9)):
self.render_template_obj(template, {}, request)
request = self.get_request(page1_url)
request.session['cms_edit'] = True
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
with self.assertNumQueries(2):
self.render_template_obj(template, {}, request)
# toolbar
with self.login_user_context(self.get_superuser()):
request = self.get_request(page1_url)
request.session['cms_edit'] = True
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
request.toolbar.show_toolbar = True
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
with self.assertNumQueries(4):
self.render_template_obj(template, {}, request)
page1.publish('en')
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = dict(
CMS_PAGE_CACHE=False,
MIDDLEWARE=[mw for mw in settings.MIDDLEWARE if mw not in exclude],
)
with self.settings(**overrides):
with self.assertNumQueries(FuzzyInt(13, 25)):
self.client.get(page1_url)
with self.assertNumQueries(FuzzyInt(5, 14)):
self.client.get(page1_url)
overrides['CMS_PLACEHOLDER_CACHE'] = False
with self.settings(**overrides):
with self.assertNumQueries(FuzzyInt(7, 18)):
self.client.get(page1_url)
def test_no_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
page1_url = page1.get_absolute_url()
placeholder1 = page1.placeholders.filter(slot='body')[0]
placeholder2 = page1.placeholders.filter(slot='right-column')[0]
try:
plugin_pool.register_plugin(NoCachePlugin)
except PluginAlreadyRegistered:
pass
add_plugin(placeholder1, 'TextPlugin', 'en', body="English")
add_plugin(placeholder2, 'TextPlugin', 'en', body="Deutsch")
template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = {
'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
}
with self.settings(**overrides):
# Request the page without the 'no-cache' plugin
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(18, 25)):
response1 = self.client.get(page1_url)
content1 = response1.content
# Fetch it again, it is cached.
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
response2 = self.client.get(page1_url)
content2 = response2.content
self.assertEqual(content1, content2)
# Once again with PAGE_CACHE=False, to prove the cache can
# be disabled
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.settings(CMS_PAGE_CACHE=False):
with self.assertNumQueries(FuzzyInt(5, 24)):
response3 = self.client.get(page1_url)
content3 = response3.content
self.assertEqual(content1, content3)
# Add the 'no-cache' plugin
add_plugin(placeholder1, "NoCachePlugin", 'en')
page1.publish('en')
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(4, 6)):
output = self.render_template_obj(template, {}, request)
with self.assertNumQueries(FuzzyInt(14, 24)):
response = self.client.get(page1_url)
self.assertTrue("no-cache" in response['Cache-Control'])
resp1 = response.content.decode('utf8').split("$$$")[1]
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(5):
output2 = self.render_template_obj(template, {}, request)
with self.settings(CMS_PAGE_CACHE=False):
with self.assertNumQueries(FuzzyInt(8, 17)):
response = self.client.get(page1_url)
resp2 = response.content.decode('utf8').split("$$$")[1]
self.assertNotEqual(output, output2)
self.assertNotEqual(resp1, resp2)
plugin_pool.unregister_plugin(NoCachePlugin)
def test_timedelta_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(TimeDeltaCacheExpirationPlugin)
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
# Add *TimeDeltaCacheExpirationPlugin, expires in 45s.
add_plugin(placeholder1, "TimeDeltaCacheExpirationPlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = {
'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
}
with self.settings(**overrides):
page1.publish('en')
request = self.get_request(page1.get_absolute_url())
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 25)): # was 14, 24
response = self.client.get(page1.get_absolute_url())
self.assertTrue('max-age=45' in response['Cache-Control'], response['Cache-Control'])
plugin_pool.unregister_plugin(TimeDeltaCacheExpirationPlugin)
def test_datetime_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
page1_url = page1.get_absolute_url()
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
try:
plugin_pool.register_plugin(DateTimeCacheExpirationPlugin)
except PluginAlreadyRegistered:
pass
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        # Add a DateTimeCacheExpirationPlugin; it expires in 40s, so the page
        # should be cached for at most 40s.
add_plugin(placeholder1, "DateTimeCacheExpirationPlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = {
'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
}
with self.settings(**overrides):
page1.publish('en')
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 25)): # was 14, 24
response = self.client.get(page1_url)
self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control'])
plugin_pool.unregister_plugin(DateTimeCacheExpirationPlugin)
    def test_ttl_cache_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(TTLCacheExpirationPlugin)
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
        # Add a TTLCacheExpirationPlugin; it expires in 50s, so the page
        # should be cached for at most 50s.
add_plugin(placeholder1, "TTLCacheExpirationPlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = {
'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
}
with self.settings(**overrides):
page1.publish('en')
request = self.get_request('/en/')
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 25)): # was 14, 24
response = self.client.get('/en/')
self.assertTrue('max-age=50' in response['Cache-Control'], response['Cache-Control'])
plugin_pool.unregister_plugin(TTLCacheExpirationPlugin)
def test_expiration_cache_plugins(self):
"""
Tests that when used in combination, the page is cached to the
shortest TTL.
"""
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
page1_url = page1.get_absolute_url()
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(TTLCacheExpirationPlugin)
try:
plugin_pool.register_plugin(DateTimeCacheExpirationPlugin)
except PluginAlreadyRegistered:
pass
try:
plugin_pool.register_plugin(NoCachePlugin)
except PluginAlreadyRegistered:
pass
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
# Add *CacheExpirationPlugins, one expires in 50s, the other in 40s.
# The page should expire in the least of these, or 40s.
add_plugin(placeholder1, "TTLCacheExpirationPlugin", 'en')
add_plugin(placeholder2, "DateTimeCacheExpirationPlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = {
'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
}
with self.settings(**overrides):
page1.publish('en')
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 26)):
response = self.client.get(page1_url)
resp1 = response.content.decode('utf8').split("$$$")[1]
self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control']) # noqa
cache_control1 = response['Cache-Control']
expires1 = response['Expires']
time.sleep(1) # This ensures that the cache has aged measurably
# Request it again, this time, it comes from the cache
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
response = self.client.get(page1_url)
resp2 = response.content.decode('utf8').split("$$$")[1]
# Content will be the same
self.assertEqual(resp2, resp1)
# Cache-Control will be different because the cache has aged
self.assertNotEqual(response['Cache-Control'], cache_control1)
# However, the Expires timestamp will be the same
self.assertEqual(response['Expires'], expires1)
plugin_pool.unregister_plugin(TTLCacheExpirationPlugin)
plugin_pool.unregister_plugin(DateTimeCacheExpirationPlugin)
plugin_pool.unregister_plugin(NoCachePlugin)
def test_dual_legacy_cache_plugins(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
page1_url = page1.get_absolute_url()
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(LegacyCachePlugin)
add_plugin(placeholder1, "TextPlugin", 'en', body="English")
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
# Adds a no-cache plugin. In older versions of the CMS, this would
        # prevent the page from being cached, but since this plugin also defines
# get_cache_expiration() it is ignored.
add_plugin(placeholder1, "LegacyCachePlugin", 'en')
# Ensure that we're testing in an environment WITHOUT the MW cache, as
# we are testing the internal page cache, not the MW cache.
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.CacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
overrides = {
'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
}
with self.settings(**overrides):
page1.publish('en')
request = self.get_request(page1_url)
request.current_page = Page.objects.get(pk=page1.pk)
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(FuzzyInt(14, 25)):
response = self.client.get(page1_url)
self.assertTrue('no-cache' not in response['Cache-Control'])
plugin_pool.unregister_plugin(LegacyCachePlugin)
def test_cache_page(self):
# Ensure that we're testing in an environment WITHOUT the MW cache...
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = {
'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
}
with self.settings(**overrides):
# Silly to do these tests if this setting isn't True
page_cache_setting = get_cms_setting('PAGE_CACHE')
self.assertTrue(page_cache_setting)
# Create a test page
page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
page1_url = page1.get_absolute_url()
# Add some content
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
# Create a request object
request = self.get_request(page1_url, 'en')
# Ensure that user is NOT authenticated
self.assertFalse(request.user.is_authenticated)
# Test that the page is initially uncached
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
#
# Test that subsequent requests of the same page are cached by
# asserting that they require fewer queries.
#
with self.assertNumQueries(0):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
#
# Test that the cache is invalidated on unpublishing the page
#
old_version = _get_cache_version()
page1.unpublish('en')
self.assertGreater(_get_cache_version(), old_version)
#
# Test that this means the page is actually not cached.
#
page1.publish('en')
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
#
# Test that the above behavior is different when CMS_PAGE_CACHE is
# set to False (disabled)
#
with self.settings(CMS_PAGE_CACHE=False):
# Test that the page is initially un-cached
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
#
                # Test that subsequent requests of the same page still require
                # DB access.
#
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
def test_no_page_cache_on_toolbar_edit(self):
with self.settings(CMS_PAGE_CACHE=True):
# Create a test page
page1 = create_page('test page 1', 'nav_playground.html', 'en')
page1_url = page1.get_absolute_url()
# Add some content
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
# Publish
page1.publish('en')
# Set edit mode
session = self.client.session
session['cms_edit'] = True
session.save()
# Make an initial ?edit request
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
# Disable edit mode
session = self.client.session
session['cms_edit'] = False
session.save()
# Set the cache
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
# Assert cached content was used
with self.assertNumQueries(0):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
# Set edit mode once more
session = self.client.session
session['cms_edit'] = True
session.save()
# Assert no cached content was used
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get('{}?edit'.format(page1_url))
self.assertEqual(response.status_code, 200)
def test_invalidate_restart(self):
# Ensure that we're testing in an environment WITHOUT the MW cache...
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = {
'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
}
with self.settings(**overrides):
# Silly to do these tests if this setting isn't True
page_cache_setting = get_cms_setting('PAGE_CACHE')
self.assertTrue(page_cache_setting)
# Create a test page
page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
page1_url = page1.get_absolute_url()
# Add some content
placeholder = page1.placeholders.filter(slot="body")[0]
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
# Create a request object
request = self.get_request(page1.get_path(), 'en')
# Ensure that user is NOT authenticated
self.assertFalse(request.user.is_authenticated)
# Test that the page is initially uncached
with self.assertNumQueries(FuzzyInt(1, 24)):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
#
# Test that subsequent requests of the same page are cached by
# asserting that they require fewer queries.
#
with self.assertNumQueries(0):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
old_plugins = plugin_pool.plugins
plugin_pool.clear()
plugin_pool.discover_plugins()
plugin_pool.plugins = old_plugins
with self.assertNumQueries(FuzzyInt(1, 20)):
response = self.client.get(page1_url)
self.assertEqual(response.status_code, 200)
def test_sekizai_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(SekizaiPlugin)
add_plugin(placeholder1, "SekizaiPlugin", 'en')
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
page1.publish('en')
response = self.client.get(page1.get_absolute_url())
self.assertContains(response, 'alert(')
response = self.client.get(page1.get_absolute_url())
self.assertContains(response, 'alert(')
def test_cache_invalidation(self):
# Ensure that we're testing in an environment WITHOUT the MW cache...
exclude = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
overrides = {
'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
}
with self.settings(**overrides):
# Silly to do these tests if this setting isn't True
page_cache_setting = get_cms_setting('PAGE_CACHE')
self.assertTrue(page_cache_setting)
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
page1_url = page1.get_absolute_url()
placeholder = page1.placeholders.get(slot="body")
add_plugin(placeholder, "TextPlugin", 'en', body="First content")
page1.publish('en')
response = self.client.get(page1_url)
self.assertContains(response, 'First content')
response = self.client.get(page1_url)
self.assertContains(response, 'First content')
add_plugin(placeholder, "TextPlugin", 'en', body="Second content")
page1.publish('en')
response = self.client.get(page1_url)
self.assertContains(response, 'Second content')
def test_render_placeholder_cache(self):
"""
Regression test for #4223
Assert that placeholder cache is cleared correctly when a plugin is saved
"""
invalidate_cms_page_cache()
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
ph1 = ex.placeholder
###
# add the test plugin
##
test_plugin = add_plugin(ph1, "TextPlugin", "en", body="Some text")
test_plugin.save()
request = self.get_request()
content_renderer = self.get_content_renderer(request)
# asserting initial text
context = SekizaiContext()
context['request'] = self.get_request()
text = content_renderer.render_placeholder(ph1, context)
self.assertEqual(text, "Some text")
# deleting local plugin cache
del ph1._plugins_cache
test_plugin.body = 'Other text'
test_plugin.save()
        # plugin text has changed, so the placeholder now renders the new content
text = content_renderer.render_placeholder(ph1, context)
self.assertEqual(text, "Other text")
def test_render_placeholderfield_cache_in_custom_model(self):
"""
Regression test for #6912
        Assert that the placeholder of a PlaceholderField in a custom model has
        its cache cleared correctly when mark_as_dirty is called in the admin.
"""
invalidate_cms_page_cache()
# Create an instance of a custom model containing a placeholderfield
ex = Example1(char_1="one", char_2="two", char_3="tree", char_4="four")
ex.save()
ph1 = ex.placeholder
# Add a first plugin
test_plugin = add_plugin(ph1, "TextPlugin", "en", body="Some text")
test_plugin.save()
# Create a first request using render_placeholder to ensure that the content is equal to the first plugin content
request = self.get_request()
content_renderer = self.get_content_renderer(request)
context = SekizaiContext()
context["request"] = self.get_request()
text = content_renderer.render_placeholder(ph1, context, use_cache=True)
self.assertEqual(text, "Some text")
# Add a second plugin in the placeholder
test_plugin = add_plugin(ph1, "TextPlugin", "en", body="Some other text")
test_plugin.save()
# Clear plugins cache to ensure that cms.utils.plugins.get_plugins() will refetch the plugins
del ph1._plugins_cache
# Create a second request using render_placeholder to ensure that the content is still equal to the first plugin content as cache was not cleared yet
request = self.get_request()
content_renderer = self.get_content_renderer(request)
context = SekizaiContext()
context["request"] = self.get_request()
text = content_renderer.render_placeholder(ph1, context, use_cache=True)
self.assertEqual(text, "Some text")
# Mark placeholder as dirty as it is done in cms.admin.placeholderadmin file
ph1.mark_as_dirty("en", clear_cache=False)
# Create a last request to ensure that rendered content contains the two plugins content
request = self.get_request()
content_renderer = self.get_content_renderer(request)
context = SekizaiContext()
context["request"] = self.get_request()
text = content_renderer.render_placeholder(ph1, context, use_cache=True)
self.assertEqual(text, "Some textSome other text")
class PlaceholderCacheTestCase(CMSTestCase):
def setUp(self):
from django.core.cache import cache
super().setUp()
cache.clear()
self.page = create_page(
'en test page', 'nav_playground.html', 'en', published=True)
# Now create and publish as 'de' title
create_title('de', "de test page", self.page)
self.page.publish('de')
self.placeholder = self.page.placeholders.filter(slot="body")[0]
plugin_pool.register_plugin(VaryCacheOnPlugin)
add_plugin(self.placeholder, 'TextPlugin', 'en', body='English')
add_plugin(self.placeholder, 'TextPlugin', 'de', body='Deutsch')
add_plugin(self.placeholder, 'VaryCacheOnPlugin', 'en')
add_plugin(self.placeholder, 'VaryCacheOnPlugin', 'de')
self.en_request = self.get_request('/en/')
self.en_request.current_page = Page.objects.get(pk=self.page.pk)
self.en_us_request = self.get_request('/en/')
self.en_us_request.META['HTTP_COUNTRY_CODE'] = 'US'
self.en_uk_request = self.get_request('/en/')
self.en_uk_request.META['HTTP_COUNTRY_CODE'] = 'UK'
self.de_request = self.get_request('/de/')
self.de_request.current_page = Page.objects.get(pk=self.page.pk)
def tearDown(self):
from django.core.cache import cache
super().tearDown()
plugin_pool.unregister_plugin(VaryCacheOnPlugin)
cache.clear()
def test_get_placeholder_cache_version_key(self):
cache_version_key = '{prefix}|placeholder_cache_version|id:{id}|lang:{lang}|site:{site}'.format(
prefix=get_cms_setting('CACHE_PREFIX'),
id=self.placeholder.pk,
lang='en',
site=1,
)
self.assertEqual(
_get_placeholder_cache_version_key(self.placeholder, 'en', 1),
cache_version_key
)
def test_set_clear_get_placeholder_cache_version(self):
initial, _ = _get_placeholder_cache_version(self.placeholder, 'en', 1)
clear_placeholder_cache(self.placeholder, 'en', 1)
version, _ = _get_placeholder_cache_version(self.placeholder, 'en', 1)
self.assertGreater(version, initial)
def test_get_placeholder_cache_key(self):
version, vary_on_list = _get_placeholder_cache_version(self.placeholder, 'en', 1)
desired_key = '{prefix}|render_placeholder|id:{id}|lang:{lang}|site:{site}|tz:{tz}|v:{version}|country-code:{cc}'.format( # noqa
prefix=get_cms_setting('CACHE_PREFIX'),
id=self.placeholder.pk,
lang='en',
site=1,
tz=get_timezone_name(),
version=version,
cc='_',
)
_set_placeholder_cache_version(self.placeholder, 'en', 1, version, vary_on_list=vary_on_list, duration=1)
actual_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_request)
self.assertEqual(actual_key, desired_key)
en_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_request)
de_key = _get_placeholder_cache_key(self.placeholder, 'de', 1, self.de_request)
self.assertNotEqual(en_key, de_key)
en_us_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_us_request)
self.assertNotEqual(en_key, en_us_key)
desired_key = '{prefix}|render_placeholder|id:{id}|lang:{lang}|site:{site}|tz:{tz}|v:{version}|country-code:{cc}'.format( # noqa
prefix=get_cms_setting('CACHE_PREFIX'),
id=self.placeholder.pk,
lang='en',
site=1,
tz=get_timezone_name(),
version=version,
cc='US',
)
self.assertEqual(en_us_key, desired_key)
def test_set_get_placeholder_cache(self):
# Test with a super-long prefix
en_renderer = self.get_content_renderer(self.en_request)
en_context = Context({
'request': self.en_request,
})
en_us_renderer = self.get_content_renderer(self.en_us_request)
en_us_context = Context({
'request': self.en_us_request,
})
en_uk_renderer = self.get_content_renderer(self.en_uk_request)
en_uk_context = Context({
'request': self.en_uk_request,
})
en_content = en_renderer.render_placeholder(self.placeholder, en_context, 'en', width=350)
en_us_content = en_us_renderer.render_placeholder(self.placeholder, en_us_context, 'en', width=350)
en_uk_content = en_uk_renderer.render_placeholder(self.placeholder, en_uk_context, 'en', width=350)
del self.placeholder._plugins_cache
de_renderer = self.get_content_renderer(self.de_request)
de_context = Context({
'request': self.de_request,
})
de_content = de_renderer.render_placeholder(self.placeholder, de_context, 'de', width=350)
self.assertNotEqual(en_content, de_content)
set_placeholder_cache(self.placeholder, 'en', 1, en_content, self.en_request)
cached_en_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_request)
self.assertEqual(cached_en_content, en_content)
set_placeholder_cache(self.placeholder, 'de', 1, de_content, self.de_request)
cached_de_content = get_placeholder_cache(self.placeholder, 'de', 1, self.de_request)
self.assertNotEqual(cached_en_content, cached_de_content)
set_placeholder_cache(self.placeholder, 'en', 1, en_us_content, self.en_us_request)
cached_en_us_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_us_request)
self.assertNotEqual(cached_en_content, cached_en_us_content)
set_placeholder_cache(self.placeholder, 'en', 1, en_uk_content, self.en_uk_request)
cached_en_uk_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_uk_request)
self.assertNotEqual(cached_en_us_content, cached_en_uk_content)
def test_set_get_placeholder_cache_with_long_prefix(self):
"""
This is for testing that everything continues to work even when the
cache-keys are hashed.
"""
# Use an absurdly long cache prefix to get us in the right neighborhood...
with self.settings(CMS_CACHE_PREFIX="super_lengthy_prefix" * 9): # 180 chars
en_crazy_request = self.get_request('/en/')
en_crazy_renderer = self.get_content_renderer(self.de_request)
# Use a ridiculously long "country code" (80 chars), already we're at 260 chars.
en_crazy_request.META['HTTP_COUNTRY_CODE'] = 'US' * 40 # 80 chars
en_crazy_context = Context({'request': en_crazy_request})
en_crazy_content = en_crazy_renderer.render_placeholder(
self.placeholder,
en_crazy_context,
language='en',
width=350,
)
set_placeholder_cache(self.placeholder, 'en', 1, en_crazy_content, en_crazy_request)
# Prove that it is hashed...
crazy_cache_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, en_crazy_request)
key_length = len(crazy_cache_key)
# 221 = 180 (prefix length) + 1 (separator) + 40 (sha1 hash)
self.assertTrue('render_placeholder' not in crazy_cache_key and key_length == 221)
# Prove it still works as expected
cached_en_crazy_content = get_placeholder_cache(self.placeholder, 'en', 1, en_crazy_request)
self.assertEqual(en_crazy_content, cached_en_crazy_content)
avg_line_length: 44.461182 | max_line_length: 157 | alphanum_fraction: 0.634454

hexsha: 58f122dfaac48ac475ce17fa824162918989cc0b | size: 1,944 | ext: py | lang: Python
max_stars_repo: jmolinski/traktpy @ e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | path: tests/inferfaces_tests/test_misc.py | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: jmolinski/traktpy @ e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | path: tests/inferfaces_tests/test_misc.py | licenses: ["MIT"] | max_issues_count: 1 | issues events: 2019-04-13T10:15:48.000Z to 2019-04-13T10:15:48.000Z
max_forks_repo: jmolinski/traktpy @ e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | path: tests/inferfaces_tests/test_misc.py | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
import pytest
from tests.test_data.certifications import CERTIFICATIONS
from tests.test_data.countries import COUNTRIES
from tests.test_data.genres import GENRES
from tests.test_data.languages import LANGUAGES
from tests.test_data.lists import TRENDING_LISTS
from tests.test_data.networks import NETWORKS
from tests.utils import mk_mock_client
from trakt.core.exceptions import ArgumentError
def test_countries():
client = mk_mock_client({r".*countries.*": [COUNTRIES, 200]})
with pytest.raises(ArgumentError):
client.countries.get_countries(type="qwerty")
countries = client.countries.get_countries(type="shows")
assert countries[0].code == COUNTRIES[0]["code"]
def test_certifications():
client = mk_mock_client({r".*certifications.*": [CERTIFICATIONS, 200]})
with pytest.raises(ArgumentError):
client.certifications.get_certifications(type="qwerty")
certifications = client.certifications.get_certifications(type="shows")
assert certifications[0].slug == CERTIFICATIONS["us"][0]["slug"]
def test_genres():
client = mk_mock_client({r".*genres.*": [GENRES, 200]})
genres = client.genres.get_genres(type="shows")
assert genres[0].name == GENRES[0]["name"]
def test_languages():
client = mk_mock_client({r".*languages.*": [LANGUAGES, 200]})
languages = client.languages.get_languages(type="movies")
assert languages[0].name == LANGUAGES[0]["name"]
def test_lists():
resp = [TRENDING_LISTS, 200, {"X-Pagination-Page-Count": 1}]
client = mk_mock_client({r".*lists/(trending|popular).*": resp})
tre = list(client.lists.get_trending())
pop = list(client.lists.get_popular())
assert tre[0].like_count == pop[0].like_count == TRENDING_LISTS[0]["like_count"]
def test_networks():
client = mk_mock_client({r".*networks.*": [NETWORKS, 200]})
networks = client.networks.get_networks()
assert networks[0].name == NETWORKS[0]["name"]
avg_line_length: 32.949153 | max_line_length: 84 | alphanum_fraction: 0.721193

hexsha: e9bba821d98eb0fc08d3dede3c1b79aabf62a3e7 | size: 7,197 | ext: py | lang: Python
max_stars_repo: joekohlsdorf/docusign-esign-python-client @ 40407544f79c88716d36fabf36f65c3ef1a5c3ba | path: docusign_esign/models/connect_failure_result.py | licenses: ["MIT"] | max_stars_count: 58 | stars events: 2017-10-18T23:06:57.000Z to 2021-04-15T23:14:58.000Z
max_issues_repo: joekohlsdorf/docusign-esign-python-client @ 40407544f79c88716d36fabf36f65c3ef1a5c3ba | path: docusign_esign/models/connect_failure_result.py | licenses: ["MIT"] | max_issues_count: 49 | issues events: 2017-10-27T05:54:09.000Z to 2021-04-29T22:06:17.000Z
max_forks_repo: joekohlsdorf/docusign-esign-python-client @ 40407544f79c88716d36fabf36f65c3ef1a5c3ba | path: docusign_esign/models/connect_failure_result.py | licenses: ["MIT"] | max_forks_count: 49 | forks events: 2017-09-16T07:23:41.000Z to 2021-05-07T20:21:20.000Z
content:
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ConnectFailureResult(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'config_id': 'str',
'config_url': 'str',
'envelope_id': 'str',
'status': 'str',
'status_message': 'str'
}
attribute_map = {
'config_id': 'configId',
'config_url': 'configUrl',
'envelope_id': 'envelopeId',
'status': 'status',
'status_message': 'statusMessage'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ConnectFailureResult - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._config_id = None
self._config_url = None
self._envelope_id = None
self._status = None
self._status_message = None
self.discriminator = None
setattr(self, "_{}".format('config_id'), kwargs.get('config_id', None))
setattr(self, "_{}".format('config_url'), kwargs.get('config_url', None))
setattr(self, "_{}".format('envelope_id'), kwargs.get('envelope_id', None))
setattr(self, "_{}".format('status'), kwargs.get('status', None))
setattr(self, "_{}".format('status_message'), kwargs.get('status_message', None))
@property
def config_id(self):
"""Gets the config_id of this ConnectFailureResult. # noqa: E501
Reserved: TBD # noqa: E501
:return: The config_id of this ConnectFailureResult. # noqa: E501
:rtype: str
"""
return self._config_id
@config_id.setter
def config_id(self, config_id):
"""Sets the config_id of this ConnectFailureResult.
Reserved: TBD # noqa: E501
:param config_id: The config_id of this ConnectFailureResult. # noqa: E501
:type: str
"""
self._config_id = config_id
@property
def config_url(self):
"""Gets the config_url of this ConnectFailureResult. # noqa: E501
Reserved: TBD # noqa: E501
:return: The config_url of this ConnectFailureResult. # noqa: E501
:rtype: str
"""
return self._config_url
@config_url.setter
def config_url(self, config_url):
"""Sets the config_url of this ConnectFailureResult.
Reserved: TBD # noqa: E501
:param config_url: The config_url of this ConnectFailureResult. # noqa: E501
:type: str
"""
self._config_url = config_url
@property
def envelope_id(self):
"""Gets the envelope_id of this ConnectFailureResult. # noqa: E501
The envelope ID of the envelope status that failed to post. # noqa: E501
:return: The envelope_id of this ConnectFailureResult. # noqa: E501
:rtype: str
"""
return self._envelope_id
@envelope_id.setter
def envelope_id(self, envelope_id):
"""Sets the envelope_id of this ConnectFailureResult.
The envelope ID of the envelope status that failed to post. # noqa: E501
:param envelope_id: The envelope_id of this ConnectFailureResult. # noqa: E501
:type: str
"""
self._envelope_id = envelope_id
@property
def status(self):
"""Gets the status of this ConnectFailureResult. # noqa: E501
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:return: The status of this ConnectFailureResult. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ConnectFailureResult.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:param status: The status of this ConnectFailureResult. # noqa: E501
:type: str
"""
self._status = status
@property
def status_message(self):
"""Gets the status_message of this ConnectFailureResult. # noqa: E501
# noqa: E501
:return: The status_message of this ConnectFailureResult. # noqa: E501
:rtype: str
"""
return self._status_message
@status_message.setter
def status_message(self, status_message):
"""Sets the status_message of this ConnectFailureResult.
# noqa: E501
:param status_message: The status_message of this ConnectFailureResult. # noqa: E501
:type: str
"""
self._status_message = status_message
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConnectFailureResult, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConnectFailureResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConnectFailureResult):
return True
return self.to_dict() != other.to_dict()
avg_line_length: 30.888412 | max_line_length: 202 | alphanum_fraction: 0.603724

hexsha: 0872e03ff5f0a9a2901f41108fb862367088ebee | size: 1,158 | ext: py | lang: Python
max_stars_repo: Pacu2/instabot @ f27a95c0821f44f63a616c848ba3564f8eee6107 | path: examples/like_and_follow_your_last_media_likers.py | licenses: ["Apache-2.0"] | max_stars_count: null | stars events: null
max_issues_repo: Pacu2/instabot @ f27a95c0821f44f63a616c848ba3564f8eee6107 | path: examples/like_and_follow_your_last_media_likers.py | licenses: ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo: Pacu2/instabot @ f27a95c0821f44f63a616c848ba3564f8eee6107 | path: examples/like_and_follow_your_last_media_likers.py | licenses: ["Apache-2.0"] | max_forks_count: null | forks events: null
content:
"""
instabot example
Workflow:
Like and follow likers of last medias from your timeline feed.
"""
import sys
import os
import time
import random
from tqdm import tqdm
import argparse
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
def like_and_follow(bot, user_id, nlikes=3):
bot.like_user(user_id, amount=nlikes)
bot.follow(user_id)
return True
def like_and_follow_media_likers(bot, media, nlikes=3):
for user in tqdm(bot.get_media_likers(media), desc="Media likers"):
like_and_follow(bot, user)
time.sleep(10 + 20 * random.random())
return True
def like_and_follow_your_feed_likers(bot, nlikes=3):
last_media = bot.get_your_medias()[0]
return like_and_follow_media_likers(bot, last_media, nlikes=3)
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
like_and_follow_your_feed_likers(bot)
avg_line_length: 24.125 | max_line_length: 71 | alphanum_fraction: 0.721934

hexsha: 07883e81cd1e5bf5758f07e5a5d6e9c841e3ce6d | size: 213 | ext: py | lang: Python
max_stars_repo: vicwjb/Pycad @ 7391cd694b7a91ad9f9964ec95833c1081bc1f84 | path: extensions/.stubs/pycadsys/pycad/runtime/__init__.py | licenses: ["MIT"] | max_stars_count: 1 | stars events: 2020-03-25T03:27:24.000Z to 2020-03-25T03:27:24.000Z
max_issues_repo: vicwjb/Pycad @ 7391cd694b7a91ad9f9964ec95833c1081bc1f84 | path: extensions/.stubs/pycadsys/pycad/runtime/__init__.py | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: vicwjb/Pycad @ 7391cd694b7a91ad9f9964ec95833c1081bc1f84 | path: extensions/.stubs/pycadsys/pycad/runtime/__init__.py | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
__all__ = [
'upopen', 'cs', 'dbdict', 'dbtrans',
'serializable', 'utils', 'edx', 'gex']
from pycad.runtime.wraps import dbtrans, upopen, cs, dbdict, serializable
from pycad.runtime import utils, edx, gex
avg_line_length: 35.5 | max_line_length: 73 | alphanum_fraction: 0.676056

hexsha: 27f1d0152d91b30341d1fa77824cd64f6a11267e | size: 600 | ext: py | lang: Python
max_stars_repo: cpvb13/cal-hack-5-proj @ 13e31fff3f56b57030c34147b04cef1d6309c62b | path: Desktop/cs61a/hw/hw01/quiz/quiz01.py | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: cpvb13/cal-hack-5-proj @ 13e31fff3f56b57030c34147b04cef1d6309c62b | path: Desktop/cs61a/hw/hw01/quiz/quiz01.py | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: cpvb13/cal-hack-5-proj @ 13e31fff3f56b57030c34147b04cef1d6309c62b | path: Desktop/cs61a/hw/hw01/quiz/quiz01.py | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
def multiple(a, b):
"""Return the smallest number n that is a multiple of both a and b.
>>> multiple(3, 4)
12
>>> multiple(14, 21)
42
"""
"*** YOUR CODE HERE ***"
def unique_digits(n):
"""Return the number of unique digits in positive integer n
>>> unique_digits(8675309) # All are unique
7
>>> unique_digits(1313131) # 1 and 3
2
>>> unique_digits(13173131) # 1, 3, and 7
3
>>> unique_digits(10000) # 0 and 1
2
>>> unique_digits(101) # 0 and 1
2
>>> unique_digits(10) # 0 and 1
2
"""
"*** YOUR CODE HERE ***"
avg_line_length: 21.428571 | max_line_length: 71 | alphanum_fraction: 0.551667

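The doctests in quiz01.py above pin down the expected behavior of both functions. A minimal sketch of one possible solution follows; it is an illustration only (the file intentionally leaves the bodies as "*** YOUR CODE HERE ***", and this is not the student's submission):

```python
from math import gcd

def multiple(a, b):
    """Smallest number that is a multiple of both a and b (their lcm)."""
    return a * b // gcd(a, b)

def unique_digits(n):
    """Count the distinct digits appearing in the positive integer n."""
    return len(set(str(n)))

# multiple(14, 21) -> 42 and unique_digits(13173131) -> 3, matching the doctests.
```

Using math.gcd keeps multiple exact for arbitrarily large integers, and converting n to a string reduces the digit count to a one-liner.
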
hexsha: 646416ca7364a767ed72fe492a7b5e12219fb3d0 | size: 8,270 | ext: py | lang: Python
max_stars_repo: jcbird/ppv @ d550f4fff9cb0309d43b0d51e1406355ee0231be | path: src/ppv/util/paths.py | licenses: ["BSD-3-Clause"] | max_stars_count: 1 | stars events: 2020-10-09T08:19:35.000Z to 2020-10-09T08:19:35.000Z
max_issues_repo: jcbird/ppv @ d550f4fff9cb0309d43b0d51e1406355ee0231be | path: src/ppv/util/paths.py | licenses: ["BSD-3-Clause"] | max_issues_count: 19 | issues events: 2020-09-25T23:33:53.000Z to 2021-03-12T22:28:16.000Z
max_forks_repo: jcbird/ppv @ d550f4fff9cb0309d43b0d51e1406355ee0231be | path: src/ppv/util/paths.py | licenses: ["BSD-3-Clause"] | max_forks_count: null | forks events: null
content:
"""
Utility module
"""
from .. import config
from pathlib import Path
import os
import fnmatch
def platePlans_par():
return config.plate_dir / 'platePlans.par'
def plate_plans():
return config.plate_dir / 'platePlans_sdss5.fits'
def platenum_as_str(platenum):
"""String representation of platenumber with leading zeros if necessary.
Parameters
----------
platenum : int
Number of plate
Returns
-------
str
String representation of plate number.
"""
return '{:06d}'.format(platenum)
def plate_batch(platenum):
"""
Given platenumber, get the path to the directory
containing the platenum directory; e.g., '0150XX' given
15020 as input.
Parameters
----------
platenum : str
Number of plate (string to include leading zeros)
"""
# Turns 15020 into '0150XX'
batch_num = f'{platenum_as_str(platenum)[:-2]}XX'
return config.plate_dir / batch_num
def plate(platenum):
"""Given platenumber, get the directory name containing
the plate files.
Parameters
----------
platenum : str
Number of plate (string to include leading zeros)
"""
return plate_batch(platenum) / platenum_as_str(platenum)
def plateholes_file(platenum):
"""string representation of plateHoles files with correct formatting.
Parameters
----------
platenum :
Number of plate (string to include leading zeros)
"""
return 'plateHoles-{}.par'.format(platenum_as_str(platenum))
def plateholes(platenum):
"""gets path plateholes file.
Parameters
----------
platenum : str
Number of plate (string to include leading zeros)
"""
filename = plateholes_file(platenum)
return plate(platenum) / filename
# five_plates
##############
def fiveplates_description():
"""
path to description file in five_plates repo.
"""
description_file = 'plateruns_description.txt'
return config.fiveplates_dir / description_file
def _five_plates_relpaths():
tree_ = os.walk(config.fiveplates_dir)
dirs_ = [Path(root_dir) for (root_dir, _, _) in tree_]
relpaths = [dir_.relative_to(config.fiveplates_dir) for dir_ in dirs_
if dir_.name.endswith('m')] # only keep in '(m)apper'
return relpaths
def _five_plates_available_plateruns():
relpaths = _five_plates_relpaths()
return [relpath.name for relpath in relpaths]
def fiveplates_platerun(platerun):
"""
gets directory of platerun in five_plates repo.
Parameters
----------
platerun : str
identifier of platerun, e.g. '2020.08.x.mwm-bhm'
"""
return config.fiveplates_dir / platerun
def fp_files(platerun):
"""
get list of files in a five_plates platerun directory.
Useful for fuzzyish file finding.
"""
return os.listdir(fiveplates_platerun(platerun))
def fp_platedata(platerun):
"""
path to summary file in five_plates repo.
Parameters
----------
platerun : str
identifier of platerun, e.g. '2020.08.x.mwm-bhm'
"""
_guess = f'plate_data_{platerun}*.txt'
pd_file_s = filter(lambda F: fnmatch.fnmatch(F, _guess),
fp_files(platerun))
pd_files = list(pd_file_s)
if len(pd_files) == 0: # no plate data yet
_message = f'''\
Unable to load fiveplates plate data file for:
{platerun}.
Please IGNORE this warning UNLESS:
You need to access fiveplates data for this platerun,
please confirm the plate_data file exists and
perform a fresh pull of five_plates.
'''
print(_message)
return None
else:
if len(pd_files) == 1:
pd_file = pd_files[0]
else:
pd_file = list(filter(lambda F: 'initial' in F, pd_files))[0]
return fiveplates_platerun(platerun) / pd_file
def fp_defaultparams(platerun):
"""
path to default parameter file in five_plates repo.
One for each platerun
Parameters
----------
platerun : str
identifier of platerun, e.g. '2020.08.x.mwm-bhm'
"""
param_file = f'{platerun}_default_parameters.txt'
return fiveplates_platerun(platerun) / param_file
def fiveplates_summary(platerun):
"""
path to summary file in five_plates repo.
Parameters
----------
platerun : str
identifier of platerun, e.g. '2020.08.x.mwm-bhm'
"""
summary_file = f'plate_data_{platerun}.txt'
return fiveplates_platerun(platerun) / summary_file
def fiveplates_cartons(platerun, version='v6'):
"""
path to cartons file in five_plates repo.
Parameters
----------
platerun : str
identifier of platerun, e.g. '2020.08.x.mwm-bhm'
"""
cartons_file = f'cartons_list.{version}.txt'
return fiveplates_platerun(platerun) / cartons_file
def fiveplates_priority(platerun, filling_scheme):
"""
    path to the fiber-filling priority (order) file in the five_plates repo.
Parameters
----------
platerun : str
identifier of platerun, e.g. '2020.08.x.mwm-bhm'
filling_scheme : str
FiberFilling column in fiveplates_cartons file, e.g., 'MWM_30min'
"""
priority_file = f'{filling_scheme}_order.txt'
return fiveplates_platerun(platerun) / priority_file
def fiveplates_targetlists(platerun):
"""
path to zip file containing targetlists in five_plates repo.
Parameters
----------
platerun : str
identifier of platerun, e.g. '2020.08.x.mwm-bhm'
"""
target_files = f'{platerun}_targetlists.zip'
return fiveplates_platerun(platerun) / target_files
def fp_field_designID_str(field, designID):
return f'{field}_des{designID}'
def fp_field_designID_dir(field, designID):
return f'targetlists/{fp_field_designID_str(field, designID)}'
def fiveplates_platedef(field, designID):
"""
path to plate definition file WITHIN targetlists zip file.
"""
pre_ = 'targetlists'
# platenum_as_str also works for designIDs, just zero-padding to 6 digits
pldef_file = f'plateDefinition-{platenum_as_str(designID)}.txt'
return f'{pre_}/{fp_field_designID_str(field, designID)}/{pldef_file}'
def fiveplates_fieldfiles(platerun):
"""
path to zip file containing fields_files in five_plates repo.
Parameters
----------
platerun : str
identifier of platerun, e.g. '2020.08.x.mwm-bhm'
    field_files is either 'field_files' or 'design_files'
"""
field_files = f'{platerun}_field_files.zip'
return fiveplates_platerun(platerun) / field_files
def fiveplates_clean_field_file(field):
"""
string representation of targets_clean file for field within
fiveplates_field_files zip file.
Parameters
----------
field : str
identifier of field, e.g. 'GG_010'
"""
return f'{field}_targets_clean.txt'
def fiveplates_field_file(field):
"""
string representation of targets.txt file for field within
fiveplates_field_files zip file.
Parameters
----------
field : str
identifier of field, e.g. 'GG_010'
"""
return f'{field}_targets.txt'
def fiveplates_designfiles(platerun):
"""
path to zip file containing designs_files in five_plates repo.
Parameters
----------
platerun : str
identifier of platerun, e.g. '2020.08.x.mwm-bhm'
    design_files is either 'field_files' or 'design_files'
"""
design_files = f'{platerun}_design_files.zip'
return fiveplates_platerun(platerun) / design_files
def fiveplates_clean_design_file(field, designID):
"""
string representation of targets_clean file for field within
fiveplates_field_files zip file.
Parameters
----------
field : str
identifier of field, e.g. 'GG_010'
"""
return f'{field}_des{designID}_targets_clean.txt'
def fiveplates_design_file(field, designID):
"""
string representation of targets file for field within
fiveplates_design_files zip file.
Parameters
----------
field : str
identifier of field, e.g. 'GG_010'
"""
return f'{field}_des{designID}_targets.txt'
avg_line_length: 25.68323 | max_line_length: 77 | alphanum_fraction: 0.648126

hexsha: 19f393c4f2b7298941041ecb402ab9f6a2d2b33f | size: 8,717 | ext: py | lang: Python
max_stars_repo: skanav/cst_transform @ 361a23293cf0359af7a7d17cf465483ffe4e7545 | path: ase20_supplementary/webui/serving.py | licenses: ["Apache-2.0"] | max_stars_count: null | stars events: null
max_issues_repo: skanav/cst_transform @ 361a23293cf0359af7a7d17cf465483ffe4e7545 | path: ase20_supplementary/webui/serving.py | licenses: ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo: skanav/cst_transform @ 361a23293cf0359af7a7d17cf465483ffe4e7545 | path: ase20_supplementary/webui/serving.py | licenses: ["Apache-2.0"] | max_forks_count: 1 | forks events: 2021-07-02T16:04:14.000Z to 2021-07-02T16:04:14.000Z
content:
import argparse
from flask import Flask, abort, request
from flask_restful import Resource, Api
import os
import uuid
import traceback
import threading
import json
from glob import glob
from pycparser import c_ast
from pycparserext import ext_c_generator
from pycparser import preprocess_file
from pycparserext.ext_c_parser import GnuCParser, FuncDeclExt
from model.run_predict import run_predict
import re
def stripComments(code):
code = str(code)
    # strip whole-line C++-style comments; re.M makes ^ match at every line start
    return re.sub(r'^ *//.*\n?', '', code, flags=re.M)
app = Flask(__name__, static_url_path="/static/")
api = Api(app)
allowed_dirs = set(['js', 'css', 'semantic'])
threads = {}
checkpoints = {
'bmc-ki': ['../checkpoints/bmc', '../labels/tool_order_bmc-ki.json'],
'sc': ['../checkpoints/compositions', '../labels/tool_order_sc.json'],
'algorithms': ['../checkpoints/algorithms', '../labels/tool_order_algorithms.json'],
'tools': ['../checkpoints/tools', '../labels/tool_order_tools.json']
}
def preprocess(path):
text = preprocess_file(path)
cparser = GnuCParser()
ast = cparser.parse(text, path)
generator = ext_c_generator.GnuCGenerator()
with open(path, "w") as o:
o.write(generator.visit(ast))
return ast
def get_funcs(ast):
functions = []
has_init = False
for decl in ast.ext:
if isinstance(decl, c_ast.FuncDef):
func_def = decl.decl.name
functions.append(func_def)
elif isinstance(decl, c_ast.Decl) and isinstance(decl.type, FuncDeclExt):
func_def = decl.name
functions.append(func_def)
else:
has_init = True
#if has_init:
functions = ['__init__'] + functions
return functions
def index_main(keys, strict=False):
for i, k in enumerate(keys):
if (strict and k == 'main') or (not strict and 'main' in k.lower()):
return i
return 1
def locate_funcs(keys, funcs):
out = {}
#align init
if funcs[0] == '__init__':
out[keys[0]] = funcs[0]
keys = keys[1:]
funcs = funcs[1:]
kmain = index_main(keys)
fmain = index_main(funcs, strict=True)
#align
left = fmain
right = len(funcs) - fmain
p = 0
for i in range(max(0, kmain-left), min(len(keys), kmain+right)):
out[keys[i]] = funcs[p]
p += 1
return out
def match_att(att_file, ast):
with open(att_file, "r") as i:
attention = json.load(i)
output = {}
funcs = get_funcs(ast)
attention = {
k: v for k , v in attention.items() if 'NOOP' not in k and\
'prediction' not in k
}
func_map = locate_funcs(list(attention.keys()), funcs)
for k, func_name in func_map.items():
output[func_name] = attention[k]
with open(att_file, "w") as o:
json.dump(output, o, indent=4)
def run_clf_predict(pred_dir, req_file):
base = os.path.abspath(__file__)
base = os.path.dirname(base)
check_cfg = checkpoints[checkpoint]
path = os.path.join(base, check_cfg[0])
att_file = os.path.join(pred_dir, 'attention.json')
ast = preprocess(req_file)
index = os.path.join(base, "../resources/token_clang.json")
tool_index = os.path.join(base, check_cfg[1])
P = run_predict(path, req_file, att_file, indexer_path=index,
tools=tool_index)
P = [X[0] for X in sorted(P.items(), key=lambda X: X[1], reverse=True)]
P = [P[0]]
match_att(att_file, ast)
with open(os.path.join(pred_dir, "prediction.json"), "w") as o:
json.dump(P, o)
return P, ast
def to_func_ast(ast):
return c_ast.FileAST(ast)
def to_func(ast):
functions = {}
for decl in ast.ext:
func_def = "__init__"
if isinstance(decl, c_ast.FuncDef):
func_def = decl.decl.name
if func_def not in functions:
functions[func_def] = []
functions[func_def].append(decl)
for k in list(functions):
functions[k] = to_func_ast(functions[k])
return functions
def start_prediction(id, pred_dir, req_file):
    try:
        P, ast = run_clf_predict(pred_dir, req_file)
        F = to_func(ast)
        generator = ext_c_generator.GnuCGenerator()
        for name, ast in F.items():
            path = os.path.join(pred_dir, name+".c")
            with open(path, "w") as o:
                out = generator.visit(ast)
                o.write(out)
    except Exception:
        # Persist the traceback so PredictionTask.get can report it through the
        # 'exception' file it already checks for.
        with open(os.path.join(pred_dir, 'exception'), "w") as o:
            o.write(traceback.format_exc())
def request_predict(form):
id = str(uuid.uuid4())
path = os.path.join(".", "process", id)
os.makedirs(path)
file_path = os.path.join(path, "file.c")
text = form['data']
text = "\n".join([ll.rstrip() for ll in text.splitlines() if ll.strip()])
text = stripComments(text)
with open(file_path, "w") as o:
o.write(text)
thread = threading.Thread(
target=start_prediction,
args=(id, path, file_path)
)
thread.start()
threads[id] = thread
return id
@app.route("/")
def index():
return app.send_static_file('index.html')
@app.route("/<string:type>/<path:path>")
def send_static(type, path):
if type not in allowed_dirs:
return abort(404)
path = os.path.join(type, path)
path = os.path.normpath(path)
if not path.startswith(type):
return abort(404)
return app.send_static_file(path)
class PredictionTask(Resource):
def get(self, predict_id):
path = os.path.join(".", "process", predict_id)
if not os.path.exists(path):
return abort(404)
exc = os.path.join(path, 'exception')
if os.path.exists(exc):
with open(exc, "r") as i:
return {'exception': i.read(), 'finish': True}
path = os.path.join(".", "process", predict_id)
state = {
'attention': os.path.isfile(os.path.join(path, 'attention.json')),
'pred': os.path.isfile(os.path.join(path, 'prediction.json'))
}
finished = True
for v in state.values():
finished = finished and v
state['finish'] = finished
        if not finished and predict_id not in threads:
            return {'exception': "Seems to be an old request!",
                    'finish': True}
return state
def put(self):
return {'request_id': request_predict(
request.form
)}
class CFileResource(Resource):
def get(self, id, func_name=None):
path = os.path.join(".", "process", id)
if not os.path.exists(path):
return abort(404)
if func_name is None:
func_names = []
for p in glob(os.path.join(path, "*.c")):
b = os.path.basename(p)
if b == 'file.c':
continue
func_names.append(
b.replace(".c", "")
)
return {'functions': func_names}
path = os.path.join(".", "process", id, func_name+".c")
if not os.path.exists(path):
return abort(404)
with open(path, "r") as i:
return i.read()
class AttentionResource(Resource):
def get(self, id):
path = os.path.join(".", "process", id, "attention.json")
if not os.path.isfile(path):
return abort(404)
with open(path, "r") as i:
return json.load(i)
class PredictionResource(Resource):
def get(self, id):
path = os.path.join(".", "process", id, "prediction.json")
if not os.path.isfile(path):
return abort(404)
with open(path, "r") as i:
return json.load(i)
api.add_resource(PredictionTask, '/api/task/',
'/api/task/<string:predict_id>/')
api.add_resource(CFileResource, '/api/cfile/<string:id>/',
'/api/cfile/<string:id>/<string:func_name>/')
api.add_resource(AttentionResource, "/api/attention/<string:id>/")
api.add_resource(PredictionResource, "/api/prediction/<string:id>/")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint", help="Choose a checkpoint from [bmc-ki, algorithms, sc, tools].")
args = parser.parse_args()
checkpoint = args.checkpoint
if checkpoint not in checkpoints:
        print("Checkpoint does not exist: %s" % checkpoint)
print("Choose a checkpoint from [bmc-ki, algorithms, sc, tools].")
exit()
app.run(debug=True)
| 26.256024 | 104 | 0.572215 |
2c0caca1c8507c6e5b2e7ce5684865299b53df38 | 839 | py | Python
| 2017/February/3_countcross/countcross.test.py | alantao5056/USACO_Silver | 6998cb916692af58a0b40b1a4aff0708ee1106b8 | ["MIT"] | null | null | null
| 2017/February/3_countcross/countcross.test.py | alantao5056/USACO_Silver | 6998cb916692af58a0b40b1a4aff0708ee1106b8 | ["MIT"] | null | null | null
| 2017/February/3_countcross/countcross.test.py | alantao5056/USACO_Silver | 6998cb916692af58a0b40b1a4aff0708ee1106b8 | ["MIT"] | null | null | null |
import unittest
from countcross import main
class countcrossTest(unittest.TestCase):
testDataFolder = 'test'
def do_test(self, testNumber):
testFile = self.testDataFolder + "/" + str(testNumber)
main(testFile + ".in", testFile + "_actual.out")
# compare the result
expectedOut = open(testFile + ".out", 'r')
actualOut = open(testFile + "_actual.out", 'r')
expectedLines = expectedOut.readlines()
actualLines = actualOut.readlines()
expectedOut.close()
actualOut.close()
self.assertEqual(actualLines, expectedLines)
def generate_test(testNumber):
def test(self):
self.do_test(testNumber)
return test
if __name__ == '__main__':
for i in range(1, 11):
test_name = 'test_%s' % str(i)
test = generate_test(i)
setattr(countcrossTest, test_name, test)
unittest.main()
| 27.064516 | 58 | 0.68534 |
c06712a98e099d9cfc8cd115af0cd122c4c16d1a | 12,209 | py | Python
| sympy/matrices/tests/test_determinant.py | MartinThoma/sympy | 009d0031bec7222ffa472e52148a2b4e441cd3a5 | ["BSD-3-Clause"] | 2 | 2021-01-09T23:11:25.000Z | 2021-01-11T15:04:22.000Z
| sympy/matrices/tests/test_determinant.py | MartinThoma/sympy | 009d0031bec7222ffa472e52148a2b4e441cd3a5 | ["BSD-3-Clause"] | 2 | 2020-08-18T15:21:59.000Z | 2020-08-18T19:35:29.000Z
| sympy/matrices/tests/test_determinant.py | MartinThoma/sympy | 009d0031bec7222ffa472e52148a2b4e441cd3a5 | ["BSD-3-Clause"] | 3 | 2021-02-16T16:40:49.000Z | 2022-03-07T18:28:41.000Z |
import random
from sympy.core.numbers import I
from sympy import symbols, Symbol, Rational, sqrt, Poly
from sympy.matrices import Matrix, eye, ones
from sympy.abc import x, y, z
from sympy.testing.pytest import raises
from sympy.matrices.matrices import MatrixDeterminant
from sympy.matrices.common import NonSquareMatrixError, _MinimalMatrix, _CastableMatrix
class DeterminantOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixDeterminant):
pass
def test_determinant():
for M in [Matrix(), Matrix([[1]])]:
assert (
M.det() ==
M._eval_det_bareiss() ==
M._eval_det_berkowitz() ==
M._eval_det_lu() ==
1)
M = Matrix(( (-3, 2),
( 8, -5) ))
assert M.det(method="bareiss") == -1
assert M.det(method="berkowitz") == -1
assert M.det(method="lu") == -1
M = Matrix(( (x, 1),
(y, 2*y) ))
assert M.det(method="bareiss") == 2*x*y - y
assert M.det(method="berkowitz") == 2*x*y - y
assert M.det(method="lu") == 2*x*y - y
M = Matrix(( (1, 1, 1),
(1, 2, 3),
(1, 3, 6) ))
assert M.det(method="bareiss") == 1
assert M.det(method="berkowitz") == 1
assert M.det(method="lu") == 1
M = Matrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="bareiss") == -289
assert M.det(method="berkowitz") == -289
assert M.det(method="lu") == -289
M = Matrix(( ( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16) ))
assert M.det(method="bareiss") == 0
assert M.det(method="berkowitz") == 0
assert M.det(method="lu") == 0
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) ))
assert M.det(method="bareiss") == 275
assert M.det(method="berkowitz") == 275
assert M.det(method="lu") == 275
M = Matrix(( ( 3, 0, 0, 0),
(-2, 1, 0, 0),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="bareiss") == 60
assert M.det(method="berkowitz") == 60
assert M.det(method="lu") == 60
M = Matrix(( ( 1, 0, 0, 0),
( 5, 0, 0, 0),
( 9, 10, 11, 0),
(13, 14, 15, 16) ))
assert M.det(method="bareiss") == 0
assert M.det(method="berkowitz") == 0
assert M.det(method="lu") == 0
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(0, 0, 0, 0, 3) ))
assert M.det(method="bareiss") == 243
assert M.det(method="berkowitz") == 243
assert M.det(method="lu") == 243
M = Matrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) ))
assert M.det(method="bareiss") == -55
assert M.det(method="berkowitz") == -55
assert M.det(method="lu") == -55
M = Matrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) ))
assert M.det(method="bareiss") == 11664
assert M.det(method="berkowitz") == 11664
assert M.det(method="lu") == 11664
M = Matrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) ))
assert M.det(method="bareiss") == 123
assert M.det(method="berkowitz") == 123
assert M.det(method="lu") == 123
M = Matrix(( (x, y, z),
(1, 0, 0),
(y, z, x) ))
assert M.det(method="bareiss") == z**2 - x*y
assert M.det(method="berkowitz") == z**2 - x*y
assert M.det(method="lu") == z**2 - x*y
# issue 13835
a = symbols('a')
M = lambda n: Matrix([[i + a*j for i in range(n)]
for j in range(n)])
assert M(5).det() == 0
assert M(6).det() == 0
assert M(7).det() == 0
def test_issue_14517():
M = Matrix([
[ 0, 10*I, 10*I, 0],
[10*I, 0, 0, 10*I],
[10*I, 0, 5 + 2*I, 10*I],
[ 0, 10*I, 10*I, 5 + 2*I]])
ev = M.eigenvals()
# test one random eigenvalue, the computation is a little slow
test_ev = random.choice(list(ev.keys()))
assert (M - test_ev*eye(4)).det() == 0
def test_legacy_det():
# Minimal support for legacy keys for 'method' in det()
# Partially copied from test_determinant()
M = Matrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="bareis") == -289
assert M.det(method="det_lu") == -289
assert M.det(method="det_LU") == -289
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) ))
assert M.det(method="bareis") == 275
assert M.det(method="det_lu") == 275
assert M.det(method="Bareis") == 275
M = Matrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) ))
assert M.det(method="bareis") == -55
assert M.det(method="det_lu") == -55
assert M.det(method="BAREISS") == -55
M = Matrix(( ( 3, 0, 0, 0),
(-2, 1, 0, 0),
( 0, -2, 5, 0),
( 5, 0, 3, 4) ))
assert M.det(method="bareiss") == 60
assert M.det(method="berkowitz") == 60
assert M.det(method="lu") == 60
M = Matrix(( ( 1, 0, 0, 0),
( 5, 0, 0, 0),
( 9, 10, 11, 0),
(13, 14, 15, 16) ))
assert M.det(method="bareiss") == 0
assert M.det(method="berkowitz") == 0
assert M.det(method="lu") == 0
M = Matrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(0, 0, 0, 0, 3) ))
assert M.det(method="bareiss") == 243
assert M.det(method="berkowitz") == 243
assert M.det(method="lu") == 243
M = Matrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) ))
assert M.det(method="bareis") == 11664
assert M.det(method="det_lu") == 11664
assert M.det(method="BERKOWITZ") == 11664
M = Matrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) ))
assert M.det(method="bareis") == 123
assert M.det(method="det_lu") == 123
assert M.det(method="LU") == 123
def eye_Determinant(n):
return DeterminantOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Determinant(n):
return DeterminantOnlyMatrix(n, n, lambda i, j: 0)
def test_det():
a = DeterminantOnlyMatrix(2, 3, [1, 2, 3, 4, 5, 6])
raises(NonSquareMatrixError, lambda: a.det())
z = zeros_Determinant(2)
ey = eye_Determinant(2)
assert z.det() == 0
assert ey.det() == 1
x = Symbol('x')
a = DeterminantOnlyMatrix(0, 0, [])
b = DeterminantOnlyMatrix(1, 1, [5])
c = DeterminantOnlyMatrix(2, 2, [1, 2, 3, 4])
d = DeterminantOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 8])
e = DeterminantOnlyMatrix(4, 4,
[x, 1, 2, 3, 4, 5, 6, 7, 2, 9, 10, 11, 12, 13, 14, 14])
from sympy.abc import i, j, k, l, m, n
f = DeterminantOnlyMatrix(3, 3, [i, l, m, 0, j, n, 0, 0, k])
g = DeterminantOnlyMatrix(3, 3, [i, 0, 0, l, j, 0, m, n, k])
h = DeterminantOnlyMatrix(3, 3, [x**3, 0, 0, i, x**-1, 0, j, k, x**-2])
# the method keyword for `det` doesn't kick in until 4x4 matrices,
# so there is no need to test all methods on smaller ones
assert a.det() == 1
assert b.det() == 5
assert c.det() == -2
assert d.det() == 3
assert e.det() == 4*x - 24
assert e.det(method='bareiss') == 4*x - 24
assert e.det(method='berkowitz') == 4*x - 24
assert f.det() == i*j*k
assert g.det() == i*j*k
assert h.det() == 1
raises(ValueError, lambda: e.det(iszerofunc="test"))
def test_adjugate():
x = Symbol('x')
e = DeterminantOnlyMatrix(4, 4,
[x, 1, 2, 3, 4, 5, 6, 7, 2, 9, 10, 11, 12, 13, 14, 14])
adj = Matrix([
[ 4, -8, 4, 0],
[ 76, -14*x - 68, 14*x - 8, -4*x + 24],
[-122, 17*x + 142, -21*x + 4, 8*x - 48],
[ 48, -4*x - 72, 8*x, -4*x + 24]])
assert e.adjugate() == adj
assert e.adjugate(method='bareiss') == adj
assert e.adjugate(method='berkowitz') == adj
a = DeterminantOnlyMatrix(2, 3, [1, 2, 3, 4, 5, 6])
raises(NonSquareMatrixError, lambda: a.adjugate())
def test_util():
R = Rational
v1 = Matrix(1, 3, [1, 2, 3])
v2 = Matrix(1, 3, [3, 4, 5])
assert v1.norm() == sqrt(14)
assert v1.project(v2) == Matrix(1, 3, [R(39)/25, R(52)/25, R(13)/5])
assert Matrix.zeros(1, 2) == Matrix(1, 2, [0, 0])
assert ones(1, 2) == Matrix(1, 2, [1, 1])
assert v1.copy() == v1
# cofactor
assert eye(3) == eye(3).cofactor_matrix()
test = Matrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
assert test.cofactor_matrix() == \
Matrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
test = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert test.cofactor_matrix() == \
Matrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
def test_cofactor_and_minors():
x = Symbol('x')
e = DeterminantOnlyMatrix(4, 4,
[x, 1, 2, 3, 4, 5, 6, 7, 2, 9, 10, 11, 12, 13, 14, 14])
m = Matrix([
[ x, 1, 3],
[ 2, 9, 11],
[12, 13, 14]])
cm = Matrix([
[ 4, 76, -122, 48],
[-8, -14*x - 68, 17*x + 142, -4*x - 72],
[ 4, 14*x - 8, -21*x + 4, 8*x],
[ 0, -4*x + 24, 8*x - 48, -4*x + 24]])
sub = Matrix([
[x, 1, 2],
[4, 5, 6],
[2, 9, 10]])
assert e.minor_submatrix(1, 2) == m
assert e.minor_submatrix(-1, -1) == sub
assert e.minor(1, 2) == -17*x - 142
assert e.cofactor(1, 2) == 17*x + 142
assert e.cofactor_matrix() == cm
assert e.cofactor_matrix(method="bareiss") == cm
assert e.cofactor_matrix(method="berkowitz") == cm
raises(ValueError, lambda: e.cofactor(4, 5))
raises(ValueError, lambda: e.minor(4, 5))
raises(ValueError, lambda: e.minor_submatrix(4, 5))
a = DeterminantOnlyMatrix(2, 3, [1, 2, 3, 4, 5, 6])
assert a.minor_submatrix(0, 0) == Matrix([[5, 6]])
raises(ValueError, lambda:
DeterminantOnlyMatrix(0, 0, []).minor_submatrix(0, 0))
raises(NonSquareMatrixError, lambda: a.cofactor(0, 0))
raises(NonSquareMatrixError, lambda: a.minor(0, 0))
raises(NonSquareMatrixError, lambda: a.cofactor_matrix())
def test_charpoly():
x, y = Symbol('x'), Symbol('y')
z, t = Symbol('z'), Symbol('t')
from sympy.abc import a,b,c
m = DeterminantOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
assert eye_Determinant(3).charpoly(x) == Poly((x - 1)**3, x)
assert eye_Determinant(3).charpoly(y) == Poly((y - 1)**3, y)
assert m.charpoly() == Poly(x**3 - 15*x**2 - 18*x, x)
raises(NonSquareMatrixError, lambda: Matrix([[1], [2]]).charpoly())
n = DeterminantOnlyMatrix(4, 4, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert n.charpoly() == Poly(x**4, x)
n = DeterminantOnlyMatrix(4, 4, [45, 0, 0, 0, 0, 23, 0, 0, 0, 0, 87, 0, 0, 0, 0, 12])
assert n.charpoly() == Poly(x**4 - 167*x**3 + 8811*x**2 - 173457*x + 1080540, x)
n = DeterminantOnlyMatrix(3, 3, [x, 0, 0, a, y, 0, b, c, z])
assert n.charpoly() == Poly(t**3 - (x+y+z)*t**2 + t*(x*y+y*z+x*z) - x*y*z , t)
| 32.470745 | 89 | 0.458432 |
2265cb4d9b031e7d410699140db2a2977246a01e | 20,885 | py | Python
| sdk/network/azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/_local_network_gateways_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | ["MIT"] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z
| sdk/network/azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/_local_network_gateways_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | ["MIT"] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z
| sdk/network/azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/_local_network_gateways_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | ["MIT"] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2015_06_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.LocalNetworkGateway"]
"""Creates or updates a local network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local network gateway operation.
:type parameters: ~azure.mgmt.network.v2015_06_15.models.LocalNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2015_06_15.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
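    # Hedged usage sketch (illustrative only; the attribute path assumes the generated
    # NetworkManagementClient exposes this operation group as `local_network_gateways`):
    #
    #   poller = client.local_network_gateways.begin_create_or_update(
    #       resource_group_name="rg", local_network_gateway_name="lgw", parameters=gateway_model)
    #   result = poller.result()  # blocks until the long-running PUT reaches a terminal state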
def get(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2015_06_15.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LocalNetworkGatewayListResult"]
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2015_06_15.models.LocalNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
| 50.083933 | 209 | 0.672923 |
ba59a35fcf5cbd8dd552456ec3539477c5db1fca | 8,197 | py | Python
| genomics_data_index/storage/MaskedGenomicRegions.py | apetkau/genomics-data-index | d0cc119fd57b8cbd701affb1c84450cf7832fa01 | ["Apache-2.0"] | 12 | 2021-05-03T20:56:05.000Z | 2022-01-04T14:52:19.000Z
| genomics_data_index/storage/MaskedGenomicRegions.py | apetkau/thesis-index | 6c96e9ed75d8e661437effe62a939727a0b473fc | ["Apache-2.0"] | 30 | 2021-04-26T23:03:40.000Z | 2022-02-25T18:41:14.000Z
| genomics_data_index/storage/MaskedGenomicRegions.py | apetkau/genomics-data-index | d0cc119fd57b8cbd701affb1c84450cf7832fa01 | ["Apache-2.0"] | null | null | null |
from __future__ import annotations
import tempfile
from pathlib import Path
from typing import List, Set, Dict, Generator, Tuple
import pandas as pd
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from pybedtools import BedTool
from genomics_data_index.storage.model import NUCLEOTIDE_UNKNOWN, NUCLEOTIDE_UNKNOWN_TYPE
class MaskedGenomicRegions:
def __init__(self, mask: BedTool):
self._mask = mask.sort().merge()
@property
def mask(self):
return self._mask
def intersect(self, other: MaskedGenomicRegions) -> MaskedGenomicRegions:
return MaskedGenomicRegions(self._mask.intersect(other._mask))
def subtract(self, other: MaskedGenomicRegions) -> MaskedGenomicRegions:
subtraction = self._mask.subtract(other._mask)
return MaskedGenomicRegions(subtraction)
def union(self, other: MaskedGenomicRegions) -> MaskedGenomicRegions:
union = self._mask.cat(other._mask, postmerge=True, force_truncate=True)
return MaskedGenomicRegions(union)
def mask_to_features(self) -> pd.DataFrame:
mask_features = []
ref = 1
alt = NUCLEOTIDE_UNKNOWN
nuc_type = NUCLEOTIDE_UNKNOWN_TYPE
for sequence_name, position in self.positions_iter(start_position_index='1'):
variant_id = f'{sequence_name}:{position}:{ref}:{alt}'
mask_features.append([sequence_name, position, ref, alt, nuc_type, variant_id])
return pd.DataFrame(mask_features, columns=['CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'VARIANT_ID'])
def mask_genome(self, genome_file: Path, mask_char: str = '?', remove: bool = True) -> Dict[str, SeqRecord]:
"""
Gets a SeqRecord with all those regions on the passed genome that are in the masked regions removed
(or masked with mask_char).
:param genome_file: The genome file to mask.
:param mask_char: The character to mask with.
:param remove: Whether or not to remove masked sequence data.
:return: A Dictionary mapping a sequence name to a SeqRecord containing all those regions on the sequence
within the masked regions removed (or masked with mask_char)
"""
with tempfile.TemporaryDirectory() as out_f:
seq_records = {}
output_fasta = Path(out_f) / 'masked.fasta'
self._mask.mask_fasta(fi=str(genome_file), fo=str(output_fasta), mc=mask_char)
for record in SeqIO.parse(output_fasta, 'fasta'):
if remove:
record.seq = record.seq.ungap(mask_char)
seq_records[record.id] = record
return seq_records
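    # Hedged usage sketch (file names are illustrative): given regions loaded from a BED
    # file, masked_regions.mask_genome(Path('genome.fasta'), mask_char='N', remove=False)
    # returns {sequence_id: SeqRecord} with every masked position replaced by 'N'.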
def write(self, file: Path):
self._mask.saveas(str(file), compressed=True)
@classmethod
def union_all(cls, masked_regions: List[MaskedGenomicRegions]):
if len(masked_regions) == 0:
raise Exception('Cannot merge empty list')
elif len(masked_regions) == 1:
return masked_regions[0]
else:
start_mask = masked_regions.pop()
union = start_mask._mask.cat(*[o._mask for o in masked_regions], postmerge=True, force_truncate=True)
return MaskedGenomicRegions(union)
@classmethod
def from_sequences(cls, sequences: List[SeqRecord]) -> MaskedGenomicRegions:
def is_missing(char):
return char.upper() == 'N' or char == '-'
# pybedtools internally stores as 0-based BED file intervals
# https://daler.github.io/pybedtools/intervals.html#bed-is-0-based-others-are-1-based
mask_intervals = []
for record in sequences:
start = 0
in_mask = False
for idx, char in enumerate(record.seq):
if in_mask:
if not is_missing(char):
in_mask = False
# pybedtools stop position is not included in interval
stop = idx
mask_intervals.append((record.id, start, stop))
else:
if is_missing(char):
in_mask = True
start = idx
# Finish recording last interval if it exists (e.g., if last bit of sequence was like 'NNNN')
if in_mask:
stop = len(record)
mask_intervals.append((record.id, start, stop))
bedtool_intervals = BedTool(mask_intervals)
return MaskedGenomicRegions(bedtool_intervals)
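    # Worked illustration (hypothetical record): a SeqRecord with id 'seq1' and sequence
    # 'ACGTNNNNACGT' yields the single 0-based BED interval ('seq1', 4, 8) covering the N run.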
@classmethod
def from_file(cls, file: Path) -> MaskedGenomicRegions:
bed_file_data = BedTool(str(file))
return MaskedGenomicRegions(bed_file_data)
@classmethod
def from_vcf_file(cls, file: Path) -> MaskedGenomicRegions:
bed_file_data = BedTool(str(file)).merge()
return MaskedGenomicRegions(bed_file_data)
@classmethod
def empty_mask(cls):
return MaskedGenomicRegions(BedTool('', from_string=True))
def is_empty(self):
return len(self) == 0
def sequence_names(self) -> Set[str]:
"""
Gets a set of sequence names from this genomic regions mask.
:return: A set of sequence names.
"""
return {x.chrom for x in self._mask}
def contains(self, sequence: str, position: int, start_position_index: str = '0') -> bool:
if start_position_index != '0' and start_position_index != '1':
raise Exception((f'Unknown value start_position_index=[{start_position_index}].'
'Should be "0" or "1" to indicate which is the starting base position'))
elif start_position_index == '1':
position = position - 1
for i in self._mask:
if i.chrom == sequence and i.start <= position < i.end:
return True
return False
def _validate_start_position_index(self, start_position_index: str) -> None:
if start_position_index not in ['0', '1']:
raise Exception((f'Unknown value start_position_index=[{start_position_index}].'
'Should be "0" or "1" to indicate which is the starting base position'))
def overlaps_range(self, sequence: str, start: int, stop: int, start_position_index: str = '0') -> bool:
self._validate_start_position_index(start_position_index)
if start_position_index == '1':
start = start - 1
stop = stop - 1
if stop <= start:
            raise Exception(f'stop=[{stop}] must be greater than start=[{start}]')
for i in self._mask:
if i.chrom == sequence:
if i.start <= start and i.end > start:
return True
elif start < i.end and stop > i.end:
return True
return False
def positions_iter(self, start_position_index: str = '0') -> Generator[Tuple[str, int], None, None]:
"""
Creates an iterator to iterate over all ('sequence', 'position') in this mask.
:param start_position_index: Whether positions should be in 0-base coordinates or 1-base coordinates.
See https://bedtools.readthedocs.io/en/latest/content/general-usage.html#bed-format
for a description of the differences in coordinates.
:return: An iterator which will return tuples like ('sequence', 'position') for every
position in this mask.
"""
self._validate_start_position_index(start_position_index)
for sequence in self._mask:
sequence_name = sequence.chrom
start = sequence.start
end = sequence.end
if start_position_index == '1':
start = start + 1
end = end + 1
for pos in range(start, end):
yield sequence_name, pos
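    # Hedged illustration: for a mask holding the single interval ('seq1', 4, 8),
    # positions_iter() yields ('seq1', 4) ... ('seq1', 7) in 0-based coordinates, while
    # positions_iter(start_position_index='1') yields ('seq1', 5) ... ('seq1', 8).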
def __len__(self) -> int:
"""
Calculates length of underlying masked intervals. Assumes the intervals have been merged beforehand.
:return: The length of the masked intervals.
"""
total = 0
for i in self._mask:
total += len(i)
return total
| 40.37931 | 120 | 0.615957 |
f319327e463b8600d62a3f1b55574af07a738ba9 | 5,337 | py | Python
| tests/scene_saver_test.py | NextCenturyCorporation/mcs-scene-generator | e0a6ee778359cadd2de682a5006581b7a6134431 | ["Apache-2.0"] | 4 | 2021-02-04T03:57:52.000Z | 2022-02-08T18:19:58.000Z
| tests/scene_saver_test.py | NextCenturyCorporation/mcs-scene-generator | e0a6ee778359cadd2de682a5006581b7a6134431 | ["Apache-2.0"] | 68 | 2021-05-06T08:52:46.000Z | 2022-03-23T16:46:03.000Z
| tests/scene_saver_test.py | NextCenturyCorporation/mcs-scene-generator | e0a6ee778359cadd2de682a5006581b7a6134431 | ["Apache-2.0"] | 1 | 2021-02-04T03:21:57.000Z | 2021-02-04T03:21:57.000Z |
import copy
from generator.scene_saver import (
_strip_debug_data,
_strip_debug_misleading_data,
_strip_debug_object_data,
find_next_filename,
)
def create_test_object():
return {
'id': 'thing1',
'type': 'thing_1',
'debug': {
'info': ['a', 'b', 'c', 'd'],
'goalString': 'abcd',
'materialCategory': ['wood'],
'dimensions': {'x': 13, 'z': 42},
'offset': {'x': 13, 'z': 42},
'closedDimensions': {'x': 13, 'z': 42},
'closedOffset': {'x': 13, 'z': 42},
'enclosedAreas': [{}],
'openAreas': [{}],
'movement': {},
'isParentOf': [],
'untrainedCategory': True,
'untrainedColor': True,
'untrainedCombination': False,
'untrainedShape': True,
'untrainedSize': True,
'color': ['black', 'white'],
'shape': 'shape',
'size': 'small',
'weight': 'light',
'role': 'target',
'isTarget': True,
'boundsAtStep': [],
'configHeight': [],
'configSize': []
},
'shows': [{
'stepBegin': 0,
'boundingBox': 'dummy'
}]
}
def test_find_next_filename():
filename, index = find_next_filename('', 1, '01')
assert filename == '1'
assert index == 1
filename, index = find_next_filename('', 2, '01')
assert filename == '2'
assert index == 2
filename, index = find_next_filename('tests/file', 1, '01')
assert filename == 'tests/file1'
assert index == 1
filename, index = find_next_filename('tests/file', 2, '01')
assert filename == 'tests/file3'
assert index == 3
filename, index = find_next_filename('tests/file', 1, '02')
assert filename == 'tests/file01'
assert index == 1
filename, index = find_next_filename('tests/file', 2, '02')
assert filename == 'tests/file03'
assert index == 3
filename, index = find_next_filename('tests/file', 1, '01', suffix='.txt')
assert filename == 'tests/file2'
assert index == 2
filename, index = find_next_filename(
'tests/file',
1,
'01',
suffix='_debug.json'
)
assert filename == 'tests/file2'
assert index == 2
def test_strip_debug_data():
scene = {
'debug': {
'floorColors': ['grey'],
'wallColors': ['blue']
},
'objects': [create_test_object()],
'goal': {
'category': 'test',
'domainsInfo': {
'domainsTag': True
},
'objectsInfo': {
'objectsTag': True
},
'sceneInfo': {
'sceneTag': True
},
'metadata': {
'target': {
'id': 'golden_idol',
'info': ['gold', 'idol']
}
}
}
}
expected = {
'objects': [{
'id': 'thing1',
'type': 'thing_1',
'shows': [{
'stepBegin': 0
}]
}],
'goal': {
'category': 'test',
'metadata': {
'target': {
'id': 'golden_idol'
}
}
}
}
_strip_debug_data(scene)
assert scene == expected
def test_strip_debug_misleading_data():
obj = create_test_object()
expected = copy.deepcopy(obj)
expected['debug']['movement'] = {
'deepExit': {
'key': 'value'
},
'deepStop': {
'key': 'value'
},
'moveExit': {
'key': 'value'
},
'moveStop': {
'key': 'value'
},
'tossExit': {
'key': 'value'
},
'tossStop': {
'key': 'value'
}
}
obj['debug']['movement'] = {
'deepExit': {
'xDistanceByStep': [1],
'yDistanceByStep': [2],
'zDistanceByStep': [3],
'key': 'value'
},
'deepStop': {
'xDistanceByStep': [1],
'yDistanceByStep': [2],
'zDistanceByStep': [3],
'key': 'value'
},
'moveExit': {
'xDistanceByStep': [1],
'yDistanceByStep': [2],
'zDistanceByStep': [3],
'key': 'value'
},
'moveStop': {
'xDistanceByStep': [1],
'yDistanceByStep': [2],
'zDistanceByStep': [3],
'key': 'value'
},
'tossExit': {
'xDistanceByStep': [1],
'yDistanceByStep': [2],
'zDistanceByStep': [3],
'key': 'value'
},
'tossStop': {
'xDistanceByStep': [1],
'yDistanceByStep': [2],
'zDistanceByStep': [3],
'key': 'value'
}
}
_strip_debug_misleading_data({'objects': [obj]})
assert obj == expected
def test_strip_debug_object_data():
obj = create_test_object()
expected = {
'id': 'thing1',
'type': 'thing_1',
'shows': [{
'stepBegin': 0
}]
}
_strip_debug_object_data(obj)
assert obj == expected
| 25.293839 | 78 | 0.439198 |
fbe8cbab388d492ae114589d5d8ce0a2c25b2190 | 1,340 | py | Python
| examples/markov/markov_rw_norm.py | Bhumbra/probayes | e5ac193076e4188b9b38c0e18466223ab4d041f7 | ["BSD-3-Clause"] | null | null | null
| examples/markov/markov_rw_norm.py | Bhumbra/probayes | e5ac193076e4188b9b38c0e18466223ab4d041f7 | ["BSD-3-Clause"] | null | null | null
| examples/markov/markov_rw_norm.py | Bhumbra/probayes | e5ac193076e4188b9b38c0e18466223ab4d041f7 | ["BSD-3-Clause"] | null | null | null |
""" Example of a Markov chain random walk simulation
using a continuous transition function.
"""
import probayes as pb
import numpy as np
import scipy.stats
from pylab import *; ion()
n_steps = 10000
set_lims = [-np.pi, np.pi]
def tran(succ, pred):
loc = -np.sin(pred)
scale = 1. + 0.5 * np.cos(pred)
return scipy.stats.norm.pdf(succ, loc=loc, scale=scale)
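# The transition density above is x' | x ~ Normal(loc=-sin(x), scale=1 + 0.5*cos(x));
# tcdf and ticdf below expose the matching CDF and inverse CDF, which is what
# x.set_tfun registers further down so the chain can draw successors from this kernel.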
def tcdf(succ, pred):
loc = -np.sin(pred)
scale = 1. + 0.5 * np.cos(pred)
return scipy.stats.norm.cdf(succ, loc=loc, scale=scale)
def ticdf(succ, pred):
loc = -np.sin(pred)
scale = 1. + 0.5 * np.cos(pred)
return scipy.stats.norm.ppf(succ, loc=loc, scale=scale)
x = pb.RV('x', set_lims)
x.set_tran(tran, order={'x': 'pred', "x'": 'succ'})
x.set_tfun((tcdf, ticdf), order={'x': 'pred', "x'": 'succ'})
steps = [None] * n_steps
pred = np.empty(n_steps, dtype=float)
succ = np.empty(n_steps, dtype=float)
cond = np.empty(n_steps, dtype=float)
print('Simulating...')
for i in range(n_steps):
if i == 0:
steps[i] = x.step({0})
else:
steps[i] = x.step(succ[i-1])
pred[i] = steps[i]['x']
succ[i] = steps[i]["x'"]
cond[i] = steps[i].prob
print('...done')
# PLOT DATA
figure()
c_norm = Normalize(vmin=np.min(cond), vmax=np.max(cond))
c_map = cm.jet(c_norm(cond))
scatter(pred, succ, color=c_map, marker='.')
xlabel('Predecessor')
ylabel('Successor')
| 24.814815 | 60 | 0.643284 |
e7ea0166e283b9b2f8f32e3c803d848cb86c79ec | 790 | py | Python
| synoptic/__init__.py | gitter-badger/SynopticPy | 05d1c1d5b69f0efa22d87a3e2c3e1896c2c37ca6 | ["MIT"] | 12 | 2021-03-13T19:18:35.000Z | 2022-03-28T13:14:59.000Z
| synoptic/__init__.py | gitter-badger/SynopticPy | 05d1c1d5b69f0efa22d87a3e2c3e1896c2c37ca6 | ["MIT"] | 4 | 2020-09-20T00:52:20.000Z | 2022-03-30T19:31:40.000Z
| synoptic/__init__.py | gitter-badger/SynopticPy | 05d1c1d5b69f0efa22d87a3e2c3e1896c2c37ca6 | ["MIT"] | 6 | 2021-01-10T13:21:11.000Z | 2022-03-25T02:01:17.000Z |
## Brian Blaylock
## September 11, 2020
"""
============
Synoptic API
============
Retrieve and plot mesonet data from thousands of stations via the Synoptic Data
Mesonet API: https://developers.synopticdata.com/mesonet/.
Usage
-----
There are two recommended ways to import these functions.
``` python
# Method 1: Import full module
import synoptic.services as ss
import synoptic.plots as sp
```
``` python
# Method 2: Import individual functions
from synoptic.services import stations_timeseries
```
"""
__author__ = "Brian Blaylock"
__email__ = "blaylockbk@gmail.com"
__url__ = "https://github.com/blaylockbk/SynopticPy"
import warnings

try:
    from synoptic.accessors import *
except Exception:
    warnings.warn("Could not import synoptic.accessors")
# 🙋🏻♂️ Thank you for using SynopticPy!")
| 20.25641 | 79 | 0.717722 |
1a0a486e00668d8922398d5e5f728ce3f3b5c956 | 3,340 | py | Python
| dbInterface.py | ddsnowboard/SimpleDBInterface | 8d409e768b11b4b4d70ebf26bebe92289bb33511 | ["MIT"] | null | null | null
| dbInterface.py | ddsnowboard/SimpleDBInterface | 8d409e768b11b4b4d70ebf26bebe92289bb33511 | ["MIT"] | null | null | null
| dbInterface.py | ddsnowboard/SimpleDBInterface | 8d409e768b11b4b4d70ebf26bebe92289bb33511 | ["MIT"] | null | null | null |
import ENV
# Uncomment this when you start on the postgres layer
# import psycopg2
import sqlite3
#TODO: Replace every pragma table_info() call with getColumns()
class Database:
class Table:
def __init__(self, connection, name):
self.connection = connection
self.name = name
if ENV.DATABASE == "sqlite":
self.columns = [i[1] for i in self.connection.cursor().execute("pragma table_info(%s)" % self.name)]
def select(self, **kwargs):
# Someday I should have a Selection object, but that's for another day
inputString = "select %s from %s" % (", ".join(self.columns), self.name)
output = []
if not kwargs:
output = list(self.connection.cursor().execute(inputString))
else:
                inputString = "%s where %s" % (inputString, " and ".join("%s=?" % i for i in kwargs.keys()))
                output = list(self.connection.cursor().execute(inputString, list(kwargs.values())))
ret = [{i:j for i, j in zip(self.columns, currLine)} for currLine in output]
return ret
def insert(self, **kwargs):
for col in self.connection.cursor().execute("pragma table_info(%s)" % self.name):
id, name, type, notnull, default, pk = col
if not name in kwargs.keys():
if notnull:
raise Exception("You have to pass in a value for %s! It can't be null!" % name)
            self.connection.cursor().execute("insert into %s (%s) VALUES (%s)" % (self.name, ",".join(kwargs.keys()), ",".join("?" for i in kwargs.keys())),
                                             list(kwargs.values()))  # sqlite3 wants a real sequence, not a dict view
self.connection.commit()
def getColumns(self):
# The pragma returns a list of tuples of the form (id| name| type| notnull| default| pk)
            c = self.connection.cursor().execute("pragma table_info(%s)" % self.name)
return {row[1]: {i: j for (i, j) in zip(("id", "type", "notnull", "default", "pk"), row[:1] + row[2:])} for row in c}
def __init__(self):
if ENV.DATABASE == "pgsql":
raise NotImplementedError("I haven't done the postgres layer yet")
elif ENV.DATABASE == "sqlite":
self.connection = sqlite3.connect(ENV.DB_FILE)
else:
raise Exception("%s isn't the name of a database I know about" % ENV.DATABASE)
def getTable(self, tableName):
return Database.Table(self.connection, tableName)
def createTable(name, *cols):
    # Check every column description with all(); the original reduce()-based check relied
    # on a name that is a builtin only on Python 2 and skipped the first column anyway.
    if not all(isinstance(col, dict) for col in cols):
        raise Exception("You didn't give dictionaries for the columns!")
    connection = sqlite3.connect(ENV.DB_FILE)
    query = "create table %s (%s)" % (name, ",".join("%s %s %s" % (i["name"], i["type"], "not null" if not i.get("null", True) else "") for i in cols))
connection.cursor().execute(query)
connection.commit()
db = Database()
table = db.getTable(name)
return table
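# Hedged usage sketch (table and column names are illustrative, not part of the module):
#   people = createTable('people', {'name': 'username', 'type': 'text', 'null': False})
#   people.insert(username='alice')
#   people.select(username='alice')   # -> [{'username': 'alice'}]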
| 49.850746 | 179 | 0.562575 |
3f7ea909693fa33e535d768a45760d7f75a7e5ce | 22,675 | py | Python
| leaderboard/Face-Pose-Net/utils/pose_utils.py | showkeyjar/beauty | 7c944cf896c899d9e23b2e50e293103bb03fe6cd | ["MulanPSL-1.0"] | 1 | 2022-01-29T12:32:38.000Z | 2022-01-29T12:32:38.000Z
| leaderboard/Face-Pose-Net/utils/pose_utils.py | showkeyjar/beauty | 7c944cf896c899d9e23b2e50e293103bb03fe6cd | ["MulanPSL-1.0"] | null | null | null
| leaderboard/Face-Pose-Net/utils/pose_utils.py | showkeyjar/beauty | 7c944cf896c899d9e23b2e50e293103bb03fe6cd | ["MulanPSL-1.0"] | null | null | null |
import sys
import os
import numpy as np
import cv2
import math
from math import cos, sin, atan2, asin
import fileinput
## Index to remap landmarks in case we flip an image
repLand = [ 17,16,15,14,13,12,11,10, 9,8,7,6,5,4,3,2,1,27,26,25, \
24,23,22,21,20,19,18,28,29,30,31,36,35,34,33,32,46,45,44,43, \
48,47,40,39,38,37,42,41,55,54,53,52,51,50,49,60,59,58,57,56, \
65,64,63,62,61,68,67,66 ]
def increaseBbox(bbox, factor):
tlx = bbox[0]
tly = bbox[1]
brx = bbox[2]
bry = bbox[3]
dx = factor
dy = factor
dw = 1 + factor
dh = 1 + factor
#Getting bbox height and width
w = brx-tlx;
h = bry-tly;
tlx2 = tlx - w * dx
tly2 = tly - h * dy
brx2 = tlx + w * dw
bry2 = tly + h * dh
nbbox = np.zeros( (4,1), dtype=np.float32 )
nbbox[0] = tlx2
nbbox[1] = tly2
nbbox[2] = brx2
nbbox[3] = bry2
return nbbox
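# Worked illustration (made-up numbers): increaseBbox([0, 0, 10, 10], 0.5) grows the
# 10x10 box by factor*width/height on each side relative to its top-left corner and
# returns [-5, -5, 15, 15].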
def increaseBbox_rescaleCASIA(bbox, factor):
tlx = bbox[0]
tly = bbox[1]
brx = bbox[2]
bry = bbox[3]
ww = brx - tlx;
hh = bry - tly;
cx = tlx + ww/2;
cy = tly + hh/2;
tsize = max(ww,hh)/2;
bl = cx - factor[0]*tsize;
bt = cy - factor[1]*tsize;
br = cx + factor[2]*tsize;
bb = cy + factor[3]*tsize;
nbbox = np.zeros( (4,1), dtype=np.float32 )
nbbox[0] = bl;
nbbox[1] = bt;
nbbox[2] = br;
nbbox[3] = bb;
return nbbox
def increaseBbox_rescaleYOLO(bbox, im):
rescaleFrontal = [1.4421, 2.2853, 1.4421, 1.4286];
rescaleCS2 = [0.9775, 1.5074, 0.9563, 0.9436];
l = bbox[0]
t = bbox[1]
ww = bbox[2]
hh = bbox[3]
# Approximate LM tight BB
h = im.shape[0];
w = im.shape[1];
cx = l + ww/2;
cy = t + hh/2;
tsize = max(ww,hh)/2;
l = cx - tsize;
t = cy - tsize;
cx = l + (2*tsize)/(rescaleCS2[0]+rescaleCS2[2]) * rescaleCS2[0];
cy = t + (2*tsize)/(rescaleCS2[1]+rescaleCS2[3]) * rescaleCS2[1];
tsize = 2*tsize/(rescaleCS2[0]+rescaleCS2[2]);
"""
# Approximate inplane align (frontal)
nbbox = np.zeros( (4,1), dtype=np.float32 )
nbbox[0] = cx - rescaleFrontal[0]*tsize;
nbbox[1] = cy - rescaleFrontal[1]*tsize;
nbbox[2] = cx + rescaleFrontal[2]*tsize;
nbbox[3] = cy + rescaleFrontal[3]*tsize;
"""
nbbox = np.zeros( (4,1), dtype=np.float32 )
nbbox[0] = cx - tsize;
nbbox[1] = cy - tsize;
nbbox[2] = cx + tsize;
nbbox[3] = cy + tsize;
return nbbox
def image_bbox_processing_v2(img, bbox, landmarks=None):
img_h, img_w, img_c = img.shape
lt_x = bbox[0]
lt_y = bbox[1]
rb_x = bbox[2]
rb_y = bbox[3]
fillings = np.zeros( (4,1), dtype=np.int32)
if lt_x < 0: ## 0 for python
fillings[0] = math.ceil(-lt_x)
if lt_y < 0:
fillings[1] = math.ceil(-lt_y)
if rb_x > img_w-1:
fillings[2] = math.ceil(rb_x - img_w + 1)
if rb_y > img_h-1:
fillings[3] = math.ceil(rb_y - img_h + 1)
new_bbox = np.zeros( (4,1), dtype=np.float32 )
# img = [zeros(size(img,1),fillings(1),img_c), img]
# img = [zeros(fillings(2), size(img,2),img_c); img]
# img = [img, zeros(size(img,1), fillings(3),img_c)]
# new_img = [img; zeros(fillings(4), size(img,2),img_c)]
imgc = img.copy()
if fillings[0] > 0:
img_h, img_w, img_c = imgc.shape
imgc = np.hstack( [np.zeros( (img_h, fillings[0][0], img_c), dtype=np.uint8 ), imgc] )
if fillings[1] > 0:
img_h, img_w, img_c = imgc.shape
imgc = np.vstack( [np.zeros( (fillings[1][0], img_w, img_c), dtype=np.uint8 ), imgc] )
if fillings[2] > 0:
img_h, img_w, img_c = imgc.shape
imgc = np.hstack( [ imgc, np.zeros( (img_h, fillings[2][0], img_c), dtype=np.uint8 ) ] )
if fillings[3] > 0:
img_h, img_w, img_c = imgc.shape
imgc = np.vstack( [ imgc, np.zeros( (fillings[3][0], img_w, img_c), dtype=np.uint8) ] )
new_bbox[0] = lt_x + fillings[0]
new_bbox[1] = lt_y + fillings[1]
new_bbox[2] = rb_x + fillings[0]
new_bbox[3] = rb_y + fillings[1]
    if landmarks is None or len(landmarks) == 0:  # guard the default landmarks=None before calling len()
return imgc, new_bbox
else:
landmarks_new = np.zeros([landmarks.shape[0], landmarks.shape[1]])
#print "landmarks_new's shape: \n"
#print landmarks_new.shape
landmarks_new[:,0] = landmarks[:,0] + fillings[0]
landmarks_new[:,1] = landmarks[:,1] + fillings[1]
return imgc, new_bbox, landmarks_new
#return imgc, new_bbox
def image_bbox_processing_v3(img, bbox):
img_h, img_w, img_c = img.shape
lt_x = bbox[0]
lt_y = bbox[1]
rb_x = bbox[2]
rb_y = bbox[3]
fillings = np.zeros( (4,1), dtype=np.int32)
if lt_x < 0: ## 0 for python
fillings[0] = math.ceil(-lt_x)
if lt_y < 0:
fillings[1] = math.ceil(-lt_y)
if rb_x > img_w-1:
fillings[2] = math.ceil(rb_x - img_w + 1)
if rb_y > img_h-1:
fillings[3] = math.ceil(rb_y - img_h + 1)
new_bbox = np.zeros( (4,1), dtype=np.float32 )
# img = [zeros(size(img,1),fillings(1),img_c), img]
# img = [zeros(fillings(2), size(img,2),img_c); img]
# img = [img, zeros(size(img,1), fillings(3),img_c)]
# new_img = [img; zeros(fillings(4), size(img,2),img_c)]
imgc = img.copy()
if fillings[0] > 0:
img_h, img_w, img_c = imgc.shape
imgc = np.hstack( [np.zeros( (img_h, fillings[0][0], img_c), dtype=np.uint8 ), imgc] )
if fillings[1] > 0:
img_h, img_w, img_c = imgc.shape
imgc = np.vstack( [np.zeros( (fillings[1][0], img_w, img_c), dtype=np.uint8 ), imgc] )
if fillings[2] > 0:
img_h, img_w, img_c = imgc.shape
imgc = np.hstack( [ imgc, np.zeros( (img_h, fillings[2][0], img_c), dtype=np.uint8 ) ] )
if fillings[3] > 0:
img_h, img_w, img_c = imgc.shape
imgc = np.vstack( [ imgc, np.zeros( (fillings[3][0], img_w, img_c), dtype=np.uint8) ] )
new_bbox[0] = lt_x + fillings[0]
new_bbox[1] = lt_y + fillings[1]
new_bbox[2] = rb_x + fillings[0]
new_bbox[3] = rb_y + fillings[1]
return imgc, new_bbox
def preProcessImage(im, lmks, bbox, factor, _alexNetSize, flipped):
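    # Pipeline: square the bbox around its centre, pad the image so the factor-expanded
    # box fits, crop that box, resize the crop to _alexNetSize x _alexNetSize and
    # rescale the landmarks into the crop's coordinate frame.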
sys.stdout.flush()
    if flipped == 1:  # flip landmarks and indices if it's a flipped image
lmks = flip_lmk_idx(im, lmks)
lmks_flip = lmks
lt_x = bbox[0]
lt_y = bbox[1]
rb_x = lt_x + bbox[2]
rb_y = lt_y + bbox[3]
w = bbox[2]
h = bbox[3]
center = ( (lt_x+rb_x)/2, (lt_y+rb_y)/2 )
side_length = max(w,h);
# make the bbox be square
bbox = np.zeros( (4,1), dtype=np.float32 )
bbox[0] = center[0] - side_length/2
bbox[1] = center[1] - side_length/2
bbox[2] = center[0] + side_length/2
bbox[3] = center[1] + side_length/2
img_2, bbox_green = image_bbox_processing_v2(im, bbox)
#%% Get the expanded square bbox
bbox_red = increaseBbox(bbox_green, factor)
bbox_red2 = increaseBbox(bbox, factor)
bbox_red2[2] = bbox_red2[2] - bbox_red2[0]
bbox_red2[3] = bbox_red2[3] - bbox_red2[1]
bbox_red2 = np.reshape(bbox_red2, [4])
img_3, bbox_new, lmks = image_bbox_processing_v2(img_2, bbox_red, lmks)
#%% Crop and resized
bbox_new = np.ceil( bbox_new )
side_length = max( bbox_new[2] - bbox_new[0], bbox_new[3] - bbox_new[1] )
bbox_new[2:4] = bbox_new[0:2] + side_length
bbox_new = bbox_new.astype(int)
crop_img = img_3[bbox_new[1][0]:bbox_new[3][0], bbox_new[0][0]:bbox_new[2][0], :];
lmks_new = np.zeros([lmks.shape[0],2])
lmks_new[:,0] = lmks[:,0] - bbox_new[0][0]
lmks_new[:,1] = lmks[:,1] - bbox_new[1][0]
resized_crop_img = cv2.resize(crop_img, ( _alexNetSize, _alexNetSize ), interpolation = cv2.INTER_CUBIC)
old_h, old_w, channels = crop_img.shape
lmks_new2 = np.zeros([lmks.shape[0],2])
lmks_new2[:,0] = lmks_new[:,0] * _alexNetSize / old_w
lmks_new2[:,1] = lmks_new[:,1] * _alexNetSize / old_h
#print _alexNetSize, old_w, old_h
return resized_crop_img, lmks_new2, bbox_red2, lmks_flip, side_length, center
def resize_crop_rescaleCASIA(im, bbox, lmks, factor):
lt_x = bbox[0]
lt_y = bbox[1]
rb_x = lt_x + bbox[2]
rb_y = lt_y + bbox[3]
bbox = np.reshape([lt_x, lt_y, rb_x, rb_y], [-1])
# Get the expanded square bbox
bbox_red = increaseBbox_rescaleCASIA(bbox, factor)
img_3, bbox_new, lmks = image_bbox_processing_v2(im, bbox_red, lmks);
lmks_filling = lmks.copy()
#%% Crop and resized
bbox_new = np.ceil( bbox_new )
side_length = max( bbox_new[2] - bbox_new[0], bbox_new[3] - bbox_new[1] )
bbox_new[2:4] = bbox_new[0:2] + side_length
#bbox_new[0] = max(0, bbox_new[0])
#bbox_new[1] = max(0, bbox_new[1])
#bbox_new[2] = min(img_3.shape[1]-1, bbox_new[2])
#bbox_new[3] = min(img_3.shape[0]-1, bbox_new[3])
bbox_new = bbox_new.astype(int)
crop_img = img_3[bbox_new[1][0]:bbox_new[3][0], bbox_new[0][0]:bbox_new[2][0], :];
lmks_new = np.zeros([lmks.shape[0],2])
lmks_new[:,0] = lmks[:,0] - bbox_new[0][0]
lmks_new[:,1] = lmks[:,1] - bbox_new[1][0]
old_h, old_w, channels = crop_img.shape
resized_crop_img = cv2.resize(crop_img, ( 224, 224 ), interpolation = cv2.INTER_CUBIC)
lmks_new2 = np.zeros([lmks.shape[0],2])
lmks_new2[:,0] = lmks_new[:,0] * 224 / old_w
lmks_new2[:,1] = lmks_new[:,1] * 224 / old_h
return resized_crop_img, bbox_new, lmks_new2, lmks_filling, old_h, old_w, img_3
def resize_crop_rescaleCASIA_v2(im, bbox, lmks, factor, bbox_type):
# Get the expanded square bbox
if bbox_type == "casia":
lt_x = bbox[0]
lt_y = bbox[1]
rb_x = lt_x + bbox[2]
rb_y = lt_y + bbox[3]
bbox = np.reshape([lt_x, lt_y, rb_x, rb_y], [-1])
bbox_red = increaseBbox_rescaleCASIA(bbox, factor)
elif bbox_type == "yolo":
lt_x = bbox[0]
lt_y = bbox[1]
rb_x = lt_x + bbox[2]
rb_y = lt_y + bbox[3]
w = bbox[2]
h = bbox[3]
center = ( (lt_x+rb_x)/2, (lt_y+rb_y)/2 )
side_length = max(w,h);
# make the bbox be square
bbox = np.zeros( (4,1), dtype=np.float32 )
bbox[0] = center[0] - side_length/2
bbox[1] = center[1] - side_length/2
bbox[2] = center[0] + side_length/2
bbox[3] = center[1] + side_length/2
img_2, bbox_green = image_bbox_processing_v3(im, bbox)
#%% Get the expanded square bbox
bbox_red = increaseBbox(bbox_green, factor)
img_3, bbox_new, lmks = image_bbox_processing_v2(im, bbox_red, lmks);
lmks_filling = lmks.copy()
#%% Crop and resized
bbox_new = np.ceil( bbox_new )
side_length = max( bbox_new[2] - bbox_new[0], bbox_new[3] - bbox_new[1] )
bbox_new[2:4] = bbox_new[0:2] + side_length
#bbox_new[0] = max(0, bbox_new[0])
#bbox_new[1] = max(0, bbox_new[1])
#bbox_new[2] = min(img_3.shape[1]-1, bbox_new[2])
#bbox_new[3] = min(img_3.shape[0]-1, bbox_new[3])
bbox_new = bbox_new.astype(int)
crop_img = img_3[bbox_new[1][0]:bbox_new[3][0], bbox_new[0][0]:bbox_new[2][0], :];
lmks_new = np.zeros([lmks.shape[0],2])
lmks_new[:,0] = lmks[:,0] - bbox_new[0][0]
lmks_new[:,1] = lmks[:,1] - bbox_new[1][0]
old_h, old_w, channels = crop_img.shape
resized_crop_img = cv2.resize(crop_img, ( 224, 224 ), interpolation = cv2.INTER_CUBIC)
lmks_new2 = np.zeros([lmks.shape[0],2])
lmks_new2[:,0] = lmks_new[:,0] * 224 / old_w
lmks_new2[:,1] = lmks_new[:,1] * 224 / old_h
return resized_crop_img, bbox_new, lmks_new2, lmks_filling, old_h, old_w, img_3
def resize_crop_AFLW(im, bbox, lmks):
lt_x = bbox[0]
lt_y = bbox[1]
rb_x = lt_x + bbox[2]
rb_y = lt_y + bbox[3]
    bbox = np.reshape([lt_x, lt_y, rb_x, rb_y], [-1]).astype(int)
    crop_img = im[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
lmks_new = np.zeros([lmks.shape[0],2])
lmks_new[:,0] = lmks[:,0] - bbox[0]
lmks_new[:,1] = lmks[:,1] - bbox[1]
old_h, old_w, channels = crop_img.shape
resized_crop_img = cv2.resize(crop_img, ( 224, 224 ), interpolation = cv2.INTER_CUBIC)
lmks_new2 = np.zeros([lmks.shape[0],2])
lmks_new2[:,0] = lmks_new[:,0] * 224 / old_w
lmks_new2[:,1] = lmks_new[:,1] * 224 / old_h
bbox_new = np.zeros([4])
bbox_new[0] = bbox[0] * 224 / old_w
bbox_new[1] = bbox[1] * 224 / old_h
bbox_new[2] = bbox[2] * 224 / old_w
bbox_new[3] = bbox[3] * 224 / old_h
bbox_new[2] = bbox_new[2] - bbox_new[0] # box width
bbox_new[3] = bbox_new[3] - bbox_new[1] # box height
return resized_crop_img, bbox_new, lmks_new2
def preProcessImage_v2(im, bbox, factor, _resNetSize, if_cropbyLmks_rescaleCASIA):
sys.stdout.flush()
if if_cropbyLmks_rescaleCASIA == 0:
lt_x = bbox[0]
lt_y = bbox[1]
rb_x = lt_x + bbox[2]
rb_y = lt_y + bbox[3]
w = bbox[2]
h = bbox[3]
center = ( (lt_x+rb_x)/2, (lt_y+rb_y)/2 )
side_length = max(w,h);
# make the bbox be square
bbox = np.zeros( (4,1), dtype=np.float32 )
bbox[0] = center[0] - side_length/2
bbox[1] = center[1] - side_length/2
bbox[2] = center[0] + side_length/2
bbox[3] = center[1] + side_length/2
img_2, bbox_green = image_bbox_processing_v2(im, bbox)
#%% Get the expanded square bbox
bbox_red = increaseBbox(bbox_green, factor)
img_3, bbox_new = image_bbox_processing_v2(img_2, bbox_red)
elif if_cropbyLmks_rescaleCASIA == 1:
bbox[2] = bbox[0] + bbox[2]
bbox[3] = bbox[1] + bbox[3]
bbox_red = increaseBbox_rescaleCASIA(bbox, factor)
#print bbox_red
img_3, bbox_new = image_bbox_processing_v3(im, bbox_red)
else:
bbox2 = increaseBbox_rescaleYOLO(bbox, im)
bbox_red = increaseBbox_rescaleCASIA(bbox2, factor)
img_3, bbox_new = image_bbox_processing_v2(im, bbox_red)
#bbox_red2 = increaseBbox(bbox, factor)
#bbox_red2[2] = bbox_red2[2] - bbox_red2[0]
#bbox_red2[3] = bbox_red2[3] - bbox_red2[1]
#bbox_red2 = np.reshape(bbox_red2, [4])
#%% Crop and resized
bbox_new = np.ceil( bbox_new )
side_length = max( bbox_new[2] - bbox_new[0], bbox_new[3] - bbox_new[1] )
bbox_new[2:4] = bbox_new[0:2] + side_length
bbox_new = bbox_new.astype(int)
crop_img = img_3[bbox_new[1][0]:bbox_new[3][0], bbox_new[0][0]:bbox_new[2][0], :];
#print crop_img.shape
resized_crop_img = cv2.resize(crop_img, ( _resNetSize, _resNetSize ), interpolation = cv2.INTER_CUBIC)
return resized_crop_img
def preProcessImage_useGTBBox(im, lmks, bbox, factor, _alexNetSize, flipped, to_train_scale, yolo_bbox):
sys.stdout.flush()
#print bbox, yolo_bbox, to_train_scale
    if flipped == 1:  # flip landmarks and indices if it's a flipped image
lmks = flip_lmk_idx(im, lmks)
lmks_flip = lmks
lt_x = bbox[0]
lt_y = bbox[1]
rb_x = lt_x + bbox[2]
rb_y = lt_y + bbox[3]
w = bbox[2]
h = bbox[3]
center = ( (lt_x+rb_x)/2, (lt_y+rb_y)/2 )
side_length = max(w,h);
# make the bbox be square
bbox = np.zeros( (4,1), dtype=np.float32 )
#print bbox
bbox_red = np.zeros( (4,1), dtype=np.float32 )
if to_train_scale == 1:
_, _, _, _, side_length2, center2 = preProcessImage(im, lmks, yolo_bbox, factor, _alexNetSize, flipped)
center3 = ( (center[0]+center2[0])/2, (center[1]+center2[1])/2 )
bbox[0] = center3[0] - side_length2/2
bbox[1] = center3[1] - side_length2/2
bbox[2] = center3[0] + side_length2/2
bbox[3] = center3[1] + side_length2/2
bbox_red[0] = center3[0] - side_length2/2
bbox_red[1] = center3[1] - side_length2/2
bbox_red[2] = side_length2
bbox_red[3] = side_length2
else:
bbox[0] = center[0] - side_length/2
bbox[1] = center[1] - side_length/2
bbox[2] = center[0] + side_length/2
bbox[3] = center[1] + side_length/2
#print center, side_length, bbox[0], bbox[1], bbox[2], bbox[3]
bbox_red[0] = center[0] - side_length/2
bbox_red[1] = center[1] - side_length/2
bbox_red[2] = side_length
bbox_red[3] = side_length
bbox_red = np.reshape(bbox_red, [4])
#print bbox, bbox_red
img_2, bbox_green = image_bbox_processing_v2(im, bbox)
#print img_2.shape, bbox_green
#%% Crop and resized
bbox_new = np.ceil( bbox_green )
side_length = max( bbox_new[2] - bbox_new[0], bbox_new[3] - bbox_new[1] )
bbox_new[2:4] = bbox_new[0:2] + side_length
bbox_new = bbox_new.astype(int)
#print bbox_new
crop_img = img_2[bbox_new[1][0]:bbox_new[3][0], bbox_new[0][0]:bbox_new[2][0], :];
lmks_new = np.zeros([68,2])
lmks_new[:,0] = lmks[:,0] - bbox_new[0][0]
lmks_new[:,1] = lmks[:,1] - bbox_new[1][0]
#print crop_img.shape
resized_crop_img = cv2.resize(crop_img, ( _alexNetSize, _alexNetSize ), interpolation = cv2.INTER_CUBIC)
old_h, old_w, channels = crop_img.shape
lmks_new2 = np.zeros([68,2])
lmks_new2[:,0] = lmks_new[:,0] * _alexNetSize / old_w
lmks_new2[:,1] = lmks_new[:,1] * _alexNetSize / old_h
#print _alexNetSize, old_w, old_h
return resized_crop_img, lmks_new2, bbox_red, lmks_flip
def replaceInFile(filep, before, after):
for line in fileinput.input(filep, inplace=True):
        print(line.replace(before, after), end='')  # each line already carries its own newline
def flip_lmk_idx(img, lmarks):
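    # Mirrors the x coordinates and reorders the 68 points with the global repLand
    # mapping so that left/right landmark indices are swapped consistently.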
# Flipping X values for landmarks \
lmarks[:,0] = img.shape[1] - lmarks[:,0]
# Creating flipped landmarks with new indexing
lmarks_flip = np.zeros((68,2))
for i in range(len(repLand)):
lmarks_flip[i,:] = lmarks[repLand[i]-1,:]
return lmarks_flip
def pose_to_LMs(pose_Rt):
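    # Projects the reference 3D landmarks (lm_m10.txt) through the 6DoF pose
    # (Rodrigues rotation + translation) and the intrinsic matrix PI to obtain
    # their 2D image locations.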
pose_Rt = np.reshape(pose_Rt, [6])
ref_lm = np.loadtxt('./lm_m10.txt', delimiter=',')
ref_lm_t = np.transpose(ref_lm)
numLM = ref_lm_t.shape[1]
#PI = np.array([[ 4.22519775e+03,0.00000000e+00,1.15000000e+02], [0.00000000e+00, 4.22519775e+03, 1.15000000e+02], [0, 0, 1]]);
PI = np.array([[ 2.88000000e+03, 0.00000000e+00, 1.12000000e+02], [0.00000000e+00, 2.88000000e+03, 1.12000000e+02], [0, 0, 1]]);
rvecs = pose_Rt[0:3]
tvec = np.reshape(pose_Rt[3:6], [3,1])
tsum = np.repeat(tvec,numLM,1)
rmat, jacobian = cv2.Rodrigues(rvecs, None)
transformed_lms = np.matmul(rmat, ref_lm_t) + tsum
transformed_lms = np.matmul(PI, transformed_lms)
transformed_lms[0,:] = transformed_lms[0,:]/transformed_lms[2,:]
transformed_lms[1,:] = transformed_lms[1,:]/transformed_lms[2,:]
lms = np.transpose(transformed_lms[:2,:])
return lms
def RotationMatrix(angle_x, angle_y, angle_z):
# get rotation matrix by rotate angle
phi = angle_x; # pitch
gamma = angle_y; # yaw
theta = angle_z; # roll
R_x = np.array([ [1, 0, 0] , [0, np.cos(phi), np.sin(phi)] , [0, -np.sin(phi), np.cos(phi)] ]);
R_y = np.array([ [np.cos(gamma), 0, -np.sin(gamma)] , [0, 1, 0] , [np.sin(gamma), 0, np.cos(gamma)] ]);
R_z = np.array([ [np.cos(theta), np.sin(theta), 0] , [-np.sin(theta), np.cos(theta), 0] , [0, 0, 1] ]);
R = np.matmul( R_x , np.matmul(R_y , R_z) );
return R
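# Quick sanity check (illustrative only, not part of the original file): a proper
# rotation matrix is orthonormal, so R.dot(R.T) should be close to the identity
# and det(R) close to 1.
# R = RotationMatrix(0.1, 0.2, 0.3)
# assert np.allclose(R.dot(R.T), np.eye(3), atol=1e-6)
# assert np.isclose(np.linalg.det(R), 1.0)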
def matrix2angle(R):
''' compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf
Args:
R: (3,3). rotation matrix
Returns:
x: yaw
y: pitch
z: roll
'''
# assert(isRotationMatrix(R))
    if R[2,0] != 1 and R[2,0] != -1:  # not a gimbal-lock configuration
#x = asin(R[2,0])
#y = atan2(R[2,1]/cos(x), R[2,2]/cos(x))
#z = atan2(R[1,0]/cos(x), R[0,0]/cos(x))
x = -asin(R[2,0])
#x = np.pi - x
y = atan2(R[2,1]/cos(x), R[2,2]/cos(x))
z = atan2(R[1,0]/cos(x), R[0,0]/cos(x))
else:# Gimbal lock
z = 0 #can be anything
if R[2,0] == -1:
x = np.pi/2
y = z + atan2(R[0,1], R[0,2])
else:
x = -np.pi/2
y = -z + atan2(-R[0,1], -R[0,2])
return x, y, z
def P2sRt(P):
''' decompositing camera matrix P.
Args:
P: (3, 4). Affine Camera Matrix.
Returns:
s: scale factor.
R: (3, 3). rotation matrix.
        t3d: (3,). 3d translation.
'''
#t2d = P[:2, 3]
t3d = P[:, 3]
R1 = P[0:1, :3]
R2 = P[1:2, :3]
s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2.0
r1 = R1/np.linalg.norm(R1)
r2 = R2/np.linalg.norm(R2)
r3 = np.cross(r1, r2)
R = np.concatenate((r1, r2, r3), 0)
return s, R, t3d
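# Illustrative round-trip (hypothetical values, not part of the original file): build an
# affine camera matrix from a known scale/rotation/translation and check that P2sRt
# recovers them.
# s0, R0, t0 = 2.0, RotationMatrix(0.1, -0.2, 0.3), np.array([1.0, 2.0, 3.0])
# P = np.hstack([s0 * R0, t0.reshape(3, 1)])
# s, R, t3d = P2sRt(P)   # expect s ~ 2.0, R ~ R0, t3d ~ t0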
| 31.981664
| 201
| 0.536626
|
1f3d6155472165e884a6535f66840d083c8da433
| 650
|
py
|
Python
|
symposion/proposals/migrations/0002_proposalsection_anonymous.py
|
pyohio/symposion
|
f8ec9c7e7daab4658061867d1294c1c126dd2919
|
[
"BSD-3-Clause"
] | null | null | null |
symposion/proposals/migrations/0002_proposalsection_anonymous.py
|
pyohio/symposion
|
f8ec9c7e7daab4658061867d1294c1c126dd2919
|
[
"BSD-3-Clause"
] | 5
|
2015-07-16T19:46:00.000Z
|
2018-03-11T05:58:48.000Z
|
symposion/proposals/migrations/0002_proposalsection_anonymous.py
|
pyohio/symposion
|
f8ec9c7e7daab4658061867d1294c1c126dd2919
|
[
"BSD-3-Clause"
] | 1
|
2017-01-27T21:18:26.000Z
|
2017-01-27T21:18:26.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-08-11 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_proposals', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='proposalsection',
name='anonymous',
field=models.BooleanField(default=False, help_text='If this option is switched on, reviewers will not be able to see the names of the proposers or coproposers of any proposal in this section.', verbose_name='Anonymous review'),
),
]
| 30.952381
| 239
| 0.675385
|
42cc2fc9fdf4440ec030c057168ec9e9d41f17ab
| 1,060
|
py
|
Python
|
August Leetcode/Reorder List.py
|
parikshitgupta1/leetcode
|
eba6c11740dc7597204af127c0f4c2163376294f
|
[
"MIT"
] | null | null | null |
August Leetcode/Reorder List.py
|
parikshitgupta1/leetcode
|
eba6c11740dc7597204af127c0f4c2163376294f
|
[
"MIT"
] | null | null | null |
August Leetcode/Reorder List.py
|
parikshitgupta1/leetcode
|
eba6c11740dc7597204af127c0f4c2163376294f
|
[
"MIT"
] | null | null | null |
class Solution:
def reorderList(self, head: ListNode) -> None:
"""
Do not return anything, modify head in-place instead.
:type head: ListNode
:rtype: void Do not return anything, modify head in-place instead.
"""
if not head or not head.next:
return
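        # Phase 1: advance slow/fast pointers to find the middle of the list; the
        # nodes after `slow` form the second half that gets reversed below.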
slow = head
fast = head.next
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
slow = slow.next
if fast.next:
fast = fast.next
# reverse the second half of the list
prev = slow
curr = slow.next
prev.next = None
while curr:
tmp = curr.next
curr.next = prev
prev = curr
curr = tmp
# turn the list into zigzag manner
trav = head
while fast.next:
tmp1 = trav.next
tmp2 = fast.next
trav.next = fast
fast.next = tmp1
trav = tmp1
fast = tmp2
| 27.894737
| 74
| 0.486792
|
95caf1d2fecd4b241b754224abd99a0f6bed3009
| 5,504
|
py
|
Python
|
rest_api/samples/JSON_demos/6D2P.py
|
Hitachi-Data-Systems/ivy
|
07a77c271cad7f682d7fbff497bf74a76ecd5378
|
[
"Apache-2.0"
] | 6
|
2016-09-12T16:23:53.000Z
|
2021-12-16T23:08:34.000Z
|
rest_api/samples/JSON_demos/6D2P.py
|
Hitachi-Data-Systems/ivy
|
07a77c271cad7f682d7fbff497bf74a76ecd5378
|
[
"Apache-2.0"
] | null | null | null |
rest_api/samples/JSON_demos/6D2P.py
|
Hitachi-Data-Systems/ivy
|
07a77c271cad7f682d7fbff497bf74a76ecd5378
|
[
"Apache-2.0"
] | null | null | null |
import ivyrest
ivy = ivyrest.IvyObj("localhost")
host_list = ["sun159"]
select_list = [ {'serial_number' : '83011441' } ]
ivy.set_output_folder_root(".")
print(ivy.test_folder())
ivy.set_test_name("6D2P")
summary_filename = ivy.test_folder() + "/all/" + ivy.test_name() + ".all=all.summary.csv"
lun_filename = ivy.test_folder() + "/available_test_LUNs.csv"
print(summary_filename)
print(lun_filename)
ivy.hosts_luns(hosts = host_list, select = select_list)
ivy.create_rollup(name="Workload")
ivy.create_rollup(name="Workload+host")
ivy.create_rollup(name="Port")
#//////////Random Read Miss////////////////
ioparams = {
'iosequencer' : 'random_steady',
'IOPS' : 'max',
'fractionRead' : 1.0,
'VolumeCoverageFractionStart' : 0.0,
'VolumeCoverageFractionEnd' : 1.0,
'blocksize' : 8,
'maxtags' : 64
}
ivy.set_io_sequencer_template(**ioparams)
ivy.create_workload(name = "RandomReadMiss", iosequencer = "random_steady", parameters="")
for i in [64,44,30,20,14,9,6,4,3,2,1]:
ivy.edit_rollup(name = "all=all", parameters = " maxTags=" + str(i))
ivy.go(stepname="RR_maxTags_" + str(i), subinterval_seconds=10, warmup_seconds=120, measure_seconds=180)
summary_filename = ivy.test_folder() + "/all/" + ivy.test_name() + ".all=all.summary.csv"
lun_filename = ivy.test_folder() + "/available_test_LUNs.csv"
max_IOPS = float(ivy.csv_cell_value(filename = summary_filename, row = int(ivy.csv_rows(filename = summary_filename)) - 1, col = "Overall IOPS"))/float(ivy.csv_rows(filename = lun_filename))
for j in [0.7,0.45,0.25,0.1,0.01]:
ivy.edit_rollup(name = "all=all", parameters = "IOPS="+ str(max_IOPS * j) +", maxTags=1")
ivy.go(stepname="RR_maxTags_1(" + str(j*100) + "%)", subinterval_seconds=10, warmup_seconds=120, measure_seconds=180)
ivy.delete_workload (name = "RandomReadMiss")
#//////////Random Write Miss////////////////
ioparams = {
'iosequencer' : 'random_steady',
'IOPS' : 'max',
'fractionRead' : 0.0,
'VolumeCoverageFractionStart' : 0.0,
'VolumeCoverageFractionEnd' : 1.0,
'blocksize' : 8,
'maxtags' : 16
}
ivy.set_io_sequencer_template(**ioparams)
ivy.create_workload(name = "RandomWriteMiss", iosequencer = "random_steady", parameters="")
for i in [16,13,11,9,7,6,5,4,3,2,1]:
ivy.edit_rollup(name = "all=all", parameters = " maxTags=" + str(i))
ivy.go(stepname="RR_maxTags_" + str(i), subinterval_seconds=10, warmup_seconds=120, measure_seconds=180)
summary_filename = ivy.test_folder() + "/all/" + ivy.test_name() + ".all=all.summary.csv"
lun_filename = ivy.test_folder() + "/available_test_LUNs.csv"
max_IOPS = float(ivy.csv_cell_value(filename = summary_filename, row = int(ivy.csv_rows(filename = summary_filename)) - 1, col = "Overall IOPS"))/float(ivy.csv_rows(filename = lun_filename))
for j in [0.7,0.45,0.25,0.1,0.01]:
ivy.edit_rollup(name = "all=all", parameters = "IOPS="+ str(max_IOPS * j) +", maxTags=1")
ivy.go(stepname="RR_maxTags_1(" + str(j*100) + "%)", subinterval_seconds=10, warmup_seconds=120, measure_seconds=180)
ivy.delete_workload (name = "RandomWriteMiss")
#//////////Sequential Read////////////////
ioparams = {
'iosequencer' : 'sequential',
'IOPS' : 'max',
'fractionRead' : 1.0,
'VolumeCoverageFractionStart' : 0.0,
'VolumeCoverageFractionEnd' : 1.0,
'blocksize' : 256,
'maxtags' : 32
}
ivy.set_io_sequencer_template(**ioparams)
ivy.create_workload(name = "SequentialRead", iosequencer = "sequential", parameters="")
for i in [32,24,17,13,9,7,5,4,3,2,1]:
ivy.edit_rollup(name = "all=all", parameters = " maxTags=" + str(i))
ivy.go(stepname="RR_maxTags_" + str(i), subinterval_seconds=10, warmup_seconds=120, measure_seconds=180)
summary_filename = ivy.test_folder() + "/all/" + ivy.test_name() + ".all=all.summary.csv"
lun_filename = ivy.test_folder() + "/available_test_LUNs.csv"
max_IOPS = float(ivy.csv_cell_value(filename = summary_filename, row = int(ivy.csv_rows(filename = summary_filename)) - 1, col = "Overall IOPS"))/float(ivy.csv_rows(filename = lun_filename))
for j in [0.7,0.45,0.25,0.1,0.01]:
ivy.edit_rollup(name = "all=all", parameters = "IOPS="+ str(max_IOPS * j) +", maxTags=1")
ivy.go(stepname="RR_maxTags_1(" + str(j*100) + "%)", subinterval_seconds=10, warmup_seconds=120, measure_seconds=180)
ivy.delete_workload (name = "SequentialRead")
#//////////Sequential Write////////////////
ioparams = {
'iosequencer' : 'sequential',
'IOPS' : 'max',
'fractionRead' : 0.0,
'VolumeCoverageFractionStart' : 0.0,
'VolumeCoverageFractionEnd' : 1.0,
'blocksize' : 256,
'maxtags' : 32
}
ivy.set_io_sequencer_template(**ioparams)
ivy.create_workload(name = "SequentialWrite", iosequencer = "sequential", parameters="")
for i in [32,24,17,13,9,7,5,4,3,2,1]:
ivy.edit_rollup(name = "all=all", parameters = " maxTags=" + str(i))
ivy.go(stepname="RR_maxTags_" + str(i), subinterval_seconds=10, warmup_seconds=120, measure_seconds=180)
summary_filename = ivy.test_folder() + "/all/" + ivy.test_name() + ".all=all.summary.csv"
lun_filename = ivy.test_folder() + "/available_test_LUNs.csv"
max_IOPS = float(ivy.csv_cell_value(filename = summary_filename, row = int(ivy.csv_rows(filename = summary_filename)) - 1, col = "Overall IOPS"))/float(ivy.csv_rows(filename = lun_filename))
for j in [0.7,0.45,0.25,0.1,0.01]:
ivy.edit_rollup(name = "all=all", parameters = "IOPS="+ str(max_IOPS * j) +", maxTags=1")
ivy.go(stepname="RR_maxTags_1(" + str(j*100) + "%)", subinterval_seconds=10, warmup_seconds=120, measure_seconds=180)
ivy.delete_workload (name = "SequentialWrite")
| 39.035461
| 190
| 0.697311
|
fa789d41f455e0df69a210aba89da0bb04ccdd0d
| 1,525
|
py
|
Python
|
ImageDenoising/lib/model.py
|
jiunbae/ITE4053
|
873d53493b7588f67406e0e6ed0e74e5e3f957bc
|
[
"MIT"
] | 5
|
2019-06-20T09:54:04.000Z
|
2021-06-15T04:22:49.000Z
|
ImageDenoising/lib/model.py
|
jiunbae/ITE4053
|
873d53493b7588f67406e0e6ed0e74e5e3f957bc
|
[
"MIT"
] | null | null | null |
ImageDenoising/lib/model.py
|
jiunbae/ITE4053
|
873d53493b7588f67406e0e6ed0e74e5e3f957bc
|
[
"MIT"
] | 1
|
2019-04-19T04:52:34.000Z
|
2019-04-19T04:52:34.000Z
|
from tensorflow.keras.utils import Sequence
from tensorflow.keras import optimizers as optim
from network import DenoisingNetwork
from utils.callback import CustomCallback
class DenoisingModel(object):
def __init__(self, mode: str):
self.klass = DenoisingNetwork
self.model = self.klass(mode)
def train(self,
train_generator: Sequence,
val_generator: Sequence,
config: object, epochs: int) \
-> None:
optimizer = optim.Adam(lr=config.lr,
decay=config.lr_decay)
self.klass.compile(self.model,
optimizer=optimizer,
loss=self.klass.loss,
metric=self.klass.metric)
self.model.fit_generator(
train_generator,
epochs=epochs,
steps_per_epoch=len(train_generator),
validation_data=val_generator,
validation_steps=100,
workers=4,
use_multiprocessing=True,
callbacks=[
# TensorBoard(log_dir=config.log, write_graph=True, write_images=True),
# CustomCallback(log_dir=config.log, interval=config.interval,
# train=train_generator[0], test=[v for v in val_generator]),
]
)
def predict(self, inputs):
result, *_ = self.model.predict(inputs)
return result
def save(self, path: str):
self.model.save(path)
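# Minimal usage sketch (hypothetical generators and config object; not part of this module):
# model = DenoisingModel(mode='...')            # the mode string is forwarded to DenoisingNetwork
# model.train(train_gen, val_gen, config=cfg, epochs=50)
# denoised = model.predict(noisy_batch)
# model.save('denoiser.h5')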
| 31.122449
| 92
| 0.572459
|
db300953a077196b1f33f8f1e858abcc65d681f4
| 1,406
|
py
|
Python
|
webvpn.py
|
bin2021125/auto-daily-health-report
|
63b4809e97f595ba5b17bec80cee0f32c0b717d8
|
[
"MIT"
] | null | null | null |
webvpn.py
|
bin2021125/auto-daily-health-report
|
63b4809e97f595ba5b17bec80cee0f32c0b717d8
|
[
"MIT"
] | null | null | null |
webvpn.py
|
bin2021125/auto-daily-health-report
|
63b4809e97f595ba5b17bec80cee0f32c0b717d8
|
[
"MIT"
] | null | null | null |
import requests
import json
import sys
from bs4 import BeautifulSoup
# request with webvpn.xmu.edu.cn
def with_webvpn(session, header, vpn_username, vpn_password):
try:
login_page = session.get("https://webvpn.xmu.edu.cn/login",
headers=header).text
soup = BeautifulSoup(login_page, 'lxml')
need_captcha = soup.select('input[name="needCaptcha"]')[0]['value']
if need_captcha == 'true':
print(json.dumps({
"status": "failed",
"reason": "WebVPN Login failed (captcha required)"
}, indent=4))
sys.exit(1)
captcha_id = soup.select('input[name="captcha_id"]')[0]['value']
vpn_login_url = "https://webvpn.xmu.edu.cn/do-login"
login_data = {
"auth_type": "local",
"username": vpn_username,
"password": vpn_password,
"sms_code":"",
"captcha": "",
"needCaptcha": False,
"captcha_id": captcha_id
}
session.post(vpn_login_url,
login_data,
headers=header,
allow_redirects=True)
return session
except KeyError:
print(json.dumps({
"status": "failed",
"reason": "WebVPN Login failed (server error)"
}, indent=4))
sys.exit(1)
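# Sketch of a possible call site (names illustrative, not from this repository):
# session = requests.Session()
# header = {'User-Agent': 'Mozilla/5.0'}
# session = with_webvpn(session, header, vpn_username='student_id', vpn_password='secret')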
| 29.914894
| 75
| 0.527738
|
f03948bc77d8b1fd40835385a00bbc894db43cfc
| 895
|
py
|
Python
|
rl_quad/rl_scripts/train/quadcopter-stable-baselines-ddpg.py
|
vivekagra/Biplane-Quadrotor
|
afe69216494842f5bfe16cbcc0cdcc6ef0de7769
|
[
"BSD-3-Clause"
] | null | null | null |
rl_quad/rl_scripts/train/quadcopter-stable-baselines-ddpg.py
|
vivekagra/Biplane-Quadrotor
|
afe69216494842f5bfe16cbcc0cdcc6ef0de7769
|
[
"BSD-3-Clause"
] | null | null | null |
rl_quad/rl_scripts/train/quadcopter-stable-baselines-ddpg.py
|
vivekagra/Biplane-Quadrotor
|
afe69216494842f5bfe16cbcc0cdcc6ef0de7769
|
[
"BSD-3-Clause"
] | null | null | null |
import gym
import time
import numpy as np, pandas as pd
import matplotlib.pyplot as plt
from stable_baselines.ddpg.policies import MlpPolicy
from stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec
from stable_baselines import DDPG
from rl_quad.environment.continous import QuadEnvCont
env = QuadEnvCont()
n_actions = env.action_space.shape[-1]
param_noise = None
action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))
model = DDPG(MlpPolicy, env, verbose=1, param_noise=param_noise, action_noise=action_noise, tensorboard_log="/home/ayush/Projects/rl_quad/training/logs/")
model.learn(total_timesteps=100000)
model.save("quad-ddpg-v1-100k")
model.learn(total_timesteps=100000)
model.save("quad-ddpg-v1-200k")
model.learn(total_timesteps=200000)
model.save("quad-ddpg-v1-400k")
| 34.423077
| 154
| 0.820112
|
653f3d126c9950c17fb6dd172757205541017a4a
| 164
|
py
|
Python
|
solutions/python3/1009.py
|
sm2774us/amazon_interview_prep_2021
|
f580080e4a6b712b0b295bb429bf676eb15668de
|
[
"MIT"
] | 42
|
2020-08-02T07:03:49.000Z
|
2022-03-26T07:50:15.000Z
|
solutions/python3/1009.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | null | null | null |
solutions/python3/1009.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | 40
|
2020-02-08T02:50:24.000Z
|
2022-03-26T15:38:10.000Z
|
class Solution:
def bitwiseComplement(self, N: int, M = 0, m = 0) -> int:
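        # The recursion builds the smallest all-ones mask M = 2^m - 1 that covers N
        # (M grows 1, 3, 7, ... until M >= N); XOR with that mask flips every
        # significant bit of N, which is exactly its bitwise complement.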
return N ^ M if M and M >= N else self.bitwiseComplement(N, M + 2 ** m, m + 1)
| 54.666667
| 86
| 0.579268
|
f02927b79773fed353148dec47bddd43b4a71d8a
| 2,541
|
py
|
Python
|
NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/coordinates/builtin_frames/galactic.py
|
sahirsharma/Martian
|
062e9b47849512863c16713811f347ad7e121b56
|
[
"MIT"
] | null | null | null |
NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/coordinates/builtin_frames/galactic.py
|
sahirsharma/Martian
|
062e9b47849512863c16713811f347ad7e121b56
|
[
"MIT"
] | null | null | null |
NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/coordinates/builtin_frames/galactic.py
|
sahirsharma/Martian
|
062e9b47849512863c16713811f347ad7e121b56
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ... import units as u
from ..angles import Angle
from ..representation import SphericalRepresentation
from ..baseframe import BaseCoordinateFrame, RepresentationMapping
# these are needed for defining the NGP
from .fk5 import FK5
from .fk4 import FK4NoETerms
class Galactic(BaseCoordinateFrame):
"""
Galactic Coordinates.
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
l : `Angle`, optional, must be keyword
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `Angle`, optional, must be keyword
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
"""
frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'l'),
RepresentationMapping('lat', 'b')],
'cartesian': [RepresentationMapping('x', 'w'),
RepresentationMapping('y', 'u'),
RepresentationMapping('z', 'v')]
}
frame_specific_representation_info['unitspherical'] = \
frame_specific_representation_info['spherical']
default_representation = SphericalRepresentation
# North galactic pole and zeropoint of l in FK4/FK5 coordinates. Needed for
# transformations to/from FK4/5
# These are from the IAU's definition of galactic coordinates
_ngp_B1950 = FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree)
_lon0_B1950 = Angle(123, u.degree)
# These are *not* from Reid & Brunthaler 2004 - instead, they were
# derived by doing:
#
# >>> FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree).transform_to(FK5)
#
# This gives better consistency with other codes than using the values
# from Reid & Brunthaler 2004 and the best self-consistency between FK5
# -> Galactic and FK5 -> FK4 -> Galactic. The lon0 angle was found by
# optimizing the self-consistency.
_ngp_J2000 = FK5(ra=192.8594812065348*u.degree, dec=27.12825118085622*u.degree)
_lon0_J2000 = Angle(122.9319185680026, u.degree)
| 39.703125
| 83
| 0.679654
|
d56b0eb15af312264b478aff3c44536a57ec3342
| 5,489
|
py
|
Python
|
evaluation/crate/run_docker_images.py
|
seveirbian/gear-old
|
8d3529a9bf42e652a9d7475c9d14e9a6afc69a76
|
[
"Apache-2.0"
] | null | null | null |
evaluation/crate/run_docker_images.py
|
seveirbian/gear-old
|
8d3529a9bf42e652a9d7475c9d14e9a6afc69a76
|
[
"Apache-2.0"
] | null | null | null |
evaluation/crate/run_docker_images.py
|
seveirbian/gear-old
|
8d3529a9bf42e652a9d7475c9d14e9a6afc69a76
|
[
"Apache-2.0"
] | null | null | null |
import sys
# package need to be installed, pip install docker
import docker
import time
import yaml
import os
import random
import subprocess
import signal
import urllib2
import shutil
import xlwt
# package need to be installed, pip install crate
from crate import client as crate_client
auto = False
private_registry = "202.114.10.146:9999/"
apppath = ""
# run paraments
hostPort = 4200
localVolume = ""
pwd = os.path.split(os.path.realpath(__file__))[0]
runEnvironment = ["CRATE_HEAP_SIZE=1g", ]
runPorts = {"4200/tcp": hostPort,}
runVolumes = {}
runWorking_dir = ""
runCommand = "crate -Cnetwork.host=_site_ -Cdiscovery.type=single-node"
waitline = ""
# result
result = [["tag", "finishTime"], ]
class Runner:
def __init__(self, images):
self.images_to_pull = images
def check(self):
# detect whether the file exists, if true, delete it
if os.path.exists("./images_run.txt"):
os.remove("./images_run.txt")
def run(self):
self.check()
client = docker.from_env()
# if don't give a tag, then all image under this registry will be pulled
repos = self.images_to_pull[0]["repo"]
for repo in repos:
tags = self.images_to_pull[1][repo]
for tag in tags:
private_repo = private_registry + repo + ":" + tag
if localVolume != "":
if os.path.exists(localVolume) == False:
os.makedirs(localVolume)
print "start running: ", private_repo
# create a random name
runName = '%d' % (random.randint(1,100000000))
# get present time
startTime = time.time()
# run images
container = client.containers.create(image=private_repo, environment=runEnvironment,
ports=runPorts, volumes=runVolumes, working_dir=runWorking_dir,
command=runCommand, name=runName, detach=True,
cpu_period=100000, cpu_quota=150000, mem_limit="2g", )
container.start()
while True:
if time.time() - startTime > 600:
break
try:
connection = crate_client.connect("http://localhost:4200", username="crate")
cursor = connection.cursor()
cursor.execute('''CREATE TABLE GAMES
(ID INT PRIMARY KEY NOT NULL,
NAME STRING);''')
print "successfully create table games!"
cursor.execute(
"""INSERT INTO GAMES (ID, NAME)
VALUES (?, ?)""",
(1, "Three kingdoms"))
print "successfully insert!"
cursor.execute("UPDATE GAMES set NAME = 'Dota2' where ID=1;")
print "successfully update!"
cursor.execute("SELECT ID, NAME from GAMES;")
rows = cursor.fetchall()
print rows
cursor.execute("DELETE from GAMES where ID=1;")
print "successfully delete!"
connection.close()
break
except:
time.sleep(0.1) # wait 100ms
pass
# print run time
finishTime = time.time() - startTime
print "finished in " , finishTime, "s\n"
try:
container.kill()
except:
print "kill fail!"
pass
container.remove(force=True)
# record the image and its Running time
result.append([tag, finishTime])
if auto != True:
raw_input("Next?")
else:
time.sleep(5)
if localVolume != "":
shutil.rmtree(localVolume)
class Generator:
def __init__(self, profilePath=""):
self.profilePath = profilePath
def generateFromProfile(self):
if self.profilePath == "":
print "Error: profile path is null"
with open(self.profilePath, 'r') as f:
self.images = yaml.load(f, Loader=yaml.FullLoader)
return self.images
def get_net_data():
netCard = "/proc/net/dev"
fd = open(netCard, "r")
for line in fd.readlines():
if line.find("enp0s3") >= 0:
field = line.split()
data = float(field[1]) / 1024.0 / 1024.0
fd.close()
return data
if __name__ == "__main__":
if len(sys.argv) == 2:
auto = True
generator = Generator(os.path.split(os.path.realpath(__file__))[0]+"/image_versions.yaml")
images = generator.generateFromProfile()
runner = Runner(images)
runner.run()
# create a workbook sheet
workbook = xlwt.Workbook()
sheet = workbook.add_sheet("run_time")
for row in range(len(result)):
for column in range(len(result[row])):
sheet.write(row, column, result[row][column])
workbook.save(os.path.split(os.path.realpath(__file__))[0]+"/run.xls")
| 30.494444
| 100
| 0.51248
|
1d213260204545351f2fed943f7184a06462d29c
| 2,279
|
py
|
Python
|
src/oscar/apps/customer/history.py
|
QueoLda/django-oscar
|
8dd992d82e31d26c929b3caa0e08b57e9701d097
|
[
"BSD-3-Clause"
] | 4,639
|
2015-01-01T00:42:33.000Z
|
2022-03-29T18:32:12.000Z
|
src/oscar/apps/customer/history.py
|
QueoLda/django-oscar
|
8dd992d82e31d26c929b3caa0e08b57e9701d097
|
[
"BSD-3-Clause"
] | 2,215
|
2015-01-02T22:32:51.000Z
|
2022-03-29T12:16:23.000Z
|
src/oscar/apps/customer/history.py
|
QueoLda/django-oscar
|
8dd992d82e31d26c929b3caa0e08b57e9701d097
|
[
"BSD-3-Clause"
] | 2,187
|
2015-01-02T06:33:31.000Z
|
2022-03-31T15:32:36.000Z
|
import json
from django.conf import settings
from oscar.core.loading import get_model
Product = get_model('catalogue', 'Product')
class CustomerHistoryManager:
cookie_name = settings.OSCAR_RECENTLY_VIEWED_COOKIE_NAME
cookie_kwargs = {
'max_age': settings.OSCAR_RECENTLY_VIEWED_COOKIE_LIFETIME,
'secure': settings.OSCAR_RECENTLY_VIEWED_COOKIE_SECURE,
'httponly': True,
}
max_products = settings.OSCAR_RECENTLY_VIEWED_PRODUCTS
@classmethod
def get(cls, request):
"""
Return a list of recently viewed products
"""
ids = cls.extract(request)
# Reordering as the ID order gets messed up in the query
product_dict = Product.objects.browsable().in_bulk(ids)
ids.reverse()
return [product_dict[product_id] for product_id in ids if product_id in product_dict]
@classmethod
def extract(cls, request, response=None):
"""
Extract the IDs of products in the history cookie
"""
ids = []
if cls.cookie_name in request.COOKIES:
try:
ids = json.loads(request.COOKIES[cls.cookie_name])
except ValueError:
# This can occur if something messes up the cookie
if response:
response.delete_cookie(cls.cookie_name)
else:
# Badly written web crawlers send garbage in double quotes
if not isinstance(ids, list):
ids = []
return ids
@classmethod
def add(cls, ids, new_id):
"""
Add a new product ID to the list of product IDs
"""
if new_id in ids:
ids.remove(new_id)
ids.append(new_id)
if len(ids) > cls.max_products:
ids = ids[len(ids) - cls.max_products:]
return ids
@classmethod
def update(cls, product, request, response):
"""
Updates the cookies that store the recently viewed products
removing possible duplicates.
"""
ids = cls.extract(request, response)
updated_ids = cls.add(ids, product.id)
response.set_cookie(
cls.cookie_name,
json.dumps(updated_ids),
**cls.cookie_kwargs)
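# Typical flow (sketch, not part of this module): a product detail view calls
# CustomerHistoryManager.update(product, request, response) after rendering, and the
# "recently viewed" block calls CustomerHistoryManager.get(request) to fetch the products.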
| 30.797297
| 93
| 0.604212
|
59556a383894f6a2011e4a187e43d1f8cf87eb82
| 1,721
|
py
|
Python
|
configs/body/upernet_swin_tiny2.py
|
SeHwanJoo/mmsegmentation_body
|
31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac
|
[
"Apache-2.0"
] | null | null | null |
configs/body/upernet_swin_tiny2.py
|
SeHwanJoo/mmsegmentation_body
|
31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac
|
[
"Apache-2.0"
] | null | null | null |
configs/body/upernet_swin_tiny2.py
|
SeHwanJoo/mmsegmentation_body
|
31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/models/upernet_swin_BN.py', 'dataset2.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py'
]
model = dict(
pretrained=\
'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth', # noqa
backbone=dict(
embed_dims=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=12,
use_abs_pos_embed=False,
drop_path_rate=0.3,
patch_norm=True,
pretrain_style='official'),
decode_head=dict(in_channels=[128, 256, 512, 1024], num_classes=3),
auxiliary_head=dict(in_channels=512, num_classes=3)
)
# AdamW optimizer, no weight decay for position embedding & layer norm
# in backbone
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.00006,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(
_delete_=True,
policy='cyclic',
target_ratio=(1, 0.01),
cyclic_times=1,
step_ratio_up=0.05)
# lr_config = dict(
# _delete_=True,
# policy='poly',
# warmup='linear',
# warmup_iters=400,
# warmup_ratio=1e-6,
# power=1.0,
# min_lr=0.0)
# lr_config = dict(
# _delete_=True,
# policy='step',
# warmup='linear',
# warmup_iters=400,
# warmup_ratio=1e-6,
# step=[60, 90])
evaluation = dict(metric='mDice')
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
checkpoint_config = dict(max_keep_ckpts=3)
| 27.31746
| 119
| 0.63161
|
96aad07299bbb955fe17721e65da58e2c54b0b95
| 496
|
py
|
Python
|
scripts/csv2shapefile.py
|
rmsare-lanl/dengue-example
|
0737e344d501473c9fb0fc1eec219141f9d59fd7
|
[
"MIT"
] | null | null | null |
scripts/csv2shapefile.py
|
rmsare-lanl/dengue-example
|
0737e344d501473c9fb0fc1eec219141f9d59fd7
|
[
"MIT"
] | null | null | null |
scripts/csv2shapefile.py
|
rmsare-lanl/dengue-example
|
0737e344d501473c9fb0fc1eec219141f9d59fd7
|
[
"MIT"
] | null | null | null |
"""
Convert a CSV with Lat, Lon coordinates to an ESRI Shapefile
"""
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
def csv_to_shapefile(in_filename):
out_filename = in_filename.replace('.csv', '.shp')
df = pd.read_csv(in_filename)
crs = {'init': 'epsg:4326'}
geometry = [Point(xy) for xy in zip(df.Longitude, df.Latitude)]
df = gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
df.to_file(driver='ESRI Shapefile', filename=out_filename)
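# Example usage (illustrative; assumes the CSV has 'Latitude' and 'Longitude' columns,
# as the code above expects):
# csv_to_shapefile('stations.csv')   # writes stations.shp next to the input file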
| 29.176471
| 67
| 0.71371
|
cb6789a21a72bd9b8261cd38227d77083ee6c1b5
| 5,606
|
py
|
Python
|
pulser/tests/test_waveforms.py
|
Yash-10/Pulser
|
afd16e0789b2621f00f6661df6d33ff27c44ac94
|
[
"Apache-2.0"
] | null | null | null |
pulser/tests/test_waveforms.py
|
Yash-10/Pulser
|
afd16e0789b2621f00f6661df6d33ff27c44ac94
|
[
"Apache-2.0"
] | null | null | null |
pulser/tests/test_waveforms.py
|
Yash-10/Pulser
|
afd16e0789b2621f00f6661df6d33ff27c44ac94
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from unittest.mock import patch
import numpy as np
import pytest
from pulser.json.coders import PulserEncoder, PulserDecoder
from pulser.parametrized import Variable, ParamObj
from pulser.waveforms import (ConstantWaveform, RampWaveform, BlackmanWaveform,
CustomWaveform, CompositeWaveform)
np.random.seed(20201105)
constant = ConstantWaveform(100, -3)
ramp = RampWaveform(2e3, 5, 19)
arb_samples = np.random.random(52)
custom = CustomWaveform(arb_samples)
blackman = BlackmanWaveform(40, np.pi)
composite = CompositeWaveform(blackman, constant, custom)
def test_duration():
with pytest.raises(TypeError, match='needs to be castable to an int'):
ConstantWaveform("s", -1)
RampWaveform([0, 1, 3], 1, 0)
with pytest.raises(ValueError, match='positive duration'):
ConstantWaveform(15, -10)
RampWaveform(-20, 3, 4)
with pytest.warns(UserWarning):
wf = BlackmanWaveform(np.pi*10, 1)
assert wf.duration == 31
assert custom.duration == 52
assert composite.duration == 192
def test_change_duration():
with pytest.raises(NotImplementedError):
custom.change_duration(53)
new_cte = constant.change_duration(103)
assert constant.duration == 100
assert new_cte.duration == 103
new_blackman = blackman.change_duration(30)
assert np.isclose(new_blackman.integral, blackman.integral)
assert new_blackman != blackman
new_ramp = ramp.change_duration(100)
assert new_ramp.duration == 100
assert new_ramp != ramp
def test_samples():
assert np.all(constant.samples == -3)
bm_samples = np.clip(np.blackman(40), 0, np.inf)
bm_samples *= np.pi / np.sum(bm_samples) / 1e-3
comp_samples = np.concatenate([bm_samples, np.full(100, -3), arb_samples])
assert np.all(np.isclose(composite.samples, comp_samples))
def test_integral():
assert np.isclose(blackman.integral, np.pi)
assert constant.integral == -0.3
assert ramp.integral == 24
def test_draw():
with patch('matplotlib.pyplot.show'):
composite.draw()
blackman.draw()
def test_eq():
assert constant == CustomWaveform(np.full(100, -3))
assert constant != -3
assert constant != CustomWaveform(np.full(48, -3))
def test_first_last():
assert constant.first_value == constant.last_value
assert ramp.first_value == 5
assert ramp.last_value == 19
assert blackman.first_value == 0
assert blackman.last_value == 0
assert composite.first_value == 0
assert composite.last_value == arb_samples[-1]
assert custom.first_value == arb_samples[0]
def test_hash():
assert hash(constant) == hash(tuple(np.full(100, -3)))
assert hash(ramp) == hash(tuple(np.linspace(5, 19, num=2000)))
def test_composite():
with pytest.raises(ValueError, match='Needs at least two waveforms'):
CompositeWaveform()
CompositeWaveform(composite)
CompositeWaveform([blackman, custom])
CompositeWaveform(10)
with pytest.raises(TypeError, match='not a valid waveform'):
CompositeWaveform(composite, 'constant')
assert composite.waveforms == [blackman, constant, custom]
wf = CompositeWaveform(blackman, constant)
msg = ('BlackmanWaveform(40 ns, Area: 3.14), ' +
'ConstantWaveform(100 ns, -3 rad/µs)')
assert wf.__str__() == f'Composite({msg})'
assert wf.__repr__() == f'CompositeWaveform(140 ns, [{msg}])'
def test_custom():
data = np.arange(16, dtype=float)
wf = CustomWaveform(data)
assert wf.__str__() == 'Custom'
assert wf.__repr__() == f'CustomWaveform(16 ns, {data!r})'
def test_ramp():
assert ramp.slope == 7e-3
def test_blackman():
with pytest.raises(TypeError):
BlackmanWaveform(100, np.array([1, 2]))
wf = BlackmanWaveform(100, -2)
assert np.isclose(wf.integral, -2)
assert np.all(wf.samples <= 0)
assert wf == BlackmanWaveform(100, np.array([-2]))
with pytest.raises(ValueError, match="matching signs"):
BlackmanWaveform.from_max_val(-10, np.pi)
wf = BlackmanWaveform.from_max_val(10, 2*np.pi)
assert np.isclose(wf.integral, 2*np.pi)
assert np.max(wf.samples) < 10
wf = BlackmanWaveform.from_max_val(-10, -np.pi)
assert np.isclose(wf.integral, -np.pi)
assert np.min(wf.samples) > -10
var = Variable("var", float)
wf_var = BlackmanWaveform.from_max_val(-10, var)
assert isinstance(wf_var, ParamObj)
var._assign(-np.pi)
assert wf_var.build() == wf
def test_ops():
assert -constant == ConstantWaveform(100, 3)
assert ramp * 2 == RampWaveform(2e3, 10, 38)
assert --custom == custom
assert blackman / 2 == BlackmanWaveform(40, np.pi / 2)
assert composite * 1 == composite
with pytest.raises(ZeroDivisionError):
constant / 0
def test_serialization():
for wf in [constant, ramp, custom, blackman, composite]:
s = json.dumps(wf, cls=PulserEncoder)
assert wf == json.loads(s, cls=PulserDecoder)
| 30.802198
| 79
| 0.68944
|
739804a3c26e85545f3b83f84e92dfcdbf55bd9c
| 625
|
py
|
Python
|
python_modules/libraries/dagster-azure/dagster_azure/adls2/__init__.py
|
johannkm/dagster-okteto
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
[
"Apache-2.0"
] | 1
|
2020-08-10T23:03:37.000Z
|
2020-08-10T23:03:37.000Z
|
python_modules/libraries/dagster-azure/dagster_azure/adls2/__init__.py
|
johannkm/dagster-okteto
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-azure/dagster_azure/adls2/__init__.py
|
johannkm/dagster-okteto
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
[
"Apache-2.0"
] | 1
|
2020-08-20T14:20:31.000Z
|
2020-08-20T14:20:31.000Z
|
from .fake_adls2_resource import FakeADLS2Resource, FakeADLS2ServiceClient
from .file_cache import ADLS2FileCache, adls2_file_cache
from .file_manager import ADLS2FileHandle, ADLS2FileManager
from .intermediate_store import ADLS2IntermediateStore
from .object_store import ADLS2ObjectStore
from .resources import adls2_file_manager, adls2_resource
from .system_storage import (
adls2_intermediate_storage,
adls2_plus_default_intermediate_storage_defs,
adls2_plus_default_storage_defs,
adls2_system_storage,
)
from .utils import create_adls2_client
# from .solids import ADLS2Coordinate, file_handle_to_adls2
| 39.0625
| 74
| 0.864
|
26963202e053536d16d24c99db962ce723a01a85
| 10,666
|
py
|
Python
|
butler/block_service/block_service.py
|
constantinpape/butler
|
b831457624f6f9c88a4f5905c78487eda5d274bb
|
[
"MIT"
] | null | null | null |
butler/block_service/block_service.py
|
constantinpape/butler
|
b831457624f6f9c88a4f5905c78487eda5d274bb
|
[
"MIT"
] | null | null | null |
butler/block_service/block_service.py
|
constantinpape/butler
|
b831457624f6f9c88a4f5905c78487eda5d274bb
|
[
"MIT"
] | null | null | null |
import os
import json
import threading
import time
from collections import deque
from ..base import BaseRequestHandler, BaseService, BaseClient
class BlockRequestHandler(BaseRequestHandler):
"""
    Parses worker messages (block requests and block confirmations) and formats the
    service's replies.
"""
def format_request(self, request):
"""
Format the request: single word will result in requesting a new block,
3 words will confirm this block
"""
request = request.split()
# if we have a length of 1, a new block is requested, otherwise a block is confirmed
if len(request) == 1:
return None
elif len(request) == 3:
if not all(reg.isdigit() for reg in request):
raise RuntimeError("Invalid block request")
return [int(req) for req in request]
else:
raise RuntimeError("Invalid block request")
def format_response(self, response):
"""
Format the response: return 0 or 1 for a confirmation request,
return the block offsets for a block request,
return "stop" if all requests are processed (None)
"""
if isinstance(response, bool):
response = "0" if response else "1"
elif isinstance(response, list):
assert len(response) == 3
response = " ".join(map(str, response))
elif response is None:
response = "stop"
else:
raise RuntimeError("Invalid response")
return response
class BlockService(BaseService):
"""
Provide workers with block offsets from requests.
Blocks need to be confirmed and the service periodically checks for
blocks that are over the time limit.
"""
def __init__(self, block_file, time_limit,
check_interval=60, num_retries=2, out_prefix=None):
# initialize the base class
super(BlockService, self).__init__()
self.logger.info(" Init BlockService:")
# time limit and check interval;
assert time_limit > check_interval
self.time_limit = time_limit
self.check_interval = check_interval
self.logger.info(" time_limit: %i and check_interval: %i" % (time_limit, check_interval))
# number of retries for failed blocks
self.num_retries = num_retries
self.try_counter = 0
self.logger.info(" num_retries: %i" % num_retries)
# the outpath to serialize failed blocks
self.out_prefix = out_prefix
if self.out_prefix is not None:
self.logger.info(" Will serialize failed blocks at: %s" % self.out_prefix)
else:
self.logger.warn(" Will not serialize failed blocks, you can serialize them by passing argument `out_prefix`")
# load the coordinates of the blocks that will be processed
# make a queue containing all block offsets
assert os.path.exists(block_file), block_file
with open(block_file, 'r') as f:
self.block_queue = deque(reversed(json.load(f)))
self.logger.info(" Loaded block list from: %s" % block_file)
self.logger.info(" Added %i blocks to queue" % len(self.block_queue))
# list to keep track of ids that are currently processed
self.in_progress = []
self.time_stamps = []
# list of offsets that have been processed
self.processed_list = []
# list of failed blocks
self.failed_blocks = []
self.lock = threading.Lock()
# start the background thread that checks for failed jobs
self.bg_thread = threading.Thread(target=self.check_progress_list, args=())
self.bg_thread.daemon = True
self.bg_thread.start()
def process_request(self, request):
self.logger.debug(" Process incomig request: %s" % str(request))
# request a new block
if request is None:
return self.request_block()
# confirm a block
else:
return self.confirm_block(request)
# check the progress list for blocks that have exceeded the time limit
def check_progress_list(self):
while self.server_is_running:
time.sleep(self.check_interval)
with self.lock:
now = time.time()
self.logger.debug(" Checking progress list for %i blocks" % len(self.time_stamps))
# find blocks that have exceeded the time limit
failed_block_ids = [ii for ii, time_stamp in enumerate(self.time_stamps)
if now - time_stamp > self.time_limit]
self.logger.info(" Found %i blocks over the time limit" % len(failed_block_ids))
# remove failed blocks and time stamps from in progress and
# append failed blocks to the failed list
# NOTE: we need to iterate in reverse order to delete the correct elements
for ii in sorted(failed_block_ids, reverse=True):
del self.time_stamps[ii]
self.failed_blocks.append(self.in_progress.pop(ii))
# request the next block to be processed
# if no more blocks are present, return None
def request_block(self):
# return a block offset if we still have blocks in the quee
if len(self.block_queue) > 0:
with self.lock:
block_offsets = self.block_queue.pop()
self.in_progress.append(block_offsets)
self.time_stamps.append(time.time())
self.logger.debug(" Returning block offsets: %s" % str(block_offsets))
# otherwise, wait for the ones in progress to finish (or be cancelled)
# then either repopulate, or exit
else:
# NOTE this must not be locked, otherwise
# we end up with a deadlock with the lock in `check_progress_list`
while self.in_progress:
time.sleep(self.check_interval)
continue
with self.lock:
# we need to check again inf the block queue is empty, because it might have been repopulated
# in the meantime already
if len(self.block_queue) > 0:
block_offsets = self.block_queue.pop()
self.in_progress.append(block_offsets)
self.time_stamps.append(time.time())
self.logger.debug(" Returning block offsets: %s" % str(block_offsets))
elif self.try_counter < self.num_retries and self.failed_blocks:
self.logger.info(" Exhausted block queue, repopulating for %i time" % self.try_counter)
block_offsets = self.repopulate_queue()
self.try_counter += 1
else:
block_offsets = None
if self.server_is_running:
self.logger.info(" Exhausted block queue, shutting down service")
self.serialize_status()
self.shutdown_server()
return block_offsets
# confirm that a block has been processed
def confirm_block(self, block_offset):
# see of the offset is still in the in-progress
# list and remove it.
# if not, the time limit was exceeded and something is most likely wrong
# with the block and the block was put on the failed block list
self.logger.debug(" Confirming block %s" % str(block_offset))
try:
with self.lock:
index = self.in_progress.index(block_offset)
del self.in_progress[index]
del self.time_stamps[index]
self.processed_list.append(block_offset)
success = True
self.logger.debug(" Block %s was processed properly." % str(block_offset))
except ValueError:
success = False
self.logger.debug(" Block %s is over time limit and was added to failed blocks" % str(block_offset))
return success
def repopulate_queue(self):
self.block_queue.extendleft(self.failed_blocks)
self.failed_blocks = []
block_offsets = self.block_queue.pop()
self.in_progress.append(block_offsets)
self.time_stamps.append(time.time())
self.logger.debug(" Returning block offsets: %s" % str(block_offsets))
return block_offsets
def serialize_status(self, from_interrupt=False):
"""
Serialize the status (failed blocks, processed blocks, in-progress blocks)
"""
if from_interrupt:
self.logger.info(" serialize_status called after interrupt")
else:
self.logger.info(" serialize_status called after regular shutdown")
if self.out_prefix is not None:
if self.failed_blocks:
out_failed_blocks = self.out_prefix + "failed_blocks.json"
self.logger.info(" Serialized list of failed blocks with %i entries to %s" %
(len(self.failed_blocks), out_failed_blocks))
with open(out_failed_blocks, 'w') as f:
json.dump(self.failed_blocks, f)
if self.processed_list:
out_processed_blocks = self.out_prefix + "processed_blocks.json"
self.logger.info(" Serialized list of processed blocks with %i entries to %s" %
(len(self.processed_list), out_processed_blocks))
with open(out_processed_blocks, 'w') as f:
json.dump(self.processed_list, f)
if self.in_progress:
out_in_progress = self.out_prefix + "inprogress_blocks.json"
self.logger.info(" Serialized list of in-progress blocks with %i entries to %s" %
(len(self.in_progress), out_in_progress))
with open(self.out_prefix + "inprogress_blocks.json", 'w') as f:
json.dump(self.in_progress, f)
class BlockClient(BaseClient):
"""
"""
def format_request(self, request):
"""
Format incoming request.
Must return string.
"""
return "1" if request is None else " ".join(map(str, request))
def format_response(self, response):
"""
Format incoming response.
"""
response = response.split()
# if the response has length 3, it consists of
# block coordinates
if len(response) == 3:
return list(map(int, response))
else:
response = response[0]
            return None if response == 'stop' else bool(int(response))
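# Wire-protocol sketch (informal, derived from the handlers above): a worker sends a
# single token to request work and gets back either "x y z" block offsets or "stop";
# after finishing a block it sends the three offsets back and the service answers with
# a one-character confirmation code.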
| 41.992126
| 122
| 0.6036
|
ac6feb5caf955768b4843cdc8f59dabcf7873c56
| 398
|
py
|
Python
|
sktimeline/views/__init__.py
|
aaronmauro/sktimeline
|
3a83b8973959c2d6bf49021cd8efb0ead81b9395
|
[
"MIT"
] | 2
|
2016-06-14T17:02:42.000Z
|
2016-10-24T14:49:25.000Z
|
sktimeline/views/__init__.py
|
aaronmauro/sktimeline
|
3a83b8973959c2d6bf49021cd8efb0ead81b9395
|
[
"MIT"
] | 3
|
2016-06-27T13:20:53.000Z
|
2017-03-18T14:21:27.000Z
|
sktimeline/views/__init__.py
|
aaronmauro/sktimeline
|
3a83b8973959c2d6bf49021cd8efb0ead81b9395
|
[
"MIT"
] | 2
|
2016-06-14T17:03:05.000Z
|
2016-09-01T14:18:44.000Z
|
from sktimeline import *
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash("Please login")
return redirect(url_for('login_page'))
return wrap
def page_not_found():
return render_template("errors/404.html")
import admin
import dashboard
import general
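# Illustrative usage of the login_required decorator above (assumption: a Flask route
# defined in one of the imported view modules; the route name and template are made up):
#
#   @app.route('/dashboard')
#   @login_required
#   def dashboard_page():
#       return render_template('dashboard.html')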
| 19.9
| 50
| 0.625628
|
df3897fa9f1ae6fb255558c609f8d4e5c08a562e
| 4,288
|
py
|
Python
|
model/modules/shared_conv.py
|
LiXiaoli921/FOTS.PyTorch
|
9319dc767217e3efea6a2ee3403d92c344418154
|
[
"MIT"
] | 1
|
2019-02-11T11:25:38.000Z
|
2019-02-11T11:25:38.000Z
|
model/modules/shared_conv.py
|
huizhang0110/FOTS.PyTorch
|
8661f8847ad63356d15c86ae9c183766e4ff6885
|
[
"MIT"
] | null | null | null |
model/modules/shared_conv.py
|
huizhang0110/FOTS.PyTorch
|
8661f8847ad63356d15c86ae9c183766e4ff6885
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
import torch
import math
SPEEDUP_SCALE = 512
class SharedConv(nn.Module):
'''
shared convolutional layers
'''
def __init__(self, bbNet: nn.Module):
super(SharedConv, self).__init__()
self.backbone = bbNet
self.backbone.eval()
# Feature-merging branch
# self.toplayer = nn.Conv2d(2048, 256, kernel_size = 1, stride = 1, padding = 0) # Reduce channels
self.mergeLayers0 = DummyLayer()
self.mergeLayers1 = HLayer(2048 + 1024, 128)
self.mergeLayers2 = HLayer(128 + 512, 64)
self.mergeLayers3 = HLayer(64 + 256, 32)
self.mergeLayers4 = nn.Conv2d(32, 32, kernel_size = 3, padding = 1)
self.bn5 = nn.BatchNorm2d(32)
# Output Layer
self.textScale = 512
self.scoreMap = nn.Conv2d(32, 1, kernel_size = 1)
self.geoMap = nn.Conv2d(32, 4, kernel_size = 1)
self.angleMap = nn.Conv2d(32, 1, kernel_size = 1)
def forward(self, input):
input = self.__mean_image_subtraction(input)
# bottom up
f = self.__foward_backbone(input)
g = [None] * 4
h = [None] * 4
# i = 1
h[0] = self.mergeLayers0(f[0])
g[0] = self.__unpool(h[0])
# i = 2
h[1] = self.mergeLayers1(g[0], f[1])
g[1] = self.__unpool(h[1])
# i = 3
h[2] = self.mergeLayers2(g[1], f[2])
g[2] = self.__unpool(h[2])
# i = 4
h[3] = self.mergeLayers3(g[2], f[3])
g[3] = self.__unpool(h[3])
# final stage
final = self.mergeLayers4(h[3])
final = self.bn5(final)
final = F.relu(final)
score = self.scoreMap(final)
score = torch.sigmoid(score)
geoMap = self.geoMap(final)
# The output is normalised to 0-1 and encodes the distances to the top/bottom/left/right edges; the images are resized to 512 * 512 while the ground truth stores absolute pixel values, hence the * 512 below.
geoMap = torch.sigmoid(geoMap) * 512
angleMap = self.angleMap(final)
angleMap = (torch.sigmoid(angleMap) - 0.5) * math.pi / 2
geometry = torch.cat([geoMap, angleMap], dim = 1)
return score, geometry
def __foward_backbone(self, input):
conv2 = None
conv3 = None
conv4 = None
output = None # n * 7 * 7 * 2048
for name, layer in self.backbone.named_children():
input = layer(input)
if name == 'layer1':
conv2 = input
elif name == 'layer2':
conv3 = input
elif name == 'layer3':
conv4 = input
elif name == 'layer4':
output = input
break
return output, conv4, conv3, conv2
def __unpool(self, input):
_, _, H, W = input.shape
return F.interpolate(input, mode = 'bilinear', scale_factor = 2, align_corners = True)
def __mean_image_subtraction(self, images, means = [123.68, 116.78, 103.94]):
'''
image normalization
:param images: bs * w * h * channel
:param means:
:return:
'''
num_channels = images.data.shape[1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
for i in range(num_channels):
images.data[:, i, :, :] -= means[i]
return images
class DummyLayer(nn.Module):
def forward(self, input_f):
return input_f
class HLayer(nn.Module):
def __init__(self, inputChannels, outputChannels):
"""
:param inputChannels: channels of g+f
:param outputChannels:
"""
super(HLayer, self).__init__()
self.conv2dOne = nn.Conv2d(inputChannels, outputChannels, kernel_size = 1)
self.bnOne = nn.BatchNorm2d(outputChannels)
self.conv2dTwo = nn.Conv2d(outputChannels, outputChannels, kernel_size = 3, padding = 1)
self.bnTwo = nn.BatchNorm2d(outputChannels)
def forward(self, inputPrevG, inputF):
input = torch.cat([inputPrevG, inputF], dim = 1)
output = self.conv2dOne(input)
output = self.bnOne(output)
output = F.relu(output)
output = self.conv2dTwo(output)
output = self.bnTwo(output)
output = F.relu(output)
return output
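# Illustrative usage sketch, not part of the original module: wire SharedConv to a
# torchvision ResNet-50 backbone (its 256/512/1024/2048 stage channels match the merge
# layers above) and push a dummy 512 x 512 image through it. The torchvision import
# and the input size are assumptions made for this example only.
import torchvision
_backbone = torchvision.models.resnet50()
_model = SharedConv(_backbone)
_score, _geometry = _model(torch.randn(1, 3, 512, 512))
# _score has shape (1, 1, 128, 128); _geometry has shape (1, 5, 128, 128):
# four edge-distance channels followed by one angle channel.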
| 27.844156
| 107
| 0.565532
|
d4027717b6cf3557f730f3390681ca6bc8eb59b5
| 13,747
|
py
|
Python
|
GCSs_filtering_and_overlapping.py
|
sutormin94/TopoI_Topo-Seq_1
|
78f03bc3a6e2249b6f47fe838c2ba7d8d761b596
|
[
"MIT"
] | null | null | null |
GCSs_filtering_and_overlapping.py
|
sutormin94/TopoI_Topo-Seq_1
|
78f03bc3a6e2249b6f47fe838c2ba7d8d761b596
|
[
"MIT"
] | null | null | null |
GCSs_filtering_and_overlapping.py
|
sutormin94/TopoI_Topo-Seq_1
|
78f03bc3a6e2249b6f47fe838c2ba7d8d761b596
|
[
"MIT"
] | null | null | null |
###############################################
##Dmitry Sutormin, 2018##
##Topo-Seq analysis##
#The script takes raw GCSs data, returns only trusted GCSs,
#computes GCSs shared between different conditions,
#draws Venn diagrams of the set overlaps,
#and writes the GCSs sets to files.
###############################################
#######
#Packages to be imported.
#######
import os
import matplotlib.pyplot as plt
import collections
from matplotlib_venn import venn2, venn3, venn3_circles
import numpy as np
#######
#Variables to be defined.
#######
print('Variables to be defined:')
#Path to the working directory
pwd="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Science\TopoI-ChIP-Seq\TopA_ChIP-Seq\EcTopoI_G116S_M320V_Topo-Seq\TCS_motifs\\"
#Input data
path_to_replicas={'TopoI_Topo_Seq_1': {'Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_1_Ara_TCSs_called_thr_15.BroadPeak", 'No_Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_1_no_Ara_TCSs_called_thr_15.BroadPeak"},
'TopoI_Topo_Seq_2': {'Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_2_Ara_TCSs_called_thr_15.BroadPeak", 'No_Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_2_no_Ara_TCSs_called_thr_15.BroadPeak"},
'TopoI_Topo_Seq_3': {'Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_3_Ara_TCSs_called_thr_15.BroadPeak", 'No_Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_3_no_Ara_TCSs_called_thr_15.BroadPeak"}}
#Configuration of the output for the GCSs data in replicas.
Replicas_path_out="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Science\TopoI-ChIP-Seq\TopA_ChIP-Seq\EcTopoI_G116S_M320V_Topo-Seq\TCS_motifs\\Replicas_1_2_3_Tresholds_trusted_TCSs\\"
if not os.path.exists(Replicas_path_out):
os.makedirs(Replicas_path_out)
Set_name="Thr_15"
All_conditions_name="TopoI_Topo_Seq_123_TCSs_merged"
#Configuration of the output for GCSs trusted.
Out_path=Replicas_path_out + "TopoI_Topo_Seq_123_TCSs_called_thr_15.BroadPeak"
#Outpath for Venn diagrams.
plot_outpath=Replicas_path_out
#######
#Parsing raw GCSs coordinates, returns dictionary - GCSs_coordinate:N3E.
#######
def read_GCSs_file(GCSs_file_path):
GCSs_dict={}
GCSs_in=open(GCSs_file_path, 'r')
for line in GCSs_in:
line=line.rstrip().split('\t')
if line[0] not in ['GCSs_coordinate']:
GCSs_dict[int(line[1])]=float(line[6])
GCSs_in.close()
return GCSs_dict
#######
#Filter controls.
#######
def filter_controls(replicas_path_dict):
#For each replicate, keep only +Ara TCSs that are absent from the no-Ara control
TCSs_replicas_dict={}
for set_name, set_pair in replicas_path_dict.items(): #Iterates replicas
#Read files with raw GCSs
Raw_TCSs_dict_Ara=read_GCSs_file(set_pair['Ara'])
Raw_TCSs_dict_no_Ara=read_GCSs_file(set_pair['No_Ara'])
Raw_TCSs_dict_Ara_filtered={}
for TCS_coordinate, TCS_signal in Raw_TCSs_dict_Ara.items():
if TCS_coordinate not in Raw_TCSs_dict_no_Ara:
Raw_TCSs_dict_Ara_filtered[TCS_coordinate]=TCS_signal
TCSs_replicas_dict[set_name]=Raw_TCSs_dict_Ara_filtered
return TCSs_replicas_dict
#######
#Combines replicates into one GCSs table.
#######
def combine_replicates(replicas_path_dict, path_out, name):
#Filter controls.
TCSs_replicas_dict=filter_controls(replicas_path_dict)
#Merges a range of replicates
GCSs_replicas_dict={}
names_ar=[]
for key, Raw_GCSs_dict in TCSs_replicas_dict.items(): #Iterates replicas
names_ar.append(key)
for k, v in Raw_GCSs_dict.items(): #Iterates raw GCSs
#Table filling process initiation
if len(names_ar)==1:
GCSs_replicas_dict[k]=[v]
#Table filling process continuing (the table already contains at least one GCSs set)
else:
#If GCSs is already in the table
if k in GCSs_replicas_dict:
GCSs_replicas_dict[k].append(v)
#If this is the first occurrence of the element in a NON empty table.
else:
add_el=[]
for j in range(len(names_ar)-1):
add_el.append(0)
add_el.append(v)
GCSs_replicas_dict[k]=add_el
#If a table body line contains fewer elements than the header, pad it with zeros.
for k, v in GCSs_replicas_dict.items():
if len(v)<len(names_ar):
GCSs_replicas_dict[k].append(0)
#Sorting the list of dictionary keys.
GCSs_replicas_dict_sorted=collections.OrderedDict(sorted(GCSs_replicas_dict.items()))
#Writes merged GCSs data
fileout=open(f'{path_out}{name}_TCSs_replicates.txt', 'w')
#TCSs_out.write(f'{Genome_ID}\t{TCSs_list_F[i][0]}\t{TCSs_list_F[i][0]+1}\tTCS_{i}_F\t10\t.\t{TCSs_list_F[i][1]}\t-1\t-1\n')
#Header
fileout.write('TCSs_coordinate\t')
for i in names_ar:
fileout.write(str(i) + '_N3E\t')
fileout.write('\n')
#Body of the table
for k, v in GCSs_replicas_dict_sorted.items():
fileout.write(str(k) + '\t')
for i in GCSs_replicas_dict_sorted[k]:
fileout.write(str(i) + '\t')
fileout.write('\n')
fileout.close()
return GCSs_replicas_dict
#Prepares GCSs table for all conditions
#combine_replicates(path_to_replicas, Replicas_path_out, All_conditions_name)
#######
#Returns only trusted GCSs - observed at least 2 times within 3 biological replicates.
#Data organization: 1. coordinate of GCSs, 2.-4. N3E values for biological replicates 1-3
#######
def trusted(ar):
av_height=0
ind=0
for i in range(len(ar)):
if ar[i]>0:
ind=ind+1
av_height=av_height+ar[i]
if ind>1:
return av_height/ind
else:
return "No signal"
def trusted_GCSs_calling(GCSs_dictionary):
ar=[]
for k, v in GCSs_dictionary.items():
if trusted(v)!="No signal":
ar.append([k, trusted(v)])
return ar
def replicas_comb_trust_wrapper(replicas_dict, path_out, name):
print('Now working with: ' + str(name))
cur_GCSs_dict=combine_replicates(replicas_dict, path_out, name)
cur_GCSs_trusted=trusted_GCSs_calling(cur_GCSs_dict)
print('Number of trusted TCSs for ' + str(name) + ' : ' + str(len(cur_GCSs_trusted)))
return cur_GCSs_trusted
TCSs_trusted=replicas_comb_trust_wrapper(path_to_replicas, Replicas_path_out, All_conditions_name)
#Antibs_GCSs_sets=[Cfx, RifCfx, Micro, Oxo]
#######
#GCSs shared between pairs of antibiotics - Cfx, Micro and Oxo and between Cfx and RifCfx.
#######
def pairs_construction(ar1, ar2):
double=[]
for i in range(len(ar1)):
for j in range(len(ar2)):
if ar1[i][0]==ar2[j][0]:
double.append([ar1[i][0], ar1[i][1], ar2[j][1]]) #GCSs coordinate, N3E_1, N3E_2
return double
#Cfx_RifCfx_shared_GCSs=pairs_construction(Cfx, RifCfx)
#print('Number of GCSs shared between Cfx and RifCfx: ' + str(len(Cfx_RifCfx_shared_GCSs)) + '\n')
#
#Cfx_Micro_shared_GCSs=pairs_construction(Cfx, Micro)
#Cfx_Oxo_shared_GCSs=pairs_construction(Cfx, Oxo)
#Micro_Oxo_shared_GCSs=pairs_construction(Micro, Oxo)
#
#print('Number of GCSs shared between Cfx and Micro: ' + str(len(Cfx_Micro_shared_GCSs)))
#print('Number of GCSs shared between Cfx and Oxo: ' + str(len(Cfx_Oxo_shared_GCSs)))
#print('Number of GCSs shared between Micro and Oxo: ' + str(len(Micro_Oxo_shared_GCSs)) + '\n')
#
#Antibs_GCSs_sets_pair_shared=[Cfx_Micro_shared_GCSs, Cfx_Oxo_shared_GCSs, Micro_Oxo_shared_GCSs]
#######
#GCSs shared between 3 antibiotics
#######
def triple_construction(ar12, ar3):
triple=[]
for i in range(len(ar12)):
for j in range(len(ar3)):
if ar12[i][0]==ar3[j][0]:
triple.append([ar12[i][0], ar12[i][1], ar12[i][2], ar3[j][1]]) #GCSs coordinate, N3E_1, N3E_2, N3E_3
return triple
#Cfx_Micro_Oxo_shared_GCSs=triple_construction(Cfx_Micro_shared_GCSs, Oxo)
#print('Number of GCSs shared between Cfx, Micro and Oxo: ' + str(len(Cfx_Micro_Oxo_shared_GCSs)) +'\n')
#######
#Parses replicas, overlaps lists of GCSs, output data for Venn diagram construction.
#######
def replicates_parsing_to_list_and_overlapping(replicas_dict, name):
#Parsing
GCSs_dict={}
for k, v in replicas_dict.items(): #Iterate replicas.
GCSs_dict[k]=[]
for c, h in read_GCSs_file(v).items(): #Iterate GCSs.
GCSs_dict[k].append([c, h])
#Overlapping
one_two=pairs_construction(GCSs_dict[name+str(1)], GCSs_dict[name+str(2)])
one_three=pairs_construction(GCSs_dict[name+str(1)], GCSs_dict[name+str(3)])
two_three=pairs_construction(GCSs_dict[name+str(2)], GCSs_dict[name+str(3)])
one_two_three=triple_construction(one_two, GCSs_dict[name+str(3)])
#Venn input description (for 3 sets): one, two, three, one_two, one_three, two_three, one_two_three
venn_input=[len(GCSs_dict[name+str(1)])-len(one_two)-len(one_three)+len(one_two_three),
len(GCSs_dict[name+str(2)])-len(one_two)-len(two_three)+len(one_two_three),
len(one_two)-len(one_two_three),
len(GCSs_dict[name+str(3)])-len(one_three)-len(two_three)+len(one_two_three),
len(one_three)-len(one_two_three), len(two_three)-len(one_two_three),
len(one_two_three)]
return venn_input
#######
#Venn diagram represents GCSs sets overlapping.
#description2: one, two, one_two
#description3: one, two, one_two, three, one_three, two_three, one_two_three
#######
#venn_data_2=[len(Cfx)-len(Cfx_RifCfx_shared_GCSs), len(RifCfx)-len(Cfx_RifCfx_shared_GCSs), len(Cfx_RifCfx_shared_GCSs)]
#venn_data_3=[len(Cfx)-len(Cfx_Micro_shared_GCSs)-len(Cfx_Oxo_shared_GCSs)+len(Cfx_Micro_Oxo_shared_GCSs),
# len(Micro)-len(Cfx_Micro_shared_GCSs)-len(Micro_Oxo_shared_GCSs)+len(Cfx_Micro_Oxo_shared_GCSs),
# len(Cfx_Micro_shared_GCSs)-len(Cfx_Micro_Oxo_shared_GCSs),
# len(Oxo)-len(Cfx_Oxo_shared_GCSs)-len(Micro_Oxo_shared_GCSs)+len(Cfx_Micro_Oxo_shared_GCSs),
# len(Cfx_Oxo_shared_GCSs)-len(Cfx_Micro_Oxo_shared_GCSs),
# len(Micro_Oxo_shared_GCSs)-len(Cfx_Micro_Oxo_shared_GCSs),
# len(Cfx_Micro_Oxo_shared_GCSs)]
#venn2(subsets = (venn_data_2), set_labels = ("Ciprofloxacin", "Rifampicin Ciprofloxacin"))
#plt.savefig(plot_outpath+'Cfx_RifCfx_venn.png', dpi=320)
#plt.close()
#
#print("Cfx Micro Oxo subsets volumes: " + str(venn_data_3))
#venn3(subsets = (venn_data_3), set_labels = ('Ciprofloxacin', 'Microcin B17', 'Oxolinic acid'))
#plt.savefig(plot_outpath+'Cfx_Micro_Oxo_venn.png', dpi=320)
#plt.close()
#
#venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_cfx_replicas, 'Cfx_')), set_labels = ('Cfx_1', 'Cfx_2', 'Cfx_3'))
#plt.savefig(plot_outpath+'Cfx_replicas_venn.png', dpi=320)
#plt.close()
#
#venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_rifcfx_replicas, 'RifCfx_')), set_labels = ('RifCfx_1', 'RifCfx_2', 'RifCfx_3'))
#plt.savefig(plot_outpath+'RifCfx_replicas_venn.png', dpi=320)
#plt.close()
#
#venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_microcin_replicas, 'Micro_')), set_labels = ('Micro_1', 'Micro_2', 'Micro_3'))
#plt.savefig(plot_outpath+'Micro_replicas_venn.png', dpi=320)
#plt.close()
#
#venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_oxo_replicas, 'Oxo_')), set_labels = ('Oxo_1', 'Oxo_2', 'Oxo_3'))
#plt.savefig(plot_outpath+'Oxo_replicas_venn.png', dpi=320)
#plt.close()
#######
#GCSs sets average N3E estimation.
#######
def average_height(ar):
av_he=0
for i in range(len(ar)):
peak_he=np.mean(ar[i][1:])
av_he=av_he+peak_he
return av_he/len(ar)
#print('Cfx average GCSs N3E: ' + str(average_height(Cfx)))
#print('Micro average GCSs N3E: ' + str(average_height(Micro)))
#print('Oxo average GCSs N3E: ' + str(average_height(Oxo)))
#print('Cfx and Micro average GCSs N3E: ' + str(average_height(Cfx_Micro_shared_GCSs)))
#print('Cfx and Oxo average GCSs N3E: ' + str(average_height(Cfx_Oxo_shared_GCSs)))
#print('Micro and Oxo average GCSs N3E: ' + str(average_height(Micro_Oxo_shared_GCSs)))
#print('Cfx, Micro and Oxo average GCSs N3E: ' + str(average_height(Cfx_Micro_Oxo_shared_GCSs)) + '\n')
#######
#Write down files with GCSs lists - trusted or shared.
#######
#All_GCSs_sets={Cfx_path: Antibs_GCSs_sets[0],
# RifCfx_path: Antibs_GCSs_sets[1],
# Micro_path: Antibs_GCSs_sets[2],
# Oxo_path: Antibs_GCSs_sets[3],
# Cfx_Micro_path: Antibs_GCSs_sets_pair_shared[0],
# Cfx_Oxo_path: Antibs_GCSs_sets_pair_shared[1],
# Micro_Oxo_path: Antibs_GCSs_sets_pair_shared[2],
# Cfx_Micro_Oxo_path: Cfx_Micro_Oxo_shared_GCSs}
def write_GCSs_file(dictionary):
for k, v in dictionary.items(): #Iterates lists to be written
v.sort(key=lambda tup: tup[0]) #Sorting lists by the zero elements of the sublists they consist of
fileout=open(k, 'w')
fileout.write('GCSs_coordinate\tN3E\n')
for i in range(len(v)):
fileout.write(str(v[i][0]) + '\t' + str(np.mean(v[i][1:])) + '\n')
fileout.close()
return
#write_GCSs_file(All_GCSs_sets)
def write_Cfx_RifCfx_shared_GCSs(ar, path):
fileout=open(path, 'w')
fileout.write('GCSs_coordinate\tCfx_N3E\tRifCfx_N3E\n')
ar.sort(key=lambda tup: tup[0])
for i in range(len(ar)):
fileout.write(str(ar[i][0]) + '\t' + str(ar[i][1]) + '\t' + str(ar[i][2]) + '\n')
fileout.close()
return
#write_Cfx_RifCfx_shared_GCSs(Cfx_RifCfx_shared_GCSs, Cfx_RifCfx_shared_GCSs_path)
#
#print('Script ended its work successfully!')
| 40.671598
| 229
| 0.690623
|
c0ed8fe87c9d603b23082c844620fd772181f7f0
| 5,061
|
py
|
Python
|
src/tests/kremlin/book/conf.py
|
TakuKitamura/verimqtt-c
|
30109f66df126e5860f2329ce2ad3cfb7f12d9da
|
[
"MIT"
] | 340
|
2016-07-21T23:24:48.000Z
|
2022-02-16T22:23:01.000Z
|
src/tests/kremlin/book/conf.py
|
TakuKitamura/verimqtt-c
|
30109f66df126e5860f2329ce2ad3cfb7f12d9da
|
[
"MIT"
] | 208
|
2016-09-06T20:07:49.000Z
|
2022-03-03T20:22:22.000Z
|
src/tests/kremlin/book/conf.py
|
TakuKitamura/verimqtt-c
|
30109f66df126e5860f2329ce2ad3cfb7f12d9da
|
[
"MIT"
] | 53
|
2016-08-18T14:08:36.000Z
|
2022-02-25T21:55:50.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
#sys.path.insert(0, os.path.abspath('../fstar-mode.el/etc/'))
# -- Project information -----------------------------------------------------
project = u'The KreMLin user manual'
copyright = u'2018, Jonathan Protzenko'
author = u'Jonathan Protzenko'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
# 'fslit.sphinx4fstar',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TheKreMLinusermanualdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TheKreMLinusermanual.tex', u'The KreMLin user manual Documentation',
u'Jonathan Protzenko', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'thekremlinusermanual', u'The KreMLin user manual Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TheKreMLinusermanual', u'The KreMLin user manual Documentation',
author, 'TheKreMLinusermanual', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
fslit_include_fixme = True
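# Illustrative build command (assumption: run from the directory containing this
# conf.py; the output directory name is arbitrary):
#
#   sphinx-build -b html . _build/html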
| 31.04908
| 86
| 0.657578
|
f557cbeae5f90e13811b4c4df356e8bbaa299ab5
| 1,896
|
py
|
Python
|
SCIENTIFIC EXPEDITION/TheHiddenWord.py
|
kei-academic/CheckiO
|
9f4c1fa44704f302ce95f5d9e20c4fa0beda06c3
|
[
"MIT"
] | 1
|
2021-12-26T21:52:02.000Z
|
2021-12-26T21:52:02.000Z
|
SCIENTIFIC EXPEDITION/TheHiddenWord.py
|
kei-academic/CheckiO
|
9f4c1fa44704f302ce95f5d9e20c4fa0beda06c3
|
[
"MIT"
] | null | null | null |
SCIENTIFIC EXPEDITION/TheHiddenWord.py
|
kei-academic/CheckiO
|
9f4c1fa44704f302ce95f5d9e20c4fa0beda06c3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import itertools as it
def checkio(text, word):
horizontal = text.lower().replace(' ', '').split('\n')
for i, row in enumerate(horizontal, 1):
index = row.find(word)
if index >= 0:
return [i, index+1, i, index+len(word)]
vertical = [''.join(line) for line in it.zip_longest(*horizontal, fillvalue=' ')]
for i, col in enumerate(vertical, 1):
index = col.find(word)
if index >= 0:
return [index+1, i, index+len(word), i]
#alternative implementation, reached only if the word was not found above
def find_word_in_multiline(lines):
for row, line in enumerate(lines):
col = line.find(word)
if col != -1:
return True, row + 1, col + 1
else:
return False, 0, 0
cut = [line.lower().replace(' ', '') for line in text.splitlines()]
found, y, x = find_word_in_multiline(cut)
if found:
return [y, x, y, x + len(word) - 1]
else:
transposed_cut = [''.join(chars) for chars in it.zip_longest(*cut, fillvalue=' ')]
found, x, y = find_word_in_multiline(transposed_cut)
return [y, x, y + len(word) - 1, x]
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
assert checkio("""DREAMING of apples on a wall,
And dreaming often, dear,
I dreamed that, if I counted all,
-How many would appear?""", "ten") == [2, 14, 2, 16]
assert checkio("""He took his vorpal sword in hand:
Long time the manxome foe he sought--
So rested he by the Tumtum tree,
And stood awhile in thought.
And as in uffish thought he stood,
The Jabberwock, with eyes of flame,
Came whiffling through the tulgey wood,
And burbled as it came!""", "noir") == [4, 16, 7, 16]
print("Coding complete? Click 'Check' to earn cool rewards!")
| 37.92
| 90
| 0.584916
|
f3c4136186a40555bb52f8e9a0c412298bb2ba06
| 874
|
py
|
Python
|
libraries/botbuilder-ai/botbuilder/ai/qna/models/query_results.py
|
Fl4v/botbuilder-python
|
4003d713beb8fb986a01cfd11632eabc65858618
|
[
"MIT"
] | 388
|
2019-05-07T15:53:21.000Z
|
2022-03-28T20:29:46.000Z
|
libraries/botbuilder-ai/botbuilder/ai/qna/models/query_results.py
|
Fl4v/botbuilder-python
|
4003d713beb8fb986a01cfd11632eabc65858618
|
[
"MIT"
] | 1,286
|
2019-05-07T23:38:19.000Z
|
2022-03-31T10:44:16.000Z
|
libraries/botbuilder-ai/botbuilder/ai/qna/models/query_results.py
|
Fl4v/botbuilder-python
|
4003d713beb8fb986a01cfd11632eabc65858618
|
[
"MIT"
] | 168
|
2019-05-14T20:23:25.000Z
|
2022-03-16T06:49:14.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List
from msrest.serialization import Model
from .query_result import QueryResult
class QueryResults(Model):
""" Contains answers for a user query. """
_attribute_map = {
"answers": {"key": "answers", "type": "[QueryResult]"},
"active_learning_enabled": {"key": "activeLearningEnabled", "type": "bool"},
}
def __init__(
self, answers: List[QueryResult], active_learning_enabled: bool = None, **kwargs
):
"""
Parameters:
-----------
answers: The answers for a user query.
active_learning_enabled: The active learning enable flag.
"""
super().__init__(**kwargs)
self.answers = answers
self.active_learning_enabled = active_learning_enabled
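# Minimal illustrative construction, not part of the original module; an empty answer
# list is used so that nothing has to be assumed about QueryResult's fields.
_example = QueryResults(answers=[], active_learning_enabled=True)
# _example.answers == [] and _example.active_learning_enabled is True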
| 28.193548
| 88
| 0.643021
|
49c43a54b67120af551a701e0f40b42e7cf48bd1
| 8,694
|
py
|
Python
|
mysite/timesheets/views.py
|
xanderyzwich/Timesheets
|
15685ac7b786d3e66bd24e8a3a252f193ee8f49b
|
[
"MIT"
] | null | null | null |
mysite/timesheets/views.py
|
xanderyzwich/Timesheets
|
15685ac7b786d3e66bd24e8a3a252f193ee8f49b
|
[
"MIT"
] | 1
|
2019-06-11T21:23:49.000Z
|
2019-06-11T21:23:49.000Z
|
mysite/timesheets/views.py
|
xanderyzwich/Timesheets
|
15685ac7b786d3e66bd24e8a3a252f193ee8f49b
|
[
"MIT"
] | null | null | null |
"""Django views for the Timesheet application"""
import calendar
import datetime
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.template import loader
from .models import Task, Employee, App, Defect, Adhoc, Timesheet, TimesheetForm
# Views not tied to a model
def index(request):
"""Timesheet entry view utilizes Form defined in models.py"""
if request.method == 'POST':
form = TimesheetForm(request.POST)
if form.is_valid():
form.save()
else:
return render(request, 'timesheets/index.html', {'form': form})
return render(request, 'timesheets/index.html', {'form': TimesheetForm()})
def report(request, year=None, month=None, day=None):
"""Used to generate report of all labor in a given year, month or day"""
limited, time_string = time_limit(year, month, day)
context = {
'object': 'Timesheet',
'report': time_string,
'data': summary(limited),
'timesheet_list': limited,
'total': limited.aggregate(Sum('hours'))
}
return render(request, 'timesheets/timesheet.html', context)
# Views tied to models
# Listed alphabetically
# Adhoc Model Views
def adhocs(request):
"""List of all adhoc task entries"""
adhoc_list = Adhoc.objects.all()
data_list = list()
for item in adhoc_list:
hours = Timesheet.objects.filter(adhoc__id=item.id).aggregate(sum=Sum('hours')).get('sum')
if hours is None:
hours = 0
if int(item.hours_projected) > 0:
item.description += ' - ' + str(item.hours_projected) + ' hours'
data_list.append(ListItem(item.id, item.description, hours))
template = loader.get_template('timesheets/list.html')
context = {
'object_list': data_list,
'title': 'Adhoc Tasks',
'object_model': 'adhoc'
}
return HttpResponse(template.render(context, request))
def adhoc(request, adhoc_id, year=None, month=None, day=None):
"""Summary and data for a specific adhoc entry"""
adhoc = get_object_or_404(Adhoc, pk=adhoc_id)
limited, time_string = time_limit(year, month, day)
limited = limited.filter(adhoc__id=adhoc_id)
context = {
'object': adhoc,
'report': time_string,
'data': summary(limited),
'timesheet_list': limited,
'total': limited.aggregate(Sum('hours'))
}
return render(request, 'timesheets/timesheet.html', context)
# App Model Views
def apps(request):
"""List of all app entries"""
app_list = App.objects.all()
data_list = list()
for item in app_list:
hours = Timesheet.objects.filter(app__id=item.id).aggregate(sum=Sum('hours')).get('sum')
if hours is None:
hours = str(0)
data_list.append(ListItem(item.id, str(item.name), hours))
template = loader.get_template('timesheets/list.html')
context = {
'object_list': data_list,
'title': 'Supported Apps',
'object_model': 'app',
}
return HttpResponse(template.render(context, request))
def app(request, app_id, year=None, month=None, day=None):
"""Summary and data for a specific app"""
app = get_object_or_404(App, pk=app_id)
limited, time_string = time_limit(year, month, day)
limited = limited.filter(app__id=app_id)
context = {
'object': app,
'report': time_string,
'data': summary(limited),
'timesheet_list': limited,
'total': limited.aggregate(Sum('hours'))
}
return render(request, 'timesheets/timesheet.html', context)
# Defect Model Views
def defects(request):
"""List of all defect entries"""
defect_list = Defect.objects.all()
data_list = list()
for item in defect_list:
hours = Timesheet.objects.filter(defect__id=item.id).aggregate(sum=Sum('hours')).get('sum')
if hours is None:
hours = str(0)
data_list.append(ListItem(item.id, str(item.app) + ': ' + str(item.description), hours))
template = loader.get_template('timesheets/list.html')
context = {
'object_list': data_list,
'title': 'Supported Defects',
'object_model': 'defect',
}
return HttpResponse(template.render(context, request))
def defect(request, defect_id, year=None, month=None, day=None):
"""Summary and data for a specific defect"""
defect = get_object_or_404(Defect, pk=defect_id)
limited, time_string = time_limit(year, month, day)
limited = limited.filter(defect__id=defect_id)
context = {
'object': defect,
'report': time_string,
'data': summary(limited),
'timesheet_list': limited,
'total': limited.aggregate(Sum('hours'))
}
return render(request, 'timesheets/timesheet.html', context)
# Employee Model Views
def employees(request):
"""List of all support employees"""
employee_list = Employee.objects.all()
data_list = list()
for item in employee_list:
hours = Timesheet.objects.filter(emp__id=item.id).aggregate(sum=Sum('hours')).get('sum')
if hours is None:
hours = str(0)
data_list.append(ListItem(item.id, item.name(), hours))
template = loader.get_template('timesheets/list.html')
context = {
'object_list': data_list,
'title': 'Support Employees',
'object_model': 'employee',
}
return HttpResponse(template.render(context, request))
def employee(request, employee_id, year=None, month=None, day=None):
"""Summary and data for a specific employee"""
employee = get_object_or_404(Employee, pk=employee_id)
limited, time_string = time_limit(year, month, day)
limited = limited.filter(emp__id=employee_id)
context = {
'object': employee,
'report': time_string,
'data': summary(limited),
'timesheet_list': limited,
'total': limited.aggregate(Sum('hours'))
}
return render(request, 'timesheets/timesheet.html', context)
# Task Model Views
def tasks(request):
"""List of all tasks (includes adhoc and defect collective data)"""
task_list = Task.objects.all()
data_list = list()
for item in task_list:
hours = Timesheet.objects.filter(task__type=item.type).aggregate(sum=Sum('hours')).get('sum')
if hours is None:
hours = str(0)
data_list.append(ListItem(item.id, item.type, hours))
template = loader.get_template('timesheets/list.html')
context = {
'object_list': data_list,
'title': 'Support Tasks',
'object_model': 'task',
}
return HttpResponse(template.render(context, request))
def task(request, task_id, year=None, month=None, day=None):
"""Summary and data for a specific task (or the adhoc/defect collection)"""
task = get_object_or_404(Task, pk=task_id)
limited, time_string = time_limit(year, month, day)
limited = limited.filter(task__id=task_id)
context = {
'object': task,
'report': time_string,
'data': summary(limited),
'timesheet_list': limited,
'total': limited.aggregate(Sum('hours'))
}
return render(request, 'timesheets/timesheet.html', context)
# Summarize the result_set by employee
def summary(result_set):
"""Generate summary by employee from full data list"""
return result_set.values('emp__id', 'emp__first_name', 'emp__last_name').order_by().annotate(sum=Sum('hours'))
# filter the result_set by year, month, and day as requested
def time_limit(year, month, day):
"""Return timesheet entries for a given time period, also a time_string for display use"""
today = datetime.date.today()
time_string = ' report for '
if day is None:
if month is None:
if year is None:
timesheet_list = Timesheet.objects.filter(date__month=today.month, date__year=today.year)
time_string += "this month"
else:
timesheet_list = Timesheet.objects.filter(date__year=year)
time_string += str(year)
else:
timesheet_list = Timesheet.objects.filter(date__month=month, date__year=year)
time_string += calendar.month_name[month] + ' ' + str(year)
else:
timesheet_list = Timesheet.objects.filter(date__month=month, date__year=year, date__day=day)
time_string += str(day) + ' ' + calendar.month_name[month] + ' ' + str(year)
return timesheet_list, time_string
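# Illustrative behaviour (comments only): time_limit(2019, 6, None) filters Timesheet
# entries to June 2019 and yields the display string ' report for June 2019', while
# time_limit(None, None, None) falls back to the current month.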
class ListItem:
def __init__(self, id, description, total):
self.id = id
self.description = description
self.total = total
| 33.438462
| 114
| 0.648263
|
6b8afe151f0f02460b0b1ff56cfca93582383b11
| 8,120
|
py
|
Python
|
python/jittor/pool.py
|
cjld/jittor
|
2015d06c73bfc8aa4e1d06150bf30b463c9fce94
|
[
"Apache-2.0"
] | null | null | null |
python/jittor/pool.py
|
cjld/jittor
|
2015d06c73bfc8aa4e1d06150bf30b463c9fce94
|
[
"Apache-2.0"
] | null | null | null |
python/jittor/pool.py
|
cjld/jittor
|
2015d06c73bfc8aa4e1d06150bf30b463c9fce94
|
[
"Apache-2.0"
] | null | null | null |
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors:
# Guowei Yang <471184555@qq.com>
# Wenyang Zhou <576825820@qq.com>
# Meng-Hao Guo <guomenghao1997@gmail.com>
# Dun Liang <randonlang@gmail.com>.
#
# All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import jittor as jt
from jittor import init, Module
import numpy as np
import math
class Pool(Module):
def __init__(self, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False, op="maximum"):
assert dilation is None
assert return_indices is None
self.kernel_size = kernel_size
self.op = op
self.stride = stride if stride else kernel_size
self.padding = padding
self.ceil_mode = ceil_mode
def execute(self, x):
N,C,H,W = x.shape
if (self.ceil_mode == False):
h = (H+self.padding*2-self.kernel_size)//self.stride+1
w = (W+self.padding*2-self.kernel_size)//self.stride+1
else:
h = (H+self.padding*2-self.kernel_size + self.stride - 1)//self.stride+1
w = (W+self.padding*2-self.kernel_size + self.stride - 1)//self.stride+1
if (self.op == 'maximum' or self.op == 'minimum'):
if (self.op == 'maximum'):
op = 'max'
else:
op = 'min'
out = jt.code([N,C,h,w], x.dtype, [x],
cuda_src=f'''
__global__ static void kernel1(@ARGS_DEF) {{
@PRECALC
int p3 = threadIdx.x;
int s3 = blockDim.x;
int p2 = threadIdx.y + blockIdx.x * blockDim.y;
int s2 = blockDim.y * gridDim.x;
int i1 = blockIdx.y;
int i0 = blockIdx.z;
for (int i3 = p3; i3 < outshape3; i3 += s3)
for (int i2 = p2; i2 < outshape2; i2 += s2) {{
int k3 = i3*{self.stride}-{self.padding};
int k2 = i2*{self.stride}-{self.padding};
int k3_ = min(k3 + {self.kernel_size}, in0shape3);
int k2_ = min(k2 + {self.kernel_size}, in0shape2);
k3 = max(0, k3);
k2 = max(0, k2);
@out(i0, i1, i2, i3) = @in0(i0, i1, k2, k3);
for (int p = k2; p < k2_; ++p)
for (int q = k3; q < k3_; ++q)
@out(i0, i1, i2, i3) = {op}(@out(i0, i1, i2, i3), @in0(i0, i1, p, q));
}}
}}
int tx = min(1024, outshape3);
int ty = min(1024 / tx, outshape2);
int bx = (outshape2 - 1) / ty + 1;
int by = outshape1;
int bz = outshape0;
dim3 s1(bx, by, bz);
dim3 s2(tx, ty);
kernel1<<<s1, s2>>>(@ARGS);
''',
cuda_grad_src=[f'''
__global__ static void kernel3(@ARGS_DEF) {{
@PRECALC
int p3 = threadIdx.x;
int s3 = blockDim.x;
int p2 = threadIdx.y + blockIdx.x * blockDim.y;
int s2 = blockDim.y * gridDim.x;
int i1 = blockIdx.y;
int i0 = blockIdx.z;
for (int i3 = p3; i3 < poutshape3; i3 += s3)
for (int i2 = p2; i2 < poutshape2; i2 += s2) {{
int k3 = i3*{self.stride}-{self.padding};
int k2 = i2*{self.stride}-{self.padding};
int k3_ = min(k3 + {self.kernel_size}, in0shape3);
int k2_ = min(k2 + {self.kernel_size}, in0shape2);
k3 = max(0, k3);
k2 = max(0, k2);
int bo=1;
for (int p = k2; p < k2_ && bo; ++p)
for (int q = k3; q < k3_ && bo; ++q) {{
if (@pout(i0,i1,i2,i3) == @in0(i0,i1,p,q)) {{
atomicAdd(&@out(i0,i1,p,q), @dout(i0,i1,i2,i3));
bo=0;
}}
}}
}}
}}
cudaMemsetAsync(outp, 0, out->size);
int tx = min(1024, poutshape3);
int ty = min(1024 / tx, poutshape2);
int bx = (poutshape2 - 1) / ty + 1;
int by = poutshape1;
int bz = poutshape0;
dim3 s1_(bx, by, bz);
dim3 s2_(tx, ty);
kernel3<<<s1_, s2_>>>(@ARGS);
'''],
cpu_src=f'''
for (int i0=0; i0<outshape0; i0++)
for (int i1=0; i1<outshape1; i1++)
for (int i2=0; i2<outshape2; i2++)
for (int i3=0; i3<outshape3; i3++) {{
int k2 = i2*{self.stride}-{self.padding};
int k3 = i3*{self.stride}-{self.padding};
int k2_ = std::min(k2 + {self.kernel_size}, in0shape2);
int k3_ = std::min(k3 + {self.kernel_size}, in0shape3);
k2 = std::max(0, k2);
k3 = std::max(0, k3);
@out(i0, i1, i2, i3) = @in0(i0, i1, k2, k3);
for (int p = k2; p < k2_; ++p)
for (int q = k3; q < k3_; ++q)
@out(i0, i1, i2, i3) = std::{op}(@out(i0, i1, i2, i3), @in0(i0, i1, p, q));
}}
''',
cpu_grad_src = [f'''
for (int i=0; i<outshape0; i++)
for (int j=0; j<outshape1; j++)
for (int k=0; k<outshape2; k++)
for (int l=0; l<outshape3; l++) @out(i,j,k,l) = 0;
for (int i0=0; i0<poutshape0; i0++)
for (int i1=0; i1<poutshape1; i1++)
for (int i2=0; i2<poutshape2; i2++)
for (int i3=0; i3<poutshape3; i3++) {{
int k3 = i3*{self.stride}-{self.padding};
int k2 = i2*{self.stride}-{self.padding};
int k3_ = std::min(k3 + {self.kernel_size}, in0shape3);
int k2_ = std::min(k2 + {self.kernel_size}, in0shape2);
k3 = std::max(0, k3);
k2 = std::max(0, k2);
int bo=1;
for (int p = k2; p < k2_ && bo; ++p)
for (int q = k3; q < k3_ && bo; ++q) {{
if (@pout(i0,i1,i2,i3) == @in0(i0,i1,p,q)) {{
@out(i0,i1,p,q) += @dout(i0,i1,i2,i3);
bo=0;
}}
}}
}}
'''])
return out
else:
xx = x.reindex([N,C,h,w,self.kernel_size,self.kernel_size], [
"i0", # Nid
"i1", # Cid
f"i2*{self.stride}-{self.padding}+i4", # Hid
f"i3*{self.stride}-{self.padding}+i5", # Wid
])
return xx.reduce(self.op, [4,5])
def pool(x, size, op, padding, stride = 1):
return Pool(size, stride, padding, op=op)(x)
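# Illustrative usage sketch, not part of the original module: pool a random
# 2x3x32x32 Var with the Pool class and with the pool helper above. Building the
# input from numpy is an assumption for this example; any float32 Var of that shape
# would do.
_x = jt.array(np.random.rand(2, 3, 32, 32).astype("float32"))
_y = Pool(kernel_size=2, stride=2, padding=0, op="maximum")(_x) # shape [2, 3, 16, 16]
_z = pool(_x, 2, "minimum", padding=0, stride=2) # same shape, min pooling via the helper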
| 49.212121
| 127
| 0.375246
|
db1687b254fa5fa0c598f9a066a51763170649b9
| 1,688
|
py
|
Python
|
106/save1_passed.py
|
rayjustinhuang/BitesofPy
|
03b694c5259ff607621419d9677c5caff90a6057
|
[
"MIT"
] | null | null | null |
106/save1_passed.py
|
rayjustinhuang/BitesofPy
|
03b694c5259ff607621419d9677c5caff90a6057
|
[
"MIT"
] | null | null | null |
106/save1_passed.py
|
rayjustinhuang/BitesofPy
|
03b694c5259ff607621419d9677c5caff90a6057
|
[
"MIT"
] | null | null | null |
text = """
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
"""
vowels = 'aeiou'
def strip_vowels(text: str) -> (str, int):
"""Replace all vowels in the input text string by a star
character (*).
Return a tuple of (replaced_text, number_of_vowels_found)
So if this function is called like:
strip_vowels('hello world')
... it would return:
('h*ll* w*rld', 3)
The str/int types in the function defintion above are part
of Python's new type hinting:
https://docs.python.org/3/library/typing.html"""
newtext = list(text)
counter = 0
for i in range(len(newtext)):
# print(newtext[i])
if newtext[i].lower() in vowels:
newtext[i] = "*"
counter +=1
finaltext = ''.join(str(l) for l in newtext)
return (finaltext, counter)
print(strip_vowels(text))
| 33.098039
| 69
| 0.694313
|
d0b4874cf0754f0ec01d7e169a9e35e1a525775c
| 4,976
|
py
|
Python
|
openapi-python-client/openapi_client/models/incident_statistics_result_dto.py
|
yanavasileva/camunda-bpm-examples
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
[
"Apache-2.0"
] | null | null | null |
openapi-python-client/openapi_client/models/incident_statistics_result_dto.py
|
yanavasileva/camunda-bpm-examples
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
[
"Apache-2.0"
] | null | null | null |
openapi-python-client/openapi_client/models/incident_statistics_result_dto.py
|
yanavasileva/camunda-bpm-examples
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class IncidentStatisticsResultDto(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'incident_type': 'str',
'incident_count': 'int'
}
attribute_map = {
'incident_type': 'incidentType',
'incident_count': 'incidentCount'
}
def __init__(self, incident_type=None, incident_count=None, local_vars_configuration=None): # noqa: E501
"""IncidentStatisticsResultDto - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._incident_type = None
self._incident_count = None
self.discriminator = None
if incident_type is not None:
self.incident_type = incident_type
if incident_count is not None:
self.incident_count = incident_count
@property
def incident_type(self):
"""Gets the incident_type of this IncidentStatisticsResultDto. # noqa: E501
The type of the incident the number of incidents is aggregated for. See the [User Guide](https://docs.camunda.org/manual/7.13/user-guide/process-engine/incidents/#incident-types) for a list of incident types. # noqa: E501
:return: The incident_type of this IncidentStatisticsResultDto. # noqa: E501
:rtype: str
"""
return self._incident_type
@incident_type.setter
def incident_type(self, incident_type):
"""Sets the incident_type of this IncidentStatisticsResultDto.
The type of the incident the number of incidents is aggregated for. See the [User Guide](https://docs.camunda.org/manual/7.13/user-guide/process-engine/incidents/#incident-types) for a list of incident types. # noqa: E501
:param incident_type: The incident_type of this IncidentStatisticsResultDto. # noqa: E501
:type: str
"""
self._incident_type = incident_type
@property
def incident_count(self):
"""Gets the incident_count of this IncidentStatisticsResultDto. # noqa: E501
The total number of incidents for the corresponding incident type. # noqa: E501
:return: The incident_count of this IncidentStatisticsResultDto. # noqa: E501
:rtype: int
"""
return self._incident_count
@incident_count.setter
def incident_count(self, incident_count):
"""Sets the incident_count of this IncidentStatisticsResultDto.
The total number of incidents for the corresponding incident type. # noqa: E501
:param incident_count: The incident_count of this IncidentStatisticsResultDto. # noqa: E501
:type: int
"""
self._incident_count = incident_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IncidentStatisticsResultDto):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IncidentStatisticsResultDto):
return True
return self.to_dict() != other.to_dict()
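# Illustrative only, not part of the generated module: build and serialize one
# aggregate entry; the field values below are made up for demonstration.
_dto = IncidentStatisticsResultDto(incident_type="failedJob", incident_count=3)
# _dto.to_dict() == {'incident_type': 'failedJob', 'incident_count': 3}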
| 32.953642
| 230
| 0.628215
|
fa4cd739670d5163f60f9a9c4f0b0dc64b511ff2
| 11,471
|
py
|
Python
|
Bengali.AI Handwritten Grapheme Classification/image_augmentation/HengCherKeng.py
|
nixingyang/Kaggle-Face-Verification
|
b5f9908f4c23dc78b3e6b647c7add8f2b0d84663
|
[
"MIT"
] | null | null | null |
Bengali.AI Handwritten Grapheme Classification/image_augmentation/HengCherKeng.py
|
nixingyang/Kaggle-Face-Verification
|
b5f9908f4c23dc78b3e6b647c7add8f2b0d84663
|
[
"MIT"
] | null | null | null |
Bengali.AI Handwritten Grapheme Classification/image_augmentation/HengCherKeng.py
|
nixingyang/Kaggle-Face-Verification
|
b5f9908f4c23dc78b3e6b647c7add8f2b0d84663
|
[
"MIT"
] | 5
|
2016-09-05T03:13:32.000Z
|
2018-11-29T07:55:23.000Z
|
import random
import cv2
import numpy as np
from albumentations import ImageOnlyTransform
def do_identity(image):
return image
def do_random_projective(image, magnitude=0.5):
mag = np.random.uniform(-1, 1) * 0.5 * magnitude
height, width = image.shape[:2]
x0, y0 = 0, 0
x1, y1 = 1, 0
x2, y2 = 1, 1
x3, y3 = 0, 1
mode = np.random.choice(['top', 'bottom', 'left', 'right'])
if mode == 'top':
x0 += mag
x1 -= mag
if mode == 'bottom':
x3 += mag
x2 -= mag
if mode == 'left':
y0 += mag
y3 -= mag
if mode == 'right':
y1 += mag
y2 -= mag
s = np.array([
[0, 0],
[1, 0],
[1, 1],
[0, 1],
]) * [[width, height]]
d = np.array([
[x0, y0],
[x1, y1],
[x2, y2],
[x3, y3],
]) * [[width, height]]
transform = cv2.getPerspectiveTransform(s.astype(np.float32),
d.astype(np.float32))
image = cv2.warpPerspective(image,
transform, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
return image
def do_random_perspective(image, magnitude=0.5):
mag = np.random.uniform(-1, 1, (4, 2)) * 0.25 * magnitude
height, width = image.shape[:2]
s = np.array([
[0, 0],
[1, 0],
[1, 1],
[0, 1],
])
d = s + mag
s *= [[width, height]]
d *= [[width, height]]
transform = cv2.getPerspectiveTransform(s.astype(np.float32),
d.astype(np.float32))
image = cv2.warpPerspective(image,
transform, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
return image
def do_random_scale(image, magnitude=0.5):
s = 1 + np.random.uniform(-1, 1) * magnitude * 0.5
height, width = image.shape[:2]
transform = np.array([
[s, 0, 0],
[0, s, 0],
], np.float32)
image = cv2.warpAffine(image,
transform, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
return image
def do_random_shear_x(image, magnitude=0.5):
sx = np.random.uniform(-1, 1) * magnitude
height, width = image.shape[:2]
transform = np.array([
[1, sx, 0],
[0, 1, 0],
], np.float32)
image = cv2.warpAffine(image,
transform, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
return image
def do_random_shear_y(image, magnitude=0.5):
sy = np.random.uniform(-1, 1) * magnitude
height, width = image.shape[:2]
transform = np.array([
[1, 0, 0],
[sy, 1, 0],
], np.float32)
image = cv2.warpAffine(image,
transform, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
return image
def do_random_stretch_x(image, magnitude=0.5):
sx = 1 + np.random.uniform(-1, 1) * magnitude
height, width = image.shape[:2]
transform = np.array([
[sx, 0, 0],
[0, 1, 0],
], np.float32)
image = cv2.warpAffine(image,
transform, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
return image
def do_random_stretch_y(image, magnitude=0.5):
sy = 1 + np.random.uniform(-1, 1) * magnitude
height, width = image.shape[:2]
transform = np.array([
[1, 0, 0],
[0, sy, 0],
], np.float32)
image = cv2.warpAffine(image,
transform, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
return image
def do_random_rotate(image, magnitude=0.5):
angle = 1 + np.random.uniform(-1, 1) * 30 * magnitude
height, width = image.shape[:2]
cx, cy = width // 2, height // 2
transform = cv2.getRotationMatrix2D((cx, cy), -angle, 1.0)
image = cv2.warpAffine(image,
transform, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
return image
#----
def do_random_grid_distortion(image, magnitude=0.5):
num_step = 5
distort = magnitude
# http://pythology.blogspot.sg/2014/03/interpolation-on-regular-distorted-grid.html
distort_x = [
1 + random.uniform(-distort, distort) for i in range(num_step + 1)
]
distort_y = [
1 + random.uniform(-distort, distort) for i in range(num_step + 1)
]
#---
height, width = image.shape[:2]
xx = np.zeros(width, np.float32)
step_x = width // num_step
prev = 0
for i, x in enumerate(range(0, width, step_x)):
start = x
end = x + step_x
if end > width:
end = width
cur = width
else:
cur = prev + step_x * distort_x[i]
xx[start:end] = np.linspace(prev, cur, end - start)
prev = cur
yy = np.zeros(height, np.float32)
step_y = height // num_step
prev = 0
for idx, y in enumerate(range(0, height, step_y)):
start = y
end = y + step_y
if end > height:
end = height
cur = height
else:
cur = prev + step_y * distort_y[idx]
yy[start:end] = np.linspace(prev, cur, end - start)
prev = cur
map_x, map_y = np.meshgrid(xx, yy)
map_x = map_x.astype(np.float32)
map_y = map_y.astype(np.float32)
image = cv2.remap(image,
map_x,
map_y,
interpolation=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
return image
# *** intensity ***
def do_random_contast(image, magnitude=0.5):
alpha = 1 + random.uniform(-1, 1) * magnitude
image = image.astype(np.float32) * alpha
image = np.clip(image, 0, 255)
return image
def do_random_block_fade(image, magnitude=0.5):
size = [0.1, magnitude]
height, width = image.shape[:2]
#get bounding box
m = image.copy()
cv2.rectangle(m, (0, 0), (height, width), 1, 5)
m = image < 0.5
if m.sum() == 0:
return image
m = np.where(m)
y0, y1, x0, x1 = np.min(m[0]), np.max(m[0]), np.min(m[1]), np.max(m[1])
w = x1 - x0
h = y1 - y0
if w * h < 10:
return image
ew, eh = np.random.uniform(*size, 2)
ew = int(ew * w)
eh = int(eh * h)
ex = np.random.randint(0, w - ew) + x0
ey = np.random.randint(0, h - eh) + y0
image[ey:ey + eh, ex:ex + ew] *= np.random.uniform(0.1, 0.5) #1 #
image = np.clip(image, 0, 255)
return image
# *** noise ***
# https://www.kaggle.com/ren4yu/bengali-morphological-ops-as-image-augmentation
def do_random_erode(image, magnitude=0.5):
s = int(round(1 + np.random.uniform(0, 1) * magnitude * 6))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, tuple((s, s)))
image = cv2.erode(image, kernel, iterations=1)
return image
def do_random_dilate(image, magnitude=0.5):
s = int(round(1 + np.random.uniform(0, 1) * magnitude * 6))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, tuple((s, s)))
image = cv2.dilate(image, kernel, iterations=1)
return image
def do_random_sprinkle(image, magnitude=0.5):
size = 16
num_sprinkle = int(round(1 + np.random.randint(10) * magnitude))
image = image.copy()
image_small = cv2.resize(image, dsize=None, fx=0.25, fy=0.25)
m = np.where(image_small > 0.25)
num = len(m[0])
if num == 0:
return image
s = size // 2
i = np.random.choice(num, num_sprinkle)
for y, x in zip(m[0][i], m[1][i]):
y = y * 4 + 2
x = x * 4 + 2
image[y - s:y + s, x - s:x + s] = 0 #0.5 #1 #
return image
#https://stackoverflow.com/questions/14435632/impulse-gaussian-and-salt-and-pepper-noise-with-opencv
def do_random_noise(image, magnitude=0.5):
height, width = image.shape[:2]
noise = np.random.uniform(-1, 1, (height, width)) * magnitude * 0.7
image = image + noise
image = np.clip(image, 0, 255)
return image
def do_random_line(image, magnitude=0.5):
num_lines = int(round(1 + np.random.randint(8) * magnitude))
# ---
height, width = image.shape[:2]
image = image.copy()
def line0():
return (0, 0), (width - 1, 0)
def line1():
return (0, height - 1), (width - 1, height - 1)
def line2():
return (0, 0), (0, height - 1)
def line3():
return (width - 1, 0), (width - 1, height - 1)
def line4():
x0, x1 = np.random.choice(width, 2)
return (x0, 0), (x1, height - 1)
def line5():
y0, y1 = np.random.choice(height, 2)
return (0, y0), (width - 1, y1)
for _ in range(num_lines):
p = np.array([1 / 4, 1 / 4, 1 / 4, 1 / 4, 1, 1])
func = np.random.choice([line0, line1, line2, line3, line4, line5],
p=p / p.sum())
(x0, y0), (x1, y1) = func()
color = np.random.uniform(0, 1)
thickness = np.random.randint(1, 5)
line_type = np.random.choice([cv2.LINE_AA, cv2.LINE_4, cv2.LINE_8])
cv2.line(image, (x0, y0), (x1, y1), color, thickness, line_type)
return image
class HengCherKeng(ImageOnlyTransform): # pylint: disable=abstract-method
"""Albumentations transform that applies one random geometric op, one random
morphology/noise op, and one random intensity op per call."""
def apply(self, img, **params):
image = img.copy()
for op in np.random.choice([
do_identity, lambda image: do_random_projective(image, 0.4),
lambda image: do_random_perspective(image, 0.4),
lambda image: do_random_scale(image, 0.4),
lambda image: do_random_rotate(image, 0.4),
lambda image: do_random_shear_x(image, 0.5),
lambda image: do_random_shear_y(image, 0.4),
lambda image: do_random_stretch_x(image, 0.5),
lambda image: do_random_stretch_y(image, 0.5),
lambda image: do_random_grid_distortion(image, 0.4)
], 1):
image = op(image)
for op in np.random.choice([
do_identity,
lambda image: do_random_erode(image, 0.4),
lambda image: do_random_dilate(image, 0.4),
lambda image: do_random_sprinkle(image, 0.5),
lambda image: do_random_line(image, 0.5),
], 1):
image = op(image)
for op in np.random.choice([
do_identity,
lambda image: do_random_contast(image, 0.5),
lambda image: do_random_block_fade(image, 0.5),
], 1):
image = op(image)
return image
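# Illustrative usage sketch, not part of the original module: apply the combined
# augmentation to a synthetic single-channel float image (the image size and value
# range below are assumptions for demonstration only).
_demo = (np.random.rand(137, 236) * 255).astype(np.float32)
_augmented = HengCherKeng(always_apply=True)(image=_demo)["image"]
# _augmented is an array with the same height and width as the input.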
| 28.6775
| 100
| 0.522884
|
714c71f2b1480ca24cc48adbb06f3c52d5ce7e90
| 4,327
|
py
|
Python
|
dateparser/data/date_translation_data/ee.py
|
Rodp63/dateparser
|
938a9573234679b603210bd47cc93eb258b1f1df
|
[
"BSD-3-Clause"
] | null | null | null |
dateparser/data/date_translation_data/ee.py
|
Rodp63/dateparser
|
938a9573234679b603210bd47cc93eb258b1f1df
|
[
"BSD-3-Clause"
] | null | null | null |
dateparser/data/date_translation_data/ee.py
|
Rodp63/dateparser
|
938a9573234679b603210bd47cc93eb258b1f1df
|
[
"BSD-3-Clause"
] | null | null | null |
info = {
"name": "ee",
"date_order": "MDY",
"january": [
"dzove",
"dzv"
],
"february": [
"dzd",
"dzodze"
],
"march": [
"ted",
"tedoxe"
],
"april": [
"afɔ",
"afɔfĩe"
],
"may": [
"dam",
"dama"
],
"june": [
"mas",
"masa"
],
"july": [
"sia",
"siamlɔm"
],
"august": [
"dea",
"deasiamime"
],
"september": [
"any",
"anyɔnyɔ"
],
"october": [
"kel",
"kele"
],
"november": [
"ade",
"adeɛmekpɔxe"
],
"december": [
"dzm",
"dzome"
],
"monday": [
"dzo",
"dzoɖa"
],
"tuesday": [
"bla",
"blaɖa"
],
"wednesday": [
"kuɖ",
"kuɖa"
],
"thursday": [
"yaw",
"yawoɖa"
],
"friday": [
"fiɖ",
"fiɖa"
],
"saturday": [
"mem",
"memleɖa"
],
"sunday": [
"kɔs",
"kɔsiɖa"
],
"am": [
"ŋdi"
],
"pm": [
"ɣetrɔ"
],
"year": [
"ƒe"
],
"month": [
"ɣleti"
],
"week": [
"kɔsiɖa ɖeka"
],
"day": [
"ŋkeke"
],
"hour": [
"gaƒoƒo"
],
"minute": [
"aɖabaƒoƒo"
],
"second": [
"sekend"
],
"relative-type": {
"0 day ago": [
"egbe"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 month ago": [
"ɣleti sia"
],
"0 second ago": [
"fifi"
],
"0 week ago": [
"kɔsiɖa sia"
],
"0 year ago": [
"ƒe sia"
],
"1 day ago": [
"etsɔ si va yi"
],
"1 month ago": [
"ɣleti si va yi"
],
"1 week ago": [
"kɔsiɖa si va yi"
],
"1 year ago": [
"ƒe si va yi"
],
"in 1 day": [
"etsɔ si gbɔna"
],
"in 1 month": [
"ɣleti si gbɔ na"
],
"in 1 week": [
"kɔsiɖa si gbɔ na"
],
"in 1 year": [
"ƒe si gbɔ na"
]
},
"relative-type-regex": {
"\\1 day ago": [
"ŋkeke (\\d+) si va yi",
"ŋkeke (\\d+) si wo va yi"
],
"\\1 hour ago": [
"gaƒoƒo (\\d+) si va yi",
"gaƒoƒo (\\d+) si wo va yi"
],
"\\1 minute ago": [
"aɖabaƒoƒo (\\d+) si va yi",
"aɖabaƒoƒo (\\d+) si wo va yi"
],
"\\1 month ago": [
"ɣleti (\\d+) si va yi",
"ɣleti (\\d+) si wo va yi"
],
"\\1 second ago": [
"sekend (\\d+) si va yi",
"sekend (\\d+) si wo va yi"
],
"\\1 week ago": [
"kɔsiɖa (\\d+) si va yi",
"kɔsiɖa (\\d+) si wo va yi"
],
"\\1 year ago": [
"le ƒe (\\d+) si va yi me",
"ƒe (\\d+) si va yi",
"ƒe (\\d+) si va yi me",
"ƒe (\\d+) si wo va yi"
],
"in \\1 day": [
"le ŋkeke (\\d+) me",
"le ŋkeke (\\d+) wo me"
],
"in \\1 hour": [
"le gaƒoƒo (\\d+) me",
"le gaƒoƒo (\\d+) wo me"
],
"in \\1 minute": [
"le aɖabaƒoƒo (\\d+) me",
"le aɖabaƒoƒo (\\d+) wo me"
],
"in \\1 month": [
"le ɣleti (\\d+) me",
"le ɣleti (\\d+) wo me"
],
"in \\1 second": [
"le sekend (\\d+) me",
"le sekend (\\d+) wo me"
],
"in \\1 week": [
"le kɔsiɖa (\\d+) me",
"le kɔsiɖa (\\d+) wo me"
],
"in \\1 year": [
"le ƒe (\\d+) me",
"le ƒe (\\d+) si gbɔna me"
]
},
"locale_specific": {
"ee-TG": {
"name": "ee-TG"
}
},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
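# --- Illustrative usage (not part of the original data module) ---
# A small sketch of how this locale table is consumed through dateparser
# (assumes the dateparser package is installed); "etsɔ si va yi" is listed
# above as the Ewe phrase for "1 day ago".
if __name__ == "__main__":
    import dateparser
    print(dateparser.parse("etsɔ si va yi", languages=["ee"]))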
| 18.491453
| 42
| 0.279871
|
1fcf0c05e74151f9105c5622e46dc1fe367ea395
| 191
|
py
|
Python
|
build/lib/biosamples_beta/biosample.py
|
Kerruba/biosamples_py_api
|
eac77ace86582d87f5be6273fbfd3b464cdd94e4
|
[
"MIT"
] | null | null | null |
build/lib/biosamples_beta/biosample.py
|
Kerruba/biosamples_py_api
|
eac77ace86582d87f5be6273fbfd3b464cdd94e4
|
[
"MIT"
] | null | null | null |
build/lib/biosamples_beta/biosample.py
|
Kerruba/biosamples_py_api
|
eac77ace86582d87f5be6273fbfd3b464cdd94e4
|
[
"MIT"
] | null | null | null |
from .biosamples_api import *
class BioSample:
type = "sample"
def __init__(self, doc):
self._doc = doc
self.relations = []
def get(self, prop_name):
return self._doc[prop_name]
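# --- Illustrative usage (not part of the original module) ---
# Minimal sketch assuming `doc` mirrors the JSON document returned by the
# BioSamples API; the accession below is a placeholder.
if __name__ == "__main__":
    doc = {"accession": "SAMEA0000001", "name": "example sample"}
    sample = BioSample(doc)
    print(sample.type, sample.get("accession"), sample.get("name"))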
| 14.692308
| 29
| 0.691099
|
dffbd8e299ff2d37f3b68614b49a434a1fd65069
| 2,322
|
py
|
Python
|
MNIST_framework/main.py
|
vanessadamario/data_efficiency
|
fc702d2241d737591163697332e3de1d0a0ed085
|
[
"MIT"
] | null | null | null |
MNIST_framework/main.py
|
vanessadamario/data_efficiency
|
fc702d2241d737591163697332e3de1d0a0ed085
|
[
"MIT"
] | null | null | null |
MNIST_framework/main.py
|
vanessadamario/data_efficiency
|
fc702d2241d737591163697332e3de1d0a0ed085
|
[
"MIT"
] | 1
|
2021-12-27T00:46:35.000Z
|
2021-12-27T00:46:35.000Z
|
import os
import argparse
from os.path import join
from runs import experiments
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
parser = argparse.ArgumentParser()
parser.add_argument('--experiment_index', type=int, required=True)
parser.add_argument('--offset_index', type=int, required=False)
parser.add_argument('--host_filesystem', type=str, required=True)
parser.add_argument('--run', type=str, required=True)
parser.add_argument('--check_train', type=bool, required=False)
# this is for repetitions
parser.add_argument('--repetition_folder_path', type=str, required=False)
FLAGS = parser.parse_args()
# where to save and retrieve the experiments
output_path = {
'om': '/om/user/vanessad/MNIST_framework',
'om2': '/om2/user/vanessad/MNIST_framework',
'vanessa': '/Users/vanessa/Desktop/test'}[FLAGS.host_filesystem]
# output_path = join(output_path, "results_repetitions_300/")
# we want to repeat the experiments
# for natural results:
# results/MNIST_natural_debug
output_path = join(output_path, 'results', FLAGS.repetition_folder_path + '/')
# output_path = join(output_path, 'results_500_epochs', 'results/')
os.makedirs(output_path, exist_ok=True)
if FLAGS.offset_index is None:
FLAGS.offset_index = 0
if FLAGS.check_train is None:
FLAGS.check_train = False
def generate_experiments(id):
""" Generation of the experiments. """
experiments.generate_experiments(output_path)
# TODO: modify the method generate experiment
def run_train(id):
""" Run the experiments. """
from runs.train import check_and_train
opt = experiments.get_experiment(output_path, id) # Experiment instance
check_and_train(opt, output_path, FLAGS.check_train)
def find_id(id):
""" Retrieve the information related to the ID experiment. """
experiments.get_experiment(output_path, id)
def remove_id(id):
from runs import remove_id as remove
remove.run(id, output_path)
def update_json(id):
    """ Write to the json when the experiments are completed,
    by changing the flag. """
    from runs.update import check_update
    check_update(output_path)
switcher = {
'train': run_train,
'find_id': find_id,
'gen': generate_experiments,
'remove': remove_id,
'update': update_json
}
switcher[FLAGS.run](FLAGS.experiment_index + FLAGS.offset_index)
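# --- Illustrative invocation (not part of the original script) ---
# The dispatch table above maps --run to one of the functions defined in this
# file; a hypothetical call sequence (paths and indices are placeholders):
#
#   python main.py --host_filesystem vanessa --run gen --experiment_index 0 \
#       --repetition_folder_path debug_run
#   python main.py --host_filesystem vanessa --run train --experiment_index 3 \
#       --repetition_folder_path debug_run
#
# The second command resolves to switcher['train'](3 + offset_index), with
# offset_index defaulting to 0 when --offset_index is omitted.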
| 29.025
| 78
| 0.741171
|
87e1126533e9fb7bb9570e0cd3ba569357d5086f
| 7,632
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
fxavier/xbusness
|
10e6455243a8b66775df6f3a11eec6e8a8bea3f7
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
fxavier/xbusness
|
10e6455243a8b66775df6f3a11eec6e8a8bea3f7
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
fxavier/xbusness
|
10e6455243a8b66775df6f3a11eec6e8a8bea3f7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-09-26 15:24
from decimal import Decimal
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.CharField(blank=True, default='Moçambique', max_length=255, null=True)),
('city', models.CharField(blank=True, max_length=255, null=True)),
('village', models.CharField(blank=True, max_length=255, null=True)),
('number', models.CharField(blank=True, max_length=255, null=True)),
('avenue', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'verbose_name': 'Address',
'verbose_name_plural': 'Addresses',
},
),
migrations.CreateModel(
name='Bank',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('account', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Brand',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField()),
('description', models.CharField(max_length=255)),
],
options={
'verbose_name_plural': 'categories',
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('additional_info', models.CharField(blank=True, max_length=1025, null=True)),
('email', models.CharField(blank=True, max_length=255, null=True)),
('created_at', models.DateTimeField(editable=False)),
('modified_at', models.DateTimeField()),
('address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address', to='core.address')),
('bank', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='bank', to='core.bank')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.CharField(max_length=32)),
],
),
migrations.CreateModel(
name='Unity',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symbol', models.CharField(max_length=3)),
('description', models.CharField(max_length=16)),
],
options={
'verbose_name_plural': 'Unities',
},
),
migrations.CreateModel(
name='Customer',
fields=[
('person_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.person')),
('credit_limit', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=15)),
],
bases=('core.person',),
),
migrations.CreateModel(
name='Provider',
fields=[
('person_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.person')),
('branch', models.CharField(blank=True, max_length=255, null=True)),
],
bases=('core.person',),
),
migrations.CreateModel(
name='Transporter',
fields=[
('person_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.person')),
],
options={
'verbose_name': 'Transporter',
},
bases=('core.person',),
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=30)),
('barcode', models.CharField(blank=True, max_length=16, null=True)),
('slug', models.SlugField()),
('description', models.CharField(max_length=255)),
('cost', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=16, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('sale', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=16, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('additional_info', models.CharField(blank=True, max_length=255, null=True)),
('minimum_stock', models.IntegerField(default=0)),
('current_stock', models.IntegerField(default=0)),
('brand', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='core.brand')),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='core.category')),
('unity', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='core.unity')),
],
),
migrations.AddField(
model_name='person',
name='phone',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='phone', to='core.phone'),
),
migrations.CreateModel(
name='Vehicle',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=255, null=True)),
('registration_plate', models.CharField(blank=True, max_length=255, null=True)),
('vehicle_transporter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vehicle', to='core.transporter')),
],
),
]
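# --- Illustrative application (not part of the generated migration) ---
# In a standard Django project layout this initial migration would be applied
# with the usual management command:
#
#   python manage.py migrate core 0001
#
# which creates the tables for the models declared above (Address, Bank,
# Brand, Category, Person, Phone, Unity, Customer, Provider, Transporter,
# Product and Vehicle).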
| 50.543046
| 188
| 0.585561
|
33e09919ed33f236d5e18c91bd9ca3e3deaa7b77
| 3,530
|
py
|
Python
|
rplugin/python3/floobits/floocommon/msg.py
|
Joe-Davidson1802/floobits-neovim
|
983b853a4e24537f337c4653c708428f65113162
|
[
"Apache-2.0"
] | null | null | null |
rplugin/python3/floobits/floocommon/msg.py
|
Joe-Davidson1802/floobits-neovim
|
983b853a4e24537f337c4653c708428f65113162
|
[
"Apache-2.0"
] | null | null | null |
rplugin/python3/floobits/floocommon/msg.py
|
Joe-Davidson1802/floobits-neovim
|
983b853a4e24537f337c4653c708428f65113162
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
from . import shared as G
assert G
str = str
from .exc_fmt import str_e
python2 = False
LOG_LEVELS = {
'DEBUG': 1,
'MSG': 2,
'WARN': 3,
'ERROR': 4,
}
LOG_LEVELS_REVERSE = {
1: 'DEBUG',
2: 'MSG',
3: 'WARN',
4: 'ERROR',
}
LOG_LEVEL = LOG_LEVELS['MSG']
LOG_FILE = os.path.join(G.BASE_DIR, 'msgs.floobits.log')
try:
fd = open(LOG_FILE, 'w')
fd.close()
except Exception as e:
pass
def safe_print(msg):
# Some environments can have trouble printing unicode:
# "When print() is not outputting to the terminal (being redirected to
# a file, for instance), print() decides that it does not know what
# locale to use for that file and so it tries to convert to ASCII instead."
# See: https://pythonhosted.org/kitchen/unicode-frustrations.html#frustration-3-inconsistent-treatment-of-output
try:
print(msg)
except UnicodeEncodeError:
print((msg.encode('utf-8')))
# Overridden by each editor
def editor_log(msg):
safe_print(msg)
def floobits_log(msg):
# TODO: ridiculously inefficient
try:
fd = open(LOG_FILE, 'ab')
fmsg = msg
try:
fmsg = fmsg.encode('utf-8')
except Exception:
pass
fd.write(fmsg)
fd.write(b'\n')
fd.close()
except Exception as e:
safe_print(str_e(e))
class MSG(object):
# Default to LOG_LEVEL MSG
def __init__(self, msg, timestamp=None, username=None, level=2):
self.msg = msg
self.timestamp = timestamp or time.time()
self.username = username
self.level = level
def display(self):
if self.level < LOG_LEVEL:
return
msg = str(self)
if G.LOG_TO_CONSOLE or G.CHAT_VIEW is None:
floobits_log(msg)
safe_print(msg)
else:
editor_log(msg)
def __str__(self):
if python2:
return self.__unicode__().encode('utf-8')
return self.__unicode__()
def __unicode__(self):
if self.username:
msg = '[{time}] {level}: <{user}> {msg}'
else:
msg = '[{time}] {level}: {msg}'
level = LOG_LEVELS_REVERSE.get(self.level, 'UNKNOWN').rjust(5)
try:
return str(msg).format(level=level, user=self.username, time=time.ctime(self.timestamp), msg=self.msg)
except UnicodeEncodeError:
return str(msg).format(level=level, user=self.username, time=time.ctime(self.timestamp), msg=self.msg.encode(
'utf-8'))
def msg_format(message, *args, **kwargs):
try:
message = str(message)
except UnicodeEncodeError:
message = str(message)
for arg in args:
try:
message += str(arg)
except UnicodeEncodeError:
message += arg
if kwargs:
message = message.format(**kwargs)
return message
def _log(message, level, *args, **kwargs):
if level >= LOG_LEVEL:
# TODO: kill MSG class and just format and print the thing right away
MSG(msg_format(message, *args, **kwargs), level=level).display()
def debug(message, *args, **kwargs):
_log(message, LOG_LEVELS['DEBUG'], *args, **kwargs)
def log(message, *args, **kwargs):
_log(message, LOG_LEVELS['MSG'], *args, **kwargs)
def warn(message, *args, **kwargs):
_log(message, LOG_LEVELS['WARN'], *args, **kwargs)
def error(message, *args, **kwargs):
_log(message, LOG_LEVELS['ERROR'], *args, **kwargs)
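# --- Illustrative usage (not part of the original module) ---
# Sketch of the level-filtered logging helpers defined above; the import path
# is an assumption based on this file's location in the plugin:
#
#   from floobits.floocommon import msg
#   msg.debug('noisy detail')                         # suppressed below LOG_LEVEL
#   msg.log('joined workspace {name}', name='demo')   # kwargs go through str.format
#   msg.warn('reconnecting...')
#   msg.error('lost connection')                      # always shown at default level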
| 24.859155
| 121
| 0.600567
|
5ddc7ff624356449f24ebaf08397f3debdf5eb14
| 33,425
|
py
|
Python
|
src/oci/apigateway/gateway_client.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2020-09-10T22:09:45.000Z
|
2021-12-24T17:00:07.000Z
|
src/oci/apigateway/gateway_client.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/apigateway/gateway_client.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry, circuit_breaker # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import apigateway_type_mapping
missing = Sentinel("Missing")
class GatewayClient(object):
"""
API for the API Gateway service. Use this API to manage gateways, deployments, and related items.
For more information, see
[Overview of API Gateway](/iaas/Content/APIGateway/Concepts/apigatewayoverview.htm).
"""
def __init__(self, config, **kwargs):
"""
Creates a new service client
:param dict config:
Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
the dict using :py:meth:`~oci.config.validate_config`
:param str service_endpoint: (optional)
The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
need to specify a service endpoint.
:param timeout: (optional)
The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
:type timeout: float or tuple(float, float)
:param signer: (optional)
The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
provided in the config parameter.
One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
:type signer: :py:class:`~oci.signer.AbstractBaseSigner`
:param obj retry_strategy: (optional)
A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
Any value provided at the operation level will override whatever is specified at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
:param obj circuit_breaker_strategy: (optional)
A circuit breaker strategy to apply to all calls made by this service client (i.e. at the client level).
This client uses :py:data:`~oci.circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY` as default if no circuit breaker strategy is provided.
The specifics of circuit breaker strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/circuit_breakers.html>`__.
:param function circuit_breaker_callback: (optional)
            Callback function to receive any exceptions triggered by the circuit breaker.
"""
validate_config(config, signer=kwargs.get('signer'))
if 'signer' in kwargs:
signer = kwargs['signer']
elif AUTHENTICATION_TYPE_FIELD_NAME in config:
signer = get_signer_from_authentication_type(config)
else:
signer = Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
base_client_init_kwargs = {
'regional_client': True,
'service_endpoint': kwargs.get('service_endpoint'),
'base_path': '/20190501',
'service_endpoint_template': 'https://apigateway.{region}.oci.{secondLevelDomain}',
'skip_deserialization': kwargs.get('skip_deserialization', False),
'circuit_breaker_strategy': kwargs.get('circuit_breaker_strategy', circuit_breaker.GLOBAL_CIRCUIT_BREAKER_STRATEGY)
}
if 'timeout' in kwargs:
base_client_init_kwargs['timeout'] = kwargs.get('timeout')
if base_client_init_kwargs.get('circuit_breaker_strategy') is None:
base_client_init_kwargs['circuit_breaker_strategy'] = circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY
self.base_client = BaseClient("gateway", config, signer, apigateway_type_mapping, **base_client_init_kwargs)
self.retry_strategy = kwargs.get('retry_strategy')
self.circuit_breaker_callback = kwargs.get('circuit_breaker_callback')
def change_gateway_compartment(self, gateway_id, change_gateway_compartment_details, **kwargs):
"""
Changes the gateway compartment.
:param str gateway_id: (required)
The ocid of the gateway.
:param oci.apigateway.models.ChangeGatewayCompartmentDetails change_gateway_compartment_details: (required)
Details of the target compartment.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request id for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/apigateway/change_gateway_compartment.py.html>`__ to see an example of how to use change_gateway_compartment API.
"""
resource_path = "/gateways/{gatewayId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_gateway_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"gatewayId": gateway_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_gateway_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_gateway_compartment_details)
def create_gateway(self, create_gateway_details, **kwargs):
"""
Creates a new gateway.
:param oci.apigateway.models.CreateGatewayDetails create_gateway_details: (required)
Details for the new gateway.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request id for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.apigateway.models.Gateway`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/apigateway/create_gateway.py.html>`__ to see an example of how to use create_gateway API.
"""
resource_path = "/gateways"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_gateway got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_gateway_details,
response_type="Gateway")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_gateway_details,
response_type="Gateway")
def delete_gateway(self, gateway_id, **kwargs):
"""
Deletes the gateway with the given identifier.
:param str gateway_id: (required)
The ocid of the gateway.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request id for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/apigateway/delete_gateway.py.html>`__ to see an example of how to use delete_gateway API.
"""
resource_path = "/gateways/{gatewayId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_gateway got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"gatewayId": gateway_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def get_gateway(self, gateway_id, **kwargs):
"""
Gets a gateway by identifier.
:param str gateway_id: (required)
The ocid of the gateway.
:param str opc_request_id: (optional)
The client request id for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.apigateway.models.Gateway`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/apigateway/get_gateway.py.html>`__ to see an example of how to use get_gateway API.
"""
resource_path = "/gateways/{gatewayId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_gateway got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"gatewayId": gateway_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Gateway")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Gateway")
def list_gateways(self, compartment_id, **kwargs):
"""
Returns a list of gateways.
:param str compartment_id: (required)
The ocid of the compartment in which to list resources.
:param str certificate_id: (optional)
Filter gateways by the certificate ocid.
:param str display_name: (optional)
A user-friendly name. Does not have to be unique, and it's changeable.
Example: `My new resource`
:param str lifecycle_state: (optional)
A filter to return only resources that match the given lifecycle state.
Example: `SUCCEEDED`
Allowed values are: "CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'. The default order depends on the sortBy value.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. You can provide one sort order (`sortOrder`).
Default order for `timeCreated` is descending. Default order for
`displayName` is ascending. The `displayName` sort order is case
sensitive.
Allowed values are: "timeCreated", "displayName"
:param str opc_request_id: (optional)
The client request id for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.apigateway.models.GatewayCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/apigateway/list_gateways.py.html>`__ to see an example of how to use list_gateways API.
"""
resource_path = "/gateways"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"certificate_id",
"display_name",
"lifecycle_state",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_gateways got unknown kwargs: {!r}".format(extra_kwargs))
if 'lifecycle_state' in kwargs:
lifecycle_state_allowed_values = ["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"]
if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
raise ValueError(
"Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["timeCreated", "displayName"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"certificateId": kwargs.get("certificate_id", missing),
"displayName": kwargs.get("display_name", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="GatewayCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="GatewayCollection")
def update_gateway(self, gateway_id, update_gateway_details, **kwargs):
"""
Updates the gateway with the given identifier.
:param str gateway_id: (required)
The ocid of the gateway.
:param oci.apigateway.models.UpdateGatewayDetails update_gateway_details: (required)
The information to be updated.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request id for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/apigateway/update_gateway.py.html>`__ to see an example of how to use update_gateway API.
"""
resource_path = "/gateways/{gatewayId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_gateway got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"gatewayId": gateway_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_gateway_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_gateway_details)
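# --- Illustrative usage (not part of the SDK source) ---
# Sketch of constructing the client from a standard OCI config file and
# listing gateways; the profile name and compartment OCID are placeholders.
#
#   import oci
#   config = oci.config.from_file("~/.oci/config", "DEFAULT")
#   client = oci.apigateway.GatewayClient(config)
#   gateways = client.list_gateways(
#       compartment_id="ocid1.compartment.oc1..<placeholder>",
#       lifecycle_state="ACTIVE",
#   ).data.items
#   for gw in gateways:
#       print(gw.display_name, gw.lifecycle_state)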
| 49.082232
| 261
| 0.652536
|
719995a44b5e5c1b11ba33d14d966a075bdd5c5a
| 190
|
py
|
Python
|
src/swem.py
|
Lain-progressivehouse/probspace-youtube
|
04740862fb28fb9a38131554369d6c54eb560fc5
|
[
"MIT"
] | 5
|
2020-06-29T04:32:07.000Z
|
2021-02-08T03:54:29.000Z
|
src/swem.py
|
Lain-progressivehouse/probspace-youtube
|
04740862fb28fb9a38131554369d6c54eb560fc5
|
[
"MIT"
] | null | null | null |
src/swem.py
|
Lain-progressivehouse/probspace-youtube
|
04740862fb28fb9a38131554369d6c54eb560fc5
|
[
"MIT"
] | null | null | null |
import gensim
import MeCab
mecab = MeCab.Tagger("-Owakati")
def get_word_list(document):
document = document.lower()
return list(filter("".__ne__, mecab.parse(document).split()))
| 19
| 65
| 0.715789
|
735b485484e2fdb118b8c6105ed416ac294d5d48
| 14,821
|
py
|
Python
|
src/network.py
|
viswanathgs/dist-dqn
|
c7e407b1ef6f06c17fb784f3e119cdf20dd4824a
|
[
"MIT"
] | 206
|
2016-07-14T18:46:29.000Z
|
2021-11-08T12:51:56.000Z
|
src/network.py
|
binderwang/dist-dqn
|
c7e407b1ef6f06c17fb784f3e119cdf20dd4824a
|
[
"MIT"
] | 1
|
2021-05-25T03:14:16.000Z
|
2021-05-25T03:14:16.000Z
|
src/network.py
|
binderwang/dist-dqn
|
c7e407b1ef6f06c17fb784f3e119cdf20dd4824a
|
[
"MIT"
] | 29
|
2016-07-14T20:02:37.000Z
|
2021-12-04T15:38:26.000Z
|
from functools import partial
from six.moves import zip
import tensorflow as tf
# Base-class for the Deep Q-Network architecture. Constructs the TensorFlow
# graph with layers, weights, biases, loss-function, optimizer, etc. for
# a network of given type. Currently, a simple network with two hidden layers
# and a convolutional neural network are supported.
#
# New network architectures can be added by sub-classing Network and
# implementing the _init_params() and _init_layers() methods.
class Network:
x_placeholder = None
q_placeholder = None
action_placeholder = None
q_output = None
train_op = None
target_q_output = None
target_update_ops = None
summary_op = None
global_step = None
def __init__(self, input_shape, num_actions, num_replicas=1,
ps_device=None, worker_device=None):
self.input_shape = list(input_shape)
self.num_actions = num_actions
self.num_replicas = num_replicas # Used for synchronous training if enabled
self.ps_device = ps_device # Device constraints used by param server
self.worker_device = worker_device # Used for target param replication
@staticmethod
def create_network(config, input_shape, num_actions, num_replicas=1,
ps_device=None, worker_device=None):
"""
Creates and returns a network type based on config.network.
"""
Net = {
'simple': SimpleNetwork,
'cnn': ConvNetwork,
}.get(config.network, None)
if Net is None:
raise RuntimeError('Unsupported network type {}'.format(config.network))
net = Net(
input_shape=input_shape,
num_actions=num_actions,
num_replicas=num_replicas,
ps_device=ps_device,
worker_device=worker_device,
)
net._init_network(config)
return net
def _init_network(self, config):
# Placeholders
self.x_placeholder = tf.placeholder(tf.float32, [None] + self.input_shape)
self.q_placeholder = tf.placeholder(tf.float32, [None])
self.action_placeholder = tf.placeholder(tf.float32,
[None, self.num_actions])
summaries = []
# Params and layers
with tf.device(self.ps_device):
params = self._init_params(
config,
input_shape=self.input_shape,
output_size=self.num_actions,
summaries=summaries,
)
self.q_output, reg_loss = self._init_layers(
config,
inputs=self.x_placeholder,
params=params,
summaries=summaries,
)
# Loss and training
self.global_step = tf.Variable(0, name='global_step', trainable=False)
loss = self._init_loss(
config,
q=self.q_output,
expected_q=self.q_placeholder,
actions=self.action_placeholder,
reg_loss=reg_loss,
summaries=summaries,
)
self.train_op = self._init_optimizer(
config,
params=params,
loss=loss,
num_replicas=self.num_replicas,
global_step=self.global_step,
summaries=summaries,
)
# Target network
self.target_q_output, self.target_update_ops = self._init_target_network(
config,
inputs=self.x_placeholder,
input_shape=self.input_shape,
output_size=self.num_actions,
params=params,
ps_device=self.ps_device,
worker_device=self.worker_device,
summaries=summaries,
)
# Merge all the summaries in this graph
if summaries:
self.summary_op = tf.merge_summary(summaries)
@classmethod
def _init_params(cls, config, input_shape, output_size, summaries=None):
"""
Setup the trainable params for the network. Subclasses should
implement this to create all the weights and biases.
@return: Tuple of weights and biases
"""
raise NotImplementedError
@classmethod
def _init_layers(cls, config, inputs, params, summaries=None):
"""
Setup the layers and trainable params of the network. Subclasses should
implement this to initialize the appropriate network architecture.
@param inputs: Placeholder for the input layer
@param params: Tuple of weights and biases returned by _init_params()
@return: (output_layer, regularized_loss)
"""
raise NotImplementedError
@classmethod
def _init_loss(cls, config, q, expected_q, actions, reg_loss=None,
summaries=None):
"""
Setup the loss function and apply regularization is provided.
@return: loss_op
"""
q_masked = tf.reduce_sum(tf.mul(q, actions), reduction_indices=[1])
loss = tf.reduce_mean(tf.squared_difference(q_masked, expected_q))
if reg_loss is not None:
loss += config.reg_param * reg_loss
if summaries is not None:
summaries.append(tf.scalar_summary('loss', loss))
return loss
@classmethod
def _init_optimizer(cls, config, params, loss, num_replicas=1,
global_step=None, summaries=None):
"""
Setup the optimizer for the provided params based on the loss function.
Relies on config.optimizer to select the type of optimizer.
@return: train_op
"""
Optimizer = {
'adadelta': tf.train.AdadeltaOptimizer,
'adagrad': tf.train.AdagradOptimizer,
'adam': tf.train.AdamOptimizer,
'ftrl': tf.train.FtrlOptimizer,
'sgd': tf.train.GradientDescentOptimizer,
'momentum': partial(tf.train.MomentumOptimizer, momentum=config.momentum),
'rmsprop': partial(tf.train.RMSPropOptimizer, decay=config.rmsprop_decay),
}.get(config.optimizer, None)
if Optimizer is None:
raise RuntimeError('Unsupported optimizer {}'.format(config.optimizer))
# TODO: Experiment with gating gradients for improved parallelism
# https://www.tensorflow.org/versions/r0.9/api_docs/python/train.html#gating-gradients
optimizer = Optimizer(learning_rate=config.lr)
# Synchronize gradient updates if enabled
if config.sync:
optimizer = tf.train.SyncReplicasOptimizer(
optimizer,
replicas_to_aggregate=num_replicas,
replica_id=config.task_id,
)
# Explicitly pass the list of trainable params instead of defaulting to
# GraphKeys.TRAINABLE_VARIABLES. Otherwise, when this network becomes a
# subgraph when in-graph replication is configured, TRAINABLE_VARIABLES
# will contain params from all graph replicas due to global namespacing.
train_op = optimizer.minimize(
loss,
var_list=params,
global_step=global_step,
)
return train_op
  @classmethod
  def _init_target_network(cls, config, inputs, input_shape, output_size,
params, ps_device=None, worker_device=None,
summaries=None):
"""
Setup the target network used for minibatch training, and the
update operations to periodically update the target network with
the trained network.
@return: target_q_output, [target_update_ops]
"""
if not config.disable_target_replication:
# Replicate the target network params within each worker instead of it
# being managed by the param server. Since the target network is frozen
# for many steps, this cuts down the communication overhead of
# transferring them from the param server's device during each train loop.
# Also, they need to be marked as local variables so that all workers
# initialize them locally. Otherwise, non-chief workers are forever
# waiting for the chief worker to initialize the replicated target params.
target_param_device = worker_device
collections = tf.GraphKeys.LOCAL_VARIABLES
else:
# If target param replication is disabled, param server takes the
# ownership of target params. Allocate on the same device as the other
# params managed by the param server.
target_param_device = ps_device
collections = None
# Initialize the target weights and layers
with tf.variable_scope('target'):
with tf.device(target_param_device):
target_params = cls._init_params(
config,
input_shape=input_shape,
output_size=output_size,
collections=collections,
summaries=summaries,
)
target_q_output, _ = cls._init_layers(
config,
inputs=inputs,
params=target_params,
summaries=summaries,
)
# Create assign ops to periodically update the target network
target_update_ops = \
[tf.assign(target_p, p) for target_p, p in zip(target_params, params)]
return target_q_output, target_update_ops
# Simple fully connected network with two fully connected layers with
# tanh activations and a final Affine layer.
class SimpleNetwork(Network):
HIDDEN1_SIZE = 20
HIDDEN2_SIZE = 20
@classmethod
def _init_params(cls, config, input_shape, output_size, collections=None,
summaries=None):
if len(input_shape) != 1:
raise RuntimeError('%s expects 1-d input' % cls.__name__)
input_size = input_shape[0]
weight_init = tf.truncated_normal_initializer(stddev=0.01)
bias_init = tf.constant_initializer(value=0.0)
# First hidden layer
with tf.variable_scope('hidden1'):
shape = [input_size, cls.HIDDEN1_SIZE]
w1 = tf.get_variable('w', shape, initializer=weight_init,
collections=collections)
b1 = tf.get_variable('b', cls.HIDDEN1_SIZE, initializer=bias_init,
collections=collections)
# Second hidden layer
with tf.variable_scope('hidden2'):
shape = [cls.HIDDEN1_SIZE, cls.HIDDEN2_SIZE]
w2 = tf.get_variable('w', shape, initializer=weight_init,
collections=collections)
b2 = tf.get_variable('b', cls.HIDDEN2_SIZE, initializer=bias_init,
collections=collections)
# Output layer
with tf.variable_scope('output'):
shape = [cls.HIDDEN2_SIZE, output_size]
w3 = tf.get_variable('w', shape, initializer=weight_init,
collections=collections)
b3 = tf.get_variable('b', output_size, initializer=bias_init,
collections=collections)
return (w1, b1, w2, b2, w3, b3)
@classmethod
def _init_layers(cls, config, inputs, params, summaries=None):
w1, b1, w2, b2, w3, b3 = params
# Layers
with tf.name_scope('hidden1'):
a1 = tf.nn.tanh(tf.matmul(inputs, w1) + b1, name='tanh')
with tf.name_scope('hidden2'):
a2 = tf.nn.tanh(tf.matmul(a1, w2) + b2, name='tanh')
with tf.name_scope('output'):
output = tf.add(tf.matmul(a2, w3), b3, name='affine')
# L2 regularization for weights excluding biases
reg_loss = sum(tf.nn.l2_loss(w) for w in [w1, w2, w3])
return output, reg_loss
# Convolutional network described in
# https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
class ConvNetwork(Network):
CONV1_FILTERS = 32
CONV1_SIZE = 8
CONV1_STRIDE = 4
CONV2_FILTERS = 64
CONV2_SIZE = 4
CONV2_STRIDE = 2
CONV3_FILTERS = 64
CONV3_SIZE = 3
CONV3_STRIDE = 1
POOL_SIZE = [1, 2, 2, 1]
POOL_STRIDE = [1, 2, 2, 1]
FULLY_CONNECTED_SIZE = 256
@classmethod
def _init_params(cls, config, input_shape, output_size, collections=None,
summaries=None):
if len(input_shape) != 3:
      raise RuntimeError('%s expects 3-d input' % cls.__name__)
weight_init = tf.truncated_normal_initializer(stddev=0.01)
bias_init = tf.constant_initializer(value=0.0)
# First hidden conv-pool layer
with tf.variable_scope('conv1'):
shape = \
[cls.CONV1_SIZE, cls.CONV1_SIZE, input_shape[2], cls.CONV1_FILTERS]
w1 = tf.get_variable('w', shape, initializer=weight_init,
collections=collections)
b1 = tf.get_variable('b', cls.CONV1_FILTERS, initializer=bias_init,
collections=collections)
# Second hidden conv-pool layer
with tf.variable_scope('conv2'):
shape = \
[cls.CONV2_SIZE, cls.CONV2_SIZE, cls.CONV1_FILTERS, cls.CONV2_FILTERS]
w2 = tf.get_variable('w', shape, initializer=weight_init,
collections=collections)
b2 = tf.get_variable('b', cls.CONV2_FILTERS, initializer=bias_init,
collections=collections)
# Third hidden conv-pool layer
with tf.variable_scope('conv3'):
shape = \
[cls.CONV3_SIZE, cls.CONV3_SIZE, cls.CONV2_FILTERS, cls.CONV3_FILTERS]
w3 = tf.get_variable('w', shape, initializer=weight_init,
collections=collections)
b3 = tf.get_variable('b', cls.CONV3_FILTERS, initializer=bias_init,
collections=collections)
# Final fully-connected hidden layer
with tf.variable_scope('fcl'):
shape = [cls.FULLY_CONNECTED_SIZE, cls.FULLY_CONNECTED_SIZE]
w4 = tf.get_variable('w', shape, initializer=weight_init,
collections=collections)
b4 = tf.get_variable('b', cls.FULLY_CONNECTED_SIZE, initializer=bias_init,
collections=collections)
# Output layer
with tf.variable_scope('output'):
shape = [cls.FULLY_CONNECTED_SIZE, output_size]
w5 = tf.get_variable('w', shape, initializer=weight_init,
collections=collections)
b5 = tf.get_variable('b', output_size, initializer=bias_init,
collections=collections)
return (w1, b1, w2, b2, w3, b3, w4, b4, w5, b5)
@classmethod
def _init_layers(cls, config, inputs, params, summaries=None):
w1, b1, w2, b2, w3, b3, w4, b4, w5, b5 = params
# Layers
with tf.name_scope('conv1'):
a1 = cls.conv_pool(inputs, w1, b1, cls.CONV1_STRIDE)
with tf.name_scope('conv2'):
a2 = cls.conv_pool(a1, w2, b2, cls.CONV2_STRIDE)
with tf.name_scope('conv3'):
a3 = cls.conv_pool(a2, w3, b3, cls.CONV3_STRIDE)
with tf.name_scope('fcl'):
a3_flat = tf.reshape(a3, [-1, cls.FULLY_CONNECTED_SIZE])
a4 = tf.nn.relu(tf.matmul(a3_flat, w4) + b4, name='relu')
with tf.name_scope('output'):
output = tf.add(tf.matmul(a4, w5), b5, name='affine')
# L2 regularization for fully-connected weights
reg_loss = sum(tf.nn.l2_loss(w) for w in [w4, w5])
return output, reg_loss
@classmethod
def conv_stride(cls, stride):
return [1, stride, stride, 1]
@classmethod
def conv_pool(cls, inputs, filters, bias, stride):
conv = tf.nn.conv2d(inputs, filters, strides=cls.conv_stride(stride),
padding='SAME', name='conv')
return cls.max_pool(tf.nn.relu(conv + bias))
@classmethod
def max_pool(cls, a):
return tf.nn.max_pool(a, ksize=cls.POOL_SIZE, strides=cls.POOL_STRIDE,
padding='SAME', name='pool')
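# --- Illustrative smoke test (not part of the original module) ---------------------------
# A minimal sketch of exercising the conv_pool helper above, assuming TensorFlow 1.x graph
# mode and made-up tensor shapes that match the CONV1 constants.
if __name__ == '__main__':
    import numpy as np
    x = tf.constant(np.random.rand(1, 84, 84, 4), dtype=tf.float32)   # one fake 84x84x4 frame
    w = tf.constant(np.random.rand(8, 8, 4, 32), dtype=tf.float32)    # CONV1-sized filter bank
    b = tf.constant(np.zeros(32), dtype=tf.float32)
    y = ConvNetwork.conv_pool(x, w, b, stride=ConvNetwork.CONV1_STRIDE)
    with tf.Session() as sess:
        print(sess.run(y).shape)  # expected (1, 11, 11, 32): conv stride 4, then 2x2 max pool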
| 34.872941
| 90
| 0.669995
|
6447222e3a168244d1e6769edf7da90216838d2d
| 5,845
|
py
|
Python
|
image_downloader.py
|
guzdy/tumblr_crawler
|
c584c7078983c97f62c64ea33c3f9150bb0580b9
|
[
"MIT"
] | 2
|
2017-10-15T10:58:14.000Z
|
2017-12-08T14:11:31.000Z
|
image_downloader.py
|
guzdy/tumblr_crawler
|
c584c7078983c97f62c64ea33c3f9150bb0580b9
|
[
"MIT"
] | null | null | null |
image_downloader.py
|
guzdy/tumblr_crawler
|
c584c7078983c97f62c64ea33c3f9150bb0580b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
from concurrent import futures
import json
import requests
import os
import multiprocessing
import re
import datetime
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Disable insecure-request warnings
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
"Upgrade-Insecure-Requests": "1"
}
PROCESSES = multiprocessing.cpu_count()
class ImagesDownloader(object):
def crawl_image(self, blogname, proxies=None, timeout=None):
"""该 class 的控制模块, num 为 posts 总数, start 为起始数"""
num = 1
start = 0
text = TextWriter(blogname)
with futures.ProcessPoolExecutor(max_workers=PROCESSES) as ex:
while num > start:
data_json = self.page_download(blogname, start, proxies, timeout)
item_list, num = self.page_parse(data_json)
start += 50
for item in item_list:
ex.submit(self.media_download, item['media'], item['slug'],
blogname, proxies=proxies, timeout=timeout)
text.process_item(item)
text.close()
def page_download(self, blogname, start, proxies=None, timeout=None, retry_times=0):
url_raw = "http://{0}.tumblr.com/api/read/json?type=photo&num=50&start={1}"
url = url_raw.format(blogname, start)
        print('Attempting to download: ', url)
resp = requests.get(url, proxies=proxies, headers=HEADERS, verify=False)
if resp.status_code != 200:
if retry_times > 3:
                print('Failed after multiple attempts; stopping this page download')
return
retry_times += 1
            return self.page_download(blogname, start, proxies, timeout, retry_times)
data_json = json.loads(resp.text.strip('var tumblr_api_read = ').strip(';\n'))
return data_json
def page_parse(self, data):
num = data['posts-total']
        if num == 0:
            return [], num
item_list = []
for post in data['posts']:
item = {}
print(post)
if post.get('photos'):
photos = []
for photo in post["photos"]:
photos.append(photo['photo-url-1280'])
else:
photos = [post['photo-url-1280']]
item['media'] = photos
slug = post.get('slug')
item['slug'] = slug
tags_list = post.get('tags')
item['tags_list'] = tags_list
item_list.append(item)
print("ITEM INFO:", item_list)
return item_list, num
def media_download(self, urls, filename, blogname, proxies=None, timeout=None):
event_loop = asyncio.new_event_loop()
event_loop.run_until_complete(self.async_download(urls, filename, blogname,
proxies, timeout))
event_loop.close()
return
async def async_download(self, urls, filename_raw, blogname, proxies, timeout):
""" 指定下载路径, 并下载图片。 """
print('File name RAW:', filename_raw)
if not os.path.isdir(blogname):
os.mkdir(blogname)
num = 1
for url in urls:
if not filename_raw:
print("Image Url:", url)
filename = url.split('/')[-1]
else:
if num == 1:
filename = filename_raw + '.jpg'
else:
filename = "{0}({1}).jpg".format(filename_raw, str(num))
num += 1
file_path = os.path.join(blogname, filename)
print("Image File Path: ", file_path)
if not os.path.isfile(file_path):
await self._async_download(url, file_path, proxies, timeout)
async def _async_download(self, url, file_path, proxies, timeout, retry_times=0):
""" 下载图片, 出现错误,最多重试三次, . """
try:
resp = requests.get(url, proxies=proxies, stream=True,
timeout=timeout, headers=HEADERS, verify=False)
if resp.status_code != 200:
                raise Exception('Error (status %s) while downloading image, retrying' % resp.status_code)
with open(file_path, 'wb') as f:
for chunk in resp.iter_content(1024 * 100):
f.write(chunk)
except Exception as e:
print("%s: %s" % (e, url))
# try again
if retry_times < 3:
retry_times += 1
                # To use proxies, set them in the call on the line below
await self._async_download(url, file_path, proxies, timeout, retry_times)
else:
print("Download Fail(retried 3 times): ", url)
return
class TextWriter(object):
"""把必要的内容文本写入保存"""
def __init__(self, blogname):
if not os.path.isdir(blogname):
os.mkdir(blogname)
strtime = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
file_path = os.path.join(blogname, '0.'+blogname+' image '+strtime+'.txt')
self.file = open(file_path, 'w')
def close(self):
self.file.close()
def process_item(self, item):
if item.get("media"):
for url in item['media']:
line = url + '\n'
self.file.write(line)
        if item.get('tags_list'):
            self.file.write('Tags:')
            for tag in item['tags_list']:
self.file.write(tag)
self.file.write('\n')
if item.get('slug'):
text = re.sub(r'\xa0|\n', ' ', item['slug'].strip())
text = re.sub(r'\s+', ' ', text)
self.file.write(text+'\n')
self.file.write('\n\n')
return
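# A hypothetical invocation of the downloader above (not part of the original script); the
# blog name below is a placeholder and proxies/timeout are illustrative defaults only.
if __name__ == '__main__':
    ImagesDownloader().crawl_image('example-blog', proxies=None, timeout=30)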
| 35.858896
| 89
| 0.545081
|
9c05086429ddfdaf77ab6525bb8ba742e095a115
| 1,172
|
py
|
Python
|
test/functional/p2p_mempool.py
|
daface45/cerebralcoin
|
0ea3caf2b22113c31c8fd3672f9dc6fa092ffd29
|
[
"MIT"
] | 1
|
2021-10-07T01:18:40.000Z
|
2021-10-07T01:18:40.000Z
|
test/functional/p2p_mempool.py
|
daface45/cerebralcoin
|
0ea3caf2b22113c31c8fd3672f9dc6fa092ffd29
|
[
"MIT"
] | null | null | null |
test/functional/p2p_mempool.py
|
daface45/cerebralcoin
|
0ea3caf2b22113c31c8fd3672f9dc6fa092ffd29
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Cerebralcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.messages import msg_mempool
from test_framework.mininode import P2PInterface
from test_framework.test_framework import CerebralcoinTestFramework
from test_framework.util import assert_equal
class P2PMempoolTests(CerebralcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
# Add a p2p connection
self.nodes[0].add_p2p_connection(P2PInterface())
#request mempool
self.nodes[0].p2p.send_message(msg_mempool())
self.nodes[0].p2p.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| 33.485714
| 73
| 0.739761
|
6f53d980bcef0c32e35b658122d5b47219b95909
| 1,151
|
py
|
Python
|
rmgpy/reduction/test_data/minimal/input.py
|
keceli/RMG-Py
|
17c7870195a4feb6e8bf8974292f9bcdca1a1d9d
|
[
"MIT"
] | 7
|
2017-10-04T16:04:14.000Z
|
2021-03-27T21:54:41.000Z
|
rmgpy/reduction/test_data/minimal/input.py
|
speth/RMG-Py
|
1d2c2b684580396e984459d9347628a5ceb80e2e
|
[
"MIT"
] | 72
|
2016-06-06T18:18:49.000Z
|
2019-11-17T03:21:10.000Z
|
rmgpy/reduction/test_data/minimal/input.py
|
speth/RMG-Py
|
1d2c2b684580396e984459d9347628a5ceb80e2e
|
[
"MIT"
] | 6
|
2017-10-04T15:37:05.000Z
|
2021-12-29T06:50:16.000Z
|
# Data sources
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# List of species
species(
label='ethane',
reactive=True,
structure=SMILES("CC"),
)
# Reaction systems
simpleReactor(
temperature=(1350,'K'),
pressure=(1.0,'bar'),
initialMoleFractions={
"ethane": 1.0,
},
# terminationConversion={
# 'ethane': 0.9,
# },
terminationTime=(1e-3,'s'),
)
# simpleReactor(
# temperature=(1750,'K'),
# pressure=(10.0,'bar'),
# initialMoleFractions={
# "ethane": 1.0,
# },
# # terminationConversion={
# # 'ethane': 0.9,
# # },
# terminationTime=(1e-2,'s'),
# )
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceKeepInEdge=0.0,
toleranceMoveToCore=0.1,
toleranceInterruptSimulation=0.1,
maximumEdgeSpecies=100000
)
options(
units='si',
saveRestartPeriod=None,
generatePlots=False,
saveEdgeSpecies=True,
saveSimulationProfiles=True,
)
| 18.564516
| 47
| 0.60556
|
dd08eebb3e8ed2db59c65066c76b131892021def
| 328
|
py
|
Python
|
tests/t_sync_attachment.py
|
cjr0707/CrawlUtils
|
723f0b8ef2a617ff0ca1b51e35a5ded43ab76ff0
|
[
"MIT"
] | 1
|
2021-03-11T03:00:10.000Z
|
2021-03-11T03:00:10.000Z
|
tests/t_sync_attachment.py
|
cjr0707/CrawlUtils
|
723f0b8ef2a617ff0ca1b51e35a5ded43ab76ff0
|
[
"MIT"
] | null | null | null |
tests/t_sync_attachment.py
|
cjr0707/CrawlUtils
|
723f0b8ef2a617ff0ca1b51e35a5ded43ab76ff0
|
[
"MIT"
] | null | null | null |
from crawl_utils.file import extract_attachment
url = 'http://www.xinjiang.gov.cn/xinjiang/fgwjx/202009/d9bafda1ba5541db8d8d499934c20208.shtml'
with open('./htmls/attachment/sync_attachment.html') as f:
html = f.read()
attachment_list = extract_attachment(html, url, attachment_format_list=['txt'])
print(attachment_list)
| 36.444444
| 95
| 0.795732
|
40c669b92d7526d1052de3743c908902beb57f6f
| 535
|
py
|
Python
|
Udemy/100 Days of Code - The Complete Python Pro Bootcamp for 2022/Day_33_Project - ISS Overhead Notifier/test.py
|
douglasadones/Cursos-Online
|
53e9af499da8567db4d17cb25fbe1db9b2f0585e
|
[
"MIT"
] | 1
|
2021-12-22T13:06:25.000Z
|
2021-12-22T13:06:25.000Z
|
Udemy/100 Days of Code - The Complete Python Pro Bootcamp for 2022/Day_33_Project - ISS Overhead Notifier/test.py
|
douglasadones/Cursos-Online
|
53e9af499da8567db4d17cb25fbe1db9b2f0585e
|
[
"MIT"
] | null | null | null |
Udemy/100 Days of Code - The Complete Python Pro Bootcamp for 2022/Day_33_Project - ISS Overhead Notifier/test.py
|
douglasadones/Cursos-Online
|
53e9af499da8567db4d17cb25fbe1db9b2f0585e
|
[
"MIT"
] | null | null | null |
import requests
MY_LAT = -2.919150  # Your latitude
MY_LONG = -41.752270  # Your longitude
parametros = {
"lat": MY_LAT,
"lng": MY_LONG,
"formatted": 0,
}
response = requests.get(url="https://api.sunrise-sunset.org/json", params=parametros)
response.raise_for_status()
data = response.json()
nascer_do_sol = int(data["results"]["sunrise"].split("T")[1].split(":")[0])  # Just the sunrise hour (UTC)
por_do_sol = int(data["results"]["sunset"].split("T")[1].split(":")[0])  # Just the sunset hour (UTC)
print(nascer_do_sol)
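# A possible next step for the ISS notifier (a sketch, not part of the original lesson code):
# compare the current UTC hour with the sunrise/sunset hours above to decide whether it is
# currently dark at this location.
from datetime import datetime
current_hour_utc = datetime.utcnow().hour
is_dark = current_hour_utc >= por_do_sol or current_hour_utc <= nascer_do_sol
print(is_dark)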
| 38.214286
| 109
| 0.678505
|
4c42cfdba72def18bc91dc6070e57fea78642354
| 1,596
|
py
|
Python
|
pyflac/__init__.py
|
sonos/pyFLAC
|
3c27540b465b5915050459ebdfb6eedc0fc6291a
|
[
"Apache-2.0"
] | 85
|
2021-04-23T07:00:06.000Z
|
2022-02-20T18:41:52.000Z
|
pyflac/__init__.py
|
sonos/pyFLAC
|
3c27540b465b5915050459ebdfb6eedc0fc6291a
|
[
"Apache-2.0"
] | 6
|
2021-04-22T20:46:03.000Z
|
2022-03-30T11:04:15.000Z
|
pyflac/__init__.py
|
sonos/pyFLAC
|
3c27540b465b5915050459ebdfb6eedc0fc6291a
|
[
"Apache-2.0"
] | 5
|
2021-04-24T10:01:33.000Z
|
2022-02-07T06:15:07.000Z
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# pyFLAC
#
# Copyright (c) 2011-2021, Sonos, Inc.
# All rights reserved.
#
# ------------------------------------------------------------------------------
__title__ = 'pyFLAC'
__version__ = '2.0.0'
__all__ = [
'StreamEncoder',
'FileEncoder',
'EncoderState',
'EncoderInitException',
'EncoderProcessException',
'StreamDecoder',
'FileDecoder',
'DecoderState',
'DecoderInitException',
'DecoderProcessException'
]
import os
import platform
from cffi import FFI
# ------------------------------------------------------------------------------
# Fix DLL load for Windows
#
# Since there is no rpath equivalent for Windows, we just explicitly load
# the libFLAC DLL here.
# ------------------------------------------------------------------------------
if platform.system() == 'Windows':
ffi = FFI()
base_path = os.path.dirname(os.path.abspath(__file__))
if platform.architecture()[0] == '32bit':
libflac = ffi.dlopen(os.path.join(base_path, 'libraries', 'windows-i686', 'libFLAC-8.dll'))
elif platform.architecture()[0] == '64bit':
libflac = ffi.dlopen(os.path.join(base_path, 'libraries', 'windows-x86_64', 'libFLAC-8.dll'))
# flake8: noqa: F401
from .encoder import (
StreamEncoder,
FileEncoder,
EncoderState,
EncoderInitException,
EncoderProcessException
)
from .decoder import (
StreamDecoder,
FileDecoder,
DecoderState,
DecoderInitException,
DecoderProcessException
)
| 25.741935
| 101
| 0.537594
|
b10afae7301ac4ee28b31d6f09da4f1a0569e574
| 16,979
|
py
|
Python
|
chia/full_node/mempool_check_conditions.py
|
Albertjan90/chia-blockchain
|
24b4533e7dd225c065c234eeaea25f06118a088b
|
[
"Apache-2.0"
] | 1
|
2021-06-30T00:03:41.000Z
|
2021-06-30T00:03:41.000Z
|
chia/full_node/mempool_check_conditions.py
|
Albertjan90/chia-blockchain
|
24b4533e7dd225c065c234eeaea25f06118a088b
|
[
"Apache-2.0"
] | null | null | null |
chia/full_node/mempool_check_conditions.py
|
Albertjan90/chia-blockchain
|
24b4533e7dd225c065c234eeaea25f06118a088b
|
[
"Apache-2.0"
] | null | null | null |
import logging
import time
from typing import Tuple, Dict, List, Optional, Set
from clvm import SExp
from chia.consensus.cost_calculator import NPCResult
from chia.consensus.condition_costs import ConditionCost
from chia.full_node.generator import create_generator_args, setup_generator_args
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import NIL
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.types.condition_with_args import ConditionWithArgs
from chia.types.generator_types import BlockGenerator
from chia.types.name_puzzle_condition import NPC
from chia.util.clvm import int_from_bytes, int_to_bytes
from chia.util.condition_tools import ConditionOpcode, conditions_by_opcode
from chia.util.errors import Err, ValidationError
from chia.util.ints import uint32, uint64, uint16
from chia.wallet.puzzles.generator_loader import GENERATOR_FOR_SINGLE_COIN_MOD
from chia.wallet.puzzles.rom_bootstrap_generator import get_generator
GENERATOR_MOD = get_generator()
def mempool_assert_announcement(condition: ConditionWithArgs, announcements: Set[bytes32]) -> Optional[Err]:
"""
Check if an announcement is included in the list of announcements
"""
announcement_hash = bytes32(condition.vars[0])
if announcement_hash not in announcements:
return Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
return None
log = logging.getLogger(__name__)
def mempool_assert_my_coin_id(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:
"""
Checks if CoinID matches the id from the condition
"""
if unspent.coin.name() != condition.vars[0]:
log.warning(f"My name: {unspent.coin.name()} got: {condition.vars[0].hex()}")
return Err.ASSERT_MY_COIN_ID_FAILED
return None
def mempool_assert_absolute_block_height_exceeds(
condition: ConditionWithArgs, prev_transaction_block_height: uint32
) -> Optional[Err]:
"""
Checks if the next block index exceeds the block index from the condition
"""
try:
block_index_exceeds_this = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
if prev_transaction_block_height < block_index_exceeds_this:
return Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
return None
def mempool_assert_relative_block_height_exceeds(
condition: ConditionWithArgs, unspent: CoinRecord, prev_transaction_block_height: uint32
) -> Optional[Err]:
"""
Checks if the coin age exceeds the age from the condition
"""
try:
expected_block_age = int_from_bytes(condition.vars[0])
block_index_exceeds_this = expected_block_age + unspent.confirmed_block_index
except ValueError:
return Err.INVALID_CONDITION
if prev_transaction_block_height < block_index_exceeds_this:
return Err.ASSERT_HEIGHT_RELATIVE_FAILED
return None
def mempool_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp: uint64) -> Optional[Err]:
"""
Check if the current time in seconds exceeds the time specified by condition
"""
try:
expected_seconds = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
if timestamp is None:
timestamp = uint64(int(time.time()))
if timestamp < expected_seconds:
return Err.ASSERT_SECONDS_ABSOLUTE_FAILED
return None
def mempool_assert_relative_time_exceeds(
condition: ConditionWithArgs, unspent: CoinRecord, timestamp: uint64
) -> Optional[Err]:
"""
Check if the current time in seconds exceeds the time specified by condition
"""
try:
expected_seconds = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
if timestamp is None:
timestamp = uint64(int(time.time()))
if timestamp < expected_seconds + unspent.timestamp:
return Err.ASSERT_SECONDS_RELATIVE_FAILED
return None
def mempool_assert_my_parent_id(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:
"""
Checks if coin's parent ID matches the ID from the condition
"""
if unspent.coin.parent_coin_info != condition.vars[0]:
return Err.ASSERT_MY_PARENT_ID_FAILED
return None
def mempool_assert_my_puzzlehash(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:
"""
Checks if coin's puzzlehash matches the puzzlehash from the condition
"""
if unspent.coin.puzzle_hash != condition.vars[0]:
return Err.ASSERT_MY_PUZZLEHASH_FAILED
return None
def mempool_assert_my_amount(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:
"""
Checks if coin's amount matches the amount from the condition
"""
if unspent.coin.amount != int_from_bytes(condition.vars[0]):
return Err.ASSERT_MY_AMOUNT_FAILED
return None
def parse_aggsig(args: SExp) -> List[bytes]:
pubkey = args.first().atom
args = args.rest()
message = args.first().atom
if len(pubkey) != 48:
raise ValidationError(Err.INVALID_CONDITION)
if len(message) > 1024:
raise ValidationError(Err.INVALID_CONDITION)
return [pubkey, message]
def parse_create_coin(args: SExp) -> List[bytes]:
puzzle_hash = args.first().atom
args = args.rest()
if len(puzzle_hash) != 32:
raise ValidationError(Err.INVALID_CONDITION)
amount_int = args.first().as_int()
if amount_int >= 2 ** 64:
raise ValidationError(Err.COIN_AMOUNT_EXCEEDS_MAXIMUM)
if amount_int < 0:
raise ValidationError(Err.COIN_AMOUNT_NEGATIVE)
# note that this may change the representation of amount. If the original
# buffer had redundant leading zeroes, they will be stripped
return [puzzle_hash, int_to_bytes(amount_int)]
def parse_seconds(args: SExp, error_code: Err) -> Optional[List[bytes]]:
seconds_int = args.first().as_int()
    # this condition is inherently satisfied, there is no need to keep it
if seconds_int <= 0:
return None
if seconds_int >= 2 ** 64:
raise ValidationError(error_code)
# note that this may change the representation of seconds. If the original
# buffer had redundant leading zeroes, they will be stripped
return [int_to_bytes(seconds_int)]
def parse_height(args: SExp, error_code: Err) -> Optional[List[bytes]]:
height_int = args.first().as_int()
    # this condition is inherently satisfied, there is no need to keep it
if height_int <= 0:
return None
if height_int >= 2 ** 32:
raise ValidationError(error_code)
# note that this may change the representation of the height. If the original
# buffer had redundant leading zeroes, they will be stripped
return [int_to_bytes(height_int)]
def parse_fee(args: SExp) -> List[bytes]:
fee_int = args.first().as_int()
if fee_int >= 2 ** 64 or fee_int < 0:
raise ValidationError(Err.RESERVE_FEE_CONDITION_FAILED)
# note that this may change the representation of the fee. If the original
# buffer had redundant leading zeroes, they will be stripped
return [int_to_bytes(fee_int)]
def parse_hash(args: SExp, error_code: Err) -> List[bytes]:
h = args.first().atom
if len(h) != 32:
raise ValidationError(error_code)
return [h]
def parse_amount(args: SExp) -> List[bytes]:
amount_int = args.first().as_int()
if amount_int < 0:
raise ValidationError(Err.ASSERT_MY_AMOUNT_FAILED)
if amount_int >= 2 ** 64:
raise ValidationError(Err.ASSERT_MY_AMOUNT_FAILED)
# note that this may change the representation of amount. If the original
# buffer had redundant leading zeroes, they will be stripped
return [int_to_bytes(amount_int)]
def parse_announcement(args: SExp) -> List[bytes]:
msg = args.first().atom
if len(msg) > 1024:
raise ValidationError(Err.INVALID_CONDITION)
return [msg]
def parse_condition_args(args: SExp, condition: ConditionOpcode) -> Tuple[int, Optional[List[bytes]]]:
"""
Parse a list with exactly the expected args, given opcode,
from an SExp into a list of bytes. If there are fewer or more elements in
the list, raise a RuntimeError. If the condition is inherently true (such as
a time- or height lock with a negative time or height, the returned list is None
"""
op = ConditionOpcode
cc = ConditionCost
if condition is op.AGG_SIG_UNSAFE or condition is op.AGG_SIG_ME:
return cc.AGG_SIG.value, parse_aggsig(args)
elif condition is op.CREATE_COIN:
return cc.CREATE_COIN.value, parse_create_coin(args)
elif condition is op.ASSERT_SECONDS_ABSOLUTE:
return cc.ASSERT_SECONDS_ABSOLUTE.value, parse_seconds(args, Err.ASSERT_SECONDS_ABSOLUTE_FAILED)
elif condition is op.ASSERT_SECONDS_RELATIVE:
return cc.ASSERT_SECONDS_RELATIVE.value, parse_seconds(args, Err.ASSERT_SECONDS_RELATIVE_FAILED)
elif condition is op.ASSERT_HEIGHT_ABSOLUTE:
return cc.ASSERT_HEIGHT_ABSOLUTE.value, parse_height(args, Err.ASSERT_HEIGHT_ABSOLUTE_FAILED)
elif condition is op.ASSERT_HEIGHT_RELATIVE:
return cc.ASSERT_HEIGHT_RELATIVE.value, parse_height(args, Err.ASSERT_HEIGHT_RELATIVE_FAILED)
elif condition is op.ASSERT_MY_COIN_ID:
return cc.ASSERT_MY_COIN_ID.value, parse_hash(args, Err.ASSERT_MY_COIN_ID_FAILED)
elif condition is op.RESERVE_FEE:
return cc.RESERVE_FEE.value, parse_fee(args)
elif condition is op.CREATE_COIN_ANNOUNCEMENT:
return cc.CREATE_COIN_ANNOUNCEMENT.value, parse_announcement(args)
elif condition is op.ASSERT_COIN_ANNOUNCEMENT:
return cc.ASSERT_COIN_ANNOUNCEMENT.value, parse_hash(args, Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
elif condition is op.CREATE_PUZZLE_ANNOUNCEMENT:
return cc.CREATE_PUZZLE_ANNOUNCEMENT.value, parse_announcement(args)
elif condition is op.ASSERT_PUZZLE_ANNOUNCEMENT:
return cc.ASSERT_PUZZLE_ANNOUNCEMENT.value, parse_hash(args, Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
elif condition is op.ASSERT_MY_PARENT_ID:
return cc.ASSERT_MY_PARENT_ID.value, parse_hash(args, Err.ASSERT_MY_PARENT_ID_FAILED)
elif condition is op.ASSERT_MY_PUZZLEHASH:
return cc.ASSERT_MY_PUZZLEHASH.value, parse_hash(args, Err.ASSERT_MY_PUZZLEHASH_FAILED)
elif condition is op.ASSERT_MY_AMOUNT:
return cc.ASSERT_MY_AMOUNT.value, parse_amount(args)
else:
raise ValidationError(Err.INVALID_CONDITION)
CONDITION_OPCODES: Set[bytes] = set(item.value for item in ConditionOpcode)
def parse_condition(cond: SExp, safe_mode: bool) -> Tuple[int, Optional[ConditionWithArgs]]:
condition = cond.first().as_atom()
if condition in CONDITION_OPCODES:
opcode: ConditionOpcode = ConditionOpcode(condition)
cost, args = parse_condition_args(cond.rest(), opcode)
cvl = ConditionWithArgs(opcode, args) if args is not None else None
elif not safe_mode:
opcode = ConditionOpcode.UNKNOWN
cvl = ConditionWithArgs(opcode, cond.rest().as_atom_list())
cost = 0
else:
raise ValidationError(Err.INVALID_CONDITION)
return cost, cvl
def get_name_puzzle_conditions(
generator: BlockGenerator, max_cost: int, *, cost_per_byte: int, safe_mode: bool
) -> NPCResult:
"""
This executes the generator program and returns the coins and their
conditions. If the cost of the program (size, CLVM execution and conditions)
exceed max_cost, the function fails. In order to accurately take the size
of the program into account when calculating cost, cost_per_byte must be
specified.
safe_mode determines whether the clvm program and conditions are executed in
    strict mode or not. When in safe/strict mode, unknown operations or conditions
are considered failures. This is the mode when accepting transactions into
the mempool.
"""
try:
block_program, block_program_args = setup_generator_args(generator)
max_cost -= len(bytes(generator.program)) * cost_per_byte
if max_cost < 0:
return NPCResult(uint16(Err.INVALID_BLOCK_COST.value), [], uint64(0))
if safe_mode:
clvm_cost, result = GENERATOR_MOD.run_safe_with_cost(max_cost, block_program, block_program_args)
else:
clvm_cost, result = GENERATOR_MOD.run_with_cost(max_cost, block_program, block_program_args)
max_cost -= clvm_cost
if max_cost < 0:
return NPCResult(uint16(Err.INVALID_BLOCK_COST.value), [], uint64(0))
npc_list: List[NPC] = []
for res in result.first().as_iter():
conditions_list: List[ConditionWithArgs] = []
if len(res.first().atom) != 32:
raise ValidationError(Err.INVALID_CONDITION)
spent_coin_parent_id: bytes32 = res.first().as_atom()
res = res.rest()
if len(res.first().atom) != 32:
raise ValidationError(Err.INVALID_CONDITION)
spent_coin_puzzle_hash: bytes32 = res.first().as_atom()
res = res.rest()
spent_coin_amount: uint64 = uint64(res.first().as_int())
res = res.rest()
spent_coin: Coin = Coin(spent_coin_parent_id, spent_coin_puzzle_hash, spent_coin_amount)
for cond in res.first().as_iter():
cost, cvl = parse_condition(cond, safe_mode)
max_cost -= cost
if max_cost < 0:
return NPCResult(uint16(Err.INVALID_BLOCK_COST.value), [], uint64(0))
if cvl is not None:
conditions_list.append(cvl)
conditions_dict = conditions_by_opcode(conditions_list)
if conditions_dict is None:
conditions_dict = {}
npc_list.append(
NPC(spent_coin.name(), spent_coin.puzzle_hash, [(a, b) for a, b in conditions_dict.items()])
)
return NPCResult(None, npc_list, uint64(clvm_cost))
except ValidationError as e:
return NPCResult(uint16(e.code.value), [], uint64(0))
except Exception:
return NPCResult(uint16(Err.GENERATOR_RUNTIME_ERROR.value), [], uint64(0))
def get_puzzle_and_solution_for_coin(generator: BlockGenerator, coin_name: bytes, max_cost: int):
try:
block_program = generator.program
if not generator.generator_args:
block_program_args = NIL
else:
block_program_args = create_generator_args(generator.generator_refs())
cost, result = GENERATOR_FOR_SINGLE_COIN_MOD.run_with_cost(
max_cost, block_program, block_program_args, coin_name
)
puzzle = result.first()
solution = result.rest().first()
return None, puzzle, solution
except Exception as e:
return e, None, None
def mempool_check_conditions_dict(
unspent: CoinRecord,
coin_announcement_names: Set[bytes32],
puzzle_announcement_names: Set[bytes32],
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
prev_transaction_block_height: uint32,
timestamp: uint64,
) -> Optional[Err]:
"""
Check all conditions against current state.
"""
for con_list in conditions_dict.values():
cvp: ConditionWithArgs
for cvp in con_list:
error: Optional[Err] = None
if cvp.opcode is ConditionOpcode.ASSERT_MY_COIN_ID:
error = mempool_assert_my_coin_id(cvp, unspent)
elif cvp.opcode is ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT:
error = mempool_assert_announcement(cvp, coin_announcement_names)
elif cvp.opcode is ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT:
error = mempool_assert_announcement(cvp, puzzle_announcement_names)
elif cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE:
error = mempool_assert_absolute_block_height_exceeds(cvp, prev_transaction_block_height)
elif cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_RELATIVE:
error = mempool_assert_relative_block_height_exceeds(cvp, unspent, prev_transaction_block_height)
elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_ABSOLUTE:
error = mempool_assert_absolute_time_exceeds(cvp, timestamp)
elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_RELATIVE:
error = mempool_assert_relative_time_exceeds(cvp, unspent, timestamp)
elif cvp.opcode is ConditionOpcode.ASSERT_MY_PARENT_ID:
error = mempool_assert_my_parent_id(cvp, unspent)
elif cvp.opcode is ConditionOpcode.ASSERT_MY_PUZZLEHASH:
error = mempool_assert_my_puzzlehash(cvp, unspent)
elif cvp.opcode is ConditionOpcode.ASSERT_MY_AMOUNT:
error = mempool_assert_my_amount(cvp, unspent)
if error:
return error
return None
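# Quick illustrative check (not from the original module): int_to_bytes / int_from_bytes,
# used by the parse_* helpers above, round-trip integers and strip redundant leading zeroes,
# which is why parsing may change the byte representation of amounts and fees.
if __name__ == "__main__":
    assert int_from_bytes(int_to_bytes(1000)) == 1000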
| 41.412195
| 113
| 0.714648
|
59aad9a907f683f0e4a08ea320aced8d098f2743
| 1,224
|
py
|
Python
|
apps/feedback/migrations/0001_initial.py
|
lsdlab/djshop_toturial
|
6d450225cc05e6a1ecd161de2b522e1af0b68cc0
|
[
"MIT"
] | null | null | null |
apps/feedback/migrations/0001_initial.py
|
lsdlab/djshop_toturial
|
6d450225cc05e6a1ecd161de2b522e1af0b68cc0
|
[
"MIT"
] | 6
|
2020-06-07T15:18:58.000Z
|
2021-09-22T19:07:33.000Z
|
apps/feedback/migrations/0001_initial.py
|
lsdlab/djshop_toturial
|
6d450225cc05e6a1ecd161de2b522e1af0b68cc0
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.1 on 2019-12-26 22:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('merchant', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Feedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('type', models.TextField(choices=[('1', '投诉'), ('2', '售后'), ('3', '求购'), ('4', '咨询')], default='4', max_length=1)),
('content', models.TextField()),
('solved', models.BooleanField(default=False)),
('merchant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='merchant_feedbacks', to='merchant.Merchant')),
],
options={
'verbose_name': '用户反馈',
'verbose_name_plural': '用户反馈',
'ordering': ['-created_at', '-updated_at'],
},
),
]
| 36
| 152
| 0.55719
|
42561360bb9a9dab9af61fab721366ea472c7a0e
| 8,769
|
py
|
Python
|
core/test.py
|
lindagaw/Eos
|
a125aca20007fbc55c4a5ae0c7baeb85a1375e1a
|
[
"MIT"
] | null | null | null |
core/test.py
|
lindagaw/Eos
|
a125aca20007fbc55c4a5ae0c7baeb85a1375e1a
|
[
"MIT"
] | null | null | null |
core/test.py
|
lindagaw/Eos
|
a125aca20007fbc55c4a5ae0c7baeb85a1375e1a
|
[
"MIT"
] | null | null | null |
"""Test script to classify target data."""
import torch
import torch.nn as nn
from utils import make_variable
from sklearn.metrics import accuracy_score
import numpy as np
from scipy.spatial import distance
import os
def get_distribution(src_encoder, tgt_encoder, src_classifier, tgt_classifier, critic, data_loader, which_data_loader):
print("Start calculating the mahalanobis distances' mean and standard deviation ... ")
vectors = []
for (images, labels) in data_loader:
images = make_variable(images, volatile=True).squeeze_()
labels = make_variable(labels).squeeze_()
torch.no_grad()
src_preds = src_classifier(torch.squeeze(src_encoder(images))).detach().cpu().numpy()
tgt_preds = tgt_classifier(torch.squeeze(tgt_encoder(images))).detach().cpu().numpy()
critic_at_src = critic(torch.squeeze(src_encoder(images))).detach().cpu().numpy()
critic_at_tgt = critic(torch.squeeze(tgt_encoder(images))).detach().cpu().numpy()
for image, label, src_pred, tgt_pred, src_critic, tgt_critic \
in zip(images, labels, src_preds, tgt_preds, critic_at_src, critic_at_tgt):
vectors.append(np.linalg.norm(src_critic.tolist() + tgt_critic.tolist()))
#print('processing vector ' + str(src_critic.tolist() + tgt_critic.tolist()))
mean = np.asarray(vectors).mean(axis=0)
cov = np.cov(vectors)
try:
iv = np.linalg.inv(cov)
except:
iv = cov
mahalanobis = np.asarray([distance.mahalanobis(v, mean, iv) for v in vectors])
mahalanobis_mean = np.mean(mahalanobis)
mahalanobis_std = np.std(mahalanobis)
np.save('snapshots//' + which_data_loader + '_mahalanobis_mean.npy', mahalanobis_mean)
np.save('snapshots//' + which_data_loader + '_mahalanobis_std.npy', mahalanobis_std)
np.save('snapshots//' + which_data_loader + '_iv.npy', iv)
np.save('snapshots//' + which_data_loader + '_mean.npy', mean)
print("Finished obtaining the mahalanobis distances' mean and standard deviation on " + which_data_loader)
return mahalanobis_mean, mahalanobis_std, iv, mean
def is_in_distribution(vector, mahalanobis_mean, mahalanobis_std, mean, iv):
upper_coefficient = 0.1
lower_coefficient = 0.1
upper = mahalanobis_mean + upper_coefficient * mahalanobis_std
lower = mahalanobis_mean - lower_coefficient * mahalanobis_std
mahalanobis = distance.mahalanobis(vector, mean, iv)
if lower < mahalanobis and mahalanobis < upper:
return True
else:
return False
def eval_ADDA(src_encoder, tgt_encoder, src_classifier, tgt_classifier, critic, data_loader):
src_mahalanobis_std = np.load('snapshots//' + 'src' + '_mahalanobis_std.npy')
src_mahalanobis_mean = np.load('snapshots//' + 'src' + '_mahalanobis_mean.npy')
src_iv = np.load('snapshots//' + 'src' + '_iv.npy')
src_mean = np.load('snapshots//' + 'src' + '_mean.npy')
tgt_mahalanobis_std = np.load('snapshots//' + 'tgt' + '_mahalanobis_std.npy')
tgt_mahalanobis_mean = np.load('snapshots//' + 'tgt' + '_mahalanobis_mean.npy')
tgt_iv = np.load('snapshots//' + 'tgt' + '_iv.npy')
tgt_mean = np.load('snapshots//' + 'tgt' + '_mean.npy')
"""Evaluation for target encoder by source classifier on target dataset."""
tgt_encoder.eval()
src_encoder.eval()
src_classifier.eval()
tgt_classifier.eval()
# init loss and accuracy
# set loss function
criterion = nn.CrossEntropyLoss()
# evaluate network
y_trues = []
y_preds = []
for (images, labels) in data_loader:
images = make_variable(images, volatile=True)
labels = make_variable(labels).squeeze_()
torch.no_grad()
src_preds = src_classifier(torch.squeeze(src_encoder(images))).detach().cpu().numpy()
tgt_preds = tgt_classifier(torch.squeeze(tgt_encoder(images))).detach().cpu().numpy()
critic_at_src = critic(torch.squeeze(src_encoder(images))).detach().cpu().numpy()
critic_at_tgt = critic(torch.squeeze(tgt_encoder(images))).detach().cpu().numpy()
for image, label, src_pred, tgt_pred, src_critic, tgt_critic \
in zip(images, labels, src_preds, tgt_preds, critic_at_src, critic_at_tgt):
vector = np.linalg.norm(src_critic.tolist() + tgt_critic.tolist())
            # out of distribution:
if not is_in_distribution(vector, tgt_mahalanobis_mean, tgt_mahalanobis_std, tgt_mean, tgt_iv) \
and not is_in_distribution(vector, src_mahalanobis_mean, src_mahalanobis_std, src_mean, src_iv):
continue
            # if in distribution with respect to the target:
elif is_in_distribution(vector, tgt_mahalanobis_mean, tgt_mahalanobis_std, tgt_mean, tgt_iv):
y_pred = np.argmax(tgt_pred)
else:
y_pred = np.argmax(src_pred)
#y_pred = np.argmax(tgt_pred)
y_preds.append(y_pred)
y_trues.append(label.detach().cpu().numpy())
print("Avg Accuracy = {:2%}".format(accuracy_score(y_true=y_trues, y_pred=y_preds)))
def eval_tgt_with_probe(encoder, critic, src_classifier, tgt_classifier, data_loader):
"""Evaluation for target encoder by source classifier on target dataset."""
# set eval state for Dropout and BN layers
encoder.eval()
src_classifier.eval()
tgt_classifier.eval()
# init loss and accuracy
loss = 0.0
acc = 0.0
f1 = 0.0
ys_pred = []
ys_true = []
# set loss function
criterion = nn.CrossEntropyLoss()
flag = False
# evaluate network
for (images, labels) in data_loader:
images = make_variable(images, volatile=True)
labels = make_variable(labels).squeeze_()
probeds = critic(encoder(images))
for image, label, probed in zip(images, labels, probeds):
if torch.argmax(probed) == 1:
pred = torch.argmax(src_classifier(torch.squeeze(encoder(torch.unsqueeze(image, 0))))).detach().cpu().numpy()
else:
pred = torch.argmax(tgt_classifier(torch.squeeze(encoder(torch.unsqueeze(image, 0))))).detach().cpu().numpy()
ys_pred.append(np.squeeze(pred))
ys_true.append(np.squeeze(label.detach().cpu().numpy()))
loss /= len(data_loader)
acc /= len(data_loader.dataset)
#f1 /= len(data_loader.dataset)
print("Avg Accuracy = {:2%}".format(accuracy_score(y_true=ys_true, y_pred=ys_pred)))
def eval_tgt_with_probe(encoder, critic, src_classifier, tgt_classifier, data_loader):
"""Evaluation for target encoder by source classifier on target dataset."""
# set eval state for Dropout and BN layers
encoder.eval()
src_classifier.eval()
tgt_classifier.eval()
# init loss and accuracy
loss = 0
acc = 0
f1 = 0
ys_pred = []
ys_true = []
# set loss function
criterion = nn.CrossEntropyLoss()
flag = False
# evaluate network
for (images, labels) in data_loader:
images = make_variable(images, volatile=True)
labels = make_variable(labels).squeeze_()
probeds = critic(torch.squeeze(encoder(images)))
for image, label, probed in zip(images, labels, probeds):
if torch.argmax(probed) == 1:
pred = torch.argmax(src_classifier(torch.squeeze(encoder(torch.unsqueeze(image, 0))))).detach().cpu().numpy()
else:
pred = torch.argmax(tgt_classifier(torch.squeeze(encoder(torch.unsqueeze(image, 0))))).detach().cpu().numpy()
ys_pred.append(np.squeeze(pred))
ys_true.append(np.squeeze(label.detach().cpu().numpy()))
acc = accuracy_score(ys_true, ys_pred)
print("Avg Loss = {}, Accuracy = {:2%}".format(loss, acc))
def eval_tgt(encoder, classifier, data_loader):
"""Evaluation for target encoder by source classifier on target dataset."""
# set eval state for Dropout and BN layers
encoder.eval()
classifier.eval()
# init loss and accuracy
loss = 0.0
acc = 0.0
ys_true = []
ys_pred = []
# set loss function
criterion = nn.CrossEntropyLoss()
# evaluate network
for (images, labels) in data_loader:
images = make_variable(images, volatile=True)
labels = make_variable(labels).squeeze_()
preds = classifier(torch.squeeze(encoder(images)))
loss += criterion(preds, labels).data
for pred, label in zip(preds, labels):
ys_pred.append(torch.argmax(pred).detach().cpu().numpy())
ys_true.append(label.detach().cpu().numpy())
acc = accuracy_score(ys_true, ys_pred)
loss /= len(data_loader)
#acc /= len(data_loader.dataset)
print("Avg Loss = {}, Avg Accuracy = {:2%}".format(loss, acc))
| 38.126087
| 125
| 0.662105
|
6ba20480d585d17cdd307f57e280affca5373a1d
| 1,498
|
py
|
Python
|
model.py
|
christianversloot/keras-multilayer-perceptron
|
b3be50f78a682749934d183164ffe0e1a438fbe4
|
[
"CC0-1.0"
] | 2
|
2020-04-15T03:33:49.000Z
|
2020-10-20T13:12:17.000Z
|
model.py
|
christianversloot/keras-multilayer-perceptron
|
b3be50f78a682749934d183164ffe0e1a438fbe4
|
[
"CC0-1.0"
] | null | null | null |
model.py
|
christianversloot/keras-multilayer-perceptron
|
b3be50f78a682749934d183164ffe0e1a438fbe4
|
[
"CC0-1.0"
] | 2
|
2020-09-13T08:43:21.000Z
|
2021-03-28T19:56:10.000Z
|
# Imports
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
# Configuration options
feature_vector_length = 784
num_classes = 10  # MNIST has 10 digit classes (0-9); 60000 is the number of training samples, not classes
# Load the data
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# Reshape the data - MLPs do not understand such things as '2D'.
# Reshape to 28 x 28 pixels = 784 features
X_train = X_train.reshape(X_train.shape[0], feature_vector_length)
X_test = X_test.reshape(X_test.shape[0], feature_vector_length)
# Scale the greyscale pixel values into the [0, 1] range
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# Convert target classes to categorical ones
Y_train = to_categorical(Y_train, num_classes)
Y_test = to_categorical(Y_test, num_classes)
# Set the input shape
input_shape = (feature_vector_length,)
print(f'Feature shape: {input_shape}')
# Create the model
model = Sequential()
model.add(Dense(350, input_shape=input_shape, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# Configure the model and start training
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=10, batch_size=250, verbose=1, validation_split=0.2)
# Test the model after training
test_results = model.evaluate(X_test, Y_test, verbose=1)
print(f'Test results - Loss: {test_results[0]} - Accuracy: {test_results[1]}%')
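# Illustrative follow-up (not in the original script): sanity-check a few predictions;
# argmax over the softmax output recovers the predicted digit class.
sample_predictions = model.predict(X_test[:5])
print(sample_predictions.argmax(axis=1))
print(Y_test[:5].argmax(axis=1))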
| 32.565217
| 87
| 0.774366
|
2b6f4333f9330b87feb36a78deaac64cbbe0e9d3
| 1,773
|
py
|
Python
|
mbdiff/tests/conftest.py
|
PiotrZakrzewski/macrobase-diff
|
b496826e06e6f6cd4bc19741d4a1875c75b8666b
|
[
"Apache-2.0"
] | 1
|
2022-03-05T19:24:39.000Z
|
2022-03-05T19:24:39.000Z
|
mbdiff/tests/conftest.py
|
PiotrZakrzewski/macrobase-diff
|
b496826e06e6f6cd4bc19741d4a1875c75b8666b
|
[
"Apache-2.0"
] | 10
|
2020-11-21T09:24:29.000Z
|
2020-12-03T07:54:43.000Z
|
mbdiff/tests/conftest.py
|
PiotrZakrzewski/macrobase-diff
|
b496826e06e6f6cd4bc19741d4a1875c75b8666b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from pandas import DataFrame, Series
FIXTURE_DF_LEN = 10
@pytest.fixture
def int_column():
return Series([x for x in range(FIXTURE_DF_LEN)])
@pytest.fixture
def float_column():
return Series([x * 1.1 for x in range(FIXTURE_DF_LEN)])
@pytest.fixture
def cat_col_all_same():
return Series(["cat1" for _ in range(FIXTURE_DF_LEN)])
@pytest.fixture
def outlier_col():
return Series(["inlier" for _ in range(FIXTURE_DF_LEN)])
@pytest.fixture
def df_basic(int_column, float_column, cat_col_all_same, outlier_col):
return DataFrame(
{
"ints": int_column,
"floats": float_column,
"cats": cat_col_all_same,
"outlier": outlier_col,
}
)
@pytest.fixture
def df_attr_basic():
return DataFrame(
{
"attr1": ["a", "b"],
"attr2": ["a", "b"],
}
)
@pytest.fixture
def df_outliers(int_column, float_column, cat_col_all_same, outlier_col):
outlier_col[9] = "outlier"
float_column[9] = 999.8
cat_col_all_same[9] = "cat2"
return DataFrame(
{
"ints": int_column,
"floats": float_column,
"cats": cat_col_all_same,
"outlier": outlier_col,
}
)
@pytest.fixture
def df_outliers_balanced(int_column, float_column, cat_col_all_same, outlier_col):
outlier_col[9] = "outlier"
outlier_col[8] = "outlier"
float_column[9] = 999.7
float_column[8] = 999.8
cat_col_all_same[9] = "cat2"
cat_col_all_same[8] = "cat1"
cat_col_all_same[7] = "cat2"
return DataFrame(
{
"ints": int_column,
"floats": float_column,
"cats": cat_col_all_same,
"outlier": outlier_col,
}
)
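# A sketch of a test consuming these fixtures (it would normally live in a separate test
# module rather than conftest.py; shown here only for illustration):
def test_outliers_fixture_shape(df_outliers):
    assert df_outliers.shape == (10, 4)
    assert (df_outliers["outlier"] == "outlier").sum() == 1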
| 21.888889
| 82
| 0.605189
|
9f477cbe62aede88783edb60a9464aeace69fc16
| 1,811
|
py
|
Python
|
third_party/webrtc/src/chromium/src/tools/perf/page_sets/simple_mobile_sites.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 27
|
2016-04-27T01:02:03.000Z
|
2021-12-13T08:53:19.000Z
|
third_party/webrtc/src/chromium/src/tools/perf/page_sets/simple_mobile_sites.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 2
|
2017-03-09T09:00:50.000Z
|
2017-09-21T15:48:20.000Z
|
third_party/webrtc/src/chromium/src/tools/perf/page_sets/simple_mobile_sites.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 17
|
2016-04-27T02:06:39.000Z
|
2019-12-18T08:07:00.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class SimplePage(page_module.Page):
def __init__(self, url, page_set):
super(SimplePage, self).__init__(
url=url,
page_set=page_set,
shared_page_state_class=shared_page_state.Shared10InchTabletPageState,
credentials_path='data/credentials.json')
self.archive_data_file = 'data/simple_mobile_sites.json'
def RunNavigateSteps(self, action_runner):
super(SimplePage, self).RunNavigateSteps(action_runner)
# TODO(epenner): Remove this wait (http://crbug.com/366933)
action_runner.Wait(5)
class SimpleScrollPage(SimplePage):
def __init__(self, url, page_set):
super(SimpleScrollPage, self).__init__(url=url, page_set=page_set)
def RunPageInteractions(self, action_runner):
# Make the scroll longer to reduce noise.
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage(direction='down', speed_in_pixels_per_second=300)
class SimpleMobileSitesPageSet(story.StorySet):
""" Simple mobile sites """
def __init__(self):
super(SimpleMobileSitesPageSet, self).__init__(
archive_data_file='data/simple_mobile_sites.json',
cloud_storage_bucket=story.PUBLIC_BUCKET)
scroll_page_list = [
# Why: Scrolls moderately complex pages (up to 60 layers)
'http://www.ebay.co.uk/',
'https://www.flickr.com/',
'http://www.apple.com/mac/',
'http://www.nyc.gov',
'http://m.nytimes.com/'
]
for url in scroll_page_list:
self.AddStory(SimpleScrollPage(url, self))
| 32.927273
| 80
| 0.731088
|
508997daceee8a42337af3245778d3e329a2c938
| 835
|
py
|
Python
|
ChalkBoard(practice)/Python/MiniProjs/PDF/PDF.py
|
NathanKinney/Gardyloo
|
eb08805f21bd530135fabda4a866d15eb781fc5f
|
[
"MIT"
] | 1
|
2018-06-26T23:05:09.000Z
|
2018-06-26T23:05:09.000Z
|
ChalkBoard(practice)/Python/MiniProjs/PDF/PDF.py
|
NathanKinney/Gardyloo
|
eb08805f21bd530135fabda4a866d15eb781fc5f
|
[
"MIT"
] | null | null | null |
ChalkBoard(practice)/Python/MiniProjs/PDF/PDF.py
|
NathanKinney/Gardyloo
|
eb08805f21bd530135fabda4a866d15eb781fc5f
|
[
"MIT"
] | null | null | null |
import PyPDF2
# opening and reading pdf
# f = open('Working_Business_Proposal.pdf', 'rb')
# pdf_reader = PyPDF2.PdfFileReader(f)
#
# pdf_reader.numPages
#
# page_one = pdf_reader.getPage(0)
# page_one_text = page_one.extractText()
#appending to pdf
# f = open('Working_Business_Proposal.pdf', 'rb')
# pdf_reader = PyPDF2.PdfFileReader(f)
# first_page = pdf_reader.getPage(0)
# pdf_writer = PyPDF2.PdfFileWriter()
# pdf_writer.addPage(first_page)
# pdf_output = open('Some_NEW_FILE.PDF', 'wb')
# pdf_writer.write(pdf_output)
# f.close()
# pdf_output.close()
# print(pdf_output)
f = open('Working_Business_Proposal.pdf', 'rb')
pdf_text = []
pdf_reader = PyPDF2.PdfFileReader(f)
for p in range(pdf_reader.numPages):
page = pdf_reader.getPage(p)
pdf_text.append(page.extractText())
for line in pdf_text:
print(line)
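# Optional follow-up (a sketch, not part of the original exercise): save the extracted text
# so it can be inspected without re-parsing the PDF, then release the file handle.
with open('Extracted_Text.txt', 'w') as out_file:
    out_file.write('\n'.join(pdf_text))
f.close()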
| 20.875
| 49
| 0.728144
|
2636f29aed57ecd50ccf3f441748703dfe1135b2
| 810
|
py
|
Python
|
tests/test_html.py
|
easydatapy/easytxt
|
9c2a424d3e39c50722c5b543b96c1450181f94e4
|
[
"BSD-3-Clause"
] | 4
|
2020-08-25T17:39:04.000Z
|
2020-08-31T20:14:37.000Z
|
tests/test_html.py
|
sitegroove/easytxt
|
9c2a424d3e39c50722c5b543b96c1450181f94e4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_html.py
|
sitegroove/easytxt
|
9c2a424d3e39c50722c5b543b96c1450181f94e4
|
[
"BSD-3-Clause"
] | null | null | null |
from easytxt import html
def test_to_text():
test_html_texts = [("<p>Some sentence</p>", ["Some sentence"])]
for text_html_tuple in test_html_texts:
html_text, expected_text = text_html_tuple
assert html.to_sentences(html_text) == expected_text
def test_validate():
test_valid_html_texts = [
"<p>Some sentence</p>",
"some <br/>sentence",
"some <BR>sentence",
'some <img src="Something" /> sentence',
"<title>Hallo</title>",
]
for test_valid_html_text in test_valid_html_texts:
assert html.validate(test_valid_html_text)
def test_validate_invalid():
test_invalid_html_texts = ["Some sentence"]
for test_invalid_html_text in test_invalid_html_texts:
assert html.validate(test_invalid_html_text) is False
| 27.931034
| 67
| 0.683951
|
e6e790e8e1c6d5c4ad6399218a3ec86d9bddcab5
| 3,521
|
py
|
Python
|
utils.py
|
songaal/rltrader
|
4aac8085dda1a58fbf30a313f2a4608398c971a3
|
[
"MIT"
] | 2
|
2020-06-13T07:18:10.000Z
|
2020-11-03T03:46:40.000Z
|
utils.py
|
songaal/rltrader
|
4aac8085dda1a58fbf30a313f2a4608398c971a3
|
[
"MIT"
] | null | null | null |
utils.py
|
songaal/rltrader
|
4aac8085dda1a58fbf30a313f2a4608398c971a3
|
[
"MIT"
] | 1
|
2020-05-16T08:41:29.000Z
|
2020-05-16T08:41:29.000Z
|
# import csv
# from datetime import datetime, timedelta
# import json
# import logging
#
# import os
# import tempfile
# import timeit
#
# import re
# import requests
#
# logging.basicConfig(format='[%(asctime)s %(levelname)s] (%(filename)s:%(lineno)d) %(message)s',
# level=os.environ.get('LOGLEVEL', 'DEBUG'))
#
# # Name the logger after the package.
# logger = logging.getLogger(__package__)
#
#
# def split_interval(time_interval):
# """
#     Split an interval string such as 2h, 1d or 30m into its number and unit.
# :param time_interval:
# :return:
# """
# unit = None
# number = int(re.findall('\d+', time_interval)[0])
# maybeAlpha = time_interval[-1]
# if maybeAlpha.isalpha():
# unit = maybeAlpha.lower()
# return number, unit
#
#
# def ingest_filename(symbol, period, history):
# return '{}_{}_{}.csv'.format(symbol.replace('/', '_').lower(), period, history)
#
#
# def ingest_filepath(root_dir, exchange, symbol, start_date, end_date, period, history):
# filename = ingest_filename(symbol, period, history)
# base_dir = '{}/{}/{}-{}'.format(root_dir,
# exchange,
# start_date.strftime('%Y%m%d%H%M%Z'),
# end_date.strftime('%Y%m%d%H%M%Z')
# )
# try:
# os.makedirs(base_dir, exist_ok=True)
# except OSError as e:
# raise e
#
# return base_dir, os.path.join(base_dir, filename)
#
#
# def ingest_data(exchange, symbol, start, end, interval):
# api_gateway_endpoint = 'https://9u3jawxuod.execute-api.ap-northeast-2.amazonaws.com/v1_1'
#
#     # split the value and the unit
# interval_num, interval_unit = split_interval(interval)
# interval = interval.lower()
# interval_unit = interval_unit.lower()
#
# if interval_unit in ['w', 'd', 'h']:
#     # week, day, hour units
# resolution = interval_unit if interval_num == '1' else interval
# elif interval_unit in ['m']:
#     # minute units
# resolution = interval_num
#
# url = '{}/history'.format(api_gateway_endpoint)
# params = {'resolution': resolution, 'from': start, 'to': end,
# 'symbol': symbol.upper(), 'exchange': exchange}
# logger.debug('Get candle: %s > %s', url, params)
# response = requests.request('GET', url, params=params)
# candles = json.loads(response.text)
#
# f = open('./data/chart_date/', 'w', encoding='utf-8', newline='')
# wr = csv.writer(f)
#
# if len(candles['t']) == 0:
# raise ValueError('[FAIL] candle data row 0')
#
# wr.writerow(['index', 'ts', 'open', 'high', 'low', 'close', 'volume'])
# for index in range(len(candles['t'])):
# if candles['o'][index] and candles['h'][index] and candles['l'][index] and candles['c'][index] and \
# candles['v'][index]:
# time = datetime.fromtimestamp(int(candles['t'][index]), tz=tz).strftime('%Y-%m-%d')
# wr.writerow([
# time,
# '{:d}'.format(candles['t'][index]),
# '{:.8f}'.format(candles['o'][index]),
# '{:.8f}'.format(candles['h'][index]),
# '{:.8f}'.format(candles['l'][index]),
# '{:.8f}'.format(candles['c'][index]),
# '{:.2f}'.format(candles['v'][index]),
# ])
# timer_end = timeit.default_timer()
# logger.debug('# {} Downloaded CandleFile. elapsed: {}'.format(symbol, str(timer_end - timer_start)))
# return base_dir
| 36.298969
| 110
| 0.552684
|
bcb84af53807142b1d27366708cd4bbacf5def88
| 7,790
|
py
|
Python
|
docs/conf.py
|
django-functest/django-functest
|
51cd027301ee5e62134a27bb1da727814055c02e
|
[
"BSD-3-Clause"
] | 71
|
2016-01-29T14:08:13.000Z
|
2022-03-11T16:24:00.000Z
|
docs/conf.py
|
django-functest/django-functest
|
51cd027301ee5e62134a27bb1da727814055c02e
|
[
"BSD-3-Clause"
] | 17
|
2016-02-13T19:48:54.000Z
|
2021-12-15T16:34:40.000Z
|
docs/conf.py
|
django-functest/django-functest
|
51cd027301ee5e62134a27bb1da727814055c02e
|
[
"BSD-3-Clause"
] | 8
|
2016-02-03T15:08:45.000Z
|
2020-11-11T11:33:35.000Z
|
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-functest'
copyright = u'2016-2018, Luke Plant'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.2"
# The full version, including alpha/beta/rc tags.
release = "1.2-dev"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-functestdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-functest.tex', u'django-functest Documentation',
u'Luke Plant', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-functest', u'django-functest Documentation',
[u'Luke Plant'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-functest', u'django-functest Documentation',
u'Luke Plant', 'django-functest', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.538462 | 80 | 0.714506 |
9cd5a5f3427c2dd3f6c62af97097c62c09bb4093 | 79,013 | py | Python | py/vtproto/query_pb2.py | AndyDiamondstein/vitess | 295c300cd22c109f8be7a454c03c96c6b8e3b55c | ["BSD-3-Clause"] | 1 | 2021-03-14T10:04:18.000Z | 2021-03-14T10:04:18.000Z | py/vtproto/query_pb2.py | AndyDiamondstein/vitess | 295c300cd22c109f8be7a454c03c96c6b8e3b55c | ["BSD-3-Clause"] | null | null | null | py/vtproto/query_pb2.py | AndyDiamondstein/vitess | 295c300cd22c109f8be7a454c03c96c6b8e3b55c | ["BSD-3-Clause"] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: query.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import topodata_pb2 as topodata__pb2
import vtrpc_pb2 as vtrpc__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='query.proto',
package='query',
syntax='proto3',
serialized_pb=_b('\n\x0bquery.proto\x12\x05query\x1a\x0etopodata.proto\x1a\x0bvtrpc.proto\"T\n\x06Target\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\"\"\n\x0eVTGateCallerID\x12\x10\n\x08username\x18\x01 \x01(\t\"1\n\x05Value\x12\x19\n\x04type\x18\x01 \x01(\x0e\x32\x0b.query.Type\x12\r\n\x05value\x18\x02 \x01(\x0c\"V\n\x0c\x42indVariable\x12\x19\n\x04type\x18\x01 \x01(\x0e\x32\x0b.query.Type\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x1c\n\x06values\x18\x03 \x03(\x0b\x32\x0c.query.Value\"\xa2\x01\n\nBoundQuery\x12\x0b\n\x03sql\x18\x01 \x01(\t\x12<\n\x0e\x62ind_variables\x18\x02 \x03(\x0b\x32$.query.BoundQuery.BindVariablesEntry\x1aI\n\x12\x42indVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.query.BindVariable:\x02\x38\x01\"0\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x04type\x18\x02 \x01(\x0e\x32\x0b.query.Type\"&\n\x03Row\x12\x0f\n\x07lengths\x18\x01 \x03(\x12\x12\x0e\n\x06values\x18\x02 \x01(\x0c\"o\n\x0bQueryResult\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x15\n\rrows_affected\x18\x02 \x01(\x04\x12\x11\n\tinsert_id\x18\x03 \x01(\x04\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\"\x98\x01\n\x13GetSessionIdRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x10\n\x08keyspace\x18\x03 \x01(\t\x12\r\n\x05shard\x18\x04 \x01(\t\"*\n\x14GetSessionIdResponse\x12\x12\n\nsession_id\x18\x01 \x01(\x03\"\xdf\x01\n\x0e\x45xecuteRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12\x16\n\x0etransaction_id\x18\x05 \x01(\x03\x12\x12\n\nsession_id\x18\x06 \x01(\x03\"5\n\x0f\x45xecuteResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\xfe\x01\n\x13\x45xecuteBatchRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\"\n\x07queries\x18\x04 \x03(\x0b\x32\x11.query.BoundQuery\x12\x16\n\x0e\x61s_transaction\x18\x05 \x01(\x08\x12\x16\n\x0etransaction_id\x18\x06 \x01(\x03\x12\x12\n\nsession_id\x18\x07 \x01(\x03\";\n\x14\x45xecuteBatchResponse\x12#\n\x07results\x18\x01 \x03(\x0b\x32\x12.query.QueryResult\"\xcd\x01\n\x14StreamExecuteRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12\x12\n\nsession_id\x18\x05 \x01(\x03\";\n\x15StreamExecuteResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\xa3\x01\n\x0c\x42\x65ginRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x12\n\nsession_id\x18\x04 \x01(\x03\"\'\n\rBeginResponse\x12\x16\n\x0etransaction_id\x18\x01 \x01(\x03\"\xbc\x01\n\rCommitRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 
\x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x12\n\nsession_id\x18\x05 \x01(\x03\"\x10\n\x0e\x43ommitResponse\"\xbe\x01\n\x0fRollbackRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x16\n\x0etransaction_id\x18\x04 \x01(\x03\x12\x12\n\nsession_id\x18\x05 \x01(\x03\"\x12\n\x10RollbackResponse\"\xb8\x01\n\x13\x42\x65ginExecuteRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\"r\n\x14\x42\x65ginExecuteResponse\x12\x1e\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0f.vtrpc.RPCError\x12\"\n\x06result\x18\x02 \x01(\x0b\x32\x12.query.QueryResult\x12\x16\n\x0etransaction_id\x18\x03 \x01(\x03\"\xd7\x01\n\x18\x42\x65ginExecuteBatchRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\"\n\x07queries\x18\x04 \x03(\x0b\x32\x11.query.BoundQuery\x12\x16\n\x0e\x61s_transaction\x18\x05 \x01(\x08\"x\n\x19\x42\x65ginExecuteBatchResponse\x12\x1e\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0f.vtrpc.RPCError\x12#\n\x07results\x18\x02 \x03(\x0b\x32\x12.query.QueryResult\x12\x16\n\x0etransaction_id\x18\x03 \x01(\x03\"\x97\x03\n\x11SplitQueryRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12 \n\x05query\x18\x04 \x01(\x0b\x32\x11.query.BoundQuery\x12\x14\n\x0csplit_column\x18\x05 \x03(\t\x12\x13\n\x0bsplit_count\x18\x06 \x01(\x03\x12\x1f\n\x17num_rows_per_query_part\x18\x08 \x01(\x03\x12\x12\n\nsession_id\x18\x07 \x01(\x03\x12\x35\n\talgorithm\x18\t \x01(\x0e\x32\".query.SplitQueryRequest.Algorithm\x12\x1a\n\x12use_split_query_v2\x18\n \x01(\x08\",\n\tAlgorithm\x12\x10\n\x0c\x45QUAL_SPLITS\x10\x00\x12\r\n\tFULL_SCAN\x10\x01\"A\n\nQuerySplit\x12 \n\x05query\x18\x01 \x01(\x0b\x32\x11.query.BoundQuery\x12\x11\n\trow_count\x18\x02 \x01(\x03\"8\n\x12SplitQueryResponse\x12\"\n\x07queries\x18\x01 \x03(\x0b\x32\x11.query.QuerySplit\"\x15\n\x13StreamHealthRequest\"\xb6\x01\n\rRealtimeStats\x12\x14\n\x0chealth_error\x18\x01 \x01(\t\x12\x1d\n\x15seconds_behind_master\x18\x02 \x01(\r\x12\x1c\n\x14\x62inlog_players_count\x18\x03 \x01(\x05\x12\x32\n*seconds_behind_master_filtered_replication\x18\x04 \x01(\x03\x12\x11\n\tcpu_usage\x18\x05 \x01(\x01\x12\x0b\n\x03qps\x18\x06 \x01(\x01\"\xa4\x01\n\x14StreamHealthResponse\x12\x1d\n\x06target\x18\x01 \x01(\x0b\x32\r.query.Target\x12\x0f\n\x07serving\x18\x02 \x01(\x08\x12.\n&tablet_externally_reparented_timestamp\x18\x03 \x01(\x03\x12,\n\x0erealtime_stats\x18\x04 \x01(\x0b\x32\x14.query.RealtimeStats*k\n\x04\x46lag\x12\x08\n\x04NONE\x10\x00\x12\x0f\n\nISINTEGRAL\x10\x80\x02\x12\x0f\n\nISUNSIGNED\x10\x80\x04\x12\x0c\n\x07ISFLOAT\x10\x80\x08\x12\r\n\x08ISQUOTED\x10\x80\x10\x12\x0b\n\x06ISTEXT\x10\x80 
\x12\r\n\x08ISBINARY\x10\x80@*\xef\x02\n\x04Type\x12\r\n\tNULL_TYPE\x10\x00\x12\t\n\x04INT8\x10\x81\x02\x12\n\n\x05UINT8\x10\x82\x06\x12\n\n\x05INT16\x10\x83\x02\x12\x0b\n\x06UINT16\x10\x84\x06\x12\n\n\x05INT24\x10\x85\x02\x12\x0b\n\x06UINT24\x10\x86\x06\x12\n\n\x05INT32\x10\x87\x02\x12\x0b\n\x06UINT32\x10\x88\x06\x12\n\n\x05INT64\x10\x89\x02\x12\x0b\n\x06UINT64\x10\x8a\x06\x12\x0c\n\x07\x46LOAT32\x10\x8b\x08\x12\x0c\n\x07\x46LOAT64\x10\x8c\x08\x12\x0e\n\tTIMESTAMP\x10\x8d\x10\x12\t\n\x04\x44\x41TE\x10\x8e\x10\x12\t\n\x04TIME\x10\x8f\x10\x12\r\n\x08\x44\x41TETIME\x10\x90\x10\x12\t\n\x04YEAR\x10\x91\x06\x12\x0b\n\x07\x44\x45\x43IMAL\x10\x12\x12\t\n\x04TEXT\x10\x93\x30\x12\t\n\x04\x42LOB\x10\x94P\x12\x0c\n\x07VARCHAR\x10\x95\x30\x12\x0e\n\tVARBINARY\x10\x96P\x12\t\n\x04\x43HAR\x10\x97\x30\x12\x0b\n\x06\x42INARY\x10\x98P\x12\x08\n\x03\x42IT\x10\x99\x10\x12\t\n\x04\x45NUM\x10\x9a\x10\x12\x08\n\x03SET\x10\x9b\x10\x12\t\n\x05TUPLE\x10\x1c\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3')
,
dependencies=[topodata__pb2.DESCRIPTOR,vtrpc__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_FLAG = _descriptor.EnumDescriptor(
name='Flag',
full_name='query.Flag',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ISINTEGRAL', index=1, number=256,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ISUNSIGNED', index=2, number=512,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ISFLOAT', index=3, number=1024,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ISQUOTED', index=4, number=2048,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ISTEXT', index=5, number=4096,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ISBINARY', index=6, number=8192,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3929,
serialized_end=4036,
)
_sym_db.RegisterEnumDescriptor(_FLAG)
Flag = enum_type_wrapper.EnumTypeWrapper(_FLAG)
_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='query.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NULL_TYPE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT8', index=1, number=257,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UINT8', index=2, number=770,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT16', index=3, number=259,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UINT16', index=4, number=772,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT24', index=5, number=261,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UINT24', index=6, number=774,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT32', index=7, number=263,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UINT32', index=8, number=776,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT64', index=9, number=265,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UINT64', index=10, number=778,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT32', index=11, number=1035,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT64', index=12, number=1036,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TIMESTAMP', index=13, number=2061,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATE', index=14, number=2062,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TIME', index=15, number=2063,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATETIME', index=16, number=2064,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='YEAR', index=17, number=785,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DECIMAL', index=18, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEXT', index=19, number=6163,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLOB', index=20, number=10260,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VARCHAR', index=21, number=6165,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VARBINARY', index=22, number=10262,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHAR', index=23, number=6167,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BINARY', index=24, number=10264,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BIT', index=25, number=2073,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ENUM', index=26, number=2074,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET', index=27, number=2075,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TUPLE', index=28, number=28,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4039,
serialized_end=4406,
)
_sym_db.RegisterEnumDescriptor(_TYPE)
Type = enum_type_wrapper.EnumTypeWrapper(_TYPE)
NONE = 0
ISINTEGRAL = 256
ISUNSIGNED = 512
ISFLOAT = 1024
ISQUOTED = 2048
ISTEXT = 4096
ISBINARY = 8192
NULL_TYPE = 0
INT8 = 257
UINT8 = 770
INT16 = 259
UINT16 = 772
INT24 = 261
UINT24 = 774
INT32 = 263
UINT32 = 776
INT64 = 265
UINT64 = 778
FLOAT32 = 1035
FLOAT64 = 1036
TIMESTAMP = 2061
DATE = 2062
TIME = 2063
DATETIME = 2064
YEAR = 785
DECIMAL = 18
TEXT = 6163
BLOB = 10260
VARCHAR = 6165
VARBINARY = 10262
CHAR = 6167
BINARY = 10264
BIT = 2073
ENUM = 2074
SET = 2075
TUPLE = 28
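# --- Illustrative note (not part of the generated descriptors) ---
# The numeric Type values above pack a base code together with the Flag bits;
# for example INT8 == 1 | ISINTEGRAL (257) and VARBINARY ==
# 22 | ISQUOTED | ISBINARY (10262), so a type's category can be tested with a
# bitwise AND, e.g.:
#
#   def is_integral(t):
#       return bool(t & ISINTEGRAL)
#
#   is_integral(INT64)    # True  (265 & 256 != 0)
#   is_integral(VARCHAR)  # False (6165 carries ISTEXT|ISQUOTED, not ISINTEGRAL)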
_SPLITQUERYREQUEST_ALGORITHM = _descriptor.EnumDescriptor(
name='Algorithm',
full_name='query.SplitQueryRequest.Algorithm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='EQUAL_SPLITS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FULL_SCAN', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3383,
serialized_end=3427,
)
_sym_db.RegisterEnumDescriptor(_SPLITQUERYREQUEST_ALGORITHM)
_TARGET = _descriptor.Descriptor(
name='Target',
full_name='query.Target',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='keyspace', full_name='query.Target.keyspace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shard', full_name='query.Target.shard', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tablet_type', full_name='query.Target.tablet_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=135,
)
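# --- Illustrative usage sketch (an assumption: the generated Target class is
# exposed by this module once message registration, further below in the file,
# has run) ---
#
#   import query_pb2
#   import topodata_pb2
#   target = query_pb2.Target(keyspace='test_keyspace', shard='0')
#   target.tablet_type = topodata_pb2.TabletType.Value('MASTER')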
_VTGATECALLERID = _descriptor.Descriptor(
name='VTGateCallerID',
full_name='query.VTGateCallerID',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='username', full_name='query.VTGateCallerID.username', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=171,
)
_VALUE = _descriptor.Descriptor(
name='Value',
full_name='query.Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='query.Value.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='query.Value.value', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=173,
serialized_end=222,
)
_BINDVARIABLE = _descriptor.Descriptor(
name='BindVariable',
full_name='query.BindVariable',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='query.BindVariable.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='query.BindVariable.value', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='values', full_name='query.BindVariable.values', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=224,
serialized_end=310,
)
_BOUNDQUERY_BINDVARIABLESENTRY = _descriptor.Descriptor(
name='BindVariablesEntry',
full_name='query.BoundQuery.BindVariablesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='query.BoundQuery.BindVariablesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='query.BoundQuery.BindVariablesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=402,
serialized_end=475,
)
_BOUNDQUERY = _descriptor.Descriptor(
name='BoundQuery',
full_name='query.BoundQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sql', full_name='query.BoundQuery.sql', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bind_variables', full_name='query.BoundQuery.bind_variables', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_BOUNDQUERY_BINDVARIABLESENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=313,
serialized_end=475,
)
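# --- Illustrative usage sketch (an assumption: this module is importable as
# query_pb2, and the concrete message classes are registered later in this
# generated file) ---
#
#   import query_pb2
#   bq = query_pb2.BoundQuery(sql='SELECT * FROM t WHERE id = :id')
#   bq.bind_variables['id'].type = query_pb2.INT64   # map entry is created on access
#   bq.bind_variables['id'].value = b'42'            # raw bytes, per BindVariable.value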
_FIELD = _descriptor.Descriptor(
name='Field',
full_name='query.Field',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='query.Field.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='query.Field.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=477,
serialized_end=525,
)
_ROW = _descriptor.Descriptor(
name='Row',
full_name='query.Row',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lengths', full_name='query.Row.lengths', index=0,
number=1, type=18, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='values', full_name='query.Row.values', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=527,
serialized_end=565,
)
_QUERYRESULT = _descriptor.Descriptor(
name='QueryResult',
full_name='query.QueryResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fields', full_name='query.QueryResult.fields', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rows_affected', full_name='query.QueryResult.rows_affected', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='insert_id', full_name='query.QueryResult.insert_id', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rows', full_name='query.QueryResult.rows', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=567,
serialized_end=678,
)
_GETSESSIONIDREQUEST = _descriptor.Descriptor(
name='GetSessionIdRequest',
full_name='query.GetSessionIdRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.GetSessionIdRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.GetSessionIdRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keyspace', full_name='query.GetSessionIdRequest.keyspace', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shard', full_name='query.GetSessionIdRequest.shard', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=681,
serialized_end=833,
)
_GETSESSIONIDRESPONSE = _descriptor.Descriptor(
name='GetSessionIdResponse',
full_name='query.GetSessionIdResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='session_id', full_name='query.GetSessionIdResponse.session_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=835,
serialized_end=877,
)
_EXECUTEREQUEST = _descriptor.Descriptor(
name='ExecuteRequest',
full_name='query.ExecuteRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.ExecuteRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.ExecuteRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='query.ExecuteRequest.target', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='query.ExecuteRequest.query', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction_id', full_name='query.ExecuteRequest.transaction_id', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_id', full_name='query.ExecuteRequest.session_id', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=880,
serialized_end=1103,
)
_EXECUTERESPONSE = _descriptor.Descriptor(
name='ExecuteResponse',
full_name='query.ExecuteResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='query.ExecuteResponse.result', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1105,
serialized_end=1158,
)
_EXECUTEBATCHREQUEST = _descriptor.Descriptor(
name='ExecuteBatchRequest',
full_name='query.ExecuteBatchRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.ExecuteBatchRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.ExecuteBatchRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='query.ExecuteBatchRequest.target', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='queries', full_name='query.ExecuteBatchRequest.queries', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='as_transaction', full_name='query.ExecuteBatchRequest.as_transaction', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction_id', full_name='query.ExecuteBatchRequest.transaction_id', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_id', full_name='query.ExecuteBatchRequest.session_id', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1161,
serialized_end=1415,
)
_EXECUTEBATCHRESPONSE = _descriptor.Descriptor(
name='ExecuteBatchResponse',
full_name='query.ExecuteBatchResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='query.ExecuteBatchResponse.results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1417,
serialized_end=1476,
)
_STREAMEXECUTEREQUEST = _descriptor.Descriptor(
name='StreamExecuteRequest',
full_name='query.StreamExecuteRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.StreamExecuteRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.StreamExecuteRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='query.StreamExecuteRequest.target', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='query.StreamExecuteRequest.query', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_id', full_name='query.StreamExecuteRequest.session_id', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1479,
serialized_end=1684,
)
_STREAMEXECUTERESPONSE = _descriptor.Descriptor(
name='StreamExecuteResponse',
full_name='query.StreamExecuteResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='query.StreamExecuteResponse.result', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1686,
serialized_end=1745,
)
_BEGINREQUEST = _descriptor.Descriptor(
name='BeginRequest',
full_name='query.BeginRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.BeginRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.BeginRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='query.BeginRequest.target', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_id', full_name='query.BeginRequest.session_id', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1748,
serialized_end=1911,
)
_BEGINRESPONSE = _descriptor.Descriptor(
name='BeginResponse',
full_name='query.BeginResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction_id', full_name='query.BeginResponse.transaction_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1913,
serialized_end=1952,
)
_COMMITREQUEST = _descriptor.Descriptor(
name='CommitRequest',
full_name='query.CommitRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.CommitRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.CommitRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='query.CommitRequest.target', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction_id', full_name='query.CommitRequest.transaction_id', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_id', full_name='query.CommitRequest.session_id', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1955,
serialized_end=2143,
)
_COMMITRESPONSE = _descriptor.Descriptor(
name='CommitResponse',
full_name='query.CommitResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2145,
serialized_end=2161,
)
_ROLLBACKREQUEST = _descriptor.Descriptor(
name='RollbackRequest',
full_name='query.RollbackRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.RollbackRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.RollbackRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='query.RollbackRequest.target', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction_id', full_name='query.RollbackRequest.transaction_id', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_id', full_name='query.RollbackRequest.session_id', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2164,
serialized_end=2354,
)
_ROLLBACKRESPONSE = _descriptor.Descriptor(
name='RollbackResponse',
full_name='query.RollbackResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2356,
serialized_end=2374,
)
_BEGINEXECUTEREQUEST = _descriptor.Descriptor(
name='BeginExecuteRequest',
full_name='query.BeginExecuteRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.BeginExecuteRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.BeginExecuteRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='query.BeginExecuteRequest.target', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='query.BeginExecuteRequest.query', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2377,
serialized_end=2561,
)
_BEGINEXECUTERESPONSE = _descriptor.Descriptor(
name='BeginExecuteResponse',
full_name='query.BeginExecuteResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='error', full_name='query.BeginExecuteResponse.error', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='result', full_name='query.BeginExecuteResponse.result', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction_id', full_name='query.BeginExecuteResponse.transaction_id', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2563,
serialized_end=2677,
)
_BEGINEXECUTEBATCHREQUEST = _descriptor.Descriptor(
name='BeginExecuteBatchRequest',
full_name='query.BeginExecuteBatchRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.BeginExecuteBatchRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.BeginExecuteBatchRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='query.BeginExecuteBatchRequest.target', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='queries', full_name='query.BeginExecuteBatchRequest.queries', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='as_transaction', full_name='query.BeginExecuteBatchRequest.as_transaction', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2680,
serialized_end=2895,
)
_BEGINEXECUTEBATCHRESPONSE = _descriptor.Descriptor(
name='BeginExecuteBatchResponse',
full_name='query.BeginExecuteBatchResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='error', full_name='query.BeginExecuteBatchResponse.error', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='results', full_name='query.BeginExecuteBatchResponse.results', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction_id', full_name='query.BeginExecuteBatchResponse.transaction_id', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2897,
serialized_end=3017,
)
_SPLITQUERYREQUEST = _descriptor.Descriptor(
name='SplitQueryRequest',
full_name='query.SplitQueryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effective_caller_id', full_name='query.SplitQueryRequest.effective_caller_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='immediate_caller_id', full_name='query.SplitQueryRequest.immediate_caller_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='query.SplitQueryRequest.target', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='query.SplitQueryRequest.query', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='split_column', full_name='query.SplitQueryRequest.split_column', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='split_count', full_name='query.SplitQueryRequest.split_count', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_rows_per_query_part', full_name='query.SplitQueryRequest.num_rows_per_query_part', index=6,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_id', full_name='query.SplitQueryRequest.session_id', index=7,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='algorithm', full_name='query.SplitQueryRequest.algorithm', index=8,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_split_query_v2', full_name='query.SplitQueryRequest.use_split_query_v2', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SPLITQUERYREQUEST_ALGORITHM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3020,
serialized_end=3427,
)
_QUERYSPLIT = _descriptor.Descriptor(
name='QuerySplit',
full_name='query.QuerySplit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='query.QuerySplit.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_count', full_name='query.QuerySplit.row_count', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3429,
serialized_end=3494,
)
_SPLITQUERYRESPONSE = _descriptor.Descriptor(
name='SplitQueryResponse',
full_name='query.SplitQueryResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='queries', full_name='query.SplitQueryResponse.queries', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3496,
serialized_end=3552,
)
_STREAMHEALTHREQUEST = _descriptor.Descriptor(
name='StreamHealthRequest',
full_name='query.StreamHealthRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3554,
serialized_end=3575,
)
_REALTIMESTATS = _descriptor.Descriptor(
name='RealtimeStats',
full_name='query.RealtimeStats',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='health_error', full_name='query.RealtimeStats.health_error', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seconds_behind_master', full_name='query.RealtimeStats.seconds_behind_master', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='binlog_players_count', full_name='query.RealtimeStats.binlog_players_count', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seconds_behind_master_filtered_replication', full_name='query.RealtimeStats.seconds_behind_master_filtered_replication', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cpu_usage', full_name='query.RealtimeStats.cpu_usage', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='qps', full_name='query.RealtimeStats.qps', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3578,
serialized_end=3760,
)
_STREAMHEALTHRESPONSE = _descriptor.Descriptor(
name='StreamHealthResponse',
full_name='query.StreamHealthResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='target', full_name='query.StreamHealthResponse.target', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='serving', full_name='query.StreamHealthResponse.serving', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tablet_externally_reparented_timestamp', full_name='query.StreamHealthResponse.tablet_externally_reparented_timestamp', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='realtime_stats', full_name='query.StreamHealthResponse.realtime_stats', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3763,
serialized_end=3927,
)
_TARGET.fields_by_name['tablet_type'].enum_type = topodata__pb2._TABLETTYPE
_VALUE.fields_by_name['type'].enum_type = _TYPE
_BINDVARIABLE.fields_by_name['type'].enum_type = _TYPE
_BINDVARIABLE.fields_by_name['values'].message_type = _VALUE
_BOUNDQUERY_BINDVARIABLESENTRY.fields_by_name['value'].message_type = _BINDVARIABLE
_BOUNDQUERY_BINDVARIABLESENTRY.containing_type = _BOUNDQUERY
_BOUNDQUERY.fields_by_name['bind_variables'].message_type = _BOUNDQUERY_BINDVARIABLESENTRY
_FIELD.fields_by_name['type'].enum_type = _TYPE
_QUERYRESULT.fields_by_name['fields'].message_type = _FIELD
_QUERYRESULT.fields_by_name['rows'].message_type = _ROW
_GETSESSIONIDREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_GETSESSIONIDREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_EXECUTEREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_EXECUTEREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_EXECUTEREQUEST.fields_by_name['target'].message_type = _TARGET
_EXECUTEREQUEST.fields_by_name['query'].message_type = _BOUNDQUERY
_EXECUTERESPONSE.fields_by_name['result'].message_type = _QUERYRESULT
_EXECUTEBATCHREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_EXECUTEBATCHREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_EXECUTEBATCHREQUEST.fields_by_name['target'].message_type = _TARGET
_EXECUTEBATCHREQUEST.fields_by_name['queries'].message_type = _BOUNDQUERY
_EXECUTEBATCHRESPONSE.fields_by_name['results'].message_type = _QUERYRESULT
_STREAMEXECUTEREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_STREAMEXECUTEREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_STREAMEXECUTEREQUEST.fields_by_name['target'].message_type = _TARGET
_STREAMEXECUTEREQUEST.fields_by_name['query'].message_type = _BOUNDQUERY
_STREAMEXECUTERESPONSE.fields_by_name['result'].message_type = _QUERYRESULT
_BEGINREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_BEGINREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_BEGINREQUEST.fields_by_name['target'].message_type = _TARGET
_COMMITREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_COMMITREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_COMMITREQUEST.fields_by_name['target'].message_type = _TARGET
_ROLLBACKREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_ROLLBACKREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_ROLLBACKREQUEST.fields_by_name['target'].message_type = _TARGET
_BEGINEXECUTEREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_BEGINEXECUTEREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_BEGINEXECUTEREQUEST.fields_by_name['target'].message_type = _TARGET
_BEGINEXECUTEREQUEST.fields_by_name['query'].message_type = _BOUNDQUERY
_BEGINEXECUTERESPONSE.fields_by_name['error'].message_type = vtrpc__pb2._RPCERROR
_BEGINEXECUTERESPONSE.fields_by_name['result'].message_type = _QUERYRESULT
_BEGINEXECUTEBATCHREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_BEGINEXECUTEBATCHREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_BEGINEXECUTEBATCHREQUEST.fields_by_name['target'].message_type = _TARGET
_BEGINEXECUTEBATCHREQUEST.fields_by_name['queries'].message_type = _BOUNDQUERY
_BEGINEXECUTEBATCHRESPONSE.fields_by_name['error'].message_type = vtrpc__pb2._RPCERROR
_BEGINEXECUTEBATCHRESPONSE.fields_by_name['results'].message_type = _QUERYRESULT
_SPLITQUERYREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID
_SPLITQUERYREQUEST.fields_by_name['immediate_caller_id'].message_type = _VTGATECALLERID
_SPLITQUERYREQUEST.fields_by_name['target'].message_type = _TARGET
_SPLITQUERYREQUEST.fields_by_name['query'].message_type = _BOUNDQUERY
_SPLITQUERYREQUEST.fields_by_name['algorithm'].enum_type = _SPLITQUERYREQUEST_ALGORITHM
_SPLITQUERYREQUEST_ALGORITHM.containing_type = _SPLITQUERYREQUEST
_QUERYSPLIT.fields_by_name['query'].message_type = _BOUNDQUERY
_SPLITQUERYRESPONSE.fields_by_name['queries'].message_type = _QUERYSPLIT
_STREAMHEALTHRESPONSE.fields_by_name['target'].message_type = _TARGET
_STREAMHEALTHRESPONSE.fields_by_name['realtime_stats'].message_type = _REALTIMESTATS
DESCRIPTOR.message_types_by_name['Target'] = _TARGET
DESCRIPTOR.message_types_by_name['VTGateCallerID'] = _VTGATECALLERID
DESCRIPTOR.message_types_by_name['Value'] = _VALUE
DESCRIPTOR.message_types_by_name['BindVariable'] = _BINDVARIABLE
DESCRIPTOR.message_types_by_name['BoundQuery'] = _BOUNDQUERY
DESCRIPTOR.message_types_by_name['Field'] = _FIELD
DESCRIPTOR.message_types_by_name['Row'] = _ROW
DESCRIPTOR.message_types_by_name['QueryResult'] = _QUERYRESULT
DESCRIPTOR.message_types_by_name['GetSessionIdRequest'] = _GETSESSIONIDREQUEST
DESCRIPTOR.message_types_by_name['GetSessionIdResponse'] = _GETSESSIONIDRESPONSE
DESCRIPTOR.message_types_by_name['ExecuteRequest'] = _EXECUTEREQUEST
DESCRIPTOR.message_types_by_name['ExecuteResponse'] = _EXECUTERESPONSE
DESCRIPTOR.message_types_by_name['ExecuteBatchRequest'] = _EXECUTEBATCHREQUEST
DESCRIPTOR.message_types_by_name['ExecuteBatchResponse'] = _EXECUTEBATCHRESPONSE
DESCRIPTOR.message_types_by_name['StreamExecuteRequest'] = _STREAMEXECUTEREQUEST
DESCRIPTOR.message_types_by_name['StreamExecuteResponse'] = _STREAMEXECUTERESPONSE
DESCRIPTOR.message_types_by_name['BeginRequest'] = _BEGINREQUEST
DESCRIPTOR.message_types_by_name['BeginResponse'] = _BEGINRESPONSE
DESCRIPTOR.message_types_by_name['CommitRequest'] = _COMMITREQUEST
DESCRIPTOR.message_types_by_name['CommitResponse'] = _COMMITRESPONSE
DESCRIPTOR.message_types_by_name['RollbackRequest'] = _ROLLBACKREQUEST
DESCRIPTOR.message_types_by_name['RollbackResponse'] = _ROLLBACKRESPONSE
DESCRIPTOR.message_types_by_name['BeginExecuteRequest'] = _BEGINEXECUTEREQUEST
DESCRIPTOR.message_types_by_name['BeginExecuteResponse'] = _BEGINEXECUTERESPONSE
DESCRIPTOR.message_types_by_name['BeginExecuteBatchRequest'] = _BEGINEXECUTEBATCHREQUEST
DESCRIPTOR.message_types_by_name['BeginExecuteBatchResponse'] = _BEGINEXECUTEBATCHRESPONSE
DESCRIPTOR.message_types_by_name['SplitQueryRequest'] = _SPLITQUERYREQUEST
DESCRIPTOR.message_types_by_name['QuerySplit'] = _QUERYSPLIT
DESCRIPTOR.message_types_by_name['SplitQueryResponse'] = _SPLITQUERYRESPONSE
DESCRIPTOR.message_types_by_name['StreamHealthRequest'] = _STREAMHEALTHREQUEST
DESCRIPTOR.message_types_by_name['RealtimeStats'] = _REALTIMESTATS
DESCRIPTOR.message_types_by_name['StreamHealthResponse'] = _STREAMHEALTHRESPONSE
DESCRIPTOR.enum_types_by_name['Flag'] = _FLAG
DESCRIPTOR.enum_types_by_name['Type'] = _TYPE
Target = _reflection.GeneratedProtocolMessageType('Target', (_message.Message,), dict(
DESCRIPTOR = _TARGET,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.Target)
))
_sym_db.RegisterMessage(Target)
VTGateCallerID = _reflection.GeneratedProtocolMessageType('VTGateCallerID', (_message.Message,), dict(
DESCRIPTOR = _VTGATECALLERID,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.VTGateCallerID)
))
_sym_db.RegisterMessage(VTGateCallerID)
Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), dict(
DESCRIPTOR = _VALUE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.Value)
))
_sym_db.RegisterMessage(Value)
BindVariable = _reflection.GeneratedProtocolMessageType('BindVariable', (_message.Message,), dict(
DESCRIPTOR = _BINDVARIABLE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.BindVariable)
))
_sym_db.RegisterMessage(BindVariable)
BoundQuery = _reflection.GeneratedProtocolMessageType('BoundQuery', (_message.Message,), dict(
BindVariablesEntry = _reflection.GeneratedProtocolMessageType('BindVariablesEntry', (_message.Message,), dict(
DESCRIPTOR = _BOUNDQUERY_BINDVARIABLESENTRY,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.BoundQuery.BindVariablesEntry)
))
,
DESCRIPTOR = _BOUNDQUERY,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.BoundQuery)
))
_sym_db.RegisterMessage(BoundQuery)
_sym_db.RegisterMessage(BoundQuery.BindVariablesEntry)
Field = _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), dict(
DESCRIPTOR = _FIELD,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.Field)
))
_sym_db.RegisterMessage(Field)
Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict(
DESCRIPTOR = _ROW,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.Row)
))
_sym_db.RegisterMessage(Row)
QueryResult = _reflection.GeneratedProtocolMessageType('QueryResult', (_message.Message,), dict(
DESCRIPTOR = _QUERYRESULT,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.QueryResult)
))
_sym_db.RegisterMessage(QueryResult)
GetSessionIdRequest = _reflection.GeneratedProtocolMessageType('GetSessionIdRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSESSIONIDREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.GetSessionIdRequest)
))
_sym_db.RegisterMessage(GetSessionIdRequest)
GetSessionIdResponse = _reflection.GeneratedProtocolMessageType('GetSessionIdResponse', (_message.Message,), dict(
DESCRIPTOR = _GETSESSIONIDRESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.GetSessionIdResponse)
))
_sym_db.RegisterMessage(GetSessionIdResponse)
ExecuteRequest = _reflection.GeneratedProtocolMessageType('ExecuteRequest', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.ExecuteRequest)
))
_sym_db.RegisterMessage(ExecuteRequest)
ExecuteResponse = _reflection.GeneratedProtocolMessageType('ExecuteResponse', (_message.Message,), dict(
DESCRIPTOR = _EXECUTERESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.ExecuteResponse)
))
_sym_db.RegisterMessage(ExecuteResponse)
ExecuteBatchRequest = _reflection.GeneratedProtocolMessageType('ExecuteBatchRequest', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEBATCHREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.ExecuteBatchRequest)
))
_sym_db.RegisterMessage(ExecuteBatchRequest)
ExecuteBatchResponse = _reflection.GeneratedProtocolMessageType('ExecuteBatchResponse', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEBATCHRESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.ExecuteBatchResponse)
))
_sym_db.RegisterMessage(ExecuteBatchResponse)
StreamExecuteRequest = _reflection.GeneratedProtocolMessageType('StreamExecuteRequest', (_message.Message,), dict(
DESCRIPTOR = _STREAMEXECUTEREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.StreamExecuteRequest)
))
_sym_db.RegisterMessage(StreamExecuteRequest)
StreamExecuteResponse = _reflection.GeneratedProtocolMessageType('StreamExecuteResponse', (_message.Message,), dict(
DESCRIPTOR = _STREAMEXECUTERESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.StreamExecuteResponse)
))
_sym_db.RegisterMessage(StreamExecuteResponse)
BeginRequest = _reflection.GeneratedProtocolMessageType('BeginRequest', (_message.Message,), dict(
DESCRIPTOR = _BEGINREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.BeginRequest)
))
_sym_db.RegisterMessage(BeginRequest)
BeginResponse = _reflection.GeneratedProtocolMessageType('BeginResponse', (_message.Message,), dict(
DESCRIPTOR = _BEGINRESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.BeginResponse)
))
_sym_db.RegisterMessage(BeginResponse)
CommitRequest = _reflection.GeneratedProtocolMessageType('CommitRequest', (_message.Message,), dict(
DESCRIPTOR = _COMMITREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.CommitRequest)
))
_sym_db.RegisterMessage(CommitRequest)
CommitResponse = _reflection.GeneratedProtocolMessageType('CommitResponse', (_message.Message,), dict(
DESCRIPTOR = _COMMITRESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.CommitResponse)
))
_sym_db.RegisterMessage(CommitResponse)
RollbackRequest = _reflection.GeneratedProtocolMessageType('RollbackRequest', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.RollbackRequest)
))
_sym_db.RegisterMessage(RollbackRequest)
RollbackResponse = _reflection.GeneratedProtocolMessageType('RollbackResponse', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKRESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.RollbackResponse)
))
_sym_db.RegisterMessage(RollbackResponse)
BeginExecuteRequest = _reflection.GeneratedProtocolMessageType('BeginExecuteRequest', (_message.Message,), dict(
DESCRIPTOR = _BEGINEXECUTEREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.BeginExecuteRequest)
))
_sym_db.RegisterMessage(BeginExecuteRequest)
BeginExecuteResponse = _reflection.GeneratedProtocolMessageType('BeginExecuteResponse', (_message.Message,), dict(
DESCRIPTOR = _BEGINEXECUTERESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.BeginExecuteResponse)
))
_sym_db.RegisterMessage(BeginExecuteResponse)
BeginExecuteBatchRequest = _reflection.GeneratedProtocolMessageType('BeginExecuteBatchRequest', (_message.Message,), dict(
DESCRIPTOR = _BEGINEXECUTEBATCHREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.BeginExecuteBatchRequest)
))
_sym_db.RegisterMessage(BeginExecuteBatchRequest)
BeginExecuteBatchResponse = _reflection.GeneratedProtocolMessageType('BeginExecuteBatchResponse', (_message.Message,), dict(
DESCRIPTOR = _BEGINEXECUTEBATCHRESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.BeginExecuteBatchResponse)
))
_sym_db.RegisterMessage(BeginExecuteBatchResponse)
SplitQueryRequest = _reflection.GeneratedProtocolMessageType('SplitQueryRequest', (_message.Message,), dict(
DESCRIPTOR = _SPLITQUERYREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.SplitQueryRequest)
))
_sym_db.RegisterMessage(SplitQueryRequest)
QuerySplit = _reflection.GeneratedProtocolMessageType('QuerySplit', (_message.Message,), dict(
DESCRIPTOR = _QUERYSPLIT,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.QuerySplit)
))
_sym_db.RegisterMessage(QuerySplit)
SplitQueryResponse = _reflection.GeneratedProtocolMessageType('SplitQueryResponse', (_message.Message,), dict(
DESCRIPTOR = _SPLITQUERYRESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.SplitQueryResponse)
))
_sym_db.RegisterMessage(SplitQueryResponse)
StreamHealthRequest = _reflection.GeneratedProtocolMessageType('StreamHealthRequest', (_message.Message,), dict(
DESCRIPTOR = _STREAMHEALTHREQUEST,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.StreamHealthRequest)
))
_sym_db.RegisterMessage(StreamHealthRequest)
RealtimeStats = _reflection.GeneratedProtocolMessageType('RealtimeStats', (_message.Message,), dict(
DESCRIPTOR = _REALTIMESTATS,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.RealtimeStats)
))
_sym_db.RegisterMessage(RealtimeStats)
StreamHealthResponse = _reflection.GeneratedProtocolMessageType('StreamHealthResponse', (_message.Message,), dict(
DESCRIPTOR = _STREAMHEALTHRESPONSE,
__module__ = 'query_pb2'
# @@protoc_insertion_point(class_scope:query.StreamHealthResponse)
))
_sym_db.RegisterMessage(StreamHealthResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.youtube.vitess.proto'))
_BOUNDQUERY_BINDVARIABLESENTRY.has_options = True
_BOUNDQUERY_BINDVARIABLESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
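# Illustrative usage sketch (an addition for clarity, not part of the generated
# output): assuming this module is importable and that `Target` exposes string
# fields named `keyspace` and `shard`, messages round-trip through the standard
# protobuf Python API as shown below.
if __name__ == "__main__":
    example_target = Target(keyspace="test_keyspace", shard="0")  # assumed field names
    wire_bytes = example_target.SerializeToString()  # serialize to the binary wire format
    decoded = Target()
    decoded.ParseFromString(wire_bytes)  # parse the bytes back into a message
    assert decoded.keyspace == "test_keyspace"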
| 37.787183
| 8,052
| 0.742688
|
6e9e544b5c45a235cce6d9f62de8e958ead99d2c
| 4,345
|
bzl
|
Python
|
apple/internal/linking_support.bzl
|
mccorkill1/rules_apple
|
8562971108c11931618a220731c335e9fab9fb49
|
[
"Apache-2.0"
] | null | null | null |
apple/internal/linking_support.bzl
|
mccorkill1/rules_apple
|
8562971108c11931618a220731c335e9fab9fb49
|
[
"Apache-2.0"
] | 1
|
2021-02-23T17:44:22.000Z
|
2021-02-23T17:44:22.000Z
|
apple/internal/linking_support.bzl
|
mccorkill1/rules_apple
|
8562971108c11931618a220731c335e9fab9fb49
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for linking related actions."""
load(
"@build_bazel_rules_apple//apple/internal:rule_support.bzl",
"rule_support",
)
def _sectcreate_objc_provider(segname, sectname, file):
"""Returns an objc provider that propagates a section in a linked binary.
This function creates a new objc provider that contains the necessary linkopts
to create a new section in the binary to which the provider is propagated; it
is equivalent to the `ld` flag `-sectcreate segname sectname file`. This can
be used, for example, to embed entitlements in a simulator executable (since
they are not applied during code signing).
Args:
segname: The name of the segment in which the section will be created.
sectname: The name of the section to create.
file: The file whose contents will be used as the content of the section.
Returns:
An objc provider that propagates the section linkopts.
"""
    # linkopts get deduped, so use a single option to pass them through as a
# set.
linkopts = ["-Wl,-sectcreate,%s,%s,%s" % (segname, sectname, file.path)]
return apple_common.new_objc_provider(
linkopt = depset(linkopts, order = "topological"),
link_inputs = depset([file]),
)
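# Illustrative sketch (an assumption, not part of the original file): a rule
# implementation could embed entitlements in a simulator binary by returning
#
#     linking_support.sectcreate_objc_provider(
#         "__TEXT", "__entitlements", ctx.file.entitlements,
#     )
#
# where `ctx.file.entitlements` is a hypothetical single-file attribute on the
# calling rule; the propagated flag is equivalent to passing
# `-Wl,-sectcreate,__TEXT,__entitlements,<path to entitlements file>` to the linker.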
def _exported_symbols_list_objc_provider(files):
"""Returns an objc provider that propagates exported symbols lists.
This function creates a new objc provider that contains the necessary linkopts
    to add exported symbols lists.
Args:
files: The files whose contents will be the exported symbols lists.
Returns:
An objc provider that propagates the appropriate linkopts.
"""
linkopts = ["-Wl,-exported_symbols_list,%s" % (file.path) for file in files]
return apple_common.new_objc_provider(
linkopt = depset(linkopts, order = "topological"),
link_inputs = depset(files),
)
def _register_linking_action(ctx, extra_linkopts = []):
"""Registers linking actions using the Starlark Linking API for Apple binaries.
This method will add the linkopts as added on the rule descriptor, in addition to any extra
linkopts given when invoking this method.
Args:
ctx: The rule context.
extra_linkopts: Extra linkopts to add to the linking action.
Returns:
The `struct` returned by `apple_common.link_multi_arch_binary`, which contains the
following fields:
* `binary_provider`: A provider describing the binary that was linked. This is an
instance of either `AppleExecutableBinaryInfo`, `AppleDylibBinaryInfo`, or
`AppleLoadableBundleBinaryInfo`; all three have a `binary` field that is the linked
binary `File`.
* `debug_outputs_provider`: An `AppleDebugOutputsInfo` provider that contains debug
outputs, such as linkmaps and dSYM binaries.
* `output_groups`: A `dict` containing output groups that should be returned in the
`OutputGroupInfo` provider of the calling rule.
"""
linkopts = []
# Compatibility path for `apple_binary`, which does not have a product type.
if hasattr(ctx.attr, "_product_type"):
rule_descriptor = rule_support.rule_descriptor(ctx)
linkopts.extend(["-Wl,-rpath,{}".format(rpath) for rpath in rule_descriptor.rpaths])
linkopts.extend(rule_descriptor.extra_linkopts + extra_linkopts)
return apple_common.link_multi_arch_binary(
ctx = ctx,
extra_linkopts = linkopts,
)
linking_support = struct(
exported_symbols_list_objc_provider = _exported_symbols_list_objc_provider,
register_linking_action = _register_linking_action,
sectcreate_objc_provider = _sectcreate_objc_provider,
)
| 40.607477
| 95
| 0.718067
|
67529a10f7cf465815eed1d8d54a731ae6998c9e
| 4,332
|
py
|
Python
|
scorecard/tests/indicators/test_current_ratio.py
|
Code4SA/municipal-data-api
|
8b213b702245bc2ff1bab4bd160c4cd3b604d54f
|
[
"MIT"
] | null | null | null |
scorecard/tests/indicators/test_current_ratio.py
|
Code4SA/municipal-data-api
|
8b213b702245bc2ff1bab4bd160c4cd3b604d54f
|
[
"MIT"
] | null | null | null |
scorecard/tests/indicators/test_current_ratio.py
|
Code4SA/municipal-data-api
|
8b213b702245bc2ff1bab4bd160c4cd3b604d54f
|
[
"MIT"
] | null | null | null |
from ...profile_data import ApiData
from ...profile_data.indicators import (
CurrentRatio,
)
from . import (
import_data,
_IndicatorTestCase,
)
from .resources import (
GeographyResource,
FinancialPositionFactsV2Resource,
BsheetFactsV1Resource,
)
class TestCurrentRatio(_IndicatorTestCase):
def test_result(self):
# Load sample data
import_data(GeographyResource, 'current_ratio/scorecard_geography.csv')
import_data(BsheetFactsV1Resource, 'current_ratio/bsheet_facts_v1.csv')
import_data(
FinancialPositionFactsV2Resource,
'current_ratio/financial_position_facts_v2.csv',
)
# Fetch data from API
api_data = ApiData(self.api_client, "CPT", 2020, 2020, 2020, "2020q4")
api_data.fetch_data([
"bsheet_auda_years", "financial_position_auda_years_v2",
])
# Provide data to indicator
result = CurrentRatio.get_muni_specifics(api_data)
self.assertEqual(
result,
{
"result_type": "ratio",
"values": [
{
"date": 2020,
"year": 2020,
"amount_type": "AUDA",
"assets": 17848394183.0,
"liabilities": 7873348202.0,
"result": 2.27,
"rating": "good",
"cube_version": "v2"
},
{
"date": 2019,
"year": 2019,
"amount_type": "AUDA",
"assets": 14254084899.0,
"liabilities": 8561736837.0,
"result": 1.66,
"rating": "good",
"cube_version": "v1"
},
{
"date": 2018,
"year": 2018,
"amount_type": "AUDA",
"assets": 14590339781.0,
"liabilities": 8994077535.0,
"result": 1.62,
"rating": "good",
"cube_version": "v1"
},
{
"date": 2017,
"year": 2017,
"amount_type": "AUDA",
"assets": 11891860172.0,
"liabilities": 8848578284.0,
"result": 1.34,
"rating": "ave",
"cube_version": "v1"
}
],
"ref": {
"title": "Circular 71",
"url": "http://mfma.treasury.gov.za/Circulars/Pages/Circular71.aspx"
},
"last_year": 2020,
"formula": {
"text": "= Current Assets / Current Liabilities",
"actual": [
"=",
{
"cube": "bsheet",
"item_codes": ["2150"],
"amount_type": "AUDA",
},
"/",
{
"cube": "bsheet",
"item_codes": ["1600"],
"amount_type": "AUDA",
},
],
},
"formula_v2": {
"text": "= Current Assets / Current Liabilities",
"actual": [
"=",
{
"cube": "financial_position_v2",
"item_codes": ["0120", "0130", "0140", "0150", "0160", "0170"],
"amount_type": "AUDA",
},
"/",
{
"cube": "financial_position_v2",
"item_codes": ["0330", "0340", "0350", "0360", "0370"],
"amount_type": "AUDA",
},
],
},
}
)
| 35.508197
| 91
| 0.356648
|
e9b815d6bf292226f9035ec12ccd57117cbe8e2a
| 6,519
|
py
|
Python
|
mars/tensor/base/isin.py
|
sighingnow/mars
|
c7897fbd144d230fff5edabc1494fb3ff44aa0d2
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/base/isin.py
|
sighingnow/mars
|
c7897fbd144d230fff5edabc1494fb3ff44aa0d2
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/base/isin.py
|
sighingnow/mars
|
c7897fbd144d230fff5edabc1494fb3ff44aa0d2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ...serialize import KeyField, BoolField
from ..operands import TensorOperand, TensorOperandMixin
from ..datasource import tensor as astensor
from ..array_utils import as_same_device, device
from ..core import TensorOrder
from .ravel import ravel
class TensorIsIn(TensorOperand, TensorOperandMixin):
_op_type_ = OperandDef.ISIN
_element = KeyField('element')
_test_elements = KeyField('test_elements')
_assume_unique = BoolField('assume_unique')
_invert = BoolField('invert')
def __init__(self, assume_unique=None, invert=None, dtype=None, **kw):
dtype = np.dtype(bool) if dtype is None else dtype
super(TensorIsIn, self).__init__(_assume_unique=assume_unique, _invert=invert,
_dtype=dtype, **kw)
@property
def element(self):
return self._element
@property
def test_elements(self):
return self._test_elements
@property
def assume_unique(self):
return self._assume_unique
@property
def invert(self):
return self._invert
def _set_inputs(self, inputs):
super(TensorIsIn, self)._set_inputs(inputs)
self._element = self._inputs[0]
self._test_elements = self._inputs[1]
def __call__(self, element, test_elements):
element, test_elements = astensor(element), ravel(astensor(test_elements))
return self.new_tensor([element, test_elements], element.shape, order=TensorOrder.C_ORDER)
@classmethod
def tile(cls, op):
in_tensor = op.element
test_elements = op.test_elements
out_tensor = op.outputs[0]
if len(test_elements.chunks) != 1:
test_elements = test_elements.rechunk(len(test_elements)).single_tiles()
test_elements_chunk = test_elements.chunks[0]
out_chunks = []
for c in in_tensor.chunks:
chunk_op = op.copy().reset_key()
out_chunk = chunk_op.new_chunk([c, test_elements_chunk], shape=c.shape,
index=c.index, order=out_tensor.order)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_tensors([in_tensor, test_elements], out_tensor.shape,
order=out_tensor.order, chunks=out_chunks,
nsplits=in_tensor.nsplits)
@classmethod
def execute(cls, ctx, op):
(element, test_elements), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
ctx[op.outputs[0].key] = xp.isin(element, test_elements,
assume_unique=op.assume_unique,
invert=op.invert)
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over `element` only.
Returns a boolean array of the same shape as `element` that is True
where an element of `element` is in `test_elements` and False otherwise.
Parameters
----------
element : array_like
Input tensor.
test_elements : array_like
The values against which to test each value of `element`.
This argument is flattened if it is a tensor or array_like.
See notes for behavior with non-array-like parameters.
assume_unique : bool, optional
If True, the input tensors are both assumed to be unique, which
can speed up the calculation. Default is False.
invert : bool, optional
If True, the values in the returned tensor are inverted, as if
calculating `element not in test_elements`. Default is False.
``mt.isin(a, b, invert=True)`` is equivalent to (but faster
than) ``mt.invert(mt.isin(a, b))``.
Returns
-------
isin : Tensor, bool
Has the same shape as `element`. The values `element[isin]`
are in `test_elements`.
See Also
--------
in1d : Flattened version of this function.
Notes
-----
`isin` is an element-wise function version of the python keyword `in`.
``isin(a, b)`` is roughly equivalent to
``mt.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
`element` and `test_elements` are converted to tensors if they are not
already. If `test_elements` is a set (or other non-sequence collection)
it will be converted to an object tensor with one element, rather than a
tensor of the values contained in `test_elements`. This is a consequence
of the `tensor` constructor's way of handling non-sequence collections.
Converting the set to a list usually gives the desired behavior.
Examples
--------
>>> import mars.tensor as mt
>>> element = 2*mt.arange(4).reshape((2, 2))
>>> element.execute()
array([[0, 2],
[4, 6]])
>>> test_elements = [1, 2, 4, 8]
>>> mask = mt.isin(element, test_elements)
>>> mask.execute()
array([[ False, True],
[ True, False]])
>>> element[mask].execute()
array([2, 4])
>>> mask = mt.isin(element, test_elements, invert=True)
>>> mask.execute()
array([[ True, False],
[ False, True]])
>>> element[mask]
array([0, 6])
Because of how `array` handles sets, the following does not
work as expected:
>>> test_set = {1, 2, 4, 8}
>>> mt.isin(element, test_set).execute()
array([[ False, False],
[ False, False]])
Casting the set to a list gives the expected result:
>>> mt.isin(element, list(test_set)).execute()
array([[ False, True],
[ True, False]])
"""
op = TensorIsIn(assume_unique, invert)
return op(element, test_elements)
| 35.237838
| 98
| 0.637828
|
7b8c3c5ee7d94a1ce748c6d074633b503a9492ce
| 727
|
py
|
Python
|
Spaceship/message_types/msg_sensors.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | 1
|
2020-06-07T00:14:42.000Z
|
2020-06-07T00:14:42.000Z
|
Spaceship/message_types/msg_sensors.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | null | null | null |
Spaceship/message_types/msg_sensors.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | 1
|
2019-06-24T22:10:48.000Z
|
2019-06-24T22:10:48.000Z
|
"""
msg_sensors
    - message type for output of sensors
part of mavsim_python
- Beard & McLain, PUP, 2012
- Last update:
2/16/2019 - RWB
"""
class msg_sensors:
def __init__(self):
self.gyro_x = 0 # gyro_x
self.gyro_y = 0 # gyro_y
self.gyro_z = 0 # gyro_z
self.accel_x = 0 # accel_x
self.accel_y = 0 # accel_y
self.accel_z = 0 # accel_z
self.static_pressure = 0 # static pressure
self.diff_pressure = 0 # differential pressure
self.gps_n = 0 # gps north
self.gps_e = 0 # gps east
self.gps_h = 0 # gps altitude
self.gps_Vg = 0 # gps ground speed
self.gps_course = 0 # gps course angle
| 29.08
| 55
| 0.576341
|
b1362f54215aa607bc641842ffb47ab337afb0d5
| 1,179
|
py
|
Python
|
esphome/components/sntp/time.py
|
OttoWinter/esphomeyaml
|
6a85259e4d6d1b0a0f819688b8e555efcb99ecb0
|
[
"MIT"
] | 249
|
2018-04-07T12:04:11.000Z
|
2019-01-25T01:11:34.000Z
|
esphome/components/sntp/time.py
|
OttoWinter/esphomeyaml
|
6a85259e4d6d1b0a0f819688b8e555efcb99ecb0
|
[
"MIT"
] | 243
|
2018-04-11T16:37:11.000Z
|
2019-01-25T16:50:37.000Z
|
esphome/components/sntp/time.py
|
OttoWinter/esphomeyaml
|
6a85259e4d6d1b0a0f819688b8e555efcb99ecb0
|
[
"MIT"
] | 40
|
2018-04-10T05:50:14.000Z
|
2019-01-25T15:20:36.000Z
|
from esphome.components import time as time_
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.core import CORE
from esphome.const import CONF_ID, CONF_SERVERS
DEPENDENCIES = ["network"]
sntp_ns = cg.esphome_ns.namespace("sntp")
SNTPComponent = sntp_ns.class_("SNTPComponent", time_.RealTimeClock)
DEFAULT_SERVERS = ["0.pool.ntp.org", "1.pool.ntp.org", "2.pool.ntp.org"]
CONFIG_SCHEMA = time_.TIME_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(SNTPComponent),
cv.Optional(CONF_SERVERS, default=DEFAULT_SERVERS): cv.All(
cv.ensure_list(cv.Any(cv.domain, cv.hostname)), cv.Length(min=1, max=3)
),
}
).extend(cv.COMPONENT_SCHEMA)
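# Illustrative YAML configuration accepted by the schema above (an assumption
# for clarity, not taken from this file):
#
#   time:
#     - platform: sntp
#       id: sntp_time
#       servers:
#         - 0.pool.ntp.org
#         - 1.pool.ntp.org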
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
servers = config[CONF_SERVERS]
servers += [""] * (3 - len(servers))
cg.add(var.set_servers(*servers))
await cg.register_component(var, config)
await time_.register_time(var, config)
if CORE.is_esp8266 and len(servers) > 1:
# We need LwIP features enabled to get 3 SNTP servers (not just one)
cg.add_build_flag("-DPIO_FRAMEWORK_ARDUINO_LWIP2_LOW_MEMORY")
| 31.026316
| 83
| 0.71162
|
6e6093b5c0d1d438c1cd76caaf1d63937e638aed
| 650
|
py
|
Python
|
startupservice/src/__init__.py
|
Mariusz1970/enigma2-plugins-1
|
126d31d075c156f32b09d4321ebe1a17f93a5bd6
|
[
"OLDAP-2.3"
] | 2
|
2020-09-02T18:25:39.000Z
|
2020-09-02T18:39:07.000Z
|
startupservice/src/__init__.py
|
Mariusz1970/enigma2-plugins-1
|
126d31d075c156f32b09d4321ebe1a17f93a5bd6
|
[
"OLDAP-2.3"
] | null | null | null |
startupservice/src/__init__.py
|
Mariusz1970/enigma2-plugins-1
|
126d31d075c156f32b09d4321ebe1a17f93a5bd6
|
[
"OLDAP-2.3"
] | 11
|
2015-02-26T20:59:14.000Z
|
2021-09-20T08:23:03.000Z
|
# -*- coding: utf-8 -*-
from Components.Language import language
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_LANGUAGE
import os, gettext
PluginLanguageDomain = "StartUpService"
PluginLanguagePath = "SystemPlugins/StartUpService/locale"
def localeInit():
gettext.bindtextdomain(PluginLanguageDomain, resolveFilename(SCOPE_PLUGINS, PluginLanguagePath))
def _(txt):
if gettext.dgettext(PluginLanguageDomain, txt):
return gettext.dgettext(PluginLanguageDomain, txt)
else:
print "[" + PluginLanguageDomain + "] fallback to default translation for " + txt
return gettext.gettext(txt)
localeInit()
language.addCallback(localeInit)
| 32.5
| 97
| 0.8
|
e47c2df134cdb3056c0ff664f6070f677b1c7477
| 144
|
py
|
Python
|
tests/test_relascope.py
|
Submissions/relascope
|
6bc9cfac99ab5d15bc62ed4538195d59da6893cb
|
[
"MIT"
] | null | null | null |
tests/test_relascope.py
|
Submissions/relascope
|
6bc9cfac99ab5d15bc62ed4538195d59da6893cb
|
[
"MIT"
] | null | null | null |
tests/test_relascope.py
|
Submissions/relascope
|
6bc9cfac99ab5d15bc62ed4538195d59da6893cb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_relascope
----------------------------------
Tests for `relascope` module.
"""
# TODO
| 12
| 34
| 0.458333
|
01d1f680f2f1e82106f35bc936c819a48b7bdf2d
| 630
|
py
|
Python
|
setup.py
|
swd543/reconstruct-document
|
8f366ec4f7a90d6499f3bb97a4522cbde9f8b7d0
|
[
"MIT"
] | 5
|
2019-01-16T10:49:06.000Z
|
2019-02-06T08:40:45.000Z
|
setup.py
|
swd543/reconstruct-document
|
8f366ec4f7a90d6499f3bb97a4522cbde9f8b7d0
|
[
"MIT"
] | null | null | null |
setup.py
|
swd543/reconstruct-document
|
8f366ec4f7a90d6499f3bb97a4522cbde9f8b7d0
|
[
"MIT"
] | 1
|
2019-11-19T22:59:27.000Z
|
2019-11-19T22:59:27.000Z
|
import re
import os
from setuptools import setup
version = '1.0.0'
description = ''
with open('README.md', 'rb') as file:
description = file.read().decode('utf-8')
setup(name='reconstruct_document',
version=version,
description='Command line document reconstruction utility.',
long_description=description,
entry_points = {
'console_scripts': ['reconstruct_document = reconstruct_document.reconstruct_document:main']
},
url='http://kaunild.github.io',
author='Kaunil Dhruv',
author_email='dhruv.kaunil@gmail.com',
license='BSD',
packages=['reconstruct_document', 'test']
)
| 24.230769
| 100
| 0.696825
|
ffbd0fcf2d80f2dc158054f84e018c1e068da28c
| 90,563
|
py
|
Python
|
Lib/test/test_functools.py
|
kolyshkin/cpython
|
8c349565e8a442e17f1a954d1a9996847749d778
|
[
"CNRI-Python-GPL-Compatible"
] | 3
|
2019-04-23T11:06:38.000Z
|
2021-03-03T12:17:16.000Z
|
Lib/test/test_functools.py
|
kolyshkin/cpython
|
8c349565e8a442e17f1a954d1a9996847749d778
|
[
"CNRI-Python-GPL-Compatible"
] | 2
|
2019-04-23T15:32:51.000Z
|
2019-05-10T20:32:32.000Z
|
Lib/test/test_functools.py
|
kolyshkin/cpython
|
8c349565e8a442e17f1a954d1a9996847749d778
|
[
"CNRI-Python-GPL-Compatible"
] | 1
|
2021-02-20T14:09:54.000Z
|
2021-02-20T14:09:54.000Z
|
import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
from weakref import proxy
import contextlib
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
            return BadCmp()
        key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
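# Illustrative aside, not part of the original test suite: because Orderable_LT
# only defines __lt__ and __eq__, functools.total_ordering synthesizes the
# remaining comparisons, so the checks below hold without hand-written
# __le__, __gt__ or __ge__ methods.
assert Orderable_LT(1) <= Orderable_LT(2)   # derived from __lt__/__eq__
assert Orderable_LT(2) > Orderable_LT(1)    # derived from __lt__
assert Orderable_LT(2) >= Orderable_LT(2)   # derived from __lt__/__eq__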
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will call __hash__
        # only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
        # Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
                # XXX: why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_early_detection_of_bad_call(self):
# Issue #22184
with self.assertRaises(TypeError):
@functools.lru_cache
def f():
pass
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
        # Registering classes as callables doesn't work with annotations;
        # you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
# FIXME: The following will only work after PEP 560 is implemented.
return
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
    __slots__ = ('_cost',)
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with support.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
if __name__ == '__main__':
unittest.main()
| 36.356082
| 122
| 0.558109
|
f2bc01f4f5d5295bef9e7d33ce0bc1cc2b24095f
| 347
|
py
|
Python
|
pomdp_grid_world/__init__.py
|
NishanthVAnand/new_env_gym
|
6ed044e8ab9b64fb3e6b3def432609833730a60a
|
[
"MIT"
] | null | null | null |
pomdp_grid_world/__init__.py
|
NishanthVAnand/new_env_gym
|
6ed044e8ab9b64fb3e6b3def432609833730a60a
|
[
"MIT"
] | null | null | null |
pomdp_grid_world/__init__.py
|
NishanthVAnand/new_env_gym
|
6ed044e8ab9b64fb3e6b3def432609833730a60a
|
[
"MIT"
] | null | null | null |
from gym.envs.registration import register
register(
id='pomdpGridWorld-v0',
entry_point='pomdp_grid_world.envs:gridWorld',
kwargs={'map_name' : '4x4'},
max_episode_steps=100,
)
register(
id='pomdpGridWorld-v1',
entry_point='pomdp_grid_world.envs:gridWorld',
kwargs={'map_name' : '8x8'},
max_episode_steps=200,
)
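# Illustrative usage sketch, not part of the original module: once the
# register() calls above have run on import, an environment can be built by id.
# `_example_rollout` is a hypothetical helper, and the reset/step signature
# below assumes the classic gym API rather than anything specific to gridWorld.
def _example_rollout():
    import gym
    env = gym.make('pomdpGridWorld-v0')
    observation = env.reset()
    observation, reward, done, info = env.step(env.action_space.sample())
    return observation, reward, done, info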
| 23.133333
| 50
| 0.700288
|
fbb3e83ba35c53521e5e30a19c591bc7449383be
| 1,471
|
py
|
Python
|
tests/st/networks/models/resnet50/src/config.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/st/networks/models/resnet50/src/config.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/st/networks/models/resnet50/src/config.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config settings, used in train.py and eval.py
"""
from easydict import EasyDict as ed
config = ed({
"class_num": 1001,
"batch_size": 32,
"eval_interval": 1,
"eval_batch_size": 50,
"loss_scale": 1024,
"momentum": 0.9,
"weight_decay": 1e-4,
"use_nesterov": True,
"epoch_size": 90,
"pretrained_epoch_size": 1,
"buffer_size": 1000,
"image_height": 224,
"image_width": 224,
"save_checkpoint": False,
"save_checkpoint_epochs": 5,
"keep_checkpoint_max": 10,
"save_checkpoint_path": "./",
"warmup_epochs": 0,
"lr_decay_mode": "cosine",
"use_label_smooth": True,
"label_smooth_factor": 0.1,
"lr_init": 0,
"lr_max": 0.1,
"use_lars": True,
"lars_epsilon": 1e-8,
"lars_coefficient": 0.001
})
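# Illustrative aside, not part of the original file: EasyDict exposes the keys
# above as attributes, so training code can read them either way. The helper
# name and the default dataset size below are hypothetical/illustrative only.
def _example_total_steps(train_dataset_size=1281167):
    # attribute access and key access agree on an EasyDict
    assert config.batch_size == config["batch_size"]
    steps_per_epoch = train_dataset_size // config.batch_size
    return steps_per_epoch * config.epoch_size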
| 30.645833
| 78
| 0.64446
|
b1f340bdc93c878ec3660a2e30338a976c3fbc02
| 435
|
py
|
Python
|
scripts/buildschema.py
|
ksritharan/tectle
|
ca76424d85e66b041b40997838a3ceb79266efab
|
[
"MIT"
] | 1
|
2021-03-04T14:58:05.000Z
|
2021-03-04T14:58:05.000Z
|
scripts/buildschema.py
|
ksritharan/tectle
|
ca76424d85e66b041b40997838a3ceb79266efab
|
[
"MIT"
] | 8
|
2021-02-26T02:32:59.000Z
|
2021-05-28T02:22:07.000Z
|
scripts/buildschema.py
|
ksritharan/business-automation
|
ca76424d85e66b041b40997838a3ceb79266efab
|
[
"MIT"
] | null | null | null |
from tectle.buildschema import *
def main():
num_printers = 1
num_fake_receipts = 5
max_num_items = 2
max_quantity = 2
conn = get_connection(DB_DEBUG_FILE)
cur = conn.cursor()
build_schema(cur)
create_test_data(cur, num_printers, num_fake_receipts, max_num_items, max_quantity)
#fetch_shipping_costs(cur)
update_package_configs(cur)
conn.commit()
if __name__ == '__main__':
main()
| 24.166667
| 87
| 0.698851
|
4bb95ea10ad3ea32f5fae9d5322fd14fbbe4ff7e
| 308
|
py
|
Python
|
tweetme2/urls.py
|
abbasKareem/twitter2me
|
2fe867de5571d6be20b45b29d6ffe352b158a428
|
[
"MIT"
] | null | null | null |
tweetme2/urls.py
|
abbasKareem/twitter2me
|
2fe867de5571d6be20b45b29d6ffe352b158a428
|
[
"MIT"
] | null | null | null |
tweetme2/urls.py
|
abbasKareem/twitter2me
|
2fe867de5571d6be20b45b29d6ffe352b158a428
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path
from tweets.views import home_view, tweet_detail_view, tweet_list_view
urlpatterns = [
path('admin/', admin.site.urls),
path('', home_view),
path('tweets/', tweet_list_view),
path('tweets/<int:tweet_id>', tweet_detail_view),
]
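# Illustrative aside, not part of the original file: the `<int:tweet_id>` path
# converter above passes the captured integer to the view as a keyword
# argument, so tweet_detail_view (defined in tweets/views.py, not shown here)
# is expected to accept it roughly like:
#
#     def tweet_detail_view(request, tweet_id, *args, **kwargs):
#         ...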
| 23.692308
| 70
| 0.724026
|
689b29be4464c8318990d372609ee814c0092b66
| 790
|
py
|
Python
|
crowdsourcing/serializers/dynamic.py
|
ramcn/sept20
|
e6f6e238d0561ebf3353158161f1b20052e8b08b
|
[
"MIT"
] | 1
|
2016-02-29T01:26:42.000Z
|
2016-02-29T01:26:42.000Z
|
crowdsourcing/serializers/dynamic.py
|
ramcn/sept20
|
e6f6e238d0561ebf3353158161f1b20052e8b08b
|
[
"MIT"
] | 16
|
2015-08-10T18:28:18.000Z
|
2022-03-11T23:12:48.000Z
|
crowdsourcing/serializers/dynamic.py
|
Milstein/crowdsource-platform
|
60427e440373824c26c7daf8cf5f421b9c7ebbb5
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
"""
A ModelSerializer that takes an additional `fields` argument that
controls which fields should be displayed.
"""
def __init__(self, *args, **kwargs):
# Don't pass the 'fields' arg up to the superclass
fields = kwargs.pop('fields', None)
# Instantiate the superclass normally
super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
if fields is not None:
# Drop any fields that are not specified in the `fields` argument.
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name)
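# Illustrative usage sketch, not part of the original file: a concrete
# serializer can subclass DynamicFieldsModelSerializer and let callers trim the
# output via the extra `fields` kwarg. `UserSerializer` and the `User` model
# below are hypothetical examples.
#
#     class UserSerializer(DynamicFieldsModelSerializer):
#         class Meta:
#             model = User
#             fields = ('id', 'username', 'email')
#
#     # Serializes only `id` and `username`; `email` is dropped.
#     UserSerializer(instance=user, fields=('id', 'username'))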
| 39.5
| 78
| 0.659494
|
0969ede2795a61a6eb8c66bc87f9b3935c04990e
| 779
|
py
|
Python
|
proteus/default_so.py
|
cekees/proteus
|
11d8749e04f0950f090d1a406243539a868be642
|
[
"MIT"
] | null | null | null |
proteus/default_so.py
|
cekees/proteus
|
11d8749e04f0950f090d1a406243539a868be642
|
[
"MIT"
] | 1
|
2020-12-19T03:29:35.000Z
|
2020-12-19T03:29:35.000Z
|
proteus/default_so.py
|
cekees/proteus
|
11d8749e04f0950f090d1a406243539a868be642
|
[
"MIT"
] | null | null | null |
"""
The default values for so-files describing split operator formulations
"""
from __future__ import absolute_import
try:
from importlib import reload
except:
pass
from .SplitOperator import *
name = None
pnList = []
systemStepControllerType = Sequential_MinModelStep
systemStepExact = True
useOneMesh = True
tnList = None
needEBQ_GLOBAL = False
needEBQ = False
modelSpinUpList = []
useOneArchive = True  # False
fastArchive = False
sList = []
from .Archiver import ArchiveFlags
archiveFlag = ArchiveFlags.EVERY_USER_STEP
#CEK CHANGED DEFAULT FROM EVERY_SEQUENCE_STEP
dt_system_fixed = None
"""A system-wide wide time step used by SplitOperator objects"""
skipSpinupOnHotstart = False
"""Use True if one wants to skip the spinup step when HotStart begins"""
| 16.934783
| 72
| 0.776637
|
388dbf0d23f71bad048078d94153282152e52601
| 12,515
|
py
|
Python
|
tests/generate_go_ethereum_fixture.py
|
EdNoepel/web3.py
|
008f1343621ee951330db23b3e691aeead1e55e3
|
[
"MIT"
] | null | null | null |
tests/generate_go_ethereum_fixture.py
|
EdNoepel/web3.py
|
008f1343621ee951330db23b3e691aeead1e55e3
|
[
"MIT"
] | null | null | null |
tests/generate_go_ethereum_fixture.py
|
EdNoepel/web3.py
|
008f1343621ee951330db23b3e691aeead1e55e3
|
[
"MIT"
] | null | null | null |
import contextlib
import json
import os
import pprint
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import time
from eth_utils.curried import (
apply_formatter_if,
is_bytes,
is_checksum_address,
is_dict,
is_same_address,
remove_0x_prefix,
to_hex,
to_text,
to_wei,
)
from eth_utils.toolz import (
merge,
valmap,
)
from utils import (
get_open_port,
)
from web3 import Web3
from web3._utils.module_testing.emitter_contract import (
CONTRACT_EMITTER_ABI,
CONTRACT_EMITTER_CODE,
EMITTER_ENUM,
)
from web3._utils.module_testing.math_contract import (
MATH_ABI,
MATH_BYTECODE,
)
COINBASE = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd'
COINBASE_PK = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d'
KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' # noqa: E501
KEYFILE_PW = 'web3py-test'
KEYFILE_FILENAME = 'UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' # noqa: E501
RAW_TXN_ACCOUNT = '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6'
UNLOCKABLE_PRIVATE_KEY = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01'
UNLOCKABLE_ACCOUNT = '0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13'
UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW
GENESIS_DATA = {
"nonce": "0xdeadbeefdeadbeef",
"timestamp": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"extraData": "0x7765623370792d746573742d636861696e",
"gasLimit": "0x47d5cc",
"difficulty": "0x01",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"coinbase": "0x3333333333333333333333333333333333333333",
"alloc": {
remove_0x_prefix(COINBASE): {
'balance': str(to_wei(1000000000, 'ether')),
},
remove_0x_prefix(RAW_TXN_ACCOUNT): {
'balance': str(to_wei(10, 'ether')),
},
remove_0x_prefix(UNLOCKABLE_ACCOUNT): {
'balance': str(to_wei(10, 'ether')),
},
},
"config": {
"chainId": 131277322940537, # the string 'web3py' as an integer
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0
},
}
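# Quick sanity check of the chainId comment above (an illustrative assertion,
# assuming nothing beyond the standard library): the ASCII bytes of 'web3py'
# read as a big-endian integer give exactly the configured chain id.
assert int.from_bytes(b'web3py', 'big') == GENESIS_DATA['config']['chainId']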
def ensure_path_exists(dir_path):
"""
Make sure that a path exists
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
@contextlib.contextmanager
def tempdir():
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
def get_geth_binary():
from geth.install import (
get_executable_path,
install_geth,
)
if 'GETH_BINARY' in os.environ:
return os.environ['GETH_BINARY']
elif 'GETH_VERSION' in os.environ:
geth_version = os.environ['GETH_VERSION']
_geth_binary = get_executable_path(geth_version)
if not os.path.exists(_geth_binary):
install_geth(geth_version)
assert os.path.exists(_geth_binary)
return _geth_binary
else:
return 'geth'
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
def wait_for_socket(ipc_path, timeout=30):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
@contextlib.contextmanager
def graceful_kill_on_exit(proc):
try:
yield proc
finally:
kill_proc_gracefully(proc)
@contextlib.contextmanager
def get_geth_process(geth_binary,
datadir,
genesis_file_path,
geth_ipc_path,
geth_port):
init_datadir_command = (
geth_binary,
'--datadir', datadir,
'init',
genesis_file_path,
)
subprocess.check_output(
init_datadir_command,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
run_geth_command = (
geth_binary,
'--datadir', datadir,
'--ipcpath', geth_ipc_path,
'--ethash.dagsondisk', '1',
'--gcmode', 'archive',
'--nodiscover',
'--port', geth_port,
'--etherbase', COINBASE[2:],
)
popen_proc = subprocess.Popen(
run_geth_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
)
with popen_proc as proc:
with graceful_kill_on_exit(proc) as graceful_proc:
yield graceful_proc
output, errors = proc.communicate()
print(
"Geth Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def write_config_json(config, datadir):
bytes_to_hex = apply_formatter_if(is_bytes, to_hex)
config_json_dict = valmap(bytes_to_hex, config)
config_path = os.path.join(datadir, 'config.json')
with open(config_path, 'w') as config_file:
config_file.write(json.dumps(config_json_dict))
config_file.write('\n')
def generate_go_ethereum_fixture(destination_dir):
with contextlib.ExitStack() as stack:
datadir = stack.enter_context(tempdir())
keystore_dir = os.path.join(datadir, 'keystore')
ensure_path_exists(keystore_dir)
keyfile_path = os.path.join(keystore_dir, KEYFILE_FILENAME)
with open(keyfile_path, 'w') as keyfile:
keyfile.write(KEYFILE_DATA)
genesis_file_path = os.path.join(datadir, 'genesis.json')
with open(genesis_file_path, 'w') as genesis_file:
genesis_file.write(json.dumps(GENESIS_DATA))
geth_ipc_path_dir = stack.enter_context(tempdir())
geth_ipc_path = os.path.join(geth_ipc_path_dir, 'geth.ipc')
geth_port = get_open_port()
geth_binary = get_geth_binary()
with get_geth_process(
geth_binary=geth_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
geth_ipc_path=geth_ipc_path,
geth_port=geth_port):
wait_for_socket(geth_ipc_path)
web3 = Web3(Web3.IPCProvider(geth_ipc_path))
chain_data = setup_chain_state(web3)
# close geth by exiting context
# must be closed before copying data dir
verify_chain_state(web3, chain_data)
# verify that chain state is still valid after closing
# and re-opening geth
with get_geth_process(
geth_binary=geth_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
geth_ipc_path=geth_ipc_path,
geth_port=geth_port):
wait_for_socket(geth_ipc_path)
web3 = Web3(Web3.IPCProvider(geth_ipc_path))
verify_chain_state(web3, chain_data)
static_data = {
'raw_txn_account': RAW_TXN_ACCOUNT,
'keyfile_pw': KEYFILE_PW,
}
config = merge(chain_data, static_data)
pprint.pprint(config)
write_config_json(config, datadir)
shutil.make_archive(destination_dir, 'zip', datadir)
def verify_chain_state(web3, chain_data):
receipt = web3.eth.waitForTransactionReceipt(chain_data['mined_txn_hash'])
latest = web3.eth.getBlock('latest')
assert receipt.blockNumber <= latest.number
def mine_transaction_hash(web3, txn_hash):
web3.geth.miner.start(1)
try:
return web3.eth.waitForTransactionReceipt(txn_hash, timeout=60)
finally:
web3.geth.miner.stop()
def mine_block(web3):
origin_block_number = web3.eth.blockNumber
start_time = time.time()
web3.geth.miner.start(1)
while time.time() < start_time + 60:
block_number = web3.eth.blockNumber
if block_number > origin_block_number:
web3.geth.miner.stop()
return block_number
else:
time.sleep(0.1)
else:
raise ValueError("No block mined during wait period")
def deploy_contract(web3, name, factory):
web3.geth.personal.unlockAccount(web3.eth.coinbase, KEYFILE_PW)
deploy_txn_hash = factory.constructor().transact({'from': web3.eth.coinbase})
print('{0}_CONTRACT_DEPLOY_HASH: '.format(name.upper()), deploy_txn_hash)
deploy_receipt = mine_transaction_hash(web3, deploy_txn_hash)
print('{0}_CONTRACT_DEPLOY_TRANSACTION_MINED'.format(name.upper()))
contract_address = deploy_receipt['contractAddress']
assert is_checksum_address(contract_address)
print('{0}_CONTRACT_ADDRESS:'.format(name.upper()), contract_address)
return deploy_receipt
def setup_chain_state(web3):
coinbase = web3.eth.coinbase
assert is_same_address(coinbase, COINBASE)
#
# Math Contract
#
math_contract_factory = web3.eth.contract(
abi=MATH_ABI,
bytecode=MATH_BYTECODE,
)
math_deploy_receipt = deploy_contract(web3, 'math', math_contract_factory)
assert is_dict(math_deploy_receipt)
#
# Emitter Contract
#
emitter_contract_factory = web3.eth.contract(
abi=CONTRACT_EMITTER_ABI,
bytecode=CONTRACT_EMITTER_CODE,
)
emitter_deploy_receipt = deploy_contract(web3, 'emitter', emitter_contract_factory)
emitter_contract = emitter_contract_factory(emitter_deploy_receipt['contractAddress'])
txn_hash_with_log = emitter_contract.functions.logDouble(
which=EMITTER_ENUM['LogDoubleWithIndex'], arg0=12345, arg1=54321,
).transact({
'from': web3.eth.coinbase,
})
print('TXN_HASH_WITH_LOG:', txn_hash_with_log)
txn_receipt_with_log = mine_transaction_hash(web3, txn_hash_with_log)
block_with_log = web3.eth.getBlock(txn_receipt_with_log['blockHash'])
print('BLOCK_HASH_WITH_LOG:', block_with_log['hash'])
#
# Empty Block
#
empty_block_number = mine_block(web3)
print('MINED_EMPTY_BLOCK')
empty_block = web3.eth.getBlock(empty_block_number)
assert is_dict(empty_block)
assert not empty_block['transactions']
print('EMPTY_BLOCK_HASH:', empty_block['hash'])
#
# Block with Transaction
#
web3.geth.personal.unlockAccount(coinbase, KEYFILE_PW)
web3.geth.miner.start(1)
mined_txn_hash = web3.eth.sendTransaction({
'from': coinbase,
'to': coinbase,
'value': 1,
'gas': 21000,
'gas_price': web3.eth.gasPrice,
})
mined_txn_receipt = mine_transaction_hash(web3, mined_txn_hash)
print('MINED_TXN_HASH:', mined_txn_hash)
block_with_txn = web3.eth.getBlock(mined_txn_receipt['blockHash'])
print('BLOCK_WITH_TXN_HASH:', block_with_txn['hash'])
geth_fixture = {
'math_deploy_txn_hash': math_deploy_receipt['transactionHash'],
'math_address': math_deploy_receipt['contractAddress'],
'emitter_deploy_txn_hash': emitter_deploy_receipt['transactionHash'],
'emitter_address': emitter_deploy_receipt['contractAddress'],
'txn_hash_with_log': txn_hash_with_log,
'block_hash_with_log': block_with_log['hash'],
'empty_block_hash': empty_block['hash'],
'mined_txn_hash': mined_txn_hash,
'block_with_txn_hash': block_with_txn['hash'],
}
return geth_fixture
if __name__ == '__main__':
fixture_dir = sys.argv[1]
generate_go_ethereum_fixture(fixture_dir)
| 30.52439
| 522
| 0.668158
|
4a8e5d0ea810e3b71db0f5d05ac4b74a20720daf
| 35,881
|
py
|
Python
|
dairy_erp/dairy_erp/doctype/farmer_payment_cycle_report/farmer_payment_cycle_report.py
|
shrikant9867/Dairy_project_Daiyerp
|
635d34115f0eb2081b6835a190eda4971dbfb99f
|
[
"MIT"
] | null | null | null |
dairy_erp/dairy_erp/doctype/farmer_payment_cycle_report/farmer_payment_cycle_report.py
|
shrikant9867/Dairy_project_Daiyerp
|
635d34115f0eb2081b6835a190eda4971dbfb99f
|
[
"MIT"
] | null | null | null |
dairy_erp/dairy_erp/doctype/farmer_payment_cycle_report/farmer_payment_cycle_report.py
|
shrikant9867/Dairy_project_Daiyerp
|
635d34115f0eb2081b6835a190eda4971dbfb99f
|
[
"MIT"
] | 2
|
2020-01-19T13:27:57.000Z
|
2021-12-28T20:32:56.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Stellapps Technologies Private Ltd.
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from dairy_erp.dairy_utils import make_dairy_log, make_journal_entry
from frappe.utils import flt, cstr,nowdate,cint
import json
class FarmerPaymentCycleReport(Document):
def validate(self):
if frappe.db.get_value("Farmer Payment Cycle Report",{'cycle':self.cycle,\
'vlcc_name':self.vlcc_name, 'farmer_id':self.farmer_id},'name') and self.is_new():
frappe.throw(_("FPCR has already been generated for this cycle against farmer <b>{0}</b>".format(self.farmer_id)))
if self.collection_to >= nowdate() :
frappe.throw(_("You can generate FPCR after <b>'{0}'</b>".format(self.collection_to)))
def before_submit(self):
try:
self.advance_operation()
self.loan_operation()
self.update_fpcr()
if float(self.incentives) != 0:
if not frappe.db.get_value("Purchase Invoice", {'cycle':self.cycle,\
'supplier': self.farmer_name},'name'):
self.create_incentive()
frappe.msgprint(_("Purchase invoice created successfully against Incentives"))
else: frappe.msgprint(_("Purchase invoice Already created successfully against Incentives"))
except Exception as e:
frappe.db.rollback()
make_dairy_log(title="JV creation Against Advance Failed",method="make_jv", status="Error",
data = "data", message=e, traceback=frappe.get_traceback())
frappe.throw("Something Went Wrong Please check dairy log")
def update_fpcr(self):
loan_total, loan_je, adavnce_je, advance_total = 0, 0, 0, 0
for row in self.loan_child:
je_amt = frappe.get_all("Journal Entry",fields=['ifnull(sum(total_debit), 0) as amt']\
,filters={'farmer_advance':row.loan_id,'type':'Farmer Loan'})
loan_je += je_amt[0].get('amt')
loan_total += row.principle
for row in self.advance_child:
je_amt = frappe.get_all("Journal Entry",fields=['ifnull(sum(total_debit), 0) as amt']\
,filters={'farmer_advance':row.adv_id,'type':'Farmer Advance'})
adavnce_je += je_amt[0].get('amt')
advance_total += row.principle
self.advance_outstanding = float(advance_total) - float(adavnce_je)
self.loan_outstanding = float(loan_total) - float(loan_je)
def advance_operation(self):
flag, je = False, ""
for row in self.advance_child:
flag = True
# SG 5-10
je_exist = frappe.db.get_value("Journal Entry",{'cycle': self.cycle,\
'farmer_advance':row.adv_id,'type':'Farmer Advance'}, 'name')
if not je_exist:
self.validate_advance(row)
je = self.create_advance_je(row)
self.update_advance_doc(row, je)
elif je_exist:
self.update_je_for_advance(row, self.cycle, je_exist)
self.update_advance_after_fpcr(row)
if flag:
frappe.msgprint(_("Journal Entry created successfully against Advances"))
def loan_operation(self):
flag = False
for row in self.loan_child:
flag = True
je_exist = frappe.db.get_value("Journal Entry",{'cycle': self.cycle,\
'farmer_advance':row.loan_id,'type':'Farmer Loan'}, 'name')
if not je_exist:
self.validate_loan(row)
je = self.create_loan_je(row)
self.update_loan_doc(row, je)
elif je_exist:
self.update_je_for_loan(row, self.cycle, je_exist)
self.update_loan_after_fpcr(row)
if flag:
frappe.msgprint(_("Journal Entry created successfully against Loans"))
def validate_advance(self, row):
adv_doc = frappe.get_doc("Farmer Advance",row.adv_id)
if not row.amount:
frappe.throw(_("Please Enter amount against <b>{0}</b>".format(row.adv_id)))
if float(row.amount) > float(row.outstanding):
frappe.throw(_("Amount can not be greater than outstanding for <b>{0}</b>".format(row.adv_id)))
if (int(row.no_of_instalment) + int(adv_doc.extension)) - row.paid_instalment == 1 and \
(float(row.amount) < float(adv_doc.emi_amount) or float(row.outstanding) != float(adv_doc.emi_amount)):
frappe.throw(_("Please Use Extension for <b>{0}</b>".format(row.adv_id)))
def validate_loan(self, row):
loan_doc = frappe.get_doc("Farmer Loan",row.loan_id)
if not row.amount:
frappe.throw(_("Please Enter amount against <b>{0}</b>".format(row.loan_id)))
if float(row.amount) > float(row.outstanding):
frappe.throw(_("Amount can not be greater than outstanding for <b>{0}</b>".format(row.loan_id)))
if (int(row.no_of_instalment) + int(loan_doc.extension)) - loan_doc.paid_instalment == 1 and \
(float(row.amount) < float(loan_doc.emi_amount) or float(row.outstanding) != float(loan_doc.emi_amount)):
frappe.throw(_("Please Use Extension <b>{0}</b>".format(row.loan_id)))
def update_loan_doc(self, row, je = None):
instalment = 0
principal_interest = get_interest_amount(row.amount, row.loan_id)
je_amt = frappe.get_all("Journal Entry",fields=['ifnull(sum(total_debit), 0) as amt']\
,filters={'farmer_advance':row.loan_id,'type':'Farmer Loan'})
loan_doc = frappe.get_doc("Farmer Loan", row.loan_id)
loan_doc.total_principle_paid = principal_interest.get('principal')
loan_doc.total_interest_paid = principal_interest.get('interest')
loan_doc.last_extension_used = flt(loan_doc.extension)
loan_doc.append("cycle", {"cycle": self.cycle, "sales_invoice": je})
loan_doc.outstanding_amount = float(loan_doc.advance_amount) - je_amt[0].get('amt')
for i in loan_doc.cycle:
instalment += 1
loan_doc.paid_instalment = instalment
if loan_doc.outstanding_amount > 0:
loan_doc.emi_amount = (float(loan_doc.outstanding_amount)) / (float(loan_doc.no_of_instalments) + float(loan_doc.extension) - float(loan_doc.paid_instalment))
if loan_doc.outstanding_amount == 0:
loan_doc.status = "Paid"
loan_doc.emi_amount = 0
loan_doc.flags.ignore_permissions = True
loan_doc.save()
def create_loan_je(self, row): # SG-8-10
principal_interest = get_interest_amount(row.amount, row.loan_id)
je_doc = make_journal_entry(voucher_type = "Journal Entry",company = self.vlcc_name,
posting_date = nowdate(),debit_account = "Debtors - ",credit_account = "Loans and Advances - ",
type = "Farmer Loan", cycle = self.cycle, amount = principal_interest.get('principal'),
party_type = "Customer", party = self.farmer_name, master_no = row.loan_id,
interest_account = "Interest Income - ", interest_amount= principal_interest.get('interest'))
frappe.db.set_value("Journal Entry", je_doc.name, 'posting_date', self.collection_to)
company_abbr = frappe.db.get_value("Company",get_vlcc(),'abbr',as_dict=1)
frappe.db.set_value("GL Entry", {"account": 'Debtors - '+company_abbr.get('abbr'), "voucher_no": je_doc.name},\
'posting_date', self.collection_to )
frappe.db.set_value("GL Entry", {"account": 'Loans and Advances - '+company_abbr.get('abbr'), "voucher_no": je_doc.name},\
'posting_date', self.collection_to )
frappe.db.set_value("GL Entry", {"account":"Interest Income - "+company_abbr.get('abbr'), "voucher_no": je_doc.name},\
'posting_date', self.collection_to )
return je_doc.name
def create_advance_je(self, row): # SG-5-10
advance_type = frappe.db.get_value("Farmer Advance",{'name': row.adv_id}, 'advance_type')
if advance_type == "Money Advance":
je_doc = make_journal_entry(voucher_type = "Journal Entry",company = self.vlcc_name,
posting_date = nowdate(),debit_account = "Debtors - ",credit_account = "Loans and Advances - ",
type = "Farmer Advance", cycle = self.cycle, amount = row.amount, faf_flag = 0,
party_type = "Customer", party = self.farmer_name, master_no = row.adv_id, advance_type = advance_type)
frappe.db.set_value("Journal Entry", je_doc.name, 'posting_date', self.collection_to)
company_abbr = frappe.db.get_value("Company",get_vlcc(),'abbr')
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_doc.name},\
'posting_date', self.collection_to )
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_doc.name},\
'posting_date', self.collection_to )
return je_doc.name
if advance_type == "Feed And Fodder Advance":
# parameter 'faf_flag', is used to fetch data on net-payOff report.
je_doc = make_journal_entry(voucher_type = "Journal Entry",company = self.vlcc_name,
posting_date = nowdate(),debit_account = "Debtors - ",credit_account = "Feed And Fodder Advance - ",
type = "Farmer Advance", cycle = self.cycle, amount = row.amount, faf_flag = 1,
party_type = "Customer", party = self.farmer_name, master_no = row.adv_id, advance_type = advance_type)
frappe.db.set_value("Journal Entry", je_doc.name, 'posting_date', self.collection_to)
company_abbr = frappe.db.get_value("Company",get_vlcc(),'abbr')
frappe.db.set_value("GL Entry", {"account": 'Debtors - '+company_abbr, "voucher_no": je_doc.name},\
'posting_date', self.collection_to )
frappe.db.set_value("GL Entry", {"account": 'Feed And Fodder Advance - '+company_abbr, "voucher_no": je_doc.name},\
'posting_date', self.collection_to )
return je_doc.name
def update_advance_doc(self, row, je=None): # SG-5-10
instalment = 0
je_amt = frappe.get_all("Journal Entry",fields=['ifnull(sum(total_debit), 0) as amt']\
,filters={'farmer_advance':row.adv_id,'type':'Farmer Advance'})
adv_doc = frappe.get_doc("Farmer Advance", row.adv_id)
adv_doc.append("cycle", {"cycle": self.cycle, "sales_invoice": je})
adv_doc.outstanding_amount = float(adv_doc.advance_amount) - je_amt[0].get('amt')
for i in adv_doc.cycle:
instalment +=1
adv_doc.paid_instalment = instalment
adv_doc.fpcr_instalment = instalment
if adv_doc.outstanding_amount > 0 :
adv_doc.emi_amount = (float(adv_doc.outstanding_amount)) / (float(adv_doc.no_of_instalment) + float(adv_doc.extension) - float(adv_doc.paid_instalment))
if adv_doc.outstanding_amount == 0:
adv_doc.status = "Paid"
adv_doc.emi_amount = 0
adv_doc.flags.ignore_permissions =True
adv_doc.save()
def update_advance_after_fpcr(self, row): # SG-5-10
instalment = 0
je_amt = frappe.get_all("Journal Entry",fields=['ifnull(sum(total_debit), 0) as amt']\
,filters={'farmer_advance':row.adv_id,'type':'Farmer Advance'})
adv_doc = frappe.get_doc("Farmer Advance", row.adv_id)
adv_doc.outstanding_amount = float(adv_doc.advance_amount) - je_amt[0].get('amt')
for i in adv_doc.cycle:
instalment +=1
adv_doc.paid_instalment = instalment
adv_doc.fpcr_instalment = instalment
if adv_doc.outstanding_amount > 0 :
adv_doc.emi_amount = (float(adv_doc.outstanding_amount)) / (float(adv_doc.no_of_instalment) + float(adv_doc.extension) - float(adv_doc.paid_instalment))
if adv_doc.outstanding_amount == 0:
adv_doc.status = "Paid"
adv_doc.emi_amount = 0
adv_doc.flags.ignore_permissions =True
adv_doc.save()
def update_loan_after_fpcr(self, row):
principal_interest = get_interest_amount(row.amount, row.loan_id)
print principal_interest,"inside update_loan_after_fpcr\n\n\n\n"
instalment = 0
je_amt = frappe.get_all("Journal Entry",fields=['ifnull(sum(total_debit), 0) as amt']\
,filters={'farmer_advance':row.loan_id,'type':'Farmer Loan'})
loan_doc = frappe.get_doc("Farmer Loan", row.loan_id)
loan_doc.total_principle_paid = principal_interest.get('principal')
loan_doc.total_interest_paid = principal_interest.get('interest')
loan_doc.last_extension_used = flt(loan_doc.extension)
loan_doc.outstanding_amount = float(loan_doc.advance_amount) - je_amt[0].get('amt')
for i in loan_doc.cycle:
instalment += 1
loan_doc.paid_instalment = instalment
if loan_doc.outstanding_amount > 0:
loan_doc.emi_amount = (float(loan_doc.outstanding_amount)) / (float(loan_doc.no_of_instalments) + float(loan_doc.extension) - float(loan_doc.paid_instalment))
if loan_doc.outstanding_amount == 0:
loan_doc.status = "Paid"
loan_doc.emi_amount = 0
loan_doc.flags.ignore_permissions = True
loan_doc.save()
def update_je_for_loan(self, row, cycle, je_no): # SG-5-10
principal_interest = get_interest_amount(row.amount, row.loan_id)
company = frappe.db.get_value("Company",self.vlcc_name,['name','abbr','cost_center'],as_dict=1)
accounts_row = frappe.db.get_value("Journal Entry Account", {'parent':je_no}, 'name')
accounts_row_debit = frappe.db.get_value("Journal Entry Account", {'parent':je_no,"account":\
'Debtors - '+company.get('abbr')}, 'name')
accounts_row_credit_principal = frappe.db.get_value("Journal Entry Account", {'parent':je_no,"account":\
'Loans and Advances - '+company.get('abbr')}, 'name')
accounts_row_credit_interest = frappe.db.get_value("Journal Entry Account", {'parent':je_no,"account":\
'Interest Income - '+company.get('abbr')}, 'name')
frappe.db.set_value("Journal Entry Account",{'name':accounts_row_debit, 'account':"Debtors - "+company.get('abbr')}, 'debit_in_account_currency', principal_interest.get('principal')+principal_interest.get('interest'))
frappe.db.set_value("Journal Entry Account",{'name':accounts_row_credit_principal, 'account':"Loans and Advances - "+company.get('abbr')}, 'credit_in_account_currency', principal_interest.get('principal'))
frappe.db.set_value("Journal Entry Account",{'name':accounts_row_credit_interest, 'account':"Interest Income - "+company.get('abbr')}, 'credit_in_account_currency', principal_interest.get('interest'))
frappe.db.set_value("Journal Entry", je_no, 'total_credit', row.amount)
frappe.db.set_value("Journal Entry", je_no, 'total_debit', row.amount)
frappe.db.set_value("Journal Entry", je_no, 'posting_date', self.collection_to)
self.update_gl_entry_loan(je_no, principal_interest)
def update_je_for_advance(self, row, cycle, je_no): # SG-5-10
company = frappe.db.get_value("Company",self.vlcc_name,['name','abbr','cost_center'],as_dict=1)
advance_type = frappe.db.get_value("Farmer Advance",{'name': row.adv_id}, 'advance_type')
if advance_type == "Money Advance":
accounts_row_debit = frappe.db.get_value("Journal Entry Account", {'parent':je_no,"account":\
'Debtors - '+company.get('abbr')}, 'name')
accounts_row_credit = frappe.db.get_value("Journal Entry Account", {'parent':je_no,"account":\
'Loans and Advances - '+company.get('abbr')}, 'name')
frappe.db.set_value("Journal Entry Account",{'name':accounts_row_debit, 'account':'Debtors - '+company.get('abbr')}, 'debit_in_account_currency', row.amount)
frappe.db.set_value("Journal Entry Account",{'name':accounts_row_credit, 'account':'Loans and Advances - '+company.get('abbr')}, 'credit_in_account_currency', row.amount)
frappe.db.set_value("Journal Entry", je_no, 'total_credit', row.amount)
frappe.db.set_value("Journal Entry", je_no, 'total_debit', row.amount)
frappe.db.set_value("Journal Entry", je_no, 'posting_date', self.collection_to)
self.update_gl_entry_advance(je_no, row, row.amount)
if advance_type == "Feed And Fodder Advance":
accounts_row_debit = frappe.db.get_value("Journal Entry Account", {'parent':je_no,"account":\
'Debtors - '+company.get('abbr')}, 'name')
accounts_row_credit = frappe.db.get_value("Journal Entry Account", {'parent':je_no,"account":\
'Feed And Fodder Advance - '+company.get('abbr')}, 'name')
frappe.db.set_value("Journal Entry Account",{'name':accounts_row_debit, 'account':'Debtors - '+company.get('abbr')}, 'debit_in_account_currency', row.amount)
frappe.db.set_value("Journal Entry Account",{'name':accounts_row_credit, 'account':'Feed And Fodder Advance - '+company.get('abbr')}, 'credit_in_account_currency', row.amount)
frappe.db.set_value("Journal Entry", je_no, 'total_credit', row.amount)
frappe.db.set_value("Journal Entry", je_no, 'total_debit', row.amount)
frappe.db.set_value("Journal Entry", je_no, 'posting_date', self.collection_to)
self.update_gl_entry_advance(je_no, row, row.amount)
def update_gl_entry_loan(self, je_no, principal_interest):
if je_no and principal_interest:
company_abbr = frappe.db.get_value("Company",get_vlcc(),'abbr')
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'debit', principal_interest.get('principal') + principal_interest.get('interest'))
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'credit', 0)
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'debit_in_account_currency', principal_interest.get('principal') + principal_interest.get('interest'))
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'credit_in_account_currency', 0)
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'posting_date', self.collection_to )
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'posting_date', self.collection_to )
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'debit', 0)
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'credit', principal_interest.get('principal'))
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'debit_in_account_currency', 0)
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'credit_in_account_currency', principal_interest.get('principal'))
frappe.db.set_value("GL Entry", {"account":"Interest Income - "+company_abbr, "voucher_no": je_no},\
'debit', 0)
frappe.db.set_value("GL Entry", {"account":"Interest Income - "+company_abbr, "voucher_no": je_no},\
'credit', principal_interest.get('interest') )
frappe.db.set_value("GL Entry", {"account":"Interest Income - "+company_abbr, "voucher_no": je_no},\
'debit_in_account_currency', 0)
frappe.db.set_value("GL Entry", {"account":"Interest Income - "+company_abbr, "voucher_no": je_no},\
'credit_in_account_currency', principal_interest.get('interest'))
frappe.db.set_value("GL Entry", {"account":"Interest Income - "+company_abbr, "voucher_no": je_no},\
'posting_date', self.collection_to )
def update_gl_entry_advance(self, je_no, row, amount):
if je_no and amount:
advance_type = frappe.db.get_value("Farmer Advance",{'name': row.adv_id}, 'advance_type')
company_abbr = frappe.db.get_value("Company",get_vlcc(),'abbr')
if advance_type == "Money Advance":
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'debit', amount)
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'credit_in_account_currency', 0)
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'debit_in_account_currency', amount)
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'credit_in_account_currency', 0)
frappe.db.set_value("GL Entry", {"account": "Debtors - "+company_abbr, "voucher_no": je_no},\
'posting_date', self.collection_to )
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'debit', 0)
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'credit', amount )
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'debit_in_account_currency', 0)
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'credit_in_account_currency', amount )
frappe.db.set_value("GL Entry", {"account": "Loans and Advances - "+company_abbr, "voucher_no": je_no},\
'posting_date', self.collection_to )
if advance_type == "Feed And Fodder Advance":
frappe.db.set_value("GL Entry", {"account": 'Debtors - '+company_abbr, "voucher_no": je_no},\
'debit', amount )
frappe.db.set_value("GL Entry", {"account": 'Debtors - '+company_abbr, "voucher_no": je_no},\
'credit', 0 )
frappe.db.set_value("GL Entry", {"account": 'Debtors - '+company_abbr, "voucher_no": je_no},\
'debit_in_account_currency', amount )
frappe.db.set_value("GL Entry", {"account": 'Debtors - '+company_abbr, "voucher_no": je_no},\
'credit_in_account_currency', 0 )
frappe.db.set_value("GL Entry", {"account": 'Debtors - '+company_abbr, "voucher_no": je_no},\
'posting_date', self.collection_to )
frappe.db.set_value("GL Entry", {"account": 'Feed And Fodder Advance - '+company_abbr, "voucher_no": je_no},\
'debit', 0 )
frappe.db.set_value("GL Entry", {"account": 'Feed And Fodder Advance - '+company_abbr, "voucher_no": je_no},\
'credit', amount )
frappe.db.set_value("GL Entry", {"account": 'Feed And Fodder Advance - '+company_abbr, "voucher_no": je_no},\
'debit_in_account_currency', 0 )
frappe.db.set_value("GL Entry", {"account": 'Feed And Fodder Advance - '+company_abbr, "voucher_no": je_no},\
'credit_in_account_currency', amount )
frappe.db.set_value("GL Entry", {"account": 'Feed And Fodder Advance - '+company_abbr, "voucher_no": je_no},\
'posting_date', self.collection_to )
def create_incentive(self):
pi = frappe.new_doc("Purchase Invoice")
pi.supplier = self.farmer_name
pi.company = self.vlcc_name
pi.pi_type = "Incentive"
pi.cycle = self.cycle
pi.append("items",
{
"qty":1,
"item_code": "Milk Incentives",
"rate": self.incentives,
"amount": self.incentives,
"cost_center": frappe.db.get_value("Company", self.vlcc_name, "cost_center")
})
pi.flags.ignore_permissions = True
pi.save()
pi.submit()
#updating date for current cycle
frappe.db.set_value("Purchase Invoice", pi.name, 'posting_date', self.collection_to)
gl_stock = frappe.db.get_value("Company", get_vlcc(), 'stock_received_but_not_billed')
gl_credit = frappe.db.get_value("Company", get_vlcc(), 'default_payable_account')
frappe.db.set_value("GL Entry",{'account': gl_stock,'voucher_no':pi.name}, 'posting_date', self.collection_to)
frappe.db.set_value("GL Entry",{'account': gl_credit,'voucher_no':pi.name}, 'posting_date', self.collection_to)
def get_interest_amount(amount, data):
loan_doc = frappe.get_all("Farmer Loan",fields=['interest','no_of_instalments','emi_amount'],filters={'name':data})
interest_per_cycle = loan_doc[0].get('interest') / loan_doc[0].get('no_of_instalments')
principal_per_cycle = amount - interest_per_cycle
if amount <= interest_per_cycle:
interest_per_cycle = flt(amount,2)
principal_per_cycle = 0
else:
interest_per_cycle = flt(interest_per_cycle,2)
principal_per_cycle = flt((amount - interest_per_cycle),2)
return { 'interest': interest_per_cycle , 'principal': principal_per_cycle}
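# Worked example for get_interest_amount (illustrative, hypothetical figures):
# a loan with interest = 1200 spread over no_of_instalments = 12 has an
# interest_per_cycle of 100, so a repayment of amount = 500 returns
# {'interest': 100.0, 'principal': 400.0}, while a repayment of amount = 80,
# which is below the per-cycle interest, returns {'interest': 80.0, 'principal': 0}.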
@frappe.whitelist()
def get_fmcr(start_date, end_date, vlcc, farmer_id, cycle=None):
fmcr = frappe.db.sql("""
select rcvdtime,shift,milkquantity,fat,snf,rate,amount
from
`tabFarmer Milk Collection Record`
where
associated_vlcc = '{0}' and date(rcvdtime) between '{1}' and '{2}' and farmerid= '{3}'
""".format(vlcc, start_date, end_date, farmer_id),as_dict=1)
amount = 0
qty = 0
for i in fmcr:
amount += i.get('amount')
qty += i.get('milkquantity')
amount = flt(amount,2)
return {
"fmcr":fmcr,
"weighted_data" : get_weighted_fmcr_data(fmcr), # Added by Niraj
"incentive": get_incentives(amount, qty, vlcc) or 0,
"advance": get_advances(start_date, end_date, vlcc, farmer_id, cycle) or 0,
"loan": get_loans(start_date, end_date, vlcc, farmer_id, cycle) or 0,
"fodder": get_fodder_amount(start_date, end_date, farmer_id, vlcc) or 0,
"vet": vet_service_amnt(start_date, end_date, farmer_id, vlcc) or 0,
"child_loan": get_loans_child(start_date, end_date, vlcc, farmer_id,cycle),
"child_advance": get_advance_child(start_date, end_date, vlcc, farmer_id, cycle)
}
def get_weighted_fmcr_data(fmcr_data):
if len(fmcr_data) == 0:
return
milkquantity, fat, snf, rate, amount = 0, 0, 0, 0, 0
for data in fmcr_data:
milkquantity += data.get('milkquantity')
fat += data.get('fat')*data.get('milkquantity')
snf += data.get('snf')*data.get('milkquantity')
rate += data.get('rate')*data.get('milkquantity')
amount += data.get('amount')
fat, snf , rate = round(fat/milkquantity, 2), round(snf/milkquantity, 2), round(rate/milkquantity, 2)
return {
"milkquantity" : milkquantity,
"fat" : fat,
"snf" : snf,
"rate": rate,
"amount" : amount
}
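# Worked example for get_weighted_fmcr_data (illustrative, hypothetical
# figures): two collections of 10 L at fat 4.0 and 20 L at fat 3.4 give
# milkquantity = 30 and a quantity-weighted fat of (10*4.0 + 20*3.4) / 30 = 3.6;
# snf and rate are weighted the same way, and amount is a plain sum.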
def get_incentives(amount, qty, vlcc=None):
if vlcc and amount and qty:
incentive = 0
name = frappe.db.get_value("Farmer Settings", {'vlcc':vlcc}, 'name')
farmer_settings = frappe.get_doc("Farmer Settings",name)
if farmer_settings.enable_local_setting and not farmer_settings.enable_local_per_litre:
incentive = (float(farmer_settings.local_farmer_incentive ) * float(amount)) / 100
if farmer_settings.enable_local_setting and farmer_settings.enable_local_per_litre:
incentive = (float(farmer_settings.local_per_litre) * float(qty))
if not farmer_settings.enable_local_setting and not farmer_settings.enable_per_litre:
incentive = (float(farmer_settings.farmer_incentives) * float(amount)) / 100
if not farmer_settings.enable_local_setting and farmer_settings.enable_per_litre:
incentive = (float(farmer_settings.per_litre) * float(qty))
return incentive
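# Worked example for get_incentives (illustrative, hypothetical settings): with
# the per-litre mode enabled and per_litre = 0.50, a cycle of qty = 200 L earns
# an incentive of 0.50 * 200 = 100.0; with the percentage mode instead and
# farmer_incentives = 2, an amount of 5000 earns (2 * 5000) / 100 = 100.0.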
@frappe.whitelist()
def get_advances(start_date, end_date, vlcc, farmer_id, cycle = None):
advance = frappe.db.sql("""
select ifnull(sum(outstanding_amount),0) as oustanding
from
`tabFarmer Advance`
where
creation < now() and farmer_id = '{2}' and status = 'Unpaid' and docstatus = 1
""".format(start_date, end_date, farmer_id), as_dict=1)
if len(advance):
return advance[0].get('oustanding') if advance[0].get('oustanding') != None else 0
else: return 0
@frappe.whitelist()
def get_loans(start_date, end_date, vlcc, farmer_id, cycle = None):
loan = frappe.db.sql("""
select ifnull(sum(outstanding_amount),0) as oustanding
from
`tabFarmer Loan`
where
creation < now() and farmer_id = '{2}' and status = 'Unpaid' and docstatus = 1
""".format(start_date, end_date, farmer_id), as_dict=1)
if len(loan):
return loan[0].get('oustanding') if loan[0].get('oustanding') != None else 0
else: return 0
def get_fodder_amount(start_date, end_date, farmer_id, vlcc=None):
fodder = frappe.db.sql("""
select ifnull(sum(si.amount),0) as amt
from
`tabSales Invoice Item` si,
`tabSales Invoice` s
where
s.name= si.parent and
s.docstatus = 1 and
si.item_group in ('Cattle Feed') and s.local_sale = 1 and
s.farmer = '{0}'and
s.local_sale_type not in ('Feed And Fodder Advance') and
s.posting_date between '{1}' and '{2}'
""".format(farmer_id, start_date, end_date),as_dict=1)
if len(fodder):
return fodder[0].get('amt') if fodder[0].get('amt') != None else 0
else: return 0
def vet_service_amnt(start_date, end_date, farmer_id, vlcc=None):
vet_amnt = frappe.db.sql("""
select ifnull(sum(si.amount),0) as amt
from
`tabSales Invoice Item` si,
`tabSales Invoice` s
where
s.name= si.parent and
s.docstatus = 1 and
si.item_group in ('Veterinary Services') and s.service_note = 1 and
s.farmer = '{0}'and
s.posting_date between '{1}' and '{2}'
""".format(farmer_id, start_date, end_date),as_dict=1)
if len(vet_amnt):
return vet_amnt[0].get('amt') if vet_amnt[0].get('amt') != None else 0
else: return 0
# @frappe.whitelist()
# def get_cycle(doctype,text,searchfields,start,pagelen,filters):
# return frappe.db.sql("""
# select name
# from
# `tabFarmer Date Computation`
# where
# end_date < now() and vlcc = '{vlcc}' and name like '{txt}' and name not in (select cycle from `tabFarmer Payment Cycle Report` where farmer_id = '{farmer}')
# """.format(farmer = filters.get('farmer') , vlcc = filters.get('vlcc'),txt= "%%%s%%" % text,as_list=True))
@frappe.whitelist()
def get_cycle(doctype,text,searchfields,start,pagelen,filters):
return frappe.db.sql("""
select name
from
`tabFarmer Date Computation`
where
end_date < now() and
end_date >= (select
date(creation)
from
`tabFarmer`
where
farmer_id='{farmer}') and
vlcc = '{vlcc}' and
name like '{txt}' and
name not in (select
cycle
from
`tabFarmer Payment Cycle Report`
where
farmer_id = '{farmer}')
""".format(farmer = filters.get('farmer') , vlcc = filters.get('vlcc'),txt= "%%%s%%" % text,as_list=True))
def req_cycle_computation(data):
if data.get('emi_deduction_start_cycle') > 0:
not_req_cycl = frappe.db.sql("""
select name
from
`tabFarmer Date Computation`
where
'{0}' < start_date or date('{0}') between start_date and end_date
and vlcc = '{1}' order by start_date limit {2}""".format(data.get('date_of_disbursement'),data.get('vlcc'),data.get('emi_deduction_start_cycle')),as_dict=1,debug=0)
not_req_cycl_list = [ '"%s"'%i.get('name') for i in not_req_cycl ]
instalment = int(data.get('no_of_instalments')) + int(data.get('extension'))
req_cycle = frappe.db.sql("""
select name
from
`tabFarmer Date Computation`
where
'{date}' <= start_date and name not in ({cycle}) and vlcc = '{vlcc}' order by start_date limit {instalment}
""".format(date=data.get('date_of_disbursement'), cycle = ','.join(not_req_cycl_list),vlcc = data.get('vlcc'),
instalment = instalment),as_dict=1,debug=0)
req_cycl_list = [i.get('name') for i in req_cycle]
return req_cycl_list
elif data.get('emi_deduction_start_cycle') == 0:
instalment = int(data.get('no_of_instalments')) + int(data.get('extension'))
req_cycle = frappe.db.sql("""
select
name
from
`tabFarmer Date Computation`
where
'{date}' <= end_date and vlcc = '{vlcc}'
order by start_date limit {instalment}
""".format(date=data.get('date_of_disbursement'),vlcc=data.get('vlcc'),instalment = instalment),as_dict=1,debug=0)
req_cycl_list = [i.get('name') for i in req_cycle]
return req_cycl_list
return []
def get_conditions(data):
conditions = " and 1=1"
if data.get('emi_deduction_start_cycle'):
conditions += ' limit {0}'.format(data.get('emi_deduction_start_cycle'))
return conditions
def get_cycle_cond(data,not_req_cycl_list):
conditions = " and 1=1"
if data.get('emi_deduction_start_cycle'):
conditions += ' and name not in ({cycle})'.format(cycle = ','.join(not_req_cycl_list))
else:
conditions += ' and name in ({cycle})'.format(cycle = ','.join(not_req_cycl_list))
return conditions
def get_current_cycle(data):
return frappe.db.sql("""
select name
from
`tabFarmer Date Computation`
where
vlcc = %s and now() between start_date and end_date
""",(data.get('vlcc')),as_dict=1)
def req_cycle_computation_advance(data):
if data.get('emi_deduction_start_cycle') > 0:
not_req_cycl = frappe.db.sql("""
select name
from
`tabFarmer Date Computation`
where
'{0}' < start_date or date('{0}') between start_date and end_date
and vlcc = '{1}' order by start_date limit {2}""".format(data.get('date_of_disbursement'),data.get('vlcc'),data.get('emi_deduction_start_cycle')),as_dict=1,debug=0)
not_req_cycl_list = [ '"%s"'%i.get('name') for i in not_req_cycl ]
instalment = int(data.get('no_of_instalment')) + int(data.get('extension'))
req_cycle = frappe.db.sql("""
select name
from
`tabFarmer Date Computation`
where
'{date}' <= start_date and name not in ({cycle}) and vlcc = '{vlcc}' order by start_date limit {instalment}
""".format(date=data.get('date_of_disbursement'), cycle = ','.join(not_req_cycl_list),vlcc = data.get('vlcc'),
instalment = instalment),as_dict=1,debug=0)
req_cycl_list = [i.get('name') for i in req_cycle]
return req_cycl_list
elif data.get('emi_deduction_start_cycle') == 0:
instalment = int(data.get('no_of_instalment')) + int(data.get('extension'))
req_cycle = frappe.db.sql("""
select
name
from
`tabFarmer Date Computation`
where
'{date}' <= end_date and vlcc= '{vlcc}'
order by start_date limit {instalment}
""".format(date=data.get('date_of_disbursement'),vlcc=data.get('vlcc'),instalment = instalment),as_dict=1,debug=0)
req_cycl_list = [i.get('name') for i in req_cycle]
return req_cycl_list
return []
def get_loans_child(start_date, end_date, vlcc, farmer_id, cycle=None):
loans_ = frappe.db.sql("""
select name,farmer_id,outstanding_amount,
emi_amount,no_of_instalments,paid_instalment,advance_amount,
emi_deduction_start_cycle,extension,date_of_disbursement,vlcc
from
`tabFarmer Loan`
where
farmer_id = '{0}' and outstanding_amount != 0 and date_of_disbursement < now() and docstatus =1
""".format(farmer_id),as_dict=1,debug=0)
loans = []
for row in loans_:
req_cycle = req_cycle_computation(row)
if cycle in req_cycle_computation(row):
loans.append(row)
return loans
def get_advance_child(start_date, end_date, vlcc, farmer_id, cycle=None):
advance_ = frappe.db.sql("""
select name,farmer_id,outstanding_amount,emi_amount,advance_amount,
no_of_instalment,paid_instalment,emi_deduction_start_cycle,
extension,date_of_disbursement,vlcc
from
`tabFarmer Advance`
where
farmer_id = '{0}' and outstanding_amount != 0 and date_of_disbursement < now() and docstatus =1
""".format(farmer_id),as_dict=1)
advance = []
for row in advance_:
if cycle in req_cycle_computation_advance(row):
advance.append(row)
return advance
@frappe.whitelist()
def update_full_loan(loan=None):
loan_doc = frappe.get_doc("Farmer Loan", loan)
paid_amnt = float(loan_doc.advance_amount) - float(loan_doc.outstanding_amount)
instlment = int(loan_doc.no_of_instalments) + int(loan_doc.extension)
instlment_brkup = float(loan_doc.interest) / instlment
principle_paid = float(paid_amnt) - float(instlment_brkup)
def fpcr_permission(user):
roles = frappe.get_roles(user)
user_doc = frappe.db.get_value("User",{"name":frappe.session.user},['operator_type','company','branch_office'], as_dict =1)
if user != 'Administrator' and "Vlcc Manager" in roles:
return """(`tabFarmer Payment Cycle Report`.vlcc_name = '{0}')""".format(user_doc.get('company'))
@frappe.whitelist()
def get_fpcr_flag():
return frappe.db.get_value("Farmer Settings", {'vlcc':get_vlcc()}, 'is_fpcr')
def get_vlcc():
return frappe.db.get_value("User",frappe.session.user, 'company')
# SG-6-10
@frappe.whitelist()
def get_updated_advance(cycle, data, adv_id, amount, total):
data, total_paid, total_amount, overriding_amount = json.loads(data), 0, 0, 0
for row in data.get('advance_child'):
sum_ = frappe.db.sql("""
select ifnull(sum(total_debit),0) as total
from
`tabJournal Entry`
where
farmer_advance =%s and cycle =%s and type='Farmer Advance' """,(row.get('adv_id'),cycle),as_dict=1,debug=0)
total_paid += sum_[0].get('total')
total_amount += row.get('principle')
overriding_amount += flt(row.get('amount'))
return flt((total_amount - overriding_amount),2) or 0
@frappe.whitelist()
def get_updated_loan(cycle, data, loan_id=None, amount=None, total = None):
data, total_paid, total_amount, overriding_amount = json.loads(data), 0, 0, 0
for row in data.get('loan_child'):
total_amount += row.get('principle')
overriding_amount += row.get('amount')
return flt((total_amount - overriding_amount),2) or 0
| 45.942382
| 219
| 0.707868
|
a37cedb51c712ddf53c24677c91bb6fa641080c4
| 129
|
py
|
Python
|
demo2/demo2_app/models.py
|
mpasternak/pytest-django-pytest-splinter-test
|
843577e05a91545e4ff1d687b3fd56f25e0e22d3
|
[
"Unlicense"
] | null | null | null |
demo2/demo2_app/models.py
|
mpasternak/pytest-django-pytest-splinter-test
|
843577e05a91545e4ff1d687b3fd56f25e0e22d3
|
[
"Unlicense"
] | null | null | null |
demo2/demo2_app/models.py
|
mpasternak/pytest-django-pytest-splinter-test
|
843577e05a91545e4ff1d687b3fd56f25e0e22d3
|
[
"Unlicense"
] | null | null | null |
from django.db import models
# Create your models here.
class Foobar(models.Model):
name = models.CharField(max_length=50)
| 18.428571
| 42
| 0.751938
|
35c2486e81e8227147642c32fe3906948d4f32da
| 30,715
|
py
|
Python
|
examples/contrib/run_vcr.py
|
splionar/transformers
|
2f457ddc32e44bd40752406ebabdd6ee9c9d64bc
|
[
"Apache-2.0"
] | null | null | null |
examples/contrib/run_vcr.py
|
splionar/transformers
|
2f457ddc32e44bd40752406ebabdd6ee9c9d64bc
|
[
"Apache-2.0"
] | null | null | null |
examples/contrib/run_vcr.py
|
splionar/transformers
|
2f457ddc32e44bd40752406ebabdd6ee9c9d64bc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner.
Finetuning the library models for multiple choice on SWAG (Bert).
"""
import argparse
import csv
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForMultipleChoice,
BertTokenizer,
get_linear_schedule_with_warmup,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in [BertConfig]), ())
MODEL_CLASSES = {
"bert": (BertConfig, BertForMultipleChoice, BertTokenizer),
}
class SwagExample(object):
"""A single training/test example for the SWAG dataset."""
def __init__(self, swag_id, context_sentence, ending_0, ending_1, ending_2, ending_3, label=None):
self.swag_id = swag_id
self.context_sentence = context_sentence
self.endings = [
ending_0,
ending_1,
ending_2,
ending_3,
]
self.label = label
def __str__(self):
return self.__repr__()
def __repr__(self):
attributes = [
"swag_id: {}".format(self.swag_id),
"context_sentence: {}".format(self.context_sentence),
"ending_0: {}".format(self.endings[0]),
"ending_1: {}".format(self.endings[1]),
"ending_2: {}".format(self.endings[2]),
"ending_3: {}".format(self.endings[3]),
]
if self.label is not None:
attributes.append("label: {}".format(self.label))
return ", ".join(attributes)
class InputFeatures(object):
def __init__(self, example_id, choices_features, label):
self.example_id = example_id
self.choices_features = [
{"input_ids": input_ids, "input_mask": input_mask, "segment_ids": segment_ids}
for _, input_ids, input_mask, segment_ids in choices_features
]
self.label = label
def read_swag_examples(input_file, is_training=True):
with open(input_file, "r", encoding="utf-8") as f:
lines = list(csv.reader(f))
if is_training and lines[0][-1] != "label":
raise ValueError("For training, the input file must contain a label column.")
examples = [
SwagExample(
swag_id=line[0],
context_sentence=line[3],
ending_0=line[4],
ending_1=line[5],
ending_2=line[6],
ending_3=line[7],
label=int(line[8]) if is_training else None,
)
for line in lines[1:] # we skip the line with the column names
]
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
# Swag is a multiple choice task. To perform this task using Bert,
# we will use the formatting proposed in "Improving Language
# Understanding by Generative Pre-Training" and suggested by
# @jacobdevlin-google in this issue
# https://github.com/google-research/bert/issues/38.
#
# Each choice will correspond to a sample on which we run the
# inference. For a given Swag example, we will create the 4
# following inputs:
# - [CLS] context [SEP] choice_1 [SEP]
# - [CLS] context [SEP] choice_2 [SEP]
# - [CLS] context [SEP] choice_3 [SEP]
# - [CLS] context [SEP] choice_4 [SEP]
# The model will output a single value for each input. To get the
# final decision of the model, we will run a softmax over these 4
# outputs.
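    # Concretely (an illustrative, hypothetical example): for a context
    # "A person is cooking" the first of the four inputs is
    #   [CLS] a person is cooking [SEP] <tokens of ending_0> [SEP]
    # with segment_ids 0 over the context span and 1 over the ending span,
    # and the label is the index of the correct ending.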
features = []
for example_index, example in tqdm(enumerate(examples)):
context_tokens = tokenizer.tokenize(example.context_sentence)
choices_features = []
for ending_index, ending in enumerate(example.endings):
# We create a copy of the context tokens in order to be
# able to shrink it according to ending_tokens
context_tokens_choice = context_tokens[:]
ending_tokens = tokenizer.tokenize(ending)
# Modifies `context_tokens_choice` and `ending_tokens` in
# place so that the total length is less than the
# specified length. Account for [CLS], [SEP], [SEP] with
# "- 3"
_truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3)
tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"]
segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
choices_features.append((tokens, input_ids, input_mask, segment_ids))
label = example.label
if example_index < 5:
logger.info("*** Example ***")
logger.info("swag_id: {}".format(example.swag_id))
for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
logger.info("choice: {}".format(choice_idx))
logger.info("tokens: {}".format(" ".join(tokens)))
logger.info("input_ids: {}".format(" ".join(map(str, input_ids))))
logger.info("input_mask: {}".format(" ".join(map(str, input_mask))))
logger.info("segment_ids: {}".format(" ".join(map(str, segment_ids))))
if is_training:
logger.info("label: {}".format(label))
features.append(InputFeatures(example_id=example.swag_id, choices_features=choices_features, label=label))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
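# Illustrative sanity check of _truncate_seq_pair (a small added sketch using
# throwaway token lists): items are popped from the longer sequence until the
# pair fits within the budget.
_demo_a, _demo_b = list("aaaaaa"), list("bbb")
_truncate_seq_pair(_demo_a, _demo_b, 7)
assert (len(_demo_a), len(_demo_b)) == (4, 3)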
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def select_field(features, field):
return [[choice[field] for choice in feature.choices_features] for feature in features]
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
# Load data features from cache or dataset file
input_file = args.predict_file if evaluate else args.train_file
cached_features_file = os.path.join(
os.path.dirname(input_file),
"cached_{}_{}_{}".format(
"dev" if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", input_file)
examples = read_swag_examples(input_file)
features = convert_examples_to_features(examples, tokenizer, args.max_seq_length, not evaluate)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor(select_field(features, "input_ids"), dtype=torch.long)
all_input_mask = torch.tensor(select_field(features, "input_mask"), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(features, "segment_ids"), dtype=torch.long)
all_label = torch.tensor([f.label for f in features], dtype=torch.long)
if evaluate:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
if output_examples:
return dataset, examples, features
return dataset
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
# 'token_type_ids': None if args.model_type == 'xlm' else batch[2],
"token_type_ids": batch[2],
"labels": batch[3],
}
# if args.model_type in ['xlnet', 'xlm']:
# inputs.update({'cls_index': batch[5],
# 'p_mask': batch[6]})
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_vocabulary(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
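# Worked example (illustrative, hypothetical values): how the quantities logged
# in train() combine. With per_gpu_train_batch_size=8 and n_gpu=2,
# train_batch_size becomes 16; with gradient_accumulation_steps=4 each optimizer
# update covers 16 * 4 = 64 samples (times the world size when distributed).
# With 10,000 batches per epoch and num_train_epochs=3,
# t_total = 10000 // 4 * 3 = 7500 scheduled optimizer steps.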
def evaluate(args, model, tokenizer, prefix=""):
dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
prediction_list = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
# 'token_type_ids': None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids
"token_type_ids": batch[2],
"labels": batch[3],
}
# if args.model_type in ['xlnet', 'xlm']:
# inputs.update({'cls_index': batch[4],
# 'p_mask': batch[5]})
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
logits = logits.detach().cpu().numpy()
#logits_shape = np.shape(logits)
#predicted_label = np.argmax(logits, axis=1)
prediction_list.append(logits)
label_ids = inputs["labels"].to("cpu").numpy()
tmp_eval_accuracy = accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_steps += 1
nb_eval_examples += inputs["input_ids"].size(0)
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
output_prediction_file = os.path.join(args.output_dir, "prediction")
    np.save(output_prediction_file, prediction_list)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info("%s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return result
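# Illustrative sketch (not part of the original script): turning the raw logits
# saved above back into hard predictions. np.save() appends ".npy" to the
# "prediction" path; allow_pickle=True covers the case where a smaller final
# batch makes NumPy store the list as an object array.
#
#   import numpy as np
#   logits_batches = np.load(os.path.join(args.output_dir, "prediction.npy"),
#                            allow_pickle=True)
#   predicted = np.concatenate([np.argmax(b, axis=1) for b in logits_batches])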
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--train_file", default=None, type=str, required=True, help="SWAG csv for training. E.g., train.csv"
)
parser.add_argument(
"--predict_file",
default=None,
type=str,
required=True,
help="SWAG csv for predictions. E.g., val.csv or test.csv",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help="The maximum total input sequence length after tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case
)
model = model_class.from_pretrained(
args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Save the trained model and the tokenizer
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
if args.do_train:
checkpoints = [args.output_dir]
else:
# if do_train is False and do_eval is true, load model directly from pretrained.
checkpoints = [args.model_name_or_path]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
# Reload the model
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
tokenizer = tokenizer_class.from_pretrained(checkpoint)
model.to(args.device)
# Evaluate
result = evaluate(args, model, tokenizer, prefix=global_step)
result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items())
results.update(result)
logger.info("Results: {}".format(results))
return results
if __name__ == "__main__":
main()
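# Example invocation (illustrative; the script name, paths and model are
# placeholders, only the flags are taken from the argument parser above):
#
#   python run_swag.py \
#     --model_type bert \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --train_file data/train.csv --predict_file data/val.csv \
#     --max_seq_length 80 --per_gpu_train_batch_size 8 \
#     --output_dir ./swag_output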
| 41.675712
| 150
| 0.64545
|
e89ec95113747d4758af5a178b6d6954b4db0d5f
| 5,156
|
py
|
Python
|
project/openaid/management/commands/import_financial_data.py
|
DeppSRL/open-aid
|
84130761c00600a8523f4f28467d70ad974859cd
|
[
"BSD-3-Clause"
] | null | null | null |
project/openaid/management/commands/import_financial_data.py
|
DeppSRL/open-aid
|
84130761c00600a8523f4f28467d70ad974859cd
|
[
"BSD-3-Clause"
] | null | null | null |
project/openaid/management/commands/import_financial_data.py
|
DeppSRL/open-aid
|
84130761c00600a8523f4f28467d70ad974859cd
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
__author__ = 'stefano'
import logging
from optparse import make_option
from pprint import pprint
from django.core.exceptions import ObjectDoesNotExist
from openpyxl import load_workbook
from django.core.management.base import BaseCommand
from openaid.projects.models import Initiative
class Command(BaseCommand):
option_list = BaseCommand.option_list
help = 'import financial data for initiatives. 2 dec 2015 only'
logger = logging.getLogger('openaid')
stash_codici = []
completed_only_xls = []
completed_in_xls = []
corso_only_xls = []
corso_in_xls = []
def get_code(self, row):
code = None
zfill_code = None
value = row[0].value
if type(value) == int:
code = value
if type(value) == float:
try:
code = int(value)
except TypeError:
return None, None
zfill_code = str(code).zfill(6)
return code, zfill_code
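    # Illustrative example (not part of the original command): numeric codes read
    # from the sheet are normalised to the 6-character form used by
    # Initiative.code, e.g.
    #   >>> str(int(1234.0)).zfill(6)
    #   '001234'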
    def convert_list_to_string(self, items):
        return ",".join(items)
    def check_uniqueness(self, ws):
ret = False
for row_counter, row in enumerate(ws.rows):
if row_counter == 0:
continue
code, zfill_code = self.get_code(row)
if code is None:
continue
if zfill_code in self.stash_codici:
self.logger.error("Row:{} - Codice '{}' non univoco!".format(row_counter,code))
ret = True
else:
self.stash_codici.append(zfill_code)
return ret
def examinate_in_corso(self, ws):
for row_counter, row in enumerate(ws.rows):
if row_counter == 0:
continue
code, zfill_code = self.get_code(row)
if code is None:
continue
if zfill_code not in self.corso_in_xls:
self.corso_in_xls.append(zfill_code)
try:
initiative = Initiative.objects.get(code=zfill_code)
except ObjectDoesNotExist:
self.corso_only_xls.append(zfill_code)
continue
else:
self.logger.info("Update financial data for init:{}".format(initiative.code))
total = row[6].value
grant = row[5].value
loan = row[4].value
initiative.total_project_costs = total
initiative.loan_amount_approved = loan
initiative.grant_amount_approved = grant
if initiative.status_temp == '100':
self.logger.info("Update STATUS for init:{}".format(initiative.code))
initiative.status_temp = '-'
initiative.save()
def check_subsets(self):
        # determine which codes appear only in the XLS and which appear only in the DB
        codes_db = set(Initiative.objects.all().exclude(status_temp='100').order_by('code').values_list('code', flat=True))
codes_xls = set(sorted(self.stash_codici))
stringa_db = self.convert_list_to_string(codes_db-codes_xls)
stringa_xls = self.convert_list_to_string(codes_xls-codes_db)
self.logger.info("Codes only in DB:{}".format(stringa_db))
self.logger.info("Codes only in XLS:{}".format(stringa_xls))
def handle(self, *args, **options):
verbosity = options['verbosity']
input_filename = 'resources/fixtures/Aid.Titolo.Iniziative.Stato.Finanziario.DGCS.251115.xlsx'
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
self.logger.info(u"Opening input file: {}".format(input_filename))
input_file = open(input_filename, 'rb')
        input_workbook = load_workbook(input_file, data_only=True, use_iterators=True)
ws_esecuzione_con_scheda = input_workbook['Esecuzione con scheda']
ws_esecuzione_senza_scheda = input_workbook['Esecuzione senza scheda']
self.logger.info("Checking uniqueness of codes in the file")
# check that codes are unique in the whole file, initiatives cannot be repeated
self.logger.info("Checking iniziative esecuzione con scheda")
ret1 = self.check_uniqueness(ws_esecuzione_con_scheda)
self.logger.info("Checking iniziative esecuzione senza scheda")
ret2 = self.check_uniqueness(ws_esecuzione_senza_scheda)
if ret1 or ret2:
self.logger.critical("Codes are not unique in the file. Quitting")
exit()
else:
self.logger.info("All codes are unique")
self.logger.info("Examinate IN ESECUZIONE sheet")
# deal with in corso initiatives
self.examinate_in_corso(ws_esecuzione_con_scheda)
self.examinate_in_corso(ws_esecuzione_senza_scheda)
# log the results
self.check_subsets()
self.logger.info(u"Finish")
| 37.911765
| 122
| 0.621024
|
e53872ac53b7b774f4bf8de44c8ef9c4dcd42117
| 107,870
|
py
|
Python
|
nova/network/neutronv2/api.py
|
larsbutler/nova
|
fb190f30a911658d8b0c4deaf43cbb8c9e35b672
|
[
"Apache-2.0"
] | null | null | null |
nova/network/neutronv2/api.py
|
larsbutler/nova
|
fb190f30a911658d8b0c4deaf43cbb8c9e35b672
|
[
"Apache-2.0"
] | null | null | null |
nova/network/neutronv2/api.py
|
larsbutler/nova
|
fb190f30a911658d8b0c4deaf43cbb8c9e35b672
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
import uuid
from keystoneauth1 import loading as ks_loading
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
import six
from nova.compute import utils as compute_utils
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.network import base_api
from nova.network import model as network_model
from nova.network.neutronv2 import constants
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
from nova.policies import base as base_policies
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
_SESSION = None
_ADMIN_AUTH = None
DEFAULT_SECGROUP = 'default'
def reset_state():
global _ADMIN_AUTH
global _SESSION
_ADMIN_AUTH = None
_SESSION = None
def _load_auth_plugin(conf):
auth_plugin = ks_loading.load_auth_from_conf_options(conf,
nova.conf.neutron.NEUTRON_GROUP)
if auth_plugin:
return auth_plugin
err_msg = _('Unknown auth type: %s') % conf.neutron.auth_type
raise neutron_client_exc.Unauthorized(message=err_msg)
class ClientWrapper(clientv20.Client):
"""A Neutron client wrapper class.
    Wraps the callable methods, catches Unauthorized and Forbidden exceptions
    from Neutron and converts them to 401 and 403 errors for Nova clients.
"""
def __init__(self, base_client, admin):
# Expose all attributes from the base_client instance
self.__dict__ = base_client.__dict__
self.base_client = base_client
self.admin = admin
def __getattribute__(self, name):
obj = object.__getattribute__(self, name)
if callable(obj):
obj = object.__getattribute__(self, 'proxy')(obj)
return obj
def proxy(self, obj):
def wrapper(*args, **kwargs):
try:
ret = obj(*args, **kwargs)
except neutron_client_exc.Unauthorized:
if not self.admin:
                    # Token is expired so Neutron is raising an
                    # unauthorized exception; we should convert it to a
                    # 401 so the client can handle a retry by
                    # regenerating a valid token and trying a new
                    # attempt.
raise exception.Unauthorized()
                # In an admin context, if the token is invalid the Neutron
                # client should be able to regenerate a valid one by using
                # the Neutron admin credential configuration located in
                # nova.conf.
LOG.error(_LE("Neutron client was not able to generate a "
"valid admin token, please verify Neutron "
"admin credential located in nova.conf"))
raise exception.NeutronAdminCredentialConfigurationInvalid()
except neutron_client_exc.Forbidden as e:
raise exception.Forbidden(e)
return ret
return wrapper
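# Illustrative sketch (not part of the original module): because
# ClientWrapper.__getattribute__ wraps every callable attribute with proxy(),
# any neutronclient call made through the wrapper, such as the hypothetical one
# below, has Unauthorized/Forbidden errors translated into nova exceptions.
#
#   client = get_client(context)   # returns a ClientWrapper
#   client.list_networks()         # proxied; may raise exception.Unauthorized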
def get_client(context, admin=False):
# NOTE(dprince): In the case where no auth_token is present we allow use of
# neutron admin tenant credentials if it is an admin context. This is to
# support some services (metadata API) where an admin context is used
# without an auth token.
global _ADMIN_AUTH
global _SESSION
auth_plugin = None
if not _SESSION:
_SESSION = ks_loading.load_session_from_conf_options(
CONF, nova.conf.neutron.NEUTRON_GROUP)
if admin or (context.is_admin and not context.auth_token):
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
auth_plugin = _ADMIN_AUTH
elif context.auth_token:
auth_plugin = context.get_auth_plugin()
if not auth_plugin:
# We did not get a user token and we should not be using
# an admin token so log an error
raise exception.Unauthorized()
return ClientWrapper(
clientv20.Client(session=_SESSION,
auth=auth_plugin,
endpoint_override=CONF.neutron.url,
region_name=CONF.neutron.region_name),
admin=admin or context.is_admin)
def _is_not_duplicate(item, items, items_list_name, instance):
present = item in items
# The expectation from this function's perspective is that the
# item is not part of the items list so if it is part of it
# we should at least log it as a warning
if present:
LOG.warning(_LW("%(item)s already exists in list: %(list_name)s "
"containing: %(items)s. ignoring it"),
{'item': item,
'list_name': items_list_name,
'items': items},
instance=instance)
return not present
def _ensure_no_port_binding_failure(port):
binding_vif_type = port.get('binding:vif_type')
if binding_vif_type == network_model.VIF_TYPE_BINDING_FAILED:
raise exception.PortBindingFailed(port_id=port['id'])
def _filter_hypervisor_macs(instance, ports, hypervisor_macs):
"""Removes macs from set if used by existing ports
    :param instance: the instance the ports are being allocated for
    :param ports: dict of pre-existing neutron ports, keyed by port id
:param hypervisor_macs: None or a set of MAC addresses that the
instance should use. hypervisor_macs are supplied by the hypervisor
driver (contrast with requested_networks which is user supplied).
NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
:type hypervisor_macs: set
:returns a set of available MAC addresses to use if
creating a port later; this is the set of hypervisor_macs
after removing any MAC addresses from explicitly
requested ports.
"""
if not hypervisor_macs:
return None
# Make a copy we can mutate: records macs that have not been used
# to create a port on a network. If we find a mac with a
# pre-allocated port we also remove it from this set.
available_macs = set(hypervisor_macs)
if not ports:
return available_macs
for port in ports.values():
mac = port['mac_address']
if mac not in hypervisor_macs:
LOG.debug("Port %(port)s mac address %(mac)s is "
"not in the set of hypervisor macs: "
"%(hyper_macs)s. Nova will overwrite "
"this with a new mac address.",
{'port': port['id'],
'mac': mac,
'hyper_macs': hypervisor_macs},
instance=instance)
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
# configured by users into multiple ports so we
# discard rather than popping.
available_macs.discard(mac)
return available_macs
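# Illustrative example (hypothetical values): with
# hypervisor_macs = {"aa:bb:cc:dd:ee:01", "aa:bb:cc:dd:ee:02"} and one
# pre-created port already using "aa:bb:cc:dd:ee:01", the function returns
# {"aa:bb:cc:dd:ee:02"} - the only MAC still available for ports that nova
# creates on the fly.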
def get_pci_device_profile(pci_dev):
dev_spec = pci_whitelist.get_pci_device_devspec(pci_dev)
if dev_spec:
return {'pci_vendor_info': "%s:%s" %
(pci_dev.vendor_id, pci_dev.product_id),
'pci_slot': pci_dev.address,
'physical_network':
dev_spec.get_tags().get('physical_network')}
raise exception.PciDeviceNotFound(node_id=pci_dev.compute_node_id,
address=pci_dev.address)
class API(base_api.NetworkAPI):
"""API for interacting with the neutron 2.x API."""
def __init__(self):
super(API, self).__init__()
self.last_neutron_extension_sync = None
self.extensions = {}
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures."""
def _get_available_networks(self, context, project_id,
net_ids=None, neutron=None,
auto_allocate=False):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
If net_ids specified, it searches networks with requested IDs only.
"""
if not neutron:
neutron = get_client(context)
if net_ids:
# If user has specified to attach instance only to specific
# networks then only add these to **search_opts. This search will
# also include 'shared' networks.
search_opts = {'id': net_ids}
nets = neutron.list_networks(**search_opts).get('networks', [])
else:
# (1) Retrieve non-public network list owned by the tenant.
search_opts = {'tenant_id': project_id, 'shared': False}
if auto_allocate:
# The auto-allocated-topology extension may create complex
# network topologies and it does so in a non-transactional
# fashion. Therefore API users may be exposed to resources that
# are transient or partially built. A client should use
# resources that are meant to be ready and this can be done by
# checking their admin_state_up flag.
search_opts['admin_state_up'] = True
nets = neutron.list_networks(**search_opts).get('networks', [])
# (2) Retrieve public network list.
search_opts = {'shared': True}
nets += neutron.list_networks(**search_opts).get('networks', [])
_ensure_requested_network_ordering(
lambda x: x['id'],
nets,
net_ids)
return nets
def _create_port_minimal(self, port_client, instance, network_id,
fixed_ip=None, security_group_ids=None):
"""Attempts to create a port for the instance on the given network.
:param port_client: The client to use to create the port.
:param instance: Create the port for the given instance.
:param network_id: Create the port on the given network.
:param fixed_ip: Optional fixed IP to use from the given network.
:param security_group_ids: Optional list of security group IDs to
apply to the port.
:returns: The created port.
:raises PortLimitExceeded: If neutron fails with an OverQuota error.
:raises NoMoreFixedIps: If neutron fails with
IpAddressGenerationFailure error.
:raises: PortBindingFailed: If port binding failed.
"""
port_req_body = {'port': {}}
try:
if fixed_ip:
port_req_body['port']['fixed_ips'] = [
{'ip_address': str(fixed_ip)}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance.project_id
if security_group_ids:
port_req_body['port']['security_groups'] = security_group_ids
port_response = port_client.create_port(port_req_body)
port = port_response['port']
port_id = port['id']
try:
_ensure_no_port_binding_failure(port)
except exception.PortBindingFailed:
with excutils.save_and_reraise_exception():
port_client.delete_port(port_id)
LOG.debug('Successfully created port: %s', port_id,
instance=instance)
return port
except neutron_client_exc.InvalidIpForNetworkClient:
LOG.warning(_LW('Neutron error: %(ip)s is not a valid IP address '
'for network %(network_id)s.'),
{'ip': fixed_ip, 'network_id': network_id},
instance=instance)
msg = (_('Fixed IP %(ip)s is not a valid ip address for '
'network %(network_id)s.') %
{'ip': fixed_ip, 'network_id': network_id})
raise exception.InvalidInput(reason=msg)
except neutron_client_exc.IpAddressInUseClient:
LOG.warning(_LW('Neutron error: Fixed IP %s is '
'already in use.'), fixed_ip, instance=instance)
msg = _("Fixed IP %s is already in use.") % fixed_ip
raise exception.FixedIpAlreadyInUse(message=msg)
except neutron_client_exc.OverQuotaClient:
LOG.warning(_LW(
'Neutron error: Port quota exceeded in tenant: %s'),
port_req_body['port']['tenant_id'], instance=instance)
raise exception.PortLimitExceeded()
except neutron_client_exc.IpAddressGenerationFailureClient:
LOG.warning(_LW('Neutron error: No more fixed IPs in network: %s'),
network_id, instance=instance)
raise exception.NoMoreFixedIps(net=network_id)
except neutron_client_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Neutron error creating port on network %s'),
network_id, instance=instance)
def _update_port(self, port_client, instance, port_id,
port_req_body):
try:
port_response = port_client.update_port(port_id, port_req_body)
port = port_response['port']
_ensure_no_port_binding_failure(port)
LOG.debug('Successfully updated port: %s', port_id,
instance=instance)
return port
except neutron_client_exc.MacAddressInUseClient:
mac_address = port_req_body['port'].get('mac_address')
network_id = port_req_body['port'].get('network_id')
LOG.warning(_LW('Neutron error: MAC address %(mac)s is already '
'in use on network %(network)s.'),
{'mac': mac_address, 'network': network_id},
instance=instance)
raise exception.PortInUse(port_id=mac_address)
@staticmethod
def _populate_mac_address(instance, port_req_body, available_macs):
# NOTE(johngarbutt) On port_update, this will cause us to override
# any previous mac address the port may have had.
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance.uuid)
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
return mac_address
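    # Illustrative example (hypothetical values): with
    # available_macs = {"aa:bb:cc:dd:ee:02"} the port body gains
    # port_req_body['port']['mac_address'] = "aa:bb:cc:dd:ee:02"; a later call
    # with the now-empty set raises exception.PortNotFree.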
def _check_external_network_attach(self, context, nets):
"""Check if attaching to external network is permitted."""
if not context.can(base_policies.NETWORK_ATTACH_EXTERNAL,
fatal=False):
for net in nets:
# Perform this check here rather than in validate_networks to
# ensure the check is performed every time
# allocate_for_instance is invoked
if net.get('router:external') and not net.get('shared'):
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
def _unbind_ports(self, context, ports,
neutron, port_client=None):
"""Unbind the given ports by clearing their device_id and
device_owner.
:param context: The request context.
:param ports: list of port IDs.
:param neutron: neutron client for the current context.
:param port_client: The client with appropriate karma for
updating the ports.
"""
port_binding = self._has_port_binding_extension(context,
refresh_cache=True, neutron=neutron)
if port_client is None:
# Requires admin creds to set port bindings
port_client = (neutron if not port_binding else
get_client(context, admin=True))
for port_id in ports:
# A port_id is optional in the NetworkRequest object so check here
# in case the caller forgot to filter the list.
if port_id is None:
continue
port_req_body = {'port': {'device_id': '', 'device_owner': ''}}
if port_binding:
port_req_body['port']['binding:host_id'] = None
port_req_body['port']['binding:profile'] = {}
if constants.DNS_INTEGRATION in self.extensions:
port_req_body['port']['dns_name'] = ''
try:
port_client.update_port(port_id, port_req_body)
except Exception:
LOG.exception(_LE("Unable to clear device ID "
"for port '%s'"), port_id)
def _validate_requested_port_ids(self, context, instance, neutron,
requested_networks):
"""Processes and validates requested networks for allocation.
Iterates over the list of NetworkRequest objects, validating the
request and building sets of ports, networks and MAC addresses to
use for allocating ports for the instance.
:param instance: allocate networks on this instance
:type instance: nova.objects.Instance
:param neutron: neutron client session
:type neutron: neutronclient.v2_0.client.Client
:returns: tuple of:
- ports: dict mapping of port id to port dict
- net_ids: list of requested network ids
- ordered_networks: list of nova.objects.NetworkRequest objects
for requested networks (either via explicit network request
or the network for an explicit port request)
:raises nova.exception.PortNotFound: If a requested port is not found
in Neutron.
:raises nova.exception.PortNotUsable: If a requested port is not owned
by the same tenant that the instance is created under.
:raises nova.exception.PortInUse: If a requested port is already
attached to another instance.
:raises nova.exception.PortNotUsableDNS: If a requested port has a
value assigned to its dns_name attribute.
"""
ports = {}
ordered_networks = []
# If we're asked to auto-allocate the network then there won't be any
# ports or real neutron networks to lookup, so just return empty
# results.
if requested_networks and not requested_networks.auto_allocate:
for request in requested_networks:
# Process a request to use a pre-existing neutron port.
if request.port_id:
# Make sure the port exists.
port = self._show_port(context, request.port_id,
neutron_client=neutron)
# Make sure the instance has access to the port.
if port['tenant_id'] != instance.project_id:
raise exception.PortNotUsable(port_id=request.port_id,
instance=instance.uuid)
# Make sure the port isn't already attached to another
# instance.
if port.get('device_id'):
raise exception.PortInUse(port_id=request.port_id)
# Make sure that if the user assigned a value to the port's
# dns_name attribute, it is equal to the instance's
# hostname
if port.get('dns_name'):
if port['dns_name'] != instance.hostname:
raise exception.PortNotUsableDNS(
port_id=request.port_id,
instance=instance.uuid, value=port['dns_name'],
hostname=instance.hostname)
# Make sure the port is usable
_ensure_no_port_binding_failure(port)
# If requesting a specific port, automatically process
# the network for that port as if it were explicitly
# requested.
request.network_id = port['network_id']
ports[request.port_id] = port
# Process a request to use a specific neutron network.
if request.network_id:
ordered_networks.append(request)
return ports, ordered_networks
def _clean_security_groups(self, security_groups):
"""Cleans security groups requested from Nova API
Neutron already passes a 'default' security group when
creating ports so it's not necessary to specify it to the
request.
"""
if security_groups == [DEFAULT_SECGROUP]:
security_groups = []
return security_groups
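    # Illustrative example: ["default"] is reduced to [] because neutron applies
    # the default group itself, while a list such as ["default", "web"] is passed
    # through unchanged.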
def _process_security_groups(self, instance, neutron, security_groups):
"""Processes and validates requested security groups for allocation.
Iterates over the list of requested security groups, validating the
request and filtering out the list of security group IDs to use for
port allocation.
:param instance: allocate networks on this instance
:type instance: nova.objects.Instance
:param neutron: neutron client session
:type neutron: neutronclient.v2_0.client.Client
:param security_groups: list of requested security group name or IDs
to use when allocating new ports for the instance
:return: list of security group IDs to use when allocating new ports
:raises nova.exception.NoUniqueMatch: If multiple security groups
are requested with the same name.
:raises nova.exception.SecurityGroupNotFound: If a requested security
group is not in the tenant-filtered list of available security
groups in Neutron.
"""
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
search_opts = {'tenant_id': instance.project_id}
user_security_groups = neutron.list_security_groups(
**search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
# If there was a name match in a previous iteration
# of the loop, we have a conflict.
if name_match:
raise exception.NoUniqueMatch(
_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific.") %
security_group)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another's security groups uuid, the name takes priority.
if name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
else:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
return security_group_ids
def _validate_requested_network_ids(self, context, instance, neutron,
requested_networks, ordered_networks):
"""Check requested networks using the Neutron API.
Check the user has access to the network they requested, and that
it is a suitable network to connect to. This includes getting the
network details for any ports that have been passed in, because the
request will have been updated with the request_id in
_validate_requested_port_ids.
If the user has not requested any ports or any networks, we get back
a full list of networks the user has access to, and if there is only
one network, we update ordered_networks so we will connect the
instance to that network.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param requested_networks: value containing
network_id, fixed_ip, and port_id
:param ordered_networks: output from _validate_requested_port_ids
that will be used to create and update ports
"""
# Get networks from Neutron
# If net_ids is empty, this actually returns all available nets
auto_allocate = requested_networks and requested_networks.auto_allocate
net_ids = [request.network_id for request in ordered_networks]
nets = self._get_available_networks(context, instance.project_id,
net_ids, neutron=neutron,
auto_allocate=auto_allocate)
if not nets:
if requested_networks:
# There are no networks available for the project to use and
# none specifically requested, so check to see if we're asked
# to auto-allocate the network.
if auto_allocate:
# During validate_networks we checked to see if
# auto-allocation is available so we don't need to do that
# again here.
nets = [self._auto_allocate_network(instance, neutron)]
else:
# NOTE(chaochin): If user specifies a network id and the
# network can not be found, raise NetworkNotFound error.
for request in requested_networks:
if not request.port_id and request.network_id:
raise exception.NetworkNotFound(
network_id=request.network_id)
else:
# no requested nets and user has no available nets
return {}
# if this function is directly called without a requested_network param
# or if it is indirectly called through allocate_port_for_instance()
# with None params=(network_id=None, requested_ip=None, port_id=None,
# pci_request_id=None):
if (not requested_networks
or requested_networks.is_single_unspecified
or requested_networks.auto_allocate):
# If no networks were requested and none are available, consider
# it a bad request.
if not nets:
raise exception.InterfaceAttachFailedNoNetwork(
project_id=instance.project_id)
# bug/1267723 - if no network is requested and more
# than one is available then raise NetworkAmbiguous Exception
if len(nets) > 1:
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
ordered_networks.append(
objects.NetworkRequest(network_id=nets[0]['id']))
# NOTE(melwitt): check external net attach permission after the
# check for ambiguity, there could be another
# available net which is permitted bug/1364344
self._check_external_network_attach(context, nets)
return {net['id']: net for net in nets}
def _create_ports_for_instance(self, context, instance, ordered_networks,
nets, neutron, security_group_ids):
"""Create port for network_requests that don't have a port_id
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param ordered_networks: objects.NetworkRequestList in requested order
:param nets: a dict of network_id to networks returned from neutron
:param neutron: neutronclient using built from users request context
:param security_group_ids: a list of security_groups to go to neutron
:returns a list of pairs (NetworkRequest, created_port_uuid)
"""
created_port_ids = []
requests_and_created_ports = []
for request in ordered_networks:
network = nets.get(request.network_id)
# if network_id did not pass validate_networks() and not available
# here then skip it safely not continuing with a None Network
if not network:
continue
try:
port_security_enabled = network.get(
'port_security_enabled', True)
if port_security_enabled:
if not network.get('subnets'):
# Neutron can't apply security groups to a port
# for a network without L3 assignments.
LOG.debug('Network with port security enabled does '
'not have subnets so security groups '
'cannot be applied: %s',
network, instance=instance)
raise exception.SecurityGroupCannotBeApplied()
else:
if security_group_ids:
# We don't want to apply security groups on port
# for a network defined with
# 'port_security_enabled=False'.
LOG.debug('Network has port security disabled so '
'security groups cannot be applied: %s',
network, instance=instance)
raise exception.SecurityGroupCannotBeApplied()
created_port_id = None
if not request.port_id:
# create minimal port, if port not already created by user
created_port = self._create_port_minimal(
neutron, instance, request.network_id,
request.address, security_group_ids)
created_port_id = created_port['id']
created_port_ids.append(created_port_id)
requests_and_created_ports.append((
request, created_port_id))
except Exception:
with excutils.save_and_reraise_exception():
if created_port_ids:
self._delete_ports(
neutron, instance, created_port_ids)
return requests_and_created_ports
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocate network resources for the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param requested_networks: optional value containing
network_id, fixed_ip, and port_id
:param security_groups: security groups to allocate for instance
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
:param dhcp_options: None or a set of key/value pairs that should
determine the DHCP BOOTP response, eg. for PXE booting an instance
configured with the baremetal hypervisor. It is expected that these
are already formatted for the neutron v2 api.
See nova/virt/driver.py:dhcp_options_for_instance for an example.
:param bind_host_id: the host ID to attach to the ports being created.
"""
LOG.debug('allocate_for_instance()', instance=instance)
if not instance.project_id:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance.uuid)
# We do not want to create a new neutron session for each call
neutron = get_client(context)
#
# Validate ports and networks with neutron
#
requested_networks = kwargs.get('requested_networks')
ports, ordered_networks = self._validate_requested_port_ids(
context, instance, neutron, requested_networks)
nets = self._validate_requested_network_ids(
context, instance, neutron, requested_networks, ordered_networks)
if not nets:
LOG.debug("No network configured", instance=instance)
return network_model.NetworkInfo([])
#
# Create any ports that might be required,
# after validating requested security groups
#
security_groups = self._clean_security_groups(
kwargs.get('security_groups', []))
security_group_ids = self._process_security_groups(
instance, neutron, security_groups)
requests_and_created_ports = self._create_ports_for_instance(
context, instance, ordered_networks, nets, neutron,
security_group_ids)
#
# Update existing and newly created ports
#
dhcp_opts = kwargs.get('dhcp_options')
bind_host_id = kwargs.get('bind_host_id')
hypervisor_macs = kwargs.get('macs', None)
available_macs = _filter_hypervisor_macs(instance, ports,
hypervisor_macs)
# We always need admin_client to build nw_info,
# we sometimes need it when updating ports
admin_client = get_client(context, admin=True)
ordered_nets, ordered_ports, preexisting_port_ids, \
created_port_ids = self._update_ports_for_instance(
context, instance,
neutron, admin_client, requests_and_created_ports, nets,
bind_host_id, dhcp_opts, available_macs)
#
# Perform a full update of the network_info_cache,
# including re-fetching lots of the required data from neutron
#
nw_info = self.get_instance_nw_info(
context, instance, networks=ordered_nets,
port_ids=ordered_ports,
admin_client=admin_client,
preexisting_port_ids=preexisting_port_ids,
update_cells=True)
# NOTE(danms): Only return info about ports we created in this run.
# In the initial allocation case, this will be everything we created,
# and in later runs will only be what was created that time. Thus,
# this only affects the attach case, not the original use for this
# method.
return network_model.NetworkInfo([vif for vif in nw_info
if vif['id'] in created_port_ids +
preexisting_port_ids])
def _update_ports_for_instance(self, context, instance, neutron,
admin_client, requests_and_created_ports, nets,
bind_host_id, dhcp_opts, available_macs):
"""Create port for network_requests that don't have a port_id
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param neutron: client using user context
:param admin_client: client using admin context
:param requests_and_created_ports: [(NetworkRequest, created_port_id)]
:param nets: a dict of network_id to networks returned from neutron
:param bind_host_id: a string for port['binding:host_id']
:param dhcp_opts: a list dicts that contain dhcp option name and value
e.g. [{'opt_name': 'tftp-server', 'opt_value': '1.2.3.4'}]
:param available_macs: a list of available mac addresses
"""
# The neutron client and port_client (either the admin context or
# tenant context) are read here. The reason for this is that there are
# a number of different calls for the instance allocation.
# We require admin creds to set port bindings.
port_client = (neutron if not
self._has_port_binding_extension(context,
refresh_cache=True, neutron=neutron) else
admin_client)
preexisting_port_ids = []
created_port_ids = []
ports_in_requested_order = []
nets_in_requested_order = []
created_vifs = [] # this list is for cleanups if we fail
for request, created_port_id in requests_and_created_ports:
vifobj = objects.VirtualInterface(context)
vifobj.instance_uuid = instance.uuid
vifobj.tag = request.tag if 'tag' in request else None
network = nets.get(request.network_id)
# if network_id did not pass validate_networks() and not available
# here then skip it safely not continuing with a None Network
if not network:
continue
nets_in_requested_order.append(network)
zone = 'compute:%s' % instance.availability_zone
port_req_body = {'port': {'device_id': instance.uuid,
'device_owner': zone}}
try:
self._populate_neutron_extension_values(
context, instance, request.pci_request_id, port_req_body,
network=network, neutron=neutron,
bind_host_id=bind_host_id)
self._populate_pci_mac_address(instance,
request.pci_request_id, port_req_body)
self._populate_mac_address(
instance, port_req_body, available_macs)
if dhcp_opts is not None:
port_req_body['port']['extra_dhcp_opts'] = dhcp_opts
if created_port_id:
port_id = created_port_id
created_port_ids.append(port_id)
else:
port_id = request.port_id
ports_in_requested_order.append(port_id)
# After port is created, update other bits
updated_port = self._update_port(
port_client, instance, port_id, port_req_body)
# NOTE(danms): The virtual_interfaces table enforces global
# uniqueness on MAC addresses, which clearly does not match
# with neutron's view of the world. Since address is a 255-char
# string we can namespace it with our port id. Using '/' should
# be safely excluded from MAC address notations as well as
# UUIDs. We could stop doing this when we remove
# nova-network, but we'd need to leave the read translation in
# for longer than that of course.
vifobj.address = '%s/%s' % (updated_port['mac_address'],
updated_port['id'])
vifobj.uuid = port_id
vifobj.create()
created_vifs.append(vifobj)
if not created_port_id:
# only add if update worked and port create not called
preexisting_port_ids.append(port_id)
self._update_port_dns_name(context, instance, network,
ports_in_requested_order[-1],
neutron)
except Exception:
with excutils.save_and_reraise_exception():
self._unbind_ports(context,
preexisting_port_ids,
neutron, port_client)
self._delete_ports(neutron, instance, created_port_ids)
for vif in created_vifs:
vif.destroy()
return (nets_in_requested_order, ports_in_requested_order,
preexisting_port_ids, created_port_ids)
def _refresh_neutron_extensions_cache(self, context, neutron=None):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync)
>= CONF.neutron.extension_sync_interval)):
if neutron is None:
neutron = get_client(context)
extensions_list = neutron.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = {ext['name']: ext for ext in extensions_list}
def _has_port_binding_extension(self, context, refresh_cache=False,
neutron=None):
if refresh_cache:
self._refresh_neutron_extensions_cache(context, neutron=neutron)
return constants.PORTBINDING_EXT in self.extensions
def _has_auto_allocate_extension(self, context, refresh_cache=False,
neutron=None):
if refresh_cache or not self.extensions:
self._refresh_neutron_extensions_cache(context, neutron=neutron)
return constants.AUTO_ALLOCATE_TOPO_EXT in self.extensions
@staticmethod
def _populate_neutron_binding_profile(instance, pci_request_id,
port_req_body):
"""Populate neutron binding:profile.
Populate it with SR-IOV related information
"""
if pci_request_id:
pci_dev = pci_manager.get_instance_pci_devs(
instance, pci_request_id).pop()
profile = get_pci_device_profile(pci_dev)
port_req_body['port']['binding:profile'] = profile
@staticmethod
def _populate_pci_mac_address(instance, pci_request_id, port_req_body):
"""Add the updated MAC address value to the update_port request body.
Currently this is done only for PF passthrough.
"""
if pci_request_id is not None:
pci_devs = pci_manager.get_instance_pci_devs(
instance, pci_request_id)
if len(pci_devs) != 1:
# NOTE(ndipanov): We shouldn't ever get here since
# InstancePCIRequest instances built from network requests
# only ever index a single device, which needs to be
# successfully claimed for this to be called as part of
# allocate_networks method
LOG.error(_LE("PCI request %s does not have a "
"unique device associated with it. Unable to "
"determine MAC address"),
                          pci_request_id, instance=instance)
return
pci_dev = pci_devs[0]
if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF:
try:
mac = pci_utils.get_mac_by_pci_address(pci_dev.address)
except exception.PciDeviceNotFoundById as e:
LOG.error(
_LE("Could not determine MAC address for %(addr)s, "
"error: %(e)s"),
{"addr": pci_dev.address, "e": e}, instance=instance)
else:
port_req_body['port']['mac_address'] = mac
def _populate_neutron_extension_values(self, context, instance,
pci_request_id, port_req_body,
network=None, neutron=None,
bind_host_id=None):
"""Populate neutron extension values for the instance.
If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.
"""
self._refresh_neutron_extensions_cache(context, neutron=neutron)
if constants.QOS_QUEUE in self.extensions:
flavor = instance.get_flavor()
rxtx_factor = flavor.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
has_port_binding_extension = (
self._has_port_binding_extension(context, neutron=neutron))
if has_port_binding_extension:
port_req_body['port']['binding:host_id'] = bind_host_id
self._populate_neutron_binding_profile(instance,
pci_request_id,
port_req_body)
if constants.DNS_INTEGRATION in self.extensions:
# If the DNS integration extension is enabled in Neutron, most
# ports will get their dns_name attribute set in the port create or
# update requests in allocate_for_instance. So we just add the
# dns_name attribute to the payload of those requests. The
# exception is when the port binding extension is enabled in
# Neutron and the port is on a network that has a non-blank
# dns_domain attribute. This case requires to be processed by
# method _update_port_dns_name
if (not has_port_binding_extension
or not network.get('dns_domain')):
port_req_body['port']['dns_name'] = instance.hostname
def _update_port_dns_name(self, context, instance, network, port_id,
neutron):
"""Update an instance port dns_name attribute with instance.hostname.
The dns_name attribute of a port on a network with a non-blank
dns_domain attribute will be sent to the external DNS service
(Designate) if DNS integration is enabled in Neutron. This requires the
assignment of the dns_name to the port to be done with a Neutron client
using the user's context. allocate_for_instance uses a port with admin
context if the port binding extensions is enabled in Neutron. In this
case, we assign in this method the dns_name attribute to the port with
an additional update request. Only a very small fraction of ports will
require this additional update request.
"""
if (constants.DNS_INTEGRATION in self.extensions and
self._has_port_binding_extension(context) and
network.get('dns_domain')):
try:
port_req_body = {'port': {'dns_name': instance.hostname}}
neutron.update_port(port_id, port_req_body)
except neutron_client_exc.BadRequest:
LOG.warning(_LW('Neutron error: Instance hostname '
'%(hostname)s is not a valid DNS name'),
{'hostname': instance.hostname}, instance=instance)
msg = (_('Instance hostname %(hostname)s is not a valid DNS '
'name') % {'hostname': instance.hostname})
raise exception.InvalidInput(reason=msg)
def _delete_ports(self, neutron, instance, ports, raise_if_fail=False):
exceptions = []
for port in ports:
try:
neutron.delete_port(port)
except neutron_client_exc.NeutronClientException as e:
if e.status_code == 404:
LOG.warning(_LW("Port %s does not exist"), port,
instance=instance)
else:
exceptions.append(e)
LOG.warning(
_LW("Failed to delete port %s for instance."),
port, instance=instance, exc_info=True)
if len(exceptions) > 0 and raise_if_fail:
raise exceptions[0]
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug('deallocate_for_instance()', instance=instance)
search_opts = {'device_id': instance.uuid}
neutron = get_client(context)
data = neutron.list_ports(**search_opts)
ports = [port['id'] for port in data.get('ports', [])]
requested_networks = kwargs.get('requested_networks') or []
# NOTE(danms): Temporary and transitional
if isinstance(requested_networks, objects.NetworkRequestList):
requested_networks = requested_networks.as_tuples()
ports_to_skip = set([port_id for nets, fips, port_id, pci_request_id
in requested_networks])
# NOTE(boden): requested_networks only passed in when deallocating
# from a failed build / spawn call. Therefore we need to include
# preexisting ports when deallocating from a standard delete op
# in which case requested_networks is not provided.
ports_to_skip |= set(self._get_preexisting_port_ids(instance))
ports = set(ports) - ports_to_skip
# Reset device_id and device_owner for the ports that are skipped
self._unbind_ports(context, ports_to_skip, neutron)
# Delete the rest of the ports
self._delete_ports(neutron, instance, ports, raise_if_fail=True)
# deallocate vifs (mac addresses)
objects.VirtualInterface.delete_by_instance_uuid(
context, instance.uuid)
# NOTE(arosen): This clears out the network_cache only if the instance
# hasn't already been deleted. This is needed when an instance fails to
# launch and is rescheduled onto another compute node. If the instance
# has already been deleted this call does nothing.
base_api.update_instance_cache_with_nw_info(self, context, instance,
network_model.NetworkInfo([]))
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None,
bind_host_id=None):
"""Allocate a port for the instance."""
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=network_id,
address=requested_ip,
port_id=port_id,
pci_request_id=None)])
return self.allocate_for_instance(context, instance,
requested_networks=requested_networks,
bind_host_id=bind_host_id)
def deallocate_port_for_instance(self, context, instance, port_id):
"""Remove a specified port from the instance.
Return network information for the instance
"""
neutron = get_client(context)
preexisting_ports = self._get_preexisting_port_ids(instance)
if port_id in preexisting_ports:
self._unbind_ports(context, [port_id], neutron)
else:
self._delete_ports(neutron, instance, [port_id],
raise_if_fail=True)
# Delete the VirtualInterface for the given port_id.
vif = objects.VirtualInterface.get_by_uuid(context, port_id)
if vif:
vif.destroy()
else:
LOG.debug('VirtualInterface not found for port: %s',
port_id, instance=instance)
return self.get_instance_nw_info(context, instance)
def list_ports(self, context, **search_opts):
"""List ports for the client based on search options."""
return get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
"""Return the port for the client given the port id.
:param context: Request context.
:param port_id: The id of port to be queried.
:returns: A dict containing port data keyed by 'port', e.g.
::
{'port': {'port_id': 'abcd',
'fixed_ip_address': '1.2.3.4'}}
"""
return dict(port=self._show_port(context, port_id))
def _show_port(self, context, port_id, neutron_client=None, fields=None):
"""Return the port for the client given the port id.
:param context: Request context.
:param port_id: The id of port to be queried.
:param neutron_client: A neutron client.
:param fields: The condition fields to query port data.
:returns: A dict of port data.
e.g. {'port_id': 'abcd', 'fixed_ip_address': '1.2.3.4'}
"""
if not neutron_client:
neutron_client = get_client(context)
try:
if fields:
result = neutron_client.show_port(port_id, fields=fields)
else:
result = neutron_client.show_port(port_id)
return result.get('port')
except neutron_client_exc.PortNotFoundClient:
raise exception.PortNotFound(port_id=port_id)
except neutron_client_exc.Unauthorized:
raise exception.Forbidden()
except neutron_client_exc.NeutronClientException as exc:
msg = (_("Failed to access port %(port_id)s: %(reason)s") %
{'port_id': port_id, 'reason': exc})
raise exception.NovaException(message=msg)
def _get_instance_nw_info(self, context, instance, networks=None,
port_ids=None, admin_client=None,
preexisting_port_ids=None, **kwargs):
# NOTE(danms): This is an inner method intended to be called
# by other code that updates instance nwinfo. It *must* be
# called with the refresh_cache-%(instance_uuid) lock held!
LOG.debug('_get_instance_nw_info()', instance=instance)
# Ensure that we have an up to date copy of the instance info cache.
# Otherwise multiple requests could collide and cause cache
# corruption.
compute_utils.refresh_info_cache_for_instance(context, instance)
nw_info = self._build_network_info_model(context, instance, networks,
port_ids, admin_client,
preexisting_port_ids)
return network_model.NetworkInfo.hydrate(nw_info)
def _gather_port_ids_and_networks(self, context, instance, networks=None,
port_ids=None):
"""Return an instance's complete list of port_ids and networks."""
if ((networks is None and port_ids is not None) or
(port_ids is None and networks is not None)):
message = _("This method needs to be called with either "
"networks=None and port_ids=None or port_ids and "
"networks as not none.")
raise exception.NovaException(message=message)
ifaces = compute_utils.get_nw_info_for_instance(instance)
# This code path is only done when refreshing the network_cache
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
if networks is None:
networks = self._get_available_networks(context,
instance.project_id,
net_ids)
# an interface was added/removed from instance.
else:
# Prepare the network ids list for validation purposes
networks_ids = [network['id'] for network in networks]
# Validate that interface networks doesn't exist in networks.
# Though this issue can and should be solved in methods
# that prepare the networks list, this method should have this
# ignore-duplicate-networks/port-ids mechanism to reduce the
# probability of failing to boot the VM.
networks = networks + [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces
if _is_not_duplicate(iface['network']['id'],
networks_ids,
"networks",
instance)]
# Include existing interfaces so they are not removed from the db.
# Validate that the interface id is not in the port_ids
port_ids = [iface['id'] for iface in ifaces
if _is_not_duplicate(iface['id'],
port_ids,
"port_ids",
instance)] + port_ids
return networks, port_ids
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Add a fixed IP to the instance from specified network."""
neutron = get_client(context)
search_opts = {'network_id': network_id}
data = neutron.list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
if not ipam_subnets:
raise exception.NetworkNotFoundForInstance(
instance_id=instance.uuid)
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone,
'network_id': network_id}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
for subnet in ipam_subnets:
fixed_ips = p['fixed_ips']
fixed_ips.append({'subnet_id': subnet['id']})
port_req_body = {'port': {'fixed_ips': fixed_ips}}
try:
neutron.update_port(p['id'], port_req_body)
return self._get_instance_nw_info(context, instance)
except Exception as ex:
msg = ("Unable to update port %(portid)s on subnet "
"%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex}, instance=instance)
raise exception.NetworkNotFoundForInstance(
instance_id=instance.uuid)
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed IP from the instance."""
neutron = get_client(context)
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
neutron.update_port(p['id'], port_req_body)
except Exception as ex:
msg = ("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex},
instance=instance)
return self._get_instance_nw_info(context, instance)
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance.uuid, ip=address)
def _get_port_vnic_info(self, context, neutron, port_id):
"""Retrieve port vnic info
Invoked with a valid port_id.
Return vnic type and the attached physical network name.
"""
phynet_name = None
port = self._show_port(context, port_id, neutron_client=neutron,
fields=['binding:vnic_type', 'network_id'])
vnic_type = port.get('binding:vnic_type',
network_model.VNIC_TYPE_NORMAL)
if vnic_type in network_model.VNIC_TYPES_SRIOV:
net_id = port['network_id']
net = neutron.show_network(net_id,
fields='provider:physical_network').get('network')
phynet_name = net.get('provider:physical_network')
return vnic_type, phynet_name
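    # Illustrative sketch (not part of the original module): for an SR-IOV port
    # this helper returns something like ('direct', 'physnet1'), while a plain
    # virtio port yields ('normal', None), since the physical network is only
    # looked up for SR-IOV VNIC types. (Values hypothetical.)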
def create_pci_requests_for_sriov_ports(self, context, pci_requests,
requested_networks):
"""Check requested networks for any SR-IOV port request.
Create a PCI request object for each SR-IOV port, and add it to the
pci_requests object that contains a list of PCI request object.
"""
if not requested_networks:
return
neutron = get_client(context, admin=True)
for request_net in requested_networks:
phynet_name = None
vnic_type = network_model.VNIC_TYPE_NORMAL
if request_net.port_id:
vnic_type, phynet_name = self._get_port_vnic_info(
context, neutron, request_net.port_id)
pci_request_id = None
if vnic_type in network_model.VNIC_TYPES_SRIOV:
spec = {pci_request.PCI_NET_TAG: phynet_name}
dev_type = pci_request.DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type)
if dev_type:
spec[pci_request.PCI_DEVICE_TYPE_TAG] = dev_type
request = objects.InstancePCIRequest(
count=1,
spec=[spec],
request_id=str(uuid.uuid4()))
pci_requests.requests.append(request)
pci_request_id = request.request_id
# Add pci_request_id into the requested network
request_net.pci_request_id = pci_request_id
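    # Illustrative sketch (not part of the original module): for an SR-IOV port
    # of a PF-type VNIC on physnet2, the generated InstancePCIRequest spec would
    # look roughly like this (tag names from nova.pci.request, values
    # hypothetical):
    #
    #   spec = [{'physical_network': 'physnet2', 'dev_type': 'type-PF'}]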
def _can_auto_allocate_network(self, context, neutron):
"""Helper method to determine if we can auto-allocate networks
:param context: nova request context
:param neutron: neutron client
:returns: True if it's possible to auto-allocate networks, False
otherwise.
"""
# check that the auto-allocated-topology extension is available
if self._has_auto_allocate_extension(context, neutron=neutron):
# run the dry-run validation, which will raise a 409 if not ready
try:
neutron.validate_auto_allocated_topology_requirements(
context.project_id)
LOG.debug('Network auto-allocation is available for project '
'%s', context.project_id)
except neutron_client_exc.Conflict as ex:
LOG.debug('Unable to auto-allocate networks. %s',
six.text_type(ex))
else:
return True
else:
LOG.debug('Unable to auto-allocate networks. The neutron '
'auto-allocated-topology extension is not available.')
return False
def _auto_allocate_network(self, instance, neutron):
"""Automatically allocates a network for the given project.
:param instance: create the network for the project that owns this
instance
:param neutron: neutron client
:returns: Details of the network that was created.
:raises: nova.exception.UnableToAutoAllocateNetwork
:raises: nova.exception.NetworkNotFound
"""
project_id = instance.project_id
LOG.debug('Automatically allocating a network for project %s.',
project_id, instance=instance)
try:
topology = neutron.get_auto_allocated_topology(
project_id)['auto_allocated_topology']
except neutron_client_exc.Conflict:
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
try:
network = neutron.show_network(topology['id'])['network']
except neutron_client_exc.NetworkNotFoundClient:
# This shouldn't happen since we just created the network, but
# handle it anyway.
LOG.error(_LE('Automatically allocated network %(network_id)s '
'was not found.'), {'network_id': topology['id']},
instance=instance)
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
LOG.debug('Automatically allocated network: %s', network,
instance=instance)
return network
def _ports_needed_per_instance(self, context, neutron, requested_networks):
# TODO(danms): Remove me when all callers pass an object
if requested_networks and isinstance(requested_networks[0], tuple):
requested_networks = objects.NetworkRequestList.from_tuples(
requested_networks)
ports_needed_per_instance = 0
if (requested_networks is None or len(requested_networks) == 0 or
requested_networks.auto_allocate):
nets = self._get_available_networks(context, context.project_id,
neutron=neutron)
if len(nets) > 1:
# Attaching to more than one network by default doesn't
# make sense, as the order will be arbitrary and the guest OS
# won't know which to configure
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
if not nets and (
requested_networks and requested_networks.auto_allocate):
# If there are no networks available to this project and we
# were asked to auto-allocate a network, check to see that we
# can do that first.
LOG.debug('No networks are available for project %s; checking '
'to see if we can automatically allocate a network.',
context.project_id)
if not self._can_auto_allocate_network(context, neutron):
raise exception.UnableToAutoAllocateNetwork(
project_id=context.project_id)
ports_needed_per_instance = 1
else:
net_ids_requested = []
for request in requested_networks:
if request.port_id:
port = self._show_port(context, request.port_id,
neutron_client=neutron)
if port.get('device_id', None):
raise exception.PortInUse(port_id=request.port_id)
deferred_ip = port.get('ip_allocation') == 'deferred'
# NOTE(carl_baldwin) A deferred IP port doesn't have an
# address here. If it fails to get one later when nova
# updates it with host info, Neutron will error which
# raises an exception.
if not deferred_ip and not port.get('fixed_ips'):
raise exception.PortRequiresFixedIP(
port_id=request.port_id)
request.network_id = port['network_id']
else:
ports_needed_per_instance += 1
net_ids_requested.append(request.network_id)
# NOTE(jecarey) There is currently a race condition.
# That is, if you have more than one request for a specific
# fixed IP at the same time then only one will be allocated
# the ip. The fixed IP will be allocated to only one of the
# instances that will run. The second instance will fail on
# spawn. That instance will go into error state.
# TODO(jecarey) Need to address this race condition once we
# have the ability to update mac addresses in Neutron.
if request.address:
# TODO(jecarey) Need to look at consolidating list_port
# calls once able to OR filters.
search_opts = {'network_id': request.network_id,
'fixed_ips': 'ip_address=%s' % (
request.address),
'fields': 'device_id'}
existing_ports = neutron.list_ports(
**search_opts)['ports']
if existing_ports:
i_uuid = existing_ports[0]['device_id']
raise exception.FixedIpAlreadyInUse(
address=request.address,
instance_uuid=i_uuid)
# Now check to see if all requested networks exist
if net_ids_requested:
nets = self._get_available_networks(
context, context.project_id, net_ids_requested,
neutron=neutron)
for net in nets:
if not net.get('subnets'):
raise exception.NetworkRequiresSubnet(
network_uuid=net['id'])
if len(nets) != len(net_ids_requested):
requested_netid_set = set(net_ids_requested)
returned_netid_set = set([net['id'] for net in nets])
lostid_set = requested_netid_set - returned_netid_set
if lostid_set:
id_str = ''
for _id in lostid_set:
id_str = id_str and id_str + ', ' + _id or _id
raise exception.NetworkNotFound(network_id=id_str)
return ports_needed_per_instance
def validate_networks(self, context, requested_networks, num_instances):
"""Validate that the tenant can use the requested networks.
Return the number of instances than can be successfully allocated
with the requested network configuration.
"""
LOG.debug('validate_networks() for %s', requested_networks)
neutron = get_client(context)
ports_needed_per_instance = self._ports_needed_per_instance(
context, neutron, requested_networks)
# Note(PhilD): Ideally Nova would create all required ports as part of
# network validation, but port creation requires some details
# from the hypervisor. So we just check the quota and return
# how many of the requested number of instances can be created
if ports_needed_per_instance:
quotas = neutron.show_quota(context.project_id)['quota']
if quotas.get('port', -1) == -1:
# Unlimited Port Quota
return num_instances
# We only need the port count so only ask for ids back.
params = dict(tenant_id=context.project_id, fields=['id'])
ports = neutron.list_ports(**params)['ports']
free_ports = quotas.get('port') - len(ports)
if free_ports < 0:
msg = (_("The number of defined ports: %(ports)d "
"is over the limit: %(quota)d") %
{'ports': len(ports),
'quota': quotas.get('port')})
raise exception.PortLimitExceeded(msg)
ports_needed = ports_needed_per_instance * num_instances
if free_ports >= ports_needed:
return num_instances
else:
return free_ports // ports_needed_per_instance
return num_instances
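    # Worked example (not part of the original module) of the quota arithmetic
    # above: with a port quota of 10, 4 existing ports and 2 ports needed per
    # instance, free_ports = 6; a request for 5 instances would need 10 ports,
    # so the method returns 6 // 2 = 3 instances.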
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given IP address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def _get_port_id_by_fixed_address(self, client,
instance, address):
"""Return port_id from a fixed address."""
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associate a floating IP with a fixed IP."""
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
        # not find why this parameter exists.
client = get_client(context)
port_id = self._get_port_id_by_fixed_address(client, instance,
fixed_address)
fip = self._get_floating_ip_by_address(client, floating_address)
param = {'port_id': port_id,
'fixed_ip_address': fixed_address}
client.update_floatingip(fip['id'], {'floatingip': param})
if fip['port_id']:
port = self._show_port(context, fip['port_id'],
neutron_client=client)
orig_instance_uuid = port['device_id']
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info(_LI('re-assign floating IP %(address)s from '
'instance %(instance_id)s'), msg_dict,
instance=instance)
orig_instance = objects.Instance.get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
orig_instance)
def get_all(self, context):
"""Get all networks for client."""
client = get_client(context)
networks = client.list_networks().get('networks')
network_objs = []
for network in networks:
network_objs.append(objects.Network(context=context,
name=network['name'],
label=network['name'],
uuid=network['id']))
return objects.NetworkList(context=context,
objects=network_objs)
def get(self, context, network_uuid):
"""Get specific network for client."""
client = get_client(context)
try:
network = client.show_network(network_uuid).get('network') or {}
except neutron_client_exc.NetworkNotFoundClient:
raise exception.NetworkNotFound(network_id=network_uuid)
net_obj = objects.Network(context=context,
name=network['name'],
label=network['name'],
uuid=network['id'])
return net_obj
def delete(self, context, network_uuid):
"""Delete a network for client."""
raise NotImplementedError()
def disassociate(self, context, network_uuid):
"""Disassociate a network for client."""
raise NotImplementedError()
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate a network for client."""
raise NotImplementedError()
def get_fixed_ip(self, context, id):
"""Get a fixed IP from the id."""
raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
"""Return instance uuids given an address."""
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, context, client, port_id):
if not port_id:
return {}
port = self._show_port(context, port_id, neutron_client=client)
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return {i['id']: i for i in pools}
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return {p['id']: p for p in ports}
def get_floating_ip(self, context, id):
"""Return floating IP object given the floating IP id."""
client = get_client(context)
try:
fip = client.show_floatingip(id)['floatingip']
except neutron_client_exc.NeutronClientException as e:
if e.status_code == 404:
raise exception.FloatingIpNotFound(id=id)
else:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to access floating IP %s'), id)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(context, client, fip['port_id'])
return self._make_floating_ip_obj(context, fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {constants.NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
"""Return floating IP pool names."""
client = get_client(context)
pools = self._get_floating_ip_pools(client)
# Note(salv-orlando): Return a list of names to be consistent with
# nova.network.api.get_floating_ip_pools
return [n['name'] or n['id'] for n in pools]
def _make_floating_ip_obj(self, context, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
# NOTE(danms): Don't give these objects a context, since they're
# not lazy-loadable anyway
floating = objects.floating_ip.NeutronFloatingIP(
id=fip['id'], address=fip['floating_ip_address'],
pool=(pool['name'] or pool['id']), project_id=fip['tenant_id'],
fixed_ip_id=fip['port_id'])
# In Neutron v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
if fip['fixed_ip_address']:
floating.fixed_ip = objects.FixedIP(
address=fip['fixed_ip_address'])
else:
floating.fixed_ip = None
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
# NOTE(danms): This could be .refresh()d, so give it context
floating.instance = objects.Instance(context=context,
uuid=instance_uuid)
if floating.fixed_ip:
floating.fixed_ip.instance_uuid = instance_uuid
else:
floating.instance = None
return floating
def get_floating_ip_by_address(self, context, address):
"""Return a floating IP given an address."""
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(context, client, fip['port_id'])
return self._make_floating_ip_obj(context, fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = get_client(context)
project_id = context.project_id
fips = self._safe_get_floating_ips(client, tenant_id=project_id)
if not fips:
return []
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._make_floating_ip_obj(context, fip, pool_dict, port_dict)
for fip in fips]
def get_instance_id_by_floating_address(self, context, address):
"""Return the instance id a floating IP's fixed IP is allocated to."""
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
port = self._show_port(context, fip['port_id'], neutron_client=client)
return port['device_id']
def get_vifs_by_instance(self, context, instance):
raise NotImplementedError()
def get_vif_by_mac_address(self, context, mac_address):
raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def allocate_floating_ip(self, context, pool=None):
"""Add a floating IP to a project from a pool."""
client = get_client(context)
pool = pool or CONF.default_floating_pool
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
param = {'floatingip': {'floating_network_id': pool_id}}
try:
fip = client.create_floatingip(param)
except (neutron_client_exc.IpAddressGenerationFailureClient,
neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
raise exception.NoMoreFloatingIps(six.text_type(e))
except neutron_client_exc.OverQuotaClient as e:
raise exception.FloatingIpLimitExceeded(six.text_type(e))
except neutron_client_exc.BadRequest as e:
raise exception.FloatingIpBadRequest(six.text_type(e))
return fip['floatingip']['floating_ip_address']
def _safe_get_floating_ips(self, client, **kwargs):
"""Get floating IP gracefully handling 404 from Neutron."""
try:
return client.list_floatingips(**kwargs)['floatingips']
# If a neutron plugin does not implement the L3 API a 404 from
# list_floatingips will be raised.
except neutron_client_exc.NotFound:
return []
except neutron_client_exc.NeutronClientException as e:
# bug/1513879 neutron client is currently using
# NeutronClientException when there is no L3 API
if e.status_code == 404:
return []
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to access floating IP for %s'),
', '.join(['%s %s' % (k, v)
for k, v in six.iteritems(kwargs)]))
def _get_floating_ip_by_address(self, client, address):
"""Get floating IP from floating IP address."""
if not address:
raise exception.FloatingIpNotFoundForAddress(address=address)
fips = self._safe_get_floating_ips(client, floating_ip_address=address)
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floating IPs from fixed IP and port."""
return self._safe_get_floating_ips(client, fixed_ip_address=fixed_ip,
port_id=port)
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating IP with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
        # not find why this parameter exists.
self._release_floating_ip(context, address)
def disassociate_and_release_floating_ip(self, context, instance,
floating_ip):
"""Removes (deallocates) and deletes the floating IP.
This api call was added to allow this to be done in one operation
if using neutron.
"""
self._release_floating_ip(context, floating_ip['address'],
raise_if_associated=False)
def _release_floating_ip(self, context, address,
raise_if_associated=True):
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if raise_if_associated and fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
client.delete_floatingip(fip['id'])
@base_api.refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociate a floating IP from the instance."""
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
        # not find why this parameter exists.
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
# NOTE(wenjianhn): just pass to make migrate instance doesn't
# raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
self._update_port_binding_for_instance(context, instance,
migration['dest_compute'])
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force add a network to the project."""
raise NotImplementedError()
def _nw_info_get_ips(self, client, port):
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
return network_IPs
def _nw_info_get_subnets(self, context, port, network_IPs):
subnets = self._get_subnets_from_port(context, port)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
return subnets
def _nw_info_build_network(self, port, networks, subnets):
network_name = None
network_mtu = None
for net in networks:
if port['network_id'] == net['id']:
network_name = net['name']
tenant_id = net['tenant_id']
network_mtu = net.get('mtu')
break
else:
tenant_id = port['tenant_id']
LOG.warning(_LW("Network %(id)s not matched with the tenants "
"network! The ports tenant %(tenant_id)s will be "
"used."),
{'id': port['network_id'], 'tenant_id': tenant_id})
bridge = None
ovs_interfaceid = None
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
port_details = port.get('binding:vif_details', {})
if vif_type == network_model.VIF_TYPE_OVS:
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
CONF.neutron.ovs_bridge)
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
"brq" + port['network_id'])
should_create_bridge = True
elif vif_type == network_model.VIF_TYPE_DVS:
# The name of the DVS port group will contain the neutron
# network id
bridge = port['network_id']
elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
port_details.get(network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)):
bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
CONF.neutron.ovs_bridge)
ovs_interfaceid = port['id']
# Prune the bridge name if necessary. For the DVS this is not done
# as the bridge is a '<network-name>-<network-UUID>'.
if bridge is not None and vif_type != network_model.VIF_TYPE_DVS:
bridge = bridge[:network_model.NIC_NAME_LEN]
network = network_model.Network(
id=port['network_id'],
bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=tenant_id,
mtu=network_mtu
)
network['subnets'] = subnets
port_profile = port.get('binding:profile')
if port_profile:
physical_network = port_profile.get('physical_network')
if physical_network:
network['physical_network'] = physical_network
if should_create_bridge is not None:
network['should_create_bridge'] = should_create_bridge
return network, ovs_interfaceid
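    # Illustrative sketch (not part of the original module): for a port bound
    # with VIF_TYPE_BRIDGE on a network whose id starts with '4e8e5957-...',
    # the bridge computed above would be 'brq' + the network id, truncated to
    # network_model.NIC_NAME_LEN characters. (Network id hypothetical.)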
def _get_preexisting_port_ids(self, instance):
"""Retrieve the preexisting ports associated with the given instance.
These ports were not created by nova and hence should not be
deallocated upon instance deletion.
"""
net_info = compute_utils.get_nw_info_for_instance(instance)
if not net_info:
LOG.debug('Instance cache missing network info.',
instance=instance)
return [vif['id'] for vif in net_info
if vif.get('preserve_on_delete')]
def _build_network_info_model(self, context, instance, networks=None,
port_ids=None, admin_client=None,
preexisting_port_ids=None):
"""Return list of ordered VIFs attached to instance.
:param context: Request context.
:param instance: Instance we are returning network info for.
:param networks: List of networks being attached to an instance.
If value is None this value will be populated
from the existing cached value.
:param port_ids: List of port_ids that are being attached to an
instance in order of attachment. If value is None
this value will be populated from the existing
cached value.
:param admin_client: A neutron client for the admin context.
:param preexisting_port_ids: List of port_ids that nova didn't
                                     allocate and therefore shouldn't be deleted when
an instance is de-allocated. Supplied list will
be added to the cached list of preexisting port
IDs for this instance.
"""
search_opts = {'tenant_id': instance.project_id,
'device_id': instance.uuid, }
if admin_client is None:
client = get_client(context, admin=True)
else:
client = admin_client
data = client.list_ports(**search_opts)
current_neutron_ports = data.get('ports', [])
nw_info_refresh = networks is None and port_ids is None
networks, port_ids = self._gather_port_ids_and_networks(
context, instance, networks, port_ids)
nw_info = network_model.NetworkInfo()
if preexisting_port_ids is None:
preexisting_port_ids = []
preexisting_port_ids = set(
preexisting_port_ids + self._get_preexisting_port_ids(instance))
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
vif_active = False
if (current_neutron_port['admin_state_up'] is False
or current_neutron_port['status'] == 'ACTIVE'):
vif_active = True
network_IPs = self._nw_info_get_ips(client,
current_neutron_port)
subnets = self._nw_info_get_subnets(context,
current_neutron_port,
network_IPs)
devname = "tap" + current_neutron_port['id']
devname = devname[:network_model.NIC_NAME_LEN]
network, ovs_interfaceid = (
self._nw_info_build_network(current_neutron_port,
networks, subnets))
preserve_on_delete = (current_neutron_port['id'] in
preexisting_port_ids)
nw_info.append(network_model.VIF(
id=current_neutron_port['id'],
address=current_neutron_port['mac_address'],
network=network,
vnic_type=current_neutron_port.get('binding:vnic_type',
network_model.VNIC_TYPE_NORMAL),
type=current_neutron_port.get('binding:vif_type'),
profile=current_neutron_port.get('binding:profile'),
details=current_neutron_port.get('binding:vif_details'),
ovs_interfaceid=ovs_interfaceid,
devname=devname,
active=vif_active,
preserve_on_delete=preserve_on_delete))
elif nw_info_refresh:
LOG.info(_LI('Port %s from network info_cache is no '
'longer associated with instance in Neutron. '
'Removing from network info_cache.'), port_id,
instance=instance)
return nw_info
def _get_subnets_from_port(self, context, port):
"""Return the subnets for a given port."""
fixed_ips = port['fixed_ips']
# No fixed_ips for the port means there is no subnet associated
# with the network the port is created on.
# Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which are not
# related to the port. To avoid this, the method returns here.
if not fixed_ips:
return []
search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
data = get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway_ip'],
type='gateway'),
}
# attempt to populate DHCP server field
search_opts = {'network_id': subnet['network_id'],
'device_owner': 'network:dhcp'}
data = get_client(context).list_ports(**search_opts)
dhcp_ports = data.get('ports', [])
for p in dhcp_ports:
for ip_pair in p['fixed_ips']:
if ip_pair['subnet_id'] == subnet['id']:
subnet_dict['dhcp_server'] = ip_pair['ip_address']
break
subnet_object = network_model.Subnet(**subnet_dict)
for dns in subnet.get('dns_nameservers', []):
subnet_object.add_dns(
network_model.IP(address=dns, type='dns'))
for route in subnet.get('host_routes', []):
subnet_object.add_route(
network_model.Route(cidr=route['destination'],
gateway=network_model.IP(
address=route['nexthop'],
type='gateway')))
subnets.append(subnet_object)
return subnets
def get_dns_domains(self, context):
"""Return a list of available dns domains.
These can be used to create DNS entries for floating IPs.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
raise NotImplementedError()
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
def setup_instance_network_on_host(self, context, instance, host):
"""Setup network for specified instance on host."""
self._update_port_binding_for_instance(context, instance, host)
def cleanup_instance_network_on_host(self, context, instance, host):
"""Cleanup network for specified instance on host."""
pass
def _get_pci_mapping_for_migration(self, context, instance):
"""Get the mapping between the old PCI devices and the new PCI
devices that have been allocated during this migration. The
correlation is based on PCI request ID which is unique per PCI
devices for SR-IOV ports.
:param context: The request context.
:param instance: Get PCI mapping for this instance.
:Returns: dictionary of mapping {'<old pci address>': <New PciDevice>}
"""
migration_context = instance.migration_context
if not migration_context:
return {}
old_pci_devices = migration_context.old_pci_devices
new_pci_devices = migration_context.new_pci_devices
if old_pci_devices and new_pci_devices:
LOG.debug("Determining PCI devices mapping using migration"
"context: old_pci_devices: %(old)s, "
"new_pci_devices: %(new)s" %
{'old': [dev for dev in old_pci_devices],
'new': [dev for dev in new_pci_devices]})
return {old.address: new
for old in old_pci_devices
for new in new_pci_devices
if old.request_id == new.request_id}
return {}
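    # Illustrative sketch (not part of the original module): the returned
    # mapping correlates old and new SR-IOV devices by PCI request id, e.g.
    # (addresses hypothetical):
    #
    #   {'0000:04:00.2': <PciDevice with address '0000:82:00.3'>}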
def _update_port_binding_for_instance(self, context, instance, host):
if not self._has_port_binding_extension(context, refresh_cache=True):
return
neutron = get_client(context, admin=True)
search_opts = {'device_id': instance.uuid,
'tenant_id': instance.project_id}
data = neutron.list_ports(**search_opts)
pci_mapping = None
port_updates = []
ports = data['ports']
for p in ports:
updates = {}
# If the host hasn't changed, like in the case of resizing to the
# same host, there is nothing to do.
if p.get('binding:host_id') != host:
updates['binding:host_id'] = host
# Update port with newly allocated PCI devices. Even if the
# resize is happening on the same host, a new PCI device can be
# allocated.
vnic_type = p.get('binding:vnic_type')
if vnic_type in network_model.VNIC_TYPES_SRIOV:
if not pci_mapping:
pci_mapping = self._get_pci_mapping_for_migration(context,
instance)
binding_profile = p.get('binding:profile', {})
pci_slot = binding_profile.get('pci_slot')
new_dev = pci_mapping.get(pci_slot)
if new_dev:
updates['binding:profile'] = \
get_pci_device_profile(new_dev)
else:
raise exception.PortUpdateFailed(port_id=p['id'],
reason=_("Unable to correlate PCI slot %s") %
pci_slot)
port_updates.append((p['id'], updates))
# Avoid rolling back updates if we catch an error above.
# TODO(lbeliveau): Batch up the port updates in one neutron call.
for port_id, updates in port_updates:
if updates:
LOG.info(_LI("Updating port %(port)s with "
"attributes %(attributes)s"),
{"port": p['id'], "attributes": updates},
instance=instance)
try:
neutron.update_port(port_id, {'port': updates})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Unable to update binding details "
"for port %s"),
port_id, instance=instance)
def update_instance_vnic_index(self, context, instance, vif, index):
"""Update instance vnic index.
When the 'VNIC index' extension is supported this method will update
the vnic index of the instance on the port.
"""
self._refresh_neutron_extensions_cache(context)
if constants.VNIC_INDEX_EXT in self.extensions:
neutron = get_client(context)
port_req_body = {'port': {'vnic_index': index}}
try:
neutron.update_port(vif['id'], port_req_body)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to update instance VNIC index '
'for port %s.'),
vif['id'], instance=instance)
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
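# Illustrative sketch (not part of the original module) of the helper above:
# with accessor=lambda net: net['id'], unordered=[{'id': 'b'}, {'id': 'a'}] and
# preferred=['a', 'b'], the list is sorted in place to [{'id': 'a'}, {'id': 'b'}].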
| 46.455642 | 79 | 0.593353 |
85de88bfcfa5efe6812675a9361115bf5fe04aa6 | 127 | py | Python | setup.py | ceavelasquezpi/neuralnilm | 184b2301333e49828d29064c59496f82c89dcbad | ["Apache-2.0"] | 135 | 2015-08-14T14:38:36.000Z | 2022-03-02T10:29:49.000Z | setup.py | ceavelasquezpi/neuralnilm | 184b2301333e49828d29064c59496f82c89dcbad | ["Apache-2.0"] | 12 | 2016-03-21T12:12:25.000Z | 2019-12-07T06:05:26.000Z | setup.py | ceavelasquezpi/neuralnilm | 184b2301333e49828d29064c59496f82c89dcbad | ["Apache-2.0"] | 82 | 2015-09-24T01:02:39.000Z | 2022-01-18T16:05:20.000Z |
from setuptools import setup, find_packages
setup(
name='NeuralNILM',
version='0.0.1',
packages=find_packages()
)
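# Note (not part of the original file): a minimal setup script like this is
# typically installed in development mode from the repository root, e.g. with
# `pip install -e .`.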
| 15.875 | 43 | 0.692913 |
ba4e201a27118e20ab85bfcaab0bc21037d3aaac | 3,149 | py | Python | Tests/Plot/LamWind/test_Slot_26_plot.py | helene-t/pyleecan | 8362de9b0e32b346051b38192e07f3a6974ea9aa | ["Apache-2.0"] | 2 | 2019-06-08T15:04:39.000Z | 2020-09-07T13:32:22.000Z | Tests/Plot/LamWind/test_Slot_26_plot.py | lyhehehe/pyleecan | 421e9a843bf30d796415c77dc934546adffd1cd7 | ["Apache-2.0"] | null | null | null | Tests/Plot/LamWind/test_Slot_26_plot.py | lyhehehe/pyleecan | 421e9a843bf30d796415c77dc934546adffd1cd7 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
from os.path import join
import matplotlib.pyplot as plt
from numpy import array, pi, zeros
from pyleecan.Classes.Frame import Frame
from pyleecan.Classes.LamSlotWind import LamSlotWind
from pyleecan.Classes.LamSquirrelCage import LamSquirrelCage
from pyleecan.Classes.MachineDFIM import MachineDFIM
from pyleecan.Classes.Shaft import Shaft
from pyleecan.Classes.VentilationCirc import VentilationCirc
from pyleecan.Classes.VentilationPolar import VentilationPolar
from pyleecan.Classes.VentilationTrap import VentilationTrap
from pyleecan.Classes.Winding import Winding
from pyleecan.Classes.WindingUD import WindingUD
from pyleecan.Classes.WindingCW2LT import WindingCW2LT
from pyleecan.Classes.WindingDW2L import WindingDW2L
from pyleecan.Classes.MatMagnetics import MatMagnetics
from pyleecan.Classes.SlotW26 import SlotW26
from Tests import save_plot_path as save_path
from Tests.Plot.LamWind import wind_mat
"""unittest for Lamination with winding plot"""
def test_Lam_Wind_26_wind_22():
"""Test machine plot with Slot 26 and winding rad=2, tan=2
"""
print("\nTest plot Slot 26")
plt.close("all")
test_obj = MachineDFIM()
test_obj.rotor = LamSlotWind(
Rint=0.2, Rext=0.5, is_internal=True, is_stator=False, L1=0.9, Nrvd=2, Wrvd=0.05
)
test_obj.rotor.axial_vent = [
VentilationCirc(Zh=6, Alpha0=pi / 6, D0=60e-3, H0=0.35)
]
test_obj.rotor.slot = SlotW26(
Zs=6, W0=20e-3, R1=30e-3, R2=20e-3, H0=20e-3, H1=20e-3
)
test_obj.rotor.winding = WindingUD(user_wind_mat=wind_mat, qs=4, p=4, Lewout=60e-3)
test_obj.rotor.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
test_obj.shaft = Shaft(Drsh=test_obj.rotor.Rint * 2, Lshaft=1)
test_obj.stator = LamSlotWind(
Rint=0.51,
Rext=0.8,
is_internal=False,
is_stator=True,
L1=0.9,
Nrvd=2,
Wrvd=0.05,
)
test_obj.stator.winding = WindingDW2L(qs=3, p=3)
test_obj.stator.slot = SlotW26(
Zs=18, W0=40e-3, R1=60e-3, R2=70e-3, H0=20e-3, H1=40e-3
)
test_obj.stator.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
test_obj.stator.winding.Lewout = 60e-3
test_obj.frame = Frame(Rint=0.8, Rext=0.9, Lfra=1)
test_obj.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Wind_s26_1-Machine.png"))
# Rotor + Stator + 2 for frame + 1 for shaft
assert len(fig.axes[0].patches) == 73
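    # i.e. rotor (32) + stator (38) + frame (2) + shaft (1) = 73 patches,
    # matching the per-lamination counts asserted below.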
test_obj.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Wind_s26_2-Rotor.png"))
# 2 for lam + 6 vent + 4*Zs for wind
assert len(fig.axes[0].patches) == 32
test_obj.stator.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Wind_s26_3-Stator.png"))
# 2 for lam + Zs*2 for wind
assert len(fig.axes[0].patches) == 38
tooth = test_obj.rotor.slot.get_surface_tooth()
tooth.plot(color="r")
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Wind_s26_Tooth_in.png"))
tooth = test_obj.stator.slot.get_surface_tooth()
tooth.plot(color="r")
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Wind_s26_Tooth_out.png"))
| 34.228261 | 88 | 0.703715 |
7ce0a469c6c6642fb6ed03ab573445ff6d4da0d9 | 19,676 | py | Python | mega_core/config/defaults.py | djiajunustc/mega.pytorch | 96a640e8dc270091de38bf350c05b7378c2911d7 | ["BSD-2-Clause"] | null | null | null | mega_core/config/defaults.py | djiajunustc/mega.pytorch | 96a640e8dc270091de38bf350c05b7378c2911d7 | ["BSD-2-Clause"] | null | null | null | mega_core/config/defaults.py | djiajunustc/mega.pytorch | 96a640e8dc270091de38bf350c05b7378c2911d7 | ["BSD-2-Clause"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the maximum image side during training will be
# INPUT.MAX_SIZE_TRAIN, while for testing it will be
# INPUT.MAX_SIZE_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
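# Illustrative usage sketch (not part of the original file): a yacs CfgNode
# built from these defaults is usually cloned and then overridden per
# experiment, e.g.:
#
#   cfg = _C.clone()
#   cfg.merge_from_file("configs/my_experiment.yaml")  # hypothetical path
#   cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
#   cfg.freeze()
#
# clone(), merge_from_file(), merge_from_list() and freeze() are standard
# yacs.config.CfgNode methods.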
_C.MODEL = CN()
_C.MODEL.RPN_ONLY = False
_C.MODEL.MASK_ON = False
_C.MODEL.RETINANET_ON = False
_C.MODEL.KEYPOINT_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
_C.MODEL.CLS_AGNOSTIC_BBOX_REG = False
# If the WEIGHT starts with a catalog://, like :R-50, the code will look for
# the path in paths_catalog. Else, it will use it as the specified absolute
# path
_C.MODEL.WEIGHT = ""
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the smallest side of the image during training
_C.INPUT.MIN_SIZE_TRAIN = (800,) # (800,)
# Maximum size of the side of the image during training
_C.INPUT.MAX_SIZE_TRAIN = 1333
# Size of the smallest side of the image during testing
_C.INPUT.MIN_SIZE_TEST = 800
# Maximum size of the side of the image during testing
_C.INPUT.MAX_SIZE_TEST = 1333
# Values to be used for image normalization
_C.INPUT.PIXEL_MEAN = [102.9801, 115.9465, 122.7717]
# Values to be used for image normalization
_C.INPUT.PIXEL_STD = [1., 1., 1.]
# Convert image to BGR format (for Caffe2 models), in range 0-255
_C.INPUT.TO_BGR255 = True
# Image ColorJitter
_C.INPUT.BRIGHTNESS = 0.0
_C.INPUT.CONTRAST = 0.0
_C.INPUT.SATURATION = 0.0
_C.INPUT.HUE = 0.0
# Flips
_C.INPUT.HORIZONTAL_FLIP_PROB_TRAIN = 0.5
_C.INPUT.VERTICAL_FLIP_PROB_TRAIN = 0.0
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training, as present in paths_catalog.py
_C.DATASETS.TRAIN = ()
# List of the dataset names for testing, as present in paths_catalog.py
_C.DATASETS.TEST = ()
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
_C.DATALOADER.SIZE_DIVISIBILITY = 0
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
# The backbone conv body to use
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN
# backbone)
_C.MODEL.BACKBONE.CONV_BODY = "R-50-C4"
# Add StopGrad at a specified stage so the bottom layers are frozen
_C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.FPN = CN()
_C.MODEL.FPN.USE_GN = False
_C.MODEL.FPN.USE_RELU = False
# ---------------------------------------------------------------------------- #
# Group Norm options
# ---------------------------------------------------------------------------- #
_C.MODEL.GROUP_NORM = CN()
# Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS)
_C.MODEL.GROUP_NORM.DIM_PER_GP = -1
# Number of groups in GroupNorm (-1 if using DIM_PER_GP)
_C.MODEL.GROUP_NORM.NUM_GROUPS = 32
# GroupNorm's small constant in the denominator
_C.MODEL.GROUP_NORM.EPSILON = 1e-5
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.USE_FPN = False
# Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input
_C.MODEL.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512)
# Stride of the feature map that RPN is attached.
# For FPN, number of strides should match number of scales
_C.MODEL.RPN.ANCHOR_STRIDE = (16,)
# RPN anchor aspect ratios
_C.MODEL.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.STRADDLE_THRESH = 0
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example)
_C.MODEL.RPN.FG_IOU_THRESHOLD = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example)
_C.MODEL.RPN.BG_IOU_THRESHOLD = 0.3
# Total number of RPN examples per image
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOP_N_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOP_N_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
_C.MODEL.RPN.POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOP_N_TEST = 1000
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (at the scale used during training or inference)
_C.MODEL.RPN.MIN_SIZE = 0
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
# Apply the post NMS per batch (default) or per image during training
# (default is True to be consistent with Detectron, see Issue #672)
_C.MODEL.RPN.FPN_POST_NMS_PER_BATCH = True
# Custom rpn head, empty to use default conv or separable conv
_C.MODEL.RPN.RPN_HEAD = "SingleConvRPNHead"
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
_C.MODEL.ROI_HEADS.USE_FPN = False
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.5
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
_C.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.5
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH
# E.g., a common configuration is: 512 * 2 * 8 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
_C.MODEL.ROI_HEADS.SCORE_THRESH = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS = 0.5
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
_C.MODEL.ROI_HEADS.DETECTIONS_PER_IMG = 100
_C.MODEL.ROI_BOX_HEAD = CN()
_C.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_BOX_HEAD.PREDICTOR = "FastRCNNPredictor"
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_BOX_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 81
# Hidden layer dimension when using an MLP for the RoI box head
_C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024
# GN
_C.MODEL.ROI_BOX_HEAD.USE_GN = False
# Dilation
_C.MODEL.ROI_BOX_HEAD.DILATION = 1
_C.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM = 256
_C.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS = 4
_C.MODEL.ROI_MASK_HEAD = CN()
_C.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_MASK_HEAD.PREDICTOR = "MaskRCNNC4Predictor"
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_MASK_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_MASK_HEAD.CONV_LAYERS = (256, 256, 256, 256)
_C.MODEL.ROI_MASK_HEAD.RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True
# Whether or not resize and translate masks to the input image.
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS = False
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD = 0.5
# Dilation
_C.MODEL.ROI_MASK_HEAD.DILATION = 1
# GN
_C.MODEL.ROI_MASK_HEAD.USE_GN = False
_C.MODEL.ROI_KEYPOINT_HEAD = CN()
_C.MODEL.ROI_KEYPOINT_HEAD.FEATURE_EXTRACTOR = "KeypointRCNNFeatureExtractor"
_C.MODEL.ROI_KEYPOINT_HEAD.PREDICTOR = "KeypointRCNNPredictor"
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_KEYPOINT_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_KEYPOINT_HEAD.CONV_LAYERS = tuple(512 for _ in range(8))
_C.MODEL.ROI_KEYPOINT_HEAD.RESOLUTION = 14
_C.MODEL.ROI_KEYPOINT_HEAD.NUM_CLASSES = 17
_C.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt})
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Residual transformation function
_C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithFixedBatchNorm"
# ResNet's stem function (conv1 and pool1)
_C.MODEL.RESNETS.STEM_FUNC = "StemWithFixedBatchNorm"
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
_C.MODEL.RESNETS.BACKBONE_OUT_CHANNELS = 256 * 4
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
_C.MODEL.RESNETS.STAGE_WITH_DCN = (False, False, False, False)
_C.MODEL.RESNETS.WITH_MODULATED_DCN = False
_C.MODEL.RESNETS.DEFORMABLE_GROUPS = 1
# ---------------------------------------------------------------------------- #
# RetinaNet Options (Follow the Detectron version)
# ---------------------------------------------------------------------------- #
_C.MODEL.RETINANET = CN()
# This is the number of classes: foreground classes plus the background class.
_C.MODEL.RETINANET.NUM_CLASSES = 81
# Anchor aspect ratios to use
_C.MODEL.RETINANET.ANCHOR_SIZES = (32, 64, 128, 256, 512)
_C.MODEL.RETINANET.ASPECT_RATIOS = (0.5, 1.0, 2.0)
_C.MODEL.RETINANET.ANCHOR_STRIDES = (8, 16, 32, 64, 128)
_C.MODEL.RETINANET.STRADDLE_THRESH = 0
# Anchor scales per octave
_C.MODEL.RETINANET.OCTAVE = 2.0
_C.MODEL.RETINANET.SCALES_PER_OCTAVE = 3
# Use C5 or P5 to generate P6
_C.MODEL.RETINANET.USE_C5 = True
# Convolutions to use in the cls and bbox tower
# NOTE: this doesn't include the last conv for logits
_C.MODEL.RETINANET.NUM_CONVS = 4
# Weight for bbox_regression loss
_C.MODEL.RETINANET.BBOX_REG_WEIGHT = 4.0
# Smooth L1 loss beta for bbox regression
_C.MODEL.RETINANET.BBOX_REG_BETA = 0.11
# During inference, #locs to select based on cls score before NMS is performed
# per FPN level
_C.MODEL.RETINANET.PRE_NMS_TOP_N = 1000
# IoU overlap ratio for labeling an anchor as positive
# Anchors with >= iou overlap are labeled positive
_C.MODEL.RETINANET.FG_IOU_THRESHOLD = 0.5
# IoU overlap ratio for labeling an anchor as negative
# Anchors with < iou overlap are labeled negative
_C.MODEL.RETINANET.BG_IOU_THRESHOLD = 0.4
# Focal loss parameter: alpha
_C.MODEL.RETINANET.LOSS_ALPHA = 0.25
# Focal loss parameter: gamma
_C.MODEL.RETINANET.LOSS_GAMMA = 2.0
# Prior prob for the positives at the beginning of training. This is used to set
# the bias init for the logits layer
_C.MODEL.RETINANET.PRIOR_PROB = 0.01
# Inference cls score threshold, anchors with score > INFERENCE_TH are
# considered for inference
_C.MODEL.RETINANET.INFERENCE_TH = 0.05
# NMS threshold used in RetinaNet
_C.MODEL.RETINANET.NMS_TH = 0.4
# ---------------------------------------------------------------------------- #
# FBNet options
# ---------------------------------------------------------------------------- #
_C.MODEL.FBNET = CN()
_C.MODEL.FBNET.ARCH = "default"
# custom arch
_C.MODEL.FBNET.ARCH_DEF = ""
_C.MODEL.FBNET.BN_TYPE = "bn"
_C.MODEL.FBNET.SCALE_FACTOR = 1.0
# the output channels will be divisible by WIDTH_DIVISOR
_C.MODEL.FBNET.WIDTH_DIVISOR = 1
_C.MODEL.FBNET.DW_CONV_SKIP_BN = True
_C.MODEL.FBNET.DW_CONV_SKIP_RELU = True
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.DET_HEAD_LAST_SCALE = 1.0
_C.MODEL.FBNET.DET_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.DET_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.KPTS_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.KPTS_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.KPTS_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.MASK_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.MASK_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.MASK_HEAD_STRIDE = 0
# 0 to use all blocks defined in arch_def
_C.MODEL.FBNET.RPN_HEAD_BLOCKS = 0
_C.MODEL.FBNET.RPN_BN_TYPE = ""
# ---------------------------------------------------------------------------- #
# VID specific options
# ---------------------------------------------------------------------------- #
_C.MODEL.VID = CN()
_C.MODEL.VID.ENABLE = False
_C.MODEL.VID.METHOD = "base"
_C.MODEL.VID.IGNORE = False
_C.MODEL.VID.FLOWNET_WEIGHT = "models/flownet.ckpt"
# ROI_BOX_HEAD config in VID
_C.MODEL.VID.ROI_BOX_HEAD = CN()
_C.MODEL.VID.ROI_BOX_HEAD.REDUCE_CHANNEL = False
# attention
_C.MODEL.VID.ROI_BOX_HEAD.ATTENTION = CN()
_C.MODEL.VID.ROI_BOX_HEAD.ATTENTION.ENABLE = False
_C.MODEL.VID.ROI_BOX_HEAD.ATTENTION.EMBED_DIM = 64
_C.MODEL.VID.ROI_BOX_HEAD.ATTENTION.GROUP = 16
_C.MODEL.VID.ROI_BOX_HEAD.ATTENTION.STAGE = 2
_C.MODEL.VID.ROI_BOX_HEAD.ATTENTION.ADVANCED_STAGE = 0
# RPN config in VID
_C.MODEL.VID.RPN = CN()
_C.MODEL.VID.RPN.REF_PRE_NMS_TOP_N = 6000
_C.MODEL.VID.RPN.REF_POST_NMS_TOP_N = 75
# RDN
_C.MODEL.VID.RDN = CN()
_C.MODEL.VID.RDN.MIN_OFFSET = -18
_C.MODEL.VID.RDN.MAX_OFFSET = 18
_C.MODEL.VID.RDN.ALL_FRAME_INTERVAL = 37
_C.MODEL.VID.RDN.KEY_FRAME_LOCATION = 18
_C.MODEL.VID.RDN.REF_NUM = 2
_C.MODEL.VID.RDN.RATIO = 0.2
# MEGA
_C.MODEL.VID.MEGA = CN()
_C.MODEL.VID.MEGA.MIN_OFFSET = -12
_C.MODEL.VID.MEGA.MAX_OFFSET = 12
_C.MODEL.VID.MEGA.ALL_FRAME_INTERVAL = 25
_C.MODEL.VID.MEGA.KEY_FRAME_LOCATION = 12
_C.MODEL.VID.MEGA.MEMORY = CN()
_C.MODEL.VID.MEGA.MEMORY.ENABLE = True
_C.MODEL.VID.MEGA.MEMORY.SIZE = 25
_C.MODEL.VID.MEGA.GLOBAL = CN()
_C.MODEL.VID.MEGA.GLOBAL.RES_STAGE = 1
_C.MODEL.VID.MEGA.GLOBAL.ENABLE = True
_C.MODEL.VID.MEGA.GLOBAL.SIZE = 10
_C.MODEL.VID.MEGA.GLOBAL.SHUFFLE = True
_C.MODEL.VID.MEGA.REF_NUM_LOCAL = 2
_C.MODEL.VID.MEGA.REF_NUM_MEM = 3
_C.MODEL.VID.MEGA.REF_NUM_GLOBAL = 2
_C.MODEL.VID.MEGA.RATIO = 0.2
# FGFA
_C.MODEL.VID.FGFA = CN()
_C.MODEL.VID.FGFA.MIN_OFFSET = -9
_C.MODEL.VID.FGFA.MAX_OFFSET = 9
_C.MODEL.VID.FGFA.ALL_FRAME_INTERVAL = 19
_C.MODEL.VID.FGFA.KEY_FRAME_LOCATION = 9
_C.MODEL.VID.FGFA.REF_NUM = 2
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 2
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000,)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.CHECKPOINT_PERIOD = 2500
_C.SOLVER.TEST_PERIOD = 0
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 16
# ---------------------------------------------------------------------------- #
# Specific test options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
_C.TEST.EXPECTED_RESULTS = []
_C.TEST.EXPECTED_RESULTS_SIGMA_TOL = 4
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.TEST.IMS_PER_BATCH = 8
# Number of detections per image
_C.TEST.DETECTIONS_PER_IMG = 100
# ---------------------------------------------------------------------------- #
# Test-time augmentations for bounding box detection
# See configs/test_time_aug/e2e_mask_rcnn_R-50-FPN_1x.yaml for an example
# ---------------------------------------------------------------------------- #
_C.TEST.BBOX_AUG = CN()
# Enable test-time augmentation for bounding box detection if True
_C.TEST.BBOX_AUG.ENABLED = False
# Horizontal flip at the original scale (id transform)
_C.TEST.BBOX_AUG.H_FLIP = False
# Each scale is the pixel size of an image's shortest side
_C.TEST.BBOX_AUG.SCALES = ()
# Max pixel size of the longer side
_C.TEST.BBOX_AUG.MAX_SIZE = 4000
# Horizontal flip at each scale
_C.TEST.BBOX_AUG.SCALE_H_FLIP = False
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.OUTPUT_DIR = "."
_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")
# ---------------------------------------------------------------------------- #
# Precision options
# ---------------------------------------------------------------------------- #
# Precision of input, allowable: (float32, float16)
_C.DTYPE = "float32"
# Enable verbosity in apex.amp
_C.AMP_VERBOSE = False
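# ---------------------------------------------------------------------------- #
# Usage sketch (illustrative addition, not part of the original file): how
# defaults like these are typically consumed via the yacs CfgNode API
# (clone / merge_from_file / freeze), assuming CN above is yacs.config.CfgNode.
# The YAML path used below is a hypothetical placeholder.
# ---------------------------------------------------------------------------- #
def _demo_load_cfg(yaml_path="configs/demo_experiment.yaml"):
    # Work on a copy so the module-level defaults stay untouched.
    cfg = _C.clone()
    # Override defaults with experiment-specific values from a YAML file.
    cfg.merge_from_file(yaml_path)
    # Freeze the config to catch accidental mutation during training/inference.
    cfg.freeze()
    return cfg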
| 36.437037
| 83
| 0.649726
|
7ac406d933e95a9581fd7a7f6b5a7320d001109d
| 180,984
|
py
|
Python
|
number_generator_code_part.py
|
haojiang2020/Number-Generator
|
9941b28333961f291ea954a71690871b4e0fc2db
|
[
"BSD-3-Clause"
] | null | null | null |
number_generator_code_part.py
|
haojiang2020/Number-Generator
|
9941b28333961f291ea954a71690871b4e0fc2db
|
[
"BSD-3-Clause"
] | null | null | null |
number_generator_code_part.py
|
haojiang2020/Number-Generator
|
9941b28333961f291ea954a71690871b4e0fc2db
|
[
"BSD-3-Clause"
] | null | null | null |
# number generator & identifier
# This Python file is the core part of number generation
# # When generating a new mixed number together with a member number, please run 'number_mix_generate' and then 'number_member_generate'
# # When generating a new organization number, please run 'number_organization_generate'
# # When generating a new manipulation number, please run 'number_manipulation_generate'
from random import uniform
from random import seed
from datetime import datetime
def number_mix_generate(in_list_name, in_initial_list):
# to generate mixed number, 21 digits
# input:
# in_list_name = [[given name 0, family name 0] / None,
# [given name 1, family name 1] / None,
# [given name 2, family name 2] / None, ...]
# in_initial_list = [*, *, *]
# each of the *s is from 26 letters
# output:
# out_result = [[out number 0, given name 0, family name 0,
# if exists English name 0, Date-UTC 0, Time-UTC 0] / None,
# [out number 1, given name 1, family name 1,
# if exists English name 1, Date-UTC 1, Time-UTC 1] / None,
# [out number 2, given name 2, family name 2,
# if exists English name 2, Date-UTC 2, Time-UTC 2] / None, ...]
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
English_name_capital = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z")
English_name_other = (" ", "-", "'")
out_result = []
temp_bool_0 = True
if isinstance(in_initial_list, list) | isinstance(in_initial_list, tuple):
if len(in_initial_list) == 3:
init_char_0 = in_initial_list[0]
init_char_1 = in_initial_list[1]
init_char_2 = in_initial_list[2]
if (isinstance(init_char_0, str) & isinstance(init_char_1, str) &
isinstance(init_char_2, str)):
init_char_0 = init_char_0.strip().upper()
init_char_1 = init_char_1.strip().upper()
init_char_2 = init_char_2.strip().upper()
if ((init_char_0 in English_name_capital) & (init_char_1 in English_name_capital) &
(init_char_2 in English_name_capital)):
first_3_digits = (init_char_0.lower()+init_char_1.lower()+init_char_2.lower(),
init_char_0.lower()+init_char_1.lower()+init_char_2,
init_char_0.lower()+init_char_1+init_char_2.lower(),
init_char_0.lower()+init_char_1+init_char_2,
init_char_0+init_char_1.lower()+init_char_2.lower(),
init_char_0+init_char_1.lower()+init_char_2,
init_char_0+init_char_1+init_char_2.lower(),
init_char_0+init_char_1+init_char_2)
else:
temp_bool_0 = False
else:
temp_bool_0 = False
else:
temp_bool_0 = False
else:
temp_bool_0 = False
if temp_bool_0:
if isinstance(in_list_name, list) | isinstance(in_list_name, tuple):
temp_len = len(in_list_name)
if temp_len > 0:
temp_time_now = datetime.utcnow()
temp_num_0 = temp_time_now.microsecond
temp_num_0 = temp_num_0*60+temp_time_now.second
temp_num_0 = temp_num_0*60+temp_time_now.minute
temp_num_0 = temp_num_0*24+temp_time_now.hour
seed(temp_num_0)
for n in range(temp_len):
temp_bool_1 = True
if in_list_name[n] is None:
out_result.append(None)
elif isinstance(in_list_name[n], list) | isinstance(in_list_name[n], tuple):
if len(in_list_name[n]) == 2:
if isinstance(in_list_name[n][0], str) & isinstance(in_list_name[n][1], str):
temp_gn_str = in_list_name[n][0].strip()
temp_len_1 = len(temp_gn_str)
temp_fn_str = in_list_name[n][1].strip()
temp_len_2 = len(temp_fn_str)
if temp_len_1 == 0:
if temp_len_2 == 0:
temp_gn_1 = 0
temp_gn_2 = 0
else:
temp_bool_1 = False
elif temp_len_1 == 1:
temp_str_1 = temp_gn_str[0]
temp_gn_1 = 0
for n2 in range(26):
if English_name_capital[n2] == temp_str_1:
temp_gn_1 = n2+1
break
if temp_gn_1 > 0:
temp_gn_2 = 0
else:
temp_bool_1 = False
else:
temp_str_1 = temp_gn_str[0]
temp_gn_1 = 0
for n2 in range(26):
if English_name_capital[n2] == temp_str_1:
temp_gn_1 = n2+1
break
if temp_gn_1 > 0:
temp_str_2 = temp_gn_str[1].upper()
temp_gn_2 = 0
for n2 in range(26):
if English_name_capital[n2] == temp_str_2:
temp_gn_2 = n2+1
break
if temp_gn_2 < 1:
temp_bool_1 = temp_str_2 in English_name_other
else:
temp_bool_1 = False
if temp_bool_1:
if temp_len_2 == 0:
temp_fn_1 = 0
temp_fn_2 = 0
elif temp_len_2 == 1:
temp_str_1 = temp_fn_str[0]
temp_fn_1 = 0
for n2 in range(26):
if English_name_capital[n2] == temp_str_1:
temp_fn_1 = n2+1
break
if temp_fn_1 > 0:
temp_fn_2 = 0
else:
temp_bool_1 = False
else:
temp_str_1 = temp_fn_str[0]
temp_fn_1 = 0
for n2 in range(26):
if English_name_capital[n2] == temp_str_1:
temp_fn_1 = n2+1
break
if temp_fn_1 > 0:
temp_str_2 = temp_fn_str[1].upper()
temp_fn_2 = 0
for n2 in range(26):
if English_name_capital[n2] == temp_str_2:
temp_fn_2 = n2+1
break
if temp_fn_2 < 1:
temp_bool_1 = temp_str_2 in English_name_other
else:
temp_bool_1 = False
if temp_bool_1:
temp_time_now = datetime.utcnow()
temp_year = temp_time_now.year
temp_year_3 = temp_year%10
temp_year_0 = int((temp_year-temp_year_3)/10)
temp_year_2 = temp_year_0%10
temp_year_0 = int((temp_year_0-temp_year_2)/10)
temp_year_1 = temp_year_0%10
temp_year_0 = int((temp_year_0-temp_year_1)/10)
temp_month = temp_time_now.month
temp_day = temp_time_now.day
temp_hour = temp_time_now.hour
temp_minute = temp_time_now.minute
temp_num_0 = 0
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_1_0 = int(temp_runif*8)
if temp_num_1_0 == 8:
temp_num_1_0 -= 1
out_num_str = first_3_digits[temp_num_1_0]
temp_str_3 = out_num_str[0]
temp_num_1_1 = -1
for n2 in range(64):
if long_digits[n2] == temp_str_3:
temp_num_1_1 = n2
break
init_num_0 = temp_num_1_1
temp_num_0 += temp_num_1_1
temp_str_3 = out_num_str[1]
temp_num_1_1 = -1
for n2 in range(64):
if long_digits[n2] == temp_str_3:
temp_num_1_1 = n2
break
init_num_1 = temp_num_1_1
temp_num_0 += temp_num_1_1
temp_str_3 = out_num_str[2]
temp_num_1_1 = -1
for n2 in range(64):
if long_digits[n2] == temp_str_3:
temp_num_1_1 = n2
break
init_num_2 = temp_num_1_1
temp_num_0 += temp_num_1_1
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_2_1 = int(temp_runif*2297)
if temp_num_2_1 == 2297:
temp_num_2_1 -= 1
temp_num_2_2 = temp_year_2
temp_num_2_2 = temp_num_2_2*27+temp_fn_2
temp_num_2_2 = temp_num_2_2*27+temp_gn_2
temp_num_2 = temp_num_2_2+temp_num_2_1*7297
temp_num_2_3 = temp_num_2%64
temp_num_2_0 = int((temp_num_2-temp_num_2_3)/64)
temp_num_2_2 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_2)/64)
temp_num_2_1 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_1)/64)
temp_num_2_0 = (temp_num_2_0+init_num_0)%64
temp_num_0 += temp_num_2_0
out_num_str = out_num_str+long_digits[temp_num_2_0]
temp_num_2_1 = (temp_num_2_1-init_num_1)%64
temp_num_0 += temp_num_2_1
out_num_str = out_num_str+long_digits[temp_num_2_1]
temp_num_2_2 = (temp_num_2_2+init_num_2)%64
temp_num_0 += temp_num_2_2
out_num_str = out_num_str+long_digits[temp_num_2_2]
temp_num_2_3 = (temp_num_2_3-init_num_0)%64
temp_num_0 += temp_num_2_3
out_num_str = out_num_str+long_digits[temp_num_2_3]
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_2_1 = int(temp_runif*2579)
if temp_num_2_1 == 2579:
temp_num_2_1 -= 1
temp_num_2_2 = temp_hour
temp_num_2_2 = temp_num_2_2*27+temp_gn_1
temp_num_2_2 = temp_num_2_2*10+temp_year_0
temp_num_2 = temp_num_2_2+temp_num_2_1*6481
temp_num_2_3 = temp_num_2%64
temp_num_2_0 = int((temp_num_2-temp_num_2_3)/64)
temp_num_2_2 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_2)/64)
temp_num_2_1 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_1)/64)
temp_num_2_0 = (temp_num_2_0+init_num_1)%64
temp_num_0 += temp_num_2_0
out_num_str = out_num_str+long_digits[temp_num_2_0]
temp_num_2_1 = (temp_num_2_1-init_num_2)%64
temp_num_0 += temp_num_2_1
out_num_str = out_num_str+long_digits[temp_num_2_1]
temp_num_2_2 = (temp_num_2_2+init_num_0)%64
temp_num_0 += temp_num_2_2
out_num_str = out_num_str+long_digits[temp_num_2_2]
temp_num_2_3 = (temp_num_2_3-init_num_1)%64
temp_num_0 += temp_num_2_3
out_num_str = out_num_str+long_digits[temp_num_2_3]
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_2_1 = int(temp_runif*6173)
if temp_num_2_1 == 6173:
temp_num_2_1 -= 1
temp_num_2_2 = temp_year_3
temp_num_2_2 = temp_num_2_2*10+temp_year_1
temp_num_2_2 = temp_num_2_2*27+temp_fn_1
temp_num_2 = temp_num_2_2+temp_num_2_1*2707
temp_num_2_3 = temp_num_2%64
temp_num_2_0 = int((temp_num_2-temp_num_2_3)/64)
temp_num_2_2 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_2)/64)
temp_num_2_1 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_1)/64)
temp_num_2_0 = (temp_num_2_0+init_num_2)%64
temp_num_0 += temp_num_2_0
out_num_str = out_num_str+long_digits[temp_num_2_0]
temp_num_2_1 = (temp_num_2_1-init_num_0)%64
temp_num_0 += temp_num_2_1
out_num_str = out_num_str+long_digits[temp_num_2_1]
temp_num_2_2 = (temp_num_2_2+init_num_1)%64
temp_num_0 += temp_num_2_2
out_num_str = out_num_str+long_digits[temp_num_2_2]
temp_num_2_3 = (temp_num_2_3-init_num_2)%64
temp_num_0 += temp_num_2_3
out_num_str = out_num_str+long_digits[temp_num_2_3]
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_2_1 = int(temp_runif*48049)
if temp_num_2_1 == 48049:
temp_num_2_1 -= 1
temp_num_2_2 = (temp_month-1)
temp_num_2_2 = temp_num_2_2*31+(temp_day-1)
temp_num_2_2 = temp_num_2_2*60+temp_minute
temp_num_2 = temp_num_2_2+temp_num_2_1*22343
temp_num_2_4 = temp_num_2%64
temp_num_2_0 = int((temp_num_2-temp_num_2_4)/64)
temp_num_2_3 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_3)/64)
temp_num_2_2 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_2)/64)
temp_num_2_1 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_1)/64)
temp_num_2_0 = (temp_num_2_0+init_num_0)%64
temp_num_0 += temp_num_2_0
out_num_str = out_num_str+long_digits[temp_num_2_0]
temp_num_2_1 = (temp_num_2_1-init_num_1)%64
temp_num_0 += temp_num_2_1
out_num_str = out_num_str+long_digits[temp_num_2_1]
temp_num_2_2 = (temp_num_2_2+init_num_2)%64
temp_num_0 += temp_num_2_2
out_num_str = out_num_str+long_digits[temp_num_2_2]
temp_num_2_3 = (temp_num_2_3-init_num_0)%64
temp_num_0 += temp_num_2_3
out_num_str = out_num_str+long_digits[temp_num_2_3]
temp_num_2_4 = (temp_num_2_4+init_num_1)%64
temp_num_0 += temp_num_2_4
out_num_str = out_num_str+long_digits[temp_num_2_4]
out_num_str = out_num_str+long_digits[temp_num_0%64]
temp_str_4 = str(temp_year_0)+str(temp_year_1)+str(temp_year_2)+str(temp_year_3)
temp_str_4 = temp_str_4+"-"
if temp_month < 10:
temp_str_4 = temp_str_4+"0"+str(temp_month)
else:
temp_str_4 = temp_str_4+str(temp_month)
temp_str_4 = temp_str_4+"-"
if temp_day < 10:
temp_str_4 = temp_str_4+"0"+str(temp_day)
else:
temp_str_4 = temp_str_4+str(temp_day)
temp_str_5 = ""
if temp_hour < 10:
temp_str_5 = temp_str_5+"0"+str(temp_hour)
else:
temp_str_5 = temp_str_5+str(temp_hour)
temp_str_5 = temp_str_5+":"
if temp_minute < 10:
temp_str_5 = temp_str_5+"0"+str(temp_minute)
else:
temp_str_5 = temp_str_5+str(temp_minute)
out_result.append([out_num_str, temp_gn_str, temp_fn_str, temp_gn_1 != 0, temp_str_4, temp_str_5])
else:
out_result.append(None)
else:
out_result.append(None)
else:
out_result.append(None)
else:
out_result.append(None)
else:
temp_bool_0 = False
else:
temp_bool_0 = False
if not temp_bool_0:
out_result = None
return out_result
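# Usage sketch (illustrative addition): the personal names and initials below
# are hypothetical placeholders, used only to show the expected input shapes.
def _demo_number_mix_generate():
    # One entry per person; a None entry is passed through as None in the output.
    people = [["Alice", "Smith"], ["Bob", "Jones"], None]
    result = number_mix_generate(people, ("A", "B", "C"))
    # Each non-None entry is [number, given name, family name,
    # English-name-exists flag, Date-UTC, Time-UTC].
    return result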
def num_mix_valid(mix_number, given_name = None, family_name = None,
year = None, month = None, day = None,
hour = None, minute = None):
    # check the validity of a mixed number
# input: mix_number, string of length 21
# given_name, string
# family_name, string
# year, integer
# month, integer
# day, integer
# hour, integer
# minute, integer
# output: bool
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
English_name_capital = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z")
English_name_other = (" ", "-", "'")
out_bool = True
if isinstance(mix_number, str):
mix_number = mix_number.strip()
if len(mix_number) == 21:
init_str_0 = mix_number[0]
init_str_1 = mix_number[1]
init_str_2 = mix_number[2]
if ((init_str_0.upper() in English_name_capital) &
(init_str_1.upper() in English_name_capital) &
(init_str_2.upper() in English_name_capital)):
temp_number_list = []
temp_num_1 = 0
temp_num_0 = -1
for n1 in range(64):
if long_digits[n1] == init_str_0:
temp_num_0 = n1
break
init_num_0 = temp_num_0
temp_num_1 += temp_num_0
temp_number_list.append(temp_num_0)
temp_num_0 = -1
for n1 in range(64):
if long_digits[n1] == init_str_1:
temp_num_0 = n1
break
init_num_1 = temp_num_0
temp_num_1 += temp_num_0
temp_number_list.append(temp_num_0)
temp_num_0 = -1
for n1 in range(64):
if long_digits[n1] == init_str_2:
temp_num_0 = n1
break
init_num_2 = temp_num_0
temp_num_1 += temp_num_0
temp_number_list.append(temp_num_0)
temp_num_2 = 0
for n in range(3, 21):
temp_num_0 = -1
temp_str_0 = mix_number[n]
for n1 in range(64):
if long_digits[n1] == temp_str_0:
temp_num_0 = n1
break
if temp_num_0 >= 0:
temp_number_list.append(temp_num_0)
if n < 20:
temp_num_1 += temp_num_0
else:
temp_num_2 += temp_num_0
else:
out_bool = False
break
if out_bool:
out_bool = temp_num_1%64 == temp_num_2
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
if out_bool:
temp_num_0 = (temp_number_list[3]-init_num_0)%64
temp_num_0 = temp_num_0*64+(temp_number_list[4]+init_num_1)%64
temp_num_0 = temp_num_0*64+(temp_number_list[5]-init_num_2)%64
temp_num_0 = temp_num_0*64+(temp_number_list[6]+init_num_0)%64
temp_num_0 = temp_num_0%7297
temp_num_1_2 = temp_num_0%27
temp_num_1_0 = int((temp_num_0-temp_num_1_2)/27)
temp_num_1_1 = temp_num_1_0%27
temp_num_1_0 = int((temp_num_1_0-temp_num_1_1)/27)
temp_num_0 = (temp_number_list[7]-init_num_1)%64
temp_num_0 = temp_num_0*64+(temp_number_list[8]+init_num_2)%64
temp_num_0 = temp_num_0*64+(temp_number_list[9]-init_num_0)%64
temp_num_0 = temp_num_0*64+(temp_number_list[10]+init_num_1)%64
temp_num_0 = temp_num_0%6481
temp_num_2_2 = temp_num_0%10
temp_num_2_0 = int((temp_num_0-temp_num_2_2)/10)
temp_num_2_1 = temp_num_2_0%27
temp_num_2_0 = int((temp_num_2_0-temp_num_2_1)/27)
temp_num_0 = (temp_number_list[11]-init_num_2)%64
temp_num_0 = temp_num_0*64+(temp_number_list[12]+init_num_0)%64
temp_num_0 = temp_num_0*64+(temp_number_list[13]-init_num_1)%64
temp_num_0 = temp_num_0*64+(temp_number_list[14]+init_num_2)%64
temp_num_0 = temp_num_0%2707
temp_num_3_2 = temp_num_0%27
temp_num_3_0 = int((temp_num_0-temp_num_3_2)/27)
temp_num_3_1 = temp_num_3_0%10
temp_num_3_0 = int((temp_num_3_0-temp_num_3_1)/10)
temp_num_0 = (temp_number_list[15]-init_num_0)%64
temp_num_0 = temp_num_0*64+(temp_number_list[16]+init_num_1)%64
temp_num_0 = temp_num_0*64+(temp_number_list[17]-init_num_2)%64
temp_num_0 = temp_num_0*64+(temp_number_list[18]+init_num_0)%64
temp_num_0 = temp_num_0*64+(temp_number_list[19]-init_num_1)%64
temp_num_0 = temp_num_0%22343
temp_num_4_2 = temp_num_0%60
temp_num_4_0 = int((temp_num_0-temp_num_4_2)/60)
temp_num_4_1 = temp_num_4_0%31
temp_num_4_0 = int((temp_num_4_0-temp_num_4_1)/31)
if (temp_num_2_0 >= 0) & (temp_num_2_0 < 24):
if (temp_num_4_2 >= 0) & (temp_num_4_2 < 60):
if ((temp_num_1_0 >= 0) & (temp_num_1_0 <= 9) &
(temp_num_2_2 >= 0) & (temp_num_2_2 <= 9) &
(temp_num_3_0 >= 0) & (temp_num_3_0 <= 9) &
(temp_num_3_1 >= 0) & (temp_num_3_1 <= 9)):
temp_num_1 = temp_num_2_2
temp_num_1 = temp_num_1*10+temp_num_3_1
temp_num_1 = temp_num_1*10+temp_num_1_0
temp_num_1 = temp_num_1*10+temp_num_3_0
temp_num_2 = temp_num_4_0+1
if (temp_num_2 > 0) & (temp_num_2 <= 12):
temp_num_3 = temp_num_4_1+1
if temp_num_2 in (1, 3, 5, 7, 8, 10, 12):
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 31)
elif temp_num_2 in (4, 6, 9, 11):
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 30)
else:
if temp_num_1%400 == 0:
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 29)
elif temp_num_1%100 == 0:
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 28)
elif temp_num_1%4 == 0:
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 29)
else:
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 28)
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
if out_bool:
if not year is None:
out_bool = year == temp_num_1
if out_bool:
if not month is None:
out_bool = month == temp_num_2
if out_bool:
if not day is None:
out_bool = day == temp_num_3
if out_bool:
if not hour is None:
out_bool = hour == temp_num_2_0
if out_bool:
if not minute is None:
out_bool = minute == temp_num_4_2
if out_bool:
if not given_name is None:
if isinstance(given_name, str):
given_name = given_name.strip()
temp_len = len(given_name)
if temp_len == 0:
out_bool = (temp_num_1_2 == 0) & (temp_num_2_1 == 0)
elif temp_len == 1:
temp_num_1 = 0
temp_str_1 = given_name[0]
for n in range(26):
if temp_str_1 == English_name_capital[n]:
temp_num_1 = n+1
break
if temp_num_1 > 0:
if temp_num_1 == temp_num_2_1:
out_bool = temp_num_1_2 == 0
else:
out_bool = False
else:
out_bool = False
else:
temp_num_1 = 0
temp_str_1 = given_name[0]
for n in range(26):
if temp_str_1 == English_name_capital[n]:
temp_num_1 = n+1
break
if temp_num_1 > 0:
if temp_num_1 == temp_num_2_1:
temp_num_2 = 0
temp_str_2 = given_name[1].upper()
for n in range(26):
if temp_str_2 == English_name_capital[n]:
temp_num_2 = n+1
break
if temp_num_2 > 0:
out_bool = temp_num_1_2 == temp_num_2
elif temp_str_2 in English_name_other:
out_bool = temp_num_1_2 == 0
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
if out_bool:
if not family_name is None:
if isinstance(family_name, str):
family_name = family_name.strip()
temp_len = len(family_name)
if temp_len == 0:
out_bool = (temp_num_1_1 == 0) & (temp_num_3_2 == 0)
elif temp_len == 1:
temp_num_1 = 0
temp_str_1 = family_name[0]
for n in range(26):
if temp_str_1 == English_name_capital[n]:
temp_num_1 = n+1
break
if temp_num_1 > 0:
if temp_num_1 == temp_num_3_2:
out_bool = temp_num_1_1 == 0
else:
out_bool = False
else:
out_bool = False
else:
temp_num_1 = 0
temp_str_1 = family_name[0]
for n in range(26):
if temp_str_1 == English_name_capital[n]:
temp_num_1 = n+1
break
if temp_num_1 > 0:
if temp_num_1 == temp_num_3_2:
temp_num_2 = 0
temp_str_2 = family_name[1].upper()
for n in range(26):
if temp_str_2 == English_name_capital[n]:
temp_num_2 = n+1
break
if temp_num_2 > 0:
out_bool = temp_num_1_1 == temp_num_2
elif temp_str_2 in English_name_other:
out_bool = temp_num_1_1 == 0
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
return out_bool
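# Usage sketch (illustrative addition): generate a mixed number and immediately
# re-check it; the name used here is a hypothetical placeholder, and all of the
# optional name/date/time arguments of num_mix_valid may be omitted.
def _demo_num_mix_valid():
    generated = number_mix_generate([["Alice", "Smith"]], ("A", "B", "C"))
    if generated is None or generated[0] is None:
        return False
    mix_number = generated[0][0]
    return num_mix_valid(mix_number, given_name="Alice", family_name="Smith")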
def num_mix_64_2_8(mix_number):
# input: mix_number, string of length 21
# output: mix_number, string of length 42
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
oct_digits = ("0", "1", "2", "3", "4", "5", "6", "7")
out_str = ""
if isinstance(mix_number, str):
mix_number = mix_number.strip()
if len(mix_number) == 21:
temp_bool_0 = True
temp_number_list = []
for n in range(21):
temp_num_0 = -1
temp_str_0 = mix_number[n]
for n1 in range(64):
if long_digits[n1] == temp_str_0:
temp_num_0 = n1
break
if temp_num_0 >= 0:
temp_number_list.append(temp_num_0)
else:
temp_bool_0 = False
if temp_bool_0:
temp_number_list_1 = []
for n in range(21):
temp_num_1 = temp_number_list[n]%8
temp_num_0 = int((temp_number_list[n]-temp_num_1)/8)
temp_number_list_1.append(temp_num_0)
temp_number_list_1.append(temp_num_1)
for n in range(len(temp_number_list_1)):
out_str = out_str+oct_digits[temp_number_list_1[n]]
else:
out_str = None
else:
out_str = None
else:
out_str = None
return out_str
def num_mix_8_2_64(mix_number):
# input: mix_number, string of length 42
# output: mix_number, string of length 21
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
oct_digits = ("0", "1", "2", "3", "4", "5", "6", "7")
out_str = ""
if isinstance(mix_number, str):
mix_number = mix_number.strip()
if len(mix_number) == 42:
temp_bool_0 = True
temp_number_list = []
for n in range(42):
temp_num_0 = -1
temp_str_0 = mix_number[n]
for n1 in range(8):
if oct_digits[n1] == temp_str_0:
temp_num_0 = n1
break
if temp_num_0 >= 0:
temp_number_list.append(temp_num_0)
else:
temp_bool_0 = False
if temp_bool_0:
temp_number_list_1 = []
for n in range(21):
temp_num_0 = temp_number_list[2*n]*8+temp_number_list[2*n+1]
temp_number_list_1.append(temp_num_0)
for n in range(len(temp_number_list_1)):
out_str = out_str+long_digits[temp_number_list_1[n]]
else:
out_str = None
else:
out_str = None
else:
out_str = None
return out_str
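# Usage sketch (illustrative addition): round-trip a generated 21-digit mixed
# number through its 42-digit octal form and back; the two converters are
# intended to be lossless inverses of each other.
def _demo_num_mix_round_trip():
    generated = number_mix_generate([["Alice", "Smith"]], ("A", "B", "C"))
    if generated is None or generated[0] is None:
        return False
    mix_number = generated[0][0]
    octal_form = num_mix_64_2_8(mix_number)   # 21 base-64 digits -> 42 octal digits
    restored = num_mix_8_2_64(octal_form)     # back to the 21-digit base-64 form
    return restored == mix_number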
def number_organization_generate(in_list_name, in_initial_list):
# to generate organization number, 14 digits
# input:
# in_list_name = [organization name 0 (str) / None,
# organization name 1 (str) / None,
# organization name 2 (str) / None, ...]
# in_initial_list = [*, *, *]
# each of the *s is from 26 letters
# output:
# out_result = [(out number 0, organization name 0,
# Date-UTC 0, Time-UTC 0) / None,
# (out number 1, organization name 1,
# Date-UTC 1, Time-UTC 1) / None,
# (out number 2, organization name 2,
# Date-UTC 2, Time-UTC 2) / None, ...]
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
English_name_capital = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z")
English_name_capital_1 = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z",
"0", "1", "2", "3", "4",
"5", "6", "7", "8", "9")
English_name_other = (" ", "-", "'", "‘", "’", "&",
"/", ".", ":", "(", ")")
temp_bool_0 = True
if isinstance(in_initial_list, list) | isinstance(in_initial_list, tuple):
if len(in_initial_list) == 3:
init_char_0 = in_initial_list[0]
init_char_1 = in_initial_list[1]
init_char_2 = in_initial_list[2]
if (isinstance(init_char_0, str) & isinstance(init_char_1, str) &
isinstance(init_char_2, str)):
init_char_0 = init_char_0.strip().upper()
init_char_1 = init_char_1.strip().upper()
init_char_2 = init_char_2.strip().upper()
if ((init_char_0 in English_name_capital) & (init_char_1 in English_name_capital) &
(init_char_2 in English_name_capital)):
first_3_digits = (init_char_0.lower()+init_char_1.lower()+init_char_2.lower(),
init_char_0.lower()+init_char_1.lower()+init_char_2,
init_char_0.lower()+init_char_1+init_char_2.lower(),
init_char_0.lower()+init_char_1+init_char_2,
init_char_0+init_char_1.lower()+init_char_2.lower(),
init_char_0+init_char_1.lower()+init_char_2,
init_char_0+init_char_1+init_char_2.lower(),
init_char_0+init_char_1+init_char_2)
else:
temp_bool_0 = False
else:
temp_bool_0 = False
else:
temp_bool_0 = False
else:
temp_bool_0 = False
out_result = []
if temp_bool_0:
if isinstance(in_list_name, list) | isinstance(in_list_name, tuple):
temp_len = len(in_list_name)
if temp_len > 0:
temp_time_now = datetime.utcnow()
temp_num_0 = temp_time_now.microsecond
temp_num_0 = temp_num_0*60+temp_time_now.second
temp_num_0 = temp_num_0*60+temp_time_now.minute
temp_num_0 = temp_num_0*24+temp_time_now.hour
seed(temp_num_0)
for n in range(temp_len):
temp_bool_1 = True
if in_list_name[n] is None:
out_result.append(None)
elif isinstance(in_list_name[n], str):
temp_on_str = in_list_name[n].strip()
temp_len = len(temp_on_str)
if temp_len == 0:
temp_bool_1 = False
elif temp_len == 1:
temp_num_1 = 0
temp_str_1 = in_list_name[n][0]
for n2 in range(36):
if English_name_capital_1[n2] == temp_str_1:
temp_num_1 = n2+1
break
if temp_num_1 < 1:
temp_bool_1 = False
else:
temp_num_1 = 0
temp_str_1 = in_list_name[n][0]
for n2 in range(36):
if English_name_capital_1[n2] == temp_str_1:
temp_num_1 = n2+1
break
if temp_num_1 > 0:
for n1 in range(1, temp_len):
temp_num_2 = 0
temp_str_2 = in_list_name[n][n1].upper()
for n2 in range(36):
if English_name_capital_1[n2] == temp_str_2:
temp_num_2 = n2+1
break
if temp_num_2 < 1:
if not temp_str_2 in English_name_other:
temp_bool_1 = False
break
else:
temp_bool_1 = False
if temp_bool_1:
temp_time_now = datetime.utcnow()
temp_year = temp_time_now.year
temp_year_3 = temp_year%10
temp_year_0 = int((temp_year-temp_year_3)/10)
temp_year_2 = temp_year_0%10
temp_year_0 = int((temp_year_0-temp_year_2)/10)
temp_year_1 = temp_year_0%10
temp_year_0 = int((temp_year_0-temp_year_1)/10)
temp_month = temp_time_now.month
temp_day = temp_time_now.day
temp_hour = temp_time_now.hour
temp_minute = temp_time_now.minute
temp_num_0 = 0
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_1_0 = int(temp_runif*8)
if temp_num_1_0 == 8:
temp_num_1_0 -= 1
out_num_str = first_3_digits[temp_num_1_0]
temp_str_3 = out_num_str[0]
temp_num_1_1 = -1
for n2 in range(64):
if long_digits[n2] == temp_str_3:
temp_num_1_1 = n2
break
init_num_0 = temp_num_1_1
temp_num_0 += temp_num_1_1
temp_str_3 = out_num_str[1]
temp_num_1_1 = -1
for n2 in range(64):
if long_digits[n2] == temp_str_3:
temp_num_1_1 = n2
break
init_num_1 = temp_num_1_1
temp_num_0 += temp_num_1_1
temp_str_3 = out_num_str[2]
temp_num_1_1 = -1
for n2 in range(64):
if long_digits[n2] == temp_str_3:
temp_num_1_1 = n2
break
init_num_2 = temp_num_1_1
temp_num_0 += temp_num_1_1
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_2_1 = int(temp_runif*89)
if temp_num_2_1 == 89:
temp_num_2_1 -= 1
temp_num_2_2 = temp_year_1
temp_num_2_2 = temp_num_2_2*24+temp_hour
temp_num_2_2 = temp_num_2_2*12+(temp_month-1)
temp_num_2 = temp_num_2_2+temp_num_2_1*2887
temp_num_2_2 = temp_num_2%64
temp_num_2_0 = int((temp_num_2-temp_num_2_2)/64)
temp_num_2_1 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_1)/64)
temp_num_2_0 = (temp_num_2_0+init_num_0)%64
temp_num_0 += temp_num_2_0
out_num_str = out_num_str+long_digits[temp_num_2_0]
temp_num_2_1 = (temp_num_2_1-init_num_1)%64
temp_num_0 += temp_num_2_1
out_num_str = out_num_str+long_digits[temp_num_2_1]
temp_num_2_2 = (temp_num_2_2+init_num_2)%64
temp_num_0 += temp_num_2_2
out_num_str = out_num_str+long_digits[temp_num_2_2]
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_2_1 = int(temp_runif*5393)
if temp_num_2_1 == 5393:
temp_num_2_1 -= 1
temp_num_2_2 = temp_day-1
temp_num_2_2 = temp_num_2_2*10+temp_year_0
temp_num_2_2 = temp_num_2_2*10+temp_year_2
temp_num_2 = temp_num_2_2+temp_num_2_1*3109
temp_num_2_3 = temp_num_2%64
temp_num_2_0 = int((temp_num_2-temp_num_2_3)/64)
temp_num_2_2 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_2)/64)
temp_num_2_1 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_1)/64)
temp_num_2_0 = (temp_num_2_0-init_num_0)%64
temp_num_0 += temp_num_2_0
out_num_str = out_num_str+long_digits[temp_num_2_0]
temp_num_2_1 = (temp_num_2_1+init_num_1)%64
temp_num_0 += temp_num_2_1
out_num_str = out_num_str+long_digits[temp_num_2_1]
temp_num_2_2 = (temp_num_2_2-init_num_2)%64
temp_num_0 += temp_num_2_2
out_num_str = out_num_str+long_digits[temp_num_2_2]
temp_num_2_3 = (temp_num_2_3+init_num_0)%64
temp_num_0 += temp_num_2_3
out_num_str = out_num_str+long_digits[temp_num_2_3]
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_2_1 = int(temp_runif*433)
if temp_num_2_1 == 433:
temp_num_2_1 -= 1
temp_num_2_2 = temp_minute
temp_num_2_2 = temp_num_2_2*10+temp_year_3
temp_num_2 = temp_num_2_2+temp_num_2_1*601
temp_num_2_2 = temp_num_2%64
temp_num_2_0 = int((temp_num_2-temp_num_2_2)/64)
temp_num_2_1 = temp_num_2_0%64
temp_num_2_0 = int((temp_num_2_0-temp_num_2_1)/64)
temp_num_2_0 = (temp_num_2_0-init_num_1)%64
temp_num_0 += temp_num_2_0
out_num_str = out_num_str+long_digits[temp_num_2_0]
temp_num_2_1 = (temp_num_2_1+init_num_2)%64
temp_num_0 += temp_num_2_1
out_num_str = out_num_str+long_digits[temp_num_2_1]
temp_num_2_2 = (temp_num_2_2-init_num_0)%64
temp_num_0 += temp_num_2_2
out_num_str = out_num_str+long_digits[temp_num_2_2]
out_num_str = out_num_str+long_digits[temp_num_0%64]
temp_runif = 1.0
temp_str_4 = str(temp_year_0)+str(temp_year_1)+str(temp_year_2)+str(temp_year_3)
temp_str_4 = temp_str_4+"-"
if temp_month < 10:
temp_str_4 = temp_str_4+"0"+str(temp_month)
else:
temp_str_4 = temp_str_4+str(temp_month)
temp_str_4 = temp_str_4+"-"
if temp_day < 10:
temp_str_4 = temp_str_4+"0"+str(temp_day)
else:
temp_str_4 = temp_str_4+str(temp_day)
temp_str_5 = ""
if temp_hour < 10:
temp_str_5 = temp_str_5+"0"+str(temp_hour)
else:
temp_str_5 = temp_str_5+str(temp_hour)
temp_str_5 = temp_str_5+":"
if temp_minute < 10:
temp_str_5 = temp_str_5+"0"+str(temp_minute)
else:
temp_str_5 = temp_str_5+str(temp_minute)
out_result.append((out_num_str, temp_on_str, temp_str_4, temp_str_5))
else:
out_result.append(None)
else:
out_result.append(None)
else:
temp_bool_0 = False
else:
temp_bool_0 = False
if not temp_bool_0:
out_result = None
return out_result
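# Usage sketch (illustrative addition): the organization names and initials are
# hypothetical placeholders.
def _demo_number_organization_generate():
    orgs = ["Example Org", "ACME Ltd.", None]
    result = number_organization_generate(orgs, ("A", "B", "C"))
    # Each non-None entry is (number, organization name, Date-UTC, Time-UTC).
    return result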
def num_organization_valid(organization_number,
year = None, month = None, day = None,
hour = None, minute = None):
    # check the validity of an organization number
# input: organization_number, string of length 14
# year, integer
# month, integer
# day, integer
# hour, integer
# minute, integer
# output: bool
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
English_name_capital = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z")
out_bool = True
if isinstance(organization_number, str):
organization_number = organization_number.strip()
if len(organization_number) == 14:
init_str_0 = organization_number[0]
init_str_1 = organization_number[1]
init_str_2 = organization_number[2]
if ((init_str_0.upper() in English_name_capital) &
(init_str_1.upper() in English_name_capital) &
(init_str_2.upper() in English_name_capital)):
temp_number_list = []
temp_num_1 = 0
temp_num_0 = -1
for n1 in range(64):
if long_digits[n1] == init_str_0:
temp_num_0 = n1
break
init_num_0 = temp_num_0
temp_num_1 += temp_num_0
temp_number_list.append(temp_num_0)
temp_num_0 = -1
for n1 in range(64):
if long_digits[n1] == init_str_1:
temp_num_0 = n1
break
init_num_1 = temp_num_0
temp_num_1 += temp_num_0
temp_number_list.append(temp_num_0)
temp_num_0 = -1
for n1 in range(64):
if long_digits[n1] == init_str_2:
temp_num_0 = n1
break
init_num_2 = temp_num_0
temp_num_1 += temp_num_0
temp_number_list.append(temp_num_0)
temp_num_2 = 0
for n in range(3, 14):
temp_num_0 = -1
temp_str_0 = organization_number[n]
for n1 in range(64):
if long_digits[n1] == temp_str_0:
temp_num_0 = n1
break
if temp_num_0 >= 0:
temp_number_list.append(temp_num_0)
if n < 13:
temp_num_1 += temp_num_0
else:
temp_num_2 += temp_num_0
else:
out_bool = False
break
if out_bool:
out_bool = temp_num_1%64 == temp_num_2
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
if out_bool:
temp_num_0 = (temp_number_list[3]-init_num_0)%64
temp_num_0 = temp_num_0*64+(temp_number_list[4]+init_num_1)%64
temp_num_0 = temp_num_0*64+(temp_number_list[5]-init_num_2)%64
temp_num_0 = temp_num_0%2887
temp_num_1_2 = temp_num_0%12
temp_num_1_0 = int((temp_num_0-temp_num_1_2)/12)
temp_num_1_1 = temp_num_1_0%24
temp_num_1_0 = int((temp_num_1_0-temp_num_1_1)/24)
temp_num_0 = (temp_number_list[6]+init_num_0)%64
temp_num_0 = temp_num_0*64+(temp_number_list[7]-init_num_1)%64
temp_num_0 = temp_num_0*64+(temp_number_list[8]+init_num_2)%64
temp_num_0 = temp_num_0*64+(temp_number_list[9]-init_num_0)%64
temp_num_0 = temp_num_0%3109
temp_num_2_2 = temp_num_0%10
temp_num_2_0 = int((temp_num_0-temp_num_2_2)/10)
temp_num_2_1 = temp_num_2_0%10
temp_num_2_0 = int((temp_num_2_0-temp_num_2_1)/10)
temp_num_0 = (temp_number_list[10]+init_num_1)%64
temp_num_0 = temp_num_0*64+(temp_number_list[11]-init_num_2)%64
temp_num_0 = temp_num_0*64+(temp_number_list[12]+init_num_0)%64
temp_num_0 = temp_num_0%601
temp_num_3_1 = temp_num_0%10
temp_num_3_0 = int((temp_num_0-temp_num_3_1)/10)
if (temp_num_1_1 >= 0) & (temp_num_1_1 < 24):
if (temp_num_3_0 >= 0) & (temp_num_3_0 < 60):
if ((temp_num_1_0 >= 0) & (temp_num_1_0 <= 9) &
(temp_num_2_1 >= 0) & (temp_num_2_1 <= 9) &
(temp_num_2_2 >= 0) & (temp_num_2_2 <= 9) &
(temp_num_3_1 >= 0) & (temp_num_3_1 <= 9)):
temp_num_1 = temp_num_2_1
temp_num_1 = temp_num_1*10+temp_num_1_0
temp_num_1 = temp_num_1*10+temp_num_2_2
temp_num_1 = temp_num_1*10+temp_num_3_1
temp_num_2 = temp_num_1_2+1
if (temp_num_2 > 0) & (temp_num_2 <= 12):
temp_num_3 = temp_num_2_0+1
if temp_num_2 in (1, 3, 5, 7, 8, 10, 12):
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 31)
elif temp_num_2 in (4, 6, 9, 11):
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 30)
else:
if temp_num_1%400 == 0:
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 29)
elif temp_num_1%100 == 0:
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 28)
elif temp_num_1%4 == 0:
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 29)
else:
out_bool = (temp_num_3 > 0) & (temp_num_3 <= 28)
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
if out_bool:
if not year is None:
out_bool = year == temp_num_1
if out_bool:
if not month is None:
out_bool = month == temp_num_2
if out_bool:
if not day is None:
out_bool = day == temp_num_3
if out_bool:
if not hour is None:
out_bool = hour == temp_num_1_1
if out_bool:
if not minute is None:
out_bool = minute == temp_num_3_0
return out_bool
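# Usage sketch (illustrative addition): generate an organization number and
# re-check it without any of the optional date/time constraints.
def _demo_num_organization_valid():
    generated = number_organization_generate(["Example Org"], ("A", "B", "C"))
    if generated is None or generated[0] is None:
        return False
    return num_organization_valid(generated[0][0])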
def num_organization_64_2_16(organization_number):
# input: organization_number, string of length 14
# output: organization_number, string of length 21
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
hex_digits = ("0", "1", "2", "3", "4", "5", "6", "7",
"8", "9", "A", "B", "C", "D", "E", "F")
out_str = ""
if isinstance(organization_number, str):
organization_number = organization_number.strip()
if len(organization_number) == 14:
temp_bool_0 = True
temp_number_list = []
for n in range(14):
temp_num_0 = -1
temp_str_0 = organization_number[n]
for n1 in range(64):
if long_digits[n1] == temp_str_0:
temp_num_0 = n1
break
if temp_num_0 >= 0:
temp_number_list.append(temp_num_0)
else:
temp_bool_0 = False
if temp_bool_0:
temp_number_list_1 = []
for n in range(7):
temp_num_0 = temp_number_list[2*n]*64+temp_number_list[2*n+1]
temp_num_3 = temp_num_0%16
temp_num_0 = int((temp_num_0-temp_num_3)/16)
temp_num_2 = temp_num_0%16
temp_num_1 = int((temp_num_0-temp_num_2)/16)
temp_number_list_1.append(temp_num_1)
temp_number_list_1.append(temp_num_2)
temp_number_list_1.append(temp_num_3)
for n in range(len(temp_number_list_1)):
out_str = out_str+hex_digits[temp_number_list_1[n]]
else:
out_str = None
else:
out_str = None
else:
out_str = None
return out_str
def num_organization_16_2_64(organization_number):
# input: organization_number, string of length 21
# output: organization_number, string of length 14
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
hex_digits = ("0", "1", "2", "3", "4", "5", "6", "7",
"8", "9", "A", "B", "C", "D", "E", "F")
out_str = ""
if isinstance(organization_number, str):
organization_number = organization_number.strip()
if len(organization_number) == 21:
temp_bool_0 = True
temp_number_list = []
for n in range(21):
temp_num_0 = -1
temp_str_0 = organization_number[n].upper()
for n1 in range(16):
if hex_digits[n1] == temp_str_0:
temp_num_0 = n1
break
if temp_num_0 >= 0:
temp_number_list.append(temp_num_0)
else:
temp_bool_0 = False
if temp_bool_0:
temp_number_list_1 = []
for n in range(7):
temp_num_0 = temp_number_list[3*n]*256+temp_number_list[3*n+1]*16+temp_number_list[3*n+2]
temp_num_1 = temp_num_0%64
temp_num_0 = int((temp_num_0-temp_num_1)/64)
temp_number_list_1.append(temp_num_0)
temp_number_list_1.append(temp_num_1)
for n in range(len(temp_number_list_1)):
out_str = out_str+long_digits[temp_number_list_1[n]]
else:
out_str = None
else:
out_str = None
else:
out_str = None
return out_str
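# Usage sketch (illustrative addition): round-trip a generated 14-digit
# organization number through its 21-digit hexadecimal form and back; the two
# converters are intended to be lossless inverses of each other.
def _demo_num_organization_round_trip():
    generated = number_organization_generate(["Example Org"], ("A", "B", "C"))
    if generated is None or generated[0] is None:
        return False
    org_number = generated[0][0]
    hex_form = num_organization_64_2_16(org_number)   # 14 base-64 digits -> 21 hex digits
    restored = num_organization_16_2_64(hex_form)     # back to the 14-digit base-64 form
    return restored == org_number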
def number_manipulation_generate(organization_number):
# to generate manipulation number, 7 digits
# input: organization_number, string of length 14
# output:
# out_number, string of length 7
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
series_digits = ("5", "B", "7", "F", "0", "C", "2", "D",
"E", "9", "3", "1", "4", "8", "6", "A")
    # guard against non-string input before calling .strip()
    if isinstance(organization_number, str):
        organization_number = organization_number.strip()
        temp_bool = num_organization_valid(organization_number)
    else:
        temp_bool = False
if temp_bool:
temp_number_list = []
for n in range(14):
temp_num_0 = -1
temp_str_0 = organization_number[n]
for n1 in range(64):
if long_digits[n1] == temp_str_0:
temp_num_0 = n1
break
temp_number_list.append(temp_num_0)
temp_num_1 = temp_number_list[0]+temp_number_list[4]+temp_number_list[8]
temp_num_2 = temp_number_list[1]+temp_number_list[5]+temp_number_list[9]
temp_num_3 = (temp_num_1+temp_num_2)%16
out_num_str = series_digits[temp_num_3]
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_1_0 = int(temp_runif*41)
if temp_num_1_0 == 41:
temp_num_1_0 -= 1
temp_num_1_0 *= 97
temp_num_3 = temp_num_1%5
temp_num_4 = temp_num_2%19
temp_num_5 = temp_num_3*19+temp_num_4+temp_num_1_0
temp_num_3 = temp_num_5%16
out_num_str = out_num_str+series_digits[temp_num_3]
temp_num_5 = int((temp_num_5-temp_num_3)/16)
temp_num_4 = temp_num_5%16
out_num_str = out_num_str+series_digits[temp_num_4]
temp_num_5 = int((temp_num_5-temp_num_4)/16)
out_num_str = out_num_str+series_digits[temp_num_5]
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_1_0 = int(temp_runif*31)
if temp_num_1_0 == 31:
temp_num_1_0 -= 1
        temp_num_1_0 *= 127
temp_num_3 = temp_num_2%17
temp_num_4 = temp_num_1%7
temp_num_5 = temp_num_3*7+temp_num_4+temp_num_1_0
temp_num_3 = temp_num_5%16
out_num_str = out_num_str+series_digits[temp_num_3]
temp_num_5 = int((temp_num_5-temp_num_3)/16)
temp_num_4 = temp_num_5%16
out_num_str = out_num_str+series_digits[temp_num_4]
temp_num_5 = int((temp_num_5-temp_num_4)/16)
out_num_str = out_num_str+series_digits[temp_num_5]
else:
out_num_str = None
return out_num_str
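# Usage sketch (illustrative addition): a manipulation number is always derived
# from an existing, valid organization number; the inputs are hypothetical.
def _demo_number_manipulation_generate():
    generated = number_organization_generate(["Example Org"], ("A", "B", "C"))
    if generated is None or generated[0] is None:
        return None
    # Returns a 7-digit string, or None if the organization number is invalid.
    return number_manipulation_generate(generated[0][0])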
def number_manipulation_valid(manipulation_number, organization_number):
    # check the validity of a manipulation number (7 digits)
# input: manipulation_number, string of length 7
# organization_number, string of length 14
# output: bool
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
series_digits = ("5", "B", "7", "F", "0", "C", "2", "D",
"E", "9", "3", "1", "4", "8", "6", "A")
if isinstance(manipulation_number, str) & isinstance(organization_number, str):
manipulation_number = manipulation_number.strip()
organization_number = organization_number.strip()
out_bool = num_organization_valid(organization_number)
else:
out_bool = False
if out_bool:
if len(manipulation_number) == 7:
temp_number_list_0 = []
for n in range(7):
temp_num_0 = -1
temp_str_0 = manipulation_number[n]
for n1 in range(16):
if series_digits[n1] == temp_str_0:
temp_num_0 = n1
break
if temp_num_0 >= 0:
temp_number_list_0.append(temp_num_0)
else:
out_bool = False
break
if out_bool:
temp_number_list_1 = []
for n in range(14):
temp_num_0 = -1
temp_str_0 = organization_number[n]
for n1 in range(64):
if long_digits[n1] == temp_str_0:
temp_num_0 = n1
break
if temp_num_0 >= 0:
temp_number_list_1.append(temp_num_0)
else:
out_bool = False
break
if out_bool:
temp_num_1 = temp_number_list_1[0]+temp_number_list_1[4]+temp_number_list_1[8]
temp_num_2 = temp_number_list_1[1]+temp_number_list_1[5]+temp_number_list_1[9]
if (temp_num_1+temp_num_2)%16 == temp_number_list_0[0]:
temp_num_3 = temp_number_list_0[3]
temp_num_3 = temp_num_3*16+temp_number_list_0[2]
temp_num_3 = temp_num_3*16+temp_number_list_0[1]
temp_num_3 = temp_num_3%97
temp_num_4 = temp_num_3%19
temp_num_3 = int((temp_num_3-temp_num_4)/19)
if (temp_num_1%5 == temp_num_3) & (temp_num_2%19 == temp_num_4):
temp_num_3 = temp_number_list_0[6]
temp_num_3 = temp_num_3*16+temp_number_list_0[5]
temp_num_3 = temp_num_3*16+temp_number_list_0[4]
temp_num_3 = temp_num_3%127
temp_num_4 = temp_num_3%7
temp_num_3 = int((temp_num_3-temp_num_4)/7)
out_bool = (temp_num_2%17 == temp_num_3) & (temp_num_1%7 == temp_num_4)
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
return out_bool
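# Usage sketch (illustrative addition): pair generation and validation of a
# manipulation number against the organization number it was derived from.
def _demo_number_manipulation_valid():
    org = number_organization_generate(["Example Org"], ("A", "B", "C"))
    if org is None or org[0] is None:
        return False
    org_number = org[0][0]
    manip_number = number_manipulation_generate(org_number)
    if manip_number is None:
        return False
    return number_manipulation_valid(manip_number, org_number)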
def number_member_generate(in_list_name):
# to generate member number, 14 digits
# input:
# in_list_name = [[mix number 0,
# given name 0, family name 0,
# virtual name 0,
# organization number 0,
# date 0, time 0] / None,
# [mix number 1,
# given name 1, family name 1,
# virtual name 1,
# organization number 1,
# date 1, time 1] / None,
# [mix number 2,
# given name 2, family name 2,
# virtual name 2,
# organization number 2,
# date 2, time 2] / None, ...]
# output:
# out_result = [(out number 0, mix number 0, virtual name 0, organization number) / None,
# (out number 1, mix number 1, virtual name 1, organization number) / None,
# (out number 2, mix number 2, virtual name 2, organization number) / None, ...]
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
series_digits = ("5", "B", "7", "F", "0", "C", "2", "D",
"E", "9", "3", "1", "4", "8", "6", "A")
numeric_digits = ("0", "1", "2", "3", "4",
"5", "6", "7", "8", "9")
out_result = []
if isinstance(in_list_name, list) | isinstance(in_list_name, tuple):
temp_len = len(in_list_name)
if temp_len > 0:
temp_time_now = datetime.utcnow()
temp_num_0 = temp_time_now.microsecond
temp_num_0 = temp_num_0*60+temp_time_now.second
temp_num_0 = temp_num_0*60+temp_time_now.minute
temp_num_0 = temp_num_0*24+temp_time_now.hour
seed(temp_num_0)
for n in range(temp_len):
temp_bool = True
if in_list_name[n] is None:
temp_bool = False
elif isinstance(in_list_name[n], list) | isinstance(in_list_name[n], tuple):
if len(in_list_name[n]) == 7:
if (isinstance(in_list_name[n][0], str) & isinstance(in_list_name[n][1], str) &
isinstance(in_list_name[n][2], str) & isinstance(in_list_name[n][3], str) &
isinstance(in_list_name[n][4], str) & isinstance(in_list_name[n][5], str) &
isinstance(in_list_name[n][6], str)):
temp_list_0 = []
for n1 in range(7):
temp_list_0.append(in_list_name[n][n1].strip())
if len(temp_list_0[5]) == 10:
if ((temp_list_0[5][0] in numeric_digits) & (temp_list_0[5][1] in numeric_digits) &
(temp_list_0[5][2] in numeric_digits) & (temp_list_0[5][3] in numeric_digits) &
(temp_list_0[5][4] == "-") & (temp_list_0[5][5] in numeric_digits) &
(temp_list_0[5][6] in numeric_digits) & (temp_list_0[5][7] == "-") &
(temp_list_0[5][8] in numeric_digits) & (temp_list_0[5][9] in numeric_digits)):
temp_num_1_0 = int(temp_list_0[5][0:4])
temp_num_1_1 = int(temp_list_0[5][5:7])
temp_num_1_2 = int(temp_list_0[5][8:10])
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if len(temp_list_0[6]) == 5:
if ((temp_list_0[6][0] in numeric_digits) & (temp_list_0[6][1] in numeric_digits) &
(temp_list_0[6][2] == ":") & (temp_list_0[6][3] in numeric_digits) &
(temp_list_0[6][4] in numeric_digits)):
temp_num_2_0 = int(temp_list_0[6][0:2])
temp_num_2_1 = int(temp_list_0[6][3:5])
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if temp_list_0[0][:3].upper() == temp_list_0[4][:3].upper():
if num_mix_valid(temp_list_0[0], temp_list_0[1], temp_list_0[2],
year = temp_num_1_0, month = temp_num_1_1, day = temp_num_1_2,
hour = temp_num_2_0, minute = temp_num_2_1):
temp_bool = num_organization_valid(temp_list_0[4])
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
temp_number_list_0 = []
for n1 in range(21):
temp_num_0 = -1
temp_str_0 = temp_list_0[0][n1]
for n2 in range(64):
if long_digits[n2] == temp_str_0:
temp_num_0 = n2
break
temp_number_list_0.append(temp_num_0)
temp_number_list_1 = []
for n1 in range(14):
temp_num_0 = -1
temp_str_0 = temp_list_0[4][n1]
for n2 in range(64):
if long_digits[n2] == temp_str_0:
temp_num_0 = n2
break
temp_number_list_1.append(temp_num_0)
temp_len_1 = len(temp_list_0[3])
if temp_len_1 == 0:
if len(temp_list_0[1]) > 0:
temp_str_vn = ""
temp_num_1 = 0
temp_num_2 = 0
else:
temp_bool = False
else:
temp_str_vn = ""
for n1 in range(temp_len_1):
temp_str_1 = temp_list_0[3][n1]
temp_num_3 = ord(temp_str_1)
if (temp_num_3 >= 32) & (temp_num_3 < 65536):
if not temp_str_1 in ("'", '"'):
temp_str_vn = temp_str_vn+temp_str_1
else:
temp_str_vn = temp_str_vn+"?"
else:
temp_bool = False
break
if temp_bool:
if temp_len_1 == 1:
temp_str_1 = temp_str_vn[0]
temp_num_3 = ord(temp_str_1)
temp_num_1 = temp_num_3%16
temp_num_3 = int((temp_num_3-temp_num_1)/16)
temp_num_1 = temp_num_3%16
temp_num_2 = 0
else:
temp_str_1 = temp_str_vn[0]
temp_num_3 = ord(temp_str_1)
temp_num_1 = temp_num_3%16
temp_num_3 = int((temp_num_3-temp_num_1)/16)
temp_num_1 = temp_num_3%16
temp_str_2 = temp_str_vn[1]
temp_num_4 = ord(temp_str_2)
temp_num_2 = temp_num_4%16
if temp_bool:
temp_num_0 = 0
out_num_str = ""
temp_num_3 = (temp_number_list_0[0]+temp_number_list_0[4]+temp_number_list_0[8])%16
temp_num_0 += temp_num_3
out_num_str = out_num_str+series_digits[temp_num_3]
temp_num_3 = (temp_number_list_0[1]+temp_number_list_0[5]+temp_number_list_0[9])%16
temp_num_0 += temp_num_3
out_num_str = out_num_str+series_digits[temp_num_3]
temp_num_3 = (temp_number_list_0[2]+temp_number_list_0[6]+temp_number_list_0[10])%16
temp_num_0 += temp_num_3
out_num_str = out_num_str+series_digits[temp_num_3]
temp_num_3 = (temp_number_list_0[3]+temp_number_list_0[7]+temp_number_list_0[11])%16
temp_num_0 += temp_num_3
out_num_str = out_num_str+series_digits[temp_num_3]
temp_num_3 = (temp_number_list_1[3]+temp_number_list_1[7]+temp_number_list_1[11])%16
temp_num_0 += temp_num_3
out_num_str = out_num_str+series_digits[temp_num_3]
temp_num_3 = (temp_number_list_1[2]+temp_number_list_1[6]+temp_number_list_1[10])%16
temp_num_0 += temp_num_3
out_num_str = out_num_str+series_digits[temp_num_3]
temp_num_0 += temp_num_1
out_num_str = out_num_str+series_digits[temp_num_1]
temp_num_0 += temp_num_2
out_num_str = out_num_str+series_digits[temp_num_2]
temp_num_1 = (temp_num_2_0+temp_number_list_0[12])%29
temp_num_2 = (temp_num_1_1-temp_number_list_0[14])%17
temp_num_1 = temp_num_1*17+temp_num_2
temp_num_2 = (temp_num_1_0%10+temp_number_list_0[15])%11
temp_num_1 = temp_num_1*11+temp_num_2
temp_num_2 = (temp_num_1_2-temp_number_list_0[13])%31
temp_num_1 = temp_num_1*31+temp_num_2
temp_runif = 1.0
while temp_runif == 1.0:
temp_runif = uniform(0.0, 1.0)
temp_num_2 = int(temp_runif*6)
if temp_num_2 == 6:
temp_num_2 -= 1
temp_num_2 *= 168127
temp_num_1 += temp_num_2
temp_num_5 = temp_num_1%16
temp_num_1 = int((temp_num_1-temp_num_5)/16)
temp_num_4 = temp_num_1%16
temp_num_1 = int((temp_num_1-temp_num_4)/16)
temp_num_3 = temp_num_1%16
temp_num_1 = int((temp_num_1-temp_num_3)/16)
temp_num_2 = temp_num_1%16
temp_num_1 = int((temp_num_1-temp_num_2)/16)
temp_num_0 += temp_num_1
out_num_str = out_num_str+series_digits[temp_num_1]
temp_num_0 += temp_num_2
out_num_str = out_num_str+series_digits[temp_num_2]
temp_num_0 += temp_num_3
out_num_str = out_num_str+series_digits[temp_num_3]
temp_num_0 += temp_num_4
out_num_str = out_num_str+series_digits[temp_num_4]
temp_num_0 += temp_num_5
out_num_str = out_num_str+series_digits[temp_num_5]
if len(temp_list_0[1]) > 0:
out_num_str = out_num_str+series_digits[temp_num_0%15]
else:
out_num_str = out_num_str+series_digits[15]
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
out_result.append([out_num_str, temp_list_0[0], temp_str_vn, temp_list_0[4]])
else:
out_result.append(None)
else:
out_result = None
else:
out_result = None
return out_result
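# ---------------------------------------------------------------------------
# Hedged usage sketch for number_member_generate (not part of the original
# code).  All values below are placeholders: real 21-character mix numbers and
# 14-character organization numbers are checksummed and have to come from the
# generator/validator functions defined elsewhere in this file, so the call is
# shown commented out.
#
#   request = ["<21-char mix number>",            # mix number
#              "Given", "Family",                 # English given / family name
#              "Virtual Name",                    # virtual name ("" if unused)
#              "<14-char organization number>",   # issuing organization
#              "2020-01-31", "12:30"]             # issuing date and time (UTC)
#   results = number_member_generate([request])
#   # results[0] is None for an invalid request, otherwise
#   # [member_number (14 chars), mix_number, virtual_name, organization_number]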
def num_member_valid(member_number, mix_number = None,
virtual_name = None, organization_number = None):
# check validation of mixed number
# input: member_number, string of length 14
# mix_number, string of length 21
# virtual_name, string
# organization_number, string of length 14
# output: bool
long_digits = ("G", "B", "l", "A", "r", "s", "6", "X",
"c", "K", "R", "Q", "I", "x", "h", "b",
"i", "f", "o", "a", "M", "S", "w", "0",
"P", "v", "3", "N", "t", "g", "8", "2",
"+", "-", "4", "k", "7", "e", "n", "D",
"V", "y", "U", "W", "F", "L", "d", "T",
"1", "J", "u", "Z", "z", "C", "Y", "9",
"m", "H", "O", "E", "5", "p", "j", "q")
series_digits = ("5", "B", "7", "F", "0", "C", "2", "D",
"E", "9", "3", "1", "4", "8", "6", "A")
out_bool = True
if isinstance(member_number, str):
member_number = member_number.strip()
if len(member_number) == 14:
temp_number_list = []
temp_num_1 = 0
temp_num_2 = 0
for n in range(14):
temp_num_0 = -1
temp_str_0 = member_number[n]
for n1 in range(16):
if series_digits[n1] == temp_str_0:
temp_num_0 = n1
break
if temp_num_0 >= 0:
temp_number_list.append(temp_num_0)
if n < 13:
temp_num_1 += temp_num_0
else:
temp_num_2 += temp_num_0
else:
out_bool = False
break
if out_bool:
if temp_number_list[13] != 15:
out_bool = temp_num_1%15 == temp_num_2
else:
out_bool = False
else:
out_bool = False
if out_bool:
if not mix_number is None:
if num_mix_valid(mix_number):
mix_number = mix_number.strip()
temp_number_list_1 = []
for n in range(21):
temp_num_0 = -1
temp_str_0 = mix_number[n]
for n1 in range(64):
if long_digits[n1] == temp_str_0:
temp_num_0 = n1
break
temp_number_list_1.append(temp_num_0)
if (temp_number_list_1[0]+temp_number_list_1[4]+
temp_number_list_1[8])%16 != temp_number_list[0]:
out_bool = False
elif (temp_number_list_1[1]+temp_number_list_1[5]+
temp_number_list_1[9])%16 != temp_number_list[1]:
out_bool = False
elif (temp_number_list_1[2]+temp_number_list_1[6]+
temp_number_list_1[10])%16 != temp_number_list[2]:
out_bool = False
elif (temp_number_list_1[3]+temp_number_list_1[7]+
temp_number_list_1[11])%16 != temp_number_list[3]:
out_bool = False
else:
temp_num_0 = (temp_number_list_1[7]-temp_number_list_1[1])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[8]+temp_number_list_1[2])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[9]-temp_number_list_1[0])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[10]+temp_number_list_1[1])%64
temp_num_0 = temp_num_0%6481
temp_num_1 = int(round(temp_num_0/270, 1))
temp_num_0 = (temp_number_list_1[11]-temp_number_list_1[2])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[12]+temp_number_list_1[0])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[13]-temp_number_list_1[1])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[14]+temp_number_list_1[2])%64
temp_num_0 = temp_num_0%2707
temp_num_2 = int(round(temp_num_0/270, 1))
temp_num_0 = (temp_number_list_1[15]-temp_number_list_1[0])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[16]+temp_number_list_1[1])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[17]-temp_number_list_1[2])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[18]+temp_number_list_1[0])%64
temp_num_0 = temp_num_0*64+(temp_number_list_1[19]-temp_number_list_1[1])%64
temp_num_0 = temp_num_0%22343
temp_num_3 = int(round(temp_num_0/60, 1))
temp_num_4 = temp_num_3%31
temp_num_3 = int((temp_num_3-temp_num_4)/31)
temp_num_1 = (temp_num_1+temp_number_list_1[12])%29
temp_num_2 = (temp_num_2+temp_number_list_1[15])%11
temp_num_3 = (temp_num_3+1-temp_number_list_1[14])%17
temp_num_4 = (temp_num_4+1-temp_number_list_1[13])%31
temp_num_5 = temp_number_list[8]
temp_num_5 = temp_num_5*16+temp_number_list[9]
temp_num_5 = temp_num_5*16+temp_number_list[10]
temp_num_5 = temp_num_5*16+temp_number_list[11]
temp_num_5 = temp_num_5*16+temp_number_list[12]
temp_num_5 = temp_num_5%168127
temp_num_6 = temp_num_5%31
if temp_num_6 == temp_num_4:
temp_num_5 = int((temp_num_5-temp_num_6)/31)
temp_num_6 = temp_num_5%11
if temp_num_6 == temp_num_2:
temp_num_5 = int((temp_num_5-temp_num_6)/11)
temp_num_6 = temp_num_5%17
if temp_num_6 == temp_num_3:
temp_num_5 = int((temp_num_5-temp_num_6)/17)
out_bool = temp_num_5 == temp_num_1
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
else:
out_bool = False
if out_bool:
if not virtual_name is None:
if isinstance(virtual_name, str):
virtual_name = virtual_name.strip()
temp_len = len(virtual_name)
for n in range(temp_len):
                    temp_str_0 = virtual_name[n]
temp_num_0 = ord(temp_str_0)
if (temp_num_0 >= 32) & (temp_num_0 < 65536):
if temp_str_0 in ("'", '"'):
out_bool = False
break
else:
out_bool = False
break
if out_bool:
if temp_len == 0:
if (temp_number_list[6] != 0) | (temp_number_list[7] != 0):
out_bool = False
elif temp_number_list[13] == 15:
out_bool = False
elif temp_len == 1:
temp_num_0 = ord(virtual_name[0])
temp_num_1 = temp_num_0%16
temp_num_0 = int((temp_num_0-temp_num_1)/16)
temp_num_1 = temp_num_0%16
if temp_number_list[6] == temp_num_1:
                            out_bool = temp_number_list[7] == 0
else:
out_bool = False
else:
temp_num_0 = ord(virtual_name[0])
temp_num_1 = temp_num_0%16
temp_num_0 = int((temp_num_0-temp_num_1)/16)
temp_num_1 = temp_num_0%16
if temp_number_list[6] == temp_num_1:
temp_num_0 = ord(virtual_name[1])
temp_num_1 = temp_num_0%16
out_bool = temp_number_list[7] == temp_num_1
else:
out_bool = False
else:
out_bool = False
if out_bool:
if not organization_number is None:
if num_organization_valid(organization_number):
organization_number = organization_number.strip()
if not mix_number is None:
out_bool = mix_number[:3].upper() == organization_number[:3].upper()
if out_bool:
temp_number_list_2 = []
for n in range(14):
temp_num_0 = -1
temp_str_0 = organization_number[n]
for n1 in range(64):
if long_digits[n1] == temp_str_0:
temp_num_0 = n1
break
temp_number_list_2.append(temp_num_0)
if ((temp_number_list_2[3]+temp_number_list_2[7]+
temp_number_list_2[11]) % 16 != temp_number_list[4]):
out_bool = False
elif ((temp_number_list_2[2]+temp_number_list_2[6]+
temp_number_list_2[10]) % 16 != temp_number_list[5]):
out_bool = False
else:
out_bool = False
return out_bool
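# ---------------------------------------------------------------------------
# Hedged usage sketch for num_member_valid (not part of the original code).
# With no optional argument only the internal checksum of the 14-character
# member number is verified; each optional argument adds a cross-check.
#
#   ok = num_member_valid(member_number)                       # checksum only
#   ok = num_member_valid(member_number, mix_number=mix)       # + mix number
#   ok = num_member_valid(member_number,
#                         virtual_name=vname,
#                         organization_number=org_num)         # + name / issuer
#   # member_number, mix, vname and org_num stand for previously generated
#   # values; the function returns a bool.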
def forming_str_text_org(in_org_num, in_org_name, in_org_init_num,
in_org_admin, in_gene_list, in_editor,
in_loc_list, in_member_of_org_list):
# forming string text of organization
# input: in_org_num, organization number, string of length 14
# in_org_name, organization name, string
# in_org_init_num, string of 3 initial numbers
# in_org_admin, administrator, string of length 7
# in_gene_list, generator of the organization, ["manipulation", "date", "time"]
# in_editor, current editor, string of length 7
# in_loc_list, location, ["city", "region"]
# in_member_of_org_list= [[mani_num 0, enabled or not (1/0), member_num,
# En_name or A/V name (0/1), name-0, name-1, name-2,
# issuer's org_num, org's date, org's time].
# [mani_num 1, enabled or not (1/0), member_num,
# En_name or A/V name (0/1), name-0, name-1, name-2,
# issuer's org_num, org's date, org's time].
# [mani_num 2, enabled or not (1/0), member_num,
# En_name or A/V name (0/1), name-0, name-1, name-2,
# issuer's org_num, org's date, org's time]. ...]
# length 10 per each
# output: string
file_sep = ";"+"\u0009"+"\u000a"
file_sub_sep = ","+"\u0009"
English_name_capital = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z")
English_name_capital_1 = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z",
"0", "1", "2", "3", "4",
"5", "6", "7", "8", "9")
English_org_name_other = (" ", "-", "'", "‘", "’", "&",
"/", ".", ":", "(", ")")
numeric_digits = ("0", "1", "2", "3", "4",
"5", "6", "7", "8", "9")
regions_short = ('nam', 'cam', 'car', 'sam', 'weu',
'seu', 'neu', 'eeu', 'naf', 'eaf',
'maf', 'saf', 'waf', 'eas', 'sea',
'nas', 'cas', 'sas', 'me', 'omi',
'ome', 'opo', 'oau', 'int', 'other')
regions = ("nam - Northern America",
"cam - Central America",
"car - Caribbean",
"sam - South America",
"weu - Western Europe",
"seu - Southern Europe",
"neu - Northern Europe",
"eeu - Eastern Europe",
"naf - North Africa",
"eaf - East Africa",
"maf - Middle Africa",
"saf - Southern Africa",
"waf - West Africa",
"eas - East Asia",
"sea - Southeast Asia",
"nas - North Asia / Siberia",
"cas - Central Asia",
"sas - South Asia",
"me - Western Asia / Middle East",
"omi - Micronesia",
"ome - Melanesia",
"opo - Polynesia",
"oau - Australasia",
"int - Internation",
"other - Other")
out_str = ""
temp_bool = True
if isinstance(in_org_num, str):
if isinstance(in_gene_list, list) | isinstance(in_gene_list, tuple):
if len(in_gene_list) == 3:
if (isinstance(in_gene_list[0], str) & isinstance(in_gene_list[1], str) &
isinstance(in_gene_list[2], str)):
temp_str_0 = in_gene_list[1].strip()
if len(temp_str_0) == 10:
if not temp_str_0[0] in numeric_digits:
temp_bool = False
elif not temp_str_0[1] in numeric_digits:
temp_bool = False
elif not temp_str_0[2] in numeric_digits:
temp_bool = False
elif not temp_str_0[3] in numeric_digits:
temp_bool = False
elif temp_str_0[4] != "-":
temp_bool = False
elif not temp_str_0[5] in numeric_digits:
temp_bool = False
elif not temp_str_0[6] in numeric_digits:
temp_bool = False
elif temp_str_0[7] != "-":
temp_bool = False
elif not temp_str_0[8] in numeric_digits:
temp_bool = False
elif not temp_str_0[9] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_0 = int(temp_str_0[0:4])
temp_num_1 = int(temp_str_0[5:7])
temp_num_2 = int(temp_str_0[8:10])
else:
temp_bool = False
if temp_bool:
temp_str_1 = in_gene_list[2].strip()
if len(temp_str_1) == 5:
if not temp_str_1[0] in numeric_digits:
temp_bool = False
elif not temp_str_1[1] in numeric_digits:
temp_bool = False
elif temp_str_1[2] != ":":
temp_bool = False
elif not temp_str_1[3] in numeric_digits:
temp_bool = False
elif not temp_str_1[4] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_3 = int(temp_str_1[0:2])
temp_num_4 = int(temp_str_1[3:5])
else:
temp_bool = False
if temp_bool:
in_org_num = in_org_num.strip()
temp_bool = num_organization_valid(in_org_num,
temp_num_0, temp_num_1, temp_num_2,
temp_num_3, temp_num_4)
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
out_str = out_str+in_org_num
out_str = out_str+file_sub_sep
out_str = out_str+num_organization_64_2_16(in_org_num)
if temp_bool:
        if isinstance(in_org_name, str):
in_org_name = in_org_name.strip()
temp_num_0 = len(in_org_name)
if temp_num_0 == 0:
temp_bool = False
            elif temp_num_0 == 1:
temp_str_2 = in_org_name[0].upper()
if not temp_str_2 in English_name_capital_1:
temp_bool = False
else:
temp_str_2 = in_org_name[0].upper()
if temp_str_2 in English_name_capital_1:
for n in range(1, temp_num_0):
temp_str_2 = in_org_name[n].upper()
if ((not temp_str_2 in English_name_capital_1) &
(not temp_str_2 in English_org_name_other)):
temp_bool = False
break
else:
temp_bool = False
if temp_bool:
if isinstance(in_org_init_num, str):
in_org_init_num = in_org_init_num.strip()
if len(in_org_init_num) == 3:
temp_str_2 = ""
for n in range(3):
temp_str_3 = in_org_init_num[n].upper()
if temp_str_3 in English_name_capital:
temp_str_2 = temp_str_2+temp_str_3
else:
temp_bool = False
break
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
in_org_init_num = temp_str_2
if in_org_num[0:3].upper() == in_org_init_num:
out_str = out_str+file_sep
out_str = out_str+in_org_name
out_str = out_str+file_sub_sep
out_str = out_str+in_org_init_num
else:
temp_bool = False
if temp_bool:
if isinstance(in_org_admin, str):
in_org_admin = in_org_admin.strip()
if len(in_org_admin) == 7:
out_str = out_str+file_sep
out_str = out_str+in_org_admin
temp_str_2 = in_gene_list[0].strip()
if len(temp_str_2) == 7:
out_str = out_str+file_sep
out_str = out_str+temp_str_2
out_str = out_str+file_sub_sep
out_str = out_str+temp_str_0
out_str = out_str+file_sub_sep
out_str = out_str+temp_str_1
in_editor = in_editor.strip()
out_str = out_str+file_sep
out_str = out_str+in_editor
if len(in_editor) == 7:
temp_time_now = datetime.utcnow()
temp_num_0 = temp_time_now.year
temp_str_0 = ""
if temp_num_0 < 10:
temp_str_0 = temp_str_0+"000"+str(temp_num_0)
elif temp_num_0 < 100:
temp_str_0 = temp_str_0+"00"+str(temp_num_0)
elif temp_num_0 < 1000:
temp_str_0 = temp_str_0+"0"+str(temp_num_0)
elif temp_num_0 < 10000:
temp_str_0 = temp_str_0+str(temp_num_0)
temp_str_0 = temp_str_0+"-"
temp_num_0 = temp_time_now.month
if temp_num_0 < 10:
temp_str_0 = temp_str_0+"0"+str(temp_num_0)
elif temp_num_0 < 100:
temp_str_0 = temp_str_0+str(temp_num_0)
temp_str_0 = temp_str_0+"-"
temp_num_0 = temp_time_now.day
if temp_num_0 < 10:
temp_str_0 = temp_str_0+"0"+str(temp_num_0)
elif temp_num_0 < 100:
temp_str_0 = temp_str_0+str(temp_num_0)
out_str = out_str+file_sub_sep
out_str = out_str+temp_str_0
temp_str_0 = ""
temp_num_0 = temp_time_now.hour
if temp_num_0 < 10:
temp_str_0 = temp_str_0+"0"+str(temp_num_0)
elif temp_num_0 < 100:
temp_str_0 = temp_str_0+str(temp_num_0)
temp_str_0 = temp_str_0+":"
temp_num_0 = temp_time_now.minute
if temp_num_0 < 10:
temp_str_0 = temp_str_0+"0"+str(temp_num_0)
elif temp_num_0 < 100:
temp_str_0 = temp_str_0+str(temp_num_0)
out_str = out_str+file_sub_sep
out_str = out_str+temp_str_0
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if isinstance(in_loc_list, list) | isinstance(in_loc_list, tuple):
if len(in_loc_list) == 2:
if isinstance(in_loc_list[0], str) & isinstance(in_loc_list[1], str):
temp_str_0 = in_loc_list[0].strip()
temp_str_1 = in_loc_list[1].strip()
temp_num_0 = -1
for n in range(25):
if regions[n] == temp_str_1:
temp_num_0 = n
break
if temp_num_0 >= 0:
temp_num_1 = len(temp_str_0)
if temp_num_1 == 1:
temp_str_2 = temp_str_0[0].upper()
if not temp_str_2 in English_name_capital_1:
temp_bool = False
elif temp_num_1 > 1:
temp_str_2 = temp_str_0[0].upper()
if temp_str_2 in English_name_capital_1:
for n in range(1, temp_num_1):
                                    temp_str_2 = temp_str_0[n].upper()
if ((not temp_str_2 in English_name_capital_1) &
(not temp_str_2 in English_org_name_other)):
temp_bool = False
break
else:
temp_bool = False
if temp_bool:
out_str = out_str+file_sep
out_str = out_str+temp_str_0
out_str = out_str+file_sub_sep
out_str = out_str+regions_short[temp_num_0]
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if isinstance(in_member_of_org_list, list) | isinstance(in_member_of_org_list, tuple):
temp_num_0 = len(in_member_of_org_list)
if (temp_num_0 > 0) & (temp_num_0 <= 777):
temp_mani_str_list = []
temp_mani_num_list = []
temp_mani_num_enable_list = []
for n in range(temp_num_0):
temp_list_1 = in_member_of_org_list[n]
if not temp_list_1 is None:
temp_bool_1 = True
if isinstance(temp_list_1, list) | isinstance(temp_list_1, tuple):
if len(temp_list_1) == 10:
temp_str_0 = ""
if isinstance(temp_list_1[0], str):
temp_str_1 = temp_list_1[0].strip()
if not temp_str_1 in temp_mani_str_list:
if number_manipulation_valid(temp_str_1, in_org_num):
temp_str_0 = temp_str_0+temp_str_1
temp_mani_num_list.append(temp_str_1)
else:
temp_bool_1 = False
else:
temp_bool_1 = False
else:
temp_bool_1 = False
if temp_bool_1:
if isinstance(temp_list_1[1], bool):
temp_str_0 = temp_str_0+file_sub_sep
if temp_list_1[1]:
temp_str_0 = temp_str_0+"1"
else:
temp_str_0 = temp_str_0+"0"
temp_mani_num_enable_list.append(temp_list_1[1])
else:
temp_bool_1 = False
if temp_bool_1:
if isinstance(temp_list_1[2], str):
temp_str_1 = temp_list_1[2].strip()
if len(temp_str_1) == 14:
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_1
else:
temp_bool_1 = False
else:
temp_bool_1 = False
if temp_bool_1:
if isinstance(temp_list_1[3], str):
temp_str_2 = temp_list_1[3].strip()
if temp_str_2 == "en":
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+"0"
elif temp_str_2 == "vn":
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+"1"
else:
temp_bool_1 = False
else:
temp_bool_1 = False
if temp_bool_1:
if (isinstance(temp_list_1[4], str) & isinstance(temp_list_1[5], str) &
isinstance(temp_list_1[6], str)):
temp_str_3 = temp_list_1[4].strip()
temp_str_4 = temp_list_1[5].strip()
temp_str_5 = temp_list_1[6].strip()
if temp_str_2 == "en":
if len(temp_str_3) > 0:
if English_name_valid([temp_str_3, temp_str_4, temp_str_5]):
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_3
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_4
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_5
else:
temp_bool_1 = False
else:
temp_bool_1 = False
else:
if len(temp_str_4) > 0:
if virtual_name_valid([temp_str_3, temp_str_4, temp_str_5]):
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_3
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_4
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_5
temp_str_3 = temp_str_4
else:
temp_bool_1 = False
else:
temp_bool_1 = False
else:
temp_bool_1 = False
if temp_bool_1:
if (isinstance(temp_list_1[7], str) & isinstance(temp_list_1[8], str) &
isinstance(temp_list_1[9], str)):
temp_str_4 = temp_list_1[7].strip()
temp_str_5 = temp_list_1[8].strip()
temp_str_6 = temp_list_1[9].strip()
if len(temp_str_4) == 14:
temp_bool_1 = temp_str_4[0:3].upper() == in_org_init_num
else:
temp_bool_1 = False
if temp_bool_1:
if len(temp_str_5) == 10:
if not temp_str_5[0] in numeric_digits:
temp_bool_1 = False
elif not temp_str_5[1] in numeric_digits:
temp_bool_1 = False
elif not temp_str_5[2] in numeric_digits:
temp_bool_1 = False
elif not temp_str_5[3] in numeric_digits:
temp_bool_1 = False
elif temp_str_5[4] != "-":
temp_bool_1 = False
elif not temp_str_5[5] in numeric_digits:
temp_bool_1 = False
elif not temp_str_5[6] in numeric_digits:
temp_bool_1 = False
elif temp_str_5[7] != "-":
temp_bool_1 = False
elif not temp_str_5[8] in numeric_digits:
temp_bool_1 = False
elif not temp_str_5[9] in numeric_digits:
temp_bool_1 = False
if temp_bool_1:
temp_num_1 = int(temp_str_5[0:4])
temp_num_2 = int(temp_str_5[5:7])
temp_num_3 = int(temp_str_5[8:10])
else:
temp_bool_1 = False
if temp_bool_1:
if len(temp_str_6) == 5:
if not temp_str_6[0] in numeric_digits:
temp_bool_1 = False
elif not temp_str_6[1] in numeric_digits:
temp_bool_1 = False
elif temp_str_6[2] != ":":
temp_bool_1 = False
elif not temp_str_6[3] in numeric_digits:
temp_bool_1 = False
elif not temp_str_6[4] in numeric_digits:
temp_bool_1 = False
if temp_bool_1:
temp_num_4 = int(temp_str_6[0:2])
temp_num_5 = int(temp_str_6[3:5])
else:
temp_bool_1 = False
if temp_bool_1:
if num_organization_valid(temp_str_4,
temp_num_1, temp_num_2, temp_num_3,
temp_num_4, temp_num_5):
if temp_str_2 == "en":
temp_bool_1 = num_member_valid(temp_str_1,
organization_number = temp_str_4)
else:
temp_bool_1 = num_member_valid(temp_str_1,
virtual_name = temp_str_3,
organization_number = temp_str_4)
if temp_bool_1:
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_4
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_5
temp_str_0 = temp_str_0+file_sub_sep
temp_str_0 = temp_str_0+temp_str_6
else:
temp_bool_1 = False
else:
temp_bool_1 = False
else:
temp_bool_1 = False
else:
temp_bool_1 = False
if temp_bool_1:
temp_mani_str_list.append(temp_str_0)
else:
temp_bool = False
break
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
temp_num_0 = len(temp_mani_str_list)
temp_num_1 = -1
temp_str_1 = in_org_admin.strip()
temp_num_2 = -1
temp_str_2 = in_editor.strip()
temp_num_3 = -1
temp_str_3 = in_gene_list[0].strip()
for n in range(temp_num_0):
if temp_num_1 < 0:
if temp_mani_num_list[n] == temp_str_1:
temp_num_1 = n
if temp_num_2 < 0:
if temp_mani_num_list[n] == temp_str_2:
temp_num_2 = n
if temp_num_3 < 0:
if temp_mani_num_list[n] == temp_str_3:
temp_num_3 = n
if (temp_num_1 >= 0) & (temp_num_2 >= 0) & (temp_num_3 >= 0):
break
if (temp_num_1 >= 0) & (temp_num_2 >= 0) & (temp_num_3 >= 0):
if (temp_mani_num_enable_list[temp_num_1]) & (temp_mani_num_enable_list[temp_num_2]):
out_str = out_str+file_sep
out_str = out_str+str(temp_num_0)
for n in range(temp_num_0):
out_str = out_str+file_sep
out_str = out_str+temp_mani_str_list[n]
else:
temp_bool = False
else:
temp_bool = False
if not temp_bool:
out_str = None
return out_str
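# ---------------------------------------------------------------------------
# Hedged usage sketch for forming_str_text_org (not part of the original
# code).  Every number below is a placeholder and must be checksum-valid in a
# real call; the admin, editor and generator manipulation numbers must also
# appear in the member list, with admin and editor enabled.
#
#   org_text = forming_str_text_org(
#       "<14-char org number>", "Example Organization Ltd", "EXA",
#       "<7-char admin manipulation number>",
#       ["<7-char generator manipulation number>", "2020-01-31", "12:30"],
#       "<7-char editor manipulation number>",
#       ["Example City", "weu - Western Europe"],
#       [["<7-char manipulation number>", True, "<14-char member number>", "en",
#         "Given", "", "Family",
#         "<14-char issuer org number>", "2020-01-31", "12:30"]])
#   # Returns the ';<TAB><LF>'-separated record string, or None on any
#   # validation failure.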
def reading_str_text_org(in_str):
# reading string text of organization
# input: in_str, string
# output: [organization info list,
# manipulation info list,
# manipulation number list,
# enabled number list]
file_read_sep = ";"+"\u0009"
file_sub_sep = ","+"\u0009"
English_name_capital = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z")
English_name_capital_1 = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z",
"0", "1", "2", "3", "4",
"5", "6", "7", "8", "9")
English_org_name_other = (" ", "-", "'", "‘", "’", "&",
"/", ".", ":", "(", ")")
numeric_digits = ("0", "1", "2", "3", "4",
"5", "6", "7", "8", "9")
regions_short = ('nam', 'cam', 'car', 'sam', 'weu',
'seu', 'neu', 'eeu', 'naf', 'eaf',
'maf', 'saf', 'waf', 'eas', 'sea',
'nas', 'cas', 'sas', 'me', 'omi',
'ome', 'opo', 'oau', 'int', 'other')
regions = ("nam - Northern America",
"cam - Central America",
"car - Caribbean",
"sam - South America",
"weu - Western Europe",
"seu - Southern Europe",
"neu - Northern Europe",
"eeu - Eastern Europe",
"naf - North Africa",
"eaf - East Africa",
"maf - Middle Africa",
"saf - Southern Africa",
"waf - West Africa",
"eas - East Asia",
"sea - Southeast Asia",
"nas - North Asia / Siberia",
"cas - Central Asia",
"sas - South Asia",
"me - Western Asia / Middle East",
"omi - Micronesia",
"ome - Melanesia",
"opo - Polynesia",
"oau - Australasia",
"int - Internation",
"other - Other")
org_info_list = []
mani_info_list = []
mani_num_list = []
mani_enabled_list = []
temp_bool = True
if isinstance(in_str, str):
temp_str_list_0 = in_str.split(file_read_sep)
temp_len_0 = len(temp_str_list_0)
if temp_len_0 >= 8:
temp_str_0 = temp_str_list_0[6].strip()
temp_len_1 = len(temp_str_0)
if temp_len_1 > 0:
for n in range(temp_len_1):
if not temp_str_0[n] in numeric_digits:
temp_bool = False
break
if temp_bool:
temp_len_1 = int(temp_str_0)
temp_bool = temp_len_1+7 == temp_len_0
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
temp_str_list_1 = temp_str_list_0[0].split(file_sub_sep)
if len(temp_str_list_1) == 2:
temp_str_0 = temp_str_list_1[0].strip()
temp_str_1 = temp_str_list_1[1].strip()
if (len(temp_str_0) == 14) & (len(temp_str_1) == 21):
org_info_list.append(temp_str_0)
org_info_list.append(temp_str_1)
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
temp_str_list_1 = temp_str_list_0[1].split(file_sub_sep)
if len(temp_str_list_1) == 2:
temp_str_0 = temp_str_list_1[0].strip()
temp_str_1 = temp_str_list_1[1].strip()
org_init_num = temp_str_1
if len(temp_str_1) == 3:
temp_str_2 = temp_str_1[1]
temp_str_3 = temp_str_1[2]
temp_str_1 = temp_str_1[0]
if ((temp_str_1 in English_name_capital) & (temp_str_2 in English_name_capital) &
(temp_str_3 in English_name_capital)):
if org_info_list[0][0:3].upper() == org_init_num:
temp_len_2 = len(temp_str_0)
if temp_len_2 < 1:
temp_bool = False
elif temp_len_2 == 1:
temp_bool = temp_str_0[0] in English_name_capital_1
else:
if temp_str_0[0] in English_name_capital_1:
for n in range(1, temp_len_2):
if ((not temp_str_0[n].upper() in English_name_capital_1) &
(not temp_str_0[n] in English_org_name_other)):
temp_bool = False
break
if temp_bool:
org_info_list.append(temp_str_0)
org_info_list.append((temp_str_1, temp_str_2, temp_str_3))
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
temp_str_0 = temp_str_list_0[2].strip()
if len(temp_str_0) == 7:
org_info_list.append(temp_str_0)
else:
temp_bool = False
if temp_bool:
temp_str_list_1 = temp_str_list_0[3].split(file_sub_sep)
if len(temp_str_list_1) == 3:
temp_str_0 = temp_str_list_1[0].strip()
temp_str_1 = temp_str_list_1[1].strip()
temp_str_2 = temp_str_list_1[2].strip()
if len(temp_str_0) == 7:
if len(temp_str_1) == 10:
if not temp_str_1[0] in numeric_digits:
temp_bool = False
elif not temp_str_1[1] in numeric_digits:
temp_bool = False
elif not temp_str_1[2] in numeric_digits:
temp_bool = False
elif not temp_str_1[3] in numeric_digits:
temp_bool = False
elif temp_str_1[4] != "-":
temp_bool = False
elif not temp_str_1[5] in numeric_digits:
temp_bool = False
elif not temp_str_1[6] in numeric_digits:
temp_bool = False
elif temp_str_1[7] != "-":
temp_bool = False
elif not temp_str_1[8] in numeric_digits:
temp_bool = False
elif not temp_str_1[9] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_0 = int(temp_str_1[0:4])
temp_num_1 = int(temp_str_1[5:7])
temp_num_2 = int(temp_str_1[8:10])
if not temp_str_2[0] in numeric_digits:
temp_bool = False
elif not temp_str_2[1] in numeric_digits:
temp_bool = False
elif temp_str_2[2] != ":":
temp_bool = False
elif not temp_str_2[3] in numeric_digits:
temp_bool = False
elif not temp_str_2[4] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_3 = int(temp_str_2[0:2])
temp_num_4 = int(temp_str_2[3:5])
if num_organization_valid(org_info_list[0],
temp_num_0, temp_num_1, temp_num_2,
temp_num_3, temp_num_4):
org_info_list.append((temp_str_0, temp_str_1, temp_str_2))
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
temp_str_list_1 = temp_str_list_0[4].split(file_sub_sep)
if len(temp_str_list_1) == 3:
temp_str_0 = temp_str_list_1[0].strip()
temp_str_1 = temp_str_list_1[1].strip()
temp_str_2 = temp_str_list_1[2].strip()
if len(temp_str_0) == 7:
if len(temp_str_1) == 10:
if not temp_str_1[0] in numeric_digits:
temp_bool = False
elif not temp_str_1[1] in numeric_digits:
temp_bool = False
elif not temp_str_1[2] in numeric_digits:
temp_bool = False
elif not temp_str_1[3] in numeric_digits:
temp_bool = False
elif temp_str_1[4] != "-":
temp_bool = False
elif not temp_str_1[5] in numeric_digits:
temp_bool = False
elif not temp_str_1[6] in numeric_digits:
temp_bool = False
elif temp_str_1[7] != "-":
temp_bool = False
elif not temp_str_1[8] in numeric_digits:
temp_bool = False
elif not temp_str_1[9] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_5 = int(temp_str_1[0:4])
temp_num_6 = int(temp_str_1[5:7])
temp_num_7 = int(temp_str_1[8:10])
if not temp_str_2[0] in numeric_digits:
temp_bool = False
elif not temp_str_2[1] in numeric_digits:
temp_bool = False
elif temp_str_2[2] != ":":
temp_bool = False
elif not temp_str_2[3] in numeric_digits:
temp_bool = False
elif not temp_str_2[4] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_8 = int(temp_str_2[0:2])
temp_num_9 = int(temp_str_2[3:5])
if temp_num_5 < temp_num_0:
temp_bool = False
elif temp_num_5 == temp_num_0:
if temp_num_6 < temp_num_1:
temp_bool = False
elif temp_num_6 == temp_num_1:
if temp_num_7 < temp_num_2:
temp_bool = False
elif temp_num_7 == temp_num_2:
if temp_num_8 < temp_num_3:
temp_bool = False
elif temp_num_8 == temp_num_3:
if temp_num_9 < temp_num_4:
temp_bool = False
if temp_bool:
if (temp_num_8 >= 0) & (temp_num_8 < 24):
if (temp_num_9 >= 0) & (temp_num_9 < 60):
if temp_num_6 in (4, 6, 9, 11):
if (temp_num_7 < 1) | (temp_num_7 > 30):
temp_bool = False
elif temp_num_6 in (1, 3, 5, 7, 8, 10, 12):
if (temp_num_7 < 1) | (temp_num_7 > 31):
temp_bool = False
elif temp_num_6 == 2:
if temp_num_5%400 == 0:
if (temp_num_7 < 1) | (temp_num_7 > 29):
temp_bool = False
elif temp_num_5%100 == 0:
if (temp_num_7 < 1) | (temp_num_7 > 28):
temp_bool = False
elif temp_num_5%4 == 0:
if (temp_num_7 < 1) | (temp_num_7 > 29):
temp_bool = False
else:
if (temp_num_7 < 1) | (temp_num_7 > 28):
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
org_info_list.append((temp_str_0, temp_str_1, temp_str_2))
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
temp_str_list_1 = temp_str_list_0[5].split(file_sub_sep)
if len(temp_str_list_1) == 2:
temp_str_0 = temp_str_list_1[0].strip()
temp_str_1 = temp_str_list_1[1].strip()
temp_num_0 = -1
for n in range(25):
if regions_short[n] == temp_str_1:
temp_num_0 = n
break
if temp_num_0 >= 0:
temp_len_2 = len(temp_str_0)
if temp_len_2 == 1:
if not temp_str_0[0] in English_name_capital_1:
temp_bool = False
elif temp_len_2 > 1:
if temp_str_0[0] in English_name_capital_1:
for n in range(1, temp_len_2):
if ((not temp_str_0[n].upper() in English_name_capital_1) &
(not temp_str_0[n] in English_org_name_other)):
temp_bool = False
break
else:
temp_bool = False
if temp_bool:
org_info_list.append([temp_str_0, regions[temp_num_0]])
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
for n in range(7, temp_len_0):
temp_str_list_1 = temp_str_list_0[n].split(file_sub_sep)
if len(temp_str_list_1) == 10:
temp_str_list_2 = []
for n1 in range(10):
temp_str_list_2.append(temp_str_list_1[n1].strip())
temp_str_list_3 = []
if not temp_str_list_2[0] in mani_num_list:
if number_manipulation_valid(temp_str_list_2[0], org_info_list[0]):
temp_str_list_3.append(temp_str_list_2[0])
mani_num_list.append(temp_str_list_2[0])
if temp_str_list_2[1] == "0":
temp_str_list_3.append(False)
mani_enabled_list.append(False)
elif temp_str_list_2[1] == "1":
temp_str_list_3.append(True)
mani_enabled_list.append(True)
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if len(temp_str_list_2[2]) == 14:
temp_str_list_3.append(temp_str_list_2[2])
if temp_str_list_2[3] == "0":
temp_str_list_3.append("en")
temp_str_list_4 = [temp_str_list_2[4],
temp_str_list_2[5],
temp_str_list_2[6]]
if English_name_valid(temp_str_list_4):
if num_member_valid(temp_str_list_2[2],
organization_number = temp_str_list_2[7]):
temp_str_list_3.append(temp_str_list_2[4])
temp_str_list_3.append(temp_str_list_2[5])
temp_str_list_3.append(temp_str_list_2[6])
else:
temp_bool = False
else:
temp_bool = False
elif temp_str_list_2[3] == "1":
temp_str_list_3.append("vn")
temp_str_list_4 = [temp_str_list_2[4],
temp_str_list_2[5],
temp_str_list_2[6]]
if virtual_name_valid(temp_str_list_4):
if num_member_valid(temp_str_list_2[2],
virtual_name = temp_str_list_2[5],
organization_number = temp_str_list_2[7]):
temp_str_list_3.append(temp_str_list_2[4])
temp_str_list_3.append(temp_str_list_2[5])
temp_str_list_3.append(temp_str_list_2[6])
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if len(temp_str_list_2[7]) == 14:
temp_bool = temp_str_list_2[7][0:3].upper() == org_init_num
else:
temp_bool = False
if temp_bool:
if not temp_str_list_2[8][0] in numeric_digits:
temp_bool = False
elif not temp_str_list_2[8][1] in numeric_digits:
temp_bool = False
elif not temp_str_list_2[8][2] in numeric_digits:
temp_bool = False
elif not temp_str_list_2[8][3] in numeric_digits:
temp_bool = False
elif temp_str_list_2[8][4] != "-":
temp_bool = False
elif not temp_str_list_2[8][5] in numeric_digits:
temp_bool = False
elif not temp_str_list_2[8][6] in numeric_digits:
temp_bool = False
elif temp_str_list_2[8][7] != "-":
temp_bool = False
elif not temp_str_list_2[8][8] in numeric_digits:
temp_bool = False
elif not temp_str_list_2[8][9] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_0 = int(temp_str_list_2[8][0:4])
temp_num_1 = int(temp_str_list_2[8][5:7])
temp_num_2 = int(temp_str_list_2[8][8:10])
if not temp_str_list_2[9][0] in numeric_digits:
temp_bool = False
elif not temp_str_list_2[9][1] in numeric_digits:
temp_bool = False
elif temp_str_list_2[9][2] != ":":
temp_bool = False
elif not temp_str_list_2[9][3] in numeric_digits:
temp_bool = False
elif not temp_str_list_2[9][4] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_3 = int(temp_str_list_2[9][0:2])
temp_num_4 = int(temp_str_list_2[9][3:5])
if num_organization_valid(temp_str_list_2[7],
temp_num_0, temp_num_1, temp_num_2,
temp_num_3, temp_num_4):
temp_str_list_3.append(temp_str_list_2[7])
temp_str_list_3.append(temp_str_list_2[8])
temp_str_list_3.append(temp_str_list_2[9])
else:
temp_bool = False
if temp_bool:
mani_info_list.append(tuple(temp_str_list_3))
else:
break
if temp_bool:
temp_num_0 = -1
temp_str_0 = org_info_list[4]
temp_num_1 = -1
temp_str_1 = org_info_list[5][0]
temp_num_2 = -1
temp_str_2 = org_info_list[6][0]
for n in range(temp_len_1):
if temp_num_0 < 0:
if mani_num_list[n] == temp_str_0:
temp_num_0 = n
if temp_num_1 < 0:
if mani_num_list[n] == temp_str_1:
temp_num_1 = n
if temp_num_2 < 0:
if mani_num_list[n] == temp_str_2:
temp_num_2 = n
if (temp_num_0 >= 0) & (temp_num_1 >= 0) & (temp_num_2 >= 0):
break
if (temp_num_0 >= 0) & (temp_num_1 >= 0) & (temp_num_2 >= 0):
temp_bool = (mani_enabled_list[temp_num_0]) & (mani_enabled_list[temp_num_1])
else:
temp_bool = False
if temp_bool:
out_tuple = (org_info_list, mani_info_list, mani_num_list, mani_enabled_list)
else:
out_tuple = None
return out_tuple
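# ---------------------------------------------------------------------------
# Hedged usage sketch for reading_str_text_org (not part of the original
# code): it parses a record produced by forming_str_text_org.
#
#   parsed = reading_str_text_org(org_text)
#   if parsed is not None:
#       org_info_list, mani_info_list, mani_num_list, mani_enabled_list = parsed
#   # parsed is None when the member count, any field format or any of the
#   # number cross-checks fails.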
def forming_str_text_member(in_num_list, in_en_name_list,
in_vn_name_list, in_date_list,
in_org_list):
# forming string text of member
# input: in_num_list, numbers, [mix number, member number]
# in_en_name_list, English name, [given, middle, family]
# in_vn_name_list, another name / virtual name, [type, name, addition]
# in_date_list, issuing date of member, [date, time]
# in_org_list, issuing organization, [organization number, date, time, manipulation number]
# output: string
file_sep = ";"+"\u0009"+"\u000a"
file_sub_sep = ","+"\u0009"
numeric_digits = ("0", "1", "2", "3", "4",
"5", "6", "7", "8", "9")
out_str = "member_info.iden"
temp_bool = True
if isinstance(in_num_list, list) | isinstance(in_num_list, tuple):
if len(in_num_list) == 2:
if isinstance(in_num_list[0], str) & isinstance(in_num_list[1], str):
temp_str_0 = in_num_list[0].strip()
temp_str_1 = in_num_list[1].strip()
if (len(temp_str_0) == 21) & (len(temp_str_1) == 14):
out_str = out_str+file_sep
out_str = out_str+temp_str_0
out_str = out_str+file_sub_sep
out_str = out_str+temp_str_1
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if isinstance(in_en_name_list, list) | isinstance(in_en_name_list, tuple):
if len(in_en_name_list) == 3:
if (isinstance(in_en_name_list[0], str) & isinstance(in_en_name_list[1], str) &
isinstance(in_en_name_list[2], str)):
temp_list_0 = [in_en_name_list[0].strip(),
in_en_name_list[1].strip(),
in_en_name_list[2].strip()]
if English_name_valid(temp_list_0):
out_str = out_str+file_sep
out_str = out_str+temp_list_0[0]
out_str = out_str+file_sub_sep
out_str = out_str+temp_list_0[1]
out_str = out_str+file_sub_sep
out_str = out_str+temp_list_0[2]
temp_str_2 = temp_list_0[0]
temp_str_3 = temp_list_0[2]
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if isinstance(in_vn_name_list, list) | isinstance(in_vn_name_list, tuple):
if len(in_vn_name_list) == 3:
if (isinstance(in_vn_name_list[0], str) & isinstance(in_vn_name_list[1], str) &
isinstance(in_vn_name_list[2], str)):
temp_list_0 = [in_vn_name_list[0].strip(),
in_vn_name_list[1].strip(),
in_vn_name_list[2].strip()]
if virtual_name_valid(temp_list_0):
temp_str_4 = temp_list_0[1]
if (len(temp_str_2) > 0) | (len(temp_str_4) > 0):
out_str = out_str+file_sep
out_str = out_str+temp_list_0[0]
out_str = out_str+file_sub_sep
out_str = out_str+temp_list_0[1]
out_str = out_str+file_sub_sep
out_str = out_str+temp_list_0[2]
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if isinstance(in_date_list, list) | isinstance(in_date_list, tuple):
if len(in_date_list) == 2:
if isinstance(in_date_list[0], str) & isinstance(in_date_list[1], str):
temp_str_5 = in_date_list[0].strip()
if len(temp_str_5) == 10:
if not temp_str_5[0] in numeric_digits:
temp_bool = False
elif not temp_str_5[1] in numeric_digits:
temp_bool = False
elif not temp_str_5[2] in numeric_digits:
temp_bool = False
elif not temp_str_5[3] in numeric_digits:
temp_bool = False
elif temp_str_5[4] != "-":
temp_bool = False
elif not temp_str_5[5] in numeric_digits:
temp_bool = False
elif not temp_str_5[6] in numeric_digits:
temp_bool = False
elif temp_str_5[7] != "-":
temp_bool = False
elif not temp_str_5[8] in numeric_digits:
temp_bool = False
elif not temp_str_5[9] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_0 = int(temp_str_5[0:4])
temp_num_1 = int(temp_str_5[5:7])
temp_num_2 = int(temp_str_5[8:10])
else:
temp_bool = False
if temp_bool:
temp_str_6 = in_date_list[1].strip()
if len(temp_str_6) == 5:
if not temp_str_6[0] in numeric_digits:
temp_bool = False
elif not temp_str_6[1] in numeric_digits:
temp_bool = False
elif temp_str_6[2] != ":":
temp_bool = False
elif not temp_str_6[3] in numeric_digits:
temp_bool = False
elif not temp_str_6[4] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_3 = int(temp_str_6[0:2])
temp_num_4 = int(temp_str_6[3:5])
else:
temp_bool = False
if temp_bool:
if num_mix_valid(temp_str_0, temp_str_2, temp_str_3,
temp_num_0, temp_num_1, temp_num_2,
temp_num_3, temp_num_4):
out_str = out_str+file_sep
out_str = out_str+temp_str_5
out_str = out_str+file_sub_sep
out_str = out_str+temp_str_6
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if isinstance(in_org_list, list) | isinstance(in_org_list, tuple):
if len(in_org_list) == 4:
if (isinstance(in_org_list[0], str) & isinstance(in_org_list[1], str) &
isinstance(in_org_list[2], str) & isinstance(in_org_list[3], str)):
temp_str_2 = in_org_list[0].strip()
if num_member_valid(temp_str_1, temp_str_0,
temp_str_4, temp_str_2):
temp_str_5 = in_org_list[1].strip()
if len(temp_str_5) == 10:
if not temp_str_5[0] in numeric_digits:
temp_bool = False
elif not temp_str_5[1] in numeric_digits:
temp_bool = False
elif not temp_str_5[2] in numeric_digits:
temp_bool = False
elif not temp_str_5[3] in numeric_digits:
temp_bool = False
elif temp_str_5[4] != "-":
temp_bool = False
elif not temp_str_5[5] in numeric_digits:
temp_bool = False
elif not temp_str_5[6] in numeric_digits:
temp_bool = False
elif temp_str_5[7] != "-":
temp_bool = False
elif not temp_str_5[8] in numeric_digits:
temp_bool = False
elif not temp_str_5[9] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_5 = int(temp_str_5[0:4])
temp_num_6 = int(temp_str_5[5:7])
temp_num_7 = int(temp_str_5[8:10])
else:
temp_bool = False
if temp_bool:
temp_str_6 = in_org_list[2].strip()
if len(temp_str_6) == 5:
if not temp_str_6[0] in numeric_digits:
temp_bool = False
elif not temp_str_6[1] in numeric_digits:
temp_bool = False
elif temp_str_6[2] != ":":
temp_bool = False
elif not temp_str_6[3] in numeric_digits:
temp_bool = False
elif not temp_str_6[4] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_8 = int(temp_str_6[0:2])
temp_num_9 = int(temp_str_6[3:5])
if temp_num_5 > temp_num_0:
temp_bool = False
elif temp_num_5 == temp_num_0:
if temp_num_6 > temp_num_1:
temp_bool = False
elif temp_num_6 == temp_num_1:
if temp_num_7 > temp_num_2:
temp_bool = False
elif temp_num_7 == temp_num_2:
if temp_num_8 > temp_num_3:
temp_bool = False
elif temp_num_8 == temp_num_3:
if temp_num_9 > temp_num_4:
temp_bool = False
else:
temp_bool = False
if temp_bool:
if num_organization_valid(temp_str_2,
temp_num_5, temp_num_6, temp_num_7,
temp_num_8, temp_num_9):
temp_str_3 = in_org_list[3].strip()
if number_manipulation_valid(temp_str_3, temp_str_2):
out_str = out_str+file_sep
out_str = out_str+temp_str_2
out_str = out_str+file_sub_sep
out_str = out_str+temp_str_5
out_str = out_str+file_sub_sep
out_str = out_str+temp_str_6
out_str = out_str+file_sub_sep
out_str = out_str+temp_str_3
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if not temp_bool:
out_str = None
return out_str
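# ---------------------------------------------------------------------------
# Hedged usage sketch for forming_str_text_member (not part of the original
# code); every number below is a placeholder and must be checksum-valid, and
# the organization's issuing date/time may not be later than the member's.
#
#   member_text = forming_str_text_member(
#       ["<21-char mix number>", "<14-char member number>"],
#       ["Given", "Middle", "Family"],             # English name
#       ["type", "Virtual Name", "addition"],      # virtual / another name
#       ["2020-01-31", "12:30"],                   # member issuing date and time
#       ["<14-char org number>", "2020-01-30", "09:00",
#        "<7-char manipulation number>"])          # issuing organization
#   # Returns the record string starting with 'member_info.iden', or None.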
def reading_str_text_mem(in_str):
    # reading string text of member
# input: in_str, string
# output: out_list = [out_num_list,
# out_en_name_list,
# out_vn_name_list,
# out_date_list,
# out_org_list]
file_read_sep = ";"+"\u0009"
file_sub_sep = ","+"\u0009"
numeric_digits = ("0", "1", "2", "3", "4",
"5", "6", "7", "8", "9")
title_str = "member_info.iden"
temp_bool = True
out_list = []
if isinstance(in_str, str):
temp_str_list_0 = in_str.split(file_read_sep)
temp_len_0 = len(temp_str_list_0)
if temp_len_0 == 6:
temp_str_0 = temp_str_list_0[0].strip()
if temp_str_0 == title_str:
temp_str_list_1 = temp_str_list_0[1].split(file_sub_sep)
if len(temp_str_list_1) == 2:
temp_str_1 = temp_str_list_1[0].strip()
temp_str_2 = temp_str_list_1[1].strip()
if (len(temp_str_1) == 21) & (len(temp_str_2) == 14):
out_list.append((temp_str_1, temp_str_2))
temp_str_list_1 = temp_str_list_0[2].split(file_sub_sep)
if len(temp_str_list_1) == 3:
temp_str_1 = temp_str_list_1[0].strip()
temp_str_2 = temp_str_list_1[1].strip()
temp_str_3 = temp_str_list_1[2].strip()
temp_tuple_0 = (temp_str_1, temp_str_2, temp_str_3)
if English_name_valid(temp_tuple_0):
out_list.append(temp_tuple_0)
temp_str_list_1 = temp_str_list_0[3].split(file_sub_sep)
if len(temp_str_list_1) == 3:
temp_str_1 = temp_str_list_1[0].strip()
temp_str_2 = temp_str_list_1[1].strip()
temp_str_3 = temp_str_list_1[2].strip()
temp_tuple_0 = (temp_str_1, temp_str_2, temp_str_3)
if virtual_name_valid(temp_tuple_0):
out_list.append(temp_tuple_0)
temp_str_list_1 = temp_str_list_0[4].split(file_sub_sep)
if len(temp_str_list_1) == 2:
temp_str_1 = temp_str_list_1[0].strip()
if len(temp_str_1) == 10:
if not temp_str_1[0] in numeric_digits:
temp_bool = False
elif not temp_str_1[1] in numeric_digits:
temp_bool = False
elif not temp_str_1[2] in numeric_digits:
temp_bool = False
elif not temp_str_1[3] in numeric_digits:
temp_bool = False
elif temp_str_1[4] != "-":
temp_bool = False
elif not temp_str_1[5] in numeric_digits:
temp_bool = False
elif not temp_str_1[6] in numeric_digits:
temp_bool = False
elif temp_str_1[7] != "-":
temp_bool = False
elif not temp_str_1[8] in numeric_digits:
temp_bool = False
elif not temp_str_1[9] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_0 = int(temp_str_1[0:4])
temp_num_1 = int(temp_str_1[5:7])
temp_num_2 = int(temp_str_1[8:10])
else:
temp_bool = False
if temp_bool:
temp_str_2 = temp_str_list_1[1].strip()
if len(temp_str_2) == 5:
if not temp_str_2[0] in numeric_digits:
temp_bool = False
elif not temp_str_2[1] in numeric_digits:
temp_bool = False
elif temp_str_2[2] != ":":
temp_bool = False
elif not temp_str_2[3] in numeric_digits:
temp_bool = False
elif not temp_str_2[4] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_3 = int(temp_str_2[0:2])
temp_num_4 = int(temp_str_2[3:5])
else:
temp_bool = False
if temp_bool:
if num_mix_valid(out_list[0][0], out_list[1][0], out_list[1][2],
temp_num_0, temp_num_1, temp_num_2,
temp_num_3, temp_num_4):
out_list.append((temp_str_1, temp_str_2))
else:
temp_bool = False
if temp_bool:
temp_str_list_1 = temp_str_list_0[5].split(file_sub_sep)
if len(temp_str_list_1) == 4:
temp_str_1 = temp_str_list_1[1].strip()
if len(temp_str_1) == 10:
if not temp_str_1[0] in numeric_digits:
temp_bool = False
elif not temp_str_1[1] in numeric_digits:
temp_bool = False
elif not temp_str_1[2] in numeric_digits:
temp_bool = False
elif not temp_str_1[3] in numeric_digits:
temp_bool = False
elif temp_str_1[4] != "-":
temp_bool = False
elif not temp_str_1[5] in numeric_digits:
temp_bool = False
elif not temp_str_1[6] in numeric_digits:
temp_bool = False
elif temp_str_1[7] != "-":
temp_bool = False
elif not temp_str_1[8] in numeric_digits:
temp_bool = False
elif not temp_str_1[9] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_5 = int(temp_str_1[0:4])
temp_num_6 = int(temp_str_1[5:7])
temp_num_7 = int(temp_str_1[8:10])
else:
temp_bool = False
if temp_bool:
temp_str_2 = temp_str_list_1[2].strip()
if len(temp_str_2) == 5:
if not temp_str_2[0] in numeric_digits:
temp_bool = False
elif not temp_str_2[1] in numeric_digits:
temp_bool = False
elif temp_str_2[2] != ":":
temp_bool = False
elif not temp_str_2[3] in numeric_digits:
temp_bool = False
elif not temp_str_2[4] in numeric_digits:
temp_bool = False
if temp_bool:
temp_num_8 = int(temp_str_2[0:2])
temp_num_9 = int(temp_str_2[3:5])
if temp_num_5 > temp_num_0:
temp_bool = False
elif temp_num_5 == temp_num_0:
if temp_num_6 > temp_num_1:
temp_bool = False
elif temp_num_6 == temp_num_1:
if temp_num_7 > temp_num_2:
temp_bool = False
elif temp_num_7 == temp_num_2:
if temp_num_8 > temp_num_3:
temp_bool = False
elif temp_num_8 == temp_num_3:
if temp_num_9 > temp_num_4:
temp_bool = False
else:
temp_bool = False
if temp_bool:
temp_str_3 = temp_str_list_1[0].strip()
temp_str_4 = temp_str_list_1[3].strip()
if number_manipulation_valid(temp_str_4, temp_str_3):
if num_organization_valid(temp_str_3,
temp_num_5, temp_num_6, temp_num_7,
temp_num_8, temp_num_9):
if num_member_valid(out_list[0][1], out_list[0][0],
out_list[2][1], temp_str_3):
out_list.append((temp_str_3, temp_str_1,
temp_str_2, temp_str_4))
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
else:
temp_bool = False
if not temp_bool:
out_list = None
return out_list
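# ---------------------------------------------------------------------------
# Hedged usage sketch for reading_str_text_mem (not part of the original
# code): it parses a record produced by forming_str_text_member.
#
#   parsed = reading_str_text_mem(member_text)
#   if parsed is not None:
#       num_list, en_name_list, vn_name_list, date_list, org_list = parsed
#   # parsed is None when the 'member_info.iden' title, any field format or
#   # any of the mix / member / organization cross-checks fails.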
def English_name_valid(in_list):
English_name_capital = ("A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z")
English_name_other = (" ", "-", "'")
out_bool = True
temp_len_0 = len(in_list[0])
temp_len_1 = len(in_list[1])
temp_len_2 = len(in_list[2])
if temp_len_0 < 1:
out_bool = (temp_len_1 < 1) & (temp_len_2 < 1)
else:
if temp_len_0 == 1:
out_bool = in_list[0][0] in English_name_capital
else:
if in_list[0][0] in English_name_capital:
for n in range(1, temp_len_0):
temp_str_0 = in_list[0][n].upper()
if ((not temp_str_0 in English_name_capital) &
(not temp_str_0 in English_name_other)):
out_bool = False
break
else:
out_bool = False
if out_bool:
if temp_len_1 > 0:
if temp_len_1 == 1:
out_bool = in_list[1][0] in English_name_capital
else:
if in_list[1][0] in English_name_capital:
for n in range(1, temp_len_1):
temp_str_0 = in_list[1][n].upper()
if ((not temp_str_0 in English_name_capital) &
(not temp_str_0 in English_name_other)):
out_bool = False
break
else:
out_bool = False
if out_bool:
if temp_len_2 > 0:
if temp_len_2 == 1:
out_bool = in_list[2][0] in English_name_capital
else:
if in_list[2][0] in English_name_capital:
for n in range(1, temp_len_2):
temp_str_0 = in_list[2][n].upper()
if ((not temp_str_0 in English_name_capital) &
(not temp_str_0 in English_name_other)):
out_bool = False
break
else:
out_bool = False
return out_bool
def virtual_name_valid(in_list):
out_bool = True
temp_len_1 = len(in_list[1])
temp_len_2 = len(in_list[2])
if in_list[0].upper() in ("NONE", "NULL", "NA"):
out_bool = (temp_len_1 < 1) & (temp_len_2 < 1)
else:
if temp_len_1 > 0:
for n in range(temp_len_1):
temp_str_0 = in_list[1][n]
temp_num_0 = ord(temp_str_0)
if (temp_num_0 >= 32) & (temp_num_0 < 65536):
if temp_str_0 in ("'", '"'):
out_bool = False
break
else:
out_bool = False
break
if out_bool:
for n in range(temp_len_2):
temp_str_0 = in_list[2][n]
temp_num_0 = ord(temp_str_0)
if (temp_num_0 >= 32) & (temp_num_0 < 65536):
if temp_str_0 in ("'", '"'):
out_bool = False
break
else:
out_bool = False
break
else:
out_bool = False
return out_bool
| 53.656685
| 162
| 0.39414
|
6a1c87730c97358337e72b4d48ee0060dc4cf823
| 1,557
|
py
|
Python
|
src/modules/podcast.py
|
StaticallyTypedRice/PodcastDownloader
|
b2d5bc2a5b22ba5b2dc537fdafc588aedd67bcb5
|
[
"MIT"
] | 2
|
2019-08-07T09:23:26.000Z
|
2020-02-29T05:06:58.000Z
|
src/modules/podcast.py
|
StaticallyTypedRice/PodcastDownloader
|
b2d5bc2a5b22ba5b2dc537fdafc588aedd67bcb5
|
[
"MIT"
] | null | null | null |
src/modules/podcast.py
|
StaticallyTypedRice/PodcastDownloader
|
b2d5bc2a5b22ba5b2dc537fdafc588aedd67bcb5
|
[
"MIT"
] | 1
|
2019-03-26T10:00:49.000Z
|
2019-03-26T10:00:49.000Z
|
from xml.etree.ElementTree import Element
from modules.xml import get_unique_xml_element
class Episode(object):
'''The podcast episode object.'''
def __init__(self, item: Element):
'''Create an Episode object from an RSS item.
An example RSS file:
<rss xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" version="2.0">
<channel>
<!-- RSS metadata -->
<item>
<guid>1234</guid>
<title>Episode Title</title>
<description>Episode Description</description>
<pubDate>Date Published</pubDate>
<enclosure url="https://example.com/episode.mp3" type="audio/mpeg" />
</item>
<!-- ... -->
</channel>
</rss>
Arguments:
element: The <item> element for the episode.
'''
# Parse the RSS item
self.guid = get_unique_xml_element(item, 'guid').text
self.title = get_unique_xml_element(item, 'title').text
self.date = get_unique_xml_element(item, 'pubDate').text
self.url = get_unique_xml_element(item, 'enclosure').get('url')
# The file name is the final item in the URL path
self.file_name = self.url.split('/')[-1]
# The file extension is the final item in the file name
self.file_extension = self.file_name.split('.')[-1]
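# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): builds Episode objects
# from the example feed shown in the class docstring.  It assumes only the
# standard library plus this module's own get_unique_xml_element import, and
# that get_unique_xml_element(parent, tag) returns the single matching child
# element, as its use in __init__ implies.  The guard keeps normal imports of
# this module unaffected.
if __name__ == '__main__':
    from xml.etree.ElementTree import fromstring
    _EXAMPLE_RSS = (
        '<rss xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" version="2.0">'
        '<channel>'
        '<item>'
        '<guid>1234</guid>'
        '<title>Episode Title</title>'
        '<description>Episode Description</description>'
        '<pubDate>Date Published</pubDate>'
        '<enclosure url="https://example.com/episode.mp3" type="audio/mpeg" />'
        '</item>'
        '</channel>'
        '</rss>'
    )
    # Parse the feed, then wrap each <item> element in an Episode object.
    _channel = fromstring(_EXAMPLE_RSS).find('channel')
    _episodes = [Episode(item) for item in _channel.findall('item')]
    for _episode in _episodes:
        print(_episode.guid, _episode.title, _episode.file_name, _episode.file_extension)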
| 34.6
| 94
| 0.524727
|
f541fd930354f3199934f23879f56e14f1542103
| 36,466
|
py
|
Python
|
pirates/leveleditor/worldData/interior_shanty_blacksmith.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/leveleditor/worldData/interior_shanty_blacksmith.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/leveleditor/worldData/interior_shanty_blacksmith.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1156270917.73dzlu0': {'Type': 'Building Interior','Name': '','Instanced': True,'Objects': {'1165366099.89kmuller': {'Type': 'Barrel','DisableCollision': True,'Hpr': VBase3(15.46, 0.0, 0.0),'Pos': Point3(17.533, -10.644, 0.0),'Scale': VBase3(0.771, 0.771, 0.771),'Visual': {'Color': (0.7900000214576721, 0.6499999761581421, 0.5299999713897705, 1.0),'Model': 'models/props/barrel_grey'}},'1165366349.25kmuller': {'Type': 'Ship_Props','DisableCollision': False,'Hpr': VBase3(49.657, 19.679, -6.937),'Pos': Point3(16.491, -27.1, 0.0),'Scale': VBase3(0.376, 0.376, 0.376),'Visual': {'Model': 'models/props/anchor'}},'1165366420.5kmuller': {'Type': 'Rope','DisableCollision': True,'Hpr': VBase3(-0.822, -0.157, 0.27),'Pos': Point3(16.474, -23.769, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/rope_pile'}},'1165366677.64kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(-145.679, 0.0, 0.0),'Pos': Point3(13.11, -26.619, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/prop_group04'}},'1165367285.42kmuller': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(123.31, 0.0, 0.0),'Pos': Point3(0.224, 16.59, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/stool_shanty'}},'1166037975.06kmuller': {'Type': 'Prop_Groups','DisableCollision': False,'Hpr': VBase3(-69.155, 0.0, 0.0),'Pos': Point3(17.521, -6.903, 0.0),'Scale': VBase3(0.41, 0.41, 0.41),'Visual': {'Model': 'models/props/prop_group_C'}},'1167169073.65kmuller': {'Type': 'Furniture','DisableCollision': True,'Hpr': VBase3(92.404, 0.0, 0.0),'Pos': Point3(-18.691, -5.048, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.800000011920929, 0.9300000071525574, 0.8399999737739563, 1.0),'Model': 'models/props/cabinet_shanty'}},'1167169123.12kmuller': {'Type': 'Furniture','DisableCollision': True,'Hpr': VBase3(0.564, 0.0, 0.0),'Objects': {'1182199471.12kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(77.048, -74.135, 84.546),'Pos': Point3(1.393, 0.329, 4.358),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_dagger'}}},'Pos': Point3(-3.118, 28.546, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.5199999809265137, 0.5199999809265137, 0.5299999713897705, 1.0),'Model': 'models/props/cabinet_shanty_low'}},'1167969107.08kmuller': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-1.459, -0.384, 21.454),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/chandelier_jail'}},'1167969169.5kmuller': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': VBase3(88.006, 0.0, 0.0),'Pos': Point3(-19.73, 24.267, 10.652),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/lamp_candle'}},'1167969206.95kmuller': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': VBase3(93.881, 0.0, 0.0),'Pos': Point3(-17.684, -0.376, 10.545),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/lamp_candle'}},'1167969245.06kmuller': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': VBase3(-88.997, 0.0, 0.0),'Pos': Point3(17.922, -0.478, 10.568),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/lamp_candle'}},'1167969443.92kmuller': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(-176.841, 0.0, 0.0),'Objects': {'1181086102.85kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(70.216, -0.191, 0.0),'Pos': Point3(0.342, 2.697, 3.031),'Scale': 
VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_dagger'}}},'Pos': Point3(-3.068, 15.649, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.5799999833106995, 0.47999998927116394, 0.4000000059604645, 1.0),'Model': 'models/props/table_shanty_2'}},'1167969485.88kmuller': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(-85.857, 0.0, 0.0),'Pos': Point3(18.367, -16.557, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.800000011920929, 0.7900000214576721, 0.8299999833106995, 1.0),'Model': 'models/props/bench_shanty_1'}},'1172092924.3kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(22.373, 0.0, 0.0),'Pos': Point3(-8.829, 28.293, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_anvilA'}},'1172092937.32kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-10.481, 17.021, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_anvil_block'}},'1172092948.96kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(89.558, 0.0, 0.0),'Pos': Point3(-10.478, 21.542, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_anvilblock'}},'1172093074.6kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-17.346, 0.01, -0.033),'Pos': Point3(-15.958, 11.585, 2.999),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_bellows'}},'1172093082.58kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-1.388, 17.812, 2.968),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_bottleA'}},'1172093091.96kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-3.887, 18.135, 2.954),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_bottleB'}},'1172093101.55kmuller': {'Type': 'Interior_furnishings','DisableCollision': True,'Hpr': VBase3(90.019, 0.0, 0.0),'Pos': Point3(-18.311, 9.001, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_furness'}},'1172093111.05kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-180.0, 85.454, 71.678),'Pos': Point3(-9.941, 16.81, 1.693),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hammerA'}},'1172093118.39kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.0, 85.464, 0.0),'Pos': Point3(-1.206, 16.48, 3.043),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hammerB'}},'1172093125.44kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(118.162, 0.0, 0.0),'Pos': Point3(-15.628, 20.507, 3.008),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hot_iron'}},'1172093414.38kmuller': {'Type': 'Interior_furnishings','DisableCollision': True,'Hpr': VBase3(90.019, 0.0, 0.0),'Pos': Point3(-18.358, 21.57, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_furness'}},'1172093706.66kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(89.558, 0.0, 0.0),'Pos': Point3(-10.686, 8.661, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_anvilblock'}},'1172093886.97kmuller': {'Type': 'Furniture','DisableCollision': True,'Hpr': VBase3(90.207, 0.0, 0.0),'Objects': 
{'1182194607.77kmuller': {'Type': 'Interior_furnishings','DisableCollision': True,'Holiday': '','Hpr': VBase3(-90.207, 0.0, 0.0),'Pos': Point3(-0.156, -0.351, 2.971),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/shop_weapons_rack_floor'}},'1182194637.62kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(4.495, -20.427, -85.074),'Pos': Point3(-0.727, -0.797, 3.441),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_cutlass'}},'1182194680.35kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-0.563, -22.564, 88.533),'Pos': Point3(0.339, -0.818, 3.493),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_cutlass_shiny'}}},'Pos': Point3(-18.652, 3.899, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/table_shanty'}},'1172093925.14kmuller': {'Type': 'Log_Stack','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-18.618, 13.314, 0.0),'Scale': VBase3(0.725, 0.725, 0.725),'Visual': {'Model': 'models/props/Log_stack_a'}},'1172093932.94kmuller': {'Type': 'Log_Stack','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-18.393, 27.83, 0.0),'Scale': VBase3(0.676, 0.676, 0.676),'Visual': {'Model': 'models/props/Log_stack_b'}},'1172093972.0kmuller': {'Type': 'Log_Stack','DisableCollision': True,'Hpr': VBase3(91.886, 0.0, 0.0),'Pos': Point3(-19.247, 16.273, 0.0),'Scale': VBase3(0.675, 0.675, 0.675),'Visual': {'Model': 'models/props/Log_stack_c'}},'1172094047.5kmuller': {'Type': 'Bucket','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-11.19, 19.666, 0.0),'Scale': VBase3(0.469, 0.469, 0.469),'Visual': {'Model': 'models/props/bucket_handles'}},'1172094087.71kmuller': {'Type': 'Bucket','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-10.664, 11.007, 0.0),'Scale': VBase3(0.649, 0.649, 0.649),'Visual': {'Model': 'models/props/bucket'}},'1172094367.3kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.001, 85.392, -0.001),'Pos': Point3(-2.658, 29.77, 2.838),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hammerB'}},'1172094393.74kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-44.397, 84.882, -47.166),'Pos': Point3(-3.802, 28.283, 2.832),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hammerB'}},'1172094628.5kmuller': {'Type': 'Bucket','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-18.131, -8.295, 0.0),'Scale': VBase3(0.692, 0.692, 0.692),'Visual': {'Model': 'models/props/bucket'}},'1172094869.38kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.0, -84.545, 0.0),'Pos': Point3(-18.806, 4.238, 2.695),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hammerA'}},'1172094923.41kmuller': {'Type': 'Log_Stack','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(16.183, 13.736, 0.0),'Scale': VBase3(0.665, 0.665, 0.665),'Visual': {'Model': 'models/props/Log_stack_b'}},'1172099606.28kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-89.587, 0.0, 0.0),'Pos': Point3(18.43, 21.626, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_furness'}},'1172099644.25kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-90.386, 0.0, 0.0),'Pos': Point3(9.881, 
22.605, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_anvilblock'}},'1172099662.3kmuller': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(-179.431, 0.0, 0.0),'Pos': Point3(14.635, 16.23, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/table_shanty'}},'1172099688.41kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.001, 87.923, -0.001),'Pos': Point3(15.845, 16.855, 3.061),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hammerB'}},'1172099743.93kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(66.696, 0.0, 0.276),'Pos': Point3(13.692, 16.37, 2.962),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_bellows'}},'1172099774.32kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-0.857, 0.0, 0.0),'Pos': Point3(14.971, 20.513, 2.962),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hot_iron'}},'1172099802.57kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-0.857, 0.0, 0.0),'Pos': Point3(18.155, 20.676, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hot_iron'}},'1172099819.82kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-29.03, 0.0, 0.0),'Pos': Point3(15.571, 21.99, 3.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_hot_iron'}},'1172099903.39kmuller': {'Type': 'Log_Stack','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(16.828, 10.25, 0.0),'Scale': VBase3(0.636, 0.636, 0.636),'Visual': {'Model': 'models/props/Log_stack_c'}},'1172099942.32kmuller': {'Type': 'Bucket','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(9.746, 20.053, 0.0),'Scale': VBase3(0.65, 0.65, 0.65),'Visual': {'Model': 'models/props/bucket_handles'}},'1172099984.19kmuller': {'Type': 'Cart','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-12.85, -25.109, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cart_broken'}},'1174586399.6dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','FlickRate': 0.5,'Flickering': True,'Hpr': VBase3(24.357, -13.874, -1.589),'Intensity': '0.3485','LightType': 'DIRECTIONAL','Pos': Point3(-3.537, -21.055, 10.179),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/light_tool_bulb'}},'1174586686.81dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','FlickRate': 0.5,'Flickering': True,'Hpr': VBase3(-15.155, -13.271, 12.04),'Intensity': '0.8030','LightType': 'DIRECTIONAL','Pos': Point3(0.046, -31.432, 10.042),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.75, 0.8, 0.85, 1.0),'Model': 'models/props/light_tool_bulb'}},'1174587910.91dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','FlickRate': 0.5,'Flickering': True,'Hpr': Point3(0.0, 0.0, 0.0),'Intensity': '0.0152','LightType': 'AMBIENT','Pos': Point3(-16.533, 15.855, 4.339),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1174588046.46dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','FlickRate': 0.5,'Flickering': True,'Hpr': Point3(0.0, 0.0, 0.0),'Intensity': '0.1212','LightType': 'AMBIENT','Pos': 
Point3(15.065, 29.083, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1181085740.6kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-178.775, 0.0, 0.0),'Pos': Point3(20.107, 3.818, 5.33),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_rack'}},'1181085794.23kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(19.293, 3.692, 5.33),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_rack_swords'}},'1181085900.65kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-89.34, 0.0, 0.0),'Pos': Point3(6.475, 29.909, 2.839),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_rack'}},'1181085958.56kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(90.139, 0.0, 0.0),'Pos': Point3(6.499, 29.01, 2.857),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_rack_swords'}},'1181244293.15kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(4.239, 0.0, 0.0),'Pos': Point3(-18.248, -4.809, 5.528),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_weapons_rack_table'}},'1181244326.59kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(91.179, -0.423, -5.939),'Pos': Point3(-18.278, -3.707, 6.239),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_cutlass_shiny'}},'1181244451.43kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(6.213, -89.507, -148.667),'Pos': Point3(-14.885, 7.222, 3.023),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_cutlass'}},'1181244506.53kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-18.582, -4.879, 2.63),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_weapons_rack_table'}},'1181244530.99kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.887, 3.703, 76.525),'Pos': Point3(-18.563, -5.708, 3.207),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_bsmith_dagger'}},'1182541232.89kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-4.82, 10.307, 0.0),'Scale': VBase3(0.952, 0.952, 0.952),'Visual': {'Color': (0.7200000286102295, 0.699999988079071, 0.5899999737739563, 1.0),'Model': 'models/props/shop_bsmith_bucket_swords'}},'1182541244.14kmuller': {'Type': 'Bucket','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-4.82, 10.307, 0.0),'Scale': VBase3(0.952, 0.952, 0.952),'Visual': {'Model': 'models/props/bucket'}},'1182541292.93kmuller': {'Type': 'Bucket','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(0.987, 27.995, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/bucket'}},'1182541299.93kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(0.987, 27.995, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6700000166893005, 0.7900000214576721, 0.7799999713897705, 1.0),'Model': 'models/props/shop_bsmith_bucket_swords'}},'1182541350.71kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(18.602, 1.481, 0.47),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 
'models/props/shop_bsmith_bucket_swords'}},'1182541360.48kmuller': {'Type': 'Bucket','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(18.513, 1.591, 0.006),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/bucket_handles'}},'1182541446.51kmuller': {'Type': 'Crate','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(16.274, 26.509, 0.034),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/crates_group_2'}},'1182541482.45kmuller': {'Type': 'Barrel','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(12.459, 27.249, 0.0),'Scale': VBase3(0.624, 0.624, 0.624),'Visual': {'Color': (0.47999998927116394, 0.44999998807907104, 0.4099999964237213, 1.0),'Model': 'models/props/barrel_grey'}},'1182541532.56kmuller': {'Type': 'Log_Stack','DisableCollision': True,'Holiday': '','Hpr': VBase3(55.794, 0.0, 0.0),'Pos': Point3(-16.8, 26.399, 3.027),'Scale': VBase3(0.362, 0.362, 0.362),'VisSize': '','Visual': {'Model': 'models/vegetation/gen_log_group02'}},'1185394665.68kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-2.038, 29.503, -0.677),'Scale': VBase3(1.584, 1.0, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1185405709.2kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-17.885, 19.485, -1.238),'Scale': VBase3(1.09, 5.167, 2.052),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1185405823.76kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-19.664, -0.289, -0.821),'Scale': VBase3(1.0, 3.509, 1.961),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1185405924.59kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(107.4, 0.0, 0.0),'Pos': Point3(-5.7, -27.557, -0.863),'Scale': VBase3(0.532, 1.0, 2.022),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185406084.68kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-142.795, 0.0, 0.0),'Pos': Point3(13.518, -24.542, 0.044),'Scale': VBase3(0.789, 1.0, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185406112.04kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-170.347, 0.0, 0.0),'Pos': Point3(18.385, -21.939, 0.0),'Scale': VBase3(0.382, 1.0, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185406130.26kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-107.472, 0.0, 0.0),'Pos': Point3(9.777, -29.089, 0.0),'Scale': VBase3(0.481, 1.0, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185406166.98kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(17.932, 10.497, -0.018),'Scale': VBase3(1.0, 1.405, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1185406212.2kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-42.767, 0.0, 0.0),'Pos': Point3(11.73, 25.936, 0.0),'Scale': VBase3(1.219, 1.0, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185406269.93kmuller': {'Type': 'Crate','DisableCollision': True,'Hpr': VBase3(28.588, 0.0, 0.0),'Pos': Point3(10.849, 28.465, -0.019),'Scale': VBase3(0.598, 0.306, 
1.009),'Visual': {'Color': (0.47999998927116394, 0.44999998807907104, 0.4099999964237213, 1.0),'Model': 'models/props/crate'}},'1185406447.25kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(17.916, -9.813, -0.156),'Scale': VBase3(1.0, 1.0, 1.082),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1230932523.02akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(0.047, -29.861, 0.067),'Scale': VBase3(1.0, 1.0, 1.0)},'1257810355.28caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-111.841, -8.474, 18.09),'Pos': Point3(-4.707, 9.271, 2.31),'Scale': VBase3(1.316, 1.316, 1.316),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}},'1257810375.58caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(0.0, 0.0, 13.598),'Pos': Point3(-3.868, 10.024, 2.249),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}},'1257810525.24caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(66.286, -0.84, -0.868),'Pos': Point3(-17.565, 8.613, 10.638),'Scale': VBase3(1.381, 1.381, 1.381),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoStocking02_winter09'}},'1257810608.12caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-67.777, 1.208, -0.0),'Pos': Point3(-17.634, 21.981, 10.744),'Scale': VBase3(1.381, 1.381, 1.381),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoStocking02_winter09'}},'1257810667.57caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(119.636, 4.093, -0.0),'Pos': Point3(17.654, 21.347, 9.829),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoStocking02_winter09'}},'1257810772.8caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-3.019, 0.768, 36.924),'Pos': Point3(-8.853, -29.932, 9.962),'Scale': VBase3(2.397, 2.397, 2.397),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}},'1257810792.67caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-0.272, -0.0, 0.0),'Pos': Point3(0.009, 29.795, 8.278),'Scale': VBase3(1.562, 1.562, 1.562),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}},'1257810815.17caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-178.447, 0.192, 32.97),'Pos': Point3(-1.205, 29.808, 9.347),'Scale': VBase3(2.397, 2.397, 2.397),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}},'1257810863.83caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-3.019, 0.768, 36.924),'Pos': Point3(12.31, -29.948, 9.703),'Scale': VBase3(2.397, 2.397, 2.397),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}},'1257810863.86caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-177.777, 0.0, 37.705),'Pos': Point3(9.84, -29.887, 9.665),'Scale': VBase3(2.397, 2.397, 2.397),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}},'1257810863.88caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 
'WinterFestival','Hpr': VBase3(-177.777, 0.0, 0.0),'Pos': Point3(11.117, -29.809, 8.755),'Scale': VBase3(1.562, 1.562, 1.562),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}},'1257810958.03caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-177.777, 0.0, 37.705),'Pos': Point3(-11.324, -29.871, 9.923),'Scale': VBase3(2.397, 2.397, 2.397),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}},'1257810958.17caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-3.019, 0.768, 36.924),'Pos': Point3(-8.853, -29.932, 9.962),'Scale': VBase3(2.397, 2.397, 2.397),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}},'1257810958.2caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-177.777, 0.0, 0.0),'Pos': Point3(-10.046, -29.793, 9.014),'Scale': VBase3(1.562, 1.562, 1.562),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}},'1257811007.23caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-0.272, 0.0, 37.705),'Pos': Point3(1.289, 29.818, 9.187),'Scale': VBase3(2.397, 2.397, 2.397),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}}},'Visual': {'Color': (1.0, 0.9900000095367432, 1.0, 1.0),'Model': 'models/buildings/interior_shanty_npc_house'}}},'Node Links': [],'Layers': {},'ObjectIds': {'1156270917.73dzlu0': '["Objects"]["1156270917.73dzlu0"]','1165366099.89kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1165366099.89kmuller"]','1165366349.25kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1165366349.25kmuller"]','1165366420.5kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1165366420.5kmuller"]','1165366677.64kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1165366677.64kmuller"]','1165367285.42kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1165367285.42kmuller"]','1166037975.06kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1166037975.06kmuller"]','1167169073.65kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167169073.65kmuller"]','1167169123.12kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167169123.12kmuller"]','1167969107.08kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167969107.08kmuller"]','1167969169.5kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167969169.5kmuller"]','1167969206.95kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167969206.95kmuller"]','1167969245.06kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167969245.06kmuller"]','1167969443.92kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167969443.92kmuller"]','1167969485.88kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167969485.88kmuller"]','1172092924.3kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172092924.3kmuller"]','1172092937.32kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172092937.32kmuller"]','1172092948.96kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172092948.96kmuller"]','1172093074.6kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093074.6kmuller"]','1172093082.58kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093082.58kmuller"]','1172093091.96kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093091.96kmuller"]','1172093101.55kmuller': 
'["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093101.55kmuller"]','1172093111.05kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093111.05kmuller"]','1172093118.39kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093118.39kmuller"]','1172093125.44kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093125.44kmuller"]','1172093414.38kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093414.38kmuller"]','1172093706.66kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093706.66kmuller"]','1172093886.97kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093886.97kmuller"]','1172093925.14kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093925.14kmuller"]','1172093932.94kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093932.94kmuller"]','1172093972.0kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093972.0kmuller"]','1172094047.5kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172094047.5kmuller"]','1172094087.71kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172094087.71kmuller"]','1172094367.3kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172094367.3kmuller"]','1172094393.74kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172094393.74kmuller"]','1172094628.5kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172094628.5kmuller"]','1172094869.38kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172094869.38kmuller"]','1172094923.41kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172094923.41kmuller"]','1172099606.28kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099606.28kmuller"]','1172099644.25kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099644.25kmuller"]','1172099662.3kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099662.3kmuller"]','1172099688.41kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099688.41kmuller"]','1172099743.93kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099743.93kmuller"]','1172099774.32kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099774.32kmuller"]','1172099802.57kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099802.57kmuller"]','1172099819.82kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099819.82kmuller"]','1172099903.39kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099903.39kmuller"]','1172099942.32kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099942.32kmuller"]','1172099984.19kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172099984.19kmuller"]','1174586399.6dzlu': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1174586399.6dzlu"]','1174586686.81dzlu': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1174586686.81dzlu"]','1174587910.91dzlu': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1174587910.91dzlu"]','1174588046.46dzlu': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1174588046.46dzlu"]','1181085740.6kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1181085740.6kmuller"]','1181085794.23kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1181085794.23kmuller"]','1181085900.65kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1181085900.65kmuller"]','1181085958.56kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1181085958.56kmuller"]','1181086102.85kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167969443.92kmuller"]["Objects"]["1181086102.85kmuller"]','1181244293.15kmuller': 
'["Objects"]["1156270917.73dzlu0"]["Objects"]["1181244293.15kmuller"]','1181244326.59kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1181244326.59kmuller"]','1181244451.43kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1181244451.43kmuller"]','1181244506.53kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1181244506.53kmuller"]','1181244530.99kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1181244530.99kmuller"]','1182194607.77kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093886.97kmuller"]["Objects"]["1182194607.77kmuller"]','1182194637.62kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093886.97kmuller"]["Objects"]["1182194637.62kmuller"]','1182194680.35kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1172093886.97kmuller"]["Objects"]["1182194680.35kmuller"]','1182199471.12kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1167169123.12kmuller"]["Objects"]["1182199471.12kmuller"]','1182541232.89kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1182541232.89kmuller"]','1182541244.14kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1182541244.14kmuller"]','1182541292.93kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1182541292.93kmuller"]','1182541299.93kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1182541299.93kmuller"]','1182541350.71kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1182541350.71kmuller"]','1182541360.48kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1182541360.48kmuller"]','1182541446.51kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1182541446.51kmuller"]','1182541482.45kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1182541482.45kmuller"]','1182541532.56kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1182541532.56kmuller"]','1185394665.68kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185394665.68kmuller"]','1185405709.2kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185405709.2kmuller"]','1185405823.76kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185405823.76kmuller"]','1185405924.59kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185405924.59kmuller"]','1185406084.68kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185406084.68kmuller"]','1185406112.04kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185406112.04kmuller"]','1185406130.26kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185406130.26kmuller"]','1185406166.98kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185406166.98kmuller"]','1185406212.2kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185406212.2kmuller"]','1185406269.93kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185406269.93kmuller"]','1185406447.25kmuller': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1185406447.25kmuller"]','1230932523.02akelts': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1230932523.02akelts"]','1257810355.28caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810355.28caoconno"]','1257810375.58caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810375.58caoconno"]','1257810525.24caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810525.24caoconno"]','1257810608.12caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810608.12caoconno"]','1257810667.57caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810667.57caoconno"]','1257810772.8caoconno': 
'["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810772.8caoconno"]','1257810792.67caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810792.67caoconno"]','1257810815.17caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810815.17caoconno"]','1257810863.83caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810863.83caoconno"]','1257810863.86caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810863.86caoconno"]','1257810863.88caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810863.88caoconno"]','1257810958.03caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810958.03caoconno"]','1257810958.17caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810958.17caoconno"]','1257810958.2caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257810958.2caoconno"]','1257811007.23caoconno': '["Objects"]["1156270917.73dzlu0"]["Objects"]["1257811007.23caoconno"]'}}
extraInfo = {'camPos': Point3(0, -14, 0),'camHpr': VBase3(0, 0, 0),'focalLength': 0.852765381336,'skyState': -2,'fog': 0}
| 12,155.333333
| 36,285
| 0.68472
|
7efcb74350aeaaae8e9f87d5e3d2ab9f09ac7a72
| 1,249
|
py
|
Python
|
metalfi/src/data/dataset.py
|
CemOezcan/metalfi
|
d7a071eea0229ce621fa07e3474a26d43bfaac66
|
[
"MIT"
] | 2
|
2019-12-05T07:57:14.000Z
|
2019-12-05T13:02:08.000Z
|
metalfi/src/data/dataset.py
|
CemOezcan/metalfi
|
d7a071eea0229ce621fa07e3474a26d43bfaac66
|
[
"MIT"
] | 31
|
2019-12-05T15:14:47.000Z
|
2020-12-04T14:37:46.000Z
|
metalfi/src/data/dataset.py
|
CemOezcan/metalfi
|
d7a071eea0229ce621fa07e3474a26d43bfaac66
|
[
"MIT"
] | 1
|
2020-12-04T13:40:11.000Z
|
2020-12-04T13:40:11.000Z
|
import time
from metalfi.src.data.meta.metafeatures import MetaFeatures

class Dataset:
    """Wraps a base data set (a data frame and its target) for meta-data set construction."""

    def __init__(self, data_frame, target):
        self.__data_frame = data_frame
        self.__target = target

    def getDataFrame(self):
        return self.__data_frame

    def getTarget(self):
        return self.__target

    def trainMetaData(self):
        mf = MetaFeatures(self)

        # Compute the meta-features and time the whole characterization step.
        start_d_total = time.time()
        d_time, u_time, m_time, l_time = mf.calculateMetaFeatures()
        end_d_total = time.time()
        d_total = end_d_total - start_d_total

        # Compute the meta-targets (feature-importance scores) and time that step separately.
        start_t_total = time.time()
        targets, d, p, l, s = mf.createTarget()
        end_t_total = time.time()
        t_total = end_t_total - start_t_total

        data = mf.getMetaData()
        # Per-group runtimes of the meta-feature computation ...
        data_time = {"data": d_time, "univariate": u_time, "multivariate": m_time, "landmarking": l_time,
                     "total": d_total}
        # ... and per-importance-measure runtimes of the meta-target computation.
        target_time = {"LOFO": d, "PIMP": p, "LIME": l, "SHAP": s, "total": t_total}

        # Also return the number of base features (target column excluded) and of instances.
        return data, targets, (data_time, target_time), len(self.__data_frame.columns) - 1, len(self.__data_frame.index)

    def testMetaData(self):
        mf = MetaFeatures(self)
        mf.calculateMetaFeatures()
        return mf.getMetaData()
| 28.386364
| 120
| 0.628503
|
5a65f19c4856a706feb9cc4181d7ff71f59dd80b
| 662
|
py
|
Python
|
act/plotting/__init__.py
|
michaeltg12/ACT
|
c801ac7ac2762bdc73e1d419bc7c266512d55903
|
[
"BSD-3-Clause"
] | null | null | null |
act/plotting/__init__.py
|
michaeltg12/ACT
|
c801ac7ac2762bdc73e1d419bc7c266512d55903
|
[
"BSD-3-Clause"
] | null | null | null |
act/plotting/__init__.py
|
michaeltg12/ACT
|
c801ac7ac2762bdc73e1d419bc7c266512d55903
|
[
"BSD-3-Clause"
] | null | null | null |
"""
===========================
act.plotting (act.plotting)
===========================
.. currentmodule:: act.plotting
This module contains procedures for plotting ARM datasets.
.. autosummary::
:toctree: generated/
common.parse_ax
common.parse_ax_fig
common.get_date_format
"""
from .TimeSeriesDisplay import TimeSeriesDisplay
from .ContourDisplay import ContourDisplay
from .WindRoseDisplay import WindRoseDisplay
from .SkewTDisplay import SkewTDisplay
from .XSectionDisplay import XSectionDisplay
from .GeoDisplay import GeographicPlotDisplay
from .HistogramDisplay import HistogramDisplay
from .plot import Display
from . import common
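
# A minimal usage sketch (not part of the original module; the file names and the
# exact plot-method signatures are assumptions -- they live in the Display classes
# imported above and may differ):
#
#   import act
#   ds = act.io.armfiles.read_netcdf(some_arm_file)   # hypothetical ARM netCDF file
#   display = act.plotting.TimeSeriesDisplay(ds)
#   display.plot('temp_mean')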
| 24.518519
| 58
| 0.743202
|
059e8953ec124def6fd5eaccddc7b9dd09f5b990
| 13,923
|
py
|
Python
|
log_casp_inh/model_364.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_casp_inh/model_364.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_casp_inh/model_364.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('C6A', ['C8pro'])
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM'])
Monomer('Ligand', ['Receptor'])
Monomer('C6pro', ['C3A'])
Monomer('ParpU', ['C3A'])
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('BidM', ['BaxM'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('Xiap', ['SmacC', 'C3A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C3ub')
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C3pro', ['C8A'])
Monomer('SmacM', ['BaxA'])
Monomer('SmacC', ['Xiap'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('C6pro_0', 100.0)
Parameter('ParpU_0', 1000000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('C8A_0', 0.0)
Parameter('Xiap_0', 91000.0)
Parameter('Receptor_0', 100.0)
Parameter('C3ub_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('SmacM_0', 100000.0)
Parameter('SmacC_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('BaxA_obs', BaxA())
Observable('Ligand_obs', Ligand())
Observable('C6pro_obs', C6pro())
Observable('ParpU_obs', ParpU())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('BidM_obs', BidM())
Observable('BaxM_obs', BaxM())
Observable('C8A_obs', C8A())
Observable('Xiap_obs', Xiap())
Observable('Receptor_obs', Receptor())
Observable('C3ub_obs', C3ub())
Observable('Fadd_obs', Fadd())
Observable('C3pro_obs', C3pro())
Observable('SmacM_obs', SmacM())
Observable('SmacC_obs', SmacC())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), BaxA_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(Xiap(SmacC=None, C3A=None), Xiap_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C3ub(), C3ub_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
| 85.417178
| 598
| 0.808518
|
e88a974cef3bec036afd0508fecc288575bec87e
| 18,474
|
py
|
Python
|
jax/experimental/jax2tf/tests/primitive_harness.py
|
kosklain/jax
|
1d61cfff48ce43402cb52940a4fdeb50a2603d9b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/experimental/jax2tf/tests/primitive_harness.py
|
kosklain/jax
|
1d61cfff48ce43402cb52940a4fdeb50a2603d9b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/experimental/jax2tf/tests/primitive_harness.py
|
kosklain/jax
|
1d61cfff48ce43402cb52940a4fdeb50a2603d9b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines test inputs and invocations for JAX primitives.
Used to test various implementations of JAX primitives, e.g., against
NumPy (lax_reference) or TensorFlow.
"""
import operator
from typing import Any, Callable, Dict, Iterable, Optional, NamedTuple, Sequence, Tuple, Union
from absl import testing
from jax import config
from jax import test_util as jtu
from jax import lax
from jax import lax_linalg
from jax import numpy as jnp
import numpy as np
FLAGS = config.FLAGS
Rng = Any # A random number generator
class RandArg(NamedTuple):
"""Descriptor for a randomly generated argument.
See description of `Harness`.
"""
shape: Tuple[int, ...]
dtype: np.dtype
class StaticArg(NamedTuple):
"""Descriptor for a static argument.
See description of `Harness`.
"""
value: Any
class Harness:
"""Specifies inputs and callable for a primitive.
A harness is conceptually a callable and a list of arguments, that together
exercise a use case. The harness can optionally have additional parameters
that can be used by the test.
The arguments are specified through argument descriptors. An argument
descriptor can be:
* a numeric value or ndarray, or
* an instance of ``RandArg(shape, dtype)`` to be used with a PRNG to generate
random tensor of the given shape and type, or
* an instance of ``StaticArg(value)``. These are values that specialize the
callable, but are not exposed as external arguments.
    For example, a harness for ``lax.take(arr, indices, axis=None)`` may want
    to expose the array and the indices as external (dynamic) arguments, and
    keep the axis as a static argument (technically specializing `take` to
    an axis); an illustrative instance built this way appears right after the
    class definition:
Harness(f"take_axis={axis}",
lax.take,
[RandArg((2, 4), np.float32), np.array([-1, 0, 1]), StaticArg(axis)],
axis=axis)
"""
# Descriptive name of the harness, used as a testcase_name. Unique in a group.
name: str
# The function taking all arguments (static and dynamic).
fun: Callable
arg_descriptors: Sequence[Union[RandArg, StaticArg, Any]]
rng_factory: Callable
params: Dict[str, Any]
def __init__(self, name, fun, arg_descriptors, *,
rng_factory=jtu.rand_default, **params):
self.name = name
self.fun = fun
self.arg_descriptors = arg_descriptors
self.rng_factory = rng_factory
self.params = params
def __str__(self):
return self.name
def _arg_maker(self, arg_descriptor, rng: Rng):
if isinstance(arg_descriptor, StaticArg):
return arg_descriptor.value
if isinstance(arg_descriptor, RandArg):
return self.rng_factory(rng)(arg_descriptor.shape, arg_descriptor.dtype)
return arg_descriptor
def args_maker(self, rng: Rng) -> Sequence:
"""All-argument maker, including the static ones."""
return [self._arg_maker(ad, rng) for ad in self.arg_descriptors]
def dyn_args_maker(self, rng: Rng) -> Sequence:
"""A dynamic-argument maker, for use with `dyn_fun`."""
return [self._arg_maker(ad, rng) for ad in self.arg_descriptors
if not isinstance(ad, StaticArg)]
def dyn_fun(self, *dyn_args):
"""Invokes `fun` given just the dynamic arguments."""
all_args = self._args_from_dynargs(dyn_args)
return self.fun(*all_args)
def _args_from_dynargs(self, dyn_args: Sequence) -> Sequence:
"""All arguments, including the static ones."""
next_dynamic_argnum = 0
all_args = []
for ad in self.arg_descriptors:
if isinstance(ad, StaticArg):
all_args.append(ad.value)
else:
all_args.append(dyn_args[next_dynamic_argnum])
next_dynamic_argnum += 1
return all_args
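
# Illustrative only (not part of the original module): a tiny harness built with the
# descriptors described in the Harness docstring above, mirroring the `jnp.take`
# pattern used by `lax_gather` further down. `RandArg` defers tensor creation until a
# PRNG is supplied to `args_maker`/`dyn_args_maker`; `StaticArg(0)` bakes the axis
# into the callable, so `dyn_fun` receives only the array and the indices.
_example_take_harness = Harness(
    "example_take_axis=0",
    lambda a, i, axis: jnp.take(a, i, axis=axis),
    [RandArg((2, 4), np.float32), np.array([0, 1], dtype=np.int32), StaticArg(0)],
    axis=0)
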
def parameterized(harness_group: Iterable[Harness],
one_containing : Optional[str] = None):
"""Decorator for tests.
The tests receive a `harness` argument.
The `one_containing` parameter is useful for debugging. If given, then
picks only one harness whose name contains the string. The whole set of
parameterized tests is reduced to one test, whose name is not decorated
to make it easier to pick for running.
"""
cases = tuple(
dict(testcase_name=harness.name if one_containing is None else "",
harness=harness)
for harness in harness_group
if one_containing is None or one_containing in harness.name)
if one_containing is not None:
if not cases:
raise ValueError(f"Cannot find test case with name containing {one_containing}."
"Names are:"
"\n".join([harness.name for harness in harness_group]))
cases = cases[0:1]
return testing.parameterized.named_parameters(*cases)
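
# Illustrative only (not part of the original module): how a test class might consume
# a harness group through this decorator. The class/method names and `self.rng()` are
# hypothetical stand-ins; each generated test case receives the `harness` argument
# described in the docstring above, e.g.:
#
#   class LaxPrimitivesTest(jtu.JaxTestCase):
#     @parameterized(lax_pad, one_containing="float32")
#     def test_pad(self, harness: Harness):
#       dyn_args = harness.dyn_args_maker(self.rng())
#       harness.dyn_fun(*dyn_args)   # static args (the pad config) are already bound
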
### Harness definitions ###
###
_LAX_UNARY_ELEMENTWISE = (
lax.abs, lax.acosh, lax.asinh, lax.atanh, lax.bessel_i0e, lax.bessel_i1e,
lax.ceil, lax.cos, lax.cosh, lax.digamma, lax.erf, lax.erf_inv, lax.erfc,
lax.exp, lax.expm1, lax.floor, lax.is_finite, lax.lgamma, lax.log,
lax.log1p, lax.neg, lax.round, lax.rsqrt, lax.sign, lax.sin, lax.sinh,
lax.sqrt, lax.tan, lax.tanh)
lax_unary_elementwise = tuple(
Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
f_lax,
[arg],
lax_name=f_lax.__name__,
dtype=dtype)
for f_lax in _LAX_UNARY_ELEMENTWISE
for dtype in jtu.dtypes.all_floating
for arg in [
np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.2, 1., 1.4, 1.6], dtype=dtype)
]
)
lax_bitwise_not = tuple(
[Harness(f"{jtu.dtype_str(dtype)}",
lax.bitwise_not,
[arg],
dtype=dtype)
for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
for arg in [
np.array([-1, -3, -2, 0, 0, 2, 1, 3], dtype=dtype),
]] +
[Harness("bool",
f_lax,
[arg],
lax_name=f_lax.__name__,
dtype=np.bool_)
for f_lax in [lax.bitwise_not]
for arg in [
np.array([True, False])
]]
)
_LAX_BINARY_ELEMENTWISE = (
lax.add, lax.atan2, lax.div, lax.igamma, lax.igammac, lax.max, lax.min,
lax.nextafter, lax.rem, lax.sub)
lax_binary_elementwise = tuple(
Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
f_lax,
[arg1, arg2],
lax_name=f_lax.__name__,
dtype=dtype
)
for f_lax in _LAX_BINARY_ELEMENTWISE
for dtype in jtu.dtypes.all_floating
for arg1, arg2 in [
(np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.2, 1., 1.4, 1.6], dtype=dtype),
np.array([-1.6, 1.4, 1.0, 0.0, 0.1, 0.2, 1., 1.4, -1.6], dtype=dtype))
]
)
_LAX_BINARY_ELEMENTWISE_LOGICAL = (
lax.bitwise_and, lax.bitwise_or, lax.bitwise_xor, lax.shift_left,
)
lax_binary_elementwise_logical = tuple(
[Harness(f"{f_lax.__name__}_{jtu.dtype_str(dtype)}",
f_lax,
[arg1, arg2],
lax_name=f_lax.__name__,
dtype=dtype)
for f_lax in _LAX_BINARY_ELEMENTWISE_LOGICAL
for dtype in jtu.dtypes.all_integer + jtu.dtypes.all_unsigned
for arg1, arg2 in [
(np.array([1, 3, 2, 0, 0, 2, 1, 3], dtype=dtype),
np.array([1, 2, 3, 0, 1, 0, 2, 3], dtype=dtype))
]
] +
[Harness(f"{f_lax.__name__}_bool",
f_lax,
[arg1, arg2],
lax_name=f_lax.__name__,
dtype=np.bool_)
for f_lax in [lax.bitwise_and, lax.bitwise_or, lax.bitwise_xor]
for arg1, arg2 in [
(np.array([True, True, False, False]),
np.array([True, False, True, False])),
]
]
)
lax_betainc = tuple(
Harness(f"_{jtu.dtype_str(dtype)}",
lax.betainc,
[arg1, arg2, arg3],
dtype=dtype)
for dtype in jtu.dtypes.all_floating
for arg1, arg2, arg3 in [
(np.array([-1.6, -1.4, -1.0, 0.0, 0.1, 0.3, 1, 1.4, 1.6], dtype=dtype),
np.array([-1.6, 1.4, 1.0, 0.0, 0.2, 0.1, 1, 1.4, -1.6], dtype=dtype),
np.array([1.0, -1.0, 2.0, 1.0, 0.3, 0.3, -1.0, 2.4, 1.6],
dtype=np.float32))
]
)
_gather_input = np.arange(1000, dtype=np.float32).reshape((10, 10, 10))
lax_gather = tuple(
# Construct gather harnesses using take
[Harness(f"from_take_indices_shape={indices.shape}_axis={axis}",
lambda a, i, axis: jnp.take(a, i, axis=axis),
[_gather_input,
indices,
StaticArg(axis)])
for indices in [
# Ensure each set of indices has a distinct shape
np.array(2, dtype=np.int32),
np.array([2], dtype=np.int32),
np.array([2, 4], dtype=np.int32),
np.array([[2, 4], [5, 6]], dtype=np.int32),
np.array([0, 1, 10], dtype=np.int32), # Index out of bounds
np.array([0, 1, 2, -1], dtype=np.int32), # Index out of bounds
]
for axis in [0, 1, 2]] +
# Directly from lax.gather in lax_test.py.
[Harness(
f"_shape={shape}_idxs_shape={idxs.shape}_dnums={dnums}_slice_sizes={slice_sizes}",
lambda op, idxs, dnums, slice_sizes: lax.gather(op, idxs, dimension_numbers=dnums, slice_sizes=slice_sizes),
[RandArg(shape, np.float32),
idxs, StaticArg(dnums), StaticArg(slice_sizes)])
for shape, idxs, dnums, slice_sizes in [
((5,), np.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
((10,), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
((10, 5,), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
((10, 5), np.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
]
]
)
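# Note on the GatherDimensionNumbers used above: `offset_dims` names the output
# dimensions that index into each gathered slice, `collapsed_slice_dims` names
# the operand dimensions whose size-1 slices are dropped from the output, and
# `start_index_map` maps the components of each index vector to operand
# dimensions.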
lax_pad = tuple(
Harness(f"_inshape={jtu.format_shape_dtype_string(arg_shape, dtype)}_pads={pads}",
lax.pad,
[RandArg(arg_shape, dtype), np.array(0, dtype), StaticArg(pads)],
rng_factory=jtu.rand_small,
arg_shape=arg_shape, dtype=dtype, pads=pads)
for arg_shape in [(2, 3)]
for dtype in jtu.dtypes.all_floating + jtu.dtypes.all_integer
for pads in [
[(0, 0, 0), (0, 0, 0)], # no padding
[(1, 1, 0), (2, 2, 0)], # only positive edge padding
[(1, 2, 1), (0, 1, 0)], # edge padding and interior padding
[(0, 0, 0), (-1, -1, 0)], # negative padding
[(0, 0, 0), (-2, -2, 4)], # add big dilation then remove from edges
[(0, 0, 0), (-2, -3, 1)], # remove everything in one dimension
]
)
lax_top_k = tuple( # random testing
Harness(f"_inshape={jtu.format_shape_dtype_string(shape, dtype)}_k={k}",
lax.top_k,
[RandArg(shape, dtype), StaticArg(k)],
shape=shape,
dtype=dtype,
k=k)
for dtype in jtu.dtypes.all
for shape in [(3,), (5, 3)]
for k in [-1, 1, 3, 4]
) + tuple( # stability test
Harness(f"stability_inshape={jtu.format_shape_dtype_string(arr.shape, arr.dtype)}_k={k}",
lax.top_k,
[arr, StaticArg(k)],
shape=arr.shape,
dtype=arr.dtype,
k=k)
for arr in [
np.array([5, 7, 5, 8, 8, 5], dtype=np.int32)
]
for k in [1, 3, 6]
) + tuple( # nan/inf sorting test
Harness(f"nan_inshape={jtu.format_shape_dtype_string(arr.shape, arr.dtype)}_k={k}",
lax.top_k,
[arr, StaticArg(k)],
shape=arr.shape,
dtype=arr.dtype,
k=k)
for arr in [
np.array([+np.inf, np.nan, -np.nan, np.nan, -np.inf, 3], dtype=np.float32)
]
for k in [1, 3, 6]
)
lax_sort = tuple( # one array, random data, all axes, all dtypes
Harness(f"one_array_shape={jtu.format_shape_dtype_string(shape, dtype)}_axis={dimension}_isstable={is_stable}",
lax.sort,
[RandArg(shape, dtype), StaticArg(dimension), StaticArg(is_stable)],
shape=shape,
dimension=dimension,
dtype=dtype,
is_stable=is_stable)
for dtype in jtu.dtypes.all
for shape in [(5,), (5, 7)]
for dimension in range(len(shape))
for is_stable in [False, True]
) + tuple( # one array, potential edge cases
Harness(f"one_special_array_shape={jtu.format_shape_dtype_string(arr.shape, arr.dtype)}_axis={dimension}_isstable={is_stable}",
lax.sort,
[arr, StaticArg(dimension), StaticArg(is_stable)],
shape=arr.shape,
dimension=dimension,
dtype=arr.dtype,
is_stable=is_stable)
for arr, dimension in [
[np.array([+np.inf, np.nan, -np.nan, -np.inf, 2, 4, 189], dtype=np.float32), -1]
]
for is_stable in [False, True]
) + tuple( # several arrays, random data, all axes, all dtypes
Harness(f"multi_array_shape={jtu.format_shape_dtype_string(shape, dtype)}_axis={dimension}_isstable={is_stable}",
lambda *args: lax.sort_p.bind(*args[:-2], dimension=args[-2], is_stable=args[-1], num_keys=1),
[RandArg(shape, dtype), RandArg(shape, dtype), StaticArg(dimension), StaticArg(is_stable)],
shape=shape,
dimension=dimension,
dtype=dtype,
is_stable=is_stable)
for dtype in jtu.dtypes.all
for shape in [(5,), (5, 7)]
for dimension in range(len(shape))
for is_stable in [False, True]
)
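# Note on the multi-array sort harnesses above: they bind the `sort_p`
# primitive directly with two operands and `num_keys=1`, so the first operand
# acts as the sort key and the second is permuted along with it, mirroring a
# key/value sort.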
lax_linalg_qr = tuple(
Harness(f"multi_array_shape={jtu.format_shape_dtype_string(shape, dtype)}_fullmatrices={full_matrices}",
lax_linalg.qr,
[RandArg(shape, dtype), StaticArg(full_matrices)],
shape=shape,
dtype=dtype,
full_matrices=full_matrices)
for dtype in jtu.dtypes.all
for shape in [(1, 1), (3, 3), (3, 4), (2, 10, 5), (2, 200, 100)]
for full_matrices in [False, True]
)
lax_slice = tuple(
Harness(f"_shape={shape}_start_indices={start_indices}_limit_indices={limit_indices}_strides={strides}", # type: ignore
lax.slice,
[RandArg(shape, dtype), # type: ignore
StaticArg(start_indices), # type: ignore
StaticArg(limit_indices), # type: ignore
StaticArg(strides)], # type: ignore
shape=shape, # type: ignore
start_indices=start_indices, # type: ignore
limit_indices=limit_indices) # type: ignore
for shape, start_indices, limit_indices, strides in [
[(3,), (1,), (2,), None],
[(7,), (4,), (7,), None],
[(5,), (1,), (5,), (2,)],
[(8,), (1,), (6,), (2,)],
[(5, 3), (1, 1), (3, 2), None],
[(5, 3), (1, 1), (3, 1), None],
[(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
[(5, 3), (1, 1), (2, 1), (1, 1)],
[(5, 3), (1, 1), (5, 3), (2, 1)],
# out-of-bounds cases
[(5,), (-1,), (0,), None],
[(5,), (-1,), (1,), None],
[(5,), (-4,), (-2,), None],
[(5,), (-5,), (-2,), None],
[(5,), (-6,), (-5,), None],
[(5,), (-10,), (-9,), None],
[(5,), (-100,), (-99,), None],
[(5,), (5,), (6,), None],
[(5,), (10,), (11,), None],
[(5,), (0,), (100,), None],
]
for dtype in [np.float32]
)
# Reuse the lax_slice harnesses, but (a) pass start_indices as a dynamic argument, and (b) drop the strides.
lax_dynamic_slice = [
Harness(harness.name,
lax.dynamic_slice,
[harness.arg_descriptors[0],
np.array(list(start_indices)),
StaticArg(tuple(map(operator.sub, limit_indices, start_indices)))],
**harness.params)
for harness in lax_slice
for start_indices in [harness.params["start_indices"]]
for limit_indices in [harness.params["limit_indices"]]
]
lax_dynamic_update_slice = tuple(
Harness((f"_operand={jtu.format_shape_dtype_string(shape, dtype)}" # type: ignore
f"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}"
f"_start_indices={start_indices}"),
lax.dynamic_update_slice,
[RandArg(shape, dtype), # type: ignore
RandArg(update_shape, update_dtype), # type: ignore
np.array(start_indices)], # type: ignore
shape=shape, # type: ignore
start_indices=start_indices, # type: ignore
update_shape=update_shape) # type: ignore
for shape, start_indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
[(3,), (-1,), (1,)], # out-of-bounds
[(3,), (10,), (1,)], # out-of-bounds
[(3,), (10,), (4,)], # out-of-bounds shape too big
[(3,), (10,), (2,)], # out-of-bounds
]
for dtype, update_dtype in [
(np.float32, np.float32),
(np.float64, np.float32)
])
lax_squeeze = tuple(
Harness(f"_inshape={jtu.format_shape_dtype_string(arg_shape, dtype)}_dimensions={dimensions}", # type: ignore
lax.squeeze,
[RandArg(arg_shape, dtype), StaticArg(dimensions)], # type: ignore[has-type]
arg_shape=arg_shape, dtype=dtype, dimensions=dimensions) # type: ignore[has-type]
for arg_shape, dimensions in [
[(1,), (0,)],
[(1,), (-1,)],
[(2, 1, 4), (1,)],
[(2, 1, 4), (-2,)],
[(2, 1, 3, 1), (1,)],
[(2, 1, 3, 1), (1, 3)],
[(2, 1, 3, 1), (3,)],
[(2, 1, 3, 1), (1, -1)],
]
for dtype in [np.float32]
)
shift_inputs = [
(arg, dtype, shift_amount)
for dtype in jtu.dtypes.all_unsigned + jtu.dtypes.all_integer
for arg in [
np.array([-250, -1, 0, 1, 250], dtype=dtype),
]
for shift_amount in [0, 1, 2, 3, 7]
]
lax_shift_left = tuple(
Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}", # type: ignore
lax.shift_left,
[arg, StaticArg(np.array([shift_amount], dtype=dtype))])
for arg, dtype, shift_amount in shift_inputs
)
lax_shift_right_logical = tuple(
Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}", # type: ignore
lax.shift_right_logical,
[arg, StaticArg(np.array([shift_amount], dtype=dtype))])
for arg, dtype, shift_amount in shift_inputs
)
lax_shift_right_arithmetic = tuple(
Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}", # type: ignore
lax.shift_right_arithmetic,
[arg, StaticArg(np.array([shift_amount], dtype=dtype))])
for arg, dtype, shift_amount in shift_inputs
)