Dataset schema (column: type, reported length/value range; "nullable" marks columns that may be null):
hexsha: string (length 40) | size: int64 (3 .. 1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (3 .. 972) | max_stars_repo_name: string (6 .. 130) | max_stars_repo_head_hexsha: string (40 .. 78) | max_stars_repo_licenses: list (1 .. 10) | max_stars_count: int64 (1 .. 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (24, nullable) | max_stars_repo_stars_event_max_datetime: string (24, nullable)
max_issues_repo_path: string (3 .. 972) | max_issues_repo_name: string (6 .. 130) | max_issues_repo_head_hexsha: string (40 .. 78) | max_issues_repo_licenses: list (1 .. 10) | max_issues_count: int64 (1 .. 116k, nullable) | max_issues_repo_issues_event_min_datetime: string (24, nullable) | max_issues_repo_issues_event_max_datetime: string (24, nullable)
max_forks_repo_path: string (3 .. 972) | max_forks_repo_name: string (6 .. 130) | max_forks_repo_head_hexsha: string (40 .. 78) | max_forks_repo_licenses: list (1 .. 10) | max_forks_count: int64 (1 .. 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (24, nullable) | max_forks_repo_forks_event_max_datetime: string (24, nullable)
content: string (3 .. 1.03M) | avg_line_length: float64 (1.13 .. 941k) | max_line_length: int64 (2 .. 941k) | alphanum_fraction: float64 (0 .. 1)
Record: trac/util/tests/__init__.py | hexsha: 7d84c34bf554a96a95880cb1e002b94f919769e8 | size: 17,684 | ext: py | lang: Python | repo (stars/issues/forks): DanVerh/trac_az @ 24ac877f9f43ad08372cb1d15a838d764d9e7df4 | licenses: ["BSD-3-Clause"] | stars: 324 (2015-01-07T05:30:52.000Z .. 2022-03-22T07:20:56.000Z) | issues: 12 (2017-03-24T23:24:55.000Z .. 2019-08-10T05:13:20.000Z) | forks: 142 (2015-01-12T09:30:28.000Z .. 2022-02-21T00:39:38.000Z) | content:
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2021 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import doctest
import importlib
import os.path
import pkg_resources
import random
import re
import sys
import textwrap
import unittest
import trac
from trac import util
from trac.test import mkdtemp, rmtree
from trac.util.tests import (concurrency, datefmt, presentation, text,
translation, html)
class AtomicFileTestCase(unittest.TestCase):
def setUp(self):
self.dir = mkdtemp()
self.path = os.path.join(self.dir, 'trac-tempfile')
def tearDown(self):
rmtree(self.dir)
def test_non_existing(self):
with util.AtomicFile(self.path) as f:
f.write('test content')
self.assertTrue(f.closed)
self.assertEqual('test content', util.read_file(self.path))
def test_existing(self):
util.create_file(self.path, 'Some content')
self.assertEqual('Some content', util.read_file(self.path))
with util.AtomicFile(self.path) as f:
f.write('Some new content')
self.assertTrue(f.closed)
self.assertEqual('Some new content', util.read_file(self.path))
@unittest.skipIf(os.name == 'nt',
'Symbolic links are not supported on Windows')
def test_symbolic_link(self):
link_path = os.path.join(self.dir, 'trac-tempfile-link')
os.symlink(self.path, link_path)
with util.AtomicFile(link_path) as f:
f.write('test content')
self.assertTrue(os.path.islink(link_path))
self.assertEqual('test content', util.read_file(link_path))
self.assertEqual('test content', util.read_file(self.path))
@unittest.skipIf(not util.can_rename_open_file,
'Open files cannot be renamed on Windows')
def test_existing_open_for_reading(self):
util.create_file(self.path, 'Initial file content')
self.assertEqual('Initial file content', util.read_file(self.path))
with open(self.path, 'rb') as rf:
with util.AtomicFile(self.path) as f:
f.write('Replaced content')
self.assertTrue(rf.closed)
self.assertTrue(f.closed)
self.assertEqual('Replaced content', util.read_file(self.path))
# FIXME: It is currently not possible to make this test pass on all
# platforms and with all locales. Typically, it will fail on Linux with
# LC_ALL=C.
# Python 3 adds sys.setfilesystemencoding(), which could be used here
# to remove the dependency on the locale. So the test is disabled until
# we require Python 3.
def _test_unicode_path(self):
self.path = os.path.join(self.dir, 'träc-témpfilè')
with util.AtomicFile(self.path) as f:
f.write('test content')
self.assertTrue(f.closed)
self.assertEqual('test content', util.read_file(self.path))
class PathTestCase(unittest.TestCase):
def assert_below(self, path, parent):
self.assertTrue(util.is_path_below(path.replace('/', os.sep),
parent.replace('/', os.sep)))
def assert_not_below(self, path, parent):
self.assertFalse(util.is_path_below(path.replace('/', os.sep),
parent.replace('/', os.sep)))
def test_is_path_below(self):
self.assert_below('/svn/project1', '/svn/project1')
self.assert_below('/svn/project1/repos', '/svn/project1')
self.assert_below('/svn/project1/sub/repos', '/svn/project1')
self.assert_below('/svn/project1/sub/../repos', '/svn/project1')
self.assert_not_below('/svn/project2/repos', '/svn/project1')
self.assert_not_below('/svn/project2/sub/repos', '/svn/project1')
self.assert_not_below('/svn/project1/../project2/repos',
'/svn/project1')
self.assertTrue(util.is_path_below('repos', os.path.join(os.getcwd())))
self.assertFalse(util.is_path_below('../sub/repos',
os.path.join(os.getcwd())))
def test_native_path(self):
self.assertIsNone(util.native_path(None))
if os.name == 'posix':
self.assertEqual('/D/Trac/x', util.native_path('D:\\Trac\\x'))
self.assertEqual('/D/Trac/x', util.native_path('/D/Trac/x'))
self.assertEqual('/D/', util.native_path('D:\\'))
self.assertEqual('/Trac/x', util.native_path('\\Trac\\x'))
self.assertEqual('Trac/x', util.native_path('Trac\\x'))
self.assertEqual('Trac/x', util.native_path('Trac/x'))
elif os.name == 'nt':
self.assertEqual('D:\\Trac\\x', util.native_path('/D/Trac/x'))
self.assertEqual('D:\\Trac\\x', util.native_path('D:/Trac/x'))
self.assertEqual('D:\\Trac\\x', util.native_path('D:\\Trac\\x'))
self.assertEqual('D:\\', util.native_path('/D/'))
self.assertEqual('D:', util.native_path('/D'))
self.assertEqual('C:\\', util.native_path('/'))
self.assertEqual('C:\\Trac\\x', util.native_path('/Trac/x'))
self.assertEqual('Trac\\x', util.native_path('Trac/x'))
self.assertEqual('Trac\\x', util.native_path('Trac\\x'))
class RandomTestCase(unittest.TestCase):
def setUp(self):
self.state = random.getstate()
def tearDown(self):
random.setstate(self.state)
def test_urandom(self):
"""urandom() returns random bytes"""
for i in range(129):
self.assertEqual(i, len(util.urandom(i)))
# For a large enough sample, each value should appear at least once
entropy = util.urandom(65536)
values = set(entropy)
self.assertEqual(256, len(values))
def test_hex_entropy(self):
"""hex_entropy() returns random hex digits"""
hex_digits = set('0123456789abcdef')
for i in range(129):
entropy = util.hex_entropy(i)
self.assertEqual(i, len(entropy))
self.assertEqual(set(), set(entropy) - hex_digits)
def test_hex_entropy_global_state(self):
"""hex_entropy() not affected by global random generator state"""
random.seed(0)
data = util.hex_entropy(64)
random.seed(0)
self.assertNotEqual(data, util.hex_entropy(64))
class ContentDispositionTestCase(unittest.TestCase):
def test_filename(self):
self.assertEqual('attachment; filename=myfile.txt',
util.content_disposition('attachment', 'myfile.txt'))
self.assertEqual('attachment; filename=a%20file.txt',
util.content_disposition('attachment', 'a file.txt'))
def test_no_filename(self):
self.assertEqual('inline', util.content_disposition('inline'))
self.assertEqual('attachment', util.content_disposition('attachment'))
def test_no_type(self):
self.assertEqual('filename=myfile.txt',
util.content_disposition(filename='myfile.txt'))
self.assertEqual('filename=a%20file.txt',
util.content_disposition(filename='a file.txt'))
class SafeReprTestCase(unittest.TestCase):
def test_normal_repr(self):
for x in ([1, 2, 3], "été", "été"):
self.assertEqual(repr(x), util.safe_repr(x))
def test_buggy_repr(self):
class eh_ix(object):
def __repr__(self):
return 1 + "2"
self.assertRaises(Exception, repr, eh_ix())
sr = util.safe_repr(eh_ix())
sr = re.sub('[A-F0-9]{4,}', 'ADDRESS', sr)
sr = re.sub(r'__main__|trac\.util\.tests(\.__init__)?', 'MODULE', sr)
self.assertEqual("<MODULE.eh_ix object at 0xADDRESS "
"(repr() error: TypeError: unsupported operand "
"type(s) for +: 'int' and 'str')>", sr)
class SetuptoolsUtilsTestCase(unittest.TestCase):
def setUp(self):
self.dir = mkdtemp()
sys.path.append(self.dir)
def tearDown(self):
sys.path.remove(self.dir)
rmtree(self.dir)
def test_get_module_path(self):
self.assertEqual(util.get_module_path(trac),
util.get_module_path(util))
def test_get_pkginfo_trac(self):
pkginfo = util.get_pkginfo(trac)
self.assertEqual(trac.__version__, pkginfo.get('version'))
self.assertNotEqual({}, pkginfo)
def test_get_pkginfo_non_toplevel(self):
from trac import core
import tracopt
pkginfo = util.get_pkginfo(trac)
self.assertEqual(pkginfo, util.get_pkginfo(util))
self.assertEqual(pkginfo, util.get_pkginfo(core))
self.assertEqual(pkginfo, util.get_pkginfo(tracopt))
def test_get_pkginfo_babel(self):
try:
import babel
import babel.core
dist = pkg_resources.get_distribution('Babel')
except:
pass
else:
pkginfo = util.get_pkginfo(babel)
self.assertNotEqual({}, pkginfo)
self.assertEqual(pkginfo, util.get_pkginfo(babel.core))
def test_get_pkginfo_pymysql(self):
try:
import pymysql
dist = pkg_resources.get_distribution('pymysql')
dist.get_metadata('top_level.txt')
except:
pass
else:
pkginfo = util.get_pkginfo(pymysql)
self.assertNotEqual({}, pkginfo)
self.assertEqual(pkginfo, util.get_pkginfo(pymysql.cursors))
def test_get_pkginfo_psycopg2(self):
# python-psycopg2 deb package doesn't provide SOURCES.txt and
# top_level.txt
try:
import psycopg2
import psycopg2.extensions
dist = pkg_resources.get_distribution('psycopg2')
except:
pass
else:
pkginfo = util.get_pkginfo(psycopg2)
self.assertNotEqual({}, pkginfo)
self.assertEqual(pkginfo, util.get_pkginfo(psycopg2.extensions))
def test_file_metadata(self):
pkgname = 'TestModule_' + util.hex_entropy(16)
modname = pkgname.lower()
with open(os.path.join(self.dir, pkgname + '-0.1.egg-info'), 'w',
encoding='utf-8') as f:
f.write('Metadata-Version: 1.1\n'
'Name: %(pkgname)s\n'
'Version: 0.1\n'
'Author: Joe\n'
'Author-email: joe@example.org\n'
'Maintainer: Jim\n'
'Maintainer-email: jim@example.org\n'
'Home-page: http://example.org/\n'
'Summary: summary.\n'
'Description: description.\n'
'Provides: %(modname)s\n'
'Provides: %(modname)s.foo\n'
% {'pkgname': pkgname, 'modname': modname})
os.mkdir(os.path.join(self.dir, modname))
for name in ('__init__.py', 'bar.py', 'foo.py'):
with open(os.path.join(self.dir, modname, name), 'w',
encoding='utf-8') as f:
f.write('# -*- coding: utf-8 -*-\n')
mod = importlib.import_module(modname)
mod.bar = importlib.import_module(modname + '.bar')
mod.foo = importlib.import_module(modname + '.foo')
pkginfo = util.get_pkginfo(mod)
self.assertEqual('0.1', pkginfo['version'])
self.assertEqual('Joe', pkginfo['author'])
self.assertEqual('joe@example.org', pkginfo['author_email'])
self.assertEqual('Jim', pkginfo['maintainer'])
self.assertEqual('jim@example.org', pkginfo['maintainer_email'])
self.assertEqual('http://example.org/', pkginfo['home_page'])
self.assertEqual('summary.', pkginfo['summary'])
self.assertEqual('description.', pkginfo['description'])
self.assertEqual(pkginfo, util.get_pkginfo(mod.bar))
self.assertEqual(pkginfo, util.get_pkginfo(mod.foo))
def _write_module(self, version, url):
modname = 'TestModule_' + util.hex_entropy(16)
modpath = os.path.join(self.dir, modname + '.py')
with open(modpath, 'w', encoding='utf-8') as f:
f.write(textwrap.dedent("""\
# -*- coding: utf-8 -*-
from trac.core import Component
version = '%s'
author = 'Joe'
author_email = 'joe@example.org'
maintainer = 'Jim'
maintainer_email = 'jim@example.org'
home_page = '%s'
license = 'BSD 3-Clause'
summary = 'summary.'
trac = 'http://my.trac.com'
class TestModule(Component):
pass
""") % (version, url))
return modname
def test_get_module_metadata(self):
version = '0.1'
home_page = 'http://example.org'
modname = self._write_module(version, home_page)
mod = importlib.import_module(modname)
info = util.get_module_metadata(mod)
self.assertEqual(version, info['version'])
self.assertEqual('Joe', info['author'])
self.assertEqual('joe@example.org', info['author_email'])
self.assertEqual('Jim', info['maintainer'])
self.assertEqual('jim@example.org', info['maintainer_email'])
self.assertEqual(home_page, info['home_page'])
self.assertEqual('summary.', info['summary'])
self.assertEqual('BSD 3-Clause', info['license'])
self.assertEqual('http://my.trac.com', info['trac'])
def test_get_module_metadata_keyword_expansion(self):
version = '10'
url = 'http://example.org'
modname = self._write_module('$Rev: %s $' % version,
'$URL: %s $' % url)
mod = importlib.import_module(modname)
info = util.get_module_metadata(mod)
self.assertEqual('r%s' % version, info['version'])
self.assertEqual(url, info['home_page'])
class LazyClass(object):
@util.lazy
def f(self):
return object()
class LazyTestCase(unittest.TestCase):
def setUp(self):
self.obj = LazyClass()
def test_lazy_get(self):
f = self.obj.f
self.assertTrue(self.obj.f is f)
def test_lazy_set(self):
self.obj.f = 2
self.assertEqual(2, self.obj.f)
def test_lazy_del(self):
f = self.obj.f
del self.obj.f
self.assertFalse(self.obj.f is f)
class FileTestCase(unittest.TestCase):
def setUp(self):
self.dir = mkdtemp()
self.filename = os.path.join(self.dir, 'trac-tempfile')
self.data = b'Lorem\ripsum\ndolor\r\nsit\namet,\rconsectetur\r\n'
def tearDown(self):
rmtree(self.dir)
def test_create_and_read_file(self):
util.create_file(self.filename, self.data, 'wb')
with open(self.filename, 'rb') as f:
self.assertEqual(self.data, f.read())
self.assertEqual(self.data, util.read_file(self.filename, 'rb'))
def test_touch_file(self):
util.create_file(self.filename, self.data, 'wb')
util.touch_file(self.filename)
with open(self.filename, 'rb') as f:
self.assertEqual(self.data, f.read())
def test_missing(self):
util.touch_file(self.filename)
self.assertTrue(os.path.isfile(self.filename))
self.assertEqual(0, os.path.getsize(self.filename))
class UtilitiesTestCase(unittest.TestCase):
def test_as_int(self):
self.assertEqual(1, util.as_int('1'))
self.assertEqual(1, util.as_int('1', None))
self.assertIsNone(util.as_int('A', None))
self.assertEqual(2, util.as_int('A', 2))
self.assertEqual(2, util.as_int('1', None, min=2))
self.assertEqual(0, util.as_int('1', None, max=0))
def test_as_float(self):
self.assertEqual(1.1, util.as_float('1.1'))
self.assertEqual(1.1, util.as_float('1.1', None))
self.assertEqual(1, util.as_float('1', None))
self.assertIsNone(util.as_float('A', None))
self.assertEqual(2.2, util.as_float('A', 2.2))
self.assertEqual(2.2, util.as_float('1.1', None, min=2.2))
self.assertEqual(0.1, util.as_float('1.1', None, max=0.1))
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AtomicFileTestCase))
suite.addTest(unittest.makeSuite(PathTestCase))
suite.addTest(unittest.makeSuite(RandomTestCase))
suite.addTest(unittest.makeSuite(ContentDispositionTestCase))
suite.addTest(unittest.makeSuite(SafeReprTestCase))
suite.addTest(unittest.makeSuite(SetuptoolsUtilsTestCase))
suite.addTest(unittest.makeSuite(LazyTestCase))
suite.addTest(unittest.makeSuite(FileTestCase))
suite.addTest(unittest.makeSuite(UtilitiesTestCase))
suite.addTest(concurrency.test_suite())
suite.addTest(datefmt.test_suite())
suite.addTest(presentation.test_suite())
suite.addTest(doctest.DocTestSuite(util))
suite.addTest(text.test_suite())
suite.addTest(translation.test_suite())
suite.addTest(html.test_suite())
suite.addTest(doctest.DocTestSuite(util.html))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
avg_line_length: 38.527233 | max_line_length: 79 | alphanum_fraction: 0.611683

Record: config/settings/test.py | hexsha: b1b15b9b79ef298f3b63882c8493148dab64f12b | size: 2,024 | ext: py | lang: Python | repo (stars/issues/forks): ankmishra/whitenose @ eb12ad5bb4e1e93daa70a6ef27a0be3a57a4c22e | licenses: ["MIT"] | stars: 1 (2020-02-11T13:28:19.000Z .. 2020-02-11T13:28:19.000Z) | issues: null | forks: null | content:
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY", default="UbfjPyDMgccpkbeacmVqqAw2Wn6lOYwTw9qVHdXzP6KjrJ3MDhqwuclIKWUFPo3c")
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": ""
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# Your stuff...
# ------------------------------------------------------------------------------
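A minimal sketch follows (not part of the original file): these settings only take effect when Django is pointed at this module; how the project actually selects them (manage.py flag, pytest-django option, env var) is an assumption.
# Hypothetical selection of the test settings; the entry point is an assumption.
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.test")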
avg_line_length: 36.142857 | max_line_length: 113 | alphanum_fraction: 0.546443

Record: emptycabinet/routers/products.py | hexsha: fdbd55a2a9fb1337f9086aef5d9aa5d5228f126d | size: 287 | ext: py | lang: Python | repo (stars/issues/forks): aldrickdev/emptycabinet @ cb8f5ecd06cb53c884086e310a1a2bae39f025f7 | licenses: ["MIT"] | stars: null | issues: null | forks: null | content:
from fastapi import APIRouter
router = APIRouter(prefix="/api/products", tags=["products"])
product_list = [
{"name": "Beans"},
{"name": "Bread"},
{"name": "Milk"},
{"name": "Eggs"},
]
@router.get("/")
async def get_products():
return {"Products": product_list}
avg_line_length: 16.882353 | max_line_length: 61 | alphanum_fraction: 0.595819

Record: hnswlib-pyspark/pyspark_hnsw/knn.py | hexsha: 6cca4667240ce71f56e3046ae71f0e40098b0925 | size: 17,546 | ext: py | lang: Python | repo (stars/issues/forks): chenjia123/hnswlib @ 3afc4ecb96f3e5b651fe5c4ab912262058b319f2 | licenses: ["Apache-2.0"] | stars: 158 (2019-06-10T13:32:11.000Z .. 2022-03-23T10:49:41.000Z) | issues: 43 (2019-07-16T19:17:02.000Z .. 2022-02-27T11:27:30.000Z) | forks: 38 (2019-09-23T06:31:02.000Z .. 2022-02-27T15:03:25.000Z) | content:
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams
from pyspark.ml.param.shared import *
from pyspark.mllib.common import inherit_doc
from pyspark import keyword_only
from pyspark.ml.util import JavaMLReadable, JavaMLWritable, MLReader, _jvm
__all__ = ['HnswSimilarity', 'HnswSimilarityModel', 'BruteForceSimilarity', 'BruteForceSimilarityModel', 'HnswLibMLReader']
class HnswLibMLReader(MLReader):
"""
Specialization of :py:class:`MLReader` for :py:class:`JavaParams` types
"""
def __init__(self, clazz, java_class):
self._clazz = clazz
self._jread = self._load_java_obj(java_class).read()
def load(self, path):
"""Load the ML instance from the input path."""
java_obj = self._jread.load(path)
return self._clazz._from_java(java_obj)
@classmethod
def _load_java_obj(cls, java_class):
"""Load the peer Java object of the ML instance."""
java_obj = _jvm()
for name in java_class.split("."):
java_obj = getattr(java_obj, name)
return java_obj
@inherit_doc
class _KnnModelParams(HasFeaturesCol, HasPredictionCol):
"""
Params for knn models.
"""
queryIdentifierCol = Param(Params._dummy(), "queryIdentifierCol", "the column name for the query identifier",
typeConverter=TypeConverters.toString)
queryPartitionsCol = Param(Params._dummy(), "queryPartitionsCol", "the column name for the query partitions",
typeConverter=TypeConverters.toString)
parallelism = Param(Params._dummy(), "parallelism", "number of threads to use", typeConverter=TypeConverters.toInt)
k = Param(Params._dummy(), "k", "number of neighbors to find", typeConverter=TypeConverters.toInt)
numReplicas = Param(Params._dummy(), "numReplicas", "number of index replicas to create when querying", typeConverter=TypeConverters.toInt)
excludeSelf = Param(Params._dummy(), "excludeSelf", "whether to include the row identifier as a candidate neighbor",
typeConverter=TypeConverters.toBoolean)
similarityThreshold = Param(Params._dummy(), "similarityThreshold",
"do not return neighbors further away than this distance",
typeConverter=TypeConverters.toFloat)
outputFormat = Param(Params._dummy(), "outputFormat", "output format, one of full, minimal",
typeConverter=TypeConverters.toString)
def getQueryIdentifierCol(self):
"""
Gets the value of queryIdentifierCol or its default value.
"""
return self.getOrDefault(self.queryIdentifierCol)
def getQueryPartitionsCol(self):
"""
Gets the value of queryPartitionsCol or its default value.
"""
return self.getOrDefault(self.queryPartitionsCol)
def getParallelism(self):
"""
Gets the value of parallelism or its default value.
"""
return self.getOrDefault(self.parallelism)
def getK(self):
"""
Gets the value of k or its default value.
"""
return self.getOrDefault(self.k)
def getExcludeSelf(self):
"""
Gets the value of excludeSelf or its default value.
"""
return self.getOrDefault(self.excludeSelf)
def getSimilarityThreshold(self):
"""
Gets the value of similarityThreshold or its default value.
"""
return self.getOrDefault(self.similarityThreshold)
def getOutputFormat(self):
"""
Gets the value of outputFormat or its default value.
"""
return self.getOrDefault(self.outputFormat)
def getNumReplicas(self):
"""
Gets the value of numReplicas or its default value.
"""
return self.getOrDefault(self.numReplicas)
@inherit_doc
class _KnnParams(_KnnModelParams):
"""
Params for knn algorithms.
"""
identifierCol = Param(Params._dummy(), "identifierCol", "the column name for the row identifier",
typeConverter=TypeConverters.toString)
partitionCol = Param(Params._dummy(), "partitionCol", "the column name for the partition",
typeConverter=TypeConverters.toString)
numPartitions = Param(Params._dummy(), "numPartitions", "number of partitions", typeConverter=TypeConverters.toInt)
distanceFunction = Param(Params._dummy(), "distanceFunction",
"distance function, one of bray-curtis, canberra, cosine, correlation, " +
"euclidean, inner-product, manhattan or the fully qualified classname " +
"of a distance function", typeConverter=TypeConverters.toString)
def getIdentifierCol(self):
"""
Gets the value of identifierCol or its default value.
"""
return self.getOrDefault(self.identifierCol)
def getPartitionCol(self):
"""
Gets the value of partitionCol or its default value.
"""
return self.getOrDefault(self.partitionCol)
def getNumPartitions(self):
"""
Gets the value of numPartitions or its default value.
"""
return self.getOrDefault(self.numPartitions)
def getDistanceFunction(self):
"""
Gets the value of distanceFunction or its default value.
"""
return self.getOrDefault(self.distanceFunction)
@inherit_doc
class _HnswModelParams(_KnnModelParams):
"""
Params for :py:class:`Hnsw` and :py:class:`HnswModel`.
"""
ef = Param(Params._dummy(), "ef", "size of the dynamic list for the nearest neighbors (used during the search)",
typeConverter=TypeConverters.toInt)
def getEf(self):
"""
Gets the value of ef or its default value.
"""
return self.getOrDefault(self.ef)
@inherit_doc
class _HnswParams(_HnswModelParams, _KnnParams):
"""
Params for :py:class:`Hnsw`.
"""
m = Param(Params._dummy(), "m", "number of bi-directional links created for every new element during construction",
typeConverter=TypeConverters.toInt)
efConstruction = Param(Params._dummy(), "efConstruction",
"has the same meaning as ef, but controls the index time / index precision",
typeConverter=TypeConverters.toInt)
def getM(self):
"""
Gets the value of m or its default value.
"""
return self.getOrDefault(self.m)
def getEfConstruction(self):
"""
Gets the value of efConstruction or its default value.
"""
return self.getOrDefault(self.efConstruction)
@inherit_doc
class BruteForceSimilarity(JavaEstimator, _KnnParams, JavaMLReadable, JavaMLWritable):
"""
Exact nearest neighbour search.
"""
@keyword_only
def __init__(self, identifierCol="id", partitionCol=None, queryIdentifierCol=None, queryPartitionsCol=None,
parallelism= None, featuresCol="features", predictionCol="prediction", numPartitions=1, numReplicas=0,
k=5, distanceFunction="cosine", excludeSelf=False, similarityThreshold=-1.0, outputFormat="full"):
super(BruteForceSimilarity, self).__init__()
self._java_obj = self._new_java_obj("com.github.jelmerk.spark.knn.bruteforce.BruteForceSimilarity", self.uid)
self._setDefault(identifierCol="id", numPartitions=1, numReplicas=0, k=5, distanceFunction="cosine",
excludeSelf=False, similarityThreshold=-1.0, outputFormat="full")
kwargs = self._input_kwargs
self.setParams(**kwargs)
def setIdentifierCol(self, value):
"""
Sets the value of :py:attr:`identifierCol`.
"""
return self._set(identifierCol=value)
def setQueryIdentifierCol(self, value):
"""
Sets the value of :py:attr:`queryIdentifierCol`.
"""
return self._set(queryIdentifierCol=value)
def setPartitionCol(self, value):
"""
Sets the value of :py:attr:`partitionCol`.
"""
return self._set(partitionCol=value)
def setQueryPartitionsCol(self, value):
"""
Sets the value of :py:attr:`queryPartitionsCol`.
"""
return self._set(queryPartitionsCol=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def setNumPartitions(self, value):
"""
Sets the value of :py:attr:`numPartitions`.
"""
return self._set(numPartitions=value)
def setNumReplicas(self, value):
"""
Sets the value of :py:attr:`numReplicas`.
"""
return self._set(numReplicas=value)
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
def setDistanceFunction(self, value):
"""
Sets the value of :py:attr:`distanceFunction`.
"""
return self._set(distanceFunction=value)
def setExcludeSelf(self, value):
"""
Sets the value of :py:attr:`excludeSelf`.
"""
return self._set(excludeSelf=value)
def setSimilarityThreshold(self, value):
"""
Sets the value of :py:attr:`similarityThreshold`.
"""
return self._set(similarityThreshold=value)
def setOutputFormat(self, value):
"""
Sets the value of :py:attr:`outputFormat`.
"""
return self._set(outputFormat=value)
@keyword_only
def setParams(self, identifierCol="id", queryIdentifierCol=None, queryPartitionsCol=None, parallelism=None,
featuresCol="features", predictionCol="prediction",numPartitions=1, numReplicas=0, k=5,
distanceFunction="cosine", excludeSelf=False, similarityThreshold=-1.0, outputFormat="full"):
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return BruteForceSimilarityModel(java_model)
class BruteForceSimilarityModel(JavaModel, _KnnModelParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by BruteForce.
"""
_classpath_model = 'com.github.jelmerk.spark.knn.bruteforce.BruteForceSimilarityModel'
def setQueryIdentifierCol(self, value):
"""
Sets the value of :py:attr:`queryIdentifierCol`.
"""
return self._set(queryIdentifierCol=value)
def setQueryPartitionsCol(self, value):
"""
Sets the value of :py:attr:`queryPartitionsCol`.
"""
return self._set(queryPartitionsCol=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
def setExcludeSelf(self, value):
"""
Sets the value of :py:attr:`excludeSelf`.
"""
return self._set(excludeSelf=value)
def setSimilarityThreshold(self, value):
"""
Sets the value of :py:attr:`similarityThreshold`.
"""
return self._set(similarityThreshold=value)
def setOutputFormat(self, value):
"""
Sets the value of :py:attr:`outputFormat`.
"""
return self._set(outputFormat=value)
def setNumReplicas(self, value):
"""
Sets the value of :py:attr:`numReplicas`.
"""
return self._set(numReplicas=value)
@classmethod
def read(cls):
return HnswLibMLReader(cls, cls._classpath_model)
@inherit_doc
class HnswSimilarity(JavaEstimator, _HnswParams, JavaMLReadable, JavaMLWritable):
"""
Approximate nearest neighbour search.
"""
@keyword_only
def __init__(self, identifierCol="id", queryIdentifierCol=None, queryPartitionsCol=None, parallelism=None,
featuresCol="features", predictionCol="prediction", m=16, ef=10, efConstruction=200, numPartitions=1,
numReplicas=0, k=5, distanceFunction="cosine", excludeSelf=False, similarityThreshold=-1.0,
outputFormat="full"):
super(HnswSimilarity, self).__init__()
self._java_obj = self._new_java_obj("com.github.jelmerk.spark.knn.hnsw.HnswSimilarity", self.uid)
self._setDefault(identifierCol="id", m=16, ef=10, efConstruction=200, numPartitions=1, numReplicas=0, k=5,
distanceFunction="cosine", excludeSelf=False, similarityThreshold=-1.0, outputFormat="full")
kwargs = self._input_kwargs
self.setParams(**kwargs)
def setIdentifierCol(self, value):
"""
Sets the value of :py:attr:`identifierCol`.
"""
return self._set(identifierCol=value)
def setQueryIdentifierCol(self, value):
"""
Sets the value of :py:attr:`queryIdentifierCol`.
"""
return self._set(queryIdentifierCol=value)
def setPartitionCol(self, value):
"""
Sets the value of :py:attr:`partitionCol`.
"""
return self._set(partitionCol=value)
def setQueryPartitionsCol(self, value):
"""
Sets the value of :py:attr:`queryPartitionsCol`.
"""
return self._set(queryPartitionsCol=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def setNumPartitions(self, value):
"""
Sets the value of :py:attr:`numPartitions`.
"""
return self._set(numPartitions=value)
def setNumReplicas(self, value):
"""
Sets the value of :py:attr:`numReplicas`.
"""
return self._set(numReplicas=value)
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
def setDistanceFunction(self, value):
"""
Sets the value of :py:attr:`distanceFunction`.
"""
return self._set(distanceFunction=value)
def setExcludeSelf(self, value):
"""
Sets the value of :py:attr:`excludeSelf`.
"""
return self._set(excludeSelf=value)
def setSimilarityThreshold(self, value):
"""
Sets the value of :py:attr:`similarityThreshold`.
"""
return self._set(similarityThreshold=value)
def setOutputFormat(self, value):
"""
Sets the value of :py:attr:`outputFormat`.
"""
return self._set(outputFormat=value)
def setM(self, value):
"""
Sets the value of :py:attr:`m`.
"""
return self._set(m=value)
def setEf(self, value):
"""
Sets the value of :py:attr:`ef`.
"""
return self._set(ef=value)
def setEfConstruction(self, value):
"""
Sets the value of :py:attr:`efConstruction`.
"""
return self._set(efConstruction=value)
@keyword_only
def setParams(self, identifierCol="id", queryIdentifierCol=None, queryPartitionsCol=None, parallelism=None,
featuresCol="features", predictionCol="prediction", m=16, ef=10, efConstruction=200, numPartitions=1,
numReplicas=0, k=5, distanceFunction="cosine", excludeSelf=False, similarityThreshold=-1.0,
outputFormat="full"):
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return HnswSimilarityModel(java_model)
class HnswSimilarityModel(JavaModel, _HnswModelParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by Hnsw.
"""
_classpath_model = 'com.github.jelmerk.spark.knn.hnsw.HnswSimilarityModel'
def setQueryIdentifierCol(self, value):
"""
Sets the value of :py:attr:`queryIdentifierCol`.
"""
return self._set(queryIdentifierCol=value)
def setQueryPartitionsCol(self, value):
"""
Sets the value of :py:attr:`queryPartitionsCol`.
"""
return self._set(queryPartitionsCol=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
def setEf(self, value):
"""
Sets the value of :py:attr:`ef`.
"""
return self._set(ef=value)
def setExcludeSelf(self, value):
"""
Sets the value of :py:attr:`excludeSelf`.
"""
return self._set(excludeSelf=value)
def setSimilarityThreshold(self, value):
"""
Sets the value of :py:attr:`similarityThreshold`.
"""
return self._set(similarityThreshold=value)
def setOutputFormat(self, value):
"""
Sets the value of :py:attr:`outputFormat`.
"""
return self._set(outputFormat=value)
def setNumReplicas(self, value):
"""
Sets the value of :py:attr:`numReplicas`.
"""
return self._set(numReplicas=value)
@classmethod
def read(cls):
return HnswLibMLReader(cls, cls._classpath_model)
HnswSimilarityModelImpl = HnswSimilarityModel
BruteForceSimilarityModelImpl = BruteForceSimilarityModel
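A hedged end-to-end sketch of the estimator API defined above; the SparkSession, toy data and column names are assumptions, and running it requires the matching hnswlib-spark JAR on the Spark classpath.
# Hypothetical usage sketch (session and data setup are not part of this module).
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors

spark = SparkSession.builder.getOrCreate()
items = spark.createDataFrame(
    [(1, Vectors.dense([0.1, 0.2])), (2, Vectors.dense([0.2, 0.1]))],
    ["id", "features"])

hnsw = HnswSimilarity(identifierCol="id", featuresCol="features",
                      distanceFunction="cosine", m=16, ef=10, k=2, numPartitions=1)
model = hnsw.fit(items)              # builds the distributed HNSW index
neighbours = model.transform(items)  # adds a "prediction" column with the k nearest neighbours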
avg_line_length: 32.135531 | max_line_length: 143 | alphanum_fraction: 0.62681

Record: hummingbot/connector/exchange/kraken/kraken_user_stream_tracker.py | hexsha: 3a1cfa86766902dfcfcd5a490790b20ed1ec0009 | size: 1,807 | ext: py | lang: Python | repo (stars/issues/forks): csdenboer/hummingbot @ 8a799675a325ebdbb74d76b2a44472cdbf74d691 | licenses: ["Apache-2.0"] | stars: 37 (2020-07-08T03:44:26.000Z .. 2022-01-16T12:35:26.000Z) | issues: 13 (2021-02-16T01:57:23.000Z .. 2021-02-16T03:50:03.000Z) | forks: 17 (2021-04-07T21:29:46.000Z .. 2022-02-03T02:01:04.000Z) | content:
#!/usr/bin/env python
import asyncio
import logging
from typing import (
Optional
)
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.core.data_type.user_stream_tracker import UserStreamTracker
from hummingbot.core.utils.async_utils import (
safe_ensure_future,
safe_gather,
)
from hummingbot.connector.exchange.kraken.kraken_api_user_stream_data_source import KrakenAPIUserStreamDataSource
from hummingbot.connector.exchange.kraken.kraken_auth import KrakenAuth
class KrakenUserStreamTracker(UserStreamTracker):
_krust_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._krust_logger is None:
cls._krust_logger = logging.getLogger(__name__)
return cls._krust_logger
def __init__(self,
kraken_auth: KrakenAuth):
super().__init__()
self._ev_loop: asyncio.events.AbstractEventLoop = asyncio.get_event_loop()
self._data_source: Optional[UserStreamTrackerDataSource] = None
self._user_stream_tracking_task: Optional[asyncio.Task] = None
self._kraken_auth: KrakenAuth = kraken_auth
@property
def data_source(self) -> UserStreamTrackerDataSource:
if not self._data_source:
self._data_source = KrakenAPIUserStreamDataSource(kraken_auth=self._kraken_auth)
return self._data_source
@property
def exchange_name(self) -> str:
return "kraken"
async def start(self):
self._user_stream_tracking_task = safe_ensure_future(
self.data_source.listen_for_user_stream(self._ev_loop, self._user_stream)
)
await safe_gather(self._user_stream_tracking_task)
avg_line_length: 35.431373 | max_line_length: 113 | alphanum_fraction: 0.753182

Record: gestao_rh/settings.py | hexsha: 65618987c76990020ab25ab3fdc7979222f60245 | size: 2,544 | ext: py | lang: Python | repo (stars/issues/forks): jesielcarlos/gestao_rh @ 8fdf155bfb772dfb4cab507ba82fca9882f0bf34 | licenses: ["MIT"] | stars: null | issues: null | forks: null | content:
from pathlib import Path
import os
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-w&-^j@#u1z+r6qh(de_8-l6n-uo-2o3s&nu0!xc!c32)igzpok'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.empresa',
'apps.funcionarios',
'apps.departamentos',
'apps.registro_hora_extra',
'apps.documentos',
'apps.core',
'bootstrapform',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gestao_rh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gestao_rh.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,"staticfiles"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,"media")
STATIC_ROOT = os.path.join(BASE_DIR,"static/")
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'login'
avg_line_length: 21.931034 | max_line_length: 91 | alphanum_fraction: 0.668632

Record: main/src/preparation/pandas_dataframe_generator.py | hexsha: b2f34f8bf110afe89a3823a2776f0e88da455171 | size: 3,858 | ext: py | lang: Python | repo (stars/issues/forks): jason424217/Artificial-Code-Gen @ a6e2c097c5ffe8cb0929e6703035b526f477e514 | licenses: ["MIT"] | stars: null | issues: null | forks: null | content:
import pandas as pd
import sys
import os
def save_to_pickle(dataframe, path, language, partition, data_level):
pickle_path = os.path.join(path, "../dataframes", language)
if not os.path.exists(pickle_path):
os.makedirs(pickle_path)
pickle_name = f"{language}_{data_level}_{partition}.pkl"
pickle_path = os.path.join(pickle_path, pickle_name)
dataframe.to_pickle(pickle_path, compression="gzip")
def file_level_data(data_level, language, partition_type, path):
data = []
for repository in os.listdir(path):
repository_path = os.path.join(path, repository)
if not os.path.isdir(repository_path): # This is just a failsafe in case a file gets in the folder of repositories
continue
for file in os.listdir(repository_path):
file_path = os.path.join(repository_path, file)
if os.path.isdir(file_path): # Skips over the methods folder within each repository
continue
with open (file_path, "r") as myfile:
data.append([data_level, language, partition_type, repository, file, myfile.read()])
return data
def function_level_data(data_level, language, partition_type, path):
data = []
for repository in os.listdir(path):
repository_path = os.path.join(path, repository)
if not os.path.isdir(repository_path):
continue
for file in os.listdir(repository_path):
file_path = os.path.join(repository_path, file)
if os.path.isdir(file_path) and file == "methods":
for method in os.listdir(file_path):
method_path = os.path.join(file_path, method)
with open (method_path, "r") as myfile:
data.append([data_level, language, partition_type, repository, method, myfile.read()])
return data
def dataframe_from_partition(language, path, partition_type, data_level):
if data_level == 'file':
data = file_level_data(data_level, language, partition_type, path)
elif data_level == 'function':
data = function_level_data(data_level, language, partition_type, path)
field_names = ["data_level", "language", "partition", "repository", "file_name", "contents"]
df = pd.DataFrame(data, columns=field_names)
return df
def process_path(path, language_target, partition_target, data_level):
for language in os.listdir(path):
language_path = os.path.join(path, language)
if (not os.path.isdir(language_path) or (language_target and (language != language_target))):
continue
for partition in os.listdir(language_path):
partition_path = os.path.join(language_path, partition)
if (not os.path.isdir(partition_path) or (partition_target and (partition != partition_target))):
continue
pd_dataframe = dataframe_from_partition(language, partition_path, partition, data_level)
save_to_pickle(pd_dataframe, path, language, partition, data_level)
def main():
if len(sys.argv) > 2:
path = sys.argv[1]
data_level = sys.argv[2]
if (data_level != 'file') and (data_level != 'function'):
print("Error: 'data_level' input must be 'file' or 'function'")
exit()
language_target = None
partition_target = None
if len(sys.argv) > 3: # If a language is specified, pass that into process_path()
language_target = sys.argv[3]
if len(sys.argv) > 4: # If a partition type is specified, pass that into process_path()
partition_target = sys.argv[4]
process_path(path, language_target, partition_target, data_level)
else:
print("ERROR: Path and data level inputs required, please run again with both")
if __name__ == "__main__":
main()
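main() expects a dataset root and a data level ('file' or 'function') on the command line; the equivalent direct call is sketched below with a placeholder path.
# Hypothetical usage sketch: same effect as `python pandas_dataframe_generator.py /data/repos file`;
# "/data/repos" is a placeholder, and passing None processes every language and partition found.
process_path("/data/repos", language_target=None, partition_target=None, data_level="file")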
avg_line_length: 46.481928 | max_line_length: 122 | alphanum_fraction: 0.659409

Record: src/python/nispor/route.py | hexsha: 458914f50576f5387fd6d137a40844f2fa45ce67 | size: 4,314 | ext: py | lang: Python | repo (stars/issues/forks): robgc/nispor @ 32e5d0a2e217322e144fe1ffe916e217133c2b74 | licenses: ["Apache-2.0"] | stars: 15 (2020-05-18T18:55:38.000Z .. 2022-01-18T22:50:44.000Z) | issues: 89 (2020-05-18T18:55:33.000Z .. 2022-03-30T14:01:34.000Z) | forks: 16 (2020-05-18T18:48:08.000Z .. 2021-12-09T15:03:10.000Z) | content:
# Copyright 2020 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NisporRouteState:
def __init__(self, info):
self._rts = [NisporRoute(rt_info) for rt_info in info]
def __iter__(self):
for rt in self._rts:
yield rt
class NisporRoute:
def __init__(self, info):
self._info = info
@property
def address_family(self):
return self._info["address_family"]
@property
def tos(self):
return self._info["tos"]
@property
def table(self):
return self._info["table"]
@property
def protocol(self):
return self._info["protocol"]
@property
def scope(self):
return self._info["scope"]
@property
def route_type(self):
return self._info["route_type"]
@property
def flags(self):
return self._info["flags"]
@property
def dst(self):
return self._info.get("dst")
@property
def oif(self):
return self._info.get("oif")
@property
def iif(self):
return self._info.get("iif")
@property
def prefered_src(self):
return self._info.get("prefered_src")
@property
def src(self):
return self._info.get("src")
@property
def class_id(self):
return self._info.get("class_id")
@property
def gateway(self):
return self._info.get("gateway")
@property
def via(self):
return self._info.get("via")
@property
def mark(self):
return self._info.get("mark")
@property
def uid(self):
return self._info.get("uid")
@property
def lock(self):
return self._info.get("lock")
@property
def mtu(self):
return self._info.get("mtu")
@property
def window(self):
return self._info.get("window")
@property
def rtt(self):
return self._info.get("rtt")
@property
def rttvar(self):
return self._info.get("rttvar")
@property
def ssthresh(self):
return self._info.get("ssthresh")
@property
def cwnd(self):
return self._info.get("cwnd")
@property
def advmss(self):
return self._info.get("advmss")
@property
def reordering(self):
return self._info.get("reordering")
@property
def hoplimit(self):
return self._info.get("hoplimit")
@property
def initcwnd(self):
return self._info.get("initcwnd")
@property
def features(self):
return self._info.get("features")
@property
def rto_min(self):
return self._info.get("rto_min")
@property
def initrwnd(self):
return self._info.get("initrwnd")
@property
def quickack(self):
return self._info.get("quickack")
@property
def cc_algo(self):
return self._info.get("cc_algo")
@property
def fastopen_no_cookie(self):
return self._info.get("fastopen_no_cookie")
@property
def cache_clntref(self):
return self._info.get("cache_clntref")
@property
def cache_last_use(self):
return self._info.get("cache_last_use")
@property
def cache_expires(self):
return self._info.get("cache_expires")
@property
def cache_error(self):
return self._info.get("cache_error")
@property
def cache_used(self):
return self._info.get("cache_used")
@property
def cache_id(self):
return self._info.get("cache_id")
@property
def cache_ts(self):
return self._info.get("cache_ts")
@property
def cache_ts_age(self):
return self._info.get("cache_ts_age")
@property
def metric(self):
return self._info.get("metric")
@property
def perf(self):
return self._info.get("perf")
avg_line_length: 21.147059 | max_line_length: 74 | alphanum_fraction: 0.619611

Record: src/reqompyler/__init__.py | hexsha: 06582088aad30e9426b12d5190329b5224596cdd | size: 985 | ext: py | lang: Python | repo (stars/issues/forks): zurutech/reqompyler @ 433e92b1771bf049e0ce7d338def83250ed1acf3 | licenses: ["Apache-2.0"] | stars: 4 (2020-02-13T12:01:39.000Z .. 2020-03-18T16:41:14.000Z) | issues: 87 (2020-02-20T11:19:38.000Z .. 2021-07-13T00:47:27.000Z) | forks: null | content:
# -*- coding: utf-8 -*-
#
# Copyright 2020 Zuru Tech HK Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Top-level package for Reqompyler.
.. currentmodule:: reqompyler
.. rubric:: Functions
.. autosummary::
:nosignatures:
:toctree: _autosummary
reqcompyle
"""
__author__ = """Zuru Tech HK Limited, All rights reserved."""
__email__ = "ml@zuru.tech"
__version__ = "0.1.0"
from .reqompyler import reqcompyle
__ALL__ = ["reqompyle"]
avg_line_length: 24.625 | max_line_length: 74 | alphanum_fraction: 0.724873

Record: venv/lib/python3.5/site-packages/jsonrpcclient/request.py | hexsha: 6c5a66a5eb978d0d19b08a4d336a5174ec9b5db3 | size: 4,774 | ext: py | lang: Python | stars repo: kntoukakis/wifiportal21-map @ 1f1917c2f3c2987f7a88cc537d7c50449d144ea0 | issues/forks repo: dukakisxyz/wifiportal21-map @ 1f1917c2f3c2987f7a88cc537d7c50449d144ea0 | licenses: ["MIT"] | stars: 1 (2016-09-10T18:14:33.000Z .. 2016-09-10T18:14:33.000Z) | issues: null | forks: 1 (2020-11-04T05:01:17.000Z .. 2020-11-04T05:01:17.000Z) | content:
"""
Requests
********
These classes make it easy to create JSON-RPC Request objects.
"""
import itertools
import json
from collections import OrderedDict
from future.utils import with_metaclass
def _sort_request(req):
"""Sorts a JSON-RPC request dict returning a sorted OrderedDict, having no
effect other than making it nicer to read.
>>> json.dumps(_sort_request(
... {'id': 2, 'params': [2, 3], 'method': 'add', 'jsonrpc': '2.0'}))
'{"jsonrpc": "2.0", "method": "add", "params": [2, 3], "id": 2}'
:param req: JSON-RPC request in dict format.
:return: The same request, nicely sorted.
"""
sort_order = ['jsonrpc', 'method', 'params', 'id']
return OrderedDict(sorted(req.items(), key=lambda k: sort_order.index(
k[0])))
class _RequestClassType(type):
"""Request Metaclass.
Purpose of this is to catch undefined attributes on the class.
"""
def __getattr__(cls, name):
"""This gives us an alternate way to make a request::
>>> Request.cat()
{'jsonrpc': '2.0', 'method': 'cat', 'id': 1}
That's the same as saying ``Request('cat')``. Technique is
explained here: http://code.activestate.com/recipes/307618/
"""
def attr_handler(*args, **kwargs):
"""Return the request using the specified method name."""
return cls(name, *args, **kwargs)
return attr_handler
class Notification(with_metaclass(_RequestClassType, dict)):
# pylint: disable=line-too-long
"""A JSON-RPC Request object, with no ``id`` member (meaning no payload data
is wanted)::
>>> from jsonrpcclient import Notification
>>> Notification('cat')
{'jsonrpc': '2.0', 'method': 'cat'}
The first argument is the *method*; everything else is *arguments* to the
method::
>>> Notification('cat', 'Mittens', 5)
    {'jsonrpc': '2.0', 'method': 'cat', 'params': ['Mittens', 5]}
Keyword arguments are also acceptable::
>>> Notification('cat', name='Mittens', age=5)
{'jsonrpc': '2.0', 'method': 'cat', 'params': {'name': 'Mittens', 'age': 5}}
If you prefer, call the method as though it was a class attribute::
>>> Notification.cat(name='Mittens', age=5)
{'jsonrpc': '2.0', 'method': 'cat', 'params': {'name': 'Mittens', 'age': 5}}
:param method: The method name.
:param args: Positional arguments.
:param kwargs: Keyword arguments.
:returns: The JSON-RPC request in dictionary form.
"""
def __init__(self, method, *args, **kwargs):
# Start the basic request
self['jsonrpc'] = '2.0'
self['method'] = method
# Get the 'params' part
# Merge the positional and keyword arguments into one list
params = list()
if args:
params.extend(args)
if kwargs:
params.append(kwargs)
if params:
# The 'params' can be either "by-position" (a list) or "by-name" (a
# dict). If there's only one list or dict in the params list, take
# it out of the enclosing list, ie. [] instead of [[]], {} instead
# of [{}].
if len(params) == 1 and (isinstance(params[0], dict) or \
isinstance(params[0], list)):
params = params[0]
# Add the params to the request
self['params'] = params
def __str__(self):
"""Wrapper around request, returning a string instead of a dict"""
return json.dumps(_sort_request(self))
class Request(Notification):
"""A JSON-RPC Request object, with an ``id`` member (meaning payload data is
wanted)::
>>> Request('cat')
{'jsonrpc': '2.0', 'method': 'cat', 'id': 1}
An auto-incremented ``id`` is used, so each request has a unique ``id``::
>>> Request('cat')
{'jsonrpc': '2.0', 'method': 'cat', 'id': 2}
Use ``request_id`` to specify the ``id`` to use::
>>> Request('cat', request_id='Request #1')
{'jsonrpc': '2.0', 'method': 'cat', 'id': 'Request #1'}
:param method: The method name.
:param args: Positional arguments.
:param kwargs: Keyword arguments.
:returns: The JSON-RPC request in dictionary form.
"""
id_iterator = itertools.count(1)
def __init__(self, method, *args, **kwargs):
# 'response' means use an auto-iterated id
#kwargs.pop('response', None)
# 'request_id' means use the specified id
if kwargs.get('request_id'):
self['id'] = kwargs['request_id']
else:
self['id'] = next(self.id_iterator)
kwargs.pop('request_id', None)
super(Request, self).__init__(method, *args, **kwargs)
avg_line_length: 33.152778 | max_line_length: 84 | alphanum_fraction: 0.58253

Record: experiments/train_eoi.py | hexsha: 80a6d31b35dd40a8642707b533712a082f0168e4 | size: 12,049 | ext: py | lang: Python | repo (stars/issues/forks): YYT-t/maddpg @ 23f2ef71e000b5422b7d21d69b0be1d2deabffd3 | licenses: ["MIT"] | stars: 1 (2021-11-06T08:43:00.000Z .. 2021-11-06T08:43:00.000Z) | issues: null | forks: null | content:
import argparse
import numpy as np
import tensorflow as tf
import time
import pickle
from model3 import *
from AllBuffer import AllBuffer
from configure import *
import maddpg.common.tf_util as U
from maddpg.trainer.maddpg_eoi import MADDPGAgentTrainer
import tensorflow.contrib.layers as layers
def to_categorical(y, num_classes=None, dtype='float32'):  # convert integer labels to one-hot
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
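# Illustration (not part of the original script): to_categorical() turns integer
# class labels into one-hot rows, e.g. three labels over four classes:
#     to_categorical([0, 2, 3], num_classes=4)
#     -> [[1., 0., 0., 0.],
#         [0., 0., 1., 0.],
#         [0., 0., 0., 1.]]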
def parse_args():
parser = argparse.ArgumentParser("Reinforcement Learning experiments for multiagent environments")
# Environment
parser.add_argument("--scenario", type=str, default="simple", help="name of the scenario script")
parser.add_argument("--max-episode-len", type=int, default=25, help="maximum episode length")
parser.add_argument("--num-episodes", type=int, default=60000, help="number of episodes")
parser.add_argument("--num-adversaries", type=int, default=0, help="number of adversaries")
parser.add_argument("--good-policy", type=str, default="maddpg", help="policy for good agents")
parser.add_argument("--adv-policy", type=str, default="maddpg", help="policy of adversaries")
# Core training parameters
parser.add_argument("--lr", type=float, default=1e-2, help="learning rate for Adam optimizer")
parser.add_argument("--gamma", type=float, default=0.95, help="discount factor")
parser.add_argument("--batch-size", type=int, default=1024, help="number of episodes to optimize at the same time")
parser.add_argument("--num-units", type=int, default=64, help="number of units in the mlp")
# Checkpointing
parser.add_argument("--exp-name", type=str, default='exp', help="name of the experiment")
parser.add_argument("--save-dir", type=str, default="/tmp/policy/", help="directory in which training state and model should be saved")
parser.add_argument("--save-rate", type=int, default=1000, help="save model once every time this many episodes are completed")
parser.add_argument("--load-dir", type=str, default="", help="directory in which training state and model are loaded")
# Evaluation
parser.add_argument("--restore", action="store_true", default=False)
parser.add_argument("--display", action="store_true", default=False)
parser.add_argument("--benchmark", action="store_true", default=False)
parser.add_argument("--benchmark-iters", type=int, default=100000, help="number of iterations run for benchmarking")
parser.add_argument("--benchmark-dir", type=str, default="./benchmark_files/", help="directory where benchmark data is saved")
parser.add_argument("--plots-dir", type=str, default="./learning_curves/", help="directory where plot data is saved")
#alpha
parser.add_argument("--alpha", type=float, default=0.1)
return parser.parse_args()
def mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
# This model takes as input an observation and returns values of all actions
with tf.variable_scope(scope, reuse=reuse):
out = input
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)
return out
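# A brief usage sketch (placeholder names obs_dim and n_actions are hypothetical):
# mlp_model builds a two-hidden-layer ReLU MLP over whatever observation tensor
# it is given, e.g.
#   obs_ph = tf.placeholder(tf.float32, [None, obs_dim])
#   q_values = mlp_model(obs_ph, num_outputs=n_actions, scope="q_func")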
def make_env(scenario_name, arglist, benchmark=False):
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
# load scenario from script
scenario = scenarios.load(scenario_name + ".py").Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
if benchmark:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data)
else:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
return env
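# A minimal sketch of how make_env is typically used (assuming the
# multiagent-particle-envs package providing MultiAgentEnv and a scenario such
# as "simple_spread" are available):
#   env = make_env("simple_spread", arglist)
#   obs_n = env.reset()   # one observation per agent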
def get_trainers(env, num_adversaries, obs_shape_n, arglist, get_eoi_reward, feature_space):
trainers = []
model = mlp_model
trainer = MADDPGAgentTrainer
for i in range(num_adversaries):
trainers.append(trainer(
"agent_%d" % i, model, obs_shape_n, env.action_space, i, arglist, get_eoi_reward, feature_space,
local_q_func=(arglist.adv_policy=='ddpg')))
for i in range(num_adversaries, env.n):
trainers.append(trainer(
"agent_%d" % i, model, obs_shape_n, env.action_space, i, arglist, get_eoi_reward, feature_space,
local_q_func=(arglist.good_policy=='ddpg')))
return trainers
def train(arglist):
with U.single_threaded_session() as sess:
# Create environment
env = make_env(arglist.scenario, arglist, arglist.benchmark)
# Create agent trainers
obs_shape_n = [env.observation_space[i].shape for i in range(env.n)]
num_adversaries = min(env.n, arglist.num_adversaries)
        # define a few constants
feature_space = env.observation_space[0].shape[0]
n_actions = env.action_space[0].n
n_ant = env.n
all_buff = AllBuffer(1e6, feature_space,n_actions, n_ant)
        # as long as this is built before U.initialize(), it should be fine
eoi_net = intrisic_eoi(feature_space, n_ant)
get_eoi_reward = build_batch_eoi(feature_space, eoi_net, n_ant)
trainers = get_trainers(env, num_adversaries, obs_shape_n, arglist, get_eoi_reward, feature_space)
print('Using good policy {} and adv policy {}'.format(arglist.good_policy, arglist.adv_policy))
# Initialize
U.initialize()
# Load previous results, if necessary
if arglist.load_dir == "":
arglist.load_dir = arglist.save_dir
if arglist.display or arglist.restore or arglist.benchmark:
print('Loading previous state...')
U.load_state(arglist.load_dir)
episode_rewards = [0.0] # sum of rewards for all agents
agent_rewards = [[0.0] for _ in range(env.n)] # individual agent reward
final_ep_rewards = [] # sum of rewards for training curve
final_ep_ag_rewards = [] # agent rewards for training curve
agent_info = [[[]]] # placeholder for benchmarking info
saver = tf.train.Saver()
obs_n = env.reset()
episode_step = 0
train_step = 0
t_start = time.time()
print('Starting iterations...')
while True:
# get action
action_n = [agent.action(obs) for agent, obs in zip(trainers,obs_n)]
# environment step
new_obs_n, rew_n, done_n, info_n = env.step(action_n)
episode_step += 1
done = all(done_n)
terminal = (episode_step >= arglist.max_episode_len)
# collect experience
for i, agent in enumerate(trainers):
agent.experience(obs_n[i], action_n[i], rew_n[i], new_obs_n[i], done_n[i], terminal)
all_buff.add(obs_n, action_n, rew_n, new_obs_n)
obs_n = new_obs_n
            # training every episode would be far too slow, so we only train periodically
            # but p(i|o) must be fit on the same batch that the other networks use
for i, rew in enumerate(rew_n):
episode_rewards[-1] += rew
agent_rewards[i][-1] += rew
if done or terminal:
obs_n = env.reset()
episode_step = 0
episode_rewards.append(0)
for a in agent_rewards:
a.append(0)
agent_info.append([[]])
# increment global step counter
train_step += 1
# for benchmarking learned policies
if arglist.benchmark:
for i, info in enumerate(info_n):
agent_info[-1][i].append(info_n['n'])
if train_step > arglist.benchmark_iters and (done or terminal):
file_name = arglist.benchmark_dir + arglist.exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(agent_info[:-1], fp)
break
continue
# for displaying learned policies
if arglist.display:
time.sleep(0.1)
env.render()
continue
if (train_step > 1024 and len(trainers[0].replay_buffer) < trainers[0].max_replay_buffer_len
and train_step % 100 == 0):
                # all_buff now holds experience from every agent, so start training the EOI network
feature = np.zeros((arglist.batch_size, feature_space))
feature_positive = np.zeros((arglist.batch_size, feature_space))
                samples, positive_samples = all_buff.getObs(arglist.batch_size)  # draw batch_size buffer entries for all agents at once
                feature_label = np.random.randint(0, env.n, arglist.batch_size)  # randomly pick batch_size agents, repetition allowed
for i in range(arglist.batch_size):
feature[i] = samples[feature_label[i]][i][0:feature_space]
feature_positive[i] = positive_samples[feature_label[i]][i][0:feature_space]
                sample_labels = to_categorical(feature_label, n_ant)  # encode which agent each batch entry came from as a one-hot matrix
                positive_labels = eoi_net.predict(feature_positive, batch_size=arglist.batch_size)  # predict on the positive samples first
eoi_net.fit(feature, sample_labels + beta_1 * positive_labels, batch_size=arglist.batch_size, epochs=1,
verbose=0)
# update all trainers, if not in display or benchmark mode
loss = None
for agent in trainers:
agent.preupdate()
for agent in trainers:
loss = agent.update(trainers, train_step)
# save model, display training output
if terminal and (len(episode_rewards) % arglist.save_rate == 0):
U.save_state(arglist.save_dir, saver=saver)
# print statement depends on whether or not there are adversaries
if num_adversaries == 0:
print("steps: {}, episodes: {}, mean episode reward: {}, time: {}".format(
train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]), round(time.time()-t_start, 3)))
else:
print("steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, time: {}".format(
train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]),
[np.mean(rew[-arglist.save_rate:]) for rew in agent_rewards], round(time.time()-t_start, 3)))
t_start = time.time()
# Keep track of final episode reward
final_ep_rewards.append(np.mean(episode_rewards[-arglist.save_rate:]))
for rew in agent_rewards:
final_ep_ag_rewards.append(np.mean(rew[-arglist.save_rate:]))
# saves final episode reward for plotting training curve later
if len(episode_rewards) > arglist.num_episodes:
rew_file_name = arglist.plots_dir + arglist.exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(final_ep_rewards, fp)
agrew_file_name = arglist.plots_dir + arglist.exp_name + '_agrewards.pkl'
with open(agrew_file_name, 'wb') as fp:
pickle.dump(final_ep_ag_rewards, fp)
print('...Finished total of {} episodes.'.format(len(episode_rewards)))
break
if __name__ == '__main__':
arglist = parse_args()
train(arglist)
| 49.995851
| 139
| 0.640219
|
b39ae642dea1d3f27638ad147061713e5c5708d6
| 1,304
|
py
|
Python
|
admin/pin_dist.py
|
edmcman/angr-dev
|
ff7fdfb41146e91f7a877e0e9517c3cdfaffa7c5
|
[
"BSD-2-Clause"
] | 94
|
2015-11-05T04:01:09.000Z
|
2022-02-25T07:43:16.000Z
|
admin/pin_dist.py
|
edmcman/angr-dev
|
ff7fdfb41146e91f7a877e0e9517c3cdfaffa7c5
|
[
"BSD-2-Clause"
] | 94
|
2016-02-26T00:44:59.000Z
|
2022-01-28T20:27:34.000Z
|
admin/pin_dist.py
|
edmcman/angr-dev
|
ff7fdfb41146e91f7a877e0e9517c3cdfaffa7c5
|
[
"BSD-2-Clause"
] | 109
|
2015-09-11T04:26:55.000Z
|
2022-03-29T15:42:47.000Z
|
#!/usr/bin/env python3
version = '8.1.0'
github_repos = [
    # NOTE this will need a refactor of some sort if we add packages whose package names differ from their repo names, e.g. because of dash/underscore differences
'angr/angr',
'angr/pyvex',
'angr/claripy',
'angr/cle',
'angr/archinfo',
'angr/ailment'
]
from git import Repo
import os
location = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
install_requires = []
for github_name in github_repos:
namespace, repo_name = github_name.split('/')
path = os.path.join(location, repo_name)
repo = Repo(path, search_parent_directories=False)
commit = repo.commit()
install_requires.append('%s @ git+https://github.com/%s/%s@%s#egg=%s' % (repo_name, namespace, repo_name, commit, repo_name))
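# Each entry appended above is a PEP 508 direct reference pinned to the local
# checkout's commit, roughly of this shape (commit hash illustrative only):
#   'angr @ git+https://github.com/angr/angr@0123abcd...#egg=angr'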
script = f"""\
#!/usr/bin/env python3
from setuptools import setup
install_requires = {install_requires}
setup(name='angr-dev',
version='{version}',
description='meta-package for development against angr',
author='angr team',
author_email='angr@lists.cs.ucsb.edu',
maintainer='rhelmot',
maintainer_email='audrey@rhelmot.io',
install_requires=install_requires
)
"""
with open('setup.py', 'w') as fp:
fp.write(script)
os.system('python setup.py sdist')
| 26.612245
| 158
| 0.684816
|
ed72f7e4fbaeeb4dd22196faa3a5ed5237306bb8
| 7,076
|
py
|
Python
|
forex_trade_tests.py
|
sizhe198/simple-forex-strategy-tester-with-python
|
90a30a2f46efefe46e2f5bf7c5ed9ab3ac9a6cc7
|
[
"MIT"
] | null | null | null |
forex_trade_tests.py
|
sizhe198/simple-forex-strategy-tester-with-python
|
90a30a2f46efefe46e2f5bf7c5ed9ab3ac9a6cc7
|
[
"MIT"
] | null | null | null |
forex_trade_tests.py
|
sizhe198/simple-forex-strategy-tester-with-python
|
90a30a2f46efefe46e2f5bf7c5ed9ab3ac9a6cc7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 12 02:44:36 2019
@author: sizhe198
"""
import cx_Oracle
import talib
import numpy as np
def jincha(short,long,price): # golden cross signal for moving averages
ma_s =talib.SMA(price,timeperiod=short)
ma_l =talib.SMA(price,timeperiod=long)
jincha=[]
jincha.append(0)
for i in range(1,len(price)):
if ma_s[i] > ma_l[i] and ma_s[i-1]<=ma_l[i-1]:
jincha.append(1)
else:
jincha.append(0)
return jincha
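# A small illustrative sketch with made-up prices: the signal is 1 only on the
# bar where the short moving average crosses above the long one (earlier bars
# are 0 because talib.SMA returns NaN during its warm-up period).
#   prices = np.array([1.0, 1.0, 1.0, 0.9, 1.2, 1.4, 1.6, 1.8])
#   jincha(2, 4, prices)   # expected: [0, 0, 0, 0, 1, 0, 0, 0]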
def sicha(short,long,price): # death cross signal for moving averages
ma_s =talib.SMA(price,timeperiod=short)
ma_l =talib.SMA(price,timeperiod=long)
sicha=[]
sicha.append(0)
for i in range(1,len(price)):
if ma_s[i-1] >= ma_l[i-1] and ma_s[i]<ma_l[i]:
sicha.append(-1)
else:
sicha.append(0)
return sicha
def get_time(x,y): # get the trading times for the signal
return [a for a in range(len(y)) if y[a] == x]
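# For example, get_time(1, [0, 1, 0, 1]) returns [1, 3]: the bar indices at
# which the requested signal value occurs.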
## define uptrend: if the short-term moving average stays above the long-term moving average for K periods, the uptrend is confirmed.
def longtrend(short,long,K,price):
ma_s =talib.SMA(price,timeperiod=short)
ma_l=talib.SMA(price,timeperiod=long)
longtrend=[]
for i in range(len(price)-1):
if i< K-1:
longtrend.append(0)
else:
chi=0
for j in range(K):
if ma_s[i-j]>ma_l[i-j]:
chi+=1
if chi==K:
longtrend.append(1)
else:
longtrend.append(0)
return longtrend
## define downtrend: if the short-term moving average is below the long-term moving average, the downtrend is confirmed.
def shortrend(short,long,price):
ma_s=talib.SMA(price,timeperiod=short)
ma_l=talib.SMA(price,timeperiod=long)
shortrend=[]
for i in range(len(price)):
if round(ma_s[i],5)<round(ma_l[i],5):
shortrend.append(1)
else:
shortrend.append(0)
return shortrend
# data processing: combine the 5-minute data with the 30-minute data that carries the trend labels
def data_proc(short_30,long_30):
shortrends=shortrend(short_30,long_30,data_30mins)
period_5_30=[]
j=0
for i in range(len(data_class)-1):
if j <len(shortrends):
if data_class[i]==data_class[i+1]:
period_5_30.append(shortrends[j])
else:
period_5_30.append(shortrends[j])
j+=1
else:
period_5_30.append(shortrends[j])
period_5_30.append(shortrends[j])
return period_5_30
# compute the signals for opening a position, without yet considering when to close it
def get_open(short_5_si,long_5_si,period_5_30,forbid_tm_st,forbid_tm_end):
#si=sicha(short_5_si,long_5_si,opend)
jin=jincha(short_5_si,long_5_si,opend)
#jin_time=get_time(1,jin)
signal_open=[]
for i in range(len(period_5_30)):
if jin[i]==0:
signal_open.append(0)
elif jin[i]==1:
if period_5_30[i]==1 and (int(data[i][1][0:2]) < forbid_tm_st or int(data[i][1][0:2])>= forbid_tm_end):
signal_open.append(-1)
else:
signal_open.append(0)
open_time=get_time(-1,signal_open)
return open_time
# compute the open and close signals, applying the max gain/loss limits and a trading cost of 0.0002
def get_signal(short_5_jin,long_5_jin,open_time,max_loss,max_gain):
si=sicha(short_5_jin,long_5_jin,opend)
si_time=get_time(-1,si)
all_times=[]
y=0
for i in range(len(open_time)):
if open_time[i]<=y:
continue
else:
for j in range(0,len(si_time)):
if si_time[j]>open_time[i]:
num=[]
q=0
for m in range(open_time[i],si_time[j]):
if opend[open_time[i]]-high[m+1]-0.0002 > -max_loss and opend[open_time[i]]-low[m+1]-0.0002 < max_gain:
num.append(1)
else:
num.append(-1)
q=m+2
break
if sum(num)==si_time[j]-open_time[i]:
all_times.append([open_time[i],si_time[j]])
y=si_time[j]
break
else:
all_times.append([open_time[i],q])
y=q
break
return all_times
# compute the profit, taking into account when not to open a position
def profit_count020(all_times,max_loss,max_gain):
profit=[]
for i in range(len(all_times)):
if opend[all_times[i][0]]-high[all_times[i][1]-1]-0.0002 <=-max_loss:
profit.append([-max_loss*680])
elif opend[all_times[i][0]]-low[all_times[i][1]-1]-0.0002 >= max_gain:
profit.append([max_gain*680])
else:
profit.append([(opend[all_times[i][0]]-opend[all_times[i][1]])*680])
profit_sum=sum(np.array(profit))
return profit_sum
# ETL for data
conn=cx_Oracle.connect('name/password@localhost/XE')
c=conn.cursor()
x=c.execute('select * from FOREX_EURU') # table name
data=x.fetchall()
c.close()
conn.close()
high2=[]
for i in range(len(data)):
high2.append(data[i][3])
high=np.array(high2)
del high2
low2=[]
for i in range(len(data)):
low2.append(data[i][4])
low=np.array(low2)
del low2
opend2=[]
for i in range(len(data)):
opend2.append(data[i][2])
opend=np.array(opend2)
del opend2
data_class=[]
for i in range(len(data)):
if data[i][1][3:5] in ('05','10','15','20','25','00'):
data_class.append(1)
else:
data_class.append(2)
data_30min=[]
data_30min.append(data[0][2])
for i in range(len(data)-1):
if data_class[i]!=data_class[i+1]:
data_30min.append(data[i+1][2])
data_30mins=np.array(data_30min)
# compute the profit with different parameter for the max profit
pro_test3=[]
for i in range(7,18,5):
for j in range(22,35,6):
period_5_30 = data_proc(i,j)
for u in range(2,10,2):
for p in range(15,36,4):
open_time=get_open(u,p,period_5_30,3,7)
for o in range(32,50,5):
for q in range(73,110,10):
for x in range(25,52,8):
for z in range (35,120,25):
all_times=get_signal(o,q,open_time,x*0.0001,z*0.0001)
pro_test3.append([profit_count020(all_times,x*0.0001,z*0.0001),i,j,u,p,o,q,x,z])
print(i,j,u,p,o,q,x,z)
| 35.557789
| 130
| 0.547343
|
7b126b0c8f3837d5226e1bf423ba71d1ad8062ea
| 1,125
|
py
|
Python
|
examples/GA3C/ThreadTrainer.py
|
neopenx/Dragon
|
0e639a7319035ddc81918bd3df059230436ee0a1
|
[
"BSD-2-Clause"
] | 212
|
2015-07-05T07:57:17.000Z
|
2022-02-27T01:55:35.000Z
|
examples/GA3C/ThreadTrainer.py
|
neopenx/Dragon
|
0e639a7319035ddc81918bd3df059230436ee0a1
|
[
"BSD-2-Clause"
] | 6
|
2016-07-07T14:31:56.000Z
|
2017-12-12T02:21:15.000Z
|
examples/GA3C/ThreadTrainer.py
|
neopenx/Dragon
|
0e639a7319035ddc81918bd3df059230436ee0a1
|
[
"BSD-2-Clause"
] | 71
|
2016-03-24T09:02:41.000Z
|
2021-06-03T01:52:41.000Z
|
# --------------------------------------------------------
# GA3C for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from threading import Thread
import numpy as np
from Config import Config
class ThreadTrainer(Thread):
def __init__(self, server, id):
super(ThreadTrainer, self).__init__()
self.setDaemon(True)
self.id = id
self.server = server
self.exit_flag = False
def run(self):
while not self.exit_flag:
batch_size = 0
while batch_size <= Config.TRAINING_MIN_BATCH_SIZE:
x_, r_, a_ = self.server.training_q.get()
if batch_size == 0:
x__ = x_
r__ = r_
a__ = a_
else:
x__ = np.concatenate((x__, x_))
r__ = np.concatenate((r__, r_))
a__ = np.concatenate((a__, a_))
batch_size += x_.shape[0]
if Config.TRAIN_MODELS:
self.server.train_model(x__, r__, a__)
| 29.605263
| 63
| 0.469333
|
27f93b73847740abb55ac05dc7b24f174815ec82
| 534
|
py
|
Python
|
dowhy/interpreters/visual_interpreter.py
|
Sid-darthvader/dowhy
|
535cdb47d7eed8988573770769ecea3856180b48
|
[
"MIT"
] | 2,904
|
2019-05-07T08:09:33.000Z
|
2022-03-31T18:28:41.000Z
|
dowhy/interpreters/visual_interpreter.py
|
Sid-darthvader/dowhy
|
535cdb47d7eed8988573770769ecea3856180b48
|
[
"MIT"
] | 238
|
2019-05-11T02:57:22.000Z
|
2022-03-31T23:47:18.000Z
|
dowhy/interpreters/visual_interpreter.py
|
Sid-darthvader/dowhy
|
535cdb47d7eed8988573770769ecea3856180b48
|
[
"MIT"
] | 527
|
2019-05-08T16:23:45.000Z
|
2022-03-30T21:02:41.000Z
|
from dowhy.interpreter import Interpreter
class VisualInterpreter(Interpreter):
"""Base class for interpreters that show plots or visualizations as output.
"""
def __init__(self, instance, **kwargs):
super().__init__(instance, **kwargs)
def show(self, interpret_plot):
"""Display the intepretation.
:param interpret_plot: Plot object containing the interpretation
:returns: None
"""
# TODO: A common way to show all plots
raise NotImplementedError
| 26.7
| 79
| 0.662921
|
9b8962ae1961d74b8fb83870c40b8903aab346e5
| 10,635
|
py
|
Python
|
homeassistant/components/tesla/__init__.py
|
Toxblh/core
|
800cf6c8c0b1d688ab0a364d719bc908539ed3cf
|
[
"Apache-2.0"
] | 1
|
2020-09-07T17:15:34.000Z
|
2020-09-07T17:15:34.000Z
|
homeassistant/components/tesla/__init__.py
|
Toxblh/core
|
800cf6c8c0b1d688ab0a364d719bc908539ed3cf
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/tesla/__init__.py
|
Toxblh/core
|
800cf6c8c0b1d688ab0a364d719bc908539ed3cf
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Tesla cars."""
import asyncio
from collections import defaultdict
from datetime import timedelta
import logging
import async_timeout
from teslajsonpy import Controller as TeslaAPI, TeslaException
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
CONF_ACCESS_TOKEN,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_TOKEN,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util import slugify
from .config_flow import (
CannotConnect,
InvalidAuth,
configured_instances,
validate_input,
)
from .const import (
CONF_WAKE_ON_START,
DATA_LISTENER,
DEFAULT_SCAN_INTERVAL,
DEFAULT_WAKE_ON_START,
DOMAIN,
ICONS,
MIN_SCAN_INTERVAL,
TESLA_COMPONENTS,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): vol.All(cv.positive_int, vol.Clamp(min=MIN_SCAN_INTERVAL)),
}
)
},
extra=vol.ALLOW_EXTRA,
)
@callback
def _async_save_tokens(hass, config_entry, access_token, refresh_token):
hass.config_entries.async_update_entry(
config_entry,
data={
**config_entry.data,
CONF_ACCESS_TOKEN: access_token,
CONF_TOKEN: refresh_token,
},
)
async def async_setup(hass, base_config):
"""Set up of Tesla component."""
def _update_entry(email, data=None, options=None):
data = data or {}
options = options or {
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_WAKE_ON_START: DEFAULT_WAKE_ON_START,
}
for entry in hass.config_entries.async_entries(DOMAIN):
if email != entry.title:
continue
hass.config_entries.async_update_entry(entry, data=data, options=options)
config = base_config.get(DOMAIN)
if not config:
return True
email = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
scan_interval = config[CONF_SCAN_INTERVAL]
if email in configured_instances(hass):
try:
info = await validate_input(hass, config)
except (CannotConnect, InvalidAuth):
return False
_update_entry(
email,
data={
CONF_ACCESS_TOKEN: info[CONF_ACCESS_TOKEN],
CONF_TOKEN: info[CONF_TOKEN],
},
options={CONF_SCAN_INTERVAL: scan_interval},
)
else:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: email, CONF_PASSWORD: password},
)
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][email] = {CONF_SCAN_INTERVAL: scan_interval}
return True
async def async_setup_entry(hass, config_entry):
"""Set up Tesla as config entry."""
hass.data.setdefault(DOMAIN, {})
config = config_entry.data
websession = aiohttp_client.async_get_clientsession(hass)
email = config_entry.title
if email in hass.data[DOMAIN] and CONF_SCAN_INTERVAL in hass.data[DOMAIN][email]:
scan_interval = hass.data[DOMAIN][email][CONF_SCAN_INTERVAL]
hass.config_entries.async_update_entry(
config_entry, options={CONF_SCAN_INTERVAL: scan_interval}
)
hass.data[DOMAIN].pop(email)
try:
controller = TeslaAPI(
websession,
refresh_token=config[CONF_TOKEN],
access_token=config[CONF_ACCESS_TOKEN],
update_interval=config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
)
(refresh_token, access_token) = await controller.connect(
wake_if_asleep=config_entry.options.get(
CONF_WAKE_ON_START, DEFAULT_WAKE_ON_START
)
)
except TeslaException as ex:
_LOGGER.error("Unable to communicate with Tesla API: %s", ex.message)
return False
_async_save_tokens(hass, config_entry, access_token, refresh_token)
coordinator = TeslaDataUpdateCoordinator(
hass, config_entry=config_entry, controller=controller
)
# Fetch initial data so we have data when entities subscribe
entry_data = hass.data[DOMAIN][config_entry.entry_id] = {
"coordinator": coordinator,
"devices": defaultdict(list),
DATA_LISTENER: [config_entry.add_update_listener(update_listener)],
}
_LOGGER.debug("Connected to the Tesla API")
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
all_devices = controller.get_homeassistant_components()
if not all_devices:
return False
for device in all_devices:
entry_data["devices"][device.hass_type].append(device)
for component in TESLA_COMPONENTS:
_LOGGER.debug("Loading %s", component)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
async def async_unload_entry(hass, config_entry) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in TESLA_COMPONENTS
]
)
)
for listener in hass.data[DOMAIN][config_entry.entry_id][DATA_LISTENER]:
listener()
username = config_entry.title
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
_LOGGER.debug("Unloaded entry for %s", username)
return True
return False
async def update_listener(hass, config_entry):
"""Update when config_entry options update."""
controller = hass.data[DOMAIN][config_entry.entry_id]["coordinator"].controller
old_update_interval = controller.update_interval
controller.update_interval = config_entry.options.get(CONF_SCAN_INTERVAL)
if old_update_interval != controller.update_interval:
_LOGGER.debug(
"Changing scan_interval from %s to %s",
old_update_interval,
controller.update_interval,
)
class TeslaDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Tesla data."""
def __init__(self, hass, *, config_entry, controller):
"""Initialize global Tesla data updater."""
self.controller = controller
self.config_entry = config_entry
update_interval = timedelta(seconds=MIN_SCAN_INTERVAL)
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=update_interval,
)
async def _async_update_data(self):
"""Fetch data from API endpoint."""
if self.controller.is_token_refreshed():
(refresh_token, access_token) = self.controller.get_tokens()
_async_save_tokens(
self.hass, self.config_entry, access_token, refresh_token
)
_LOGGER.debug("Saving new tokens in config_entry")
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(30):
return await self.controller.update()
except TeslaException as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
class TeslaDevice(Entity):
"""Representation of a Tesla device."""
def __init__(self, tesla_device, coordinator):
"""Initialise the Tesla device."""
self.tesla_device = tesla_device
self.coordinator = coordinator
self._name = self.tesla_device.name
self._unique_id = slugify(self.tesla_device.uniq_name)
self._attributes = self.tesla_device.attrs.copy()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def icon(self):
"""Return the icon of the sensor."""
if self.device_class:
return None
return ICONS.get(self.tesla_device.type)
@property
def should_poll(self):
"""No need to poll. Coordinator notifies entity of updates."""
return False
@property
def available(self):
"""Return if entity is available."""
return self.coordinator.last_update_success
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = self._attributes.copy()
if self.tesla_device.has_battery():
attr[ATTR_BATTERY_LEVEL] = self.tesla_device.battery_level()
attr[ATTR_BATTERY_CHARGING] = self.tesla_device.battery_charging()
return attr
@property
def device_info(self):
"""Return the device_info of the device."""
return {
"identifiers": {(DOMAIN, self.tesla_device.id())},
"name": self.tesla_device.car_name(),
"manufacturer": "Tesla",
"model": self.tesla_device.car_type,
"sw_version": self.tesla_device.car_version,
}
async def async_added_to_hass(self):
"""Register state update callback."""
self.async_on_remove(self.coordinator.async_add_listener(self.refresh))
async def async_will_remove_from_hass(self):
"""Prepare for unload."""
async def async_update(self):
"""Update the state of the device."""
_LOGGER.debug("Updating state for: %s", self.name)
await self.coordinator.async_request_refresh()
@callback
def refresh(self) -> None:
"""Refresh the state of the device.
This assumes the coordinator has updated the controller.
"""
self.tesla_device.refresh()
self.async_write_ha_state()
| 32.325228
| 88
| 0.654067
|
0af349056989c0d01e8b8daeb4af5d8950ae8c39
| 3,135
|
py
|
Python
|
nsd1802/python/ansible_project/myansible/myansible/settings.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
nsd1802/python/ansible_project/myansible/myansible/settings.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
nsd1802/python/ansible_project/myansible/myansible/settings.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for myansible project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%mh)i_vs_ttb66%vcgpi(aypp4x89^xhh%c)9(h701#etz#oh+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = '*'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webansi',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myansible.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myansible.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.696721
| 91
| 0.69697
|
9e0ccc962b147fa391ddf096005d3f0d70b379ec
| 1,780
|
py
|
Python
|
web/addons/hw_escpos/__openerp__.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | 1
|
2019-12-29T11:53:56.000Z
|
2019-12-29T11:53:56.000Z
|
odoo/addons/hw_escpos/__openerp__.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
odoo/addons/hw_escpos/__openerp__.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | 3
|
2020-10-08T14:42:10.000Z
|
2022-01-28T14:12:29.000Z
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'ESC/POS Hardware Driver',
'version': '1.0',
'category': 'Hardware Drivers',
'sequence': 6,
'website': 'https://www.odoo.com/page/point-of-sale',
'summary': 'Hardware Driver for ESC/POS Printers and Cashdrawers',
'description': """
ESC/POS Hardware Driver
=======================
This module allows openerp to print with ESC/POS compatible printers and
to open ESC/POS controlled cashdrawers in the point of sale and other modules
that would need such functionality.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'external_dependencies': {
'python' : ['usb.core','serial','qrcode'],
},
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 34.901961
| 78
| 0.612921
|
d4c9d827be35b57687d1102ff7aa5ccfe875e76a
| 32,810
|
py
|
Python
|
autotest/gdrivers/vrtderived.py
|
rcoup/gdal
|
31240deb7b71d990a2abbad1bebedd0918989ca0
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/vrtderived.py
|
rcoup/gdal
|
31240deb7b71d990a2abbad1bebedd0918989ca0
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/vrtderived.py
|
rcoup/gdal
|
31240deb7b71d990a2abbad1bebedd0918989ca0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test AddBand() with VRTDerivedRasterBand.
# Author: Antonio Valentino <a_valentino@users.sf.net>
#
###############################################################################
# Copyright (c) 2011, Antonio Valentino
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import shutil
import sys
import threading
from osgeo import gdal
sys.path.append('../pymod')
import gdaltest
def _xmlsearch(root, nodetype, name):
for node in root[2:]:
if node[0] == nodetype and node[1] == name:
return node
###############################################################################
# Verify raster band subClass
def vrtderived_1():
filename = 'tmp/derived.vrt'
vrt_ds = gdal.GetDriverByName('VRT').Create(filename, 50, 50, 0)
options = [
'subClass=VRTDerivedRasterBand',
]
vrt_ds.AddBand(gdal.GDT_Byte, options)
simpleSourceXML = ''' <SimpleSource>
<SourceFilename>data/byte.tif</SourceFilename>
<SourceBand>1</SourceBand>
</SimpleSource>'''
md = {}
md['source_0'] = simpleSourceXML
vrt_ds.GetRasterBand(1).SetMetadata(md, 'vrt_sources')
md_read = vrt_ds.GetRasterBand(1).GetMetadata('vrt_sources')
vrt_ds = None
expected_md_read = (
'<SimpleSource>\n'
' <SourceFilename relativeToVRT="0">data/byte.tif</SourceFilename>\n'
' <SourceBand>1</SourceBand>\n'
' <SourceProperties RasterXSize="20" RasterYSize="20" DataType="Byte" '
'BlockXSize="20" BlockYSize="20" />\n'
'</SimpleSource>\n')
if md_read['source_0'] != expected_md_read:
gdaltest.post_reason('fail')
print(md_read['source_0'])
return 'fail'
xmlstring = open(filename).read()
gdal.Unlink(filename)
node = gdal.ParseXMLString(xmlstring)
node = _xmlsearch(node, gdal.CXT_Element, 'VRTRasterBand')
node = _xmlsearch(node, gdal.CXT_Attribute, 'subClass')
node = _xmlsearch(node, gdal.CXT_Text, 'VRTDerivedRasterBand')
if node is None:
gdaltest.post_reason('invalid subclass')
return 'fail'
return 'success'
###############################################################################
# Verify derived raster band pixel function type
def vrtderived_2():
filename = 'tmp/derived.vrt'
vrt_ds = gdal.GetDriverByName('VRT').Create(filename, 50, 50, 0)
options = [
'subClass=VRTDerivedRasterBand',
'PixelFunctionType=dummy',
'PixelFunctionLanguage=Python',
]
vrt_ds.AddBand(gdal.GDT_Byte, options)
simpleSourceXML = ''' <SimpleSource>
<SourceFilename>data/byte.tif</SourceFilename>
<SourceBand>1</SourceBand>
</SimpleSource>'''
md = {}
md['source_0'] = simpleSourceXML
vrt_ds.GetRasterBand(1).SetMetadata(md, 'vrt_sources')
with gdaltest.error_handler():
cs = vrt_ds.GetRasterBand(1).Checksum()
if cs != 0:
gdaltest.post_reason('fail')
return 'fail'
with gdaltest.error_handler():
ret = vrt_ds.GetRasterBand(1).WriteRaster(0, 0, 1, 1, ' ')
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
vrt_ds = None
xmlstring = open(filename).read()
gdal.Unlink(filename)
node = gdal.ParseXMLString(xmlstring)
node = _xmlsearch(node, gdal.CXT_Element, 'VRTRasterBand')
pixelfunctiontype = _xmlsearch(node, gdal.CXT_Element, 'PixelFunctionType')
pixelfunctiontype = _xmlsearch(pixelfunctiontype, gdal.CXT_Text, 'dummy')
if pixelfunctiontype is None:
gdaltest.post_reason('incorrect PixelFunctionType value')
return 'fail'
pixelfunctionlanguage = _xmlsearch(node, gdal.CXT_Element, 'PixelFunctionLanguage')
pixelfunctionlanguage = _xmlsearch(pixelfunctionlanguage, gdal.CXT_Text, 'Python')
if pixelfunctionlanguage is None:
gdaltest.post_reason('incorrect PixelFunctionLanguage value')
return 'fail'
return 'success'
###############################################################################
# Verify derived raster band transfer type
def vrtderived_3():
filename = 'tmp/derived.vrt'
vrt_ds = gdal.GetDriverByName('VRT').Create(filename, 50, 50, 0)
options = [
'subClass=VRTDerivedRasterBand',
'PixelFunctionType=dummy',
'SourceTransferType=Byte',
]
vrt_ds.AddBand(gdal.GDT_Byte, options)
simpleSourceXML = ''' <SimpleSource>
<SourceFilename>data/byte.tif</SourceFilename>
<SourceBand>1</SourceBand>
</SimpleSource>'''
md = {}
md['source_0'] = simpleSourceXML
vrt_ds.GetRasterBand(1).SetMetadata(md, 'vrt_sources')
vrt_ds = None
xmlstring = open(filename).read()
gdal.Unlink(filename)
node = gdal.ParseXMLString(xmlstring)
node = _xmlsearch(node, gdal.CXT_Element, 'VRTRasterBand')
node = _xmlsearch(node, gdal.CXT_Element, 'SourceTransferType')
node = _xmlsearch(node, gdal.CXT_Text, 'Byte')
if node is None:
gdaltest.post_reason('incorrect SourceTransferType value')
return 'fail'
return 'success'
###############################################################################
# Check handling of invalid derived raster band transfer type
def vrtderived_4():
filename = 'tmp/derived.vrt'
vrt_ds = gdal.GetDriverByName('VRT').Create(filename, 50, 50, 0)
options = [
'subClass=VRTDerivedRasterBand',
'PixelFunctionType=dummy',
'SourceTransferType=Invalid',
]
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = vrt_ds.AddBand(gdal.GDT_Byte, options)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('invalid SourceTransferType value not detected')
return 'fail'
return 'success'
###############################################################################
# Check Python derived function with BufferRadius=1
def vrtderived_5():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')
ds = gdal.Open('data/n43_hillshade.vrt')
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 50577:
gdaltest.post_reason('invalid checksum')
print(cs)
return 'fail'
return 'success'
###############################################################################
# Check Python derived function with BufferRadius=0 and no source
def vrtderived_6():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')
ds = gdal.Open('data/python_ones.vrt')
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 10000:
gdaltest.post_reason('invalid checksum')
print(cs)
return 'fail'
return 'success'
###############################################################################
# Check Python derived function with no started Python interpreter
def vrtderived_7():
import test_cli_utilities
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret, err = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' -checksum data/n43_hillshade.vrt --config GDAL_VRT_ENABLE_PYTHON YES')
if gdal.GetConfigOption('CPL_DEBUG') is not None:
print(err)
    # Either we cannot find a Python library, or it works
if ret.find('Checksum=0') >= 0:
print('Did not manage to find a Python library')
elif ret.find('Checksum=50577') < 0:
gdaltest.post_reason('fail')
print(ret)
print(err)
return 'fail'
ret, err = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' -checksum data/n43_hillshade.vrt --config GDAL_VRT_ENABLE_PYTHON YES --config VRT_ENABLE_PYTHON_PATH NO')
if gdal.GetConfigOption('CPL_DEBUG') is not None:
print(err)
    # Either we cannot find a Python library, or it works
if ret.find('Checksum=0') >= 0:
print('Did not manage to find a Python library')
elif ret.find('Checksum=50577') < 0:
gdaltest.post_reason('fail')
print(ret)
print(err)
return 'fail'
ret, err = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' -checksum data/n43_hillshade.vrt --config GDAL_VRT_ENABLE_PYTHON YES --config VRT_ENABLE_PYTHON_SYMLINK NO')
if gdal.GetConfigOption('CPL_DEBUG') is not None:
print(err)
    # Either we cannot find a Python library, or it works
if ret.find('Checksum=0') >= 0:
print('Did not manage to find a Python library')
elif ret.find('Checksum=50577') < 0:
gdaltest.post_reason('fail')
print(ret)
print(err)
return 'fail'
# Invalid shared object name
ret, err = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' -checksum data/n43_hillshade.vrt --config GDAL_VRT_ENABLE_PYTHON YES --config PYTHONSO foo')
if gdal.GetConfigOption('CPL_DEBUG') is not None:
print(err)
if ret.find('Checksum=0') < 0:
gdaltest.post_reason('fail')
print(ret)
print(err)
return 'fail'
# Valid shared object name, but without Python symbols
libgdal_so = gdaltest.find_lib('gdal')
if libgdal_so is not None:
ret, err = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' -checksum data/n43_hillshade.vrt --config GDAL_VRT_ENABLE_PYTHON YES --config PYTHONSO "%s"' % libgdal_so)
if gdal.GetConfigOption('CPL_DEBUG') is not None:
print(err)
if ret.find('Checksum=0') < 0:
gdaltest.post_reason('fail')
print(ret)
print(err)
return 'fail'
return 'success'
###############################################################################
# Check that GDAL_VRT_ENABLE_PYTHON=NO or undefined is honored
def vrtderived_8():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'NO')
ds = gdal.Open('data/n43_hillshade.vrt')
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
return 'fail'
ds = gdal.Open('data/n43_hillshade.vrt')
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
return 'fail'
return 'success'
###############################################################################
# Check various failure modes with Python functions
def vrtderived_9():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
# Missing PixelFunctionType
with gdaltest.error_handler():
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
</VRTRasterBand>
</VRTDataset>
""")
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
# Unsupported PixelFunctionLanguage
with gdaltest.error_handler():
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>identity</PixelFunctionType>
<PixelFunctionLanguage>foo</PixelFunctionLanguage>
</VRTRasterBand>
</VRTDataset>
""")
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
# PixelFunctionCode can only be used with Python
with gdaltest.error_handler():
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>identity</PixelFunctionType>
<PixelFunctionCode><![CDATA[
def identity(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, r, gt, **kwargs):
syntax_error
]]>
</PixelFunctionCode>
</VRTRasterBand>
</VRTDataset>
""")
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
# PixelFunctionArguments can only be used with Python
with gdaltest.error_handler():
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>identity</PixelFunctionType>
<PixelFunctionArguments foo="bar"/>
</VRTRasterBand>
</VRTDataset>
""")
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
# BufferRadius can only be used with Python
with gdaltest.error_handler():
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>identity</PixelFunctionType>
<BufferRadius>1</BufferRadius>
</VRTRasterBand>
</VRTDataset>
""")
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
# Invalid BufferRadius
with gdaltest.error_handler():
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>identity</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
<BufferRadius>-1</BufferRadius>
</VRTRasterBand>
</VRTDataset>
""")
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
# Error at Python code compilation (indentation error)
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>identity</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
<PixelFunctionCode><![CDATA[
def identity(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, r, gt, **kwargs):
syntax_error
]]>
</PixelFunctionCode>
</VRTRasterBand>
</VRTDataset>
""")
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# Error at run time (in global code)
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>identity</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
<PixelFunctionCode><![CDATA[
runtime_error
def identity(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, r, gt, **kwargs):
pass
]]>
</PixelFunctionCode>
</VRTRasterBand>
</VRTDataset>
""")
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# Error at run time (in pixel function)
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>identity</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
<PixelFunctionCode><![CDATA[
def identity(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, r, gt, **kwargs):
runtime_error
]]>
</PixelFunctionCode>
</VRTRasterBand>
</VRTDataset>
""")
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# User exception
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>identity</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
<PixelFunctionCode><![CDATA[
def identity(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, r, gt, **kwargs):
raise Exception('my exception')
]]>
</PixelFunctionCode>
</VRTRasterBand>
</VRTDataset>
""")
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# unknown_function
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>unknown_function</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
<PixelFunctionCode><![CDATA[
def identity(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, r, gt, **kwargs):
pass
]]>
</PixelFunctionCode>
</VRTRasterBand>
</VRTDataset>
""")
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# uncallable object
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>uncallable_object</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
<PixelFunctionCode><![CDATA[
uncallable_object = True
]]>
</PixelFunctionCode>
</VRTRasterBand>
</VRTDataset>
""")
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# unknown_module
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>unknown_module.unknown_function</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
</VRTRasterBand>
</VRTDataset>
""")
with gdaltest.error_handler():
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', "YES")
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
def vrtderived_code_that_only_makes_sense_with_GDAL_VRT_ENABLE_PYTHON_equal_IF_SAFE_but_that_is_now_disabled():
# untrusted import
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>my_func</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
<PixelFunctionCode><![CDATA[
def my_func(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, r, gt, **kwargs):
import foo
]]>
</PixelFunctionCode>
</VRTRasterBand>
</VRTDataset>
""")
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# untrusted function
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>my_func</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
<PixelFunctionCode><![CDATA[
def my_func(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, r, gt, **kwargs):
open('/etc/passwd').read()
]]>
</PixelFunctionCode>
</VRTRasterBand>
</VRTDataset>
""")
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# GDAL_VRT_ENABLE_PYTHON not set to YES
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<PixelFunctionType>vrtderived.one_pix_func</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
</VRTRasterBand>
</VRTDataset>
""")
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
###############################################################################
# Check Python function in another module
def one_pix_func(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize, raster_ysize, r, gt, **kwargs):
# pylint: disable=unused-argument
out_ar.fill(1)
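# Sketch of the VRT Python pixel-function contract exercised by these tests:
# GDAL passes a list of source arrays (in_ar), a pre-allocated output array
# (out_ar) covering the requested window, the window offset/size, the full
# raster size, the buffer radius r and the geotransform gt; the function is
# expected to fill out_ar in place, as one_pix_func does above.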
def vrtderived_10():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
content = """<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<ColorInterp>Gray</ColorInterp>
<PixelFunctionType>vrtderived.one_pix_func</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
</VRTRasterBand>
</VRTDataset>
"""
ds = gdal.Open(content)
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', "YES")
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 100:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# GDAL_VRT_TRUSTED_MODULES not defined
ds = gdal.Open(content)
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# GDAL_VRT_PYTHON_TRUSTED_MODULES *NOT* matching our module
for val in ['vrtderive',
'vrtderivedX',
'vrtderivedX*',
                'vrtderive.*',
'vrtderivedX.*']:
ds = gdal.Open(content)
gdal.SetConfigOption('GDAL_VRT_PYTHON_TRUSTED_MODULES', val)
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_PYTHON_TRUSTED_MODULES', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# GDAL_VRT_PYTHON_TRUSTED_MODULES matching our module
for val in ['foo,vrtderived,bar',
'*',
'foo,vrtderived*,bar',
'foo,vrtderived.*,bar',
'foo,vrtderi*,bar']:
ds = gdal.Open(content)
gdal.SetConfigOption('GDAL_VRT_PYTHON_TRUSTED_MODULES', val)
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_PYTHON_TRUSTED_MODULES', None)
if cs != 100:
gdaltest.post_reason('invalid checksum')
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
###############################################################################
# Test serializing with python code
def vrtderived_11():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
shutil.copy('data/n43_hillshade.vrt', 'tmp/n43_hillshade.vrt')
shutil.copy('data/n43.dt0', 'tmp/n43.dt0')
ds = gdal.Open('tmp/n43_hillshade.vrt', gdal.GA_Update)
ds.SetMetadataItem('foo', 'bar')
ds = None
ds = gdal.Open('tmp/n43_hillshade.vrt')
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
ds = None
os.unlink('tmp/n43_hillshade.vrt')
os.unlink('tmp/n43.dt0')
if cs != 50577:
gdaltest.post_reason('invalid checksum')
print(cs)
return 'fail'
return 'success'
###############################################################################
# Test all data types with python code
def vrtderived_12():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
for dt in ["Byte", "UInt16", "Int16", "UInt32", "Int32",
"Float32", "Float64",
"CInt16", "CInt32", "CFloat32", "CFloat64"]:
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="%s" band="1" subClass="VRTDerivedRasterBand">
<ColorInterp>Gray</ColorInterp>
<PixelFunctionType>vrtderived.one_pix_func</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
</VRTRasterBand>
</VRTDataset>""" % dt)
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', "YES")
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
# CInt16/CInt32 do not map to native numpy types
if dt == 'CInt16' or dt == 'CInt32':
expected_cs = 0 # error
else:
expected_cs = 100
if cs != expected_cs:
gdaltest.post_reason('invalid checksum')
print(dt)
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
# Same for SourceTransferType
for dt in ["CInt16", "CInt32"]:
ds = gdal.Open("""<VRTDataset rasterXSize="10" rasterYSize="10">
<VRTRasterBand dataType="%s" band="1" subClass="VRTDerivedRasterBand">
<SourceTransferType>Byte</SourceTransferType>
<ColorInterp>Gray</ColorInterp>
<PixelFunctionType>vrtderived.one_pix_func</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
</VRTRasterBand>
</VRTDataset>""" % dt)
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', "YES")
with gdaltest.error_handler():
cs = ds.GetRasterBand(1).Checksum()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if cs != 0:
gdaltest.post_reason('invalid checksum')
print(dt)
print(cs)
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
###############################################################################
# Test translating a Python derived VRT
def vrtderived_13():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', "YES")
# Will test the VRTDerivedRasterBand::IGetDataCoverageStatus() interface
ds = gdal.GetDriverByName('GTiff').CreateCopy('/vsimem/vrtderived_13.tif', gdal.Open('data/python_ones.vrt'))
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('/vsimem/vrtderived_13.tif')
if cs != 10000:
gdaltest.post_reason('invalid checksum')
print(cs)
return 'fail'
return 'success'
###############################################################################
# Test statistics functions
def vrtderived_14():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', "YES")
ds = gdal.GetDriverByName('VRT').CreateCopy('/vsimem/vrtderived_14.vrt', gdal.Open('data/python_ones.vrt'))
(my_min, my_max) = ds.GetRasterBand(1).ComputeRasterMinMax()
(my_min2, my_max2, mean, stddev) = ds.GetRasterBand(1).ComputeStatistics(False)
hist = ds.GetRasterBand(1).GetHistogram()
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
if (my_min, my_max) != (1.0, 1.0):
gdaltest.post_reason('invalid ComputeRasterMinMax')
print(my_min, my_max)
return 'fail'
if (my_min2, my_max2, mean, stddev) != (1.0, 1.0, 1.0, 0.0):
gdaltest.post_reason('invalid ComputeStatistics')
print(my_min2, my_max2, mean, stddev)
return 'fail'
if hist[1] != 10000:
gdaltest.post_reason('invalid GetHistogram')
print(hist)
return 'fail'
ds = None
gdal.GetDriverByName('VRT').Delete('/vsimem/vrtderived_14.vrt')
return 'success'
###############################################################################
# Test threading
def vrtderived_15_worker(args_dict):
content = """<VRTDataset rasterXSize="2000" rasterYSize="2000">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTDerivedRasterBand">
<ColorInterp>Gray</ColorInterp>
<PixelFunctionType>vrtderived.one_pix_func</PixelFunctionType>
<PixelFunctionLanguage>Python</PixelFunctionLanguage>
</VRTRasterBand>
</VRTDataset>
"""
ds = gdal.Open(content)
for _ in range(5):
cs = ds.GetRasterBand(1).Checksum()
if cs != 2304:
print(cs)
args_dict['ret'] = False
ds.FlushCache()
def vrtderived_15():
try:
import numpy
numpy.ones
except (ImportError, AttributeError):
return 'skip'
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', "YES")
threads = []
args_array = []
for i in range(4):
args_dict = {'ret': True}
t = threading.Thread(target=vrtderived_15_worker, args=(args_dict,))
args_array.append(args_dict)
threads.append(t)
t.start()
ret = 'success'
for i in range(4):
threads[i].join()
if not args_array[i]['ret']:
ret = 'fail'
gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)
return ret
###############################################################################
# Cleanup.
def vrtderived_cleanup():
try:
os.remove('tmp/derived.vrt')
except OSError:
pass
return 'success'
gdaltest_list = [
vrtderived_1,
vrtderived_2,
vrtderived_3,
vrtderived_4,
vrtderived_5,
vrtderived_6,
vrtderived_7,
vrtderived_8,
vrtderived_9,
vrtderived_10,
vrtderived_11,
vrtderived_12,
vrtderived_13,
vrtderived_14,
vrtderived_15,
vrtderived_cleanup,
]
if __name__ == '__main__':
gdaltest.setup_run('vrtderived')
gdaltest.run_tests(gdaltest_list)
sys.exit(gdaltest.summarize())
| 32.517344
| 201
| 0.639012
|
1161d07b703bb2ab93da5b3c6470f478d74a7e3b
| 8,972
|
py
|
Python
|
test/functional/wallet_abandonconflict.py
|
btcavenue/btcavenue
|
63c135c40dbb1aef3078abb4dffefa04b8ef8217
|
[
"MIT"
] | null | null | null |
test/functional/wallet_abandonconflict.py
|
btcavenue/btcavenue
|
63c135c40dbb1aef3078abb4dffefa04b8ef8217
|
[
"MIT"
] | null | null | null |
test/functional/wallet_abandonconflict.py
|
btcavenue/btcavenue
|
63c135c40dbb1aef3078abb4dffefa04b8ef8217
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Btcavenue Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already abandoned.
"""
from decimal import Decimal
from test_framework.test_framework import BtcavenueTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
wait_until,
)
class AbandonConflictTest(BtcavenueTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[1].generate(100)
self.sync_blocks()
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
self.sync_mempools()
self.nodes[1].generate(1)
# Can not abandon non-wallet transaction
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
# Can not abandon confirmed transaction
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))
self.sync_blocks()
newbalance = self.nodes[0].getbalance()
assert balance - newbalance < Decimal("0.001") #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txA)["details"] if tx_out["amount"] == Decimal("10"))
nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txB)["details"] if tx_out["amount"] == Decimal("10"))
nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txC)["details"] if tx_out["amount"] == Decimal("10"))
inputs = []
# spend 10btc outputs from txA and txB
inputs.append({"txid": txA, "vout": nA})
inputs.append({"txid": txB, "vout": nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txAB1)["details"] if tx_out["amount"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid": txAB1, "vout": nAB})
inputs.append({"txid": txC, "vout": nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# Create a child tx spending ABC2
signed3_change = Decimal("24.999")
inputs = [{"txid": txABC2, "vout": 0}]
outputs = {self.nodes[0].getnewaddress(): signed3_change}
signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
# note tx is never directly referenced, only abandoned as a child of the above
self.nodes[0].sendrawtransaction(signed3["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + signed3_change)
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - signed3_change)
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert txABC2 not in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if it is received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so it is unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs = []
inputs.append({"txid": txA, "vout": nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransactionwithwallet(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
self.sync_blocks()
# Verify that B and C's 10 BTAV outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTAV output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| 47.470899
| 138
| 0.667187
|
c6bed513942075f563f0fa55a70f495c52d0ebd3
| 12,530
|
py
|
Python
|
sphinx/domains/c.py
|
PeerHerholz/smobsc
|
db34d2bb96b80579bd4a3f4c198a6b524c5a134a
|
[
"BSD-2-Clause"
] | 3
|
2019-06-11T09:42:08.000Z
|
2020-03-10T15:57:09.000Z
|
sphinx/domains/c.py
|
PeerHerholz/smobsc
|
db34d2bb96b80579bd4a3f4c198a6b524c5a134a
|
[
"BSD-2-Clause"
] | 12
|
2019-01-09T15:43:57.000Z
|
2020-01-21T10:46:30.000Z
|
sphinx/domains/c.py
|
PeerHerholz/smobsc
|
db34d2bb96b80579bd4a3f4c198a6b524c5a134a
|
[
"BSD-2-Clause"
] | 10
|
2019-02-04T11:49:35.000Z
|
2020-03-21T13:32:20.000Z
|
"""
sphinx.domains.c
~~~~~~~~~~~~~~~~
The C language domain.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import string
from docutils import nodes
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.locale import _
from sphinx.roles import XRefRole
from sphinx.util.docfields import Field, TypedField
from sphinx.util.nodes import make_refnode
if False:
# For type annotation
from typing import Any, Dict, Iterator, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
# RE to split at word boundaries
wsplit_re = re.compile(r'(\W+)')
# REs for C signatures
c_sig_re = re.compile(
r'''^([^(]*?) # return type
([\w:.]+) \s* # thing name (colon allowed for C++)
(?: \((.*)\) )? # optionally arguments
(\s+const)? $ # const specifier
''', re.VERBOSE)
c_funcptr_sig_re = re.compile(
r'''^([^(]+?) # return type
(\( [^()]+ \)) \s* # name in parentheses
\( (.*) \) # arguments
(\s+const)? $ # const specifier
''', re.VERBOSE)
c_funcptr_arg_sig_re = re.compile(
r'''^\s*([^(,]+?) # return type
\( ([^()]+) \) \s* # name in parentheses
\( (.*) \) # arguments
(\s+const)? # const specifier
\s*(?=$|,) # end with comma or end of string
''', re.VERBOSE)
c_funcptr_name_re = re.compile(r'^\(\s*\*\s*(.*?)\s*\)$')
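# Illustrative note (not part of Sphinx): what the signature regexes above
# capture for a simple C declaration. The sample signature is hypothetical; the
# groups in the comment are what c_sig_re returns for it.
_example_c_sig = 'int mymod_add(int a, int b)'
# c_sig_re.match(_example_c_sig).groups()
#   -> ('int ', 'mymod_add', 'int a, int b', None)
#      (return type, name,    arglist,       const)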
class CObject(ObjectDescription):
"""
Description of a C language object.
"""
doc_field_types = [
TypedField('parameter', label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='type', typenames=('type',)),
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('returntype', label=_('Return type'), has_arg=False,
names=('rtype',)),
]
# These C types aren't described anywhere, so don't try to create
# a cross-reference to them
stopwords = set((
'const', 'void', 'char', 'wchar_t', 'int', 'short',
'long', 'float', 'double', 'unsigned', 'signed', 'FILE',
'clock_t', 'time_t', 'ptrdiff_t', 'size_t', 'ssize_t',
'struct', '_Bool',
))
def _parse_type(self, node, ctype):
# type: (nodes.Element, str) -> None
# add cross-ref nodes for all words
for part in [_f for _f in wsplit_re.split(ctype) if _f]:
tnode = nodes.Text(part, part)
if part[0] in string.ascii_letters + '_' and \
part not in self.stopwords:
pnode = addnodes.pending_xref(
'', refdomain='c', reftype='type', reftarget=part,
modname=None, classname=None)
pnode += tnode
node += pnode
else:
node += tnode
def _parse_arglist(self, arglist):
# type: (str) -> Iterator[str]
while True:
m = c_funcptr_arg_sig_re.match(arglist)
if m:
yield m.group()
arglist = c_funcptr_arg_sig_re.sub('', arglist)
if ',' in arglist:
_, arglist = arglist.split(',', 1)
else:
break
else:
if ',' in arglist:
arg, arglist = arglist.split(',', 1)
yield arg
else:
yield arglist
break
def handle_signature(self, sig, signode):
# type: (str, addnodes.desc_signature) -> str
"""Transform a C signature into RST nodes."""
# first try the function pointer signature regex, it's more specific
m = c_funcptr_sig_re.match(sig)
if m is None:
m = c_sig_re.match(sig)
if m is None:
raise ValueError('no match')
rettype, name, arglist, const = m.groups()
desc_type = addnodes.desc_type('', '')
signode += desc_type
self._parse_type(desc_type, rettype)
try:
classname, funcname = name.split('::', 1)
classname += '::'
signode += addnodes.desc_addname(classname, classname)
signode += addnodes.desc_name(funcname, funcname)
# name (the full name) is still both parts
except ValueError:
signode += addnodes.desc_name(name, name)
# clean up parentheses from canonical name
m = c_funcptr_name_re.match(name)
if m:
name = m.group(1)
typename = self.env.ref_context.get('c:type')
if self.name == 'c:member' and typename:
fullname = typename + '.' + name
else:
fullname = name
if not arglist:
if self.objtype == 'function' or \
self.objtype == 'macro' and sig.rstrip().endswith('()'):
# for functions, add an empty parameter list
signode += addnodes.desc_parameterlist()
if const:
signode += addnodes.desc_addname(const, const)
return fullname
paramlist = addnodes.desc_parameterlist()
arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
# this messes up function pointer types, but not too badly ;)
for arg in self._parse_arglist(arglist):
arg = arg.strip()
param = addnodes.desc_parameter('', '', noemph=True)
try:
m = c_funcptr_arg_sig_re.match(arg)
if m:
self._parse_type(param, m.group(1) + '(')
param += nodes.emphasis(m.group(2), m.group(2))
self._parse_type(param, ')(' + m.group(3) + ')')
if m.group(4):
param += addnodes.desc_addname(m.group(4), m.group(4))
else:
ctype, argname = arg.rsplit(' ', 1)
self._parse_type(param, ctype)
# separate by non-breaking space in the output
param += nodes.emphasis(' ' + argname, '\xa0' + argname)
except ValueError:
# no argument name given, only the type
self._parse_type(param, arg)
paramlist += param
signode += paramlist
if const:
signode += addnodes.desc_addname(const, const)
return fullname
def get_index_text(self, name):
# type: (str) -> str
if self.objtype == 'function':
return _('%s (C function)') % name
elif self.objtype == 'member':
return _('%s (C member)') % name
elif self.objtype == 'macro':
return _('%s (C macro)') % name
elif self.objtype == 'type':
return _('%s (C type)') % name
elif self.objtype == 'var':
return _('%s (C variable)') % name
else:
return ''
def add_target_and_index(self, name, sig, signode):
# type: (str, str, addnodes.desc_signature) -> None
# for C API items we add a prefix since names are usually not qualified
# by a module name and so easily clash with e.g. section titles
targetname = 'c.' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
signode['ids'].append(targetname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
inv = self.env.domaindata['c']['objects']
if name in inv:
self.state_machine.reporter.warning(
'duplicate C object description of %s, ' % name +
'other instance in ' + self.env.doc2path(inv[name][0]),
line=self.lineno)
inv[name] = (self.env.docname, self.objtype)
indextext = self.get_index_text(name)
if indextext:
self.indexnode['entries'].append(('single', indextext,
targetname, '', None))
def before_content(self):
# type: () -> None
self.typename_set = False
if self.name == 'c:type':
if self.names:
self.env.ref_context['c:type'] = self.names[0]
self.typename_set = True
def after_content(self):
# type: () -> None
if self.typename_set:
self.env.ref_context.pop('c:type', None)
class CXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
# type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1:]
return title, target
class CDomain(Domain):
"""C language domain."""
name = 'c'
label = 'C'
object_types = {
'function': ObjType(_('function'), 'func'),
'member': ObjType(_('member'), 'member'),
'macro': ObjType(_('macro'), 'macro'),
'type': ObjType(_('type'), 'type'),
'var': ObjType(_('variable'), 'data'),
}
directives = {
'function': CObject,
'member': CObject,
'macro': CObject,
'type': CObject,
'var': CObject,
}
roles = {
'func': CXRefRole(fix_parens=True),
'member': CXRefRole(),
'macro': CXRefRole(),
'data': CXRefRole(),
'type': CXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
} # type: Dict[str, Dict[str, Tuple[str, Any]]]
def clear_doc(self, docname):
# type: (str) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
def merge_domaindata(self, docnames, otherdata):
# type: (List[str], Dict) -> None
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
# because TypedField can generate xrefs
if target in CObject.stopwords:
return contnode
if target not in self.data['objects']:
return None
obj = self.data['objects'][target]
return make_refnode(builder, fromdocname, obj[0], 'c.' + target,
contnode, target)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
if target not in self.data['objects']:
return []
obj = self.data['objects'][target]
return [('c:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], 'c.' + target,
contnode, target))]
def get_objects(self):
# type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for refname, (docname, type) in list(self.data['objects'].items()):
yield (refname, refname, type, docname, 'c.' + refname, 1)
def setup(app):
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(CDomain)
return {
'version': 'builtin',
'env_version': 1,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 37.071006
| 134
| 0.532003
|
57e115e06fdf4c25006e2df0936979306f3b9b4f
| 1,861
|
py
|
Python
|
src/model.py
|
PeppeSaccardi/cnn-with-pytorch
|
52a3566cbaf1e99e4cb766bdd2cbd828de267838
|
[
"MIT"
] | null | null | null |
src/model.py
|
PeppeSaccardi/cnn-with-pytorch
|
52a3566cbaf1e99e4cb766bdd2cbd828de267838
|
[
"MIT"
] | null | null | null |
src/model.py
|
PeppeSaccardi/cnn-with-pytorch
|
52a3566cbaf1e99e4cb766bdd2cbd828de267838
|
[
"MIT"
] | null | null | null |
import torch
import config
import pandas as pd
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.dataset as dataset
class ShapeDataset(dataset.Dataset):
def __init__(self, features, targets):
self.features = features
self.targets = targets
def __len__(self):
return len(self.targets)
def __getitem__(self, item):
target = torch.tensor(self.targets[item], dtype = torch.int)
target = target.type(torch.LongTensor)
image = torch.tensor(
np.array(self.features.iloc[item]), dtype = torch.double
)
image = image.reshape(
config.CHANNELS,
config.IMAGE_H,
config.IMAGE_W,
)
return (image, target)
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv1 = nn.Conv2d(3, 64, (15,15))
self.bn1 = nn.BatchNorm2d(64)
self.dropout1 = nn.Dropout(p=0.1)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(64, 16, (4,4))
self.bn2 = nn.BatchNorm2d(16)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(16 * 2 * 2, 512)
self.bn3 = nn.BatchNorm1d(512)
self.dropout2 = nn.Dropout(p=0.4)
self.fc2 = nn.Linear(512, 180)
self.bn4 = nn.BatchNorm1d(180)
self.dropout3 = nn.Dropout(p=0.1)
self.fc3 = nn.Linear(180, 3)
def forward(self, x):
x = self.pool1(self.dropout1(F.relu(self.bn1(self.conv1(x)))))
x = self.pool2(F.relu(self.bn2(self.conv2(x))))
x = x.view(-1, 16 * 2 * 2)
x = self.dropout2(F.relu(self.bn3(self.fc1(x))))
x = self.dropout3(F.relu(self.bn4(self.fc2(x))))
x = self.fc3(x)
return x
if __name__ == "__main__":
print(Model())
| 30.016129
| 70
| 0.579796
|
4d1e4c353e8f2e3cfa0ebc33614a3634810d6779
| 111
|
py
|
Python
|
kapture/converter/opensfm/__init__.py
|
v-mehta/kapture
|
b95a15b83032d667282ab96fa5be5327b2c99ec7
|
[
"BSD-3-Clause"
] | 264
|
2020-07-21T14:48:33.000Z
|
2022-03-16T17:05:21.000Z
|
kapture/converter/opensfm/__init__.py
|
v-mehta/kapture
|
b95a15b83032d667282ab96fa5be5327b2c99ec7
|
[
"BSD-3-Clause"
] | 30
|
2020-08-31T19:27:26.000Z
|
2022-03-11T08:50:23.000Z
|
kapture/converter/opensfm/__init__.py
|
v-mehta/kapture
|
b95a15b83032d667282ab96fa5be5327b2c99ec7
|
[
"BSD-3-Clause"
] | 49
|
2020-07-30T06:11:22.000Z
|
2022-03-22T13:46:06.000Z
|
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
"""
OpenSfM to kapture import and export.
"""
| 18.5
| 63
| 0.738739
|
1a627b7258bc335c5be11acd4ec23e9f4455852f
| 3,981
|
py
|
Python
|
data_timeslice.py
|
gonuke/cyc3d
|
13350ea7f588a16c5971d30e86c0c53aa0cf60e6
|
[
"BSD-3-Clause"
] | null | null | null |
data_timeslice.py
|
gonuke/cyc3d
|
13350ea7f588a16c5971d30e86c0c53aa0cf60e6
|
[
"BSD-3-Clause"
] | null | null | null |
data_timeslice.py
|
gonuke/cyc3d
|
13350ea7f588a16c5971d30e86c0c53aa0cf60e6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import os
#import json
import simplejson as json
from argparse import ArgumentParser
from datetime import datetime
import time
from pprint import pprint
import numpy as np
from tools import diff_last, cost_val, load_kind, label_kind, MAX_COST, MAX_WASTE, \
outter_reproccessed
def json_at_year(filename, year, kind):
data = load_kind(filename, kind)
data = np.array(data[data['year'] == year])
return [{'name': k, 'size': data[k][0]} for k in data.dtype.names[1:] \
if data[k][0] > 0]
def main_by_year():
parser = ArgumentParser()
parser.add_argument('year', type=int)
parser.add_argument('filenames', nargs="+")
parser.add_argument('-k', dest="kind", help="waste or cost", default="waste")
ns = parser.parse_args()
j = {'name': "Year {0}".format(ns.year), 'children': []}
for i, filename in enumerate(ns.filenames, 1):
j['children'].append({'name': 'Fuel Cycle {0}'.format(i),
'children': json_at_year(filename, ns.year, ns.kind),
})
jfname = "year{0}-{1}.json".format(ns.year, ns.kind)
s = json.dumps(j)
with open(jfname, 'w') as f:
f.write(s)
CAT_LABEL = "year {0}\n{1}"
def json_at_category(data, category, years, kind):
j = []
for row in data:
if row['year'] not in years:
continue
label = CAT_LABEL.format(row['year'], label_kind(row[category], kind))
j.append({'name': label, 'size': row[category]})
return j
def main_by_fc_cat_year():
parser = ArgumentParser()
parser.add_argument('filename')
parser.add_argument('years', nargs='+', type=int)
parser.add_argument('-k', dest="kind", help="waste or cost", default="waste")
ns = parser.parse_args()
years = set(ns.years)
data = load_kind(ns.filename, ns.kind)
j = {'name': "", 'children': []} # FC level
for category in data.dtype.names[1:]:
j['children'].append({'name': category,
'children': json_at_category(data, category,
years, ns.kind),
})
jfname = "info-{0}-{1}-{2}.json".format(os.path.splitext(ns.filename)[0],
"_".join(map(str, ns.years)), ns.kind)
s = json.dumps(j)
with open(jfname, 'w') as f:
f.write(s)
YEAR_CAT_LABEL = "{0}\n{1}"
def json_at_year_cat(data, year, kind):
#data = np.array(data[data['year'] == year])
d = data[data['year'] == year]
return [{'name': YEAR_CAT_LABEL.format(k, label_kind(d[k][0], kind)),
'size': d[k][0] / (MAX_COST if kind == "cost" else MAX_WASTE)} \
for k in d.dtype.names[1:] if d[k][0] > 0]
def main_by_fc_year_cat():
parser = ArgumentParser()
parser.add_argument('filename')
parser.add_argument('years', nargs='+', type=int)
parser.add_argument('-k', dest="kind", help="waste or cost", default="waste")
ns = parser.parse_args()
years = set(ns.years)
data = load_kind(ns.filename, ns.kind)
j = {'name': "", 'children': [], 'scale': 0.0} # FC level
for year in years:
j['children'].append({'name': "year {0}".format(year),
'children': json_at_year_cat(data, year, ns.kind),})
with open('/dev/null', 'a') as f:
# prevents weird numpy segfault
print(data, file=f)
j['scale'] = max(j['scale'], sum([c['size'] for c in j['children'][-1]['children']]))
if ns.kind == 'waste':
outter_reproccessed(j['children'][-1]['children'])
jfname = "info-{0}-{1}-{2}.json".format(os.path.splitext(ns.filename)[0],
"_".join(map(str, ns.years)), ns.kind)
s = json.dumps(j)
with open(jfname, 'w') as f:
f.write(s)
if __name__ == "__main__":
main_by_fc_year_cat()
| 38.278846
| 93
| 0.568701
|
d6870e753264855d903190aecbef227d30cae2dd
| 424
|
py
|
Python
|
other_parsers_templates/numpy_parser.py
|
sushmaakoju/parser
|
e40e3f818921141044b499e231ae75e6bf4141c2
|
[
"MIT"
] | null | null | null |
other_parsers_templates/numpy_parser.py
|
sushmaakoju/parser
|
e40e3f818921141044b499e231ae75e6bf4141c2
|
[
"MIT"
] | null | null | null |
other_parsers_templates/numpy_parser.py
|
sushmaakoju/parser
|
e40e3f818921141044b499e231ae75e6bf4141c2
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import sympy as sp
from sympy import *
# numpy polynomial evaluation
# uses x_i column names directly, which is not how real-world data is labelled anyway
df = pd.DataFrame(
[[21, 72, 67.1],
[23, 78, 69.5],
[32, 74, 56.6],
[52, 54, 76.2]],
columns = ['x1','x2', 'x3'])
x1, x2, x3 = [df[c].to_numpy() for c in df.columns]
#expr = x0**2 + x1*x2
expr = x1**2 + x2*x3
print(expr)
| 26.5
| 73
| 0.641509
|
cd27c2d372406581550f7169b2b3e7d14ce6a5aa
| 8,385
|
py
|
Python
|
rltools/rltools/util.py
|
karlhthompson/cwrc
|
9fadf113712908c1bfa73bf8a696108a846e4677
|
[
"MIT"
] | 9
|
2018-04-04T10:46:31.000Z
|
2020-05-19T09:47:36.000Z
|
rltools/rltools/util.py
|
karlhthompson/cwrc
|
9fadf113712908c1bfa73bf8a696108a846e4677
|
[
"MIT"
] | null | null | null |
rltools/rltools/util.py
|
karlhthompson/cwrc
|
9fadf113712908c1bfa73bf8a696108a846e4677
|
[
"MIT"
] | 2
|
2021-12-11T22:53:05.000Z
|
2021-12-14T08:18:16.000Z
|
from __future__ import print_function
import errno
import os
import timeit
import h5py
import numpy as np
from colorama import Fore, Style
class Timer(object):
def __enter__(self):
self.t_start = timeit.default_timer()
return self
def __exit__(self, _1, _2, _3):
self.t_end = timeit.default_timer()
self.dt = self.t_end - self.t_start
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def split_h5_name(fullpath, sep='/'):
"""
From h5ls.c:
* Example: ../dir1/foo/bar/baz
* \_________/\______/
* file obj
*
"""
sep_inds = [i for i, c in enumerate(fullpath) if c == sep]
for sep_idx in sep_inds:
filename, objname = fullpath[:sep_idx], fullpath[sep_idx:]
if not filename:
continue
# Try to open the file. If it fails, try the next separation point.
try:
h5py.File(filename, 'r').close()
except IOError:
continue
# It worked!
return filename, objname
raise IOError('Could not open HDF5 file/object {}'.format(fullpath))
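# Usage sketch (illustrative): split_h5_name walks the separators from the left
# until a prefix opens as an HDF5 file. The path below is hypothetical and must
# exist for the call to succeed.
def _split_h5_name_example():
    # e.g. -> ('runs/exp1.h5', '/snapshots/iter_100')
    return split_h5_name('runs/exp1.h5/snapshots/iter_100')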
def discount(r_N_T_D, gamma):
'''
Computes Q values from rewards.
q_N_T_D[i,t,:] == r_N_T_D[i,t,:] + gamma*r_N_T_D[i,t+1,:] + gamma^2*r_N_T_D[i,t+2,:] + ...
'''
assert r_N_T_D.ndim == 2 or r_N_T_D.ndim == 3
input_ndim = r_N_T_D.ndim
if r_N_T_D.ndim == 2:
r_N_T_D = r_N_T_D[..., None]
discfactors_T = np.power(gamma, np.arange(r_N_T_D.shape[1]))
discounted_N_T_D = r_N_T_D * discfactors_T[None, :, None]
q_N_T_D = np.cumsum(
discounted_N_T_D[:, ::-1, :],
axis=1)[:, ::
-1, :] # this is equal to gamma**t * (r_N_T_D[i,t,:] + gamma*r_N_T_D[i,t+1,:] + ...)
q_N_T_D /= discfactors_T[None, :, None]
# Sanity check: Q values at last timestep should equal original rewards
assert np.allclose(q_N_T_D[:, -1, :], r_N_T_D[:, -1, :])
if input_ndim == 2:
assert q_N_T_D.shape[-1] == 1
return q_N_T_D[:, :, 0]
return q_N_T_D
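# Worked example (illustrative, not part of the library): for one trajectory
# with rewards [1, 2, 3] and gamma = 0.5,
#   q[0] = 1 + 0.5*2 + 0.25*3 = 2.75
#   q[1] = 2 + 0.5*3          = 3.5
#   q[2] = 3
def _discount_example():
    r = np.array([[1.0, 2.0, 3.0]])
    q = discount(r, 0.5)
    assert np.allclose(q, [[2.75, 3.5, 3.0]])
    return q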
def standardized(a):
out = a.copy()
out -= a.mean()
out /= a.std() + 1e-8
return out
def safezip(*ls):
assert all(len(l) == len(ls[0]) for l in ls)
return zip(*ls)
def maxnorm(a):
return np.abs(a).max()
def gather(vals, idx):
return vals[idx]
def lookup_last_idx(a, inds):
"""
Looks up indices in a. e.g. a[[1, 2, 3]] = [a[1], a[2], a[3]]
a is a d1 x d2 ... dn array
inds is a d1 x d2 ... d(n-1) array of integers
returns the array
out[i_1,...,i_{n-1}] = a[i_1,...,i_{n-1}, inds[i_1,...,i_{n-1}]]
"""
# Flatten the arrays
ashape, indsshape = np.shape(a), np.shape(inds)
aflat, indsflat = np.reshape(a, (-1,)), np.reshape(inds, (-1,))
# Compute the indices corresponding to inds in the flattened array
delta = gather(ashape, np.size(ashape) - 1) # i.e. delta = ashape[-1],
aflatinds = np.arange(0, stop=np.size(a), step=delta) + indsflat
# Look up the desired elements in the flattened array, and reshape
# to the original shape
return np.reshape(gather(aflat, aflatinds), indsshape)
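# Worked example (illustrative): picking one element per row of a 2-D array.
def _lookup_last_idx_example():
    a = np.array([[10, 20, 30],
                  [40, 50, 60]])
    inds = np.array([2, 0])
    out = lookup_last_idx(a, inds)  # -> array([30, 40])
    assert np.array_equal(out, np.array([30, 40]))
    return out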
def stack_dict_list(dict_list):
ret = dict()
if not dict_list:
return ret
keys = dict_list[0].keys()
for k in keys:
eg = dict_list[0][k]
if isinstance(eg, dict):
v = stack_dict_list([x[k] for x in dict_list])
else:
v = np.array([x[k] for x in dict_list])
ret[k] = v
return ret
def evaluate_policy(env, policy, n_trajs, deterministic, max_traj_len, mode, disc, n_workers=4):
ok('Sampling {} trajs (max len {}) from policy in {}'.format(n_trajs, max_traj_len, env))
# Sample
from rltools.samplers.parallel import RolloutProxy
from six.moves import cPickle
import time
from gevent import Timeout
from rltools.trajutil import TrajBatch
proxies = [RolloutProxy(env, policy, max_traj_len, mode, i, 0) for i in range(n_workers)]
if mode == 'concurrent':
state_str = cPickle.dumps([p.get_state() for p in policy])
else:
state_str = cPickle.dumps(policy.get_state(), protocol=-1)
for proxy in proxies:
proxy.client("set_state", state_str, async=True)
seed_idx = 0
seed_idx2 = seed_idx
worker2job = {}
def assign_job_to(i_worker, seed):
worker2job[i_worker] = (seed, proxies[i_worker].client("sample", seed, async=True))
seed += 1
return seed
# Start jobs
for i_worker in range(n_workers):
seed_idx2 = assign_job_to(i_worker, seed_idx2)
trajs_so_far = 0
seed2traj = {}
while True:
for i_worker in range(n_workers):
try:
(seed_idx, future) = worker2job[i_worker]
traj_string = future.get(timeout=1e-3) # XXX
except Timeout:
pass
else:
traj = cPickle.loads(traj_string)
seed2traj[seed_idx] = traj
trajs_so_far += 1
if trajs_so_far >= n_trajs:
break
else:
seed_idx2 = assign_job_to(i_worker, seed_idx2)
if trajs_so_far >= n_trajs:
break
time.sleep(0.01)
# Wait until all jobs finish
for seed_idx, future in worker2job.values():
seed2traj[seed_idx] = cPickle.loads(future.get())
trajs = []
for (seed, traj) in seed2traj.items():
trajs.append(traj)
trajs_so_far += 1
# Trajs
if mode == 'centralized':
trajbatch = TrajBatch.FromTrajs(trajs)
r_B_T = trajbatch.r.padded(fill=0.)
ret = r_B_T.sum(axis=1).mean()
discret = discount(r_B_T, disc).mean()
info = {tinfo[0]: np.mean(tinfo[1]) for tinfo in trajbatch.info}
return dict(ret=ret, disc_ret=discret, **info)
elif mode in ['decentralized', 'concurrent']:
agent2trajs = {}
for agid in range(len(env.agents)):
agent2trajs[agid] = []
for envtrajs in trajs:
for agid, agtraj in enumerate(envtrajs):
agent2trajs[agid].append(agtraj)
agent2trajbatch = {}
rets, retsstd = [], []
discrets = []
infos = []
for agent, trajs in agent2trajs.items():
agent2trajbatch[agent] = TrajBatch.FromTrajs(trajs)
r_B_T = agent2trajbatch[agent].r.padded(fill=0.)
rets.append(r_B_T.sum(axis=1).mean())
retsstd.append(r_B_T.sum(axis=1).std())
discrets.append(discount(r_B_T, disc).mean())
infos.append({tinfo[0]: np.mean(tinfo[1]) for tinfo in agent2trajbatch[agent].info})
infos = stack_dict_list(infos)
return dict(ret=rets, retstd=retsstd, disc_ret=discrets, **infos)
else:
raise NotImplementedError()
class Color(object):
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def header(s):
print(Color.HEADER + '{}'.format(s) + Color.ENDC)
def warn(s):
print(Color.WARNING + '{}'.format(s) + Color.ENDC)
def failure(s):
print(Color.FAIL + '{}'.format(s) + Color.ENDC)
def ok(s):
print(Color.OKBLUE + '{}'.format(s) + Color.ENDC)
class EzPickle(object):
"""Objects that are pickled and unpickled via their constructor
arguments.
Example usage:
class Dog(Animal, EzPickle):
def __init__(self, furcolor, tailkind="bushy"):
Animal.__init__()
EzPickle.__init__(furcolor, tailkind)
...
When this object is unpickled, a new Dog will be constructed by passing the provided
furcolor and tailkind into the constructor. However, philosophers are still not sure
whether it is still the same dog.
"""
def __init__(self, *args, **kwargs):
self._ezpickle_args = args
self._ezpickle_kwargs = kwargs
def __getstate__(self):
return {"_ezpickle_args": self._ezpickle_args, "_ezpickle_kwargs": self._ezpickle_kwargs}
def __setstate__(self, d):
out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"])
self.__dict__.update(out.__dict__)
| 29.013841
| 101
| 0.591055
|
4784c8d66dbc9d110c96e56b6379fede7ef4a2c7
| 47
|
py
|
Python
|
PlayStore/AppNotFoundException.py
|
geoffduong/PlayStoreLinks_Bot
|
710867fcb7df08a07fec357b9d3c424464531ae0
|
[
"MIT"
] | 81
|
2015-06-01T10:05:12.000Z
|
2021-11-16T13:49:10.000Z
|
PlayStore/AppNotFoundException.py
|
geoffduong/PlayStoreLinks_Bot
|
710867fcb7df08a07fec357b9d3c424464531ae0
|
[
"MIT"
] | 13
|
2015-05-19T17:39:03.000Z
|
2021-12-13T19:43:55.000Z
|
PlayStore/AppNotFoundException.py
|
geoffduong/PlayStoreLinks_Bot
|
710867fcb7df08a07fec357b9d3c424464531ae0
|
[
"MIT"
] | 22
|
2015-11-17T00:59:08.000Z
|
2020-04-19T22:37:42.000Z
|
class AppNotFoundException(Exception):
pass
| 23.5
| 38
| 0.808511
|
4d17d70a2746821203fb3300fe896fc0e7e0343d
| 1,781
|
py
|
Python
|
src/competition/forms/registration_forms.py
|
michaelwisely/django-competition
|
0c2ae28856d13738140820104dab2413e6e55ecb
|
[
"BSD-3-Clause"
] | 4
|
2015-12-29T07:35:56.000Z
|
2021-05-14T14:32:54.000Z
|
src/competition/forms/registration_forms.py
|
michaelwisely/django-competition
|
0c2ae28856d13738140820104dab2413e6e55ecb
|
[
"BSD-3-Clause"
] | 3
|
2015-09-11T01:10:39.000Z
|
2016-02-12T02:40:02.000Z
|
src/competition/forms/registration_forms.py
|
michaelwisely/django-competition
|
0c2ae28856d13738140820104dab2413e6e55ecb
|
[
"BSD-3-Clause"
] | 2
|
2016-03-15T09:48:35.000Z
|
2020-01-04T19:39:01.000Z
|
from django import forms
def get_form_choices(registration_question):
"""Takes a RegistrationQuestion object and returns a list of
properly formatted choices for a ChoiceField. Namely, a list of
tuples.
"""
raw_choices = registration_question.question_choice_set.all()
return [(str(rc.id), rc.choice) for rc in raw_choices]
def generate_question_form(registration_question):
"""Generates a form class based on the question being asked
"""
if registration_question.question_type == 'SC':
form_choices = get_form_choices(registration_question)
class SingleChoiceForm(forms.Form):
sc_response = forms.ChoiceField(
label=registration_question.question,
choices=form_choices,
widget=forms.RadioSelect
)
return SingleChoiceForm
if registration_question.question_type == 'MC':
form_choices = get_form_choices(registration_question)
class MultipleChoiceForm(forms.Form):
mc_response = forms.MultipleChoiceField(
label=registration_question.question,
choices=form_choices,
widget=forms.CheckboxSelectMultiple
)
return MultipleChoiceForm
if registration_question.question_type == 'SA':
class ShortAnswerForm(forms.Form):
sa_response = forms.CharField(
label=registration_question.question,
)
return ShortAnswerForm
if registration_question.question_type == 'AB':
class AgreementForm(forms.Form):
ab_response = forms.BooleanField(
label=registration_question.question,
required=True
)
return AgreementForm
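# Usage sketch (illustrative, not part of the module): validating a posted answer
# for a hypothetical single-choice question. Assumes `registration_question` has
# question_type 'SC' and a populated question_choice_set.
def _example_validate_single_choice(registration_question, choice_id):
    FormClass = generate_question_form(registration_question)
    form = FormClass(data={'sc_response': str(choice_id)})
    # cleaned_data['sc_response'] holds the chosen id as a string when valid
    return form.is_valid(), (form.cleaned_data if form.is_valid() else form.errors)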
| 31.245614
| 67
| 0.653565
|
759c04d77a23e0860cc8d2c59322e7cb749d44d9
| 6,315
|
py
|
Python
|
tests/test_signer.py
|
0xOmarA/RadixLib
|
85d75a47d4c4df4c1a319b74857ae2c513933623
|
[
"MIT"
] | 32
|
2022-01-12T16:52:28.000Z
|
2022-03-24T18:05:47.000Z
|
tests/test_signer.py
|
0xOmarA/RadixLib
|
85d75a47d4c4df4c1a319b74857ae2c513933623
|
[
"MIT"
] | 3
|
2022-01-12T17:01:55.000Z
|
2022-02-12T15:14:16.000Z
|
tests/test_signer.py
|
0xOmarA/RadixLib
|
85d75a47d4c4df4c1a319b74857ae2c513933623
|
[
"MIT"
] | 1
|
2022-01-21T04:28:07.000Z
|
2022-01-21T04:28:07.000Z
|
import radixlib as radix
import unittest
import os
class TestSigner(unittest.TestCase):
""" Unit tests for the Signer class """
# The mnemonic phrase which will be used throughout the signer tests. The mnemonic phrase that
# you see here is not of an active wallet or a real wallet. This is the mnemonic phrase of a random,
# empty wallet. Do not bother checking what it has or doesn't have.
MNEMONIC_PHRASE: str = "confirm few beach hamster betray catalog thank wine fish identify brick educate"
PASSWORD: str = "MySuperStrongPassword"
def test_public_key(self):
""" Tests the signer class to ensure that it generates correct public keys. """
# The public key we're expecting to see derived from the mnemonic phrase
expected_pub_key: str = "0223ba1f1def8bfbe973f7cb39913eb4f387839c5958774ab79c0d6eb3628d990f"
signer: radix.Signer = radix.Signer.from_mnemonic(self.MNEMONIC_PHRASE)
self.assertEqual(signer.public_key(), expected_pub_key)
def test_private_key(self):
""" Tests the signer class to ensure that it generates correct private keys. """
# The private key we're expecting to see derived from the mnemonic phrase
expected_priv_key: str = "965de1f0fb9fec4cf2f77ae173d92b0034f7ea57682634084b6287e8c2b2db37"
signer: radix.Signer = radix.Signer.from_mnemonic(self.MNEMONIC_PHRASE)
self.assertEqual(signer.private_key(), expected_priv_key)
def test_public_key_acc_index(self):
""" Test the signer class for the generation of public keys for higher account indexes. """
# The account index to test for and the expected public key
account_index: int = 12
expected_pub_key: str = "02c11234408bc331ec2356f9c14d5381f55e96fdada81dbbf98566a804d5c5ed65"
signer: radix.Signer = radix.Signer.from_mnemonic(self.MNEMONIC_PHRASE)
self.assertEqual(signer.public_key(index = account_index), expected_pub_key)
def test_private_key_acc_index(self):
""" Test the signer class for the generation of private keys for higher account indexes. """
# The account index to test for and the expected private key
account_index: int = 12
expected_pub_key: str = "35b9c46ea7c944f05d1545503f2ac85fd37961a7e6df64ea1d0c81d93c1939e0"
signer: radix.Signer = radix.Signer.from_mnemonic(self.MNEMONIC_PHRASE)
self.assertEqual(signer.private_key(index = account_index), expected_pub_key)
def test_old_wallet_json_load(self):
""" Test the loading of the old wallet.json file by the signer class """
# Loading up the content of the wallet.json file
script_path: str = os.path.dirname(os.path.realpath(__file__))
signer: radix.Signer = radix.Signer.from_wallet_json(
wallet_json_path = os.path.join(script_path, 'old wallet.json'),
passphrase = self.PASSWORD
)
# The private key we're expecting to see derived from the wallet.json file
expected_priv_key: str = "965de1f0fb9fec4cf2f77ae173d92b0034f7ea57682634084b6287e8c2b2db37"
self.assertEqual(signer.private_key(), expected_priv_key)
def test_new_wallet_json_load(self):
""" Test the loading of the old wallet.json file by the signer class """
# Loading up the content of the wallet.json file
script_path: str = os.path.dirname(os.path.realpath(__file__))
signer: radix.Signer = radix.Signer.from_wallet_json(
wallet_json_path = os.path.join(script_path, 'new wallet.json'),
passphrase = self.PASSWORD
)
# The private key we're expecting to see derived from the wallet.json file
expected_priv_key: str = "6b7a90c556a41b37fc7af7dee21fcf39be44a0b201ae6350ea98f7258765109b"
self.assertEqual(signer.private_key(), expected_priv_key)
def test_wallet_address_derivation(self):
""" Tests the ability of the signer to derive the wallet address """
# Loading up the signer object to use for the operation
signer: radix.Signer = radix.Signer.from_mnemonic(self.MNEMONIC_PHRASE)
# The expected resulting address from the operation
expected_address: str = "rdx1qspz8wslrhhch7lfw0mukwv386608purn3v4sa62k7wq6m4nv2xejrcmqcvl2"
self.assertEqual(signer.wallet_address(radix.network.MAINNET), expected_address)
# ##############################################################################################
# The following tests are related to the JWT token creation which for some reason I've struggled
# to get its tests to work with tox. This is probably due to some conflicts between the two
# packages: cryptography and pycryptodome. This will be fixed at a later date.
# ##############################################################################################
# def test_jwt_creation_correct_key(self):
# """ Tests the ability of the signer to derive the wallet address """
# # Loading up the signer object to use for the operation
# signer: radix.Signer = radix.Signer.from_mnemonic(self.MNEMONIC_PHRASE)
# # The payload which we wish to create a JWT for
# jwt: str = signer.create_jwt({"order": "buy 1 scorpion"})
# self.assertEqual(radix.utils.verify_jwt(jwt), True)
# def test_jwt_creation_correct_key1(self):
# """ Tests the ability of the signer to derive the wallet address """
# # Loading up the signer object to use for the operation
# signer: radix.Signer = radix.Signer.from_mnemonic(self.MNEMONIC_PHRASE)
# # The payload which we wish to create a JWT for
# jwt: str = signer.create_jwt({"order": "buy 1 scorpion"})
# self.assertEqual(radix.utils.verify_jwt(jwt, signer.public_key(0)), True)
# def test_jwt_creation_incorrect_key(self):
# """ Tests the ability of the signer to derive the wallet address """
# # Loading up the signer object to use for the operation
# signer: radix.Signer = radix.Signer.from_mnemonic(self.MNEMONIC_PHRASE)
# # The payload which we wish to create a JWT for
# jwt: str = signer.create_jwt({"order": "buy 1 scorpion"})
# self.assertEqual(radix.utils.verify_jwt(jwt, signer.public_key(21)), False)
| 48.576923
| 108
| 0.689945
|
f7159091f18210b97ef9f6170f617a8643d4d010
| 1,414
|
py
|
Python
|
hbi/server/tornado_server.py
|
Glutexo/host-inventory
|
558b77eff633e5ec7cdb45393e767e4a05bca470
|
[
"Apache-2.0"
] | 1
|
2018-09-17T13:57:55.000Z
|
2018-09-17T13:57:55.000Z
|
hbi/server/tornado_server.py
|
Glutexo/host-inventory
|
558b77eff633e5ec7cdb45393e767e4a05bca470
|
[
"Apache-2.0"
] | 3
|
2018-10-02T10:05:12.000Z
|
2018-10-10T09:33:47.000Z
|
hbi/server/tornado_server.py
|
Glutexo/host-inventory
|
558b77eff633e5ec7cdb45393e767e4a05bca470
|
[
"Apache-2.0"
] | 3
|
2018-08-15T16:50:51.000Z
|
2018-09-26T08:52:44.000Z
|
import json, os
from threading import Thread
from tornado.ioloop import IOLoop
import tornado.web
from hbi.model import Host, Filter
from hbi.server import Service
class RootHandler(tornado.web.RequestHandler):
def get(self):
self.write("boop")
class EntitiesPoster(tornado.web.RequestHandler):
def post(self):
hosts_json = json.loads(self.request.body)
hosts = (Host.from_json(h) for h in hosts_json)
ret = self.application.service.create_or_update(hosts)
self.write(json.dumps([h.to_json() for h in ret]))
class EntitiesSearcher(tornado.web.RequestHandler):
def post(self):
filters_json = json.loads(self.request.body) if self.request.body else None
filters = [Filter.from_json(h) for h in filters_json] if filters_json else None
ret = self.application.service.get(filters)
self.write(json.dumps([h.to_json() for h in ret]))
def serve_tornado():
app = tornado.web.Application([
(r"/", RootHandler),
(r"/entities/search", EntitiesSearcher),
(r"/entities", EntitiesPoster),
])
app.listen(int(os.environ.get("PORT", "50051")))
app.service = Service()
loop = IOLoop.current()
class TornadoRunThread(Thread):
def run(self):
loop.start()
TornadoRunThread().start()
return app, loop
if __name__ == "__main__":
app, loop = serve_tornado()
| 25.709091
| 87
| 0.66761
|
4f39ba161cccf4823418208373af83cc6e00f6f6
| 11,485
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
pravintandale/password-poc
|
4cf7b960390785bc9d884da3ae9c510f37c82a73
|
[
"MIT"
] | 1
|
2021-01-18T05:03:35.000Z
|
2021-01-18T05:03:35.000Z
|
app/core/migrations/0001_initial.py
|
pravintandale/password-poc
|
4cf7b960390785bc9d884da3ae9c510f37c82a73
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
pravintandale/password-poc
|
4cf7b960390785bc9d884da3ae9c510f37c82a73
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1 on 2021-01-18 09:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='Email')),
('first_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='First Name')),
('last_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='Last Name')),
('company_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='Company Name')),
('country', models.CharField(blank=True, choices=[('Aruba', 'Aruba'), ('Afghanistan', 'Afghanistan'), ('Angola', 'Angola'), ('Anguilla', 'Anguilla'), ('Åland Islands', 'Åland Islands'), ('Albania', 'Albania'), ('Andorra', 'Andorra'), ('United Arab Emirates', 'United Arab Emirates'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('American Samoa', 'American Samoa'), ('Antarctica', 'Antarctica'), ('French Southern Territories', 'French Southern Territories'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Burundi', 'Burundi'), ('Belgium', 'Belgium'), ('Benin', 'Benin'), ('Bonaire, Sint Eustatius and Saba', 'Bonaire, Sint Eustatius and Saba'), ('Burkina Faso', 'Burkina Faso'), ('Bangladesh', 'Bangladesh'), ('Bulgaria', 'Bulgaria'), ('Bahrain', 'Bahrain'), ('Bahamas', 'Bahamas'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Saint Barthélemy', 'Saint Barthélemy'), ('Belarus', 'Belarus'), ('Belize', 'Belize'), ('Bermuda', 'Bermuda'), ('Bolivia, Plurinational State of', 'Bolivia, Plurinational State of'), ('Brazil', 'Brazil'), ('Barbados', 'Barbados'), ('Brunei Darussalam', 'Brunei Darussalam'), ('Bhutan', 'Bhutan'), ('Bouvet Island', 'Bouvet Island'), ('Botswana', 'Botswana'), ('Central African Republic', 'Central African Republic'), ('Canada', 'Canada'), ('Cocos (Keeling) Islands', 'Cocos (Keeling) Islands'), ('Switzerland', 'Switzerland'), ('Chile', 'Chile'), ('China', 'China'), ("Côte d'Ivoire", "Côte d'Ivoire"), ('Cameroon', 'Cameroon'), ('Congo, The Democratic Republic of the', 'Congo, The Democratic Republic of the'), ('Congo', 'Congo'), ('Cook Islands', 'Cook Islands'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Cabo Verde', 'Cabo Verde'), ('Costa Rica', 'Costa Rica'), ('Cuba', 'Cuba'), ('Curaçao', 'Curaçao'), ('Christmas Island', 'Christmas Island'), ('Cayman Islands', 'Cayman Islands'), ('Cyprus', 'Cyprus'), ('Czechia', 'Czechia'), ('Germany', 'Germany'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Denmark', 'Denmark'), ('Dominican Republic', 'Dominican Republic'), ('Algeria', 'Algeria'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('Eritrea', 'Eritrea'), ('Western Sahara', 'Western Sahara'), ('Spain', 'Spain'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Finland', 'Finland'), ('Fiji', 'Fiji'), ('Falkland Islands (Malvinas)', 'Falkland Islands (Malvinas)'), ('France', 'France'), ('Faroe Islands', 'Faroe Islands'), ('Micronesia, Federated States of', 'Micronesia, Federated States of'), ('Gabon', 'Gabon'), ('United Kingdom', 'United Kingdom'), ('Georgia', 'Georgia'), ('Guernsey', 'Guernsey'), ('Ghana', 'Ghana'), ('Gibraltar', 'Gibraltar'), ('Guinea', 'Guinea'), ('Guadeloupe', 'Guadeloupe'), ('Gambia', 'Gambia'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Greenland', 'Greenland'), ('Guatemala', 'Guatemala'), ('French Guiana', 'French Guiana'), ('Guam', 'Guam'), ('Guyana', 'Guyana'), ('Hong Kong', 'Hong Kong'), ('Heard Island and McDonald Islands', 'Heard Island and McDonald Islands'), ('Honduras', 'Honduras'), ('Croatia', 'Croatia'), ('Haiti', 'Haiti'), ('Hungary', 'Hungary'), ('Indonesia', 'Indonesia'), ('Isle of Man', 'Isle of Man'), ('India', 'India'), ('British Indian Ocean Territory', 'British Indian Ocean Territory'), ('Ireland', 'Ireland'), ('Iran, Islamic Republic of', 'Iran, Islamic Republic of'), ('Iraq', 'Iraq'), ('Iceland', 'Iceland'), ('Israel', 'Israel'), ('Italy', 
'Italy'), ('Jamaica', 'Jamaica'), ('Jersey', 'Jersey'), ('Jordan', 'Jordan'), ('Japan', 'Japan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Cambodia', 'Cambodia'), ('Kiribati', 'Kiribati'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Korea, Republic of', 'Korea, Republic of'), ('Kuwait', 'Kuwait'), ("Lao People's Democratic Republic", "Lao People's Democratic Republic"), ('Lebanon', 'Lebanon'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Saint Lucia', 'Saint Lucia'), ('Liechtenstein', 'Liechtenstein'), ('Sri Lanka', 'Sri Lanka'), ('Lesotho', 'Lesotho'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Latvia', 'Latvia'), ('Macao', 'Macao'), ('Saint Martin (French part)', 'Saint Martin (French part)'), ('Morocco', 'Morocco'), ('Monaco', 'Monaco'), ('Moldova, Republic of', 'Moldova, Republic of'), ('Madagascar', 'Madagascar'), ('Maldives', 'Maldives'), ('Mexico', 'Mexico'), ('Marshall Islands', 'Marshall Islands'), ('North Macedonia', 'North Macedonia'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Myanmar', 'Myanmar'), ('Montenegro', 'Montenegro'), ('Mongolia', 'Mongolia'), ('Northern Mariana Islands', 'Northern Mariana Islands'), ('Mozambique', 'Mozambique'), ('Mauritania', 'Mauritania'), ('Montserrat', 'Montserrat'), ('Martinique', 'Martinique'), ('Mauritius', 'Mauritius'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Mayotte', 'Mayotte'), ('Namibia', 'Namibia'), ('New Caledonia', 'New Caledonia'), ('Niger', 'Niger'), ('Norfolk Island', 'Norfolk Island'), ('Nigeria', 'Nigeria'), ('Nicaragua', 'Nicaragua'), ('Niue', 'Niue'), ('Netherlands', 'Netherlands'), ('Norway', 'Norway'), ('Nepal', 'Nepal'), ('Nauru', 'Nauru'), ('New Zealand', 'New Zealand'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Panama', 'Panama'), ('Pitcairn', 'Pitcairn'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Palau', 'Palau'), ('Papua New Guinea', 'Papua New Guinea'), ('Poland', 'Poland'), ('Puerto Rico', 'Puerto Rico'), ("Korea, Democratic People's Republic of", "Korea, Democratic People's Republic of"), ('Portugal', 'Portugal'), ('Paraguay', 'Paraguay'), ('Palestine, State of', 'Palestine, State of'), ('French Polynesia', 'French Polynesia'), ('Qatar', 'Qatar'), ('Réunion', 'Réunion'), ('Romania', 'Romania'), ('Russian Federation', 'Russian Federation'), ('Rwanda', 'Rwanda'), ('Saudi Arabia', 'Saudi Arabia'), ('Sudan', 'Sudan'), ('Senegal', 'Senegal'), ('Singapore', 'Singapore'), ('South Georgia and the South Sandwich Islands', 'South Georgia and the South Sandwich Islands'), ('Saint Helena, Ascension and Tristan da Cunha', 'Saint Helena, Ascension and Tristan da Cunha'), ('Svalbard and Jan Mayen', 'Svalbard and Jan Mayen'), ('Solomon Islands', 'Solomon Islands'), ('Sierra Leone', 'Sierra Leone'), ('El Salvador', 'El Salvador'), ('San Marino', 'San Marino'), ('Somalia', 'Somalia'), ('Saint Pierre and Miquelon', 'Saint Pierre and Miquelon'), ('Serbia', 'Serbia'), ('South Sudan', 'South Sudan'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Suriname', 'Suriname'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Sweden', 'Sweden'), ('Eswatini', 'Eswatini'), ('Sint Maarten (Dutch part)', 'Sint Maarten (Dutch part)'), ('Seychelles', 'Seychelles'), ('Syrian Arab Republic', 'Syrian Arab Republic'), ('Turks and Caicos Islands', 'Turks and Caicos Islands'), ('Chad', 'Chad'), ('Togo', 'Togo'), ('Thailand', 'Thailand'), ('Tajikistan', 'Tajikistan'), ('Tokelau', 'Tokelau'), ('Turkmenistan', 'Turkmenistan'), ('Timor-Leste', 'Timor-Leste'), 
('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Tuvalu', 'Tuvalu'), ('Taiwan, Province of China', 'Taiwan, Province of China'), ('Tanzania, United Republic of', 'Tanzania, United Republic of'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United States Minor Outlying Islands', 'United States Minor Outlying Islands'), ('Uruguay', 'Uruguay'), ('United States', 'United States'), ('Uzbekistan', 'Uzbekistan'), ('Holy See (Vatican City State)', 'Holy See (Vatican City State)'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Venezuela, Bolivarian Republic of', 'Venezuela, Bolivarian Republic of'), ('Virgin Islands, British', 'Virgin Islands, British'), ('Virgin Islands, U.S.', 'Virgin Islands, U.S.'), ('Viet Nam', 'Viet Nam'), ('Vanuatu', 'Vanuatu'), ('Wallis and Futuna', 'Wallis and Futuna'), ('Samoa', 'Samoa'), ('Yemen', 'Yemen'), ('South Africa', 'South Africa'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe')], max_length=50, null=True, verbose_name='Country')),
('require_password_change', models.BooleanField(default=False, verbose_name='Require password change')),
('preferred_language', models.CharField(default='English', max_length=50, verbose_name='Preferred language')),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PasswordPolicy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('min_length', models.IntegerField()),
('exp_interval', models.IntegerField(default=90)),
('pwd_history', models.IntegerField(default=7)),
('is_alpha_numeric', models.BooleanField(default=True)),
('contains_username', models.BooleanField(default=False)),
('must_mixed', models.BooleanField(default=False)),
('status', models.BooleanField(default=False)),
('last_updated', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 201.491228
| 8,192
| 0.642055
|
f52042453875de2331fb323ec4971cb1d3f480b5
| 11,297
|
py
|
Python
|
bvh/BVHplay/geo.py
|
EducationalTestingService/VAMP
|
4dd18e12db8dc5d7211fed8f120e5a7059bebd87
|
[
"MIT"
] | 9
|
2015-09-22T21:04:20.000Z
|
2020-06-28T04:16:10.000Z
|
bvh/BVHplay/geo.py
|
EducationalTestingService/tamp
|
4dd18e12db8dc5d7211fed8f120e5a7059bebd87
|
[
"MIT"
] | null | null | null |
bvh/BVHplay/geo.py
|
EducationalTestingService/tamp
|
4dd18e12db8dc5d7211fed8f120e5a7059bebd87
|
[
"MIT"
] | 3
|
2015-12-10T13:36:20.000Z
|
2016-02-28T04:02:00.000Z
|
#
from numpy import array, dot
#########################################################
# WORLDVERT class
#########################################################
class worldvert:
def __init__(self, x=0, y=0, z=0, description='', DEBUG=0):
self.tr = array([x,y,z,1]) # tr = "translate position"
self.descr = description
self.DEBUG = DEBUG
def __repr__(self):
mystr = "worldvert " + self.descr + "\n tr: " + self.tr.__repr__()
return mystr
##################################################
# WORLDEDGE class
##################################################
class worldedge:
def __init__(self, wv1, wv2, description='', DEBUG=0):
self.wv1 = wv1
self.wv2 = wv2
self.descr = description
self.DEBUG = DEBUG
def __repr__(self):
mystr = "Worldedge " + self.descr +" wv1:\n" + self.wv1.__repr__() \
+ "\nworldedge " + self.descr + " wv2:\n" + \
self.wv2.__repr__() + "\n"
return mystr
##########################################################
# SCREENVERT class
##########################################################
# 9/1/08: Way too ugly to have screenvert contain or point to
# a worldvert, so I'm changing this to use a translate array just
# like worldvert. If you want screenvert to have the values of
# a worldvert, you need to copy those values in by hand or pass
# them in at construction time, just like you would with a worldvert.
class screenvert:
def __init__(self, x=0., y=0., z=0., description='', DEBUG=0):
self.tr = array([x,y,z,1]) # tr = "translate position"
self.camtr = array([0.,0.,0.,0.]) # Position in camera space
self.screenx = 0
self.screeny = 0
self.descr = description
self.DEBUG = DEBUG
def __repr__(self):
mystr = "screenvert " + self.descr + "\n tr: " + self.tr.__repr__() \
+ "\ncamspace: " + self.camtr.__repr__() + \
"\n screenspace: " + str(self.screenx) + ", " + \
str(self.screeny)
return mystr
def worldtocam(self, camera):
# camera.worldtocam is a premultiplied set of conversion transforms
# (trans then rot then invertz) maintained by the Camera object
self.camtr = dot(camera.worldtocam, self.tr)
if self.DEBUG:
print "Converted vertex %s to camspace:" % (self.descr)
print self
##################################################
# SCREENEDGE class
##################################################
class screenedge:
def __init__(self, sv1, sv2, width=2, color='black', arrow='none', \
description='', circle=0, DEBUG=0):
self.sv1 = sv1 # screenvert not worldvert
self.sv2 = sv2
self.width = width
self.id = 0 # Tracks canvas ID for line
self.cid = 0 # canvas ID of circle at joint end, if in use
self.color = color
self.arrow = arrow
self.descr = description
self.circle = circle # Set to 1 to draw circle at end of edge
self.drawme = 1 # Set to 0 to not attempt to draw on screen
self.DEBUG = DEBUG
def __repr__(self):
mystr = "Screenedge " + self.descr +" sv1:\n" + self.sv1.__repr__() \
+ "\nscreenedge " + self.descr + " sv2:\n" + \
self.sv2.__repr__() + "\n"
return mystr
def worldtocam(self,camera):
self.sv1.worldtocam(camera)
self.sv2.worldtocam(camera)
# 9/6/08: was in screenvert class, but needs to be in screenedge class
    def camtoscreen(self, camera, canvw, canvh):
        # canvw = canvas width, canvh = canvas height
# cf[xy] defines a camera rectangle, with origin at the center, that
# spans from (x,y)=(-cfx, -cfy) to (cfx, cfy)
#
        # canv[wh] defines a Tk canvas rectangle, with origin in upper left
# and "down" meaning "y positive", that spans from
        # (0,0) to (canvw,canvh)
#
# PARALLEL PROJECTION (PP) camera:
# We can ignore the camera Z value in this case.
# So this function just has to map camera (x,y) onto Tk's canvas
# (x,y), taking the sign reversal of the y axis into account.
#
# PERSPECTIVE PROJECTION camera:
# First we have to correct vertex x and y using a fudge factor
# that incorporates the vertex's z distance away from the camera,
# and the projection plane distance.
# After that the math is the same as the PP case.
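        # A quick illustrative example (numbers chosen here, not taken
        # from the original code): with a 640x480 canvas and
        # cfx = cfy = 10, a camera-space point at x = 5, y = 5 maps in
        # the parallel case to
        #   screenx = (640/2)*(1 + 5/10) = 480
        #   screeny = (480/2)*(1 - 5/10) = 120
        # i.e. right of centre and above centre, consistent with the
        # canvas y axis pointing down.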
cfx = camera.cfx # "Camera frame size", width
cfy = camera.cfy # "Camera frame size", height
ppdist = camera.ppdist
x1 = self.sv1.camtr[0]
y1 = self.sv1.camtr[1]
z1 = self.sv1.camtr[2]
x2 = self.sv2.camtr[0]
y2 = self.sv2.camtr[1]
z2 = self.sv2.camtr[2]
if camera.parallel:
self.sv1.screenx = (canvw/2)*(1 + (x1/cfx))
self.sv1.screeny = (canvh/2)*(1 - (y1/cfy))
self.sv2.screenx = (canvw/2)*(1 + (x2/cfx))
self.sv2.screeny = (canvh/2)*(1 - (y2/cfy))
else: # perspective camera
if z1 > 0.1 and z2 > 0.1:
self.drawme = 1
xproj1 = x1 * ppdist / z1
yproj1 = y1 * ppdist / z1
self.sv1.screenx = (canvw/2)*(1 + (xproj1/cfx))
self.sv1.screeny = (canvh/2)*(1 - (yproj1/cfy))
xproj2 = x2 * ppdist / z2
yproj2 = y2 * ppdist / z2
self.sv2.screenx = (canvw/2)*(1 + (xproj2/cfx))
self.sv2.screeny = (canvh/2)*(1 - (yproj2/cfy))
elif z1 <= 0.1 and z2 <= 0.1:
self.drawme = 0 # Both verts are behind the camera -- stop now
elif z1 > 0.1 and z2 <= 0.1:
# First vert is in front of camera, second vert is not
# print "se.camtoscreen case 3 starting for (%s)" % self.descr
# print " verts are (%s,%s,%s) (%s,%s,%s)" % \
# (x1,y1,z1,x2,y2,z2)
self.drawme = 1
xproj1 = x1 * ppdist / z1
yproj1 = y1 * ppdist / z1
self.sv1.screenx = (canvw/2)*(1 + (xproj1/cfx))
self.sv1.screeny = (canvh/2)*(1 - (yproj1/cfy))
# print " sv1 maps to (%s,%s)" % (self.sv1.screenx, \
# self.sv1.screeny)
t = (0.1-z1)/(z2-z1)
x3 = t*(x2-x1) + x1
y3 = t*(y2-y1) + y1
z3 = 0.1
# print " Computed alternate point (%s,%s,%s)" % (x3,y3,z3)
xproj3 = x3 * ppdist / z3
yproj3 = y3 * ppdist / z3
self.sv2.screenx = (canvw/2)*(1 + (xproj3/cfx))
self.sv2.screeny = (canvh/2)*(1 - (yproj3/cfy))
# print " Alternate point maps to (%s,%s)" % (self.sv2.screenx, \
# self.sv2.screeny)
else:
# First vert is behind the camera, second vert is not
# print "se.camtoscreen case 4 starting for (%s)", self.descr
self.drawme = 1
xproj2 = x2 * ppdist / z2
yproj2 = y2 * ppdist / z2
self.sv2.screenx = (canvw/2)*(1 + (xproj2/cfx))
self.sv2.screeny = (canvh/2)*(1 - (yproj2/cfy))
t = (0.1-z2)/(z1-z2)
x3 = t*(x1-x2) + x2
y3 = t*(y1-y2) + y2
z3 = 0.1
xproj3 = x3 * ppdist / z3
yproj3 = y3 * ppdist / z3
self.sv1.screenx = (canvw/2)*(1 + (xproj3/cfx))
self.sv1.screeny = (canvh/2)*(1 - (yproj3/cfy))
# If the vertex has z=0, it's "in the camera's blind spot" - it's like
# having something right above your head or next to one of your ears.
# The visual result is that you can't see it. But you can still see
# the edge that connects to the vertex, if the other end of the edge
# is within the viewport.
#
# If the vertex has z<0, it will project onto the projection plane
# incorrectly, somewhat as if the object is behind you and you're
# holding a spoon in front of you and seeing the object reflected.
# The x and y projection points end up flipping from the z>0 case.
# This isn't what we want -- it's very disorienting and doesn't
# correspond to what an actual viewer would see.
# The approach used here is to compute a replacement point (x3,y3,z3)
# which is in front of the camera. Here the math sets z3=0.1 and
# computes x3 and y3 using a parameterized representation of
# the line segment sv1--sv2
#
# This approach to vertices with z<=0 also means that for the perspective
# camera, we don't see objects that are behind us. For the parallel
# camera this presently isn't true - the parallel camera renders points
# no matter what (camera z) is.
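    # A small worked example of the replacement-point computation above
    # (illustrative numbers, not taken from the original code): with sv1
    # at camera-space (2, 1, 4) and sv2 at (2, 1, -2), sv2 is behind the
    # camera, so
    #   t  = (0.1 - 4) / (-2 - 4) = 0.65
    #   x3 = 0.65*(2 - 2) + 2 = 2
    #   y3 = 0.65*(1 - 1) + 1 = 1
    #   z3 = 0.1
    # and the point (2, 1, 0.1) is projected in place of sv2, keeping the
    # visible part of the edge on screen.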
# def camtoscreen(self,camera, canvw, canvh):
# self.sv1.camtoscreen(camera, canvw, canvh)
# self.sv2.camtoscreen(camera, canvw, canvh)
def draw(self, canvas):
self.undraw(canvas)
if self.drawme:
x1 = self.sv1.screenx
y1 = self.sv1.screeny
x2 = self.sv2.screenx
y2 = self.sv2.screeny
if self.DEBUG:
print "About to call create_line with (%d, %d, %d, %d)" \
% (x1,y1,x2,y2)
self.id = canvas.create_line(x1,y1,x2,y2, fill=self.color, \
width=self.width, arrow=self.arrow)
if self.circle:
self.cid = canvas.create_oval(x2-3,y2-3,x2+3,y2+3, \
fill=self.color)
def undraw(self, canvas):
if self.id:
canvas.delete(self.id)
self.id = 0
if self.cid:
canvas.delete(self.cid)
self.cid = 0
#############################
# BEGIN NON-CLASS FUNCTIONS
#############################
####################################
# UNDRAW_SCREEN_LINES
# Use this function to delete all displayed skeleton edges (bones)
# from display on a canvas
#
def undraw_screen_lines(screenedgelist, canvas):
count = len(screenedgelist)
for x in range(count):
screenedgelist[x].undraw(canvas)
##################################
# GRID_SETUP
# Creates and returns a populated array of screenedge
# Don't call this until you've set up your skeleton and can
# extract minx, miny, maxx, maxy from it.
#
def grid_setup(minx, minz, maxx, maxz, DEBUG=0):
if DEBUG:
print "grid_setup: minx=%s, minz=%s, maxx=%s, maxz=%s" % \
(minx, minz, maxx, maxz)
# The input values define a rectangle. Round them to nearest 10.
minx2 = 10*int(minx/10) - 10
maxx2 = 10*int(maxx/10) + 10
minz2 = 10*int(minz/10) - 10
maxz2 = 10*int(maxz/10) + 10
gridedges = []
# Range() won't give us the topmost value of the range, so we have to
# use maxz2+1 as the top of the range.
for z in range(minz2, maxz2+1, 10):
sv1 = screenvert(minx2, 0., z)
sv2 = screenvert(maxx2, 0., z)
se = screenedge(sv1, sv2, width=1, color='grey', DEBUG=0)
if DEBUG:
print "grid_setup: adding screenedge from (%d,%d) to (%d,%d)" \
% (minx2, z, maxx2, z)
gridedges.append(se)
for x in range(minx2, maxx2+1, 10):
sv1 = screenvert(x, 0., minz2)
sv2 = screenvert(x, 0., maxz2)
se = screenedge(sv1, sv2, width=1, color='grey', DEBUG=0)
if DEBUG:
print "grid_setup: adding screenedge from (%d,%d) to (%d,%d)" \
% (x, minz2, x, maxz2)
gridedges.append(se)
return gridedges
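# Minimal usage sketch (added for illustration; not part of the original
# BVHplay source). It only uses the classes and functions defined above:
# build a small ground grid with grid_setup and report what came back.
if __name__ == '__main__':
    demo_edges = grid_setup(-25., -25., 25., 25., DEBUG=0)
    print("grid_setup produced %d screenedge objects" % len(demo_edges))
    print(demo_edges[0])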
| 36.092652
| 79
| 0.550323
|
bf17e821c49219ee8a57f48e7d18f08401d67c42
| 891
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/disk/models/Quota.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/disk/models/Quota.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/disk/models/Quota.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Quota(object):
def __init__(self, limit=None, used=None):
"""
:param limit: (Optional) 配额
:param used: (Optional) 已使用的数目
"""
self.limit = limit
self.used = used
| 29.7
| 75
| 0.702581
|
2e23180e5b8f70a7bb9a9cddfde7030af497031e
| 3,161
|
py
|
Python
|
player_types.py
|
yutaro-suzuki/batting_order_simulator
|
4ecc8a3b630ef11f908df4d9a97c3b9101bbbf5e
|
[
"MIT"
] | null | null | null |
player_types.py
|
yutaro-suzuki/batting_order_simulator
|
4ecc8a3b630ef11f908df4d9a97c3b9101bbbf5e
|
[
"MIT"
] | null | null | null |
player_types.py
|
yutaro-suzuki/batting_order_simulator
|
4ecc8a3b630ef11f908df4d9a97c3b9101bbbf5e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from player import Player
from ball import status
import numpy as np
class Pitcher(Player):
def __init__(self, f, p, mw):
super().__init__(f, p, mw)
self.range = 60
self.radians = 30
self.kyori = 150
def pitchTheBall(self, ball):
print('pitch!')
if self.hasBall and not self.top:
ball.thrown(0, 12)
ball.mode = status.PITCHED
self.hasBall = False
ball.isCaught = False
class Catcher(Player):
def __init__(self, f, p, mw):
super().__init__(f, p, mw)
self.range = 180
self.radians = 30
self.kyori = 30
def nearestToBall(self, ball, ground):
if ball.isFair or ball.mode == status.FRYING and ball.pos[2] > 3:
super().nearestToBall(ball, ground)
def move_defence(self, ball, ground):
if (ball.isFair or ball.mode == status.FRYING) and ground.bases[3].runner != -1:
self.goToBase(ground.bases[0], ball)
elif ball.isFair or ball.mode == status.FRYING and ball.pos[2] > 3:
super().move_defence(ball, ground)
def catchThePitch(self, ball):
x = ball.pos[0] + ball.vel[0] - self.pos[0]
y = ball.pos[1] + ball.vel[1] - self.pos[1]
dis = np.sqrt(x * x + y * y)
if ball.pos[2] + ball.vel[2] < 3 and dis <= 5:
            self.catch(ball) # catch the ball
ball.mode = status.TIME
self.hasBall = True
class First(Player):
def __init__(self, f, p, mw):
super().__init__(f, p, mw)
self.range = 15
self.radians = 100
self.kyori = 200
def move_defence(self, ball, ground):
super().move_defence(ball, ground)
if ball.mode == status.HITTED or ball.mode == status.FRYING and not ground.bases[1].isSteped:
self.goToBase(ground.bases[1], ball)
class Second(Player):
def __init__(self, f, p, mw):
super().__init__(f, p, mw)
self.range = 15
self.radians = 200
self.kyori = 250
def move_defence(self, ball, ground):
super().move_defence(ball, ground)
if (ball.isFair or ball.mode == status.FRYING) and ground.bases[1].runner != -1 and not ground.bases[2].isSteped:
self.goToBase(ground.bases[2], ball)
class Third(Player):
def __init__(self, f, p, mw):
super().__init__(f, p, mw)
self.range = 15
self.radians = 200
self.kyori = 200
def move_defence(self, ball, ground):
if (ball.isFair or ball.mode == status.FRYING) and ground.bases[2].runner != -1:
self.goToBase(ground.bases[3], ball)
else:
super().move_defence(ball, ground)
class Short(Player):
def __init__(self, f, p, mw):
super().__init__(f, p, mw)
self.range = 15
self.radians = 200
self.kyori = 250
def move_defence(self, ball, ground):
if (ball.isFair or ball.mode == status.FRYING) and ground.bases[1].runner != -1 and not ground.bases[2].isSteped:
self.goToBase(ground.bases[2], ball)
else:
super().move_defence(ball, ground)
| 29.820755
| 121
| 0.574818
|
7df338c973634676b438591cf0b10b488b28395b
| 20,200
|
py
|
Python
|
tests/test_modeling_fsmt.py
|
suliuzh/transformers
|
f34372a9ff99f6bc8619ac83dc07f7afe6b92141
|
[
"Apache-2.0"
] | 1
|
2022-03-25T00:00:49.000Z
|
2022-03-25T00:00:49.000Z
|
tests/test_modeling_fsmt.py
|
suliuzh/transformers
|
f34372a9ff99f6bc8619ac83dc07f7afe6b92141
|
[
"Apache-2.0"
] | 1
|
2021-03-21T03:28:23.000Z
|
2021-03-21T06:06:39.000Z
|
tests/test_modeling_fsmt.py
|
suliuzh/transformers
|
f34372a9ff99f6bc8619ac83dc07f7afe6b92141
|
[
"Apache-2.0"
] | 2
|
2021-03-27T16:46:26.000Z
|
2021-12-24T00:33:28.000Z
|
# coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import timeout_decorator # noqa
from parameterized import parameterized
from transformers import is_torch_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTModel, FSMTTokenizer
from transformers.modeling_fsmt import (
SinusoidalPositionalEmbedding,
_prepare_fsmt_decoder_inputs,
invert_mask,
shift_tokens_right,
)
from transformers.pipelines import TranslationPipeline
@require_torch
class ModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.src_vocab_size = 99
self.tgt_vocab_size = 99
self.langs = ["ru", "en"]
self.batch_size = 13
self.seq_length = 7
self.is_training = False
self.use_labels = False
self.hidden_size = 16
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.intermediate_size = 4
self.hidden_act = "relu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 20
self.bos_token_id = 0
self.pad_token_id = 1
self.eos_token_id = 2
torch.manual_seed(0)
# hack needed for modeling_common tests - despite not really having this attribute in this model
self.vocab_size = self.src_vocab_size
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.src_vocab_size).clamp(
3,
)
input_ids[:, -1] = 2 # Eos Token
config = FSMTConfig(
vocab_size=self.src_vocab_size, # hack needed for common tests
src_vocab_size=self.src_vocab_size,
tgt_vocab_size=self.tgt_vocab_size,
langs=self.langs,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
inputs_dict = prepare_fsmt_inputs_dict(config, input_ids)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"]
inputs_dict["decoder_attention_mask"] = inputs_dict["attention_mask"]
inputs_dict["use_cache"] = False
return config, inputs_dict
def prepare_fsmt_inputs_dict(
config,
input_ids,
attention_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
@require_torch
class FSMTModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (FSMTModel, FSMTForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (FSMTForConditionalGeneration,) if is_torch_available() else ()
is_encoder_decoder = True
test_pruning = False
test_head_masking = False
test_missing_keys = False
def setUp(self):
self.model_tester = ModelTester(self)
self.langs = ["en", "ru"]
config = {
"langs": self.langs,
"src_vocab_size": 10,
"tgt_vocab_size": 20,
}
# XXX: hack to appease to all other models requiring `vocab_size`
config["vocab_size"] = 99 # no such thing in FSMT
self.config_tester = ConfigTester(self, config_class=FSMTConfig, **config)
def test_config(self):
self.config_tester.run_common_tests()
# XXX: override test_model_common_attributes / different Embedding type
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Embedding))
model.set_input_embeddings(torch.nn.Embedding(10, 10))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, torch.nn.modules.sparse.Embedding))
def test_initialization_more(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
model = FSMTModel(config)
model.to(torch_device)
model.eval()
# test init
# self.assertTrue((model.encoder.embed_tokens.weight == model.shared.weight).all().item())
def _check_var(module):
"""Check that we initialized various parameters from N(0, config.init_std)."""
self.assertAlmostEqual(torch.std(module.weight).item(), config.init_std, 2)
_check_var(model.encoder.embed_tokens)
_check_var(model.encoder.layers[0].self_attn.k_proj)
_check_var(model.encoder.layers[0].fc1)
# XXX: different std for fairseq version of SinusoidalPositionalEmbedding
# self.assertAlmostEqual(torch.std(model.encoder.embed_positions.weights).item(), config.init_std, 2)
def test_advanced_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.use_cache = False
inputs_dict["input_ids"][:, -2:] = config.pad_token_id
decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs(
config, inputs_dict["input_ids"]
)
model = FSMTModel(config).to(torch_device).eval()
decoder_features_with_created_mask = model(**inputs_dict)[0]
decoder_features_with_passed_mask = model(
decoder_attention_mask=invert_mask(decoder_attn_mask), decoder_input_ids=decoder_input_ids, **inputs_dict
)[0]
_assert_tensors_equal(decoder_features_with_passed_mask, decoder_features_with_created_mask)
useless_mask = torch.zeros_like(decoder_attn_mask)
decoder_features = model(decoder_attention_mask=useless_mask, **inputs_dict)[0]
self.assertTrue(isinstance(decoder_features, torch.Tensor)) # no hidden states or attentions
self.assertEqual(
decoder_features.size(),
(self.model_tester.batch_size, self.model_tester.seq_length, config.tgt_vocab_size),
)
if decoder_attn_mask.min().item() < -1e3: # some tokens were masked
self.assertFalse((decoder_features_with_created_mask == decoder_features).all().item())
# Test different encoder attention masks
decoder_features_with_long_encoder_mask = model(
inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"].long()
)[0]
_assert_tensors_equal(decoder_features_with_long_encoder_mask, decoder_features_with_created_mask)
def test_save_load_missing_keys(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
@unittest.skip("can't be implemented for FSMT due to dual vocab.")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip("Passing inputs_embeds not implemented for FSMT.")
def test_inputs_embeds(self):
pass
@unittest.skip("model weights aren't tied in FSMT.")
def test_tie_model_weights(self):
pass
# def test_auto_model(self):
# # XXX: add a tiny model to s3?
# model_name = "facebook/wmt19-ru-en-tiny"
# tiny = AutoModel.from_pretrained(model_name) # same vocab size
# tok = AutoTokenizer.from_pretrained(model_name) # same tokenizer
# inputs_dict = tok.batch_encode_plus(["Hello my friends"], return_tensors="pt")
# with torch.no_grad():
# tiny(**inputs_dict)
@require_torch
class FSMTHeadTests(unittest.TestCase):
src_vocab_size = 99
tgt_vocab_size = 99
langs = ["ru", "en"]
def _get_config(self):
return FSMTConfig(
src_vocab_size=self.src_vocab_size,
tgt_vocab_size=self.tgt_vocab_size,
langs=self.langs,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
return_dict=True,
)
def _get_config_and_data(self):
input_ids = torch.tensor(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
],
dtype=torch.long,
device=torch_device,
)
batch_size = input_ids.shape[0]
config = self._get_config()
return config, input_ids, batch_size
def test_generate_beam_search(self):
input_ids = torch.Tensor([[71, 82, 2], [68, 34, 2]]).long().to(torch_device)
config = self._get_config()
lm_model = FSMTForConditionalGeneration(config).to(torch_device)
lm_model.eval()
max_length = 5
new_input_ids = lm_model.generate(
input_ids.clone(),
do_sample=True,
num_return_sequences=1,
num_beams=2,
no_repeat_ngram_size=3,
max_length=max_length,
)
self.assertEqual(new_input_ids.shape, (input_ids.shape[0], max_length))
def test_shift_tokens_right(self):
input_ids = torch.Tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]]).long()
shifted = shift_tokens_right(input_ids, 1)
n_pad_before = input_ids.eq(1).float().sum()
n_pad_after = shifted.eq(1).float().sum()
self.assertEqual(shifted.shape, input_ids.shape)
self.assertEqual(n_pad_after, n_pad_before - 1)
self.assertTrue(torch.eq(shifted[:, 0], 2).all())
def test_generate_fp16(self):
config, input_ids, batch_size = self._get_config_and_data()
attention_mask = input_ids.ne(1).to(torch_device)
model = FSMTForConditionalGeneration(config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_dummy_inputs(self):
config, *_ = self._get_config_and_data()
model = FSMTForConditionalGeneration(config).eval().to(torch_device)
model(**model.dummy_inputs)
def test_prepare_fsmt_decoder_inputs(self):
config, *_ = self._get_config_and_data()
input_ids = _long_tensor(([4, 4, 2]))
decoder_input_ids = _long_tensor([[26388, 2, config.pad_token_id]])
ignore = float("-inf")
decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs(
config, input_ids, decoder_input_ids
)
expected_causal_mask = torch.tensor(
[[0, ignore, ignore], [0, 0, ignore], [0, 0, 0]] # never attend to the final token, because its pad
).to(input_ids.device)
self.assertEqual(decoder_attn_mask.size(), decoder_input_ids.size())
self.assertTrue(torch.eq(expected_causal_mask, causal_mask).all())
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
pairs = [
["en-ru"],
["ru-en"],
["en-de"],
["de-en"],
]
@require_torch
@require_sentencepiece
@require_tokenizers
class FSMTModelIntegrationTests(unittest.TestCase):
tokenizers_cache = {}
models_cache = {}
default_mname = "facebook/wmt19-en-ru"
@cached_property
def default_tokenizer(self):
return self.get_tokenizer(self.default_mname)
@cached_property
def default_model(self):
return self.get_model(self.default_mname)
def get_tokenizer(self, mname):
if mname not in self.tokenizers_cache:
self.tokenizers_cache[mname] = FSMTTokenizer.from_pretrained(mname)
return self.tokenizers_cache[mname]
def get_model(self, mname):
if mname not in self.models_cache:
self.models_cache[mname] = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
if torch_device == "cuda":
self.models_cache[mname].half()
return self.models_cache[mname]
@slow
def test_inference_no_head(self):
tokenizer = self.default_tokenizer
model = FSMTModel.from_pretrained(self.default_mname).to(torch_device)
src_text = "My friend computer will translate this for me"
input_ids = tokenizer([src_text], return_tensors="pt")["input_ids"]
input_ids = _long_tensor(input_ids).to(torch_device)
inputs_dict = prepare_fsmt_inputs_dict(model.config, input_ids)
with torch.no_grad():
output = model(**inputs_dict)[0]
expected_shape = torch.Size((1, 10, model.config.tgt_vocab_size))
self.assertEqual(output.shape, expected_shape)
# expected numbers were generated when en-ru model, using just fairseq's model4.pt
# may have to adjust if switched to a different checkpoint
expected_slice = torch.tensor(
[[-1.5753, -1.5753, 2.8975], [-0.9540, -0.9540, 1.0299], [-3.3131, -3.3131, 0.5219]]
).to(torch_device)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
def translation_setup(self, pair):
text = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
src, tgt = pair.split("-")
print(f"Testing {src} -> {tgt}")
mname = f"facebook/wmt19-{pair}"
src_text = text[src]
tgt_text = text[tgt]
tokenizer = self.get_tokenizer(mname)
model = self.get_model(mname)
return tokenizer, model, src_text, tgt_text
@parameterized.expand(pairs)
@slow
def test_translation_direct(self, pair):
tokenizer, model, src_text, tgt_text = self.translation_setup(pair)
input_ids = tokenizer.encode(src_text, return_tensors="pt").to(torch_device)
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
assert decoded == tgt_text, f"\n\ngot: {decoded}\nexp: {tgt_text}\n"
@parameterized.expand(pairs)
@slow
def test_translation_pipeline(self, pair):
tokenizer, model, src_text, tgt_text = self.translation_setup(pair)
device = 0 if torch_device == "cuda" else -1
pipeline = TranslationPipeline(model, tokenizer, framework="pt", device=device)
output = pipeline([src_text])
self.assertEqual([tgt_text], [x["translation_text"] for x in output])
@require_torch
class TestSinusoidalPositionalEmbeddings(unittest.TestCase):
padding_idx = 1
tolerance = 1e-4
def test_basic(self):
input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device)
emb1 = SinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6, padding_idx=self.padding_idx).to(
torch_device
)
emb = emb1(input_ids)
desired_weights = torch.tensor(
[
[9.0930e-01, 1.9999e-02, 2.0000e-04, -4.1615e-01, 9.9980e-01, 1.0000e00],
[1.4112e-01, 2.9995e-02, 3.0000e-04, -9.8999e-01, 9.9955e-01, 1.0000e00],
]
).to(torch_device)
self.assertTrue(
torch.allclose(emb[0], desired_weights, atol=self.tolerance),
msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n",
)
def test_odd_embed_dim(self):
# odd embedding_dim is allowed
SinusoidalPositionalEmbedding(num_positions=4, embedding_dim=5, padding_idx=self.padding_idx).to(torch_device)
# odd num_embeddings is allowed
SinusoidalPositionalEmbedding(num_positions=5, embedding_dim=4, padding_idx=self.padding_idx).to(torch_device)
@unittest.skip("different from marian (needs more research)")
def test_positional_emb_weights_against_marian(self):
desired_weights = torch.tensor(
[
[0, 0, 0, 0, 0],
[0.84147096, 0.82177866, 0.80180490, 0.78165019, 0.76140374],
[0.90929741, 0.93651021, 0.95829457, 0.97505713, 0.98720258],
]
)
emb1 = SinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512, padding_idx=self.padding_idx).to(
torch_device
)
weights = emb1.weights.data[:3, :5]
# XXX: only the 1st and 3rd lines match - this is testing against
# verbatim copy of SinusoidalPositionalEmbedding from fairseq
self.assertTrue(
torch.allclose(weights, desired_weights, atol=self.tolerance),
msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n",
)
# test that forward pass is just a lookup, there is no ignore padding logic
input_ids = torch.tensor(
[[4, 10, self.padding_idx, self.padding_idx, self.padding_idx]], dtype=torch.long, device=torch_device
)
no_cache_pad_zero = emb1(input_ids)[0]
# XXX: only the 1st line matches the 3rd
self.assertTrue(
torch.allclose(torch.tensor(desired_weights, device=torch_device), no_cache_pad_zero[:3, :5], atol=1e-3)
)
| 38.846154
| 118
| 0.652426
|
01934dcc8c20c3107c6c7c886f5a8ce9c5d2cc38
| 7,859
|
py
|
Python
|
virtual_env/lib/python3.5/site-packages/google_compute_engine/metadata_scripts/script_retriever.py
|
straydag/To_Due_Backend
|
ac91f5ebabe8e4f2b6db7faa5ccbd30ebdb4e3f6
|
[
"MIT"
] | 2
|
2019-06-25T18:25:49.000Z
|
2019-06-27T04:48:53.000Z
|
virtual_env/lib/python3.5/site-packages/google_compute_engine/metadata_scripts/script_retriever.py
|
straydag/To_Due_Backend
|
ac91f5ebabe8e4f2b6db7faa5ccbd30ebdb4e3f6
|
[
"MIT"
] | 6
|
2020-09-08T00:13:19.000Z
|
2022-02-27T01:04:42.000Z
|
virtual_env/lib/python3.5/site-packages/google_compute_engine/metadata_scripts/script_retriever.py
|
straydag/To_Due_Backend
|
ac91f5ebabe8e4f2b6db7faa5ccbd30ebdb4e3f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieve and store user provided metadata scripts."""
import ast
import re
import socket
import subprocess
import tempfile
from google_compute_engine import metadata_watcher
from google_compute_engine.compat import httpclient
from google_compute_engine.compat import urlerror
from google_compute_engine.compat import urlrequest
from google_compute_engine.compat import urlretrieve
class ScriptRetriever(object):
"""A class for retrieving and storing user provided metadata scripts."""
token_metadata_key = 'instance/service-accounts/default/token'
# Cached authentication token to be used when downloading from bucket.
token = None
def __init__(self, logger, script_type):
"""Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
script_type: string, the metadata script type to run.
"""
self.logger = logger
self.script_type = script_type
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
def _DownloadAuthUrl(self, url, dest_dir):
"""Download a Google Storage URL using an authentication token.
If the token cannot be fetched, fallback to unauthenticated download.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
"""
dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
dest_file.close()
dest = dest_file.name
self.logger.info(
'Downloading url from %s to %s using authentication token.', url, dest)
if not self.token:
response = self.watcher.GetMetadata(
self.token_metadata_key, recursive=False, retry=False)
if not response:
self.logger.info(
'Authentication token not found. Attempting unauthenticated '
'download.')
return self._DownloadUrl(url, dest_dir)
self.token = '%s %s' % (
response.get('token_type', ''), response.get('access_token', ''))
try:
request = urlrequest.Request(url)
request.add_unredirected_header('Metadata-Flavor', 'Google')
request.add_unredirected_header('Authorization', self.token)
content = urlrequest.urlopen(request).read().decode('utf-8')
except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
self.logger.warning('Could not download %s. %s.', url, str(e))
return None
with open(dest, 'w') as f:
f.write(content)
return dest
def _DownloadUrl(self, url, dest_dir):
"""Download a script from a given URL.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
"""
dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
dest_file.close()
dest = dest_file.name
self.logger.info('Downloading url from %s to %s.', url, dest)
try:
urlretrieve.urlretrieve(url, dest)
return dest
except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
self.logger.warning('Could not download %s. %s.', url, str(e))
except Exception as e:
self.logger.warning('Exception downloading %s. %s.', url, str(e))
return None
def _DownloadScript(self, url, dest_dir):
"""Download the contents of the URL to the destination.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
"""
# Check for the preferred Google Storage URL format:
# gs://<bucket>/<object>
if url.startswith(r'gs://'):
# Convert the string into a standard URL.
url = re.sub('^gs://', 'https://storage.googleapis.com/', url)
return self._DownloadAuthUrl(url, dest_dir)
header = r'http[s]?://'
domain = r'storage\.googleapis\.com'
# Many of the Google Storage URLs are supported below.
    # It is preferred that customers specify their object using
    # its gs://<bucket>/<object> URL.
bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'
# Accept any non-empty string that doesn't contain a wildcard character
obj = r'(?P<obj>[^\*\?]+)'
# Check for the Google Storage URLs:
# http://<bucket>.storage.googleapis.com/<object>
# https://<bucket>.storage.googleapis.com/<object>
gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj))
match = gs_regex.match(url)
if match:
return self._DownloadAuthUrl(url, dest_dir)
# Check for the other possible Google Storage URLs:
# http://storage.googleapis.com/<bucket>/<object>
# https://storage.googleapis.com/<bucket>/<object>
#
# The following are deprecated but checked:
# http://commondatastorage.googleapis.com/<bucket>/<object>
# https://commondatastorage.googleapis.com/<bucket>/<object>
gs_regex = re.compile(
r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj))
match = gs_regex.match(url)
if match:
return self._DownloadAuthUrl(url, dest_dir)
# Unauthenticated download of the object.
return self._DownloadUrl(url, dest_dir)
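  # For illustration (the sample bucket/object names below are made up,
  # not taken from this module), URLs of the forms
  #   gs://my-bucket/startup.sh
  #   https://my-bucket.storage.googleapis.com/startup.sh
  #   https://storage.googleapis.com/my-bucket/startup.sh
  # are all routed through _DownloadAuthUrl above, while anything else,
  # e.g. https://example.com/startup.sh, falls through to the
  # unauthenticated _DownloadUrl path.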
def _GetAttributeScripts(self, attribute_data, dest_dir):
"""Retrieve the scripts from attribute metadata.
Args:
attribute_data: dict, the contents of the attributes metadata.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
dict, a dictionary mapping metadata keys to files storing scripts.
"""
script_dict = {}
attribute_data = attribute_data or {}
metadata_key = '%s-script' % self.script_type
metadata_value = attribute_data.get(metadata_key)
if metadata_value:
self.logger.info('Found %s in metadata.', metadata_key)
with tempfile.NamedTemporaryFile(
mode='w', dir=dest_dir, delete=False) as dest:
dest.write(metadata_value.lstrip())
script_dict[metadata_key] = dest.name
metadata_key = '%s-script-url' % self.script_type
metadata_value = attribute_data.get(metadata_key)
if metadata_value:
self.logger.info('Found %s in metadata.', metadata_key)
script_dict[metadata_key] = self._DownloadScript(
metadata_value, dest_dir)
return script_dict
def GetScripts(self, dest_dir):
"""Retrieve the scripts to execute.
Args:
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
dict, a dictionary mapping set metadata keys with associated scripts.
"""
metadata_dict = self.watcher.GetMetadata() or {}
try:
instance_data = metadata_dict['instance']['attributes']
except KeyError:
instance_data = None
self.logger.warning('Instance attributes were not found.')
try:
project_data = metadata_dict['project']['attributes']
except KeyError:
project_data = None
self.logger.warning('Project attributes were not found.')
return (self._GetAttributeScripts(instance_data, dest_dir)
or self._GetAttributeScripts(project_data, dest_dir))
| 34.928889
| 79
| 0.692455
|
e4d0c37d9e1e89f3cd4105cd09ca4059403bb819
| 515
|
py
|
Python
|
tests/gamestonk_terminal/stocks/quantitative_analysis/test_factors_view.py
|
elan17/GamestonkTerminal
|
f958f8275dc15ffaf30c1f0652f5b033725b7f10
|
[
"MIT"
] | 1,835
|
2021-05-09T02:55:06.000Z
|
2022-03-29T12:37:05.000Z
|
tests/gamestonk_terminal/stocks/quantitative_analysis/test_factors_view.py
|
elan17/GamestonkTerminal
|
f958f8275dc15ffaf30c1f0652f5b033725b7f10
|
[
"MIT"
] | 569
|
2021-05-09T15:59:41.000Z
|
2022-03-29T12:25:16.000Z
|
tests/gamestonk_terminal/stocks/quantitative_analysis/test_factors_view.py
|
elan17/GamestonkTerminal
|
f958f8275dc15ffaf30c1f0652f5b033725b7f10
|
[
"MIT"
] | 268
|
2021-05-10T21:46:50.000Z
|
2022-03-28T09:18:38.000Z
|
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.stocks.quantitative_analysis import factors_view
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("period1", "1598220000"),
("period2", "1635980400"),
],
}
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_capm_view():
factors_view.capm_view(ticker="PM")
| 20.6
| 72
| 0.673786
|
c6381f63306b08db3956c887602908c43750568b
| 3,546
|
py
|
Python
|
src/model-data/generator.py
|
carseven/color-blind-test-hack
|
debac7cc1c8176ff722e1e7fb5f5eae12a92d3a0
|
[
"MIT"
] | null | null | null |
src/model-data/generator.py
|
carseven/color-blind-test-hack
|
debac7cc1c8176ff722e1e7fb5f5eae12a92d3a0
|
[
"MIT"
] | null | null | null |
src/model-data/generator.py
|
carseven/color-blind-test-hack
|
debac7cc1c8176ff722e1e7fb5f5eae12a92d3a0
|
[
"MIT"
] | null | null | null |
import math
import random
import sys
from PIL import Image, ImageDraw
try:
from scipy.spatial import cKDTree as KDTree
import numpy as np
IMPORTED_SCIPY = True
except ImportError:
IMPORTED_SCIPY = False
BACKGROUND = (255, 255, 255)
TOTAL_CIRCLES = 1500
def color(c): return ((c >> 16) & 255, (c >> 8) & 255, c & 255)
COLORS_ON = [
color(0xF9BB82), color(0xEBA170), color(0xFCCD84)
]
COLORS_OFF = [
color(0x9CA594), color(0xACB4A5), color(0xBBB964),
color(0xD7DAAA), color(0xE5D57D), color(0xD1D6AF)
]
def generate_circle(image_width, image_height, min_diameter, max_diameter):
radius = random.triangular(min_diameter, max_diameter,
max_diameter * 0.8 + min_diameter * 0.2) / 2
angle = random.uniform(0, math.pi * 2)
distance_from_center = random.uniform(0, image_width * 0.48 - radius)
x = image_width * 0.5 + math.cos(angle) * distance_from_center
y = image_height * 0.5 + math.sin(angle) * distance_from_center
return x, y, radius
def overlaps_motive(image, x, y, r):
points_x = [x, x, x, x-r, x+r, x-r*0.93, x-r*0.93, x+r*0.93, x+r*0.93]
points_y = [y, y-r, y+r, y, y, y+r*0.93, y-r*0.93, y+r*0.93, y-r*0.93]
for xy in zip(points_x, points_y):
        # if image.getpixel(xy)[:3] != BACKGROUND: -> This breaks if the images are black and white; check the image type and add a flag.
if image.getpixel(xy) != BACKGROUND:
return True
return False
def circle_intersection(x1, y1, r1, x2, y2, r2):
return (x2 - x1)**2 + (y2 - y1)**2 < (r2 + r1)**2
def circle_draw(draw_image, image, x, y, r):
fill_colors = COLORS_ON if overlaps_motive(
image, x, y, r) else COLORS_OFF
fill_color = random.choice(fill_colors)
draw_image.ellipse((x - r, y - r, x + r, y + r),
fill=fill_color,
outline=fill_color)
def main():
image = Image.open(sys.argv[1])
image2 = Image.new('RGB', image.size, BACKGROUND)
draw_image = ImageDraw.Draw(image2)
width, height = image.size
min_diameter = (width + height) / 200
max_diameter = (width + height) / 75
circle = generate_circle(width, height, min_diameter, max_diameter)
circles = [circle]
circle_draw(draw_image, image, *circle)
try:
for i in range(TOTAL_CIRCLES):
tries = 0
if IMPORTED_SCIPY:
kdtree = KDTree([(x, y) for (x, y, _) in circles])
while True:
circle = generate_circle(
width, height, min_diameter, max_diameter)
elements, indexes = kdtree.query(
[(circle[0], circle[1])], k=12)
for element, index in zip(elements[0], indexes[0]):
if not np.isinf(element) and circle_intersection(*circle, *circles[index]):
break
else:
break
tries += 1
else:
while any(circle_intersection(*circle, *circle2) for circle2 in circles):
tries += 1
circle = generate_circle(
width, height, min_diameter, max_diameter)
print('{}/{} {}'.format(i, TOTAL_CIRCLES, tries))
circles.append(circle)
circle_draw(draw_image, image, *circle)
except (KeyboardInterrupt, SystemExit):
pass
image2.show()
if __name__ == '__main__':
main()
| 30.307692
| 148
| 0.576988
|
cf4d650780237d27c28c3ef4f710a95fadc69ddc
| 306
|
py
|
Python
|
pirates/kraken/KrakenAI.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/kraken/KrakenAI.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/kraken/KrakenAI.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
class KrakenAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('KrakenAI')
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
| 38.25
| 70
| 0.816993
|
9eb49d6dcd91153745aedd963e02c902a85d6d71
| 2,347
|
py
|
Python
|
theano/misc/check_duplicate_key.py
|
royxue/Theano
|
626104a8c2b16898d270dc99e16a3ddb4a74678e
|
[
"BSD-3-Clause"
] | 2
|
2015-01-20T04:53:37.000Z
|
2015-01-20T04:53:40.000Z
|
theano/misc/check_duplicate_key.py
|
RoyXue/Theano
|
626104a8c2b16898d270dc99e16a3ddb4a74678e
|
[
"BSD-3-Clause"
] | null | null | null |
theano/misc/check_duplicate_key.py
|
RoyXue/Theano
|
626104a8c2b16898d270dc99e16a3ddb4a74678e
|
[
"BSD-3-Clause"
] | null | null | null |
import pickle
import os, sys
import theano
DISPLAY_DUPLICATE_KEYS = False
DISPLAY_MOST_FREQUENT_DUPLICATE_CCODE = False
dirs = []
if len(sys.argv) > 1:
for compiledir in sys.argv[1:]:
dirs.extend([os.path.join(compiledir, d) for d in os.listdir(compiledir)])
else:
dirs = os.listdir(theano.config.compiledir)
dirs = [os.path.join(theano.config.compiledir, d) for d in dirs]
keys = {} # key -> nb seen
mods = {}
for dir in dirs:
key = None
try:
f = open(os.path.join(dir, "key.pkl"))
key = f.read()
f.close()
keys.setdefault(key, 0)
keys[key] += 1
del f
except IOError:
# print dir, "don't have a key.pkl file"
pass
try:
path = os.path.join(dir, "mod.cpp")
if not os.path.exists(path):
path = os.path.join(dir, "mod.cu")
f = open(path)
mod = f.read()
f.close()
mods.setdefault(mod, ())
mods[mod] += (key,)
del mod
del f
del path
except IOError:
print dir, "don't have a mod.{cpp,cu} file"
pass
if DISPLAY_DUPLICATE_KEYS:
    for k, v in keys.items():
        if v > 1:
            print("Duplicate key (%i copies): %s" % (v, pickle.loads(k)))
nbs_keys = {} # nb seen -> how many keys
for val in keys.values():
nbs_keys.setdefault(val, 0)
nbs_keys[val] += 1
nbs_mod = {} # nb seen -> how many keys
nbs_mod_to_key = {} # nb seen -> keys
more_than_one = 0
for mod, kk in mods.items():
val = len(kk)
nbs_mod.setdefault(val, 0)
nbs_mod[val] += 1
if val > 1:
more_than_one += 1
nbs_mod_to_key[val] = kk
if DISPLAY_MOST_FREQUENT_DUPLICATE_CCODE:
m = max(nbs_mod.keys())
print "The keys associated to the mod.{cpp,cu} with the most number of copy:"
for kk in nbs_mod_to_key[m]:
kk = cPickle.loads(kk)
print kk
print "key.pkl histograph"
l = nbs_keys.items()
l.sort()
print l
print "mod.{cpp,cu} histogram"
l = nbs_mod.items()
l.sort()
print l
total = sum([len(k) for k in mods.values()])
uniq = len(mods)
useless = total - uniq
print "mod.{cpp,cu} total:", total
print "mod.{cpp,cu} uniq:", uniq
print "mod.{cpp,cu} with more than 1 copy:", more_than_one
print "mod.{cpp,cu} useless:", useless, float(useless)/total*100, "%"
print "nb directory", len(dirs)
| 24.968085
| 82
| 0.599489
|
121aa6671885653199df30676926ef804787b00f
| 4,737
|
py
|
Python
|
tests/test_sources.py
|
aaronspring/climetlab
|
05a478d373f29415304064ca2449b19121e0a856
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sources.py
|
aaronspring/climetlab
|
05a478d373f29415304064ca2449b19121e0a856
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sources.py
|
aaronspring/climetlab
|
05a478d373f29415304064ca2449b19121e0a856
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
import sys
import pytest
from climetlab import load_source, source
def test_file_source_grib():
s = load_source("file", "docs/examples/test.grib")
assert len(s) == 2
def test_file_source_netcdf():
s = load_source("file", "docs/examples/test.nc")
assert len(s) == 2
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Version 3.7 or greater needed")
def test_file_source_shortcut():
s = source.file("docs/examples/test.grib")
assert len(s) == 2
def test_file_source_mars():
if not os.path.exists(os.path.expanduser("~/.ecmwfapirc")):
pytest.skip("No ~/.ecmwfapirc")
s = load_source(
"mars",
param=["2t", "msl"],
levtype="sfc",
area=[50, -50, 20, 50],
grid=[1, 1],
date="2012-12-13",
)
assert len(s) == 2
def test_file_source_cds_grib():
if not os.path.exists(os.path.expanduser("~/.cdsapirc")):
pytest.skip("No ~/.cdsapirc")
s = load_source(
"cds",
"reanalysis-era5-single-levels",
variable=["2t", "msl"],
product_type="reanalysis",
area=[50, -50, 20, 50],
date="2012-12-12",
time="12:00",
)
assert len(s) == 2
def test_file_source_cds_netcdf():
if not os.path.exists(os.path.expanduser("~/.cdsapirc")):
pytest.skip("No ~/.cdsapirc")
s = load_source(
"cds",
"reanalysis-era5-single-levels",
variable=["2t", "msl"],
product_type="reanalysis",
area=[50, -50, 20, 50],
date="2012-12-12",
time="12:00",
format="netcdf",
)
assert len(s) == 2
def test_url_source_1():
load_source("url", "http://download.ecmwf.int/test-data/metview/gallery/temp.bufr")
def test_url_source_2():
load_source(
"url", "https://github.com/ecmwf/climetlab/raw/master/docs/examples/test.grib"
)
def test_url_source_3():
load_source(
"url", "https://github.com/ecmwf/climetlab/raw/master/docs/examples/test.nc"
)
def zarr_not_installed():
try:
import s3fs
import zarr
return False
except ImportError:
return True
S3_URL = "https://storage.ecmwf.europeanweather.cloud/s2s-ai-competition/data/fixtures"
@pytest.mark.skipif(zarr_not_installed(), reason="Zarr or S3FS not installed")
def test_zarr_source_1():
source = load_source(
"zarr-s3",
f"{S3_URL}/0.1.20/zarr/mini-rt-20200102.zarr",
)
ds = source.to_xarray()
assert len(ds.forecast_time) == 1
@pytest.mark.skipif(zarr_not_installed(), reason="Zarr or S3FS not installed")
def test_zarr_source_2():
import datetime
from climetlab.utils.dates import to_datetime_list
source = load_source(
"zarr-s3",
[
f"{S3_URL}/0.1.20/zarr/mini-rt-20200109.zarr",
f"{S3_URL}/0.1.20/zarr/mini-rt-20200102.zarr",
],
)
ds = source.to_xarray()
assert len(ds.forecast_time) == 2
dates = to_datetime_list(ds.forecast_time)
assert dates[0] == datetime.datetime(2020, 1, 2)
assert dates[1] == datetime.datetime(2020, 1, 9)
dates = to_datetime_list(ds.forecast_time.values)
assert dates[0] == datetime.datetime(2020, 1, 2)
assert dates[1] == datetime.datetime(2020, 1, 9)
@pytest.mark.skipif(zarr_not_installed(), reason="Zarr or S3FS not installed")
def test_zarr_source_3():
import datetime
import numpy as np
from climetlab.utils.dates import to_datetime_list
source = load_source(
"zarr-s3",
[
f"{S3_URL}/0.1.20/zarr/mini-hc-20200109.zarr",
f"{S3_URL}/0.1.20/zarr/mini-hc-20200102.zarr",
],
)
ds = source.to_xarray()
assert len(ds.forecast_time) == 8
dates = to_datetime_list(ds.forecast_time)
assert dates[0] == datetime.datetime(2000, 1, 2)
assert dates[1] == datetime.datetime(2000, 1, 9)
assert dates[2] == datetime.datetime(2001, 1, 2)
assert dates[3] == datetime.datetime(2001, 1, 9)
dates = to_datetime_list(ds.forecast_time.values)
assert dates[0] == datetime.datetime(2000, 1, 2)
assert dates[1] == datetime.datetime(2000, 1, 9)
assert dates[2] == datetime.datetime(2001, 1, 2)
assert dates[3] == datetime.datetime(2001, 1, 9)
if __name__ == "__main__":
test_zarr_source_2()
| 25.605405
| 87
| 0.634157
|
eb86946feacc1791e66d5f9fdbed2211e94e1d87
| 4,221
|
py
|
Python
|
library/nsxt_fabric_details.py
|
VuppalaJagadeesh/ansible-for-nsxt
|
d7605e1d4d154e2664c062c47191d374011f025c
|
[
"BSD-2-Clause"
] | 1
|
2019-05-21T09:13:58.000Z
|
2019-05-21T09:13:58.000Z
|
library/nsxt_fabric_details.py
|
VuppalaJagadeesh/ansible-for-nsxt
|
d7605e1d4d154e2664c062c47191d374011f025c
|
[
"BSD-2-Clause"
] | null | null | null |
library/nsxt_fabric_details.py
|
VuppalaJagadeesh/ansible-for-nsxt
|
d7605e1d4d154e2664c062c47191d374011f025c
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
#
# Copyright © 2015 VMware, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#__author__ = 'VJ49'
import yaml
import yamlordereddictloader
from collections import OrderedDict
import paramiko
import time
from pyVmomi import vim, vmodl
from pyVim import connect
from pyVim.connect import SmartConnect, SmartConnectNoSSL
import logging
logger = logging.getLogger('nsxt_fabric_details')
hdlr = logging.FileHandler('/var/log/chaperone/ChaperoneNSXtLog.log')
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(funcName)s: %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(10)
def getting_thumbprint(module,pi):
try:
command = "openssl x509 -in /etc/vmware/ssl/rui.crt -fingerprint -sha256 -noout"
(sshin1, sshout1, ssherr1) = pi.exec_command(command)
        out = sshout1.read().decode()  # exec_command returns bytes; decode before string handling
output = out.split("=")[1]
logger.info(output)
return output.rstrip("\n")
except Exception as error:
logger.info("Error Occured:%s" %(error))
module.fail_json(msg="Error Occured: %s" %error)
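# Illustrative sketch (not part of the original module): the openssl command in
# getting_thumbprint() is assumed to print a single line of the form
# "SHA256 Fingerprint=AB:CD:...:EF", so the split/rstrip above reduces to:
#
#   sample = "SHA256 Fingerprint=AB:CD:EF:01:23\n"   # hypothetical output
#   sample.split("=")[1].rstrip("\n")                # -> "AB:CD:EF:01:23"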
def main():
module = AnsibleModule(
argument_spec=dict(
),
supports_check_mode=True
)
username = "root"
final_dict = {}
main_list = list()
main_dict = {}
stream = open('/var/lib/chaperone/answerfile.yml', 'r')
dict1 = yaml.load(stream, Loader=yamlordereddictloader.Loader)
try:
for key in dict1:
            if key.startswith('esxi_compute'):
if "ip" in key:
main_dict["display_name"]=dict1[key]
main_dict["ip_address"]=dict1[key]
logger.info(main_dict)
if "os_version" in key:
main_dict["os_version"]=dict1[key]
if "password" in key:
main_dict["host_password"]=dict1[key]
logger.info(main_dict)
main_list.append(main_dict)
logger.info(main_dict)
logger.info(main_list)
main_dict={}
logger.info(main_list)
pi = paramiko.client.SSHClient()
pi.load_system_host_keys()
pi.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
for i in range(0,len(main_list)):
logger.info(main_list[i]["ip_address"])
pi.connect(main_list[i]["ip_address"], 22, username, main_list[i]["host_password"])
logger.info('Esxi host connection succeed...........')
thumb_prints= getting_thumbprint(module,pi)
main_list[i]["host_thumbprint"]=thumb_prints
logger.info(main_list)
final_dict['fabric_host_nodes']=main_list
module.exit_json(changed=True, result=final_dict, msg= "Successfully got the Fabric Host Nodes information")
except Exception as err:
module.fail_json(changed=False, msg= "Failure: %s" %(err))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| 41.792079
| 117
| 0.646766
|
1d882f0a1abc80177beb7513a64d77ac407b8cdf
| 5,027
|
py
|
Python
|
sdk/python/pulumi_aws/elasticache/security_group.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticache/security_group.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticache/security_group.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class SecurityGroup(pulumi.CustomResource):
description: pulumi.Output[str]
"""
description for the cache security group. Defaults to "Managed by Pulumi".
"""
name: pulumi.Output[str]
"""
Name for the cache security group. This value is stored as a lowercase string.
"""
security_group_names: pulumi.Output[list]
"""
List of EC2 security group names to be
authorized for ingress to the cache security group
"""
def __init__(__self__, resource_name, opts=None, description=None, name=None, security_group_names=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an ElastiCache Security Group to control access to one or more cache
clusters.
> **NOTE:** ElastiCache Security Groups are for use only when working with an
ElastiCache cluster **outside** of a VPC. If you are using a VPC, see the
ElastiCache Subnet Group resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
bar_security_group = aws.ec2.SecurityGroup("barSecurityGroup")
bar_elasticache_security_group_security_group = aws.elasticache.SecurityGroup("barElasticache/securityGroupSecurityGroup", security_group_names=[bar_security_group.name])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: description for the cache security group. Defaults to "Managed by Pulumi".
:param pulumi.Input[str] name: Name for the cache security group. This value is stored as a lowercase string.
:param pulumi.Input[list] security_group_names: List of EC2 security group names to be
authorized for ingress to the cache security group
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if description is None:
description = 'Managed by Pulumi'
__props__['description'] = description
__props__['name'] = name
if security_group_names is None:
raise TypeError("Missing required property 'security_group_names'")
__props__['security_group_names'] = security_group_names
super(SecurityGroup, __self__).__init__(
'aws:elasticache/securityGroup:SecurityGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, description=None, name=None, security_group_names=None):
"""
Get an existing SecurityGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: description for the cache security group. Defaults to "Managed by Pulumi".
:param pulumi.Input[str] name: Name for the cache security group. This value is stored as a lowercase string.
:param pulumi.Input[list] security_group_names: List of EC2 security group names to be
authorized for ingress to the cache security group
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["description"] = description
__props__["name"] = name
__props__["security_group_names"] = security_group_names
return SecurityGroup(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.7
| 178
| 0.681918
|
4c6de8d6d3839ec0591fd9933d31099140c6ee07
| 176
|
py
|
Python
|
modeling/backbone/efficientnet_pytorch/temp.py
|
sunggukcha/deeplabs
|
739be4accdc27fbb912c131aaafc9fcf10e04929
|
[
"MIT"
] | 34
|
2019-08-07T20:44:15.000Z
|
2021-12-05T10:23:47.000Z
|
modeling/backbone/efficientnet_pytorch/temp.py
|
toluwajosh/deeplabs
|
59e292e6777d1e53ed7716b7afd3c3489d57f61a
|
[
"MIT"
] | 9
|
2020-05-08T07:55:12.000Z
|
2020-12-23T08:36:08.000Z
|
modeling/backbone/efficientnet_pytorch/temp.py
|
toluwajosh/deeplabs
|
59e292e6777d1e53ed7716b7afd3c3489d57f61a
|
[
"MIT"
] | 6
|
2020-03-09T15:42:51.000Z
|
2021-09-12T19:33:08.000Z
|
from model import EfficientNet
from torchsummary import summary
model = EfficientNet.from_pretrained('efficientnet-b7')
model.cuda()
summary(model, input_size=(3, 360, 640))
| 22
| 55
| 0.795455
|
6e67212952c1c12e88c50c7cbc68aca8937ca807
| 6,979
|
py
|
Python
|
lib/deepsecurity/policies.py
|
kn0630/vulssimulator_ds
|
c0b9ebb11677d616408b35b74aa47d52d59703b2
|
[
"Apache-2.0"
] | 6
|
2016-10-01T07:48:15.000Z
|
2016-12-06T02:18:21.000Z
|
lib/deepsecurity/policies.py
|
kn0630/vulssimulator_ds
|
c0b9ebb11677d616408b35b74aa47d52d59703b2
|
[
"Apache-2.0"
] | null | null | null |
lib/deepsecurity/policies.py
|
kn0630/vulssimulator_ds
|
c0b9ebb11677d616408b35b74aa47d52d59703b2
|
[
"Apache-2.0"
] | null | null | null |
# standard library
import datetime
# 3rd party libraries
# project libraries
from . import core
from . import translation
class Policies(core.CoreDict):
def __init__(self, manager=None):
core.CoreDict.__init__(self)
self.manager = manager
self.log = self.manager.log if self.manager else None
def get(self):
"""
Get all of the policies from Deep Security
"""
call = self.manager._get_request_format(
call='securityProfileRetrieveAll')
response = self.manager._request(call)
if response and response['status'] == 200:
            if not isinstance(response['data'], list):
                response['data'] = [response['data']]
for policy in response['data']:
policy_obj = Policy(self.manager, policy, self.log)
if policy_obj:
self[policy_obj.id] = policy_obj
self.log(
"Added Policy {}".format(policy_obj.id), level='debug')
return len(self)
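# Illustrative usage sketch (assumption, not part of the original library): given
# an authenticated Manager instance `dsm`, the collection fills itself from the
# SOAP API and reports how many policies it now holds (CoreDict is assumed to
# behave like a dict here):
#
#   policies = Policies(manager=dsm)
#   count = policies.get()              # returns len(policies)
#   for policy_id, policy in policies.items():
#       print(policy_id)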
class Rules(core.CoreDict):
def __init__(self, manager=None):
core.CoreDict.__init__(self)
self.manager = manager
self.log = self.manager.log if self.manager else None
def get(self, intrusion_prevention=True, firewall=True, integrity_monitoring=True, log_inspection=True, web_reputation=True, application_types=True):
"""
Get all of the rules from Deep Security
"""
# determine which rules to get from the Manager()
rules_to_get = {
'DPIRuleRetrieveAll': intrusion_prevention,
'firewallRuleRetrieveAll': firewall,
'integrityRuleRetrieveAll': integrity_monitoring,
'logInspectionRuleRetrieveAll': log_inspection,
'applicationTypeRetrieveAll': application_types,
}
for call, get in list(rules_to_get.items()):
rule_key = translation.Terms.get(call).replace(
'_retrieve_all', '').replace('_rule', '')
self[rule_key] = core.CoreDict()
if get:
soap_call = self.manager._get_request_format(call=call)
if call == 'DPIRuleRetrieveAll':
self.log(
"Calling {}. This may take 15-30 seconds as the call returns a substantial amount of data".format(call), level='warning')
response = self.manager._request(soap_call)
if response and response['status'] == 200:
                    if not isinstance(response['data'], list):
                        response['data'] = [response['data']]
for i, rule in enumerate(response['data']):
rule_obj = Rule(
self.manager, rule, self.log, rule_type=rule_key)
if rule_obj:
if rule_key == 'intrusion_prevention' and rule_obj.cve_numbers:
rule_obj.cve_numbers = rule_obj.cve_numbers.split(
', ')
                                if isinstance(rule_obj.cve_numbers, str):
                                    rule_obj.cve_numbers = [rule_obj.cve_numbers]
rule_id = '{}-{: >10}'.format(rule_key, i)
if 'id' in dir(rule_obj):
rule_id = rule_obj.id
elif 'tbuid' in dir(rule_obj):
rule_id = rule_obj.tbuid
self[rule_key][rule_id] = rule_obj
self.log(
"Added Rule {} from call {}".format(rule_id, call), level='debug')
return len(self)
class IPLists(core.CoreDict):
def __init__(self, manager=None):
core.CoreDict.__init__(self)
self.manager = manager
self.log = self.manager.log if self.manager else None
def get(self):
"""
Get all of the IP Lists from Deep Security
"""
soap_call = self.manager._get_request_format(call='IPListRetrieveAll')
response = self.manager._request(soap_call)
if response and response['status'] == 200:
for ip_list in response['data']:
ip_list_obj = IPList(self.manager, ip_list, self.log)
self[ip_list_obj.id] = ip_list_obj
return len(self)
class Policy(core.CoreObject):
def __init__(self, manager=None, api_response=None, log_func=None):
self.manager = manager
self.computers = core.CoreDict()
self.rules = core.CoreDict()
if api_response:
self._set_properties(api_response, log_func)
self._flatten_rules()
def _flatten_rules(self):
"""
Flatten the various module rules into a master list
"""
for rule_type in [
'intrusion_prevention_rule_ids',
'firewall_rule_ids',
'integrity_monitoring_rule_ids',
'log_inspection_rule_ids',
]:
rules = getattr(self, rule_type)
if rules:
for rule in rules['item']:
self.rules[
'{}-{}'.format(rule_type.replace('rule_ids', ''), rule)] = None
def save(self):
"""
Save any changes made to the policy
"""
result = False
soap_call = self.manager._get_request_format(
call='securityProfileSave')
soap_call['data'] = {'sp': self.to_dict()}
if 'manager' in soap_call['data']['sp']:
del(soap_call['data']['sp']['manager'])
response = self.manager._request(soap_call)
if response['status'] == 200:
result = True
else:
result = False
if 'log' in dir(self):
self.log(
"Could not save the policy. Returned: {}".format(response), level='error')
return result
class Rule(core.CoreObject):
def __init__(self, manager=None, api_response=None, log_func=None, rule_type=None):
self.manager = manager
self.rule_type = rule_type
self.policies = core.CoreDict()
if api_response:
self._set_properties(api_response, log_func)
class IPList(core.CoreObject):
def __init__(self, manager=None, api_response=None, log_func=None, rule_type=None):
self.manager = manager
self.rule_type = rule_type
self.addresses = []
if api_response:
self._set_properties(api_response, log_func)
self._split_items()
def _split_items(self):
"""
Split the individual items in an IP List into entries
"""
if getattr(self, 'items') and "\n" in self.items:
self.addresses = self.items.split('\n')
else:
self.addresses.append(self.items.strip())
| 35.426396
| 153
| 0.551082
|
a1cc5a98ce9172e6a20d4505ed758b3af1121a30
| 422
|
py
|
Python
|
setup.py
|
paulosoaresua/mlbase
|
8b60b80fd1745d6565fd38e9bc9d2e203033ae27
|
[
"MIT"
] | null | null | null |
setup.py
|
paulosoaresua/mlbase
|
8b60b80fd1745d6565fd38e9bc9d2e203033ae27
|
[
"MIT"
] | null | null | null |
setup.py
|
paulosoaresua/mlbase
|
8b60b80fd1745d6565fd38e9bc9d2e203033ae27
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open('requirements.txt') as f:
libs = [lib.strip() for lib in f.readlines() if lib]
setup(
name='MLBase',
version='0.1.0_dev',
description='General structure to evaluate ML models.',
author='Paulo Soares',
packages=find_packages(),
install_requires=libs,
author_email='paulosoares@email.arizona.edu'
)
| 26.375
| 63
| 0.637441
|
cbe59536593b2748ea16f3a1bd169e24d1a124a2
| 9,823
|
py
|
Python
|
core/python/kungfu/yijinjing/journal.py
|
awesome-archive/kungfu
|
a8dd41fbbfbf734e00daeaca04f19b391f525e77
|
[
"Apache-2.0"
] | null | null | null |
core/python/kungfu/yijinjing/journal.py
|
awesome-archive/kungfu
|
a8dd41fbbfbf734e00daeaca04f19b391f525e77
|
[
"Apache-2.0"
] | null | null | null |
core/python/kungfu/yijinjing/journal.py
|
awesome-archive/kungfu
|
a8dd41fbbfbf734e00daeaca04f19b391f525e77
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
import glob
import re
import json
import pyyjj
import pandas as pd
import kungfu.yijinjing.msg as yjj_msg
os_sep = re.escape(os.sep)
JOURNAL_LOCATION_REGEX = '{}{}{}{}{}{}{}{}{}{}{}'.format(
r'(.*)', os_sep, # category
r'(.*)', os_sep, # group
r'(.*)', os_sep, # name
r'journal', os_sep, # mode
r'(.*)', os_sep, # mode
    r'(\w+)\.(\d+)\.journal',  # hash + page_id
)
JOURNAL_LOCATION_PATTERN = re.compile(JOURNAL_LOCATION_REGEX)
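# Illustrative example (hypothetical path, not part of the original module): on a
# POSIX system a journal file relative to the locator home such as
#   md/xtp/xtp_demo/journal/live/00000000.0001.journal
# matches JOURNAL_LOCATION_PATTERN with groups
#   ('md', 'xtp', 'xtp_demo', 'live', '00000000', '0001')
# i.e. category, group, name, mode, destination hash and page id, which is how
# collect_journal_locations() below interprets them.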
MODES = {
'live': pyyjj.mode.LIVE,
'data': pyyjj.mode.DATA,
'replay': pyyjj.mode.REPLAY,
'backtest': pyyjj.mode.BACKTEST,
'*': pyyjj.mode.LIVE
}
CATEGORIES = {
'md': pyyjj.category.MD,
'td': pyyjj.category.TD,
'strategy': pyyjj.category.STRATEGY,
'system': pyyjj.category.SYSTEM,
'*': pyyjj.category.SYSTEM
}
def find_mode(m):
for k in MODES:
if int(MODES[k]) == m:
return MODES[k]
return pyyjj.mode.LIVE
def find_category(c):
for k in CATEGORIES:
if int(CATEGORIES[k]) == c:
return CATEGORIES[k]
return pyyjj.category.SYSTEM
class Locator(pyyjj.locator):
def __init__(self, home):
pyyjj.locator.__init__(self)
self._home = home
def layout_dir(self, location, layout):
mode = pyyjj.get_mode_name(location.mode)
category = pyyjj.get_category_name(location.category)
p = os.path.join(self._home, category, location.group, location.name, pyyjj.get_layout_name(layout), mode)
if not os.path.exists(p):
os.makedirs(p)
return p
def layout_file(self, location, layout, name):
return os.path.join(self.layout_dir(location, layout), "{}.{}".format(name, pyyjj.get_layout_name(layout)))
def default_to_system_db(self, location, name):
file = os.path.join(self.layout_dir(location, pyyjj.layout.SQLITE), "{}.{}".format(name, pyyjj.get_layout_name(pyyjj.layout.SQLITE)))
if os.path.exists(file):
return file
else:
system_location = pyyjj.location(pyyjj.mode.LIVE, pyyjj.category.SYSTEM, "etc", "kungfu", self)
system_file = os.path.join(self.layout_dir(system_location, pyyjj.layout.SQLITE),
"{}.{}".format(name, pyyjj.get_layout_name(pyyjj.layout.SQLITE)))
shutil.copy(system_file, file)
return file
def list_page_id(self, location, dest_id):
page_ids = []
for journal in glob.glob(os.path.join(self.layout_dir(location, pyyjj.layout.JOURNAL), hex(dest_id)[2:] + '.*.journal')):
match = JOURNAL_LOCATION_PATTERN.match(journal[len(self._home) + 1:])
if match:
page_id = match.group(6)
page_ids.append(int(page_id))
return page_ids
def collect_journal_locations(ctx):
search_path = os.path.join(ctx.home, ctx.category, ctx.group, ctx.name, 'journal', ctx.mode, '*.journal')
locations = {}
for journal in glob.glob(search_path):
match = JOURNAL_LOCATION_PATTERN.match(journal[len(ctx.home) + 1:])
if match:
category = match.group(1)
group = match.group(2)
name = match.group(3)
mode = match.group(4)
dest = match.group(5)
page_id = match.group(6)
uname = '{}/{}/{}/{}'.format(category, group, name, mode)
uid = pyyjj.hash_str_32(uname)
if uid in locations:
if dest in locations[uid]['readers']:
locations[uid]['readers'][dest].append(page_id)
else:
locations[uid]['readers'][dest] = [page_id]
else:
locations[uid] = {
'category': category,
'group': group,
'name': name,
'mode': mode,
'uname': uname,
'uid': pyyjj.hash_str_32(uname),
'readers': {
dest: [page_id]
}
}
ctx.logger.debug('found journal %s %s %s %s', MODES[mode], CATEGORIES[category], group, name)
else:
ctx.logger.warn('unable to match journal file %s to pattern %s', journal, JOURNAL_LOCATION_REGEX)
return locations
def find_sessions(ctx):
io_device = pyyjj.io_device(ctx.journal_util_location)
ctx.session_count = 1
sessions_df = pd.DataFrame(columns=[
'id', 'mode', 'category', 'group', 'name', 'begin_time', 'end_time', 'closed', 'duration', 'frame_count'
])
locations = collect_journal_locations(ctx)
dest_pub = '{:08x}'.format(0)
for key in locations:
record = locations[key]
location = pyyjj.location(MODES[record['mode']], CATEGORIES[record['category']], record['group'], record['name'], ctx.locator)
if dest_pub in record['readers']:
reader = io_device.open_reader_to_subscribe()
for dest_id in record['readers']:
reader.join(location, int(dest_id, 16), 0)
find_sessions_from_reader(ctx, sessions_df, reader, record['mode'], record['category'], record['group'], record['name'])
return sessions_df
def find_session(ctx, session_id):
all_sessions = find_sessions(ctx)
return all_sessions[all_sessions['id'] == session_id].iloc[0]
def find_sessions_from_reader(ctx, sessions_df, reader, mode, category, group, name):
session_start_time = -1
last_frame_time = 0
frame_count = 0
while reader.data_available():
frame = reader.current_frame()
frame_count = frame_count + 1
if frame.msg_type == yjj_msg.SessionStart:
if session_start_time > 0:
sessions_df.loc[len(sessions_df)] = [
ctx.session_count, mode, category, group, name,
session_start_time, last_frame_time, False,
last_frame_time - session_start_time, frame_count - 1
]
session_start_time = frame.trigger_time
ctx.session_count = ctx.session_count + 1
else:
session_start_time = frame.trigger_time
frame_count = 1
elif frame.msg_type == yjj_msg.SessionEnd:
if session_start_time > 0:
sessions_df.loc[len(sessions_df)] = [
ctx.session_count, mode, category, group, name,
session_start_time, frame.gen_time, True,
frame.gen_time - session_start_time, frame_count
]
session_start_time = -1
frame_count = 0
ctx.session_count = ctx.session_count + 1
last_frame_time = frame.gen_time
reader.next()
if session_start_time > 0:
sessions_df.loc[len(sessions_df)] = [
ctx.session_count, mode, category, group, name,
session_start_time, last_frame_time, False,
last_frame_time - session_start_time, frame_count
]
ctx.session_count = ctx.session_count + 1
def make_location_from_dict(ctx, location):
return pyyjj.location(MODES[location['mode']], CATEGORIES[location['category']], location['group'], location['name'], ctx.locator)
def trace_journal(ctx, session_id, io_type):
trace_df = pd.DataFrame(columns=[
'gen_time', 'trigger_time', 'source', 'dest', 'msg_type', 'frame_length', 'data_length'
])
session = find_session(ctx, session_id)
uname = '{}/{}/{}/{}'.format(session['category'], session['group'], session['name'], session['mode'])
uid = pyyjj.hash_str_32(uname)
ctx.category = '*'
ctx.group = '*'
ctx.name = '*'
ctx.mode = '*'
locations = collect_journal_locations(ctx)
location = locations[uid]
home = make_location_from_dict(ctx, location)
io_device = pyyjj.io_device(home)
reader = io_device.open_reader_to_subscribe()
if io_type == 'out' or io_type == 'all':
for dest in location['readers']:
dest_id = int(dest, 16)
reader.join(home, dest_id, session['begin_time'])
if (io_type == 'in' or io_type == 'all') and not (home.category == pyyjj.category.SYSTEM and home.group == 'master' and home.name == 'master'):
master_home_uid = pyyjj.hash_str_32('system/master/master/live')
master_home_location = make_location_from_dict(ctx, locations[master_home_uid])
reader.join(master_home_location, 0, session['begin_time'])
master_cmd_uid = pyyjj.hash_str_32('system/master/{:08x}/live'.format(location['uid']))
master_cmd_location = make_location_from_dict(ctx, locations[master_cmd_uid])
reader.join(master_cmd_location, location['uid'], session['begin_time'])
while reader.data_available() and reader.current_frame().gen_time <= session['end_time']:
frame = reader.current_frame()
trace_df.loc[len(trace_df)] = [
frame.gen_time, frame.trigger_time,
locations[frame.source]['uname'],
'public' if frame.dest == 0 else locations[frame.dest]['uname'],
frame.msg_type, frame.frame_length, frame.data_length
]
if frame.dest == home.uid and (frame.msg_type == yjj_msg.RequestReadFrom or frame.msg_type == yjj_msg.RequestReadFromPublic):
request = pyyjj.get_RequestReadFrom(frame)
source_location = make_location_from_dict(ctx, locations[request.source_id])
reader.join(source_location, location['uid'] if frame.msg_type == yjj_msg.RequestReadFrom else 0, request.from_time)
if frame.dest == home.uid and frame.msg_type == yjj_msg.Deregister:
loc = json.loads(frame.data_as_string())
reader.disjoin(loc['uid'])
reader.next()
return trace_df
| 39.292
| 147
| 0.609692
|
2e777d0e35b2ee13b9f02168f22b742fdc524503
| 21,386
|
py
|
Python
|
pyscf/mp/mp2.py
|
LeonOtis/pyscf
|
98ba8106396ac4c90dc65207059773ce048b0ebf
|
[
"Apache-2.0"
] | 2
|
2021-08-03T12:32:25.000Z
|
2021-09-29T08:19:02.000Z
|
pyscf/mp/mp2.py
|
LeonOtis/pyscf
|
98ba8106396ac4c90dc65207059773ce048b0ebf
|
[
"Apache-2.0"
] | null | null | null |
pyscf/mp/mp2.py
|
LeonOtis/pyscf
|
98ba8106396ac4c90dc65207059773ce048b0ebf
|
[
"Apache-2.0"
] | 2
|
2020-06-01T05:31:38.000Z
|
2022-02-08T02:38:33.000Z
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
RMP2
'''
import time
from functools import reduce
import copy
import numpy
from pyscf import gto
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf import __config__
WITH_T2 = getattr(__config__, 'mp_mp2_with_t2', True)
def kernel(mp, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2,
verbose=logger.NOTE):
if mo_energy is None or mo_coeff is None:
if mp.mo_energy is None or mp.mo_coeff is None:
raise RuntimeError('mo_coeff, mo_energy are not initialized.\n'
'You may need to call mf.kernel() to generate them.')
mo_coeff = None
mo_energy = _mo_energy_without_core(mp, mp.mo_energy)
else:
# For backward compatibility. In pyscf-1.4 or earlier, mp.frozen is
# not supported when mo_energy or mo_coeff is given.
        assert (mp.frozen == 0 or mp.frozen is None)
if eris is None: eris = mp.ao2mo(mo_coeff)
nocc = mp.nocc
nvir = mp.nmo - nocc
eia = mo_energy[:nocc,None] - mo_energy[None,nocc:]
if with_t2:
t2 = numpy.empty((nocc,nocc,nvir,nvir), dtype=eris.ovov.dtype)
else:
t2 = None
emp2 = 0
for i in range(nocc):
if isinstance(eris.ovov, numpy.ndarray) and eris.ovov.ndim == 4:
            # When mf._eri is a custom integral tensor with the shape (n,n,n,n),
            # the ovov integrals might be in a 4-index tensor.
gi = eris.ovov[i]
else:
gi = numpy.asarray(eris.ovov[i*nvir:(i+1)*nvir])
gi = gi.reshape(nvir,nocc,nvir).transpose(1,0,2)
t2i = gi.conj()/lib.direct_sum('jb+a->jba', eia, eia[i])
emp2 += numpy.einsum('jab,jab', t2i, gi) * 2
emp2 -= numpy.einsum('jab,jba', t2i, gi)
if with_t2:
t2[i] = t2i
return emp2.real, t2
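# Illustrative note (restating the loop above, not an additional API): for a
# closed-shell reference the accumulation is the canonical MP2 energy
#
#   E(2) = sum_{ijab} t_ij^ab * ( 2*(ia|jb) - (ib|ja) )
#   t_ij^ab = (ia|jb)* / (e_i + e_j - e_a - e_b)
#
# with (ia|jb) the MO integrals stored row-wise in eris.ovov and e_p the
# canonical orbital energies.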
def make_rdm1(mp, t2=None, eris=None, verbose=logger.NOTE, ao_repr=False):
'''Spin-traced one-particle density matrix.
The occupied-virtual orbital response is not included.
dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
Kwargs:
ao_repr : boolean
            Whether to transform the 1-particle density matrix to AO
representation.
'''
from pyscf.cc import ccsd_rdm
doo, dvv = _gamma1_intermediates(mp, t2, eris)
nocc = doo.shape[0]
nvir = dvv.shape[0]
dov = numpy.zeros((nocc,nvir), dtype=doo.dtype)
dvo = dov.T
return ccsd_rdm._make_rdm1(mp, (doo, dov, dvo, dvv), with_frozen=True,
ao_repr=ao_repr)
def _gamma1_intermediates(mp, t2=None, eris=None):
if t2 is None: t2 = mp.t2
nmo = mp.nmo
nocc = mp.nocc
nvir = nmo - nocc
if t2 is None:
if eris is None: eris = mp.ao2mo()
mo_energy = _mo_energy_without_core(mp, mp.mo_energy)
eia = mo_energy[:nocc,None] - mo_energy[None,nocc:]
dtype = eris.ovov.dtype
else:
dtype = t2.dtype
dm1occ = numpy.zeros((nocc,nocc), dtype=dtype)
dm1vir = numpy.zeros((nvir,nvir), dtype=dtype)
for i in range(nocc):
if t2 is None:
gi = numpy.asarray(eris.ovov[i*nvir:(i+1)*nvir])
gi = gi.reshape(nvir,nocc,nvir).transpose(1,0,2)
t2i = gi.conj()/lib.direct_sum('jb+a->jba', eia, eia[i])
else:
t2i = t2[i]
l2i = t2i.conj()
dm1vir += numpy.einsum('jca,jcb->ba', l2i, t2i) * 2 \
- numpy.einsum('jca,jbc->ba', l2i, t2i)
dm1occ += numpy.einsum('iab,jab->ij', l2i, t2i) * 2 \
- numpy.einsum('iab,jba->ij', l2i, t2i)
return -dm1occ, dm1vir
def make_rdm2(mp, t2=None, eris=None, verbose=logger.NOTE):
r'''
Spin-traced two-particle density matrix in MO basis
dm2[p,q,r,s] = \sum_{sigma,tau} <p_sigma^\dagger r_tau^\dagger s_tau q_sigma>
Note the contraction between ERIs (in Chemist's notation) and rdm2 is
E = einsum('pqrs,pqrs', eri, rdm2)
'''
if t2 is None: t2 = mp.t2
nmo = nmo0 = mp.nmo
nocc = nocc0 = mp.nocc
nvir = nmo - nocc
if t2 is None:
if eris is None: eris = mp.ao2mo()
mo_energy = _mo_energy_without_core(mp, mp.mo_energy)
eia = mo_energy[:nocc,None] - mo_energy[None,nocc:]
    if not (mp.frozen == 0 or mp.frozen is None):
nmo0 = mp.mo_occ.size
nocc0 = numpy.count_nonzero(mp.mo_occ > 0)
moidx = get_frozen_mask(mp)
oidx = numpy.where(moidx & (mp.mo_occ > 0))[0]
vidx = numpy.where(moidx & (mp.mo_occ ==0))[0]
else:
moidx = oidx = vidx = None
dm1 = make_rdm1(mp, t2, eris, verbose)
dm1[numpy.diag_indices(nocc0)] -= 2
dm2 = numpy.zeros((nmo0,nmo0,nmo0,nmo0), dtype=dm1.dtype) # Chemist notation
#dm2[:nocc,nocc:,:nocc,nocc:] = t2.transpose(0,3,1,2)*2 - t2.transpose(0,2,1,3)
#dm2[nocc:,:nocc,nocc:,:nocc] = t2.transpose(3,0,2,1)*2 - t2.transpose(2,0,3,1)
for i in range(nocc):
if t2 is None:
gi = numpy.asarray(eris.ovov[i*nvir:(i+1)*nvir])
gi = gi.reshape(nvir,nocc,nvir).transpose(1,0,2)
t2i = gi.conj()/lib.direct_sum('jb+a->jba', eia, eia[i])
else:
t2i = t2[i]
# dm2 was computed as dm2[p,q,r,s] = < p^\dagger r^\dagger s q > in the
# above. Transposing it so that it be contracted with ERIs (in Chemist's
# notation):
# E = einsum('pqrs,pqrs', eri, rdm2)
dovov = t2i.transpose(1,0,2)*2 - t2i.transpose(2,0,1)
dovov *= 2
if moidx is None:
dm2[i,nocc:,:nocc,nocc:] = dovov
dm2[nocc:,i,nocc:,:nocc] = dovov.conj().transpose(0,2,1)
else:
dm2[oidx[i],vidx[:,None,None],oidx[:,None],vidx] = dovov
dm2[vidx[:,None,None],oidx[i],vidx[:,None],oidx] = dovov.conj().transpose(0,2,1)
# Be careful with convention of dm1 and dm2
# dm1[q,p] = <p^\dagger q>
# dm2[p,q,r,s] = < p^\dagger r^\dagger s q >
# E = einsum('pq,qp', h1, dm1) + .5 * einsum('pqrs,pqrs', eri, dm2)
# When adding dm1 contribution, dm1 subscripts need to be flipped
for i in range(nocc0):
dm2[i,i,:,:] += dm1.T * 2
dm2[:,:,i,i] += dm1.T * 2
dm2[:,i,i,:] -= dm1.T
dm2[i,:,:,i] -= dm1
for i in range(nocc0):
for j in range(nocc0):
dm2[i,i,j,j] += 4
dm2[i,j,j,i] -= 2
return dm2#.transpose(1,0,3,2)
def get_nocc(mp):
if mp._nocc is not None:
return mp._nocc
elif mp.frozen is None:
nocc = numpy.count_nonzero(mp.mo_occ > 0)
assert(nocc > 0)
return nocc
elif isinstance(mp.frozen, (int, numpy.integer)):
nocc = numpy.count_nonzero(mp.mo_occ > 0) - mp.frozen
assert(nocc > 0)
return nocc
elif isinstance(mp.frozen[0], (int, numpy.integer)):
occ_idx = mp.mo_occ > 0
occ_idx[list(mp.frozen)] = False
nocc = numpy.count_nonzero(occ_idx)
assert(nocc > 0)
return nocc
else:
raise NotImplementedError
def get_nmo(mp):
if mp._nmo is not None:
return mp._nmo
elif mp.frozen is None:
return len(mp.mo_occ)
elif isinstance(mp.frozen, (int, numpy.integer)):
return len(mp.mo_occ) - mp.frozen
elif isinstance(mp.frozen[0], (int, numpy.integer)):
return len(mp.mo_occ) - len(set(mp.frozen))
else:
raise NotImplementedError
def get_frozen_mask(mp):
'''Get boolean mask for the restricted reference orbitals.
In the returned boolean (mask) array of frozen orbital indices, the
    element is False if it corresponds to a frozen orbital.
'''
    moidx = numpy.ones(mp.mo_occ.size, dtype=bool)
if mp._nmo is not None:
moidx[mp._nmo:] = False
elif mp.frozen is None:
pass
elif isinstance(mp.frozen, (int, numpy.integer)):
moidx[:mp.frozen] = False
elif len(mp.frozen) > 0:
moidx[list(mp.frozen)] = False
else:
raise NotImplementedError
return moidx
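# Illustrative example (hypothetical numbers, not part of the original API): for
# a system with 10 molecular orbitals and two frozen core orbitals, e.g.
# mp.frozen = [0, 1], get_frozen_mask(mp) would return
#   array([False, False, True, True, True, True, True, True, True, True])
# so the first two orbitals are excluded from the correlation treatment.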
def as_scanner(mp):
'''Generating a scanner/solver for MP2 PES.
The returned solver is a function. This function requires one argument
"mol" as input and returns total MP2 energy.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
MP2 and the underlying SCF objects (conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
Examples::
>>> from pyscf import gto, scf, mp
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
>>> mp2_scanner = mp.MP2(scf.RHF(mol)).as_scanner()
>>> e_tot = mp2_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
>>> e_tot = mp2_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
'''
if isinstance(mp, lib.SinglePointScanner):
return mp
logger.info(mp, 'Set %s as a scanner', mp.__class__)
class MP2_Scanner(mp.__class__, lib.SinglePointScanner):
def __init__(self, mp):
self.__dict__.update(mp.__dict__)
self._scf = mp._scf.as_scanner()
def __call__(self, mol_or_geom, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
for key in ('with_df', 'with_solvent'):
sub_mod = getattr(self, key, None)
if sub_mod:
sub_mod.reset(mol)
mf_scanner = self._scf
mf_scanner(mol)
self.mol = mol
self.mo_energy = mf_scanner.mo_energy
self.mo_coeff = mf_scanner.mo_coeff
self.mo_occ = mf_scanner.mo_occ
self.kernel(**kwargs)
return self.e_tot
return MP2_Scanner(mp)
class MP2(lib.StreamObject):
def __init__(self, mf, frozen=0, mo_coeff=None, mo_occ=None):
if mo_coeff is None: mo_coeff = mf.mo_coeff
if mo_occ is None: mo_occ = mf.mo_occ
self.mol = mf.mol
self._scf = mf
self.verbose = self.mol.verbose
self.stdout = self.mol.stdout
self.max_memory = mf.max_memory
self.frozen = frozen
##################################################
# don't modify the following attributes, they are not input options
self.mo_energy = mf.mo_energy
self.mo_coeff = mo_coeff
self.mo_occ = mo_occ
self._nocc = None
self._nmo = None
self.e_corr = None
self.t2 = None
self._keys = set(self.__dict__.keys())
@property
def nocc(self):
return self.get_nocc()
@nocc.setter
def nocc(self, n):
self._nocc = n
@property
def nmo(self):
return self.get_nmo()
@nmo.setter
def nmo(self, n):
self._nmo = n
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('')
log.info('******** %s ********', self.__class__)
log.info('nocc = %s, nmo = %s', self.nocc, self.nmo)
        if self.frozen != 0:
log.info('frozen orbitals %s', self.frozen)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
@property
def emp2(self):
return self.e_corr
@property
def e_tot(self):
return self.e_corr + self._scf.e_tot
def kernel(self, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2,
_kern=kernel):
'''
Args:
with_t2 : bool
Whether to generate and hold t2 amplitudes in memory.
'''
if self.verbose >= logger.WARN:
self.check_sanity()
self.dump_flags()
self.e_corr, self.t2 = _kern(self, mo_energy, mo_coeff,
eris, with_t2, self.verbose)
self._finalize()
return self.e_corr, self.t2
def _finalize(self):
'''Hook for dumping results and clearing up the object.'''
logger.note(self, 'E(%s) = %.15g E_corr = %.15g',
self.__class__.__name__, self.e_tot, self.e_corr)
return self
def ao2mo(self, mo_coeff=None):
return _make_eris(self, mo_coeff, verbose=self.verbose)
make_rdm1 = make_rdm1
make_rdm2 = make_rdm2
as_scanner = as_scanner
def density_fit(self, auxbasis=None, with_df=None):
from pyscf.mp import dfmp2
mymp = dfmp2.DFMP2(self._scf, self.frozen, self.mo_coeff, self.mo_occ)
if with_df is not None:
mymp.with_df = with_df
if mymp.with_df.auxbasis != auxbasis:
mymp.with_df = copy.copy(mymp.with_df)
mymp.with_df.auxbasis = auxbasis
return mymp
def nuc_grad_method(self):
from pyscf.grad import mp2
return mp2.Gradients(self)
RMP2 = MP2
from pyscf import scf
scf.hf.RHF.MP2 = lib.class_as_method(MP2)
scf.rohf.ROHF.MP2 = None
def _mo_energy_without_core(mp, mo_energy):
return mo_energy[get_frozen_mask(mp)]
def _mo_without_core(mp, mo):
return mo[:,get_frozen_mask(mp)]
def _mem_usage(nocc, nvir):
nmo = nocc + nvir
basic = ((nocc*nvir)**2 + nocc*nvir**2*2)*8 / 1e6
incore = nocc*nvir*nmo**2/2*8 / 1e6 + basic
outcore = basic
return incore, outcore, basic
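# Illustrative example (hypothetical sizes, not from the original source): with
# nocc = 50 occupied and nvir = 450 virtual orbitals (nmo = 500), the estimates
# above give roughly
#   basic  = ((50*450)**2 + 50*450**2*2) * 8 / 1e6   ~  4212 MB
#   incore = 50*450*500**2/2 * 8 / 1e6 + basic       ~ 26712 MB
# so a run of this size would normally take the out-of-core branch in
# _make_eris() unless max_memory is very large.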
class _ChemistsERIs:
def __init__(self, mp, mo_coeff=None):
if mo_coeff is None:
mo_coeff = mp.mo_coeff
self.mo_coeff = _mo_without_core(mp, mo_coeff)
def _make_eris(mp, mo_coeff=None, ao2mofn=None, verbose=None):
log = logger.new_logger(mp, verbose)
time0 = (time.clock(), time.time())
eris = _ChemistsERIs(mp, mo_coeff)
mo_coeff = eris.mo_coeff
nocc = mp.nocc
nmo = mp.nmo
nvir = nmo - nocc
mem_incore, mem_outcore, mem_basic = _mem_usage(nocc, nvir)
mem_now = lib.current_memory()[0]
max_memory = max(0, mp.max_memory - mem_now)
if max_memory < mem_basic:
log.warn('Not enough memory for integral transformation. '
'Available mem %s MB, required mem %s MB',
max_memory, mem_basic)
co = numpy.asarray(mo_coeff[:,:nocc], order='F')
cv = numpy.asarray(mo_coeff[:,nocc:], order='F')
if (mp.mol.incore_anyway or
(mp._scf._eri is not None and mem_incore < max_memory)):
log.debug('transform (ia|jb) incore')
if callable(ao2mofn):
eris.ovov = ao2mofn((co,cv,co,cv)).reshape(nocc*nvir,nocc*nvir)
else:
eris.ovov = ao2mo.general(mp._scf._eri, (co,cv,co,cv))
elif getattr(mp._scf, 'with_df', None):
# To handle the PBC or custom 2-electron with 3-index tensor.
# Call dfmp2.MP2 for efficient DF-MP2 implementation.
log.warn('DF-HF is found. (ia|jb) is computed based on the DF '
'3-tensor integrals.\n'
'You can switch to dfmp2.MP2 for better performance')
log.debug('transform (ia|jb) with_df')
eris.ovov = mp._scf.with_df.ao2mo((co,cv,co,cv))
else:
log.debug('transform (ia|jb) outcore')
eris.feri = lib.H5TmpFile()
#ao2mo.outcore.general(mp.mol, (co,cv,co,cv), eris.feri,
# max_memory=max_memory, verbose=log)
#eris.ovov = eris.feri['eri_mo']
eris.ovov = _ao2mo_ovov(mp, co, cv, eris.feri, max(2000, max_memory), log)
time1 = log.timer('Integral transformation', *time0)
return eris
#
# the MO integral for MP2 is (ov|ov). This is the efficient integral
# (ij|kl) => (ij|ol) => (ol|ij) => (ol|oj) => (ol|ov) => (ov|ov)
# or => (ij|ol) => (oj|ol) => (oj|ov) => (ov|ov)
#
def _ao2mo_ovov(mp, orbo, orbv, feri, max_memory=2000, verbose=None):
time0 = (time.clock(), time.time())
log = logger.new_logger(mp, verbose)
mol = mp.mol
int2e = mol._add_suffix('int2e')
ao2mopt = _ao2mo.AO2MOpt(mol, int2e, 'CVHFnr_schwarz_cond',
'CVHFsetnr_direct_scf')
nao, nocc = orbo.shape
nvir = orbv.shape[1]
nbas = mol.nbas
assert(nvir <= nao)
ao_loc = mol.ao_loc_nr()
dmax = max(4, min(nao/3, numpy.sqrt(max_memory*.95e6/8/(nao+nocc)**2)))
sh_ranges = ao2mo.outcore.balance_partition(ao_loc, dmax)
dmax = max(x[2] for x in sh_ranges)
eribuf = numpy.empty((nao,dmax,dmax,nao))
ftmp = lib.H5TmpFile()
log.debug('max_memory %s MB (dmax = %s) required disk space %g MB',
max_memory, dmax, nocc**2*(nao*(nao+dmax)/2+nvir**2)*8/1e6)
buf_i = numpy.empty((nocc*dmax**2*nao))
buf_li = numpy.empty((nocc**2*dmax**2))
buf1 = numpy.empty_like(buf_li)
fint = gto.moleintor.getints4c
jk_blk_slices = []
count = 0
time1 = time0
with lib.call_in_background(ftmp.__setitem__) as save:
for ip, (ish0, ish1, ni) in enumerate(sh_ranges):
for jsh0, jsh1, nj in sh_ranges[:ip+1]:
i0, i1 = ao_loc[ish0], ao_loc[ish1]
j0, j1 = ao_loc[jsh0], ao_loc[jsh1]
jk_blk_slices.append((i0,i1,j0,j1))
eri = fint(int2e, mol._atm, mol._bas, mol._env,
shls_slice=(0,nbas,ish0,ish1, jsh0,jsh1,0,nbas),
aosym='s1', ao_loc=ao_loc, cintopt=ao2mopt._cintopt,
out=eribuf)
tmp_i = numpy.ndarray((nocc,(i1-i0)*(j1-j0)*nao), buffer=buf_i)
tmp_li = numpy.ndarray((nocc,nocc*(i1-i0)*(j1-j0)), buffer=buf_li)
lib.ddot(orbo.T, eri.reshape(nao,(i1-i0)*(j1-j0)*nao), c=tmp_i)
lib.ddot(orbo.T, tmp_i.reshape(nocc*(i1-i0)*(j1-j0),nao).T, c=tmp_li)
tmp_li = tmp_li.reshape(nocc,nocc,(i1-i0),(j1-j0))
save(str(count), tmp_li.transpose(1,0,2,3))
buf_li, buf1 = buf1, buf_li
count += 1
time1 = log.timer_debug1('partial ao2mo [%d:%d,%d:%d]' %
(ish0,ish1,jsh0,jsh1), *time1)
time1 = time0 = log.timer('mp2 ao2mo_ovov pass1', *time0)
eri = eribuf = tmp_i = tmp_li = buf_i = buf_li = buf1 = None
h5dat = feri.create_dataset('ovov', (nocc*nvir,nocc*nvir), 'f8',
chunks=(nvir,nvir))
occblk = int(min(nocc, max(4, 250/nocc, max_memory*.9e6/8/(nao**2*nocc)/5)))
def load(i0, eri):
if i0 < nocc:
i1 = min(i0+occblk, nocc)
for k, (p0,p1,q0,q1) in enumerate(jk_blk_slices):
eri[:i1-i0,:,p0:p1,q0:q1] = ftmp[str(k)][i0:i1]
if p0 != q0:
dat = numpy.asarray(ftmp[str(k)][:,i0:i1])
eri[:i1-i0,:,q0:q1,p0:p1] = dat.transpose(1,0,3,2)
def save(i0, i1, dat):
for i in range(i0, i1):
h5dat[i*nvir:(i+1)*nvir] = dat[i-i0].reshape(nvir,nocc*nvir)
orbv = numpy.asarray(orbv, order='F')
buf_prefecth = numpy.empty((occblk,nocc,nao,nao))
buf = numpy.empty_like(buf_prefecth)
bufw = numpy.empty((occblk*nocc,nvir**2))
bufw1 = numpy.empty_like(bufw)
with lib.call_in_background(load) as prefetch:
with lib.call_in_background(save) as bsave:
load(0, buf_prefecth)
for i0, i1 in lib.prange(0, nocc, occblk):
buf, buf_prefecth = buf_prefecth, buf
prefetch(i1, buf_prefecth)
eri = buf[:i1-i0].reshape((i1-i0)*nocc,nao,nao)
dat = _ao2mo.nr_e2(eri, orbv, (0,nvir,0,nvir), 's1', 's1', out=bufw)
bsave(i0, i1, dat.reshape(i1-i0,nocc,nvir,nvir).transpose(0,2,1,3))
bufw, bufw1 = bufw1, bufw
time1 = log.timer_debug1('pass2 ao2mo [%d:%d]' % (i0,i1), *time1)
time0 = log.timer('mp2 ao2mo_ovov pass2', *time0)
return h5dat
del(WITH_T2)
if __name__ == '__main__':
from pyscf import scf
from pyscf import gto
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = 'cc-pvdz'
mol.build()
mf = scf.RHF(mol).run()
mp = MP2(mf)
mp.verbose = 5
pt = MP2(mf)
emp2, t2 = pt.kernel()
print(emp2 - -0.204019967288338)
pt.max_memory = 1
emp2, t2 = pt.kernel()
print(emp2 - -0.204019967288338)
pt = MP2(scf.density_fit(mf, 'weigend'))
print(pt.kernel()[0] - -0.204254500454)
| 35.059016
| 92
| 0.588516
|
ca822a43b82867bc16d500e57aba15472b530da1
| 17
|
py
|
Python
|
config/version.py
|
veltzer/pyconf
|
2a06b5890ab41acca12b22cccb59f596ba454d96
|
[
"MIT"
] | null | null | null |
config/version.py
|
veltzer/pyconf
|
2a06b5890ab41acca12b22cccb59f596ba454d96
|
[
"MIT"
] | null | null | null |
config/version.py
|
veltzer/pyconf
|
2a06b5890ab41acca12b22cccb59f596ba454d96
|
[
"MIT"
] | null | null | null |
tup = (0, 0, 81)
| 8.5
| 16
| 0.411765
|
de325023a0bea5fceb31dbdd066786e1d739630a
| 34,830
|
py
|
Python
|
gui/qt/network_dialog.py
|
sfoxhq/electron-cash
|
7b07a51b76d199cc104fed7572090c7c4d686f74
|
[
"MIT"
] | null | null | null |
gui/qt/network_dialog.py
|
sfoxhq/electron-cash
|
7b07a51b76d199cc104fed7572090c7c4d686f74
|
[
"MIT"
] | null | null | null |
gui/qt/network_dialog.py
|
sfoxhq/electron-cash
|
7b07a51b76d199cc104fed7572090c7c4d686f74
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket, queue
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import PyQt5.QtCore as QtCore
from electroncash.i18n import _
from electroncash import networks
from electroncash.util import print_error, Weak, PrintError
from electroncash.network import serialize_server, deserialize_server, get_eligible_servers
from .util import *
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
class NetworkDialog(QDialog, MessageBoxMixin):
network_updated_signal = pyqtSignal()
def __init__(self, network, config):
QDialog.__init__(self)
self.setWindowTitle(_('Network'))
self.setMinimumSize(500, 20)
self.nlayout = NetworkChoiceLayout(self, network, config)
vbox = QVBoxLayout(self)
vbox.addLayout(self.nlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.network_updated_signal.connect(self.on_update)
network.register_callback(self.on_network, ['updated', 'interfaces'])
def on_network(self, event, *args):
''' This may run in network thread '''
self.network_updated_signal.emit() # this enqueues call to on_update in GUI thread
@rate_limited(0.333) # limit network window updates to max 3 per second. More frequent isn't that useful anyway -- and on large wallets/big synchs the network spams us with events which we would rather collapse into 1
def on_update(self):
''' This always runs in main GUI thread '''
self.nlayout.update()
def closeEvent(self, e):
# Warn if non-SSL mode when closing dialog
if (not self.nlayout.ssl_cb.isChecked()
and not self.nlayout.tor_cb.isChecked()
and not self.nlayout.server_host.text().lower().endswith('.onion')
and not self.nlayout.config.get('non_ssl_noprompt', False)):
ok, chk = self.question(''.join([_("You have selected non-SSL mode for your server settings."), ' ',
_("Using this mode presents a potential security risk."), '\n\n',
_("Are you sure you wish to proceed?")]),
detail_text=''.join([
_("All of your traffic to the blockchain servers will be sent unencrypted."), ' ',
_("Additionally, you may also be vulnerable to man-in-the-middle attacks."), ' ',
_("It is strongly recommended that you go back and enable SSL mode."),
]),
rich_text=False,
title=_('Security Warning'),
icon=QMessageBox.Critical,
checkbox_text=("Don't ask me again"))
if chk: self.nlayout.config.set_key('non_ssl_noprompt', True)
if not ok:
e.ignore()
return
super().closeEvent(e)
class NodesListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Connected node'), _('Height')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
is_server = not bool(item.data(0, Qt.UserRole))
menu = QMenu()
if is_server:
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.parent.follow_server(server))
else:
index = item.data(1, Qt.UserRole)
menu.addAction(_("Follow this branch"), lambda: self.parent.follow_branch(index))
menu.exec_(self.viewport().mapToGlobal(position))
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, network):
self.clear()
self.addChild = self.addTopLevelItem
chains = network.get_blockchains()
n_chains = len(chains)
for k, items in chains.items():
b = network.blockchains[k]
name = b.get_name()
if n_chains >1:
x = QTreeWidgetItem([name + '@%d'%b.get_base_height(), '%d'%b.height()])
x.setData(0, Qt.UserRole, 1)
x.setData(1, Qt.UserRole, b.base_height)
else:
x = self
for i in items:
star = ' ◀' if i == network.interface else ''
item = QTreeWidgetItem([i.host + star, '%d'%i.tip])
item.setData(0, Qt.UserRole, 0)
item.setData(1, Qt.UserRole, i.server)
x.addChild(item)
if n_chains>1:
self.addTopLevelItem(x)
x.setExpanded(True)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
class ServerFlag:
''' Used by ServerListWidget for Server flags & Symbols '''
Banned = 2 # Blacklisting/banning was a hidden mechanism inherited from Electrum. We would blacklist misbehaving servers under the hood. Now that facility is exposed (editable by the user). We never connect to blacklisted servers.
Preferred = 1 # Preferred servers (white-listed) start off as the servers in servers.json and are "more trusted" and optionally the user can elect to connect to only these servers
NoFlag = 0
Symbol = ("", "★", "⛔") # indexed using pseudo-enum above
UnSymbol = ("", "✖", "⚬") # used for "disable X" context menu
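# Illustrative note (not part of the original sources): the flags above are plain
# bit values and are combined with bitwise OR, e.g.
#
#   flagval = ServerFlag.Preferred | ServerFlag.Banned   # == 3
#   bool(flagval & ServerFlag.Banned)                    # True, so never connect
#
# ServerListWidget.update() below stores such a combined value in each item's
# Qt.UserRole data and derives the displayed symbol and tooltip from it.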
class ServerListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels(['', _('Host'), _('Port')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
menu = QMenu()
server = item.data(2, Qt.UserRole)
if self.parent.can_set_server(server):
useAction = menu.addAction(_("Use as server"), lambda: self.set_server(server))
else:
useAction = menu.addAction(server.split(':',1)[0], lambda: None)
useAction.setDisabled(True)
menu.addSeparator()
flagval = item.data(0, Qt.UserRole)
iswl = flagval & ServerFlag.Preferred
if flagval & ServerFlag.Banned:
optxt = ServerFlag.UnSymbol[ServerFlag.Banned] + " " + _("Unban server")
isbl = True
useAction.setDisabled(True)
useAction.setText(_("Server banned"))
else:
optxt = ServerFlag.Symbol[ServerFlag.Banned] + " " + _("Ban server")
isbl = False
if not isbl:
if flagval & ServerFlag.Preferred:
optxt_fav = ServerFlag.UnSymbol[ServerFlag.Preferred] + " " + _("Remove from preferred")
else:
optxt_fav = ServerFlag.Symbol[ServerFlag.Preferred] + " " + _("Add to preferred")
menu.addAction(optxt_fav, lambda: self.parent.set_whitelisted(server, not iswl))
menu.addAction(optxt, lambda: self.parent.set_blacklisted(server, not isbl))
menu.exec_(self.viewport().mapToGlobal(position))
def set_server(self, s):
host, port, protocol = deserialize_server(s)
self.parent.server_host.setText(host)
self.parent.server_port.setText(port)
self.parent.autoconnect_cb.setChecked(False) # force auto-connect off if they did "Use as server"
self.parent.set_server()
self.parent.update()
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
@staticmethod
def lightenItemText(item, rang=None):
if rang is None: rang = range(0, item.columnCount())
for i in rang:
            brush = item.foreground(i)
            color = brush.color()
            color.setHsvF(color.hueF(), color.saturationF(), 0.5)
            brush.setColor(color)
item.setForeground(i, brush)
def update(self, network, servers, protocol, use_tor):
self.clear()
self.setIndentation(0)
wl_only = network.is_whitelist_only()
for _host, d in sorted(servers.items()):
if _host.lower().endswith('.onion') and not use_tor:
continue
port = d.get(protocol)
if port:
server = serialize_server(_host, port, protocol)
flag, flagval, tt = (ServerFlag.Symbol[ServerFlag.Banned], ServerFlag.Banned, _("This server is banned")) if network.server_is_blacklisted(server) else ("", 0, "")
flag2, flagval2, tt2 = (ServerFlag.Symbol[ServerFlag.Preferred], ServerFlag.Preferred, _("This is a preferred server")) if network.server_is_whitelisted(server) else ("", 0, "")
flag = flag or flag2; del flag2
tt = tt or tt2; del tt2
flagval |= flagval2; del flagval2
x = QTreeWidgetItem([flag, _host, port])
if tt: x.setToolTip(0, tt)
if (wl_only and flagval != ServerFlag.Preferred) or flagval & ServerFlag.Banned:
# lighten the text of servers we can't/won't connect to for the given mode
self.lightenItemText(x, range(1,3))
x.setData(2, Qt.UserRole, server)
x.setData(0, Qt.UserRole, flagval)
self.addTopLevelItem(x)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.ResizeToContents)
h.setSectionResizeMode(1, QHeaderView.Stretch)
h.setSectionResizeMode(2, QHeaderView.ResizeToContents)
class NetworkChoiceLayout(QObject, PrintError):
def __init__(self, parent, network, config, wizard=False):
super().__init__(parent)
self.network = network
self.config = config
self.protocol = None
self.tor_proxy = None
# tor detector
self.td = TorDetector(self)
self.td.found_proxy.connect(self.suggest_proxy)
self.tabs = tabs = QTabWidget()
server_tab = QWidget()
weakTd = Weak.ref(self.td)
class ProxyTab(QWidget):
def showEvent(slf, e):
super().showEvent(e)
td = weakTd()
if e.isAccepted() and td:
td.start() # starts the tor detector when proxy_tab appears
def hideEvent(slf, e):
super().hideEvent(e)
td = weakTd()
if e.isAccepted() and td:
td.stop() # stops the tor detector when proxy_tab disappears
proxy_tab = ProxyTab()
blockchain_tab = QWidget()
tabs.addTab(blockchain_tab, _('Overview'))
tabs.addTab(server_tab, _('Server'))
tabs.addTab(proxy_tab, _('Proxy'))
if wizard:
tabs.setCurrentIndex(1)
# server tab
grid = QGridLayout(server_tab)
grid.setSpacing(8)
self.server_host = QLineEdit()
self.server_host.setFixedWidth(200)
self.server_port = QLineEdit()
self.server_port.setFixedWidth(60)
self.ssl_cb = QCheckBox(_('Use SSL'))
self.autoconnect_cb = QCheckBox(_('Select server automatically'))
self.autoconnect_cb.setEnabled(self.config.is_modifiable('auto_connect'))
        weakSelf = Weak.ref(self)  # Qt/Python GC hygiene: avoid strong references to self in lambda slots.
self.server_host.editingFinished.connect(lambda: weakSelf() and weakSelf().set_server(onion_hack=True))
self.server_port.editingFinished.connect(lambda: weakSelf() and weakSelf().set_server(onion_hack=True))
self.ssl_cb.clicked.connect(self.change_protocol)
self.autoconnect_cb.clicked.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.update)
msg = ' '.join([
_("If auto-connect is enabled, Electron Cash will always use a server that is on the longest blockchain."),
_("If it is disabled, you have to choose a server you want to use. Electron Cash will warn you if your server is lagging.")
])
grid.addWidget(self.autoconnect_cb, 0, 0, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.preferred_only_cb = QCheckBox(_("Connect only to preferred servers"))
self.preferred_only_cb.setEnabled(self.config.is_modifiable('whitelist_servers_only'))
self.preferred_only_cb.setToolTip(_("If enabled, restricts Electron Cash to connecting to servers only marked as 'preferred'."))
self.preferred_only_cb.clicked.connect(self.set_whitelisted_only) # re-set the config key and notify network.py
msg = '\n\n'.join([
_("If 'Connect only to preferred servers' is enabled, Electron Cash will only connect to servers marked as 'preferred' servers ({}).").format(ServerFlag.Symbol[ServerFlag.Preferred]),
_("This feature was added in response to the potential for a malicious actor to deny service via launching many servers (aka a sybil attack)."),
_("If unsure, most of the time it's safe to leave this option disabled. However leaving it enabled is safer (if a little bit discouraging to new server operators wanting to populate their servers).")
])
grid.addWidget(self.preferred_only_cb, 1, 0, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
grid.addWidget(self.ssl_cb, 2, 0, 1, 3)
self.ssl_help = HelpButton(_('SSL is used to authenticate and encrypt your connections with the blockchain servers.') + "\n\n"
+ _('Due to potential security risks, you may only disable SSL when using a Tor Proxy.'))
grid.addWidget(self.ssl_help, 2, 4)
grid.addWidget(QLabel(_('Server') + ':'), 3, 0)
grid.addWidget(self.server_host, 3, 1, 1, 2)
grid.addWidget(self.server_port, 3, 3)
self.server_list_label = label = QLabel('') # will get set by self.update()
grid.addWidget(label, 4, 0, 1, 5)
self.servers_list = ServerListWidget(self)
grid.addWidget(self.servers_list, 5, 0, 1, 5)
self.legend_label = label = WWLabel('') # will get populated with the legend by self.update()
self.legend_label.linkActivated.connect(self.on_view_blacklist)
grid.addWidget(label, 6, 0, 1, 4)
msg = ' '.join([
_("Preferred servers ({}) are servers you have designated as reliable and/or trustworthy.").format(ServerFlag.Symbol[ServerFlag.Preferred]),
_("Initially, the preferred list is the hard-coded list of known-good servers vetted by the Electron Cash developers."),
_("You can add or remove any server from this list and optionally elect to only connect to preferred servers."),
"\n\n"+_("Banned servers ({}) are servers deemed unreliable and/or untrustworthy, and so they will never be connected-to by Electron Cash.").format(ServerFlag.Symbol[ServerFlag.Banned])
])
grid.addWidget(HelpButton(msg), 6, 4)
# Proxy tab
grid = QGridLayout(proxy_tab)
grid.setSpacing(8)
# proxy setting
self.proxy_cb = QCheckBox(_('Use proxy'))
self.proxy_cb.clicked.connect(self.check_disable_proxy)
self.proxy_cb.clicked.connect(self.set_proxy)
self.proxy_mode = QComboBox()
self.proxy_mode.addItems(['SOCKS4', 'SOCKS5', 'HTTP'])
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(200)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(60)
self.proxy_user = QLineEdit()
self.proxy_user.setPlaceholderText(_("Proxy user"))
self.proxy_password = QLineEdit()
self.proxy_password.setPlaceholderText(_("Password"))
self.proxy_password.setEchoMode(QLineEdit.Password)
self.proxy_password.setFixedWidth(60)
self.proxy_mode.currentIndexChanged.connect(self.set_proxy)
self.proxy_host.editingFinished.connect(self.set_proxy)
self.proxy_port.editingFinished.connect(self.set_proxy)
self.proxy_user.editingFinished.connect(self.set_proxy)
self.proxy_password.editingFinished.connect(self.set_proxy)
self.proxy_mode.currentIndexChanged.connect(self.proxy_settings_changed)
self.proxy_host.textEdited.connect(self.proxy_settings_changed)
self.proxy_port.textEdited.connect(self.proxy_settings_changed)
self.proxy_user.textEdited.connect(self.proxy_settings_changed)
self.proxy_password.textEdited.connect(self.proxy_settings_changed)
self.tor_cb = QCheckBox(_("Use Tor Proxy"))
self.tor_cb.setIcon(QIcon(":icons/tor_logo.png"))
self.tor_cb.hide()
self.tor_cb.clicked.connect(self.use_tor_proxy)
grid.addWidget(self.tor_cb, 1, 0, 1, 3)
grid.addWidget(self.proxy_cb, 2, 0, 1, 3)
grid.addWidget(HelpButton(_('Proxy settings apply to all connections: with Electron Cash servers, but also with third-party services.')), 2, 4)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
grid.addWidget(self.proxy_user, 5, 2)
grid.addWidget(self.proxy_password, 5, 3)
grid.setRowStretch(7, 1)
# Blockchain Tab
grid = QGridLayout(blockchain_tab)
msg = ' '.join([
_("Electron Cash connects to several nodes in order to download block headers and find out the longest blockchain."),
_("This blockchain is used to verify the transactions sent by your transaction server.")
])
self.status_label = QLabel('')
grid.addWidget(QLabel(_('Status') + ':'), 0, 0)
grid.addWidget(self.status_label, 0, 1, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.server_label = QLabel('')
msg = _("Electron Cash sends your wallet addresses to a single server, in order to receive your transaction history.")
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_label, 1, 1, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
self.height_label = QLabel('')
msg = _('This is the height of your local copy of the blockchain.')
grid.addWidget(QLabel(_('Blockchain') + ':'), 2, 0)
grid.addWidget(self.height_label, 2, 1)
grid.addWidget(HelpButton(msg), 2, 4)
self.split_label = QLabel('')
grid.addWidget(self.split_label, 3, 0, 1, 3)
self.nodes_list_widget = NodesListWidget(self)
grid.addWidget(self.nodes_list_widget, 5, 0, 1, 5)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
self.fill_in_proxy_settings()
self.update()
def check_disable_proxy(self, b):
if not self.config.is_modifiable('proxy'):
b = False
for w in [self.proxy_mode, self.proxy_host, self.proxy_port, self.proxy_user, self.proxy_password]:
w.setEnabled(b)
def get_set_server_flags(self):
return (self.config.is_modifiable('server'),
(not self.autoconnect_cb.isChecked()
and not self.preferred_only_cb.isChecked())
)
def can_set_server(self, server):
return bool(self.get_set_server_flags()[0]
and not self.network.server_is_blacklisted(server)
and (not self.network.is_whitelist_only()
or self.network.server_is_whitelisted(server))
)
def enable_set_server(self):
modifiable, notauto = self.get_set_server_flags()
if modifiable:
self.server_host.setEnabled(notauto)
self.server_port.setEnabled(notauto)
else:
for w in [self.autoconnect_cb, self.server_host, self.server_port]:
w.setEnabled(False)
def update(self):
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
preferred_only = self.network.is_whitelist_only()
self.server_host.setText(host)
self.server_port.setText(port)
self.ssl_cb.setChecked(protocol=='s')
ssl_disable = self.ssl_cb.isChecked() and not self.tor_cb.isChecked() and not host.lower().endswith('.onion')
for w in [self.ssl_cb]:#, self.ssl_help]:
w.setDisabled(ssl_disable)
self.autoconnect_cb.setChecked(auto_connect)
self.preferred_only_cb.setChecked(preferred_only)
host = self.network.interface.host if self.network.interface else _('None')
self.server_label.setText(host)
self.set_protocol(protocol)
self.servers = self.network.get_servers()
def protocol_suffix():
if protocol == 't':
return ' (non-SSL)'
elif protocol == 's':
return ' [SSL]'
return ''
server_list_txt = (_('Server peers') if self.network.is_connected() else _('Servers')) + " ({})".format(len(self.servers))
server_list_txt += protocol_suffix()
self.server_list_label.setText(server_list_txt)
if self.network.blacklisted_servers:
bl_srv_ct_str = ' ({}) <a href="ViewBanList">{}</a>'.format(len(self.network.blacklisted_servers), _("View ban list..."))
else:
bl_srv_ct_str = " (0)<i> </i>" # ensure rich text
servers_whitelisted = set(get_eligible_servers(self.servers, protocol)).intersection(self.network.whitelisted_servers) - self.network.blacklisted_servers
self.legend_label.setText(ServerFlag.Symbol[ServerFlag.Preferred] + "=" + _("Preferred") + " ({})".format(len(servers_whitelisted)) + " "
+ ServerFlag.Symbol[ServerFlag.Banned] + "=" + _("Banned") + bl_srv_ct_str)
self.servers_list.update(self.network, self.servers, self.protocol, self.tor_cb.isChecked())
self.enable_set_server()
height_str = "%d "%(self.network.get_local_height()) + _('blocks')
self.height_label.setText(height_str)
n = len(self.network.get_interfaces())
status = _("Connected to %d nodes.")%n if n else _("Not connected")
if n: status += protocol_suffix()
self.status_label.setText(status)
chains = self.network.get_blockchains()
if len(chains)>1:
chain = self.network.blockchain()
checkpoint = chain.get_base_height()
name = chain.get_name()
msg = _('Chain split detected at block %d')%checkpoint + '\n'
msg += (_('You are following branch') if auto_connect else _('Your server is on branch'))+ ' ' + name
msg += ' (%d %s)' % (chain.get_branch_size(), _('blocks'))
else:
msg = ''
self.split_label.setText(msg)
self.nodes_list_widget.update(self.network)
def fill_in_proxy_settings(self):
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
if not proxy_config:
proxy_config = {"mode": "none", "host": "localhost", "port": "9050"}
b = proxy_config.get('mode') != "none"
self.check_disable_proxy(b)
if b:
self.proxy_cb.setChecked(True)
self.proxy_mode.setCurrentIndex(
self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
self.proxy_user.setText(proxy_config.get("user", ""))
self.proxy_password.setText(proxy_config.get("password", ""))
def layout(self):
return self.layout_
def set_protocol(self, protocol):
if protocol != self.protocol:
self.protocol = protocol
def change_protocol(self, use_ssl):
p = 's' if use_ssl else 't'
host = self.server_host.text()
pp = self.servers.get(host, networks.net.DEFAULT_PORTS)
if p not in pp.keys():
p = list(pp.keys())[0]
port = pp[p]
self.server_host.setText(host)
self.server_port.setText(port)
self.set_protocol(p)
self.set_server()
def follow_branch(self, index):
self.network.follow_chain(index)
self.update()
def follow_server(self, server):
self.network.switch_to_interface(server)
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host, port, protocol = deserialize_server(server)
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
self.update()
def server_changed(self, x):
if x:
self.change_server(str(x.text(0)), self.protocol)
def change_server(self, host, protocol):
pp = self.servers.get(host, networks.net.DEFAULT_PORTS)
if protocol and protocol not in protocol_letters:
protocol = None
if protocol:
port = pp.get(protocol)
if port is None:
protocol = None
if not protocol:
if 's' in pp.keys():
protocol = 's'
port = pp.get(protocol)
else:
protocol = list(pp.keys())[0]
port = pp.get(protocol)
self.server_host.setText(host)
self.server_port.setText(port)
self.ssl_cb.setChecked(protocol=='s')
def accept(self):
pass
def set_server(self, onion_hack=False):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host = str(self.server_host.text())
port = str(self.server_port.text())
protocol = 's' if self.ssl_cb.isChecked() else 't'
if onion_hack:
# Fix #1174 -- bring back from the dead non-SSL support for .onion only in a safe way
if host.lower().endswith('.onion'):
self.print_error("Onion/TCP hack: detected .onion, forcing TCP (non-SSL) mode")
protocol = 't'
self.ssl_cb.setChecked(False)
auto_connect = self.autoconnect_cb.isChecked()
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def set_proxy(self):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
if self.proxy_cb.isChecked():
proxy = { 'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()),
'user':str(self.proxy_user.text()),
'password':str(self.proxy_password.text())}
else:
proxy = None
self.tor_cb.setChecked(False)
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def suggest_proxy(self, found_proxy):
if not found_proxy:
self.tor_cb.hide()
self.tor_cb.setChecked(False) # It's not clear to me that if the tor service goes away and comes back later, and in the meantime they unchecked proxy_cb, that this should remain checked. I can see it being confusing for that to be the case. Better to uncheck. It gets auto-re-checked anyway if it comes back and it's the same due to code below. -Calin
return
self.tor_proxy = found_proxy
self.tor_cb.setText("Use Tor proxy at port " + str(found_proxy[1]))
if (self.proxy_mode.currentIndex() == self.proxy_mode.findText('SOCKS5')
and self.proxy_host.text() == found_proxy[0]
and self.proxy_port.text() == str(found_proxy[1])
and self.proxy_cb.isChecked()):
self.tor_cb.setChecked(True)
self.tor_cb.show()
def use_tor_proxy(self, use_it):
if not use_it:
self.proxy_cb.setChecked(False)
else:
socks5_mode_index = self.proxy_mode.findText('SOCKS5')
if socks5_mode_index == -1:
print_error("[network_dialog] can't find proxy_mode 'SOCKS5'")
return
self.proxy_mode.setCurrentIndex(socks5_mode_index)
self.proxy_host.setText("127.0.0.1")
self.proxy_port.setText(str(self.tor_proxy[1]))
self.proxy_user.setText("")
self.proxy_password.setText("")
self.tor_cb.setChecked(True)
self.proxy_cb.setChecked(True)
self.check_disable_proxy(use_it)
self.set_proxy()
def proxy_settings_changed(self):
self.tor_cb.setChecked(False)
def set_blacklisted(self, server, bl):
self.network.server_set_blacklisted(server, bl, True)
self.set_server() # if the blacklisted server is the active server, this will force a reconnect to another server
self.update()
def set_whitelisted(self, server, flag):
self.network.server_set_whitelisted(server, flag, True)
self.set_server()
self.update()
def set_whitelisted_only(self, b):
self.network.set_whitelist_only(b)
self.set_server() # forces us to send a set-server to network.py which recomputes eligible servers, etc
self.update()
def on_view_blacklist(self, ignored):
''' The 'view ban list...' link leads to a modal dialog box where the
user has the option to clear the entire blacklist. Build that dialog here. '''
bl = sorted(self.network.blacklisted_servers)
parent = self.parent()
if not bl:
parent.show_error(_("Server ban list is empty!"))
return
d = WindowModalDialog(parent.top_level_window(), _("Banned Servers"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Banned Servers") + " ({})".format(len(bl))))
tree = QTreeWidget()
tree.setHeaderLabels([_('Host'), _('Port')])
for s in bl:
host, port, protocol = deserialize_server(s)
item = QTreeWidgetItem([host, str(port)])
item.setFlags(Qt.ItemIsEnabled)
tree.addTopLevelItem(item)
tree.setIndentation(3)
h = tree.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
vbox.addWidget(tree)
clear_but = QPushButton(_("Clear ban list"))
weakSelf = Weak.ref(self)
weakD = Weak.ref(d)
clear_but.clicked.connect(lambda: weakSelf() and weakSelf().on_clear_blacklist() and weakD().reject())
vbox.addLayout(Buttons(clear_but, CloseButton(d)))
d.exec_()
def on_clear_blacklist(self):
bl = list(self.network.blacklisted_servers)
blen = len(bl)
if self.parent().question(_("Clear all {} servers from the ban list?").format(blen)):
for i,s in enumerate(bl):
self.network.server_set_blacklisted(s, False, save=bool(i+1 == blen)) # save on last iter
self.update()
return True
return False
class TorDetector(QThread):
found_proxy = pyqtSignal(object)
def start(self):
self.stopQ = queue.Queue() # create a new stopQ blowing away the old one just in case it has old data in it (this prevents races with stop/start arriving too quickly for the thread)
super().start()
def stop(self):
if self.isRunning():
self.stopQ.put(None)
def run(self):
ports = [9050, 9150] # Probable ports for Tor to listen at
while True:
for p in ports:
if TorDetector.is_tor_port(p):
self.found_proxy.emit(("127.0.0.1", p))
break
else:
self.found_proxy.emit(None) # no proxy found, will hide the Tor checkbox
try:
self.stopQ.get(timeout=10.0) # keep trying every 10 seconds
return # we must have gotten a stop signal if we get here, break out of function, ending thread
except queue.Empty:
continue # timeout, keep looping
@staticmethod
def is_tor_port(port):
try:
s = (socket._socketobject if hasattr(socket, "_socketobject") else socket.socket)(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
s.connect(("127.0.0.1", port))
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
pass
return False
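# --- Illustrative sketch (not part of the original file) ---------------------------
# A standalone version of the probe performed by TorDetector.is_tor_port above, shown
# as a helper for clarity. It assumes `socket` is imported at the top of this module
# (it must be, since is_tor_port uses it) and that Tor, if present, listens on one of
# the usual local ports. Tor answers a non-SOCKS, HTTP-looking request with the
# distinctive "Tor is not an HTTP Proxy" message, which is what the check keys on.
def _find_local_tor_port(candidates=(9050, 9150)):
    """Return the first candidate port that answers like a Tor listener, else None."""
    for port in candidates:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.1)
        try:
            s.connect(("127.0.0.1", port))
            s.send(b"GET\n")
            if b"Tor is not an HTTP Proxy" in s.recv(1024):
                return port
        except socket.error:
            pass
        finally:
            s.close()
    return None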
| 45.648755
| 363
| 0.62472
|
f2627c0d8155f18f70c48823984ae39d89a9221f
| 1,488
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/ipanti/apis/SwitchForwardRuleProtectRequest.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | null | null | null |
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/ipanti/apis/SwitchForwardRuleProtectRequest.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | null | null | null |
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/ipanti/apis/SwitchForwardRuleProtectRequest.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | null | null | null |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class SwitchForwardRuleProtectRequest(JDCloudRequest):
"""
    Switch a non-website forwarding rule into protected (defense) state.
"""
def __init__(self, parameters, header=None, version="v1"):
super(SwitchForwardRuleProtectRequest, self).__init__(
'/regions/{regionId}/instances/{instanceId}/forwardRules/{forwardRuleId}:protect', 'POST', header, version)
self.parameters = parameters
class SwitchForwardRuleProtectParameters(object):
def __init__(self, regionId, instanceId, forwardRuleId, ):
"""
        :param regionId: Region ID; the Anti-DDoS service does not distinguish regions, just pass cn-north-1
        :param instanceId: Anti-DDoS instance ID
        :param forwardRuleId: Forwarding rule ID
"""
self.regionId = regionId
self.instanceId = instanceId
self.forwardRuleId = forwardRuleId
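# --- Illustrative usage sketch (not part of the generated SDK file) -----------------
# Builds the parameter and request objects defined above. Actually sending the request
# goes through the SDK's client layer, which lives outside this module, so only the
# construction step is shown; the ids below are hypothetical placeholders.
def _build_protect_request():
    params = SwitchForwardRuleProtectParameters(
        regionId="cn-north-1",             # the service ignores the region, per the docstring
        instanceId="example-instance-id",  # hypothetical Anti-DDoS instance id
        forwardRuleId="example-rule-id",   # hypothetical forwarding rule id
    )
    return SwitchForwardRuleProtectRequest(params)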
| 32.347826
| 119
| 0.718414
|
6b3ae162c36b26820121078f458bbc30a289c216
| 1,024
|
py
|
Python
|
tic_tac_toe/agent/strategy/boltzmann.py
|
erikgrip/tictactoe_reinforcement_learning
|
d763ec4e30c1cf7424165c369f8ef2b8a91bdbca
|
[
"MIT"
] | null | null | null |
tic_tac_toe/agent/strategy/boltzmann.py
|
erikgrip/tictactoe_reinforcement_learning
|
d763ec4e30c1cf7424165c369f8ef2b8a91bdbca
|
[
"MIT"
] | null | null | null |
tic_tac_toe/agent/strategy/boltzmann.py
|
erikgrip/tictactoe_reinforcement_learning
|
d763ec4e30c1cf7424165c369f8ef2b8a91bdbca
|
[
"MIT"
] | null | null | null |
from agent.strategy.base_strategy import BaseStrategy
import numpy as np
class Boltzmann(BaseStrategy):
def __init__(self, start, end, decay):
super().__init__(start, end, decay)
def _action_probs(self, aq_pairs):
        # As defined by Graesser and Keng, p. 86.
tau = self.get_decayed_rate()
tau = np.max([tau, 0.001]) # Tau=0 will lead to division by zero
try:
qs = np.array(aq_pairs[:, 1])
# Normalize to avoid overflow. The output probability is
# insensitive to shifts in values of qs
qs = qs - qs.max()
except:
raise ValueError(f"Unable to find action for max Q in {aq_pairs}")
ps = (np.exp(qs/tau)) / (np.exp(qs/tau).sum())
return ps
def select_action(self, aq_pairs):
ps = self._action_probs(aq_pairs)
sampled_index = np.argmax(np.random.multinomial(n=1, pvals=ps))
action = int(aq_pairs[sampled_index][0])
self.current_step += 1
return action
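# --- Illustrative sketch (not part of the original strategy module) -----------------
# A self-contained numeric version of the Boltzmann (softmax) rule used above, assuming
# aq_pairs is an (N, 2) array of (action, Q-value) rows and tau is the temperature.
def _boltzmann_probs(aq_pairs, tau=0.5):
    tau = max(tau, 0.001)                         # guard against division by zero, as above
    qs = np.asarray(aq_pairs, dtype=float)[:, 1]
    qs = qs - qs.max()                            # shift for numerical stability
    exp_q = np.exp(qs / tau)
    return exp_q / exp_q.sum()

# Example: _boltzmann_probs(np.array([[0, 1.0], [1, 2.0], [2, 3.0]]), tau=0.5) yields
# probabilities that grow with Q; lowering tau sharpens the distribution towards greedy.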
| 36.571429
| 78
| 0.613281
|
d5c63851bd8e3d0e1b101dfd4e52705b7a76124f
| 489
|
py
|
Python
|
GrapPlotter/plot.py
|
ujjwal3067/TravelReservationSystem
|
5775366fd8ac664c1bc0da53a817a40a604e9628
|
[
"Apache-2.0"
] | 1
|
2021-03-10T22:28:38.000Z
|
2021-03-10T22:28:38.000Z
|
GrapPlotter/plot.py
|
ujjwal3067/TravelReservationSystem
|
5775366fd8ac664c1bc0da53a817a40a604e9628
|
[
"Apache-2.0"
] | null | null | null |
GrapPlotter/plot.py
|
ujjwal3067/TravelReservationSystem
|
5775366fd8ac664c1bc0da53a817a40a604e9628
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
filePointer = open("output.txt", "r")
Lines = filePointer.readlines()
x = []
y = []
# populates points for x axis and y axis
for line in Lines:
tmp = line.strip().split(",")
x.append(float(tmp[0]))
y.append(float(tmp[1]))
plt.plot(x, y)
plt.locator_params(axis='y', nbins=10)
plt.locator_params(axis='x', nbins=10)
plt.xlabel('Number of Transactions')
plt.ylabel('Response Time')
plt.title('Performance Graph')
plt.show()
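# --- Illustrative helper (not part of the original script) --------------------------
# The loop above expects output.txt to contain one "x,y" pair per line. This hedged
# helper writes a small synthetic file in that format so the plot can be tried out.
def write_sample_output(path="output.txt", n=10):
    with open(path, "w") as fp:
        for i in range(1, n + 1):
            # hypothetical data: transaction count vs. response time
            fp.write("{},{}\n".format(i * 100, round(i * 0.5, 2)))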
| 21.26087
| 40
| 0.687117
|
6fc91c8dd3a032b9dc2e347659623f6d77f04d92
| 3,883
|
py
|
Python
|
component_sdk/python/kfp_component/_base_op.py
|
ohmystack/pipelines
|
ef851e80fc2027842ff4773f2aba1bdee8f25422
|
[
"Apache-2.0"
] | null | null | null |
component_sdk/python/kfp_component/_base_op.py
|
ohmystack/pipelines
|
ef851e80fc2027842ff4773f2aba1bdee8f25422
|
[
"Apache-2.0"
] | null | null | null |
component_sdk/python/kfp_component/_base_op.py
|
ohmystack/pipelines
|
ef851e80fc2027842ff4773f2aba1bdee8f25422
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
import os
import google
import json
from datetime import datetime
import logging
import sys
import re
from kubernetes import client, config
from kubernetes.client.rest import ApiException
KF_NAMESPACE = 'kubeflow'
class BaseOp:
"""Base class for operation running inside Kubeflow Pipelines.
The base class is aware of the KFP environment and can cascade pipeline
cancel or deadline event to the operation through ``on_cancelling``
handler.
"""
def __init__(self):
config.load_incluster_config()
self._v1_core = client.CoreV1Api()
def execute(self):
"""Executes the operation."""
        original_sigterm_handler = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self._exit_gracefully)
try:
return self.on_executing()
except Exception as e:
logging.error('Failed to execute the op: {}'.format(e))
raise
finally:
            signal.signal(signal.SIGTERM, original_sigterm_handler)
def on_executing(self):
"""Triggers when execute method is called.
Subclass should override this method.
"""
pass
def on_cancelling(self):
"""Triggers when the operation should be cancelled.
Subclass should override this method.
"""
pass
def _exit_gracefully(self, signum, frame):
logging.info('SIGTERM signal received.')
if self._should_cancel():
self.on_cancelling()
def _should_cancel(self):
"""Checks argo's execution config deadline and decide whether the operation
should be cancelled.
Argo cancels workflow by setting deadline to 0 and sends SIGTERM
signal to main container with 10s graceful period.
"""
pod = self._load_pod()
if not pod or not pod.metadata or not pod.metadata.annotations:
return False
argo_execution_config_json = pod.metadata.annotations.get('workflows.argoproj.io/execution', None)
if not argo_execution_config_json:
return False
try:
argo_execution_config = json.loads(argo_execution_config_json)
except Exception as e:
logging.error("Error deserializing argo execution config: {}".format(e))
return False
deadline_json = argo_execution_config.get('deadline', None)
if not deadline_json:
return False
try:
deadline = datetime.strptime(deadline_json, '%Y-%m-%dT%H:%M:%SZ')
except Exception as e:
logging.error("Error converting deadline string to datetime: {}".format(e))
return False
return datetime.now() > deadline
def _load_pod(self):
pod_name = os.environ.get('KFP_POD_NAME', None)
if not pod_name:
logging.warning("No KFP_POD_NAME env var. Exit without cancelling.")
return None
logging.info('Fetching latest pod metadata: {}.'.format(pod_name))
try:
return self._v1_core.read_namespaced_pod(pod_name, KF_NAMESPACE)
except ApiException as e:
logging.error("Exception when calling read pod {}: {}\n".format(pod_name, e))
return None
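# --- Illustrative sketch (not part of the original package) -------------------------
# A minimal example of how a concrete op is expected to use BaseOp: override
# on_executing with the real work and on_cancelling with cleanup. Instantiation is
# omitted here because __init__ loads the in-cluster Kubernetes config.
class _EchoOp(BaseOp):
    def on_executing(self):
        logging.info("doing the actual work of the op here")
        return "done"

    def on_cancelling(self):
        logging.info("pipeline was cancelled or hit its deadline; cleaning up")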
| 34.061404
| 106
| 0.656966
|
7e25ea0717a3d9b8fa6bafb605441a53bc41d0b1
| 13,534
|
py
|
Python
|
tracery/__init__.py
|
dav000/pytracery
|
9d97a6cc979fcef1628167741523ad8c8828fb7e
|
[
"Apache-2.0"
] | null | null | null |
tracery/__init__.py
|
dav000/pytracery
|
9d97a6cc979fcef1628167741523ad8c8828fb7e
|
[
"Apache-2.0"
] | null | null | null |
tracery/__init__.py
|
dav000/pytracery
|
9d97a6cc979fcef1628167741523ad8c8828fb7e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from typing import Callable, Dict, List, Optional, Union
from enum import Enum
import re
import random
basestring = (str, bytes)
class NodeType(Enum):
RAW = -1
TEXT = 0
TAG = 1
ACTION = 2
class Node:
regexp = re.compile(r"\(([^)]+)\)")
def __init__(
self, parent: Union[Grammar, Node], child_index: int, settings: Dict
) -> None:
self.errors = []
if settings.get("raw", None) is None:
self.errors.append("Empty input for node")
settings["raw"] = ""
if isinstance(parent, Grammar):
self.grammar = parent
self.parent = None
self.depth = 0
self.child_index = 0
else:
self.grammar = parent.grammar
self.parent = parent
self.depth = parent.depth + 1
self.child_index = child_index
self.raw = settings["raw"]
self.type = settings.get("type", None)
self.is_expanded = False
def expand_children(self, child_rule: str, prevent_recursion: bool = False) -> None:
self.children = []
self.finished_text = ""
self.child_rule = child_rule
if self.child_rule is not None:
sections, errors = parse(child_rule)
self.errors.extend(errors)
for i, section in enumerate(sections):
node = Node(self, i, section)
self.children.append(node)
if not prevent_recursion:
node.expand(prevent_recursion)
self.finished_text += node.finished_text
else:
self.errors.append("No child rule provided, can't expand children")
def expand(self, prevent_recursion=False) -> None:
if not self.is_expanded:
self.is_expanded = True
# Types of nodes
# -1: raw, needs parsing
# 0: Plaintext
# 1: Tag ("#symbol.mod.mod2.mod3#" or
# "#[pushTarget:pushRule]symbol.mod")
# 2: Action ("[pushTarget:pushRule], [pushTarget:POP]",
# more in the future)
if self.type == NodeType.RAW:
self.expand_children(self.raw, prevent_recursion)
elif self.type == NodeType.TEXT:
self.finished_text = self.raw
elif self.type == NodeType.TAG:
self.preactions = []
self.postactions = []
parsed = parse_tag(self.raw)
self.symbol = parsed["symbol"]
self.modifiers = parsed["modifiers"]
for preaction in parsed["preactions"]:
self.preactions.append(NodeAction(self, preaction["raw"]))
for preaction in self.preactions:
if preaction.type == 0:
self.postactions.append(preaction.create_undo())
for preaction in self.preactions:
preaction.activate()
self.finished_text = self.raw
selected_rule = self.grammar.select_rule(self.symbol, self, self.errors)
self.expand_children(selected_rule, prevent_recursion)
# apply modifiers
for mod_name in self.modifiers:
mod_params = []
if mod_name.find("(") > 0:
matches = self.regexp.findall(mod_name)
if len(matches) > 0:
mod_params = matches[0].split(",")
mod_name = mod_name[: mod_name.find("(")]
mod = self.grammar.modifiers.get(mod_name, None)
if mod is None:
self.errors.append("Missing modifier " + mod_name)
self.finished_text += "((." + mod_name + "))"
else:
self.finished_text = mod(self.finished_text, *mod_params)
for postaction in self.postactions:
postaction.activate()
elif self.type == NodeType.ACTION:
self.action = NodeAction(self, self.raw)
self.action.activate()
self.finished_text = ""
def clear_escape_chars(self) -> None:
self.finished_text = (
self.finished_text.replace("\\\\", "DOUBLEBACKSLASH")
.replace("\\", "")
.replace("DOUBLEBACKSLASH", "\\")
)
def __repr__(self) -> str:
return f"{self.__class__}{self.type}('{self.raw}' d: {self.depth}')"
class ActionType(Enum):
PUSH = 0 # [key:rule]
POP = 1 # [key:POP]
FUNCTION = 2 # [function(param0,param1)]
class NodeAction: # has a 'raw' attribute
def __init__(self, node: Node, raw: str) -> None:
self.node = node
sections = raw.split(":")
self.target = sections[0]
if len(sections) == 1:
self.type = ActionType.FUNCTION
else:
self.rule = sections[1]
if self.rule == "POP":
self.type = ActionType.POP
else:
self.type = ActionType.PUSH
def create_undo(self) -> Optional[NodeAction]:
if self.type == ActionType.PUSH:
return NodeAction(self.node, self.target + ":POP")
return None
def activate(self) -> None:
grammar = self.node.grammar
if self.type == ActionType.PUSH:
self.rule_sections = self.rule.split(",")
self.finished_rules = []
for rule_section in self.rule_sections:
n = Node(grammar, 0, {"type": NodeType.RAW, "raw": rule_section})
n.expand()
self.finished_rules.append(n.finished_text)
grammar.push_rules(self.target, self.finished_rules)
elif self.type == ActionType.POP:
grammar.pop_rules(self.target)
elif self.type == ActionType.FUNCTION:
grammar.flatten(self.target, True)
def __repr__(self) -> str:
return f"{self.__class__}{self.type}('{self.node}' {self.target})"
class RuleSet:
def __init__(self, grammar: Grammar, raw: Union[List[str], str]) -> None:
self.raw = raw
self.grammar = grammar
# self.default_uses = [] not used
if isinstance(raw, list):
self.default_rules = raw
elif isinstance(raw, basestring):
self.default_rules = [raw]
else:
self.default_rules = []
def select_rule(self) -> str:
# in kate's code there's a bunch of stuff for different methods of
# selecting a rule, none of which seem to be implemented yet! so for
# now I'm just going to ...
return random.choice(self.default_rules)
def clear_state(self) -> None:
# self.default_uses = [] not used
pass
class Symbol:
def __init__(self, grammar: Grammar, key: str, raw_rules: List[str]) -> None:
self.grammar = grammar
self.key = key
self.raw_rules = raw_rules
self.base_rules = RuleSet(grammar, raw_rules)
self.clear_state()
def clear_state(self) -> None:
self.stack = [self.base_rules]
self.uses: List[Dict] = []
self.base_rules.clear_state()
def push_rules(self, raw_rules) -> None:
rules = RuleSet(self.grammar, raw_rules)
self.stack.append(rules)
def pop_rules(self) -> None:
self.stack.pop()
def select_rule(self, node, errors) -> str:
self.uses.append({"node": node})
if len(self.stack) == 0:
errors.append(
"The rule stack for '" + self.key + "' is empty, too many pops?"
)
return self.stack[-1].select_rule()
def get_active_rules(self) -> Optional[str]:
if len(self.stack) == 0:
return None
return self.stack[-1].select_rule()
class Grammar:
def __init__(self, raw: str, settings=None) -> None:
self.modifiers: Dict[str, Callable] = {}
self.load_from_raw_obj(raw)
self.errors: List[str] = []
if settings is None:
self.settings: Dict[str, str] = {}
def clear_state(self) -> None:
for val in self.symbols.values():
val.clear_state()
def add_modifiers(self, mods) -> None:
# not sure what this is for yet
for key in mods:
self.modifiers[key] = mods[key]
def load_from_raw_obj(self, raw) -> None:
self.raw = raw
self.symbols = {}
if raw:
self.symbols = dict((k, Symbol(self, k, v)) for k, v in raw.items())
def create_root(self, rule: str) -> Node:
return Node(self, 0, {"type": NodeType.RAW, "raw": rule})
def expand(self, rule: str, allow_escape_chars: bool = False) -> Node:
root = self.create_root(rule)
root.expand()
if not allow_escape_chars:
root.clear_escape_chars()
self.errors.extend(root.errors)
return root
def flatten(self, rule: str, allow_escape_chars: bool = False) -> str:
root = self.expand(rule, allow_escape_chars)
return root.finished_text
def push_rules(self, key: str, raw_rules: List[str]) -> None:
if key not in self.symbols:
self.symbols[key] = Symbol(self, key, raw_rules)
else:
self.symbols[key].push_rules(raw_rules)
def pop_rules(self, key) -> None:
if key not in self.symbols:
self.errors.append("Can't pop: no symbol for key " + key)
else:
self.symbols[key].pop_rules()
def select_rule(self, key: str, node: Node, errors: List[str]) -> str:
if key in self.symbols:
return self.symbols[key].select_rule(node, errors)
else:
if key is None:
key = str(None)
self.errors.append("No symbol for " + key)
return "((" + key + "))"
def parse_tag(tag_contents) -> Dict:
"""
returns a dictionary with 'symbol', 'modifiers', 'preactions',
'postactions'
"""
parsed = dict(symbol=None, preactions=[], postactions=[], modifiers=[])
sections, _ = parse(tag_contents)
symbol_section = None
for section in sections:
if section["type"] == NodeType.TEXT:
if symbol_section is None:
symbol_section = section["raw"]
else:
raise Exception("multiple main sections in " + tag_contents)
else:
parsed["preactions"].append(section)
if symbol_section is not None:
components = symbol_section.split(".")
parsed["symbol"] = components[0]
parsed["modifiers"] = components[1:]
return parsed
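# --- Illustrative helper (not part of the original module) --------------------------
# Shows what parse_tag returns for a couple of tag bodies. It is only defined here, not
# executed at import time, because parse_tag depends on parse(), defined just below.
def _parse_tag_examples():
    # "animal.capitalize"  -> {'symbol': 'animal', 'modifiers': ['capitalize'],
    #                          'preactions': [], 'postactions': []}
    # "[hero:#name#]story" -> symbol 'story', no modifiers, and one preaction section
    #                         holding the raw action text "hero:#name#"
    return parse_tag("animal.capitalize"), parse_tag("[hero:#name#]story")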
def parse(rule) -> tuple[List, List]:
depth = 0
in_tag = False
sections = []
escaped = False
errors = []
start = 0
escaped_substring = ""
last_escaped_char = None
if rule is None:
return sections, errors
def create_section(start: int, end: int, type_: NodeType) -> None:
if end - start < 1:
if type_ == NodeType.TAG:
errors.append(str(start) + ": empty tag")
elif type_ == NodeType.ACTION:
errors.append(str(start) + ": empty action")
raw_substring = None
if last_escaped_char is not None:
raw_substring = escaped_substring + "\\" + rule[last_escaped_char + 1 : end]
else:
raw_substring = rule[start:end]
sections.append({"type": type_, "raw": raw_substring})
for i, c in enumerate(rule):
if not escaped:
if c == "[":
if depth == 0 and not in_tag:
if start < i:
create_section(start, i, NodeType.TEXT)
last_escaped_char = None
escaped_substring = ""
start = i + 1
depth += 1
elif c == "]":
depth -= 1
if depth == 0 and not in_tag:
create_section(start, i, NodeType.ACTION)
last_escaped_char = None
escaped_substring = ""
start = i + 1
elif c == "#":
if depth == 0:
if in_tag:
create_section(start, i, NodeType.TAG)
last_escaped_char = None
escaped_substring = ""
start = i + 1
else:
if start < i:
create_section(start, i, NodeType.TEXT)
last_escaped_char = None
escaped_substring = ""
start = i + 1
in_tag = not in_tag
elif c == "\\":
escaped = True
escaped_substring = escaped_substring + rule[start:i]
start = i + 1
last_escaped_char = i
else:
escaped = False
if start < len(rule):
create_section(start, len(rule), NodeType.TEXT)
last_escaped_char = None
escaped_substring = ""
if in_tag:
errors.append("unclosed tag")
if depth > 0:
errors.append("too many [")
if depth < 0:
errors.append("too many ]")
sections = [
s for s in sections if not (s["type"] == NodeType.TEXT and len(s["raw"]) == 0)
]
return sections, errors
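# --- Illustrative usage sketch (not part of the original module) --------------------
# End-to-end use of the public API defined above: build a Grammar from a rules dict and
# flatten a starting rule. The symbol names are made up purely for illustration.
if __name__ == "__main__":
    demo = Grammar({
        "origin": "#animal# meets #animal#",
        "animal": ["cat", "owl", "fox"],
    })
    print(demo.flatten("#origin#"))  # e.g. "owl meets cat"
    print(demo.errors)               # [] when every referenced symbol resolves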
| 34.613811
| 88
| 0.535097
|
6a1257335a97c9e117acf2cb67223fbd48bb4a3f
| 820
|
py
|
Python
|
code/glucocheck/homepage/migrations/0001_initial.py
|
kmcgreg5/Glucocheck
|
4ab4ada7f967ae41c1241c94523d14e693e05dd4
|
[
"FSFAP"
] | null | null | null |
code/glucocheck/homepage/migrations/0001_initial.py
|
kmcgreg5/Glucocheck
|
4ab4ada7f967ae41c1241c94523d14e693e05dd4
|
[
"FSFAP"
] | null | null | null |
code/glucocheck/homepage/migrations/0001_initial.py
|
kmcgreg5/Glucocheck
|
4ab4ada7f967ae41c1241c94523d14e693e05dd4
|
[
"FSFAP"
] | null | null | null |
# Generated by Django 3.1.6 on 2021-02-17 13:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('birth_date', models.DateField(blank=True, null=True)),
('state', models.CharField(max_length=200)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
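# --- Illustrative sketch (not part of the original migration) -----------------------
# A hypothetical models.py that a migration like the one above would be generated from;
# field names and options mirror the CreateModel operation. It is kept in comments so
# the migration module itself stays unchanged.
#
# from django.conf import settings
# from django.db import models
#
# class UserProfile(models.Model):
#     user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#     birth_date = models.DateField(blank=True, null=True)
#     state = models.CharField(max_length=200)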
| 30.37037
| 121
| 0.636585
|
8c5ea4d3b12f9267afefd92e5d749becc4f4352f
| 5,108
|
py
|
Python
|
var/spack/repos/builtin/packages/cbtf-argonavis/package.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2018-11-16T02:42:57.000Z
|
2019-06-06T19:18:50.000Z
|
var/spack/repos/builtin/packages/cbtf-argonavis/package.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32
|
2020-12-15T17:29:20.000Z
|
2022-03-21T15:08:31.000Z
|
var/spack/repos/builtin/packages/cbtf-argonavis/package.py
|
Kerilk/spack
|
e027942b55407a4a5fe323b93d8e57200c873a43
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-07-19T20:31:27.000Z
|
2021-07-19T21:14:14.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class CbtfArgonavis(CMakePackage):
"""CBTF Argo Navis project contains the CUDA collector and supporting
libraries that was done as a result of a DOE SBIR grant.
"""
homepage = "http://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-argonavis.git"
version('develop', branch='master')
version('1.9.4.1', branch='1.9.4.1')
version('1.9.4', branch='1.9.4')
version('1.9.3', branch='1.9.3')
variant('cti', default=False,
description="Build MRNet with the CTI startup option")
variant('crayfe', default=False,
description="build only the FE tool using the runtime_dir \
to point to target build.")
variant('runtime', default=False,
description="build only the runtime libraries and collectors.")
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo'))
depends_on("cmake@3.0.2:", type='build')
# To specify ^elfutils@0.170 on the command line spack
# apparently needs/wants this dependency explicity here
# even though it is referenced downstream
depends_on("elfutils", type="link")
# For boost
depends_on("boost@1.70.0:")
# For MRNet
depends_on("mrnet@5.0.1-3:+cti", when='@develop+cti', type=('build', 'link', 'run'))
depends_on("mrnet@5.0.1-3:+lwthreads", when='@develop~cti', type=('build', 'link', 'run'))
depends_on("mrnet@5.0.1-3+cti", when='@1.9.3:9999+cti', type=('build', 'link', 'run'))
depends_on("mrnet@5.0.1-3+lwthreads", when='@1.9.3:9999~cti', type=('build', 'link', 'run'))
# For CBTF
depends_on("cbtf@develop", when='@develop', type=('build', 'link', 'run'))
depends_on("cbtf@1.9.3:9999", when='@1.9.3:9999', type=('build', 'link', 'run'))
# For CBTF with cti
depends_on("cbtf@develop+cti", when='@develop+cti', type=('build', 'link', 'run'))
depends_on("cbtf@1.9.3:9999+cti", when='@1.9.3:9999+cti', type=('build', 'link', 'run'))
# For CBTF with runtime
depends_on("cbtf@develop+runtime", when='@develop+runtime', type=('build', 'link', 'run'))
depends_on("cbtf@1.9.3:9999+runtime", when='@1.9.3:9999+runtime', type=('build', 'link', 'run'))
# For libmonitor
depends_on("libmonitor@2013.02.18+commrank", type=('build', 'link', 'run'))
# For PAPI
depends_on("papi@5.4.1:", type=('build', 'link', 'run'))
# For CBTF-KRELL
depends_on("cbtf-krell@develop", when='@develop', type=('build', 'link', 'run'))
depends_on("cbtf-krell@1.9.3:9999", when='@1.9.3:9999', type=('build', 'link', 'run'))
depends_on('cbtf-krell@develop+cti', when='@develop+cti', type=('build', 'link', 'run'))
depends_on('cbtf-krell@1.9.3:9999+cti', when='@1.9.3:9999+cti', type=('build', 'link', 'run'))
depends_on('cbtf-krell@develop+runtime', when='@develop+runtime', type=('build', 'link', 'run'))
depends_on('cbtf-krell@1.9.3:9999+runtime', when='@1.9.3:9999+runtime', type=('build', 'link', 'run'))
# For CUDA
depends_on("cuda")
parallel = False
build_directory = 'build_cbtf_argonavis'
def cmake_args(self):
spec = self.spec
compile_flags = "-O2 -g -Wall"
cmake_args = [
'-DCMAKE_CXX_FLAGS=%s' % compile_flags,
'-DCMAKE_C_FLAGS=%s' % compile_flags,
'-DCUDA_DIR=%s' % spec['cuda'].prefix,
'-DCUDA_INSTALL_PATH=%s' % spec['cuda'].prefix,
'-DCUDA_TOOLKIT_ROOT_DIR=%s' % spec['cuda'].prefix,
'-DCUPTI_DIR=%s' % spec['cuda'].prefix.extras.CUPTI,
'-DCUPTI_ROOT=%s' % spec['cuda'].prefix.extras.CUPTI,
'-DPAPI_ROOT=%s' % spec['papi'].prefix,
'-DCBTF_DIR=%s' % spec['cbtf'].prefix,
'-DCBTF_KRELL_DIR=%s' % spec['cbtf-krell'].prefix,
'-DBoost_NO_SYSTEM_PATHS=TRUE',
'-DBoost_NO_BOOST_CMAKE=TRUE',
'-DBOOST_ROOT=%s' % spec['boost'].prefix,
'-DBoost_DIR=%s' % spec['boost'].prefix,
'-DBOOST_LIBRARYDIR=%s' % spec['boost'].prefix.lib,
'-DMRNET_DIR=%s' % spec['mrnet'].prefix,
'-DLIBMONITOR_DIR=%s' % spec['libmonitor'].prefix]
return cmake_args
def setup_run_environment(self, env):
"""Set up the compile and runtime environments for a package."""
env.prepend_path(
'LD_LIBRARY_PATH',
self.spec['cuda'].prefix + '/extras/CUPTI/lib64')
def setup_build_environment(self, env):
"""Set up the compile and runtime environments for a package."""
env.prepend_path(
'LD_LIBRARY_PATH',
self.spec['cuda'].prefix + '/extras/CUPTI/lib64')
| 41.868852
| 106
| 0.591817
|
1f609f158c146d33abcc9d438d636590f2cd4066
| 19,188
|
py
|
Python
|
synapse/replication/tcp/streams/_base.py
|
hailzam/synapse
|
837293c314b47e988fe9532115476a6536cd6406
|
[
"Apache-2.0"
] | null | null | null |
synapse/replication/tcp/streams/_base.py
|
hailzam/synapse
|
837293c314b47e988fe9532115476a6536cd6406
|
[
"Apache-2.0"
] | null | null | null |
synapse/replication/tcp/streams/_base.py
|
hailzam/synapse
|
837293c314b47e988fe9532115476a6536cd6406
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import heapq
import logging
from collections import namedtuple
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
List,
Optional,
Tuple,
TypeVar,
)
import attr
from synapse.replication.http.streams import ReplicationGetStreamUpdates
if TYPE_CHECKING:
import synapse.server
logger = logging.getLogger(__name__)
# the number of rows to request from an update_function.
_STREAM_UPDATE_TARGET_ROW_COUNT = 100
# Some type aliases to make things a bit easier.
# A stream position token
Token = int
# The type of a stream update row, after JSON deserialisation, but before
# parsing with Stream.parse_row (which turns it into a `ROW_TYPE`). Normally it's
# just a row from a database query, though this is dependent on the stream in question.
#
StreamRow = TypeVar("StreamRow", bound=Tuple)
# The type returned by the update_function of a stream, as well as get_updates(),
# get_updates_since, etc.
#
# It consists of a triplet `(updates, new_last_token, limited)`, where:
# * `updates` is a list of `(token, row)` entries.
# * `new_last_token` is the new position in stream.
# * `limited` is whether there are more updates to fetch.
#
StreamUpdateResult = Tuple[List[Tuple[Token, StreamRow]], Token, bool]
# The type of an update_function for a stream
#
# The arguments are:
#
# * instance_name: the writer of the stream
# * from_token: the previous stream token: the starting point for fetching the
# updates
# * to_token: the new stream token: the point to get updates up to
# * target_row_count: a target for the number of rows to be returned.
#
# The update_function is expected to return up to _approximately_ target_row_count rows.
# If there are more updates available, it should set `limited` in the result, and
# it will be called again to get the next batch.
#
UpdateFunction = Callable[[str, Token, Token, int], Awaitable[StreamUpdateResult]]
class Stream:
"""Base class for the streams.
Provides a `get_updates()` function that returns new updates since the last
time it was called.
"""
NAME = None # type: str # The name of the stream
# The type of the row. Used by the default impl of parse_row.
ROW_TYPE = None # type: Any
@classmethod
def parse_row(cls, row: StreamRow):
"""Parse a row received over replication
By default, assumes that the row data is an array object and passes its contents
to the constructor of the ROW_TYPE for this stream.
Args:
row: row data from the incoming RDATA command, after json decoding
Returns:
ROW_TYPE object for this stream
"""
return cls.ROW_TYPE(*row)
def __init__(
self,
local_instance_name: str,
current_token_function: Callable[[str], Token],
update_function: UpdateFunction,
):
"""Instantiate a Stream
`current_token_function` and `update_function` are callbacks which
should be implemented by subclasses.
`current_token_function` takes an instance name, which is a writer to
the stream, and returns the position in the stream of the writer (as
viewed from the current process). On the writer process this is where
the writer has successfully written up to, whereas on other processes
this is the position which we have received updates up to over
replication. (Note that most streams have a single writer and so their
implementations ignore the instance name passed in).
`update_function` is called to get updates for this stream between a
pair of stream tokens. See the `UpdateFunction` type definition for more
info.
Args:
local_instance_name: The instance name of the current process
current_token_function: callback to get the current token, as above
            update_function: callback to get stream updates, as above
"""
self.local_instance_name = local_instance_name
self.current_token = current_token_function
self.update_function = update_function
# The token from which we last asked for updates
self.last_token = self.current_token(self.local_instance_name)
def discard_updates_and_advance(self):
"""Called when the stream should advance but the updates would be discarded,
e.g. when there are no currently connected workers.
"""
self.last_token = self.current_token(self.local_instance_name)
async def get_updates(self) -> StreamUpdateResult:
"""Gets all updates since the last time this function was called (or
since the stream was constructed if it hadn't been called before).
Returns:
A triplet `(updates, new_last_token, limited)`, where `updates` is
a list of `(token, row)` entries, `new_last_token` is the new
position in stream, and `limited` is whether there are more updates
to fetch.
"""
current_token = self.current_token(self.local_instance_name)
updates, current_token, limited = await self.get_updates_since(
self.local_instance_name, self.last_token, current_token
)
self.last_token = current_token
return updates, current_token, limited
async def get_updates_since(
self, instance_name: str, from_token: Token, upto_token: Token
) -> StreamUpdateResult:
"""Like get_updates except allows specifying from when we should
stream updates
Returns:
A triplet `(updates, new_last_token, limited)`, where `updates` is
a list of `(token, row)` entries, `new_last_token` is the new
position in stream, and `limited` is whether there are more updates
to fetch.
"""
from_token = int(from_token)
if from_token == upto_token:
return [], upto_token, False
updates, upto_token, limited = await self.update_function(
instance_name, from_token, upto_token, _STREAM_UPDATE_TARGET_ROW_COUNT,
)
return updates, upto_token, limited
def current_token_without_instance(
current_token: Callable[[], int]
) -> Callable[[str], int]:
"""Takes a current token callback function for a single writer stream
that doesn't take an instance name parameter and wraps it in a function that
does accept an instance name parameter but ignores it.
"""
return lambda instance_name: current_token()
def make_http_update_function(hs, stream_name: str) -> UpdateFunction:
"""Makes a suitable function for use as an `update_function` that queries
the master process for updates.
"""
client = ReplicationGetStreamUpdates.make_client(hs)
async def update_function(
instance_name: str, from_token: int, upto_token: int, limit: int
) -> StreamUpdateResult:
result = await client(
instance_name=instance_name,
stream_name=stream_name,
from_token=from_token,
upto_token=upto_token,
)
return result["updates"], result["upto_token"], result["limited"]
return update_function
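# --- Illustrative sketch (not part of the original module) --------------------------
# Demonstrates the update_function contract described above with a fake in-memory
# stream: every integer token carries one row, and `limited` signals that the caller
# should ask again from the returned token. Names prefixed with _demo are hypothetical.
async def _demo_update_function(
    instance_name: str, from_token: int, upto_token: int, limit: int
) -> StreamUpdateResult:
    rows = [(t, ("row-%d" % t,)) for t in range(from_token + 1, upto_token + 1)]
    limited = len(rows) > limit
    rows = rows[:limit]
    new_token = rows[-1][0] if rows else upto_token
    return rows, new_token, limited


async def _demo_pagination() -> None:
    stream = Stream("master", lambda instance_name: 0, _demo_update_function)
    token = 0
    while True:
        updates, token, limited = await stream.get_updates_since("master", token, 250)
        # first call: 100 rows (the target row count), new token 100, limited True;
        # keep calling until limited is False and the token reaches 250.
        if not limited:
            break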
class BackfillStream(Stream):
"""We fetched some old events and either we had never seen that event before
or it went from being an outlier to not.
"""
BackfillStreamRow = namedtuple(
"BackfillStreamRow",
(
"event_id", # str
"room_id", # str
"type", # str
"state_key", # str, optional
"redacts", # str, optional
"relates_to", # str, optional
),
)
NAME = "backfill"
ROW_TYPE = BackfillStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_current_backfill_token),
store.get_all_new_backfill_event_rows,
)
class PresenceStream(Stream):
PresenceStreamRow = namedtuple(
"PresenceStreamRow",
(
"user_id", # str
"state", # str
"last_active_ts", # int
"last_federation_update_ts", # int
"last_user_sync_ts", # int
"status_msg", # str
"currently_active", # bool
),
)
NAME = "presence"
ROW_TYPE = PresenceStreamRow
def __init__(self, hs):
store = hs.get_datastore()
if hs.config.worker_app is None:
# on the master, query the presence handler
presence_handler = hs.get_presence_handler()
update_function = presence_handler.get_all_presence_updates
else:
# Query master process
update_function = make_http_update_function(hs, self.NAME)
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_current_presence_token),
update_function,
)
class TypingStream(Stream):
TypingStreamRow = namedtuple(
"TypingStreamRow", ("room_id", "user_ids") # str # list(str)
)
NAME = "typing"
ROW_TYPE = TypingStreamRow
def __init__(self, hs):
typing_handler = hs.get_typing_handler()
writer_instance = hs.config.worker.writers.typing
if writer_instance == hs.get_instance_name():
# On the writer, query the typing handler
update_function = typing_handler.get_all_typing_updates
else:
# Query the typing writer process
update_function = make_http_update_function(hs, self.NAME)
super().__init__(
hs.get_instance_name(),
current_token_without_instance(typing_handler.get_current_token),
update_function,
)
class ReceiptsStream(Stream):
ReceiptsStreamRow = namedtuple(
"ReceiptsStreamRow",
(
"room_id", # str
"receipt_type", # str
"user_id", # str
"event_id", # str
"data", # dict
),
)
NAME = "receipts"
ROW_TYPE = ReceiptsStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_max_receipt_stream_id),
store.get_all_updated_receipts,
)
class PushRulesStream(Stream):
"""A user has changed their push rules
"""
PushRulesStreamRow = namedtuple("PushRulesStreamRow", ("user_id",)) # str
NAME = "push_rules"
ROW_TYPE = PushRulesStreamRow
def __init__(self, hs):
self.store = hs.get_datastore()
super(PushRulesStream, self).__init__(
hs.get_instance_name(),
self._current_token,
self.store.get_all_push_rule_updates,
)
def _current_token(self, instance_name: str) -> int:
push_rules_token = self.store.get_max_push_rules_stream_id()
return push_rules_token
class PushersStream(Stream):
"""A user has added/changed/removed a pusher
"""
PushersStreamRow = namedtuple(
"PushersStreamRow",
("user_id", "app_id", "pushkey", "deleted"), # str # str # str # bool
)
NAME = "pushers"
ROW_TYPE = PushersStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_pushers_stream_token),
store.get_all_updated_pushers_rows,
)
class CachesStream(Stream):
"""A cache was invalidated on the master and no other stream would invalidate
the cache on the workers
"""
@attr.s(slots=True)
class CachesStreamRow:
"""Stream to inform workers they should invalidate their cache.
Attributes:
cache_func: Name of the cached function.
keys: The entry in the cache to invalidate. If None then will
invalidate all.
invalidation_ts: Timestamp of when the invalidation took place.
"""
cache_func = attr.ib(type=str)
keys = attr.ib(type=Optional[List[Any]])
invalidation_ts = attr.ib(type=int)
NAME = "caches"
ROW_TYPE = CachesStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
store.get_cache_stream_token_for_writer,
store.get_all_updated_caches,
)
class PublicRoomsStream(Stream):
"""The public rooms list changed
"""
PublicRoomsStreamRow = namedtuple(
"PublicRoomsStreamRow",
(
"room_id", # str
"visibility", # str
"appservice_id", # str, optional
"network_id", # str, optional
),
)
NAME = "public_rooms"
ROW_TYPE = PublicRoomsStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_current_public_room_stream_id),
store.get_all_new_public_rooms,
)
class DeviceListsStream(Stream):
"""Either a user has updated their devices or a remote server needs to be
told about a device update.
"""
@attr.s(slots=True)
class DeviceListsStreamRow:
entity = attr.ib(type=str)
NAME = "device_lists"
ROW_TYPE = DeviceListsStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_device_stream_token),
store.get_all_device_list_changes_for_remotes,
)
class ToDeviceStream(Stream):
"""New to_device messages for a client
"""
ToDeviceStreamRow = namedtuple("ToDeviceStreamRow", ("entity",)) # str
NAME = "to_device"
ROW_TYPE = ToDeviceStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_to_device_stream_token),
store.get_all_new_device_messages,
)
class TagAccountDataStream(Stream):
"""Someone added/removed a tag for a room
"""
TagAccountDataStreamRow = namedtuple(
"TagAccountDataStreamRow", ("user_id", "room_id", "data") # str # str # dict
)
NAME = "tag_account_data"
ROW_TYPE = TagAccountDataStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_max_account_data_stream_id),
store.get_all_updated_tags,
)
class AccountDataStream(Stream):
"""Global or per room account data was changed
"""
AccountDataStreamRow = namedtuple(
"AccountDataStream",
("user_id", "room_id", "data_type"), # str # Optional[str] # str
)
NAME = "account_data"
ROW_TYPE = AccountDataStreamRow
def __init__(self, hs: "synapse.server.HomeServer"):
self.store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(self.store.get_max_account_data_stream_id),
self._update_function,
)
async def _update_function(
self, instance_name: str, from_token: int, to_token: int, limit: int
) -> StreamUpdateResult:
limited = False
global_results = await self.store.get_updated_global_account_data(
from_token, to_token, limit
)
# if the global results hit the limit, we'll need to limit the room results to
# the same stream token.
if len(global_results) >= limit:
to_token = global_results[-1][0]
limited = True
room_results = await self.store.get_updated_room_account_data(
from_token, to_token, limit
)
# likewise, if the room results hit the limit, limit the global results to
# the same stream token.
if len(room_results) >= limit:
to_token = room_results[-1][0]
limited = True
# convert the global results to the right format, and limit them to the to_token
# at the same time
global_rows = (
(stream_id, (user_id, None, account_data_type))
for stream_id, user_id, account_data_type in global_results
if stream_id <= to_token
)
# we know that the room_results are already limited to `to_token` so no need
# for a check on `stream_id` here.
room_rows = (
(stream_id, (user_id, room_id, account_data_type))
for stream_id, user_id, room_id, account_data_type in room_results
)
# We need to return a sorted list, so merge them together.
#
# Note: We order only by the stream ID to work around a bug where the
# same stream ID could appear in both `global_rows` and `room_rows`,
# leading to a comparison between the data tuples. The comparison could
# fail due to attempting to compare the `room_id` which results in a
# `TypeError` from comparing a `str` vs `None`.
updates = list(heapq.merge(room_rows, global_rows, key=lambda row: row[0]))
return updates, to_token, limited
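# --- Illustrative sketch (not part of the original module) --------------------------
# A concrete, self-contained example of the merge performed in _update_function above.
# The rows carry None in some positions (e.g. room_id for global account data), so the
# merge must key on the stream ID alone; comparing the full tuples could hit a
# str-vs-None TypeError. `heapq` is already imported at the top of this module.
def _demo_merge_by_stream_id():
    global_rows = [(1, ("@alice:hs", None, "m.direct")),
                   (4, ("@bob:hs", None, "m.ignored_user_list"))]
    room_rows = [(2, ("@alice:hs", "!room:hs", "m.tag")),
                 (3, ("@bob:hs", "!room:hs", "m.fully_read"))]
    # yields rows ordered 1, 2, 3, 4 without ever comparing the payload tuples
    return list(heapq.merge(room_rows, global_rows, key=lambda row: row[0]))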
class GroupServerStream(Stream):
GroupsStreamRow = namedtuple(
"GroupsStreamRow",
("group_id", "user_id", "type", "content"), # str # str # str # dict
)
NAME = "groups"
ROW_TYPE = GroupsStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_group_stream_token),
store.get_all_groups_changes,
)
class UserSignatureStream(Stream):
"""A user has signed their own device with their user-signing key
"""
UserSignatureStreamRow = namedtuple("UserSignatureStreamRow", ("user_id")) # str
NAME = "user_signature"
ROW_TYPE = UserSignatureStreamRow
def __init__(self, hs):
store = hs.get_datastore()
super().__init__(
hs.get_instance_name(),
current_token_without_instance(store.get_device_stream_token),
store.get_all_user_signature_changes_for_remotes,
)
| 31.873754
| 88
| 0.648113
|
90c4e436bb1182cde5a4182bb06b5afab0b1f199
| 4,469
|
py
|
Python
|
cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py
|
stillbreeze/Online-Stereo-Calibration
|
7813f470a7a475c67608e536f7704bce47e8fadf
|
[
"BSD-3-Clause"
] | 14
|
2019-12-11T18:33:57.000Z
|
2022-01-04T04:52:45.000Z
|
cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py
|
stillbreeze/Online-Stereo-Calibration
|
7813f470a7a475c67608e536f7704bce47e8fadf
|
[
"BSD-3-Clause"
] | null | null | null |
cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py
|
stillbreeze/Online-Stereo-Calibration
|
7813f470a7a475c67608e536f7704bce47e8fadf
|
[
"BSD-3-Clause"
] | 5
|
2020-01-09T16:24:50.000Z
|
2021-09-24T11:10:49.000Z
|
import unittest
import gtsam
import gtsam_unstable
import numpy as np
def _timestamp_key_value(key, value):
return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue(
key, value
)
class TestFixedLagSmootherExample(unittest.TestCase):
'''
Tests the fixed lag smoother wrapper
'''
def test_FixedLagSmootherExample(self):
'''
Simple test that checks for equality between C++ example
file and the Python implementation. See
gtsam_unstable/examples/FixedLagSmootherExample.cpp
'''
# Define a batch fixed lag smoother, which uses
# Levenberg-Marquardt to perform the nonlinear optimization
lag = 2.0
smoother_batch = gtsam_unstable.BatchFixedLagSmoother(lag)
# Create containers to store the factors and linearization points
# that will be sent to the smoothers
new_factors = gtsam.NonlinearFactorGraph()
new_values = gtsam.Values()
new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()
# Create a prior on the first pose, placing it at the origin
prior_mean = gtsam.Pose2(0, 0, 0)
prior_noise = gtsam.noiseModel_Diagonal.Sigmas(
np.array([0.3, 0.3, 0.1]))
X1 = 0
new_factors.push_back(
gtsam.PriorFactorPose2(
X1, prior_mean, prior_noise))
new_values.insert(X1, prior_mean)
new_timestamps.insert(_timestamp_key_value(X1, 0.0))
delta_time = 0.25
time = 0.25
i = 0
ground_truth = [
gtsam.Pose2(0.995821, 0.0231012, 0.0300001),
gtsam.Pose2(1.49284, 0.0457247, 0.045),
gtsam.Pose2(1.98981, 0.0758879, 0.06),
gtsam.Pose2(2.48627, 0.113502, 0.075),
gtsam.Pose2(2.98211, 0.158558, 0.09),
gtsam.Pose2(3.47722, 0.211047, 0.105),
gtsam.Pose2(3.97149, 0.270956, 0.12),
gtsam.Pose2(4.4648, 0.338272, 0.135),
gtsam.Pose2(4.95705, 0.41298, 0.15),
gtsam.Pose2(5.44812, 0.495063, 0.165),
gtsam.Pose2(5.9379, 0.584503, 0.18),
]
# Iterates from 0.25s to 3.0s, adding 0.25s each loop
# In each iteration, the agent moves at a constant speed
# and its two odometers measure the change. The smoothed
# result is then compared to the ground truth
while time <= 3.0:
previous_key = 1000 * (time - delta_time)
current_key = 1000 * time
# assign current key to the current timestamp
new_timestamps.insert(_timestamp_key_value(current_key, time))
# Add a guess for this pose to the new values
# Assume that the robot moves at 2 m/s. Position is time[s] *
# 2[m/s]
current_pose = gtsam.Pose2(time * 2, 0, 0)
new_values.insert(current_key, current_pose)
# Add odometry factors from two different sources with different
# error stats
odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02)
odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas(
np.array([0.1, 0.1, 0.05]))
new_factors.push_back(
gtsam.BetweenFactorPose2(
previous_key,
current_key,
odometry_measurement_1,
odometry_noise_1))
odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01)
odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas(
np.array([0.05, 0.05, 0.05]))
new_factors.push_back(
gtsam.BetweenFactorPose2(
previous_key,
current_key,
odometry_measurement_2,
odometry_noise_2))
# Update the smoothers with the new factors. In this case,
# one iteration must pass for Levenberg-Marquardt to accurately
# estimate
if time >= 0.50:
smoother_batch.update(new_factors, new_values, new_timestamps)
estimate = smoother_batch.calculateEstimatePose2(current_key)
self.assertTrue(estimate.equals(ground_truth[i], 1e-4))
i += 1
new_timestamps.clear()
new_values.clear()
new_factors.resize(0)
time += delta_time
if __name__ == "__main__":
unittest.main()
| 36.333333
| 78
| 0.590736
|
2b09aeb7ef53ad5d88f46eee67ffea96915a9e33
| 9,225
|
py
|
Python
|
lazycon/semantics/analyzer.py
|
maxme1/lazycon
|
9a898bedeb0e7af506dad1f73a8f68062414b00d
|
[
"MIT"
] | 8
|
2021-04-03T08:13:12.000Z
|
2022-01-17T12:36:46.000Z
|
lazycon/semantics/analyzer.py
|
maxme1/lazycon
|
9a898bedeb0e7af506dad1f73a8f68062414b00d
|
[
"MIT"
] | 4
|
2021-04-02T11:59:30.000Z
|
2022-01-17T15:31:35.000Z
|
lazycon/semantics/analyzer.py
|
maxme1/lazycon
|
9a898bedeb0e7af506dad1f73a8f68062414b00d
|
[
"MIT"
] | null | null | null |
import ast
from collections import defaultdict
from enum import Enum
from typing import Iterable, List, Dict, Optional
from .locals import LocalsGatherer
from ..parser import IGNORE_NAME, extract_assign_targets
from ..statements import GlobalStatement, GlobalFunction, GlobalAssign, GlobalImport, Definitions
from ..exceptions import SemanticError
from .visitor import SemanticVisitor
def position(node: ast.AST):
return node.lineno, node.col_offset
# TODO: __folder__
READ_ONLY = {'__file__'}
NodeParents = Dict[str, List[str]]
class Semantics(SemanticVisitor):
def __init__(self, definitions: Definitions, builtins: Iterable[str]):
self.messages = defaultdict(lambda: defaultdict(set))
# scopes
self._builtins = builtins
self._global_scope: Dict[str, MarkedValue] = {}
self._statement_names: Dict[GlobalStatement, List[str]] = defaultdict(list)
self._local_scopes: List[Dict[str, Marked]] = []
marks = {}
for definition in definitions:
self._global_scope[definition.name] = marks.setdefault(
definition.statement, MarkedValue(definition.statement))
self._statement_names[definition.statement].append(definition.name)
*pos, source = definition.statement.position
if definition.name in READ_ONLY:
self.add_message('The value is read-only', f'"{definition.name}" at %d:%d' % tuple(pos), source)
self._statement_names = dict(self._statement_names)
# tracking
self._global_statement: Optional[GlobalStatement] = None
# TODO: use ordered set
self.parents: NodeParents = {d.name: [] for d in definitions}
# analysis
for statement in marks:
self.visit(statement)
@staticmethod
def format(message, elements):
message += ':\n'
for source, item in elements.items():
message += ' in %s\n ' % source
message += ', '.join(item)
message += '\n'
return message
def check(self):
message = ''
for msg, elements in self.messages.items():
message += self.format(msg, elements)
if message:
raise SemanticError(message)
def add_message(self, message, content, source=None):
# TODO: move line info here?
source = source or self._global_statement.source_path
self.messages[message][source].add(content)
# scope management
def enter_scope(self, names: Iterable[str], visited: Iterable[str] = ()):
scope = {}
for name in names:
scope[name] = Marked(VisitState.Undefined)
for name in visited:
scope[name] = Marked(VisitState.Defined)
self._local_scopes.append(scope)
def leave_scope(self):
self._local_scopes.pop()
def enter(self, name: str):
if self._local_scopes:
value = self._local_scopes[-1][name]
# allow multiple definitions
if value.state is not VisitState.Defined:
value.enter()
else:
self._global_scope[name].enter()
def leave(self, name: str):
if self._local_scopes:
value = self._local_scopes[-1][name]
# allow multiple definitions
if value.state is not VisitState.Defined:
value.leave()
else:
self._global_scope[name].leave()
def generic_visit(self, node: ast.AST, *args, **kwargs):
self.add_message('This syntactic structure is not supported',
f'{type(node).__name__} at %d:%d' % position(node))
# the most important part - variable resolving
def visit_name(self, node: ast.Name):
assert isinstance(node.ctx, ast.Load), node.ctx
name = node.id
if name == IGNORE_NAME:
self.add_message(f'The name "{IGNORE_NAME}" can only be used as wildcard during unpacking',
'at %d:%d' % position(node))
# local scopes
for level, scope in enumerate(reversed(self._local_scopes)):
if name in scope:
value = scope[name]
# allow late binding
if level == 0 and value.state is not VisitState.Defined:
self.add_message('Local variables referenced before being defined',
'"' + name + '" at %d:%d' % position(node))
return
# global scope
if name in self._global_scope:
value = self._global_scope[name]
if value.state is VisitState.Defining:
self.add_message('Values are referenced before being completely defined (cyclic dependency)',
'"' + name + '" at %d:%d' % position(node))
for current in self._statement_names[self._global_statement]:
self.parents[current].append(name)
return
# builtins
if name not in self._builtins:
self.add_message('Undefined names found', name)
# global definitions
def visit_global_assign(self, statement: GlobalAssign):
assert self._global_statement is None
self._global_statement = statement
# we can just pick the first name - the rest will enter the same state automatically
self.enter(self._statement_names[statement][0])
self.visit(statement.node.value)
self.leave(self._statement_names[statement][0])
self._global_statement = None
def visit_global_function(self, statement: GlobalFunction):
assert self._global_statement is None
self._global_statement = statement
self.visit(statement.node)
self._global_statement = None
def visit_global_import(self, statement: GlobalImport):
self.enter(statement.name)
self.leave(statement.name)
visit_global_import_from = visit_global_import
# local definitions
def visit_assign(self, node: ast.Assign):
names = extract_assign_targets(node.targets)
for name in names:
self.enter(name)
self.visit(node.value)
for name in names:
self.leave(name)
def visit_function_def(self, node: ast.FunctionDef):
self.enter(node.name)
self.visit(node.args)
# TODO: type annotations?
self._iterate_nodes(node.decorator_list)
self.leave(node.name)
# ignore docstring
body = node.body
if isinstance(body[0], ast.Expr) and isinstance(body[0].value, ast.Str):
body = body[1:]
self.enter_scope(LocalsGatherer.gather(body), self._gather_arg_names(node.args))
self._iterate_nodes(body)
self.leave_scope()
# other stuff that manages scope
def visit_lambda(self, node: ast.Lambda):
self.visit(node.args)
self.enter_scope([], self._gather_arg_names(node.args))
self.visit(node.body)
self.leave_scope()
def visit_list_comp(self, node):
for comp in node.generators:
self.visit(comp)
self.visit(node.elt)
for _ in node.generators:
self.leave_scope()
def visit_dict_comp(self, node):
for comp in node.generators:
self.visit(comp)
self.visit(node.key)
self.visit(node.value)
for _ in node.generators:
self.leave_scope()
visit_set_comp = visit_generator_exp = visit_list_comp
def visit_comprehension(self, node: ast.comprehension):
assert not getattr(node, 'is_async', False)
def get_names(target):
assert isinstance(target.ctx, ast.Store)
if isinstance(target, (ast.Tuple, ast.List)):
names = []
for elt in target.elts:
names.extend(get_names(elt))
return names
if isinstance(target, ast.Starred):
                return get_names(target.value)

assert isinstance(target, ast.Name), target
return [target.id]
self.visit(node.iter)
self.enter_scope({}, get_names(node.target))
for test in node.ifs:
self.visit(test)
# function-related stuff
def visit_return(self, node: ast.Return):
self._visit_valid(node.value)
@staticmethod
def _gather_arg_names(node: ast.arguments):
args = getattr(node, 'posonlyargs', []) + node.args + node.kwonlyargs
if node.vararg is not None:
args.append(node.vararg)
if node.kwarg is not None:
args.append(node.kwarg)
return [arg.arg for arg in args]
class VisitState(Enum):
Undefined, Defining, Defined = 0, 1, 2
class Marked:
def __init__(self, status: VisitState):
self.state = status
def enter(self):
assert self.state is VisitState.Undefined, self.state
self.state = VisitState.Defining
def leave(self):
assert self.state is not VisitState.Defined, self.state
self.state = VisitState.Defined
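# Illustrative sketch (assumption, not part of the original file): a Marked
# value walks Undefined -> Defining -> Defined, and the asserts guard against
# re-entering an already-defined name, e.g.
#
#   >>> m = Marked(VisitState.Undefined)
#   >>> m.enter(); m.state
#   <VisitState.Defining: 1>
#   >>> m.leave(); m.state
#   <VisitState.Defined: 2>
#   >>> m.enter()   # would raise AssertionError: the name is already Defined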
class MarkedValue(Marked):
def __init__(self, value):
super().__init__(VisitState.Undefined)
self.value = value
| 31.810345
| 112
| 0.617019
|
8f7eaf7904747d2e5876d8d13a725417e1040ffb
| 330
|
py
|
Python
|
flask/get_connection.py
|
Artelys/Safer-Roads
|
ba9eb4b2c0f02c40142caa612ed7998c7ee01155
|
[
"MIT"
] | null | null | null |
flask/get_connection.py
|
Artelys/Safer-Roads
|
ba9eb4b2c0f02c40142caa612ed7998c7ee01155
|
[
"MIT"
] | null | null | null |
flask/get_connection.py
|
Artelys/Safer-Roads
|
ba9eb4b2c0f02c40142caa612ed7998c7ee01155
|
[
"MIT"
] | null | null | null |
from elasticsearch import Elasticsearch
def get_connection():
# Elastic Search parameters
#AUTH = ('elastic', 'changeme')
PORT = 9200
HOST = "elastic"
# change this to use AUTH
#return Elasticsearch([{'host': HOST, 'port': PORT}], http_auth=AUTH)
return Elasticsearch([{'host': HOST, 'port': PORT}])
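# Illustrative usage sketch (assumption, not part of the original file):
#
#   from get_connection import get_connection
#   es = get_connection()
#   if es.ping():
#       print(es.info())
#
# The module name "get_connection" is assumed from the file path
# flask/get_connection.py; HOST "elastic" looks like a docker-compose service
# name, so the connection only resolves from inside that network.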
| 27.5
| 73
| 0.654545
|
be7b978048d3967ab2b2894dc1b2bcb6a5cf1bd6
| 3,238
|
py
|
Python
|
python3/297.serialize-and-deserialize-binary-tree.245224010.ac.py
|
Diego-Zulu/leetcode_answers
|
ad435df1bd95fb2c6e17d2d9ff349282c98ee0f4
|
[
"MIT"
] | null | null | null |
python3/297.serialize-and-deserialize-binary-tree.245224010.ac.py
|
Diego-Zulu/leetcode_answers
|
ad435df1bd95fb2c6e17d2d9ff349282c98ee0f4
|
[
"MIT"
] | null | null | null |
python3/297.serialize-and-deserialize-binary-tree.245224010.ac.py
|
Diego-Zulu/leetcode_answers
|
ad435df1bd95fb2c6e17d2d9ff349282c98ee0f4
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=297 lang=python3
#
# [297] Serialize and Deserialize Binary Tree
#
# https://leetcode.com/problems/serialize-and-deserialize-binary-tree/description/
#
# algorithms
# Hard (46.18%)
# Likes: 2721
# Dislikes: 136
# Total Accepted: 300.9K
# Total Submissions: 651K
# Testcase Example: '[1,2,3,null,null,4,5]'
#
# Serialization is the process of converting a data structure or object into a
# sequence of bits so that it can be stored in a file or memory buffer, or
# transmitted across a network connection link to be reconstructed later in the
# same or another computer environment.
#
# Design an algorithm to serialize and deserialize a binary tree. There is no
# restriction on how your serialization/deserialization algorithm should work.
# You just need to ensure that a binary tree can be serialized to a string and
# this string can be deserialized to the original tree structure.
#
# Example:
#
#
# You may serialize the following tree:
#
# 1
# / \
# 2 3
# / \
# 4 5
#
# as "[1,2,3,null,null,4,5]"
#
#
# Clarification: The above format is the same as how LeetCode serializes a
# binary tree. You do not necessarily need to follow this format, so please be
# creative and come up with different approaches yourself.
#
# Note: Do not use class member/global/static variables to store states. Your
# serialize and deserialize algorithms should be stateless.
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import collections  # deque is used by Codec.serialize/deserialize below

LEFT = 0
RIGHT = 1
POSITIONS = [LEFT, RIGHT]
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
bfs = collections.deque([root])
serial_builder = []
while len(bfs) > 0:
curr = bfs.popleft()
if curr is not None:
serial_builder.append(str(curr.val))
bfs.append(curr.left)
bfs.append(curr.right)
serial_builder.append(',')
while len(serial_builder) > 0 and serial_builder[-1] == ',':
serial_builder.pop()
return ''.join(reversed(serial_builder))
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
deserial = data.split(',')
root = TreeNode(-1)
bfs = collections.deque([(root, RIGHT)])
while len(deserial) > 0:
curr_serial_val = deserial.pop()
curr_parent, insert_pos = bfs.popleft()
if curr_serial_val:
curr_val = int(curr_serial_val)
new_node = TreeNode(curr_val)
if insert_pos == LEFT:
curr_parent.left = new_node
else:
curr_parent.right = new_node
for p in POSITIONS:
bfs.append((new_node, p))
return root.right
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
# @lc code=end
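# Illustrative round-trip sketch (assumption, not part of the original file;
# TreeNode is provided by the LeetCode runtime, see the commented definition
# above):
#
#   codec = Codec()
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   root.right.left, root.right.right = TreeNode(4), TreeNode(5)
#   data = codec.serialize(root)        # tokens are stored in reverse order
#   same = codec.deserialize(data)
#   assert codec.serialize(same) == data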
| 29.436364
| 82
| 0.616121
|
7764b3453c5361fc2f4041c376081f37d7660bc1
| 577
|
py
|
Python
|
atcoder/abc151C_welcome_to_atcoder.py
|
da-edra/kyopro
|
ad531d15bcccf6aafdaaef3cc69db850b0f7c471
|
[
"BSD-3-Clause"
] | 2
|
2020-08-31T17:19:07.000Z
|
2021-01-08T21:35:48.000Z
|
atcoder/abc151C_welcome_to_atcoder.py
|
edglaz/kyopro
|
b8ac4f6873418ad20ad417e46d731c35a8062c0d
|
[
"BSD-3-Clause"
] | null | null | null |
atcoder/abc151C_welcome_to_atcoder.py
|
edglaz/kyopro
|
b8ac4f6873418ad20ad417e46d731c35a8062c0d
|
[
"BSD-3-Clause"
] | null | null | null |
# unihernandez22
# https://atcoder.jp/contests/abc151/tasks/abc151_c
# implementation
class Counter(dict):
def __missing__(self, i):
return 0
class BoolDict(dict):
def __missing__(self, i):
return False
n, m = map(int, input().split())
passed = 0
penalties = Counter()
ac = BoolDict()
for i in range(m):
p, s = input().split()
if not ac[p]:
if s == "WA": penalties[p] += 1
elif s == "AC":
passed += 1
ac[p] = True
x = 0
for i in penalties:
if ac[i]:
x += penalties[i]
print(passed, x)
| 16.970588
| 51
| 0.559792
|
e5cc8267025203629fd31151aae4bbbe6db3045f
| 5,016
|
py
|
Python
|
nipype/interfaces/camino2trackvis/convert.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | 1
|
2018-09-09T14:47:04.000Z
|
2018-09-09T14:47:04.000Z
|
nipype/interfaces/camino2trackvis/convert.py
|
MarcCote/nipype
|
b4014e1d87509f35242f0547f51d2b8962f83cbe
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/camino2trackvis/convert.py
|
MarcCote/nipype
|
b4014e1d87509f35242f0547f51d2b8962f83cbe
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from nipype.interfaces.base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File
from nipype.utils.filemanip import split_filename
import os
"""Provides interfaces to various commands provided by Camino-Trackvis """
class Camino2TrackvisInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='-i %s', mandatory=True, position=1,
desc='The input .Bfloat (camino) file.')
out_file = File(argstr='-o %s', genfile=True, mandatory=False, position=2,
desc='The filename to which to write the .trk (trackvis) file.')
min_length = traits.Float(argstr='-l %d', mandatory=False, position=3,
units='mm', desc='The minimum length of tracts to output')
data_dims = traits.List(traits.Int, argstr='-d %s', sep=',',
mandatory=True, position=4, minlen=3, maxlen=3,
desc='Three comma-separated integers giving the number of voxels along each dimension of the source scans.')
voxel_dims = traits.List(traits.Float, argstr='-x %s', sep=',',
mandatory=True, position=5, minlen=3, maxlen=3,
desc='Three comma-separated numbers giving the size of each voxel in mm.')
#Change to enum with all combinations? i.e. LAS, LPI, RAS, etc..
voxel_order = File(argstr='--voxel-order %s', mandatory=True, position=6,
desc='Set the order in which various directions were stored.\
Specify with three letters consisting of one each \
from the pairs LR, AP, and SI. These stand for Left-Right, \
Anterior-Posterior, and Superior-Inferior. \
Whichever is specified in each position will \
be the direction of increasing order. \
Read coordinate system from a NIfTI file.')
nifti_file = File(argstr='--nifti %s', exists=True,
mandatory=False, position=7, desc='Read coordinate system from a NIfTI file.')
class Camino2TrackvisOutputSpec(TraitedSpec):
trackvis = File(exists=True, desc='The filename to which to write the .trk (trackvis) file.')
class Camino2Trackvis(CommandLine):
""" Wraps camino_to_trackvis from Camino-Trackvis
Convert files from camino .Bfloat format to trackvis .trk format.
Example
-------
>>> import nipype.interfaces.camino2trackvis as cam2trk
>>> c2t = cam2trk.Camino2Trackvis()
>>> c2t.inputs.in_file = 'data.Bfloat'
>>> c2t.inputs.out_file = 'streamlines.trk'
>>> c2t.inputs.min_length = 30
>>> c2t.inputs.data_dims = [128, 104, 64]
>>> c2t.inputs.voxel_dims = [2.0, 2.0, 2.0]
>>> c2t.inputs.voxel_order = 'LAS'
>>> c2t.run() # doctest: +SKIP
"""
_cmd = 'camino_to_trackvis'
input_spec=Camino2TrackvisInputSpec
output_spec=Camino2TrackvisOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['trackvis'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + '.trk'
class Trackvis2CaminoInputSpec(CommandLineInputSpec):
""" Wraps trackvis_to_camino from Camino-Trackvis
    Convert files from trackvis .trk format to camino .Bfloat format.
Example
-------
>>> import nipype.interfaces.camino2trackvis as cam2trk
>>> t2c = cam2trk.Trackvis2Camino()
>>> t2c.inputs.in_file = 'streamlines.trk'
>>> t2c.inputs.out_file = 'streamlines.Bfloat'
>>> t2c.run() # doctest: +SKIP
"""
in_file = File(exists=True, argstr='-i %s',
mandatory=True, position=1,
desc='The input .trk (trackvis) file.')
out_file = File(argstr='-o %s', genfile=True,
mandatory=False, position=2, desc='The filename to which to write the .Bfloat (camino).')
append_file = File(exists=True, argstr='-a %s',
                       mandatory=False, position=2, desc='A file to which to append the .Bfloat data. ')
class Trackvis2CaminoOutputSpec(TraitedSpec):
camino = File(exists=True, desc='The filename to which to write the .Bfloat (camino).')
class Trackvis2Camino(CommandLine):
_cmd = 'trackvis_to_camino'
input_spec=Trackvis2CaminoInputSpec
output_spec=Trackvis2CaminoOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['camino'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + '.Bfloat'
| 37.714286
| 116
| 0.666069
|
f875dffef493cc08efb4fecc1f0b1e2f87c7eadb
| 19,894
|
py
|
Python
|
app-sg/utils_tf_train.py
|
AleksanderZawisza/Human-Emotion-Classification-Kit
|
13c71a65898ab3f539cae58f4b42960a596968cf
|
[
"MIT"
] | 1
|
2022-01-18T18:38:53.000Z
|
2022-01-18T18:38:53.000Z
|
app-sg/utils_tf_train.py
|
AleksanderZawisza/Human-Emotion-Classification-Kit
|
13c71a65898ab3f539cae58f4b42960a596968cf
|
[
"MIT"
] | null | null | null |
app-sg/utils_tf_train.py
|
AleksanderZawisza/Human-Emotion-Classification-Kit
|
13c71a65898ab3f539cae58f4b42960a596968cf
|
[
"MIT"
] | 1
|
2022-01-18T18:40:38.000Z
|
2022-01-18T18:40:38.000Z
|
import tensorflow as tf
import numpy as np
import random
import cv2
import os
import PySimpleGUI as sg
import dlib
from keras import backend as K
from sklearn.metrics import roc_auc_score, classification_report
class tf_flags_StopSave():
def __init__(self, stopped=False, save=False):
self.stopped = stopped
self.save = save
class StopTrainingOnWindowCloseAndPause(tf.keras.callbacks.Callback):
""" NewCallback descends from Callback
"""
def __init__(self, window, tf_flags):
""" Save params in constructor
"""
self.window = window
self.tf_flags = tf_flags
def on_train_batch_end(self, batch, logs=None):
event, values = self.window.read(0)
if event == "Exit" or event == sg.WIN_CLOSED or event == '-CANCEL_B-' or event == '-SAVE-':
self.model.stop_training = True
if event == '-CANCEL_B-':
self.tf_flags.stopped = True
if event == '-SAVE-':
self.tf_flags.stopped = True
self.tf_flags.save = True
# def all_scores(labels, preds):
# report = classification_report(labels, preds, digits=3, output_dict=True)
# acc_sc = report['accuracy']
# f1_sc = report['macro avg']['f1-score']
# recall_sc = report['macro avg']['recall']
# precision_sc = report['macro avg']['precision']
# return acc_sc, f1_sc, recall_sc, precision_sc
class CategoricalPrecision(tf.keras.metrics.Metric):
def __init__(self, name='precision', **kwargs):
super().__init__(name=name, **kwargs)
self.prec = self.add_weight(name='prec', initializer='zeros')
self.precision_fn0 = tf.keras.metrics.Precision(class_id=0)
self.precision_fn1 = tf.keras.metrics.Precision(class_id=1)
self.precision_fn2 = tf.keras.metrics.Precision(class_id=2)
self.precision_fn3 = tf.keras.metrics.Precision(class_id=3)
self.precision_fn4 = tf.keras.metrics.Precision(class_id=4)
self.precision_fn5 = tf.keras.metrics.Precision(class_id=5)
self.precision_fn6 = tf.keras.metrics.Precision(class_id=6)
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = K.one_hot(K.argmax(y_pred), num_classes=7)
p0 = self.precision_fn0(y_true, y_pred)
p1 = self.precision_fn1(y_true, y_pred)
p2 = self.precision_fn2(y_true, y_pred)
p3 = self.precision_fn3(y_true, y_pred)
p4 = self.precision_fn4(y_true, y_pred)
p5 = self.precision_fn5(y_true, y_pred)
p6 = self.precision_fn6(y_true, y_pred)
# since prec is a variable, we use assign
self.prec.assign((p0+p1+p2+p3+p4+p5+p6)/7)
def result(self):
return self.prec
def reset_states(self):
# we also need to reset the state of the precision and recall objects
self.precision_fn0.reset_states()
self.precision_fn1.reset_states()
self.precision_fn2.reset_states()
self.precision_fn3.reset_states()
self.precision_fn4.reset_states()
self.precision_fn5.reset_states()
self.precision_fn6.reset_states()
self.prec.assign(0)
class CategoricalRecall(tf.keras.metrics.Metric):
def __init__(self, name='recall', **kwargs):
super().__init__(name=name, **kwargs)
self.rec = self.add_weight(name='rec', initializer='zeros')
self.recall_fn0 = tf.keras.metrics.Recall(class_id=0)
self.recall_fn1 = tf.keras.metrics.Recall(class_id=1)
self.recall_fn2 = tf.keras.metrics.Recall(class_id=2)
self.recall_fn3 = tf.keras.metrics.Recall(class_id=3)
self.recall_fn4 = tf.keras.metrics.Recall(class_id=4)
self.recall_fn5 = tf.keras.metrics.Recall(class_id=5)
self.recall_fn6 = tf.keras.metrics.Recall(class_id=6)
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = K.one_hot(K.argmax(y_pred), num_classes=7)
r0 = self.recall_fn0(y_true, y_pred)
r1 = self.recall_fn1(y_true, y_pred)
r2 = self.recall_fn2(y_true, y_pred)
r3 = self.recall_fn3(y_true, y_pred)
r4 = self.recall_fn4(y_true, y_pred)
r5 = self.recall_fn5(y_true, y_pred)
r6 = self.recall_fn6(y_true, y_pred)
# since rec is a variable, we use assign
self.rec.assign((r0+r1+r2+r3+r4+r5+r6)/7)
def result(self):
return self.rec
def reset_states(self):
# we also need to reset the state of the precision and recall objects
self.recall_fn0.reset_states()
self.recall_fn1.reset_states()
self.recall_fn2.reset_states()
self.recall_fn3.reset_states()
self.recall_fn4.reset_states()
self.recall_fn5.reset_states()
self.recall_fn6.reset_states()
self.rec.assign(0)
class F1_Score(tf.keras.metrics.Metric):
def __init__(self, name='f1_score', **kwargs):
super().__init__(name=name, **kwargs)
self.f1 = self.add_weight(name='f1', initializer='zeros')
# self.precision_fn = tf.keras.metrics.Precision(class_id=6)
# self.recall_fn = tf.keras.metrics.Recall(class_id=6)
self.precision_fn = CategoricalPrecision()
self.recall_fn = CategoricalRecall()
def update_state(self, y_true, y_pred, sample_weight=None):
p = self.precision_fn(y_true, y_pred)
r = self.recall_fn(y_true, y_pred)
# since f1 is a variable, we use assign
self.f1.assign(2 * ((p * r) / (p + r + 1e-6)))
def result(self):
return self.f1
def reset_states(self):
# we also need to reset the state of the precision and recall objects
self.precision_fn.reset_states()
self.recall_fn.reset_states()
self.f1.assign(0)
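# Illustrative sketch (assumption, not part of the original file): these
# metrics are macro-averaged over the 7 emotion classes, so they attach to any
# 7-class softmax model just like the built-in Keras metrics:
#
#   model.compile(optimizer='adam',
#                 loss='categorical_crossentropy',
#                 metrics=[CategoricalPrecision(), CategoricalRecall(),
#                          F1_Score()])
#
# F1 is then 2 * P * R / (P + R) computed from the two macro-averaged values
# (the 1e-6 in update_state only guards against division by zero).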
def facial_landmarks(image, predictor):
# image = cv2.imread(filepath)
face_rects = [dlib.rectangle(left=1, top=1, right=len(image) - 1, bottom=len(image) - 1)]
face_landmarks = np.matrix([[p.x, p.y] for p in predictor(image, face_rects[0]).parts()])
return face_landmarks
def conv_block_r9(in_channels, out_channels, pool=False):
inputs = tf.keras.Input((None, None, in_channels))
results = tf.keras.layers.Conv2D(out_channels, kernel_size=(3, 3), padding='same')(inputs)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.ReLU()(results)
if pool: results = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(results)
return tf.keras.Model(inputs=inputs, outputs=results)
def ResNet9(**kwargs):
inputs = tf.keras.Input((None, None, 3))
results = conv_block_r9(in_channels=3, out_channels=64)(inputs)
results = conv_block_r9(64, 64, pool=True)(results)
shortcut = conv_block_r9(64, 64, pool=True)(results)
results = conv_block_r9(64, 32)(shortcut)
results = conv_block_r9(32, 64)(results)
results = tf.keras.layers.Add()([results, shortcut])
results = tf.keras.layers.Dropout(0.5)(results)
shortcut = conv_block_r9(64, 64, pool=True)(results)
results = conv_block_r9(64, 32)(shortcut)
results = conv_block_r9(32, 64)(results)
results = tf.keras.layers.Add()([results, shortcut])
results = tf.keras.layers.Dropout(0.5)(results)
shortcut = conv_block_r9(64, 64, pool=True)(results)
results = conv_block_r9(64, 32)(shortcut)
results = conv_block_r9(32, 64)(results)
results = tf.keras.layers.Add()([results, shortcut])
results = tf.keras.layers.Dropout(0.5)(results)
results = tf.keras.layers.MaxPool2D(pool_size=(6, 6))(results)
results = tf.keras.layers.Flatten()(results)
return tf.keras.Model(inputs=inputs, outputs=results, **kwargs)
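# Illustrative sketch (assumption, not part of the original file): ResNet9 is a
# feature extractor ending in Flatten, and EmotionsRN9 below adds the 7-way
# softmax head. For the 197x197x3 inputs used elsewhere in this file:
#
#   backbone = ResNet9(name='resnet9')
#   features = backbone(tf.zeros((1, 197, 197, 3)))   # shape (1, num_features)
#
# The exact flattened width depends on the pooling stack and is not asserted
# here.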
def EmotionsRN9():
inputs = tf.keras.Input((197, 197, 3))
results = ResNet9(name='resnet9')(inputs)
results = tf.keras.layers.Dense(7, activation=tf.keras.activations.softmax)(results)
return tf.keras.Model(inputs=inputs, outputs=results)
def ResnetBlock(in_channels, out_channels, down_sample=False):
inputs = tf.keras.Input((None, None, in_channels)) # inputs.shape = (batch, height, width, in_channels)
if down_sample:
shortcut = tf.keras.layers.Conv2D(out_channels, kernel_size=(1, 1), strides=(2, 2), padding='same')(inputs)
shortcut = tf.keras.layers.BatchNormalization()(shortcut)
else:
shortcut = inputs
results = tf.keras.layers.Conv2D(out_channels, kernel_size=(3, 3), strides=(2, 2) if down_sample else (1, 1),
padding='same')(inputs)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.ReLU()(results)
results = tf.keras.layers.Conv2D(out_channels, kernel_size=(3, 3), strides=(1, 1), padding='same')(results)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.Add()([results, shortcut])
results = tf.keras.layers.ReLU()(results)
return tf.keras.Model(inputs=inputs, outputs=results)
def ResNet18(**kwargs):
inputs = tf.keras.Input((None, None, 3))
results = tf.keras.layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same')(inputs)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.ReLU()(results)
results = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 128, down_sample=True)(results)
results = ResnetBlock(128, 128)(results)
results = ResnetBlock(128, 256, down_sample=True)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 512, down_sample=True)(results)
results = ResnetBlock(512, 512)(results)
results = tf.keras.layers.GlobalAveragePooling2D()(results) # results.shape = (batch, 512)
return tf.keras.Model(inputs=inputs, outputs=results, **kwargs)
def EmotionsRN18():
inputs = tf.keras.Input((197, 197, 3))
results = ResNet18(name='resnet18')(inputs)
results = tf.keras.layers.Dense(7, activation=tf.keras.activations.softmax)(results)
return tf.keras.Model(inputs=inputs, outputs=results)
def ResNet34(**kwargs):
inputs = tf.keras.Input((None, None, 3))
results = tf.keras.layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same')(inputs)
results = tf.keras.layers.BatchNormalization()(results)
results = tf.keras.layers.ReLU()(results)
results = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 64)(results)
results = ResnetBlock(64, 128, down_sample=True)(results)
results = ResnetBlock(128, 128)(results)
results = ResnetBlock(128, 128)(results)
results = ResnetBlock(128, 128)(results)
results = ResnetBlock(128, 256, down_sample=True)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 256)(results)
results = ResnetBlock(256, 512, down_sample=True)(results)
results = ResnetBlock(512, 512)(results)
results = ResnetBlock(512, 512)(results)
results = tf.keras.layers.GlobalAveragePooling2D()(results) # results.shape = (batch, 512)
return tf.keras.Model(inputs=inputs, outputs=results, **kwargs)
def EmotionsRN34():
inputs = tf.keras.Input((197, 197, 3))
results = ResNet34(name='resnet34')(inputs)
results = tf.keras.layers.Dense(7, activation=tf.keras.activations.softmax)(results)
return tf.keras.Model(inputs=inputs, outputs=results)
# def facial_landmarks(image, predictor):
# # image = cv2.imread(filepath)
# face_rects = [dlib.rectangle(left=1, top=1, right=len(image) - 1, bottom=len(image) - 1)]
# face_landmarks = np.matrix([[p.x, p.y] for p in predictor(image, face_rects[0]).parts()])
# return face_landmarks
# predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
def load_filenames(directory):
emotions_dict = {"anger": 0, "disgust": 1, "fear": 2, "happiness": 3, "neutrality": 4, "sadness": 5, "surprise": 6}
samples = []
for emotion in emotions_dict:
path = directory + "/" + emotion
for file in os.listdir(path):
if file.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp')):
filepath = path + "/" + file
emotion_label = emotions_dict[emotion]
samples.append([filepath, emotion_label])
return samples
def rotate_image(image, deg):
rows, cols, c = image.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), deg, 1)
image = cv2.warpAffine(image, M, (cols, rows))
return image
def generator(samples, aug=False, batch_size=32, shuffle_data=True, resize=197, window=None):
"""
Yields the next training batch.
Suppose `samples` is an array [[image1_filename,label1], [image2_filename,label2],...].
"""
num_samples = len(samples)
while True: # Loop forever so the generator never terminates
random.shuffle(samples)
# Get index to start each batch: [0, batch_size, 2*batch_size, ..., max multiple of batch_size <= num_samples]
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = samples[offset:offset + batch_size]
# Initialise X_train and y_train arrays for this batch
X1 = []
# X2 = []
y = []
# For each example
for batch_sample in batch_samples:
# Load image (X) and label (y)
img_path = batch_sample[0]
label = batch_sample[1]
img = cv2.imread(img_path)
img = cv2.resize(img, (resize, resize))
                if aug: # augmentations
img = rotate_image(img, random.uniform(-10, 10))
# features = facial_landmarks(img, predictor)
img = img / 255
onehot = [0 for i in range(7)]
onehot[label] += 1
# apply any kind of preprocessing
# Add example to arrays
X1.append(img)
# X2.append(features)
y.append(onehot)
# Make sure they're numpy arrays (as opposed to lists)
X1 = np.array(X1)
# X2 = np.array(X2)
y = np.array(y)
if window:
print('', end='')
window.refresh()
# The generator-y part: yield the next training batch
# yield [X1, X2], y
yield X1, y
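# Illustrative usage sketch (assumption, not part of the original file; "model"
# stands for any compiled Keras model):
#
#   samples = load_filenames("path/to/train")        # [[filepath, label], ...]
#   train_gen = generator(samples, aug=True, batch_size=32)
#   model.fit(train_gen, steps_per_epoch=len(samples) // 32, epochs=5)
#
# Each yielded batch is (X1, y) with X1 of shape (batch, 197, 197, 3) scaled to
# [0, 1] and y one-hot encoded over the 7 emotion classes.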
def old_generator(samples, predictor, aug=False, batch_size=32, shuffle_data=True, resize=197, window=None):
"""
Yields the next training batch.
Suppose `samples` is an array [[image1_filename,label1], [image2_filename,label2],...].
"""
num_samples = len(samples)
while True: # Loop forever so the generator never terminates
random.shuffle(samples)
# Get index to start each batch: [0, batch_size, 2*batch_size, ..., max multiple of batch_size <= num_samples]
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = samples[offset:offset + batch_size]
# Initialise X_train and y_train arrays for this batch
X1 = []
X2 = []
y = []
# For each example
for batch_sample in batch_samples:
# Load image (X) and label (y)
img_path = batch_sample[0]
label = batch_sample[1]
img = cv2.imread(img_path)
img = cv2.resize(img, (resize, resize))
                if aug: # augmentations
img = rotate_image(img, random.uniform(-10, 10))
features = facial_landmarks(img, predictor)
img = img / 255
onehot = [0 for i in range(7)]
onehot[label] += 1
# apply any kind of preprocessing
# Add example to arrays
X1.append(img)
X2.append(features)
y.append(onehot)
# Make sure they're numpy arrays (as opposed to lists)
X1 = np.array(X1)
X2 = np.array(X2)
y = np.array(y)
if window:
print('', end='')
window.refresh()
# The generator-y part: yield the next training batch
yield [X1, X2], y
# yield X1, y
def image_generator(dataset, aug=False, BS=32, get_datagen=False):
if aug:
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1. / 255,
featurewise_center=False,
featurewise_std_normalization=False,
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1,
horizontal_flip=True)
else:
        datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
if get_datagen:
return datagen
return datagen.flow_from_directory(
dataset,
target_size=(197, 197),
color_mode='rgb',
shuffle=True,
class_mode='categorical',
batch_size=BS)
if __name__ == "__main__":
SGD_LEARNING_RATE = 0.01
ADAM_LEARNING_RATE = 0.001
SGD_DECAY = 0.0001
EPOCHS = 5
BS = 32
Resize_pixelsize = 197
sgd = tf.keras.optimizers.SGD(lr=SGD_LEARNING_RATE, momentum=0.9, decay=SGD_DECAY, nesterov=True)
optim = tf.keras.optimizers.Adam(lr=ADAM_LEARNING_RATE, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
print('loading models')
rn9 = EmotionsRN9()
print(rn9)
# rn18 = EmotionsRN18()
# print(rn18)
# rn34 = EmotionsRN34()
# print(rn34)
# rn50 = tf.keras.applications.ResNet50(weights=None, classes=7)
# rn101 = tf.keras.applications.ResNet101(weights=None, classes=7)
# rn152 = tf.keras.applications.ResNet152(weights=None, classes=7)
# print(rn50)
# rn34.save('rn34.h5')
rn9.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy',
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall'),
F1_Score(name='f1_score'),
tf.keras.metrics.AUC(name='auc_roc')])
samples_train = load_filenames("C:/Users/hp/Documents/data/train")
# samples_dev = load_filenames("C:/Users/hp/Documents/data/dev")
# samples_test = load_filenames("C:/Users/hp/Documents/data/test")
#
# train_generator = generator(samples_train, True)
# dev_generator = generator(samples_dev)
# test_generator = generator(samples_test)
train_generator = image_generator("C:/Users/hp/Documents/data/train", True)
metrics = {'loss': [], 'acc': [], 'precision': [], 'recall': [], 'f1_score': [], 'auc_roc': []}
# for ep in range(EPOCHS):
# history = rn9.fit_generator(
# generator=train_generator,
# # validation_data=dev_generator,
# steps_per_epoch=len(samples_train) // BS //100,
# # validation_steps=len(samples_dev) // BS,
# epochs=1,
# # use_multiprocessing=True
# )
# for key in metrics.keys():
# metrics[key].extend(history.history[key])
# print(metrics)
| 40.271255
| 119
| 0.628079
|
2987c5c6a6a15d841d7857f33acfddf4c18fb60e
| 754
|
py
|
Python
|
Chapter02/moviereviewsproject/moviereviews/urls.py
|
PacktPublishing/Django-4-for-the-Impatient
|
a7571124eb414fb0f8bcabe7ae23d64460a1a882
|
[
"MIT"
] | null | null | null |
Chapter02/moviereviewsproject/moviereviews/urls.py
|
PacktPublishing/Django-4-for-the-Impatient
|
a7571124eb414fb0f8bcabe7ae23d64460a1a882
|
[
"MIT"
] | null | null | null |
Chapter02/moviereviewsproject/moviereviews/urls.py
|
PacktPublishing/Django-4-for-the-Impatient
|
a7571124eb414fb0f8bcabe7ae23d64460a1a882
|
[
"MIT"
] | null | null | null |
"""moviereviews URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
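# Illustrative sketch (assumption, not part of the original project): wiring an
# app into this project would follow the URLconf docstring above, e.g.
#
#   from django.urls import include, path
#   urlpatterns = [
#       path('admin/', admin.site.urls),
#       path('', include('movie.urls')),
#   ]
#
# where 'movie.urls' is a hypothetical app URLconf, not something defined here.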
| 34.272727
| 77
| 0.710875
|
c0f869b52046328a3e938abc2d2cb9c2ae8535dc
| 2,860
|
py
|
Python
|
test/python/providers/test_fake_backends.py
|
dlyongemallo/qiskit-terra
|
2909eac4215e772440c35c5ee64197187894c719
|
[
"Apache-2.0"
] | 2
|
2020-12-26T21:12:30.000Z
|
2021-05-18T12:53:42.000Z
|
test/python/providers/test_fake_backends.py
|
dlyongemallo/qiskit-terra
|
2909eac4215e772440c35c5ee64197187894c719
|
[
"Apache-2.0"
] | 1
|
2020-03-29T19:57:14.000Z
|
2020-03-29T21:49:25.000Z
|
test/python/providers/test_fake_backends.py
|
dlyongemallo/qiskit-terra
|
2909eac4215e772440c35c5ee64197187894c719
|
[
"Apache-2.0"
] | 1
|
2020-07-13T17:56:46.000Z
|
2020-07-13T17:56:46.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-class-docstring,missing-function-docstring
# pylint: disable=missing-module-docstring
import operator
from test import combine
from ddt import ddt, data
from qiskit.circuit import QuantumCircuit
from qiskit.execute import execute
from qiskit.test.base import QiskitTestCase
from qiskit.test.mock import FakeProvider
from qiskit.test.mock.fake_backend import HAS_AER
FAKE_PROVIDER = FakeProvider()
@ddt
class TestFakeBackends(QiskitTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.circuit = QuantumCircuit(2)
cls.circuit.h(0)
cls.circuit.h(1)
cls.circuit.h(0)
cls.circuit.h(1)
cls.circuit.x(0)
cls.circuit.x(1)
cls.circuit.measure_all()
@combine(backend=[be for be in FAKE_PROVIDER.backends()
if be.configuration().num_qubits > 1],
optimization_level=[0, 1, 2, 3])
def test_circuit_on_fake_backend(self, backend, optimization_level):
if not HAS_AER and backend.configuration().num_qubits > 20:
self.skipTest(
'Unable to run fake_backend %s without qiskit-aer' %
backend.configuration().backend_name)
job = execute(self.circuit, backend,
optimization_level=optimization_level,
seed_simulator=42, seed_transpiler=42)
result = job.result()
counts = result.get_counts()
max_count = max(counts.items(), key=operator.itemgetter(1))[0]
self.assertEqual(max_count, '11')
@data(*FAKE_PROVIDER.backends())
def test_to_dict_properties(self, backend):
properties = backend.properties()
if properties:
self.assertIsInstance(backend.properties().to_dict(), dict)
else:
self.assertTrue(backend.configuration().simulator)
@data(*FAKE_PROVIDER.backends())
def test_to_dict_configuration(self, backend):
configuration = backend.configuration()
self.assertIsInstance(configuration.to_dict(), dict)
@data(*FAKE_PROVIDER.backends())
def test_defaults_to_dict(self, backend):
if hasattr(backend, 'defaults'):
self.assertIsInstance(backend.defaults().to_dict(), dict)
else:
self.skipTest('Backend %s does not have defaults' % backend)
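# Illustrative sketch (assumption, not part of the original test module): the
# same check can be run by hand against a single fake backend, e.g.
#
#   qc = QuantumCircuit(2)
#   qc.x([0, 1])
#   qc.measure_all()
#   backend = FAKE_PROVIDER.get_backend('fake_vigo')   # hypothetical choice
#   counts = execute(qc, backend, seed_simulator=42,
#                    seed_transpiler=42).result().get_counts()
#
# 'fake_vigo' is only an example name; any >1 qubit backend from FAKE_PROVIDER
# works, and without qiskit-aer the larger backends cannot be simulated.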
| 34.457831
| 77
| 0.676923
|
2f631c2bb8b095e8f46a31c8409e587f71c8a96b
| 4,264
|
py
|
Python
|
web_project_architectural_designs/settings.py
|
Yordanova-arch/web_project_architectural_designs
|
7b379d760287ba920a20f2beb637e4e43747be4a
|
[
"MIT"
] | null | null | null |
web_project_architectural_designs/settings.py
|
Yordanova-arch/web_project_architectural_designs
|
7b379d760287ba920a20f2beb637e4e43747be4a
|
[
"MIT"
] | null | null | null |
web_project_architectural_designs/settings.py
|
Yordanova-arch/web_project_architectural_designs
|
7b379d760287ba920a20f2beb637e4e43747be4a
|
[
"MIT"
] | null | null | null |
"""
Django settings for web_project_architectural_designs project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from os.path import join
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
from django.urls import reverse_lazy
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*!a31_8$9_8&w%5gfg5_-71(gg)s9-e0&(r-dhlf&idql#8=pm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'common',
'designs',
'designs_auth',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web_project_architectural_designs.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web_project_architectural_designs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'designs',
'USER': 'postgres',
'PASSWORD': 'nedkorachev',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = ''
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = join(BASE_DIR, 'media')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django.db': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
},
}
LOGIN_REDIRECT_URL = reverse_lazy('index')
LOGIN_URL = reverse_lazy('login user')
LOGOUT_REDIRECT_URL = reverse_lazy('index')
| 23.821229
| 91
| 0.651501
|
792be766e0c5e2a899b1ea6890f3a81a67f524c5
| 853
|
py
|
Python
|
src/main.py
|
nick-cheatwood7/spotify-recommend-python
|
5d00756d1d4ff973f74e73841ae5640742c7745e
|
[
"MIT"
] | null | null | null |
src/main.py
|
nick-cheatwood7/spotify-recommend-python
|
5d00756d1d4ff973f74e73841ae5640742c7745e
|
[
"MIT"
] | null | null | null |
src/main.py
|
nick-cheatwood7/spotify-recommend-python
|
5d00756d1d4ff973f74e73841ae5640742c7745e
|
[
"MIT"
] | null | null | null |
# Import needed packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Import custom module
import spotify as sp
def main():
# Init seaborn
sns.set()
# Read in data
data = pd.read_csv("./data/tracks.csv")
# Get metrics on the data
# data.info()
# Test for empty values
data.isnull().sum()
# Clean the data
data["name"].fillna("Unknown Title", inplace=True)
# Test for empty values again
data.isnull().sum()
# Init the recommender instance
recommender = sp.SpotifyRecommender(data)
# Get recommendation by Track title
print(recommender.get_recommendations("Re: Stacks", 20))
# Get recommendation by Track Id
print(recommender.get_recommendations_byId("2LthqyP0MLhGUBICwR1535", 20))
if __name__ == "__main__":
main()
| 20.309524
| 77
| 0.682298
|
cfafd615d5bc5bc7612c7fad1ac4434d25657eaa
| 35,737
|
py
|
Python
|
reviewboard/datagrids/columns.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 921
|
2015-01-01T15:26:28.000Z
|
2022-03-29T11:30:38.000Z
|
reviewboard/datagrids/columns.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 5
|
2015-03-17T18:57:47.000Z
|
2020-10-02T13:24:31.000Z
|
reviewboard/datagrids/columns.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 285
|
2015-01-12T06:24:36.000Z
|
2022-03-29T11:03:50.000Z
|
from __future__ import unicode_literals
from django.core.urlresolvers import NoReverseMatch
from django.template.defaultfilters import date
from django.utils import six
from django.utils.html import (conditional_escape, escape, format_html,
format_html_join)
from django.utils.safestring import mark_safe
from django.utils.six.moves import reduce
from django.utils.translation import ugettext_lazy as _, ugettext
from djblets.datagrid.grids import CheckboxColumn, Column, DateTimeColumn
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.accounts.models import Profile, ReviewRequestVisit
from reviewboard.avatars import avatar_services
from reviewboard.reviews.models import ReviewRequest
from reviewboard.reviews.templatetags.reviewtags import render_star
from reviewboard.site.urlresolvers import local_site_reverse
class BaseStarColumn(Column):
"""Indicates if an item is starred.
This is the base class for all columns that deal with starring items.
The star is interactive, allowing the user to star or unstar the item.
"""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(BaseStarColumn, self).__init__(
image_class='rb-icon rb-icon-star-on',
image_alt=_('Starred'),
detailed_label=_('Starred'),
shrink=True,
*args, **kwargs)
def setup_state(self, state):
"""Set up the state for this column."""
state.all_starred = set()
def render_data(self, state, obj):
"""Return the rendered contents of the column."""
obj.starred = obj.pk in state.all_starred
return render_star(state.datagrid.request.user, obj)
class UsernameColumn(Column):
"""A column for showing a username and the user's avatar.
The username and avatar will link to the user's profile page and will
show basic profile information when hovering over the link.
When constructing an instance of this column, the relation between the
object being represented in the datagrid and the user can be specified
as a tuple or list of field names forming a path to the user field.
"""
AVATAR_SIZE = 24
def __init__(self, label=_('Username'), user_relation=[], *args, **kwargs):
"""Initialize the column.
Args:
label (unicode, optional):
The label for the column.
user_relation (list of unicode, optional):
A list of fields forming a relation path to the user. This can
be left blank if representing the user.
*args (tuple):
Additional positional arguments to pass to the column.
**kwargs (dict):
Additional keyword arguments to pass to the column.
"""
self._user_relation = user_relation
super(UsernameColumn, self).__init__(
label=label,
db_field='__'.join(user_relation + ['username']),
css_class='submitter-column',
shrink=True,
sortable=True,
link=True,
link_func=self._link_user,
link_css_class='user',
*args, **kwargs)
def get_user(self, obj):
"""Return the user associated with this object.
Args:
obj (object):
The object provided to the column.
Returns:
django.contrib.auth.models.User:
The resulting user.
"""
# Look up the user in the provided obj by traversing the relation.
# If _user_relation is empty, then obj is the user.
user = obj
for field_name in self._user_relation:
user = getattr(user, field_name)
return user
def render_data(self, state, obj):
"""Render the user's name and avatar as HTML.
Args:
state (djblets.datagrid.grids.StatefulColumn):
The column state.
obj (django.db.models.Model):
The object being rendered in the datagrid.
Returns:
django.utils.safestring.SafeText:
The HTML for the column.
"""
user = self.get_user(obj)
        # If avatars are enabled, we'll want to include that in the resulting
# HTML.
siteconfig = SiteConfiguration.objects.get_current()
request = state.datagrid.request
avatar_html = ''
if siteconfig.get(avatar_services.AVATARS_ENABLED_KEY):
avatar_service = avatar_services.for_user(user)
if avatar_service:
avatar_html = avatar_service.render(request=request,
user=user,
size=self.AVATAR_SIZE)
# Render the link to the user page, using the avatar and username.
username = user.username
return format_html('{0}{1}', avatar_html, username)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset.
This will select fields for the user and the user's profile, to
help with query performance.
Args:
state (djblets.datagrid.grids.StatefulColumn):
The column state.
queryset (django.db.models.query.QuerySet):
The queryset to augment.
Returns:
django.db.models.query.QuerySet:
The resulting queryset.
"""
user_field = '__'.join(self._user_relation)
if user_field:
fields = [user_field, '%s__profile' % user_field]
else:
fields = ['profile']
return queryset.select_related(*fields)
def _link_user(self, state, obj, *args):
"""Return the URL to link the user associated with this object.
Args:
state (djblets.datagrid.grids.StatefulColumn, unused):
The column state.
obj (object):
The object provided to the column.
*args (tuple):
Additional keyword arguments provided to the method.
Returns:
unicode:
The URL for the user.
"""
return local_site_reverse(
'user',
request=state.datagrid.request,
kwargs={
'username': self.get_user(obj).username,
})
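# Illustrative sketch (assumption, not part of the original module): the
# user_relation path lets the same column work for objects that merely point
# at a user, e.g. a review request's submitter:
#
#   class SubmitterColumn(UsernameColumn):
#       def __init__(self, *args, **kwargs):
#           super(SubmitterColumn, self).__init__(
#               label=_('Submitter'),
#               user_relation=['submitter'],
#               *args, **kwargs)
#
# This subclass is only meant to show how user_relation=['submitter'] resolves
# obj.submitter; Review Board's real submitter column lives elsewhere.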
class FullNameColumn(Column):
"""Shows the full name of the user when appropriate."""
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset.
This will select fields for the user and the user's profile, to
help with query performance.
Args:
state (djblets.datagrid.grids.StatefulColumn):
The column state.
queryset (django.db.models.query.QuerySet):
The queryset to augment.
Returns:
django.db.models.query.QuerySet:
The resulting queryset.
"""
return queryset.select_related('profile')
def render_data(self, state, user):
"""Render the full name, or blank if not visible to the user.
Args:
state (djblets.datagrid.grids.StatefulColumn):
The column state.
user (django.contrib.auth.models.User):
The user whose full name is to be rendered.
Returns:
unicode:
Either the full name (if visible to the user) or an empty string.
"""
profile = user.get_profile()
if user.is_profile_visible(state.datagrid.request.user):
display_name = \
profile.get_display_name(state.datagrid.request.user)
else:
display_name = ''
return escape(display_name)
class BugsColumn(Column):
"""Shows the list of bugs specified on a review request.
The list of bugs will be linked to the bug tracker, if a bug tracker
was configured for the repository the review request's change is on.
"""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
# Note that we're enabling linking but overriding the link function
# to return None. This is to disable the automatic linking to the
# review request, so that the cell isn't generally clickable,
# preventing visual and interaction issues with the bug links.
super(BugsColumn, self).__init__(
label=_('Bugs'),
css_class='bugs',
link=False,
shrink=True,
sortable=False,
*args, **kwargs)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
return queryset.select_related('repository')
def render_data(self, state, review_request):
"""Return the rendered contents of the column."""
bugs = review_request.get_bug_list()
repository = review_request.repository
local_site_name = None
if review_request.local_site:
local_site_name = review_request.local_site.name
if repository and repository.bug_tracker:
links = []
for bug in bugs:
try:
url = local_site_reverse(
'bug_url',
local_site_name=local_site_name,
args=[review_request.display_id, bug])
links.append(
format_html('<a class="bug" href="{0}">{1}</a>',
url, bug))
except NoReverseMatch:
links.append(escape(bug))
return ', '.join(links)
return format_html_join(
', ',
'<span class="bug">{0}</span>',
((bug,) for bug in bugs)
)
class ReviewRequestCheckboxColumn(CheckboxColumn):
"""A column containing a check-box."""
def render_data(self, state, obj):
"""Return the rendered contents of the column."""
if self.is_selectable(state, obj):
checked = ''
if self.is_selected(state, obj):
checked = 'checked="true"'
return ('<input type="checkbox" data-object-id="%s" '
'data-checkbox-name="%s" %s />'
% (obj.display_id, escape(self.checkbox_name), checked))
else:
return ''
class DateTimeSinceColumn(DateTimeColumn):
"""Displays how long it has been since a given date/time.
These columns will dynamically update as the page is shown, so that the
number of minutes, hours, days, etc. ago is correct.
"""
def render_data(self, state, obj):
"""Return the rendered contents of the column."""
return '<time class="timesince" datetime="%s">%s</time>' % (
date(getattr(obj, self.field_name), 'c'),
super(DateTimeSinceColumn, self).render_data(state, obj))
class DiffUpdatedColumn(DateTimeColumn):
"""Shows the date/time that the diff was last updated."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(DiffUpdatedColumn, self).__init__(
label=_('Diff Updated'),
db_field='diffset_history__last_diff_updated',
field_name='last_diff_updated',
sortable=True,
link=False,
*args, **kwargs)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
return queryset.select_related('diffset_history')
def render_data(self, state, obj):
"""Return the rendered contents of the column."""
if obj.diffset_history.last_diff_updated:
return super(DiffUpdatedColumn, self).render_data(
state, obj.diffset_history)
else:
return ''
class DiffUpdatedSinceColumn(DateTimeSinceColumn):
"""Shows the elapsed time since the diff was last updated."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(DiffUpdatedSinceColumn, self).__init__(
label=_('Diff Updated'),
db_field='diffset_history__last_diff_updated',
field_name='last_diff_updated',
sortable=True,
link=False,
*args, **kwargs)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
return queryset.select_related('diffset_history')
def render_data(self, state, obj):
"""Return the rendered contents of the column."""
if obj.diffset_history.last_diff_updated:
return super(DiffUpdatedSinceColumn, self).render_data(
state, obj.diffset_history)
else:
return ''
class GroupMemberCountColumn(Column):
"""Shows the number of users that are part of a review group."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(GroupMemberCountColumn, self).__init__(
link=True,
link_func=self.link_to_object,
*args, **kwargs)
def render_data(self, state, group):
"""Return the rendered contents of the column."""
return six.text_type(group.users.count())
def link_to_object(self, state, group, value):
"""Return the link to the object in the column."""
return local_site_reverse('group-members',
request=state.datagrid.request,
args=[group.name])
class GroupsColumn(Column):
"""Shows the list of groups requested to review the review request."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(GroupsColumn, self).__init__(
label=_('Groups'),
detailed_label=_('Target Groups'),
sortable=False,
shrink=False,
*args, **kwargs)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
return queryset.prefetch_related('target_groups')
def render_data(self, state, review_request):
"""Return the rendered contents of the column."""
groups = review_request.target_groups.all()
return reduce(lambda a, d: a + d.name + ' ', groups, '')
class MyCommentsColumn(Column):
"""Shows if the current user has reviewed the review request."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(MyCommentsColumn, self).__init__(
image_class='rb-icon rb-icon-datagrid-comment-draft',
image_alt=_('My Comments'),
detailed_label=_('My Comments'),
shrink=True,
*args, **kwargs)
# XXX It'd be nice to be able to sort on this, but datagrids currently
# can only sort based on stored (in the DB) values, not computed
# values.
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
user = state.datagrid.request.user
if user.is_anonymous():
return queryset
query_dict = {
'user_id': six.text_type(user.id),
}
return queryset.extra(select={
'mycomments_my_reviews': """
SELECT COUNT(1)
FROM reviews_review
WHERE reviews_review.user_id = %(user_id)s
AND reviews_review.review_request_id =
reviews_reviewrequest.id
""" % query_dict,
'mycomments_private_reviews': """
SELECT COUNT(1)
FROM reviews_review
WHERE reviews_review.user_id = %(user_id)s
AND reviews_review.review_request_id =
reviews_reviewrequest.id
AND NOT reviews_review.public
""" % query_dict,
'mycomments_shipit_reviews': """
SELECT COUNT(1)
FROM reviews_review
WHERE reviews_review.user_id = %(user_id)s
AND reviews_review.review_request_id =
reviews_reviewrequest.id
AND reviews_review.ship_it
""" % query_dict,
})
def render_data(self, state, review_request):
"""Return the rendered contents of the column."""
user = state.datagrid.request.user
if user.is_anonymous() or review_request.mycomments_my_reviews == 0:
return ''
# Priority is ranked in the following order:
#
# 1) Non-public (draft) reviews
# 2) Public reviews marked "Ship It"
# 3) Public reviews not marked "Ship It"
if review_request.mycomments_private_reviews > 0:
icon_class = 'rb-icon-datagrid-comment-draft'
image_alt = _('Comments drafted')
else:
if review_request.mycomments_shipit_reviews > 0:
icon_class = 'rb-icon-datagrid-comment-shipit'
image_alt = _('Comments published. Ship it!')
else:
icon_class = 'rb-icon-datagrid-comment'
image_alt = _('Comments published')
return '<div class="rb-icon %s" title="%s"></div>' % \
(icon_class, image_alt)
class NewUpdatesColumn(Column):
"""Indicates if there are new updates on a review request.
This will show an icon if the review request has had any new updates
or reviews since the user last saw it.
"""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(NewUpdatesColumn, self).__init__(
image_class='rb-icon rb-icon-new-updates',
image_alt=_('New Updates'),
detailed_label=_('New Updates'),
shrink=True,
*args, **kwargs)
def render_data(self, state, review_request):
"""Return the rendered contents of the column."""
# Review requests for un-authenticated users will not contain the
# new_review_count attribute, so confirm its existence before
# attempting to access.
if (hasattr(review_request, 'new_review_count') and
review_request.new_review_count > 0):
return '<div class="%s" title="%s" />' % \
(self.image_class, self.image_alt)
return ''
class PendingCountColumn(Column):
"""Shows the pending number of review requests for a user or group.
This will show the pending number of review requests for the given
review group or user. It only applies to group or user lists.
"""
def render_data(self, state, obj):
"""Return the rendered contents of the column."""
return six.text_type(
getattr(obj, self.field_name).filter(
public=True, status='P').count())
class PeopleColumn(Column):
"""Shows the list of people requested to review the review request."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(PeopleColumn, self).__init__(
label=_('People'),
detailed_label=_('Target People'),
sortable=False,
shrink=False,
*args, **kwargs)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
return queryset.prefetch_related('target_people')
def render_data(self, state, review_request):
"""Return the rendered contents of the column."""
people = review_request.target_people.all()
return reduce(lambda a, d: a + d.username + ' ', people, '')
class RepositoryColumn(Column):
"""Shows the name of the repository the review request's change is on."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(RepositoryColumn, self).__init__(
label=_('Repository'),
db_field='repository__name',
shrink=True,
sortable=True,
link=False,
css_class='repository-column',
*args, **kwargs)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
return queryset.select_related('repository')
def render_data(self, state, obj):
"""Return the rendered contents of the column."""
return super(RepositoryColumn, self).render_data(state, obj) or ''
class ReviewCountColumn(Column):
"""Shows the number of published reviews for a review request."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(ReviewCountColumn, self).__init__(
label=_('Reviews'),
detailed_label=_('Number of Reviews'),
shrink=True,
link=True,
link_func=self.link_to_object,
            *args, **kwargs)
def render_data(self, state, review_request):
"""Return the rendered contents of the column."""
return six.text_type(review_request.publicreviewcount_count)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
return queryset.extra(select={
'publicreviewcount_count': """
SELECT COUNT(*)
FROM reviews_review
WHERE reviews_review.public
AND reviews_review.base_reply_to_id is NULL
AND reviews_review.review_request_id =
reviews_reviewrequest.id
"""
})
def link_to_object(self, state, review_request, value):
"""Return the link to the object in the column."""
return '%s#last-review' % review_request.get_absolute_url()
class ReviewGroupStarColumn(BaseStarColumn):
"""Indicates if a review group is starred.
The star is interactive, allowing the user to star or unstar the group.
"""
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
user = state.datagrid.request.user
if user.is_authenticated():
state.all_starred = set(
user.get_profile().starred_groups
.filter(pk__in=state.datagrid.id_list)
.values_list('pk', flat=True)
)
return queryset
class ReviewRequestIDColumn(Column):
"""Displays the ID of the review request."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(ReviewRequestIDColumn, self).__init__(
label=_('ID'),
detailed_label=_('Review Request ID'),
shrink=True,
link=True,
sortable=True,
*args, **kwargs)
def get_sort_field(self, state):
"""Return the model field for sorting this column."""
if state.datagrid.local_site:
return 'local_id'
else:
return 'id'
def render_data(self, state, review_request):
"""Return the rendered contents of the column."""
return review_request.display_id
class ReviewRequestStarColumn(BaseStarColumn):
"""Indicates if a review request is starred.
The star is interactive, allowing the user to star or unstar the
review request.
"""
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
user = state.datagrid.request.user
if user.is_authenticated():
state.all_starred = set(
user.get_profile().starred_review_requests
.filter(pk__in=state.datagrid.id_list)
.values_list('pk', flat=True)
)
return queryset
class ShipItColumn(Column):
"""Shows the "Ship It" count for a review request."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(ShipItColumn, self).__init__(
image_class='rb-icon rb-icon-datagrid-column-shipits-issues',
image_alt=_('Ship It!/Issue Counts'),
detailed_label=_('Ship It!/Issue Counts'),
db_field='shipit_count',
sortable=True,
shrink=True,
*args, **kwargs)
def render_data(self, state, review_request):
"""Return the rendered contents of the column.
Args:
state (djblets.datagrid.grids.StatefulColumn):
The state for the datagrid.
review_request (reviewboard.reviews.models.review_request.
ReviewRequest):
The review request.
Returns:
django.utils.safestring.SafeText:
The rendered HTML for the column.
"""
open_issues = review_request.issue_open_count
verifying_issues = review_request.issue_verifying_count
if open_issues > 0 and verifying_issues > 0:
return self._render_counts([
{
'count': open_issues,
'title': _('Open issue count'),
},
{
'count': verifying_issues,
'css_class': 'issue-verifying-count',
'icon_name': 'issue-verifying',
'title': _('Verifying issue count'),
},
])
elif open_issues > 0:
return self._render_counts([{
'count': open_issues,
'title': _('Open issue count'),
}])
elif verifying_issues > 0:
return self._render_counts([{
'count': verifying_issues,
'icon_name': 'issue-verifying',
'title': _('Verifying issue count'),
}])
elif review_request.shipit_count:
return self._render_counts(
[{
'count': review_request.shipit_count,
'css_class': 'shipit-count',
'icon_name': 'shipit',
'title': _('Ship It! count'),
}],
container_css_class='shipit-count-container')
else:
return ''
def _render_counts(self, count_details,
container_css_class='issue-count-container'):
"""Render the counts for the column.
This will render a container bubble in the column and render each
provided count and icon in the bubble. This can be used for issues,
Ship Its, or anything else we need down the road.
Args:
count_details (list of dict):
The list of details for the count. This must have ``count``
and ``title`` keys, and may optionally have ``css_class`` and
``icon_name`` keys.
container_css_class (unicode, optional):
The optional CSS class name for the outer container.
Returns:
django.utils.safestring.SafeText:
The resulting HTML for the counts bubble.
"""
# Note that the HTML is very whitespace-sensitive, so don't try to
# change the templates to be nicely indented. The spacing is this way
# for a reason.
#
# We also can't use format_html_join, unfortunately, as that doesn't
# support keyword arguments.
return format_html(
'<div class="{container_css_class}">{count_html}</div>',
container_css_class=container_css_class,
count_html=mark_safe(''.join(
format_html(
'<span class="{css_class}">'
'<span class="rb-icon rb-icon-datagrid-{icon_name}"'
' title="{title}"></span>'
'{count}'
'</span>',
**dict({
'css_class': 'issue-count',
'icon_name': 'open-issues',
}, **count_detail))
for count_detail in count_details
)))
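# Illustrative sketch, not part of the original source: given the docstring for
# _render_counts above, a hypothetical call such as
#     self._render_counts([{'count': 2, 'title': _('Open issue count')}])
# would render a single bubble that falls back to the default 'issue-count'
# CSS class and 'open-issues' icon, since neither key is overridden.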
class SummaryColumn(Column):
"""Shows the summary of a review request.
This will also prepend the draft/submitted/discarded state, if any,
to the summary.
"""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(SummaryColumn, self).__init__(
label=_('Summary'),
expand=True,
link=True,
link_css_class='review-request-link',
css_class='summary',
sortable=True,
*args, **kwargs)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
user = state.datagrid.request.user
if user.is_anonymous():
return queryset
return queryset.extra(select={
'draft_summary': """
SELECT reviews_reviewrequestdraft.summary
FROM reviews_reviewrequestdraft
WHERE reviews_reviewrequestdraft.review_request_id =
reviews_reviewrequest.id
""",
'visibility': """
SELECT accounts_reviewrequestvisit.visibility
FROM accounts_reviewrequestvisit
WHERE accounts_reviewrequestvisit.review_request_id =
reviews_reviewrequest.id
AND accounts_reviewrequestvisit.user_id = %(user_id)s
""" % {
'user_id': six.text_type(user.id)
}
})
def render_data(self, state, review_request):
"""Return the rendered contents of the column.
Args:
state (djblets.datagrids.grids.StatefulColumn):
The state for the datagrid.
review_request (reviewboard.reviews.models.review_request.ReviewRequest):
The review request.
Returns:
django.utils.safestring.SafeText:
The rendered column.
"""
summary = review_request.summary
labels = []
if review_request.submitter_id == state.datagrid.request.user.id:
if review_request.draft_summary is not None:
summary = review_request.draft_summary
labels.append(('label-draft', _('Draft')))
elif (not review_request.public and
review_request.status == ReviewRequest.PENDING_REVIEW):
labels.append(('label-draft', _('Draft')))
# review_request.visibility is not defined when the user is not
# logged in.
if state.datagrid.request.user.is_authenticated():
if review_request.visibility == ReviewRequestVisit.ARCHIVED:
labels.append(('label-archived', _('Archived')))
elif review_request.visibility == ReviewRequestVisit.MUTED:
labels.append(('label-muted', _('Muted')))
if review_request.status == ReviewRequest.SUBMITTED:
labels.append(('label-submitted', _('Submitted')))
elif review_request.status == ReviewRequest.DISCARDED:
labels.append(('label-discarded', _('Discarded')))
result = [
format_html_join('', '<label class="{0}">{1}</label>', labels)
]
if summary:
result.append(format_html('<span>{0}</span>', summary))
else:
result.append(format_html('<span class="no-summary">{0}</span>',
_('No Summary')))
return mark_safe(''.join(result))
class ReviewSummaryColumn(SummaryColumn):
"""Shows the summary of the review request of a review.
This does not (yet) prepend the draft/submitted/discarded state, if any,
to the summary.
"""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(SummaryColumn, self).__init__(
label=_('Review Request Summary'),
expand=True,
link=True,
css_class='summary',
*args, **kwargs)
def render_data(self, state, review):
"""Return the rendered contents of the column."""
return conditional_escape(review.review_request.summary)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
return queryset.select_related('review_request')
class ToMeColumn(Column):
"""Indicates if the user is requested to review the change.
This will show an indicator if the user is on the Target People reviewers
list.
"""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
raquo = '\u00BB'
super(ToMeColumn, self).__init__(
label=raquo,
detailed_label=_('To Me'),
detailed_label_html=(ugettext('%s To Me') % raquo),
shrink=True,
*args, **kwargs)
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset."""
user = state.datagrid.request.user
if user.is_authenticated():
state.all_to_me = set(
user.directed_review_requests.filter(
pk__in=state.datagrid.id_list).values_list('pk',
flat=True))
else:
state.all_to_me = set()
return queryset
def render_data(self, state, review_request):
"""Return the rendered contents of the column."""
if review_request.pk in state.all_to_me:
return ('<div title="%s"><b>»</b></div>'
% (self.detailed_label))
return ''
class DiffSizeColumn(Column):
"""Indicates line add/delete counts for the latest diffset."""
def __init__(self, *args, **kwargs):
"""Initialize the column."""
super(DiffSizeColumn, self).__init__(
label=_('Diff Size'),
sortable=False,
shrink=True,
*args, **kwargs)
def render_data(self, state, review_request):
"""Return the rendered contents of the column."""
if review_request.repository_id is None:
return ''
diffsets = list(review_request.diffset_history.diffsets.all())
if not diffsets:
return ''
diffset = diffsets[-1]
counts = diffset.get_total_line_counts()
insert_count = counts.get('raw_insert_count')
delete_count = counts.get('raw_delete_count')
result = []
if insert_count:
result.append('<span class="diff-size-column insert">+%d</span>' %
insert_count)
if delete_count:
result.append('<span class="diff-size-column delete">-%d</span>' %
delete_count)
if result:
return ' '.join(result)
return ''
def augment_queryset(self, state, queryset):
"""Add additional queries to the queryset.
This will prefetch the diffsets and filediffs needed to perform the
line calculations.
Args:
state (djblets.datagrid.grids.StatefulColumn):
The column state.
queryset (django.db.models.query.QuerySet):
The queryset to augment.
Returns:
django.db.models.query.QuerySet:
The resulting queryset.
"""
# TODO: Update this to fetch only the specific fields when we move
# to a newer version of Django.
return queryset.prefetch_related('diffset_history__diffsets',
'diffset_history__diffsets__files')
avg_line_length: 34.561896 | max_line_length: 85 | alphanum_fraction: 0.584268
hexsha: ab24d09b4b04fd7384ccf30dd51abe9f61cd739a | size: 10,782 | ext: py | lang: Python
repo_path: ivy/functional/backends/jax/general.py | repo_name: Darshan-H-E/ivy | repo_head_hexsha: f31bb5886722ab9c1ccbeacd01b9d26f2f7ea8e7 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
"""Collection of Jax general functions, wrapped to fit Ivy syntax and signature."""
# global
import jax as jax
import numpy as np
import jax.numpy as jnp
import jaxlib as jaxlib
from numbers import Number
from operator import mul as _mul
from functools import reduce as _reduce
from jaxlib.xla_extension import Buffer
from typing import Iterable, Optional
import multiprocessing as _multiprocessing
from haiku._src.data_structures import FlatMapping
# local
import ivy
from ivy.functional.ivy.device import default_device
from ivy.functional.ivy import default_dtype
from ivy.functional.backends.jax.device import to_dev, _to_array, dev as callable_dev
from ivy.functional.backends.jax import JaxArray
# noinspection PyUnresolvedReferences,PyProtectedMember
def is_native_array(x, exclusive=False):
if exclusive:
return isinstance(
x,
(
jax.interpreters.xla._DeviceArray,
jaxlib.xla_extension.DeviceArray,
Buffer,
),
)
return isinstance(
x,
(
jax.interpreters.xla._DeviceArray,
jaxlib.xla_extension.DeviceArray,
Buffer,
jax.interpreters.ad.JVPTracer,
jax.core.ShapedArray,
jax.interpreters.partial_eval.DynamicJaxprTracer,
),
)
def _to_array(x):
if isinstance(x, jax.interpreters.ad.JVPTracer):
return _to_array(x.primal)
elif isinstance(x, jax.interpreters.partial_eval.DynamicJaxprTracer):
return _to_array(x.aval)
return x
def copy_array(x: JaxArray) -> JaxArray:
return jnp.array(x)
def array_equal(x0: JaxArray, x1: JaxArray) -> bool:
return jnp.array_equal(x0, x1)
def to_numpy(x: JaxArray) -> np.ndarray:
return np.asarray(_to_array(x))
def to_scalar(x: JaxArray) -> Number:
if isinstance(x, Number):
return x
else:
return _to_array(x).item()
def to_list(x: JaxArray) -> list:
return _to_array(x).tolist()
shape = lambda x, as_tensor=False: jnp.asarray(jnp.shape(x)) if as_tensor else x.shape
shape.__name__ = "shape"
get_num_dims = (
lambda x, as_tensor=False: jnp.asarray(len(jnp.shape(x)))
if as_tensor
else len(x.shape)
)
container_types = lambda: [FlatMapping]
def floormod(x: JaxArray, y: JaxArray, out: Optional[JaxArray] = None) -> JaxArray:
ret = x % y
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def unstack(x, axis, keepdims=False):
if x.shape == ():
return [x]
dim_size = x.shape[axis]
# ToDo: make this faster somehow, jnp.split is VERY slow for large dim_size
x_split = jnp.split(x, dim_size, axis)
if keepdims:
return x_split
return [jnp.squeeze(item, axis) for item in x_split]
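# Illustrative example (assumed inputs, not in the original source):
#     unstack(jnp.array([[1, 2], [3, 4]]), axis=0)
# returns [array([1, 2]), array([3, 4])]; with keepdims=True each piece keeps
# its singleton axis and has shape (1, 2) instead.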
def inplace_update(x, val):
(x_native, val_native), _ = ivy.args_to_native(x, val)
if ivy.is_ivy_array(x):
x.data = val_native
else:
x = ivy.Array(val_native)
return x
inplace_arrays_supported = lambda: False
inplace_variables_supported = lambda: False
def cumsum(x: JaxArray, axis: int = 0, out: Optional[JaxArray] = None) -> JaxArray:
if ivy.exists(out):
return ivy.inplace_update(out, jnp.cumsum(x, axis))
else:
return jnp.cumsum(x, axis)
def cumprod(
x: JaxArray,
axis: int = 0,
exclusive: Optional[bool] = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
if exclusive:
x = jnp.swapaxes(x, axis, -1)
x = jnp.concatenate((jnp.ones_like(x[..., -1:]), x[..., :-1]), -1)
res = jnp.cumprod(x, -1)
if ivy.exists(out):
return ivy.inplace_update(out, jnp.copy(jnp.swapaxes(res, axis, -1)))
else:
return jnp.swapaxes(res, axis, -1)
if ivy.exists(out):
return ivy.inplace_update(out, jnp.cumprod(x, axis))
else:
return jnp.cumprod(x, axis)
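# Illustrative example (assumed inputs, not in the original source):
#     cumprod(jnp.array([1, 2, 3, 4]))                  -> [1, 2, 6, 24]
#     cumprod(jnp.array([1, 2, 3, 4]), exclusive=True)  -> [1, 1, 2, 6]
# i.e. the exclusive variant shifts the running product right and starts at 1.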
def scatter_flat(
indices, updates, size=None, tensor=None, reduction="sum", device=None
):
target = tensor
target_given = ivy.exists(target)
if ivy.exists(size) and ivy.exists(target):
assert len(target.shape) == 1 and target.shape[0] == size
if device is None:
device = callable_dev(updates)
if reduction == "sum":
if not target_given:
target = jnp.zeros([size], dtype=updates.dtype)
target = target.at[indices].add(updates)
elif reduction == "replace":
if not target_given:
target = jnp.zeros([size], dtype=updates.dtype)
target = target.at[indices].set(updates)
elif reduction == "min":
if not target_given:
target = jnp.ones([size], dtype=updates.dtype) * 1e12
target = target.at[indices].min(updates)
if not target_given:
target = jnp.where(target == 1e12, 0.0, target)
elif reduction == "max":
if not target_given:
target = jnp.ones([size], dtype=updates.dtype) * -1e12
target = target.at[indices].max(updates)
if not target_given:
target = jnp.where(target == -1e12, 0.0, target)
else:
raise Exception(
'reduction is {}, but it must be one of "sum", "min" or "max"'.format(
reduction
)
)
return to_dev(target, device)
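# Illustrative example (assumed inputs, not in the original source):
#     scatter_flat(jnp.array([0, 1, 1, 2]), jnp.array([1., 2., 3., 4.]), size=4)
# builds a zero vector of length 4 and, with the default "sum" reduction, adds
# duplicate indices together, giving [1., 5., 4., 0.]; "replace", "min" and
# "max" use the corresponding .at[] operators instead.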
# noinspection PyShadowingNames
def scatter_nd(indices, updates, shape=None, tensor=None, reduction="sum", device=None):
# parse numeric inputs
if indices not in [Ellipsis, ()] and not (
isinstance(indices, Iterable) and Ellipsis in indices
):
indices = [[indices]] if isinstance(indices, Number) else indices
indices = jnp.array(indices)
if len(indices.shape) < 2:
indices = jnp.expand_dims(indices, -1)
updates = [updates] if isinstance(updates, Number) else updates
updates = jnp.array(
updates,
dtype=ivy.dtype(tensor, as_str=False)
if ivy.exists(tensor)
else ivy.default_dtype(item=updates),
)
# handle Ellipsis
if isinstance(indices, tuple) or indices is Ellipsis:
indices_tuple = indices
else:
indices_flat = indices.reshape(-1, indices.shape[-1]).T
indices_tuple = tuple(indices_flat) + (Ellipsis,)
# implementation
target = tensor
target_given = ivy.exists(target)
if ivy.exists(shape) and ivy.exists(target):
assert ivy.shape_to_tuple(target.shape) == ivy.shape_to_tuple(shape)
if device is None:
device = callable_dev(updates)
shape = list(shape) if ivy.exists(shape) else list(tensor.shape)
if reduction == "sum":
if not target_given:
target = jnp.zeros(shape, dtype=updates.dtype)
target = target.at[indices_tuple].add(updates)
elif reduction == "replace":
if not target_given:
target = jnp.zeros(shape, dtype=updates.dtype)
target = target.at[indices_tuple].set(updates)
elif reduction == "min":
if not target_given:
target = jnp.ones(shape, dtype=updates.dtype) * 1e12
target = target.at[indices_tuple].min(updates)
if not target_given:
target = jnp.where(target == 1e12, 0.0, target)
elif reduction == "max":
if not target_given:
target = jnp.ones(shape, dtype=updates.dtype) * -1e12
target = target.at[indices_tuple].max(updates)
if not target_given:
target = jnp.where(target == -1e12, 0.0, target)
else:
raise Exception(
'reduction is {}, but it must be one of "sum", "min" or "max"'.format(
reduction
)
)
return to_dev(target, device)
def gather(
params: JaxArray,
indices: JaxArray,
axis: Optional[int] = -1,
device: Optional[str] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if device is None:
device = callable_dev(params)
if ivy.exists(out):
return ivy.inplace_update(
out, to_dev(jnp.take_along_axis(params, indices, axis), device)
)
else:
return to_dev(jnp.take_along_axis(params, indices, axis), device)
def gather_nd(params, indices, device=None):
if device is None:
device = callable_dev(params)
indices_shape = indices.shape
params_shape = params.shape
num_index_dims = indices_shape[-1]
res_dim_sizes_list = [
_reduce(_mul, params_shape[i + 1 :], 1) for i in range(len(params_shape) - 1)
] + [1]
result_dim_sizes = jnp.array(res_dim_sizes_list)
implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
flat_params = jnp.reshape(params, (-1,))
new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
indices_scales = jnp.reshape(result_dim_sizes[0:num_index_dims], new_shape)
indices_for_flat_tiled = jnp.tile(
jnp.reshape(jnp.sum(indices * indices_scales, -1, keepdims=True), (-1, 1)),
(1, implicit_indices_factor),
)
implicit_indices = jnp.tile(
jnp.expand_dims(jnp.arange(implicit_indices_factor), 0),
(indices_for_flat_tiled.shape[0], 1),
)
indices_for_flat = indices_for_flat_tiled + implicit_indices
flat_indices_for_flat = jnp.reshape(indices_for_flat, (-1,)).astype(jnp.int32)
flat_gather = jnp.take(flat_params, flat_indices_for_flat, 0)
new_shape = list(indices_shape[:-1]) + list(params_shape[num_index_dims:])
ret = jnp.reshape(flat_gather, new_shape)
return to_dev(ret, device)
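# Illustrative example (assumed inputs, not in the original source):
#     gather_nd(jnp.array([[1, 2], [3, 4]]), jnp.array([[0, 1], [1, 0]]))
# treats each row of `indices` as a full coordinate into `params` and returns
# [2, 3], i.e. params[0, 1] and params[1, 0].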
multiprocessing = (
lambda context=None: _multiprocessing
if context is None
else _multiprocessing.get_context(context)
)
# noinspection PyUnusedLocal
def one_hot(indices, depth, device=None):
# from https://stackoverflow.com/questions/38592324/one-hot-encoding-using-numpy
res = jnp.eye(depth)[jnp.array(indices).reshape(-1)]
return to_dev(res.reshape(list(indices.shape) + [depth]), default_device(device))
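# Illustrative example (assumed inputs, not in the original source):
#     one_hot(jnp.array([0, 2]), 3)
# returns [[1., 0., 0.], [0., 0., 1.]] with shape (2, 3).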
def indices_where(x):
where_x = jnp.where(x)
ret = jnp.concatenate([jnp.expand_dims(item, -1) for item in where_x], -1)
return ret
def inplace_decrement(x, val):
(x_native, val_native), _ = ivy.args_to_native(x, val)
if ivy.is_ivy_array(x):
x.data -= val_native
else:
x = ivy.Array(val_native)
return x
def inplace_increment(x, val):
(x_native, val_native), _ = ivy.args_to_native(x, val)
if ivy.is_ivy_array(x):
x.data += val_native
else:
x = ivy.Array(val_native)
return x
compile = lambda fn, dynamic=True, example_inputs=None, static_argnums=None, static_argnames=None: jax.jit(
fn, static_argnums=static_argnums, static_argnames=static_argnames
)
current_framework_str = lambda: "jax"
current_framework_str.__name__ = "current_framework_str"
avg_line_length: 31.711765 | max_line_length: 107 | alphanum_fraction: 0.650158
hexsha: a4c709290d894f38242afab6d7a0c34dc31879ae | size: 1,717 | ext: py | lang: Python
repo_path: catalog/redis_keys.py | repo_name: cognitivefashion/cf-sdk-python | repo_head_hexsha: 9eb90245314a54d1a472f835fb427e7f6509d92a | licenses: ["Apache-2.0"]
max_stars_count: 9 (2019-03-05T02:50:48.000Z to 2022-02-25T20:21:42.000Z) | max_issues_count: 1 (2019-05-21T02:04:27.000Z to 2020-02-10T20:33:10.000Z) | max_forks_count: 5 (2017-06-16T00:00:13.000Z to 2021-02-08T19:23:59.000Z)
#------------------------------------------------------------------------------
# Get the redis key/value pairs.
# GET /v1/redis/{db}
#------------------------------------------------------------------------------
import os
import json
import requests
from urlparse import urljoin
from pprint import pprint
from props import *
#------------------------------------------------------------------------------
# API URL
#------------------------------------------------------------------------------
api_gateway_url = props['api_gateway_url']
#------------------------------------------------------------------------------
# HEADERS
#------------------------------------------------------------------------------
headers = {}
# API key
headers['X-Api-Key'] = props['X-Api-Key']
# Data collection opt out.
headers['X-Data-Collection-Opt-Out'] = props['X-Data-Collection-Opt-Out']
#------------------------------------------------------------------------------
# OPTIONAL QUERY PARAMETERS
#------------------------------------------------------------------------------
params = {}
#------------------------------------------------------------------------------
# PATH PARAMETERS
#------------------------------------------------------------------------------
# the db number (from 0 to 15)
db = 1
#------------------------------------------------------------------------------
# API END POINT
#------------------------------------------------------------------------------
api_endpoint = '/v1/redis/%s'%(db)
url = urljoin(api_gateway_url,api_endpoint)
response = requests.get(url,
headers=headers,
params=params)
print response.status_code
pprint(response.json())
avg_line_length: 33.019231 | max_line_length: 79 | alphanum_fraction: 0.301107
hexsha: 9c1eb3a0dcfcd55a091cb583d47dc6ab329b5f04 | size: 1,239 | ext: py | lang: Python
repo_path: clients/keto/python/setup.py | repo_name: kolotaev/sdk | repo_head_hexsha: 0dda1becd70be8d7b9d678321ebe780c1ba00485 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
"""
ORY Keto
Ory Keto is a cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs. # noqa: E501
The version of the OpenAPI document: v0.6.0-alpha.6
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "ory-keto-client"
VERSION = "v0.6.0-alpha.6"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"urllib3 >= 1.25.3",
"python-dateutil",
]
setup(
name=NAME,
version=VERSION,
description="ORY Keto",
author="ORY",
author_email="hi@ory.sh",
url="https://github.com/ory/sdk",
keywords=["OpenAPI", "OpenAPI-Generator", "ORY Keto"],
python_requires=">=3.6",
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
license="Apache 2.0",
long_description="""\
Ory Keto is a cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs. # noqa: E501
"""
)
avg_line_length: 27.533333 | max_line_length: 173 | alphanum_fraction: 0.677966
hexsha: bda99a4d078a24aa9dfdd1aa055ba53c05e7ea51 | size: 3,328 | ext: py | lang: Python
repo_path: kivymd/toast/kivytoast/kivytoast.py | repo_name: gottadiveintopython/KivyMD | repo_head_hexsha: 68bccd69219192dc50ff2f1f18cddfc2c13a8443 | licenses: ["MIT"]
max_stars_count: 9 (2019-06-19T01:15:19.000Z to 2021-09-05T16:26:22.000Z) | max_issues_count: null | max_forks_count: 12 (2019-07-14T10:54:59.000Z to 2022-02-02T18:38:42.000Z)
"""
KivyToast
=========
Copyright (c) 2019 Ivanov Yuri
For suggestions and questions:
<kivydevelopment@gmail.com>
This file is distributed under the terms of the same license,
as the Kivy framework.
Example:
from kivy.app import App
from kivymd.theming import ThemeManager
from kivymd.toast.kivytoast.kivytoast import toast
class Test(App):
theme_cls = ThemeManager()
def show_toast(self):
toast('Test Kivy Toast')
def build(self):
return Builder.load_string(
'''
BoxLayout:
orientation:'vertical'
MDToolbar:
id: toolbar
title: 'Test Toast'
md_bg_color: app.theme_cls.primary_color
left_action_items: [['menu', lambda x: '']]
FloatLayout:
MDRaisedButton:
text: 'TEST KIVY TOAST'
on_release: app.show_toast()
pos_hint: {'center_x': .5, 'center_y': .5}
'''
)
Test().run()
"""
from kivy.core.window import Window
from kivy.properties import NumericProperty
from kivy.uix.label import Label
from kivy.animation import Animation
from kivy.uix.modalview import ModalView
from kivy.clock import Clock
from kivy.metrics import dp
from kivy.lang import Builder
from kivymd import images_path
Builder.load_string(
"""
<Toast>:
canvas:
Color:
rgba: .2, .2, .2, 1
RoundedRectangle:
pos: self.pos
size: self.size
radius: [15,]
"""
)
class Toast(ModalView):
duration = NumericProperty(2.5)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.size_hint = (None, None)
self.pos_hint = {"center_x": 0.5, "center_y": 0.1}
self.background_color = [0, 0, 0, 0]
self.background = f"{images_path}transparent.png"
self.opacity = 0
self.auto_dismiss = True
self.label_toast = Label(size_hint=(None, None), opacity=0)
self.label_toast.bind(texture_size=self.label_check_texture_size)
self.add_widget(self.label_toast)
def label_check_texture_size(self, instance, texture_size):
texture_width, texture_height = texture_size
if texture_width > Window.width:
instance.text_size = (Window.width - dp(10), None)
instance.texture_update()
texture_width, texture_height = instance.texture_size
self.size = (texture_width + 25, texture_height + 25)
def toast(self, text_toast):
self.label_toast.text = text_toast
self.open()
def on_open(self):
self.fade_in()
Clock.schedule_once(self.fade_out, self.duration)
def fade_in(self):
Animation(opacity=1, duration=0.4).start(self.label_toast)
Animation(opacity=1, duration=0.4).start(self)
def fade_out(self, interval):
Animation(opacity=0, duration=0.4).start(self.label_toast)
anim_body = Animation(opacity=0, duration=0.4)
anim_body.bind(on_complete=lambda *x: self.dismiss())
anim_body.start(self)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
if self.auto_dismiss:
self.dismiss()
return False
super(ModalView, self).on_touch_down(touch)
return True
def toast(text, duration=2.5):
Toast(duration=duration).toast(text)
avg_line_length: 26 | max_line_length: 73 | alphanum_fraction: 0.643029
hexsha: 1342c28ed684e1aad4a5e79b3971a4ee843121f5 | size: 2,044 | ext: py | lang: Python
repo_path: Crypto/__init__.py | repo_name: matthid/ironpycrypto | repo_head_hexsha: fbcffa058cbe32bef681cc57046f9182fb2ea363 | licenses: ["MIT"]
max_stars_count: 1 (2018-09-18T15:01:49.000Z to 2018-09-18T15:01:49.000Z) | max_issues_count: null | max_forks_count: null
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Python Cryptography Toolkit
A collection of cryptographic modules implementing various algorithms
and protocols.
Subpackages:
Crypto.Cipher Secret-key encryption algorithms (AES, DES, ARC4)
Crypto.Hash Hashing algorithms (MD5, SHA, HMAC)
Crypto.Protocol Cryptographic protocols (Chaffing, all-or-nothing
transform). This package does not contain any
network protocols.
Crypto.PublicKey Public-key encryption and signature algorithms
(RSA, DSA)
Crypto.Util Various useful modules and functions (long-to-string
conversion, random number generation, number
theoretic functions)
"""
__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util']
__version__ = '2.1.0' # See also below and setup.py
__revision__ = "$Id$"
# New software should look at this instead of at __version__ above.
version_info = (2, 1, 0, 'final', 0) # See also above and setup.py
avg_line_length: 43.489362 | max_line_length: 78 | alphanum_fraction: 0.642857
hexsha: cbe23edc512c5f2941248826a2dd6a4fe41703d8 | size: 2,249 | ext: py | lang: Python
repo_path: Asset/MRRModels/MRRFourActions.py | repo_name: vd1371/GIAMS | repo_head_hexsha: dd6551f344b8d0377131d4496846eb5d03b6189c | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
#Loading dependencies
import numpy as np
from collections import Counter
from .BaseMRRPlan import *
class MRRFourActions(BaseMRRPlan):
def __init__(self, **params):
super().__init__(**params)
maint_duration = params.pop('maint_duration')
rehab_duration = params.pop('rehab_duration')
recon_duration = params.pop('recon_duration')
self.mrr_duration = {MAINT: maint_duration, REHAB: rehab_duration, RECON: recon_duration}
self.randomize_mrr()
def set_mrr(self, new_mrr):
'''Set the MRR'''
if np.shape(new_mrr) != (self.settings.n_elements, self.settings.n_steps*self.settings.dt):
raise ValueError(f"Expected shape of mrr is {(self.settings.n_elements, self.settings.n_steps*self.settings.dt)}"\
f"But {new_mrr.shape} was given")
self.mrr = new_mrr
def randomize_mrr(self):
'''Randomly initialize the mrr'''
self.mrr = np.random.randint(2, size=(self.settings.n_elements, self.settings.dt*self.settings.n_steps))
return self.mrr
def mrr_to_decimal(self, mrr_binary = None):
'''Converting the binray representation to decicaml representations'''
if mrr_binary is None:
mrr = self.mrr
else:
mrr = mrr_binary
self.mrr_decoded = []
for element_idx in range (self.settings.n_elements):
element_mrr = []
for j in range(0, len(mrr[element_idx]), 2):
element_mrr.append(int(str(int(mrr[element_idx][j]*10+ mrr[element_idx][j+1])), 2))
self.mrr_decoded.append(element_mrr)
return self.mrr_decoded
def mrr_to_binary(self, decoded_mrr):
'''Converting the decimal representation to binary representations'''
self.mrr = []
for i in range(len(decoded_mrr)):
temp_mrr = []
for val in decoded_mrr[i]:
for binar in bin(val)[2:]:
temp_mrr.append(binar)
self.mrr.append(temp_mrr)
return self.mrr
def check_policy(self):
'''Checking if a policy is acceptable'''
mrr_decimal = self.mrr_to_decimal()
for elem_mrr in mrr_decimal:
counts = Counter(elem_mrr)
if counts[RECON] > 2:
return False
elif counts[REHAB] > 3:
return False
elif counts[MAINT] > 5:
return False
for i in range (len(mrr_decimal)):
for val1, val2 in zip(mrr_decimal[i][:-1], mrr_decimal[i][1:]):
if val1 * val2 > 0:
return False
return True
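# Illustrative sketch of the encoding used above (assumed values, not in the
# original source): a binary element row [1, 0, 0, 1] is decoded two bits at a
# time by mrr_to_decimal ("10" -> 2, "01" -> 1), giving the element plan [2, 1];
# mrr_to_binary performs the reverse mapping from decimal actions to bits.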
avg_line_length: 28.468354 | max_line_length: 117 | alphanum_fraction: 0.707426
hexsha: e6d5b4989a0e8a624d8fe2f437c27765c78cd0df | size: 11,942 | ext: py | lang: Python
repo_path: tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/iam.py | repo_name: vixadd/grpc | repo_head_hexsha: 7c9e8b425166276232653725de32ea0422a39b33 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2021-07-01T03:15:14.000Z to 2021-07-01T03:15:14.000Z) | max_issues_count: null | max_forks_count: null
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import datetime
import functools
import logging
from typing import Any, Dict, FrozenSet, Optional
from framework.helpers import retryers
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
# Type aliases
_timedelta = datetime.timedelta
_HttpRequest = gcp.api.HttpRequest
class EtagConflict(gcp.api.Error):
"""
Indicates concurrent policy changes.
https://cloud.google.com/iam/docs/policies#etag
"""
pass
def handle_etag_conflict(func):
def wrap_retry_on_etag_conflict(*args, **kwargs):
retryer = retryers.exponential_retryer_with_timeout(
retry_on_exceptions=(EtagConflict, gcp.api.TransportError),
wait_min=_timedelta(seconds=1),
wait_max=_timedelta(seconds=10),
timeout=_timedelta(minutes=2))
return retryer(func, *args, **kwargs)
return wrap_retry_on_etag_conflict
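# Illustrative note (assumption, not part of the original source): a method
# decorated with @handle_etag_conflict, such as the policy-binding helpers
# below, is retried with exponential backoff (1s-10s waits, 2 minute timeout)
# whenever a concurrent policy update raises EtagConflict or a TransportError
# occurs.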
def _replace_binding(policy: 'Policy', binding: 'Policy.Binding',
new_binding: 'Policy.Binding') -> 'Policy':
new_bindings = set(policy.bindings)
new_bindings.discard(binding)
new_bindings.add(new_binding)
return dataclasses.replace(policy, bindings=frozenset(new_bindings))
@dataclasses.dataclass(frozen=True)
class ServiceAccount:
"""An IAM service account.
https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts
Note: "etag" field is skipped because it's deprecated
"""
name: str
projectId: str
uniqueId: str
email: str
oauth2ClientId: str
displayName: str = ''
description: str = ''
disabled: bool = False
@classmethod
def from_response(cls, response: Dict[str, Any]) -> 'ServiceAccount':
return cls(name=response['name'],
projectId=response['projectId'],
uniqueId=response['uniqueId'],
email=response['email'],
oauth2ClientId=response['oauth2ClientId'],
description=response.get('description', ''),
displayName=response.get('displayName', ''),
disabled=response.get('disabled', False))
def as_dict(self) -> Dict[str, Any]:
return dataclasses.asdict(self)
@dataclasses.dataclass(frozen=True)
class Expr:
"""
Represents a textual expression in the Common Expression Language syntax.
https://cloud.google.com/iam/docs/reference/rest/v1/Expr
"""
expression: str
title: str = ''
description: str = ''
location: str = ''
@classmethod
def from_response(cls, response: Dict[str, Any]) -> 'Expr':
return cls(**response)
def as_dict(self) -> Dict[str, Any]:
return dataclasses.asdict(self)
@dataclasses.dataclass(frozen=True)
class Policy:
"""An Identity and Access Management (IAM) policy, which specifies
access controls for Google Cloud resources.
https://cloud.google.com/iam/docs/reference/rest/v1/Policy
Note: auditConfigs not supported by this implementation.
"""
@dataclasses.dataclass(frozen=True)
class Binding:
"""Policy Binding. Associates members with a role.
https://cloud.google.com/iam/docs/reference/rest/v1/Policy#binding
"""
role: str
members: FrozenSet[str]
condition: Optional[Expr] = None
@classmethod
def from_response(cls, response: Dict[str, Any]) -> 'Policy.Binding':
fields = {
'role': response['role'],
'members': frozenset(response.get('members', [])),
}
if 'condition' in response:
fields['condition'] = Expr.from_response(response['condition'])
return cls(**fields)
def as_dict(self) -> Dict[str, Any]:
result = {
'role': self.role,
'members': list(self.members),
}
if self.condition is not None:
result['condition'] = self.condition.as_dict()
return result
bindings: FrozenSet[Binding]
etag: str
version: Optional[int] = None
@functools.lru_cache(maxsize=128)
def find_binding_for_role(
self,
role: str,
condition: Optional[Expr] = None) -> Optional['Policy.Binding']:
results = (binding for binding in self.bindings
if binding.role == role and binding.condition == condition)
return next(results, None)
@classmethod
def from_response(cls, response: Dict[str, Any]) -> 'Policy':
bindings = frozenset(
cls.Binding.from_response(b) for b in response.get('bindings', []))
return cls(bindings=bindings,
etag=response['etag'],
version=response.get('version'))
def as_dict(self) -> Dict[str, Any]:
result = {
'bindings': [binding.as_dict() for binding in self.bindings],
'etag': self.etag,
}
if self.version is not None:
result['version'] = self.version
return result
class IamV1(gcp.api.GcpProjectApiResource):
"""
Identity and Access Management (IAM) API.
https://cloud.google.com/iam/docs/reference/rest
"""
_service_accounts: gcp.api.discovery.Resource
# Operations that affect conditional role bindings must specify version 3.
# Otherwise conditions are omitted, and role names returned with a suffix,
# f.e. roles/iam.workloadIdentityUser_withcond_f1ec33c9beb41857dbf0
# https://cloud.google.com/iam/docs/reference/rest/v1/Policy#FIELDS.version
    POLICY_VERSION: int = 3
def __init__(self, api_manager: gcp.api.GcpApiManager, project: str):
super().__init__(api_manager.iam('v1'), project)
# Shortcut to projects/*/serviceAccounts/ endpoints
self._service_accounts = self.api.projects().serviceAccounts()
def service_account_resource_name(self, account) -> str:
"""
Returns full resource name of the service account.
The resource name of the service account in the following format:
projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}.
The ACCOUNT value can be the email address or the uniqueId of the
service account.
Ref https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/get
Args:
account: The ACCOUNT value
"""
return f'projects/{self.project}/serviceAccounts/{account}'
def get_service_account(self, account: str) -> ServiceAccount:
resource_name = self.service_account_resource_name(account)
request: _HttpRequest = self._service_accounts.get(name=resource_name)
response: Dict[str, Any] = self._execute(request)
logger.debug('Loaded Service Account:\n%s',
self._resource_pretty_format(response))
return ServiceAccount.from_response(response)
def get_service_account_iam_policy(self, account: str) -> Policy:
resource_name = self.service_account_resource_name(account)
request: _HttpRequest = self._service_accounts.getIamPolicy(
resource=resource_name,
options_requestedPolicyVersion=self.POLICY_VERSION)
response: Dict[str, Any] = self._execute(request)
logger.debug('Loaded Service Account Policy:\n%s',
self._resource_pretty_format(response))
return Policy.from_response(response)
def set_service_account_iam_policy(self, account: str,
policy: Policy) -> Policy:
"""Sets the IAM policy that is attached to a service account.
https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/setIamPolicy
"""
resource_name = self.service_account_resource_name(account)
body = {'policy': policy.as_dict()}
logger.debug('Updating Service Account %s policy:\n%s', account,
self._resource_pretty_format(body))
try:
request: _HttpRequest = self._service_accounts.setIamPolicy(
resource=resource_name, body=body)
response: Dict[str, Any] = self._execute(request)
return Policy.from_response(response)
except gcp.api.ResponseError as error:
if error.status == 409:
# https://cloud.google.com/iam/docs/policies#etag
logger.debug(error)
raise EtagConflict from error
raise
@handle_etag_conflict
def add_service_account_iam_policy_binding(self, account: str, role: str,
member: str) -> None:
"""Add an IAM policy binding to an IAM service account.
See for details on updating policy bindings:
https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/setIamPolicy
"""
policy: Policy = self.get_service_account_iam_policy(account)
binding: Optional[Policy.Binding] = policy.find_binding_for_role(role)
if binding and member in binding.members:
logger.debug('Member %s already has role %s for Service Account %s',
member, role, account)
return
if binding is None:
updated_binding = Policy.Binding(role, frozenset([member]))
else:
updated_members: FrozenSet[str] = binding.members.union({member})
updated_binding: Policy.Binding = dataclasses.replace(
binding, members=updated_members)
updated_policy: Policy = _replace_binding(policy, binding,
updated_binding)
self.set_service_account_iam_policy(account, updated_policy)
logger.debug('Role %s granted to member %s for Service Account %s',
role, member, account)
@handle_etag_conflict
def remove_service_account_iam_policy_binding(self, account: str, role: str,
member: str) -> None:
"""Remove an IAM policy binding from the IAM policy of a service
account.
See for details on updating policy bindings:
https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/setIamPolicy
"""
policy: Policy = self.get_service_account_iam_policy(account)
binding: Optional[Policy.Binding] = policy.find_binding_for_role(role)
if binding is None:
logger.debug('Noop: Service Account %s has no bindings for role %s',
account, role)
return
if member not in binding.members:
logger.debug(
'Noop: Service Account %s binding for role %s has no member %s',
account, role, member)
return
updated_members: FrozenSet[str] = binding.members.difference({member})
updated_binding: Policy.Binding = dataclasses.replace(
binding, members=updated_members)
updated_policy: Policy = _replace_binding(policy, binding,
updated_binding)
self.set_service_account_iam_policy(account, updated_policy)
logger.debug('Role %s revoked from member %s for Service Account %s',
role, member, account)
avg_line_length: 38.153355 | max_line_length: 97 | alphanum_fraction: 0.639843
hexsha: 04b47a28e98e5289717eac983cde7e2f5830476a | size: 111,643 | ext: py | lang: Python
repo_path: OpenGLWrapper_JE/venv/Lib/site-packages/pkg_resources/__init__.py | repo_name: JE-Chen/je_old_repo | repo_head_hexsha: a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
# coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
import ntpath
import posixpath
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
try:
FileExistsError
except NameError:
FileExistsError = OSError
from pkg_resources.extern import six
from pkg_resources.extern.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from . import py31compat
from pkg_resources.extern import appdirs
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')
__metaclass__ = type
if (3, 0) < sys.version_info < (3, 5):
raise RuntimeError("Python 3.5 or later is required")
if six.PY2:
# Those builtin exceptions are only defined in Python 3
PermissionError = None
NotADirectoryError = None
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
def parse_version(v):
try:
return packaging.version.Version(v)
except packaging.version.InvalidVersion:
return packaging.version.LegacyVersion(v)
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Warnings
'PkgResourcesDeprecationWarning',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = '{}.{}'.format(*sys.version_info)
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
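# Illustrative sketch (not part of the upstream module): get_provider() can be
# handed a module name; the returned IResourceProvider answers resource
# queries for that module's package.  The resource name used here is an
# arbitrary placeholder.
def _example_get_provider():
    """Look up the resource provider for this module and query it."""
    provider = get_provider(__name__)
    # has_resource()/resource_isdir() mirror os.path-style checks.
    return provider.has_resource('nonexistent.txt'), provider.resource_isdir('')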
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
from sysconfig import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]), int(version[1]),
_macosx_arch(machine),
)
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
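# Illustrative sketch (not part of the upstream module): platform strings are
# only "compatible" when they are equal, when either side is None, or in the
# macOS special cases handled above.
def _example_compatible_platforms():
    assert compatible_platforms(None, 'linux-x86_64')            # pure-Python egg
    assert compatible_platforms('linux-x86_64', 'linux-x86_64')
    assert not compatible_platforms('linux-i686', 'linux-x86_64')
    # An egg built for macosx-10.4 may run on a macosx-10.9 host with the
    # same major version and machine type.
    assert compatible_platforms('macosx-10.4-x86_64', 'macosx-10.9-x86_64')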
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
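# Illustrative sketch (not part of the upstream module): the three helpers
# above all build on get_distribution().  The distribution, group and entry
# point names below ('example_dist', 'console_scripts', 'example-tool') are
# hypothetical placeholders, not real packages.
def _example_entry_point_lookup(dist_name='example_dist'):
    ep = get_entry_info(dist_name, 'console_scripts', 'example-tool')
    if ep is None:
        return None
    # load_entry_point() imports and returns the advertised callable.
    return load_entry_point(dist_name, 'console_scripts', 'example-tool')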
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet:
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
)
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False, extras=None):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if any requirements are found on the path that have the correct name
        but the wrong version. Otherwise, if an `installer` is supplied it
        will be
invoked to obtain the correct version of the requirement and activate
it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req, extras):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer,
replace_conflicting=replace_conflicting
)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(
self, plugin_env, full_env=None, installer=None, fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
        If `existing=True` (default), call on all existing ones, as well.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
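# Illustrative sketch (not part of the upstream module): a private WorkingSet
# built from an explicit list of path entries can resolve requirements without
# touching the global ``working_set``.  'SomeProject>=1.0' is a hypothetical
# requirement used only for illustration.
def _example_private_working_set(extra_path=()):
    ws = WorkingSet(list(extra_path) + sys.path)
    try:
        dists = ws.resolve(parse_requirements('SomeProject>=1.0'))
    except DistributionNotFound:
        return []
    # Activate everything that resolve() decided is needed.
    for dist in dists:
        ws.add(dist)
    return dists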
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
class Environment:
"""Searchable snapshot of distributions on a search path"""
def __init__(
self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.6'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(
self, req, working_set, installer=None, replace_conflicting=False):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
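# Illustrative sketch (not part of the upstream module): an Environment can be
# pointed at a plugin directory and queried for the best installable match of
# a requirement.  '/path/to/plugins' and 'SomePlugin' are hypothetical names.
def _example_environment_best_match(plugin_dir='/path/to/plugins'):
    env = Environment([plugin_dir])
    req = Requirement.parse('SomePlugin>=0.1')
    # best_match() prefers a distribution already active in the working set,
    # otherwise the newest compatible one found in ``env``.
    return env.best_match(req, working_set)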
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s)
to the Python egg cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory?
You can change the cache directory by setting the PYTHON_EGG_CACHE
environment variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path
)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
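# Illustrative sketch (not part of the upstream module): the egg cache
# location can be pinned for a single process by setting PYTHON_EGG_CACHE
# before any extraction happens; get_default_cache() then reflects it.  The
# directory below is a hypothetical example.
def _example_pin_egg_cache(cache_dir='/path/to/private-egg-cache'):
    os.environ['PYTHON_EGG_CACHE'] = cache_dir
    assert get_default_cache() == cache_dir
    return get_default_cache()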
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
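# Illustrative sketch (not part of the upstream module): the helpers above
# normalise arbitrary project metadata into the names that appear in egg
# filenames.  The input strings are arbitrary examples.
def _example_name_normalisation():
    name = safe_name('My Project!')       # runs of punctuation become '-'
    version = safe_version('1.0 beta')    # not PEP 440: spaces -> '.', then '-' substitution
    # to_filename() swaps '-' for '_' so the pieces can be joined safely.
    return to_filename(name), to_filename(version)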
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
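# Illustrative sketch (not part of the upstream module): markers are evaluated
# against the running interpreter, and invalid_marker() reports problems
# without raising.
def _example_marker_checks():
    assert evaluate_marker('python_version >= "2.7"') in (True, False)
    # invalid_marker() returns the SyntaxError instance rather than raising it.
    err = invalid_marker('this is not a marker')
    return isinstance(err, SyntaxError)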
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def _get_metadata_path(self, name):
return self._fn(self.egg_info, name)
def has_metadata(self, name):
if not self.egg_info:
return self.egg_info
path = self._get_metadata_path(name)
return self._has(path)
def get_metadata(self, name):
if not self.egg_info:
return ""
path = self._get_metadata_path(name)
value = self._get(path)
if six.PY2:
return value
try:
return value.decode('utf-8')
except UnicodeDecodeError as exc:
# Include the path in the error message to simplify
# troubleshooting, and without changing the exception type.
exc.reason += ' in {} file at path: {}'.format(name, path)
raise
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}"
.format(**locals()),
)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
self._validate_resource_path(resource_name)
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
@staticmethod
def _validate_resource_path(path):
"""
Validate the resource paths according to the docs.
https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
>>> warned = getfixture('recwarn')
>>> warnings.simplefilter('always')
>>> vrp = NullProvider._validate_resource_path
>>> vrp('foo/bar.txt')
>>> bool(warned)
False
>>> vrp('../foo/bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('/foo/bar.txt')
>>> bool(warned)
True
>>> vrp('foo/../../bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('foo/f../bar.txt')
>>> bool(warned)
False
Windows path separators are straight-up disallowed.
>>> vrp(r'\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
>>> vrp(r'C:\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
Blank values are allowed
>>> vrp('')
>>> bool(warned)
False
Non-string values are not.
>>> vrp(None)
Traceback (most recent call last):
...
AttributeError: ...
"""
invalid = (
os.path.pardir in path.split(posixpath.sep) or
posixpath.isabs(path) or
ntpath.isabs(path)
)
if not invalid:
return
msg = "Use of .. or absolute path in a resource path is not allowed."
# Aggressively disallow Windows absolute paths
if ntpath.isabs(path) and not posixpath.isabs(path):
raise ValueError(msg)
# for compatibility, warn; in future
# raise ValueError(msg)
warnings.warn(
msg[:-1] + " and will raise exceptions in a future release.",
DeprecationWarning,
stacklevel=4,
)
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_egg_path(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
for name in loader_names:
loader_cls = getattr(importlib_machinery, name, type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
module_path = None
_isdir = _has = lambda self, path: False
def _get(self, path):
return ''
def _listdir(self, path):
return []
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def _get_metadata_path(self, name):
return self.path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.7 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
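# Illustrative sketch (not part of the upstream module): find_distributions()
# dispatches on the importer for a path entry, so it works for plain
# directories, unpacked eggs and zip files alike.  '/path/to/plugins' is a
# hypothetical directory.
def _example_scan_directory(path_item='/path/to/plugins'):
    return [dist.project_name for dist in find_distributions(path_item)]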
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir(''):
if _is_egg_path(subitem):
subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
for dist in dists:
yield dist
elif subitem.lower().endswith('.dist-info'):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
return
entries = safe_listdir(path_item)
# for performance, before sorting by version,
# screen entries for only those that will yield
# distributions
filtered = (
entry
for entry in entries
if dist_factory(path_item, entry, only)
)
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(filtered)
for entry in path_item_entries:
fullpath = os.path.join(path_item, entry)
factory = dist_factory(path_item, entry, only)
for dist in factory(fullpath):
yield dist
def dist_factory(path_item, entry, only):
"""
Return a dist_factory for a path_item and entry
"""
lower = entry.lower()
is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
return (
distributions_from_metadata
if is_meta else
find_distributions
if not only and _is_egg_path(entry) else
resolve_egg_link
if not only and lower.endswith('.egg-link') else
NoDists()
)
class NoDists:
"""
>>> bool(NoDists())
False
>>> list(NoDists()('anything'))
[]
"""
def __bool__(self):
return False
if six.PY2:
__nonzero__ = __bool__
def __call__(self, fullpath):
return iter(())
def safe_listdir(path):
"""
Attempt to list contents of path, but suppress some exceptions.
"""
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
        # Ignore the directory if it does not exist, is not a directory, or
        # permission is denied
ignorable = (
e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
# Python 2 on Windows needs to be handled this way :(
or getattr(e, "winerror", None) == 267
)
if not ignorable:
raise
return ()
def distributions_from_metadata(path):
root = os.path.dirname(path)
if os.path.isdir(path):
if len(os.listdir(path)) == 0:
# empty metadata dir; skip
return
metadata = PathMetadata(root, path)
else:
metadata = FileMetadata(path)
entry = os.path.basename(path)
yield Distribution.from_location(
root, entry, metadata, precedence=DEVELOP_DIST,
)
def non_empty_lines(path):
"""
Yield non-empty lines from file at path
"""
with open(path) as f:
for line in f:
line = line.strip()
if line:
yield line
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref)
for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ())
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
# capture warnings due to #1111
with warnings.catch_warnings():
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
    Rebuild module.__path__, ensuring that all entries are ordered
    to correspond to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path = sys.path
parent, _, _ = packageName.rpartition('.')
if parent:
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
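# Illustrative sketch (not part of the upstream module): an old-style
# namespace package's __init__.py traditionally contains just this call,
# which lets the package's sub-packages ship in separate distributions.
# 'examplens' is a hypothetical namespace name.
def _example_declare_namespace():
    declare_namespace('examplens')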
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
def _cygwin_patch(filename): # pragma: nocover
"""
    Contrary to POSIX 2008, on Cygwin, getcwd(3) contains symlink
    components. Using os.path.abspath() works around this limitation.
    A fix in os.getcwd() would probably be better, in Cygwin even more so,
    except that this seems to be by design...
"""
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return path.lower().endswith('.egg')
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
_is_egg_path(path) and
os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
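# Illustrative sketch of yield_lines() behaviour (inputs are made up):
#     list(yield_lines("a\n# comment\n\nb"))       -> ['a', 'b']
#     list(yield_lines(["a\nb", ["c", "# d"]]))    -> ['a', 'b', 'c']
# Blank lines and '#'-prefixed lines are dropped; nested sequences are
# flattened recursively.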
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint:
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = tuple(extras)
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
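    # Illustrative sketch (names are hypothetical, not from a real project):
    #     ep = EntryPoint.parse('my-plugin = mypkg.plugins:PluginClass [extra1]')
    #     ep.name        -> 'my-plugin'
    #     ep.module_name -> 'mypkg.plugins'
    #     ep.attrs       -> ('PluginClass',)
    #     ep.extras      -> ('extra1',)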
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
def is_version_line(line):
return line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
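# Illustrative sketch (metadata lines are made up):
#     _version_from_file(['Name: examplepkg', 'Version: 1.2.3'])  -> '1.2.3'
#     _version_from_file(['Name: examplepkg'])                    -> None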
class Distribution:
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(
self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = self._get_version()
if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = (
"Missing 'Version:' header and/or {} file at path: {}"
).format(self.PKG_INFO, path)
raise ValueError(msg, self)
return version
@property
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm):
"""
Given a mapping of extras to dependencies, strip off
environment markers and filter out any dependencies
not matching the markers.
"""
for extra in list(filter(None, dm)):
new_extra = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(':')
fails_marker = marker and (
invalid_marker(marker)
or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
"""
try:
# We need to access _get_metadata_path() on the provider object
# directly rather than through this class's __getattr__()
# since _get_metadata_path() is marked private.
path = self._provider._get_metadata_path(name)
# Handle exceptions e.g. in case the distribution's metadata
# provider doesn't support _get_metadata_path().
except Exception:
return '[could not detect]'
return path
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
return version
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
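    # Illustrative sketch (values hypothetical): a Distribution with
    # project_name 'ExamplePkg', version '1.0', py_version '3.8' and no
    # platform would report egg_name() == 'ExamplePkg-1.0-py3.8'.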
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
| set(
attr for attr in self._provider.__dir__()
if not attr.startswith('_')
)
)
if not hasattr(object, '__dir__'):
# python 2.7 not supported
del __dir__
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
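    # Illustrative sketch (distribution and entry point names are made up):
    #     dist = get_distribution('examplepkg')
    #     dist.get_entry_info('console_scripts', 'example-cli')   -> EntryPoint or None
    #     dist.load_entry_point('console_scripts', 'example-cli')  # ImportError if absent
    # load_entry_point() also require()s the distribution's dependencies
    # before resolving the target object.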
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) use an old
        safe_version, so their version numbers can get mangled when
        converted to filenames (e.g. 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). These distributions will not be parsed
        properly downstream by Distribution and safe_version, so take an
        extra step and try to get the version number from the metadata
        file itself instead of the filename.
"""
md_version = self._get_version()
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
try:
line += next(lines)
except StopIteration:
return
yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.url,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
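    # Illustrative sketch (the project name is hypothetical):
    #     req = Requirement.parse('examplepkg[extra1]>=1.0,<2.0')
    #     req.project_name   -> 'examplepkg'
    #     req.extras         -> ('extra1',)
    #     sorted(req.specs)  -> [('<', '2.0'), ('>=', '1.0')]
    #     '1.5' in req       -> True (version containment check)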
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
for t in types:
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
py31compat.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
try:
mkdir(dirname, 0o755)
except FileExistsError:
pass
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
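# Illustrative sketch (input is made up):
#     list(split_sections("a\n[s1]\nb\nc\n[s2]"))
#         -> [(None, ['a']), ('s1', ['b', 'c']), ('s2', [])]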
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
tuple(
dist.activate(replace=False)
for dist in working_set
)
add_activation_listener(
lambda dist: dist.activate(replace=True),
existing=False,
)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
class PkgResourcesDeprecationWarning(Warning):
"""
Base class for warning about deprecations in ``pkg_resources``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
| 33.861996 | 93 | 0.602384 |
560904ae10cde13399916dcb6b9dace105d04165 | 3,935 | py | Python |
MWCIoTDemo/device/modules/ImageClassifierService/app/predict.py | jamiewilbraham/IoTDemos | fb61ed134984daccece3512202d602be4173a387 | ["MIT"] | 129 | 2020-03-28T15:56:42.000Z | 2022-03-30T19:54:19.000Z |
MWCIoTDemo/device/modules/ImageClassifierService/app/predict.py | jamiewilbraham/IoTDemos | fb61ed134984daccece3512202d602be4173a387 | ["MIT"] | 23 | 2020-04-14T01:03:47.000Z | 2022-02-27T12:37:04.000Z |
MWCIoTDemo/device/modules/ImageClassifierService/app/predict.py | marcusgaspar/IoTDemos | d91650de84b2ae42e7d822e6c7f841f8942111c6 | ["MIT"] | 62 | 2020-04-23T23:17:48.000Z | 2022-03-21T01:43:31.000Z |
from urllib.request import urlopen
from datetime import datetime
from operator import itemgetter
import tensorflow as tf
from PIL import Image, ImageDraw
from object_detection import ObjectDetection
from azure.storage.blob import BlobServiceClient, BlobClient
import numpy as np
import sys
import uuid
import io
import time
MODEL_FILENAME = 'model.pb'
LABELS_FILENAME = 'labels.txt'
CONTAINER_NAME = "forkliftimages"
od_model = None
last_upload_time = None
latest_blob_image = ''
send_to_blob_interval = 5
labels = []
class TFObjectDetection(ObjectDetection):
"""Object Detection class for TensorFlow"""
def __init__(self, graph_def, labels):
super(TFObjectDetection, self).__init__(labels)
self.graph = tf.compat.v1.Graph()
with self.graph.as_default():
input_data = tf.compat.v1.placeholder(tf.float32, [1, None, None, 3], name='Placeholder')
tf.import_graph_def(graph_def, input_map={"Placeholder:0": input_data}, name="")
def predict(self, preprocessed_image):
inputs = np.array(preprocessed_image, dtype=np.float)[:, :, (2, 1, 0)] # RGB -> BGR
with tf.compat.v1.Session(graph=self.graph) as sess:
output_tensor = sess.graph.get_tensor_by_name('model_outputs:0')
outputs = sess.run(output_tensor, {'Placeholder:0': inputs[np.newaxis, ...]})
return outputs[0]
def initialize():
global od_model
# Load a TensorFlow model
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(MODEL_FILENAME, 'rb') as f:
graph_def.ParseFromString(f.read())
# Load labels
with open(LABELS_FILENAME, 'r') as f:
labels = [l.strip() for l in f.readlines()]
od_model = TFObjectDetection(graph_def, labels)
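# Usage sketch (illustrative only; assumes model.pb/labels.txt sit next to this
# module and that a valid Azure Storage connection string is available):
#     initialize()
#     client = BlobServiceClient.from_connection_string("<connection-string>")
#     result = predict_image(Image.open("sample.jpg"), client)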
def predict_image(image, blob_service_client):
global last_upload_time
global latest_blob_image
try:
if image.mode != "RGB":
print("Converting to RGB")
image = image.convert("RGB")
predictions = od_model.predict_image(image)
response = { 'created': datetime.utcnow().isoformat(), 'forklift': 0, 'bloburl' : latest_blob_image }
if predictions:
highest_prediction = max(predictions, key=itemgetter('probability'))
print(highest_prediction)
if highest_prediction['probability'] > 0.6:
response['forklift'] = 1
if last_upload_time is None or time.time() - last_upload_time >= send_to_blob_interval:
bounding_box = highest_prediction['boundingBox']
x0 = image.width * bounding_box['left']
y0 = image.height * bounding_box['top']
x1 = image.width * (bounding_box['left'] + bounding_box['width'])
y1 = image.height * (bounding_box['top'] + bounding_box['height'])
draw = ImageDraw.Draw(image)
draw.rectangle(((x0, y0), (x1, y1)), outline="red", width=3)
stream = io.BytesIO()
image.save(stream, format='PNG')
img_byte_array = stream.getvalue()
file_name = str(uuid.uuid4()) + ".png"
blob_client = blob_service_client.get_blob_client(container=CONTAINER_NAME, blob=file_name)
blob_client.upload_blob(img_byte_array)
last_upload_time = time.time()
latest_blob_image = blob_client.url
response['bloburl'] = latest_blob_image
print("sent image to blob:" + str(latest_blob_image))
else:
latest_blob_image = ''
response['bloburl'] = latest_blob_image
print("Results: " + str(response))
return response
except Exception as e:
print(str(e))
return 'Error: Could not preprocess image for prediction. ' + str(e)
| 34.517544 | 111 | 0.621601 |
eb999ca9955fa9cab5136254a18f690879a9052b | 1,565 | py | Python |
download_pdfs.py | Randl/arxiv-sanity-preserver | f14997edcb834622c89aba47f82e1df124aaa137 | ["MIT"] | 1 | 2021-06-03T20:34:00.000Z | 2021-06-03T20:34:00.000Z |
download_pdfs.py | Randl/arxiv-sanity-preserver | f14997edcb834622c89aba47f82e1df124aaa137 | ["MIT"] | null | null | null |
download_pdfs.py | Randl/arxiv-sanity-preserver | f14997edcb834622c89aba47f82e1df124aaa137 | ["MIT"] | null | null | null |
import os
import pickle
import random
import shutil
import time
from urllib.request import urlopen
from utils import Config
timeout_secs = 10 # after this many seconds we give up on a paper
if not os.path.exists(Config.pdf_dir):
os.makedirs(Config.pdf_dir)
print('Reading pdf list')
files = list()
for (dirpath, dirnames, filenames) in os.walk(Config.pdf_dir):
files += [os.path.join(dirpath, file) for file in filenames]
have = set([os.path.split(pdf_path)[-1] for pdf_path in files]) # get list of all pdfs we already have
print('Read pdf list')
numok = 0
numtot = 0
db = pickle.load(open(Config.db_path, 'rb'))
for pid, j in db.items():
pdfs = [x['href'] for x in j['links'] if x['type'] == 'application/pdf']
assert len(pdfs) == 1
pdf_url = pdfs[0] + '.pdf'
basename = pdf_url.split('/')[-1]
fname = os.path.join(Config.pdf_dir, basename)
    # try to retrieve the pdf
numtot += 1
try:
if not basename in have:
print('fetching %s into %s' % (pdf_url, fname))
req = urlopen(pdf_url, None, timeout_secs)
with open(fname, 'wb') as fp:
shutil.copyfileobj(req, fp)
time.sleep(0.05 + random.uniform(0, 0.1))
print('%d/%d of %d downloaded ok.' % (numok, numtot, len(db)))
else:
pass
# print('%s exists, skipping' % (fname, ))
numok += 1
except Exception as e:
print('error downloading: ', pdf_url)
print(e)
print('final number of papers downloaded okay: %d/%d' % (numok, len(db)))
| 30.096154 | 103 | 0.615974 |
2f73119355d263ff7538c9f3857ae2e6a3aee29c | 199 | py | Python |
django_wireguard/templatetags/django_wireguard_filters.py | thatsed/django-wireguard | d7f076ad0a0170bcf3b63f1b18a664cbfe3d95d7 | ["MIT"] | 1 | 2021-11-27T09:59:42.000Z | 2021-11-27T09:59:42.000Z |
django_wireguard/templatetags/django_wireguard_filters.py | thatsed/django-wireguard | d7f076ad0a0170bcf3b63f1b18a664cbfe3d95d7 | ["MIT"] | null | null | null |
django_wireguard/templatetags/django_wireguard_filters.py | thatsed/django-wireguard | d7f076ad0a0170bcf3b63f1b18a664cbfe3d95d7 | ["MIT"] | null | null | null |
import base64
from django import template
register = template.Library()
@register.filter
def base64encode(string: str) -> str:
return base64.b64encode(string.encode('utf-8')).decode('ascii')
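# Usage sketch in a Django template (variable name below is illustrative):
#     {% load django_wireguard_filters %}
#     {{ peer.public_key|base64encode }}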
| 18.090909 | 67 | 0.743719 |
c784f3a10ea54191f28a85b2f8811a508071eb44 | 977 | py | Python |
eval.py | 714627034/PaddlePaddle-MobileFaceNets | d810f0d786e423bd10ec7ff6a015ea534fe08d76 | ["Apache-2.0"] | 23 | 2021-05-08T09:00:13.000Z | 2022-03-30T06:21:17.000Z |
eval.py | 714627034/PaddlePaddle-MobileFaceNets | d810f0d786e423bd10ec7ff6a015ea534fe08d76 | ["Apache-2.0"] | null | null | null |
eval.py | 714627034/PaddlePaddle-MobileFaceNets | d810f0d786e423bd10ec7ff6a015ea534fe08d76 | ["Apache-2.0"] | 3 | 2021-05-28T05:53:42.000Z | 2021-12-10T08:44:42.000Z |
import argparse
import functools
import paddle
from utils.utils import add_arguments, print_arguments, get_lfw_list
from utils.utils import get_features, get_feature_dict, test_performance
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('batch_size', int, 64, 'Batch size for training')
add_arg('model_path', str, 'models/infer/model', 'Path to the inference model')
add_arg('test_list_path', str, 'dataset/lfw_test.txt', 'Path to the test data list')
args = parser.parse_args()
print_arguments(args)
# Load the model
model = paddle.jit.load(args.model_path)
model.eval()
# Get the test data
img_paths = get_lfw_list(args.test_list_path)
features = get_features(model, img_paths, batch_size=args.batch_size)
fe_dict = get_feature_dict(img_paths, features)
accuracy, threshold = test_performance(fe_dict, args.test_list_path)
print('Accuracy: %f, best threshold: %f' % (accuracy, threshold))
| 36.185185 | 90 | 0.721597 |
1748f80ea50b348545a4015bf3ef54a43b930553 | 2,049 | py | Python |
samples/simple_spellcheck.py | ElementAI/eai-spellchecker | 9c68be2c50d9312f4f43ab263a60a5221f1f00f4 | ["MIT"] | 1 | 2020-05-22T22:46:50.000Z | 2020-05-22T22:46:50.000Z |
samples/simple_spellcheck.py | arita37/eai-spellchecker | 9c68be2c50d9312f4f43ab263a60a5221f1f00f4 | ["MIT"] | 1 | 2018-09-28T17:08:11.000Z | 2018-09-28T17:08:11.000Z |
samples/simple_spellcheck.py | arita37/eai-spellchecker | 9c68be2c50d9312f4f43ab263a60a5221f1f00f4 | ["MIT"] | 1 | 2020-05-22T22:46:53.000Z | 2020-05-22T22:46:53.000Z |
# simple_spellcheck.py
# Guy Dumais, 2018-08-30
# Copyright (c) 2018 Element AI. All rights reserved.
import pkg_resources
from spellchecker.symspell import SymSpell, Verbosity # import the module
def main():
# create object
initial_capacity = 83000
# maximum edit distance per dictionary precalculation
max_edit_distance_dictionary = 2
prefix_length = 7
sym_spell = SymSpell(initial_capacity, max_edit_distance_dictionary,
prefix_length)
# load dictionary
dictionary_path = pkg_resources.resource_filename('spellchecker', 'frequency_dictionary_en_82_765.txt')
iterator = SymSpell.SpaceDelimitedFileIterator(0, 1, dictionary_path)
sym_spell.load_dictionary(iterator)
# lookup suggestions for single-word input strings
input_term = "memebers" # misspelling of "members"
# max edit distance per lookup
# (max_edit_distance_lookup <= max_edit_distance_dictionary)
max_edit_distance_lookup = 2
suggestion_verbosity = Verbosity.CLOSEST # TOP, CLOSEST, ALL
suggestions = sym_spell.lookup(input_term, suggestion_verbosity,
max_edit_distance_lookup)
# display suggestion term, term frequency, and edit distance
for suggestion in suggestions:
print(suggestion)
# lookup suggestions for multi-word input strings (supports compound
# splitting & merging)
input_term = ("whereis th elove hehad dated forImuch of thepast who "
"couqdn'tread in sixtgrade and ins pired him")
# max edit distance per lookup (per single word, not per whole input string)
max_edit_distance_lookup = 2
suggestions = sym_spell.lookup_compound(input_term,
max_edit_distance_lookup)
# display suggestion term, edit distance, and term frequency
for suggestion in suggestions:
print("{}, {}, {}".format(suggestion.term, suggestion.count,
suggestion.distance))
if __name__ == "__main__":
main()
| 40.176471 | 107 | 0.695949 |
f8923b282b2fd0f95590c6de4cddc465f1ddb82a | 75,237 | py | Python |
venv/lib/python3.8/site-packages/bqplot/marks.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | ["MIT"] | null | null | null |
venv/lib/python3.8/site-packages/bqplot/marks.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | ["MIT"] | null | null | null |
venv/lib/python3.8/site-packages/bqplot/marks.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | ["MIT"] | null | null | null |
# Copyright 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
=====
Marks
=====
.. currentmodule:: bqplot.marks
.. autosummary::
:toctree: _generate/
Mark
Lines
FlexLine
Scatter
Hist
Bars
Graph
GridHeatMap
HeatMap
Label
OHLC
Pie
Map
"""
import os
import json
from warnings import warn
import ipywidgets as widgets
from ipywidgets import (Widget, DOMWidget, CallbackDispatcher,
Color, widget_serialization)
from traitlets import (Int, Unicode, List, Enum, Dict, Bool, Float,
Instance, TraitError, validate)
from traittypes import Array
from numpy import histogram
import numpy as np
from .scales import Scale, OrdinalScale, LinearScale
from .traits import (Date, array_serialization,
array_squeeze, array_dimension_bounds, array_supported_kinds)
from ._version import __frontend_version__
from .colorschemes import CATEGORY10
def register_mark(key=None):
"""Returns a decorator registering a mark class in the mark type registry.
If no key is provided, the class name is used as a key. A key is provided
for each core bqplot mark so that the frontend can use
this key regardless of the kernel language.
"""
def wrap(mark):
name = key if key is not None else mark.__module__ + mark.__name__
Mark.mark_types[name] = mark
return mark
return wrap
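# Illustrative sketch (hypothetical mark class): registering a custom mark
# under an explicit key so the frontend can resolve it by name.
#     @register_mark('bqplot.MyMark')
#     class MyMark(Mark):
#         _view_name = Unicode('MyMark').tag(sync=True)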
# Shape constraint for array-types
def shape(*dimensions):
def validator(trait, value):
err_msg_tmpl = 'Expected an array of shape {} ' + \
'but got an array of shape {}'
if value.shape != dimensions:
raise TraitError(err_msg_tmpl.format(dimensions, value.shape))
else:
return value
return validator
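# Illustrative sketch: 'shape' returns a validator usable with traittypes'
# Array traits, e.g. Array().valid(shape(2, 3)) accepts only arrays whose
# .shape is exactly (2, 3) and raises a TraitError otherwise.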
class Mark(Widget):
"""The base mark class.
Traitlet mark attributes may be decorated with metadata.
**Data Attribute Decoration**
Data attributes are decorated with the following values:
scaled: bool
Indicates whether the considered attribute is a data attribute which
must be associated with a scale in order to be taken into account.
rtype: string
Range type of the associated scale.
atype: string
Key in bqplot's axis registry of the recommended axis type to represent
this scale. When not specified, the default is 'bqplot.Axis'.
Attributes
----------
display_name: string
Holds a user-friendly name for the trait attribute.
mark_types: dict (class-level attribute)
A registry of existing mark types.
scales: Dict of scales (default: {})
A dictionary of scales holding scales for each data attribute.
- If a mark holds a scaled attribute named 'x', the scales dictionary
must have a corresponding scale for the key 'x'.
- The scale's range type should be equal to the scaled attribute's
range type (rtype).
scales_metadata: Dict (default: {})
A dictionary of dictionaries holding metadata on the way scales are
used by the mark. For example, a linear scale may be used to count
pixels horizontally or vertically. The content of this dictionary
may change dynamically. It is an instance-level attribute.
preserve_domain: dict (default: {})
Indicates if this mark affects the domain(s) of the specified scale(s).
The keys of this dictionary are the same as the ones of the "scales"
attribute, and values are boolean. If a key is missing, it is
considered as False.
display_legend: bool (default: False)
Display toggle for the mark legend in the general figure legend
labels: list of unicode strings (default: [])
Labels of the items of the mark. This attribute has different meanings
depending on the type of mark.
apply_clip: bool (default: True)
Indicates whether the items that are beyond the limits of the chart
should be clipped.
visible: bool (default: True)
Visibility toggle for the mark.
selected_style: dict (default: {})
CSS style to be applied to selected items in the mark.
unselected_style: dict (default: {})
CSS style to be applied to items that are not selected in the mark,
when a selection exists.
selected: list of integers or None (default: None)
Indices of the selected items in the mark.
tooltip: DOMWidget or None (default: None)
Widget to be displayed as tooltip when elements of the scatter are
hovered on
tooltip_style: Dictionary (default: {'opacity': 0.9})
Styles to be applied to the tooltip widget
enable_hover: Bool (default: True)
Boolean attribute to control the hover interaction for the scatter. If
this is false, the on_hover custom mssg is not sent back to the python
side
interactions: Dictionary (default: {'hover': 'tooltip'})
Dictionary listing the different interactions for each mark. The key is
the event which triggers the interaction and the value is the kind of
interactions. Keys and values can only take strings from separate enums
for each mark.
tooltip_location : {'mouse', 'center'} (default: 'mouse')
Enum specifying the location of the tooltip. 'mouse' places the tooltip
at the location of the mouse when the tooltip is activated and 'center'
places the tooltip at the center of the figure. If tooltip is linked to
a click event, 'mouse' places the tooltip at the location of the click
that triggered the tooltip to be visible.
"""
mark_types = {}
scales = Dict(value_trait=Instance(Scale)).tag(sync=True, **widget_serialization)
scales_metadata = Dict().tag(sync=True)
preserve_domain = Dict().tag(sync=True)
display_legend = Bool().tag(sync=True, display_name='Display legend')
labels = List(trait=Unicode()).tag(sync=True, display_name='Labels')
apply_clip = Bool(True).tag(sync=True)
visible = Bool(True).tag(sync=True)
selected_style = Dict().tag(sync=True)
unselected_style = Dict().tag(sync=True)
selected = Array(None, allow_none=True).tag(sync=True, **array_serialization)
enable_hover = Bool(True).tag(sync=True)
tooltip = Instance(DOMWidget, allow_none=True, default_value=None)\
.tag(sync=True, **widget_serialization)
tooltip_style = Dict({'opacity': 0.9}).tag(sync=True)
interactions = Dict({'hover': 'tooltip'}).tag(sync=True)
tooltip_location = Enum(['mouse', 'center'], default_value='mouse')\
.tag(sync=True)
_model_name = Unicode('MarkModel').tag(sync=True)
_model_module = Unicode('bqplot').tag(sync=True)
_view_module = Unicode('bqplot').tag(sync=True)
_view_module_version = Unicode(__frontend_version__).tag(sync=True)
_model_module_version = Unicode(__frontend_version__).tag(sync=True)
_ipython_display_ = None
def _get_dimension_scales(self, dimension, preserve_domain=False):
"""
Return the list of scales corresponding to a given dimension.
The preserve_domain optional argument specifies whether one should
filter out the scales for which preserve_domain is set to True.
"""
if preserve_domain:
return [
self.scales[k] for k in self.scales if (
k in self.scales_metadata and
self.scales_metadata[k].get('dimension') == dimension and
not self.preserve_domain.get(k)
)
]
else:
return [
self.scales[k] for k in self.scales if (
k in self.scales_metadata and
self.scales_metadata[k].get('dimension') == dimension
)
]
@validate('scales')
def _validate_scales(self, proposal):
"""
Validates the `scales` based on the mark's scaled attributes metadata.
First checks for missing scale and then for 'rtype' compatibility.
"""
# Validate scales' 'rtype' versus data attribute 'rtype' decoration
# At this stage it is already validated that all values in self.scales
# are instances of Scale.
scales = proposal.value
for name in self.trait_names(scaled=True):
trait = self.traits()[name]
if name not in scales:
# Check for missing scale
if not trait.allow_none:
raise TraitError("Missing scale for data attribute %s." %
name)
else:
# Check scale range type compatibility
if scales[name].rtype != trait.metadata['rtype']:
raise TraitError("Range type mismatch for scale %s." %
name)
return scales
def __init__(self, **kwargs):
super(Mark, self).__init__(**kwargs)
self._hover_handlers = CallbackDispatcher()
self._click_handlers = CallbackDispatcher()
self._legend_click_handlers = CallbackDispatcher()
self._legend_hover_handlers = CallbackDispatcher()
self._element_click_handlers = CallbackDispatcher()
self._bg_click_handlers = CallbackDispatcher()
self._name_to_handler = {
'hover': self._hover_handlers,
'click': self._click_handlers,
'legend_click': self._legend_click_handlers,
'legend_hover': self._legend_hover_handlers,
'element_click': self._element_click_handlers,
'background_click': self._bg_click_handlers
}
self.on_msg(self._handle_custom_msgs)
def on_hover(self, callback, remove=False):
self._hover_handlers.register_callback(callback, remove=remove)
def on_click(self, callback, remove=False):
self._click_handlers.register_callback(callback, remove=remove)
def on_legend_click(self, callback, remove=False):
self._legend_click_handlers.register_callback(callback, remove=remove)
def on_legend_hover(self, callback, remove=False):
self._legend_hover_handlers.register_callback(callback, remove=remove)
def on_element_click(self, callback, remove=False):
self._element_click_handlers.register_callback(callback, remove=remove)
def on_background_click(self, callback, remove=False):
self._bg_click_handlers.register_callback(callback, remove=remove)
def _handle_custom_msgs(self, _, content, buffers=None):
try:
handler = self._name_to_handler[content['event']]
except KeyError:
return
handler(self, content)
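    # Illustrative sketch (mark instance and callback names are hypothetical):
    #     def handle_hover(mark, event):
    #         print(event.get('data'))
    #     some_mark.on_hover(handle_hover)
    # The registered callback receives the mark and the raw event payload
    # sent from the frontend.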
@register_mark('bqplot.Lines')
class Lines(Mark):
"""Lines mark.
In the case of the Lines mark, scales for 'x' and 'y' MUST be provided.
Attributes
----------
icon: string (class-level attribute)
Font-awesome icon for the respective mark
name: string (class-level attribute)
User-friendly name of the mark
colors: list of colors (default: CATEGORY10)
List of colors of the Lines. If the list is shorter than the number
of lines, the colors are reused.
close_path: bool (default: False)
Whether to close the paths or not.
fill: {'none', 'bottom', 'top', 'inside', 'between'}
Fill in the area defined by the curves
fill_colors: list of colors (default: [])
Fill colors for the areas. Defaults to stroke-colors when no
color provided
opacities: list of floats (default: [])
Opacity for the lines and patches. Defaults to 1 when the list is too
short, or the element of the list is set to None.
fill_opacities: list of floats (default: [])
Opacity for the areas. Defaults to 1 when the list is too
short, or the element of the list is set to None.
stroke_width: float (default: 2)
Stroke width of the Lines
labels_visibility: {'none', 'label'}
Visibility of the curve labels
curves_subset: list of integers or None (default: [])
If set to None, all the lines are displayed. Otherwise, only the items
in the list will have full opacity, while others will be faded.
line_style: {'solid', 'dashed', 'dotted', 'dash_dotted'}
Line style.
interpolation: {'linear', 'basis', 'cardinal', 'monotone'}
Interpolation scheme used for interpolation between the data points
provided. Please refer to the svg interpolate documentation for details
about the different interpolation schemes.
marker: {'circle', 'cross', 'diamond', 'square', 'triangle-down', 'triangle-up', 'arrow', 'rectangle', 'ellipse'}
Marker shape
marker_size: nonnegative int (default: 64)
Default marker size in pixels
Data Attributes
x: numpy.ndarray (default: [])
abscissas of the data points (1d or 2d array)
y: numpy.ndarray (default: [])
ordinates of the data points (1d or 2d array)
color: numpy.ndarray (default: None)
colors of the different lines based on data. If it is [], then the
colors from the colors attribute are used. Each line has a single color
and if the size of colors is less than the number of lines, the
remaining lines are given the default colors.
Notes
-----
The fields which can be passed to the default tooltip are:
name: label of the line
index: index of the line being hovered on
color: data attribute for the color of the line
The following are the events which can trigger interactions:
click: left click of the mouse
hover: mouse-over an element
The following are the interactions which can be linked to the above events:
tooltip: display tooltip
"""
# Mark decoration
icon = 'fa-line-chart'
name = 'Lines'
# Scaled attributes
x = Array([]).tag(sync=True, scaled=True,
rtype='Number', atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 2), array_supported_kinds())
y = Array([]).tag(sync=True, scaled=True,
rtype='Number', atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 2), array_supported_kinds())
color = Array(None, allow_none=True).tag(sync=True,
scaled=True,
rtype='Color',
atype='bqplot.ColorAxis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
# Other attributes
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'},
'color': {'dimension': 'color'}
}).tag(sync=True)
colors = List(trait=Color(default_value=None, allow_none=True),
default_value=CATEGORY10)\
.tag(sync=True, display_name='Colors')
fill_colors = List(trait=Color(default_value=None, allow_none=True))\
.tag(sync=True, display_name='Fill colors')
stroke_width = Float(2.0).tag(sync=True, display_name='Stroke width')
labels_visibility = Enum(['none', 'label'], default_value='none')\
.tag(sync=True, display_name='Labels visibility')
curves_subset = List().tag(sync=True)
line_style = Enum(['solid', 'dashed', 'dotted', 'dash_dotted'],
default_value='solid')\
.tag(sync=True, display_name='Line style')
    # TODO: Only Lines have interpolation but we can extend for other types of graphs
interpolation = Enum(['linear', 'basis', 'basis-open',
'basis-closed', 'bundle',
'cardinal', 'cardinal-open',
'cardinal-closed', 'monotone', 'step-before',
'step-after'],
default_value='linear')\
.tag(sync=True, display_name='Interpolation')
close_path = Bool().tag(sync=True, display_name='Close path')
fill = Enum(['none', 'bottom', 'top', 'inside', 'between'],
default_value='none')\
.tag(sync=True, display_name='Fill')
marker = Enum(['circle', 'cross', 'diamond', 'square', 'triangle-down',
'triangle-up', 'arrow', 'rectangle', 'ellipse'],
default_value=None, allow_none=True)\
.tag(sync=True, display_name='Marker')
marker_size = Int(64).tag(sync=True, display_name='Default size')
opacities = List().tag(sync=True, display_name='Opacity')
fill_opacities = List().tag(sync=True, display_name='Fill Opacity')
_view_name = Unicode('Lines').tag(sync=True)
_model_name = Unicode('LinesModel').tag(sync=True)
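# Usage sketch (illustrative): a minimal Lines mark wired to two linear scales.
#     xs, ys = LinearScale(), LinearScale()
#     line = Lines(x=np.arange(10), y=np.arange(10) ** 2,
#                  scales={'x': xs, 'y': ys}, labels=['y = x^2'],
#                  display_legend=True)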
@register_mark('bqplot.FlexLine')
class FlexLine(Mark):
"""Flexible Lines mark.
In the case of the FlexLines mark, scales for 'x' and 'y' MUST be provided.
Scales for the color and width data attributes are optional. In the case
where another data attribute than 'x' or 'y' is provided but the
corresponding scale is missing, the data attribute is ignored.
Attributes
----------
name: string (class-level attributes)
user-friendly name of the mark
colors: list of colors (default: CATEGORY10)
List of colors for the Lines
stroke_width: float (default: 1.5)
Default stroke width of the Lines
Data Attributes
x: numpy.ndarray (default: [])
abscissas of the data points (1d array)
y: numpy.ndarray (default: [])
ordinates of the data points (1d array)
color: numpy.ndarray or None (default: None)
Array controlling the color of the data points
width: numpy.ndarray or None (default: None)
Array controlling the widths of the Lines.
"""
# Mark decoration
icon = 'fa-line-chart'
name = 'Flexible lines'
# Scaled attributes
x = Array([]).tag(sync=True, scaled=True, rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
y = Array([]).tag(sync=True, scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
color = Array(None, allow_none=True)\
.tag(sync=True, scaled=True, rtype='Color',
atype='bqplot.ColorAxis',
**array_serialization).valid(array_squeeze)
width = Array(None, allow_none=True)\
.tag(sync=True, scaled=True, rtype='Number',
**array_serialization).valid(array_squeeze)
# Other attributes
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'},
'color': {'dimension': 'color'}
}).tag(sync=True)
stroke_width = Float(1.5).tag(sync=True, display_name='Stroke width')
colors = List(trait=Color(default_value=None, allow_none=True),
default_value=CATEGORY10).tag(sync=True)
_view_name = Unicode('FlexLine').tag(sync=True)
_model_name = Unicode('FlexLineModel').tag(sync=True)
class _ScatterBase(Mark):
"""
Base Mark for Label and Scatter
"""
# Scaled attributes
x = Array([], allow_none=True).tag(sync=True, scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_dimension_bounds(1, 1))
y = Array([], allow_none=True).tag(sync=True, scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_dimension_bounds(1, 1))
color = Array(None, allow_none=True).tag(sync=True,
scaled=True,
rtype='Color',
atype='bqplot.ColorAxis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
opacity = Array(None, allow_none=True).tag(sync=True,
scaled=True,
rtype='Number',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
size = Array(None, allow_none=True).tag(sync=True, scaled=True,
rtype='Number',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
rotation = Array(None, allow_none=True).tag(sync=True, scaled=True,
rtype='Number',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
# Other attributes
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'},
'color': {'dimension': 'color'},
'size': {'dimension': 'size'},
'opacity': {'dimension': 'opacity'},
'rotation': {'dimension': 'rotation'}
}).tag(sync=True)
opacities = Array([1.0])\
.tag(sync=True, display_name='Opacities', **array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
hovered_style = Dict().tag(sync=True)
unhovered_style = Dict().tag(sync=True)
hovered_point = Int(None, allow_none=True).tag(sync=True)
enable_move = Bool().tag(sync=True)
enable_delete = Bool().tag(sync=True)
restrict_x = Bool().tag(sync=True)
restrict_y = Bool().tag(sync=True)
update_on_move = Bool().tag(sync=True)
def __init__(self, **kwargs):
self._drag_start_handlers = CallbackDispatcher()
self._drag_handlers = CallbackDispatcher()
self._drag_end_handlers = CallbackDispatcher()
super(_ScatterBase, self).__init__(**kwargs)
self._name_to_handler.update({
'drag_start': self._drag_start_handlers,
'drag_end': self._drag_end_handlers,
'drag': self._drag_handlers
})
def on_drag_start(self, callback, remove=False):
self._drag_start_handlers.register_callback(callback, remove=remove)
def on_drag(self, callback, remove=False):
self._drag_handlers.register_callback(callback, remove=remove)
def on_drag_end(self, callback, remove=False):
self._drag_end_handlers.register_callback(callback, remove=remove)
@register_mark('bqplot.Scatter')
class Scatter(_ScatterBase):
"""Scatter mark.
In the case of the Scatter mark, scales for 'x' and 'y' MUST be provided.
The scales of other data attributes are optional. In the case where another
data attribute than 'x' or 'y' is provided but the corresponding scale is
missing, the data attribute is ignored.
Attributes
----------
icon: string (class-level attribute)
Font-awesome icon for that mark
name: string (class-level attribute)
User-friendly name of the mark
marker: {'circle', 'cross', 'diamond', 'square', 'triangle-down', 'triangle-up', 'arrow', 'rectangle', 'ellipse'}
Marker shape
colors: list of colors (default: ['steelblue'])
List of colors of the markers. If the list is shorter than the number
of points, the colors are reused.
default_colors: Deprecated
Same as `colors`, deprecated as of version 0.8.4
fill: Bool (default: True)
Whether to fill the markers or not
stroke: Color or None (default: None)
Stroke color of the marker
stroke_width: Float (default: 1.5)
Stroke width of the marker
opacities: list of floats (default: [1.0])
Default opacities of the markers. If the list is shorter than
the number
of points, the opacities are reused.
default_skew: float (default: 0.5)
Default skew of the marker.
This number is validated to be between 0 and 1.
default_size: nonnegative int (default: 64)
Default marker size in pixel.
If size data is provided with a scale, default_size stands for the
maximal marker size (i.e. the maximum value for the 'size' scale range)
drag_size: nonnegative float (default: 5.)
        Ratio of the dragged scatter size to the default scatter size.
names: numpy.ndarray (default: None)
Labels for the points of the chart
display_names: bool (default: True)
Controls whether names are displayed for points in the scatter
label_display_horizontal_offset: float (default: None)
Adds an offset, in pixels, to the horizontal positioning of the 'names'
label above each data point
label_display_vertical_offset: float (default: None)
Adds an offset, in pixels, to the vertical positioning of the 'names'
label above each data point
enable_move: bool (default: False)
Controls whether points can be moved by dragging. Refer to restrict_x,
restrict_y for more options.
restrict_x: bool (default: False)
Restricts movement of the point to only along the x axis. This is valid
only when enable_move is set to True. If both restrict_x and restrict_y
are set to True, the point cannot be moved.
restrict_y: bool (default: False)
Restricts movement of the point to only along the y axis. This is valid
only when enable_move is set to True. If both restrict_x and restrict_y
are set to True, the point cannot be moved.
Data Attributes
x: numpy.ndarray (default: [])
abscissas of the data points (1d array)
y: numpy.ndarray (default: [])
ordinates of the data points (1d array)
color: numpy.ndarray or None (default: None)
color of the data points (1d array). Defaults to default_color when not
provided or when a value is NaN
opacity: numpy.ndarray or None (default: None)
opacity of the data points (1d array). Defaults to default_opacity when
not provided or when a value is NaN
size: numpy.ndarray or None (default: None)
size of the data points. Defaults to default_size when not provided or
when a value is NaN
skew: numpy.ndarray or None (default: None)
skewness of the markers representing the data points. Defaults to
default_skew when not provided or when a value is NaN
rotation: numpy.ndarray or None (default: None)
orientation of the markers representing the data points.
The rotation scale's range is [0, 180]
Defaults to 0 when not provided or when a value is NaN.
Notes
-----
The fields which can be passed to the default tooltip are:
All the data attributes
index: index of the marker being hovered on
The following are the events which can trigger interactions:
click: left click of the mouse
hover: mouse-over an element
The following are the interactions which can be linked to the above events:
tooltip: display tooltip
    add: add new points to the scatter (can only be linked to click)
"""
# Mark decoration
icon = 'fa-cloud'
name = 'Scatter'
# Scaled attributes
skew = Array(None, allow_none=True).tag(sync=True, scaled=True,
rtype='Number',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
# Other attributes
marker = Enum(['circle', 'cross', 'diamond', 'square', 'triangle-down',
'triangle-up', 'arrow', 'rectangle', 'ellipse'],
default_value='circle').tag(sync=True, display_name='Marker')
colors = List(trait=Color(default_value=None, allow_none=True),
default_value=['steelblue'])\
.tag(sync=True, display_name='Colors')
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'},
'color': {'dimension': 'color'},
'size': {'dimension': 'size'},
'opacity': {'dimension': 'opacity'},
'rotation': {'dimension': 'rotation'},
'skew': {'dimension': 'skew'}
}).tag(sync=True)
@property
def default_colors(self):
return self.colors
@default_colors.setter
def default_colors(self, value):
warn("default_colors is deprecated, use colors instead.",
DeprecationWarning)
self.colors = value
@property
def default_opacities(self):
return self.opacities
@default_opacities.setter
def default_opacities(self, value):
warn("default_opacities is deprecated, use opacities instead.",
DeprecationWarning)
self.opacities = value
stroke = Color(None, allow_none=True).tag(sync=True,
display_name='Stroke color')
stroke_width = Float(1.5).tag(sync=True, display_name='Stroke width')
default_skew = Float(0.5, min=0, max=1).tag(sync=True)
default_size = Int(64).tag(sync=True, display_name='Default size')
names = Array(None, allow_none=True)\
.tag(sync=True, **array_serialization).valid(array_squeeze)
display_names = Bool(True).tag(sync=True, display_name='Display names')
label_display_horizontal_offset = Float(allow_none=True).tag(sync=True)
label_display_vertical_offset = Float(allow_none=True).tag(sync=True)
fill = Bool(True).tag(sync=True)
drag_color = Color(None, allow_none=True).tag(sync=True)
drag_size = Float(5.).tag(sync=True)
names_unique = Bool(True).tag(sync=True)
_view_name = Unicode('Scatter').tag(sync=True)
_model_name = Unicode('ScatterModel').tag(sync=True)
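# Illustrative usage sketch (added here for clarity, not part of the original
# source): a minimal scatter figure with draggable points and a drag-end
# callback. Assumes numpy and bqplot's LinearScale, Axis and Figure; all names
# below are hypothetical.
def _example_scatter_figure():
    import numpy as np
    from bqplot import LinearScale, Axis, Figure
    x_sc, y_sc = LinearScale(), LinearScale()
    scatter = Scatter(x=np.arange(20), y=np.random.randn(20),
                      scales={'x': x_sc, 'y': y_sc},
                      colors=['tomato'], enable_move=True)
    # Callbacks registered on the mark receive (mark, event_content).
    scatter.on_drag_end(lambda mark, event: print(event))
    return Figure(marks=[scatter],
                  axes=[Axis(scale=x_sc, label='x'),
                        Axis(scale=y_sc, orientation='vertical', label='y')])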
@register_mark('bqplot.ScatterGL')
class ScatterGL(Scatter):
_view_name = Unicode('ScatterGL').tag(sync=True)
_model_name = Unicode('ScatterGLModel').tag(sync=True)
@register_mark('bqplot.Label')
class Label(_ScatterBase):
"""Label mark.
Attributes
----------
x_offset: int (default: 0)
horizontal offset in pixels from the stated x location
y_offset: int (default: 0)
vertical offset in pixels from the stated y location
text: string (default: '')
text to be displayed
    default_size: float (default: 16.)
        font size, in the unit specified by font_unit (px, em, pt or %)
font_weight: {'bold', 'normal', 'bolder'}
font weight of the caption
drag_size: nonnegative float (default: 1.)
        Ratio of the dragged label font size to the default label font size.
align: {'start', 'middle', 'end'}
alignment of the text with respect to the provided location
enable_move: Bool (default: False)
Enable the label to be moved by dragging. Refer to restrict_x,
restrict_y for more options.
restrict_x: bool (default: False)
Restricts movement of the label to only along the x axis. This is valid
only when enable_move is set to True. If both restrict_x and restrict_y
are set to True, the label cannot be moved.
restrict_y: bool (default: False)
Restricts movement of the label to only along the y axis. This is valid
only when enable_move is set to True. If both restrict_x and restrict_y
are set to True, the label cannot be moved.
Data Attributes
x: numpy.ndarray (default: [])
horizontal position of the labels, in data coordinates or in
figure coordinates
y: numpy.ndarray (default: [])
vertical position of the labels, in data coordinates or in
figure coordinates
color: numpy.ndarray or None (default: None)
label colors
size: numpy.ndarray or None (default: None)
label sizes
rotation: numpy.ndarray or None (default: None)
label rotations
opacity: numpy.ndarray or None (default: None)
label opacities
"""
# Mark decoration
icon = 'fa-font'
name = 'Labels'
# Other attributes
x_offset = Int(0).tag(sync=True)
y_offset = Int(0).tag(sync=True)
colors = List(trait=Color(default_value=None,
allow_none=True),
default_value=CATEGORY10)\
.tag(sync=True, display_name='Colors')
rotate_angle = Float(0.0).tag(sync=True)
text = Array(None, allow_none=True)\
.tag(sync=True, **array_serialization).valid(array_squeeze)
default_size = Float(16.).tag(sync=True)
drag_size = Float(1.).tag(sync=True)
font_unit = Enum(['px', 'em', 'pt', '%'],
default_value='px').tag(sync=True)
font_weight = Enum(['bold', 'normal', 'bolder'],
default_value='bold').tag(sync=True)
align = Enum(['start', 'middle', 'end'],
default_value='start').tag(sync=True)
_view_name = Unicode('Label').tag(sync=True)
_model_name = Unicode('LabelModel').tag(sync=True)
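# Illustrative usage sketch (added here for clarity, not part of the original
# source): text annotations placed at data coordinates; names are hypothetical.
def _example_label_annotations():
    from bqplot import LinearScale, Figure
    x_sc, y_sc = LinearScale(), LinearScale()
    labels = Label(x=[0.25, 0.75], y=[0.5, 0.9],
                   text=['baseline', 'peak'],
                   scales={'x': x_sc, 'y': y_sc},
                   colors=['black'], default_size=16., align='middle')
    return Figure(marks=[labels])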
@register_mark('bqplot.Hist')
class Hist(Mark):
"""Histogram mark.
In the case of the Hist mark, scales for 'sample' and 'count' MUST be
provided.
Attributes
----------
icon: string (class-level attribute)
font-awesome icon for that mark
name: string (class-level attribute)
user-friendly name of the mark
bins: nonnegative int (default: 10)
number of bins in the histogram
normalized: bool (default: False)
Boolean attribute to return normalized values which
sum to 1 or direct counts for the `count` attribute. The scale of
`count` attribute is determined by the value of this flag.
colors: list of colors (default: ['steelblue'])
List of colors of the Histogram. If the list is shorter than the number
of bins, the colors are reused.
stroke: Color or None (default: None)
Stroke color of the histogram
opacities: list of floats (default: [])
Opacity for the bins of the histogram. Defaults to 1 when the list
is too short, or the element of the list is set to None.
midpoints: list (default: [])
midpoints of the bins of the histogram. It is a read-only attribute.
Data Attributes
sample: numpy.ndarray (default: [])
sample of which the histogram must be computed.
count: numpy.ndarray (read-only)
number of sample points per bin. It is a read-only attribute.
Notes
-----
The fields which can be passed to the default tooltip are:
midpoint: mid-point of the bin related to the rectangle hovered on
count: number of elements in the bin hovered on
bin_start: start point of the bin
    bin_end: end point of the bin
index: index of the bin
"""
# Mark decoration
icon = 'fa-signal'
name = 'Histogram'
# Scaled attributes
sample = Array([]).tag(sync=True, display_name='Sample',
scaled=True, rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
count = Array([], read_only=True).tag(sync=True,
display_name='Count',
scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze)
normalized = Bool().tag(sync=True)
# Other attributes
scales_metadata = Dict({
'sample': {'orientation': 'horizontal', 'dimension': 'x'},
'count': {'orientation': 'vertical', 'dimension': 'y'}
}).tag(sync=True)
bins = Int(10).tag(sync=True, display_name='Number of bins')
midpoints = List(read_only=True).tag(sync=True, display_name='Mid points')
# midpoints is a read-only attribute that is set when the mark is drawn
colors = List(trait=Color(default_value=None, allow_none=True),
default_value=['steelblue'])\
.tag(sync=True, display_name='Colors')
stroke = Color(None, allow_none=True).tag(sync=True)
opacities = List(trait=Float(1.0, min=0, max=1, allow_none=True))\
.tag(sync=True, display_name='Opacities')
_view_name = Unicode('Hist').tag(sync=True)
_model_name = Unicode('HistModel').tag(sync=True)
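# Illustrative usage sketch (added here for clarity, not part of the original
# source): histogram of a random sample. The 'count' data attribute is
# read-only and filled in by the frontend from 'sample'; names are hypothetical.
def _example_hist_figure():
    import numpy as np
    from bqplot import LinearScale, Axis, Figure
    sample_sc, count_sc = LinearScale(), LinearScale()
    hist = Hist(sample=np.random.randn(500), bins=25, normalized=False,
                scales={'sample': sample_sc, 'count': count_sc})
    return Figure(marks=[hist],
                  axes=[Axis(scale=sample_sc, label='sample'),
                        Axis(scale=count_sc, orientation='vertical',
                             label='count')])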
@register_mark('bqplot.Boxplot')
class Boxplot(Mark):
"""Boxplot marks.
Attributes
----------
stroke: Color or None
stroke color of the marker
    box_fill_color: Color (default: 'steelblue')
        fill color of the box
    opacities: list of floats (default: [])
        Opacities for the markers of the boxplot. Defaults to 1 when the
        list is too short, or the element of the list is set to None.
    outlier_fill_color: Color (default: 'gray')
        fill color for the outliers
box_width: int (default: None)
width of the box in pixels. The minimum value is 5.
        If set to None, box_width is calculated automatically.
auto_detect_outliers: bool (default: True)
Flag to toggle outlier auto-detection
Data Attributes
x: numpy.ndarray (default: [])
abscissas of the data points (1d array)
y: numpy.ndarray (default: [[]])
Sample data points (2d array)
"""
# Mark decoration
icon = 'fa-birthday-cake'
name = 'Boxplot chart'
# Scaled attributes
x = Array([]).tag(sync=True, scaled=True, rtype='Number',
atype='bqplot.Axis', **array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
    # The second dimension must contain the sample data for each box,
    # otherwise the behavior is undefined.
y = Array([[]]).tag(sync=True, scaled=True, rtype='Number',
atype='bqplot.Axis', **array_serialization)\
.valid(array_dimension_bounds(1, 2), array_supported_kinds())
# Other attributes
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'}
}).tag(sync=True)
stroke = Color(None, allow_none=True)\
.tag(sync=True, display_name='Stroke color')
box_fill_color = Color('steelblue')\
.tag(sync=True, display_name='Fill color for the box')
outlier_fill_color = Color('gray').tag(sync=True,
display_name='Outlier fill color')
opacities = List(trait=Float(1.0, min=0, max=1, allow_none=True))\
.tag(sync=True, display_name='Opacities')
box_width = Int(None, min=5, allow_none=True).tag(sync=True, display_name='Box Width')
auto_detect_outliers = Bool(True).tag(sync=True, display_name='Auto-detect Outliers')
_view_name = Unicode('Boxplot').tag(sync=True)
_model_name = Unicode('BoxplotModel').tag(sync=True)
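# Illustrative usage sketch (added here for clarity, not part of the original
# source): one box per x value, with the samples passed as a 2-d array (check
# the expected orientation of `y` for your version); names are hypothetical.
def _example_boxplot_figure():
    import numpy as np
    from bqplot import LinearScale, OrdinalScale, Axis, Figure
    x_sc, y_sc = OrdinalScale(), LinearScale()
    box = Boxplot(x=np.arange(5), y=np.random.randn(5, 100),
                  scales={'x': x_sc, 'y': y_sc},
                  box_fill_color='steelblue', auto_detect_outliers=True)
    return Figure(marks=[box],
                  axes=[Axis(scale=x_sc),
                        Axis(scale=y_sc, orientation='vertical')])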
@register_mark('bqplot.Bars')
class Bars(Mark):
"""Bar mark.
In the case of the Bars mark, scales for 'x' and 'y' MUST be provided.
The scales of other data attributes are optional. In the case where another
data attribute than 'x' or 'y' is provided but the corresponding scale is
missing, the data attribute is ignored.
Attributes
----------
icon: string (class-level attribute)
font-awesome icon for that mark
name: string (class-level attribute)
user-friendly name of the mark
color_mode: {'auto', 'group', 'element', 'no_group'}
Specify how default colors are applied to bars.
The 'group' mode means colors are assigned per group. If the list
of colors is shorter than the number of groups, colors are reused.
The 'element' mode means colors are assigned per group element. If the list
of colors is shorter than the number of bars in a group, colors are reused.
The 'no_group' mode means colors are assigned per bar, discarding the fact
that there are groups or stacks. If the list of colors is shorter than the
total number of bars, colors are reused.
opacity_mode: {'auto', 'group', 'element', 'no_group'}
Same as the `color_mode` attribute, but for the opacity.
type: {'stacked', 'grouped'}
whether 2-dimensional bar charts should appear grouped or stacked.
colors: list of colors (default: ['steelblue'])
list of colors for the bars.
orientation: {'horizontal', 'vertical'}
Specifies whether the bar chart is drawn horizontally or vertically.
If a horizontal bar chart is drawn, the x data is drawn vertically.
padding: float (default: 0.05)
        Attribute to control the spacing between the bars. The value is
        specified as a percentage of the width of the bar.
fill: Bool (default: True)
Whether to fill the bars or not
stroke: Color or None (default: None)
Stroke color for the bars
    stroke_width: Float (default: 1.)
Stroke width of the bars
opacities: list of floats (default: [])
Opacities for the bars. Defaults to 1 when the list is too
short, or the element of the list is set to None.
base: float (default: 0.0)
reference value from which the bars are drawn. defaults to 0.0
align: {'center', 'left', 'right'}
alignment of bars with respect to the tick value
label_display: bool (default: False)
whether or not to display bar data labels
label_display_format: string (default: .2f)
format for displaying values.
label_font_style: dict
CSS style for the text of each cell
label_display_vertical_offset: float
vertical offset value for the label display
label_display_horizontal_offset: float
horizontal offset value for the label display
Data Attributes
x: numpy.ndarray (default: [])
abscissas of the data points (1d array)
y: numpy.ndarray (default: [])
ordinates of the values for the data points
color: numpy.ndarray or None (default: None)
color of the data points (1d array). Defaults to default_color when not
provided or when a value is NaN
Notes
-----
The fields which can be passed to the default tooltip are:
All the data attributes
index: index of the bar being hovered on
sub_index: if data is two dimensional, this is the minor index
"""
# Mark decoration
icon = 'fa-bar-chart'
name = 'Bar chart'
# Scaled attributes
x = Array([]).tag(sync=True, scaled=True, rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
y = Array([]).tag(sync=True, scaled=True, rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_dimension_bounds(1, 2), array_supported_kinds())
color = Array(None, allow_none=True)\
.tag(sync=True, scaled=True, rtype='Color',
atype='bqplot.ColorAxis', **array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
# Bar text labels attributes -- add default values.
# Add bool for displaying a label or not. Add d3 formatting in docstring
label_display = Bool(default_value=False).tag(sync=True)
label_display_format = Unicode(default_value=".2f",
allow_none=False).tag(sync=True)
label_font_style = Dict().tag(sync=True)
label_display_vertical_offset = Float(default_value=0.0,
allow_none=False).tag(sync=True)
label_display_horizontal_offset = Float(default_value=0.0,
allow_none=False).tag(sync=True)
# Other attributes
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'},
'color': {'dimension': 'color'}
}).tag(sync=True)
color_mode = Enum(['auto', 'group', 'element', 'no_group'], default_value='auto')\
.tag(sync=True)
opacity_mode = Enum(['auto', 'group', 'element', 'no_group'], default_value='auto')\
.tag(sync=True)
type = Enum(['stacked', 'grouped'], default_value='stacked')\
.tag(sync=True, display_name='Type')
colors = List(trait=Color(default_value=None,
allow_none=True),
default_value=['steelblue'])\
.tag(sync=True, display_name='Colors')
padding = Float(0.05).tag(sync=True)
fill = Bool(True).tag(sync=True)
stroke = Color(None, allow_none=True).tag(sync=True)
stroke_width = Float(1.).tag(sync=True, display_name='Stroke width')
base = Float().tag(sync=True)
opacities = List(trait=Float(1.0, min=0, max=1, allow_none=True))\
.tag(sync=True, display_name='Opacities')
align = Enum(['center', 'left', 'right'], default_value='center')\
.tag(sync=True)
orientation = Enum(['vertical', 'horizontal'], default_value='vertical')\
.tag(sync=True)
@validate('orientation')
def _validate_orientation(self, proposal):
value = proposal['value']
x_orient = "horizontal" if value == "vertical" else "vertical"
self.scales_metadata = {'x': {'orientation': x_orient,
'dimension': 'x'},
'y': {'orientation': value, 'dimension': 'y'}}
return value
_view_name = Unicode('Bars').tag(sync=True)
_model_name = Unicode('BarsModel').tag(sync=True)
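# Illustrative usage sketch (added here for clarity, not part of the original
# source): a simple vertical bar chart on an ordinal x scale, with data labels
# displayed; names are hypothetical.
def _example_bar_figure():
    from bqplot import LinearScale, OrdinalScale, Axis, Figure
    x_sc, y_sc = OrdinalScale(), LinearScale()
    bars = Bars(x=['A', 'B', 'C', 'D'], y=[3, 7, 1, 5],
                scales={'x': x_sc, 'y': y_sc},
                colors=['steelblue'], padding=0.2,
                label_display=True, label_display_format='.0f')
    return Figure(marks=[bars],
                  axes=[Axis(scale=x_sc),
                        Axis(scale=y_sc, orientation='vertical')])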
@register_mark('bqplot.Bins')
class Bins(Bars):
"""Backend histogram mark.
A `Bars` instance that bins sample data.
It is very similar in purpose to the `Hist` mark, the difference being that
the binning is done in the backend (python), which avoids large amounts of
data being shipped back and forth to the frontend. It should therefore be
preferred for large data.
The binning method is the numpy `histogram` method.
The following documentation is in part taken from the numpy documentation.
Attributes
----------
icon: string (class-level attribute)
font-awesome icon for that mark
name: string (class-level attribute)
user-friendly name of the mark
bins: nonnegative int (default: 10)
or {'auto', 'fd', 'doane', 'scott', 'rice', 'sturges', 'sqrt'}
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default).
If `bins` is a string (method name), `histogram` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range.
density : bool (default: `False`)
If `False`, the height of each bin is the number of samples in it.
If `True`, the height of each bin is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
min : float (default: None)
The lower range of the bins. If not provided, lower range
is simply `x.min()`.
max : float (default: None)
        The upper range of the bins. If not provided, the upper range
        is simply `x.max()`.
Data Attributes
sample: numpy.ndarray (default: [])
sample of which the histogram must be computed.
Notes
-----
The fields which can be passed to the default tooltip are:
All the `Bars` data attributes (`x`, `y`, `color`)
index: index of the bin
"""
# Mark decoration
icon = 'fa-signal'
name = 'Backend Histogram'
# Scaled Attributes
sample = Array([]).tag(
sync=False, display_name='Sample', rtype='Number',
atype='bqplot.Axis', **array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
# Binning options
min = Float(None, allow_none=True).tag(sync=True)
max = Float(None, allow_none=True).tag(sync=True)
density = Bool().tag(sync=True)
bins = (Int(10) | List() | Enum(['auto', 'fd', 'doane',
'scott', 'rice', 'sturges', 'sqrt']))\
.tag(sync=True, display_name='Number of bins')
def __init__(self, **kwargs):
'''
Sets listeners on the data and the binning parameters.
Adjusts `Bars` defaults to suit a histogram better.
'''
self.observe(self.bin_data,
names=['sample', 'bins', 'density', 'min', 'max'])
# One unique color by default
kwargs.setdefault('colors', [CATEGORY10[0]])
# No spacing between bars
kwargs.setdefault('padding', 0.)
super(Bins, self).__init__(**kwargs)
def bin_data(self, *args):
'''
Performs the binning of `sample` data, and draws the corresponding bars
'''
# Get range
_min = self.sample.min() if self.min is None else self.min
_max = self.sample.max() if self.max is None else self.max
_range = (min(_min, _max), max(_min, _max))
# Bin the samples
counts, bin_edges = histogram(self.sample, bins=self.bins,
range=_range, density=self.density)
midpoints = (bin_edges[:-1] + bin_edges[1:]) / 2
# Redraw the underlying Bars
with self.hold_sync():
self.x, self.y = midpoints, counts
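# Illustrative usage sketch (added here for clarity, not part of the original
# source): backend-binned histogram of a large sample. Because `sample` is not
# synced, only the binned bars are shipped to the frontend; names are
# hypothetical.
def _example_bins_figure():
    import numpy as np
    from bqplot import LinearScale, Axis, Figure
    x_sc, y_sc = LinearScale(), LinearScale()
    bins_mark = Bins(sample=np.random.randn(100000), bins='auto', density=True,
                     scales={'x': x_sc, 'y': y_sc})
    return Figure(marks=[bins_mark],
                  axes=[Axis(scale=x_sc),
                        Axis(scale=y_sc, orientation='vertical')])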
@register_mark('bqplot.OHLC')
class OHLC(Mark):
"""Open/High/Low/Close marks.
Attributes
----------
icon: string (class-level attribute)
font-awesome icon for that mark
name: string (class-level attribute)
user-friendly name of the mark
marker: {'candle', 'bar'}
marker type
stroke: color (default: None)
stroke color of the marker
stroke_width: float (default: 1.0)
stroke width of the marker
    colors: List of colors (default: ['green', 'red'])
fill colors for the markers (up/down)
opacities: list of floats (default: [])
Opacities for the markers of the OHLC mark. Defaults to 1 when
the list is too short, or the element of the list is set to None.
format: string (default: 'ohlc')
description of y data being passed
supports all permutations of the strings 'ohlc', 'oc', and 'hl'
Data Attributes
x: numpy.ndarray
abscissas of the data points (1d array)
y: numpy.ndarrays
Open/High/Low/Close ordinates of the data points (2d array)
Notes
-----
The fields which can be passed to the default tooltip are:
x: the x value associated with the bar/candle
open: open value for the bar/candle
high: high value for the bar/candle
low: low value for the bar/candle
close: close value for the bar/candle
index: index of the bar/candle being hovered on
"""
# Mark decoration
icon = 'fa-birthday-cake'
name = 'OHLC chart'
# Scaled attributes
x = Array([]).tag(sync=True, scaled=True,
rtype='Number', atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
y = Array([[]]).tag(sync=True, scaled=True,
rtype='Number', atype='bqplot.Axis',
**array_serialization)\
.valid(array_dimension_bounds(1, 2))
# Other attributes
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'}
}).tag(sync=True)
marker = Enum(['candle', 'bar'], default_value='candle')\
.tag(sync=True, display_name='Marker')
stroke = Color(None, allow_none=True)\
.tag(sync=True, display_name='Stroke color')
stroke_width = Float(1.0).tag(sync=True, display_name='Stroke Width')
colors = List(trait=Color(default_value=None, allow_none=True),
default_value=['green', 'red'])\
.tag(sync=True, display_name='Colors')
opacities = List(trait=Float(1.0, min=0, max=1, allow_none=True))\
.tag(sync=True, display_name='Opacities')
format = Unicode('ohlc').tag(sync=True, display_name='Format')
_view_name = Unicode('OHLC').tag(sync=True)
_model_name = Unicode('OHLCModel').tag(sync=True)
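# Illustrative usage sketch (added here for clarity, not part of the original
# source): candles on a date axis, with each row of `y` ordered as
# (open, high, low, close) per the default `format`; the values are made up.
def _example_ohlc_figure():
    import numpy as np
    from bqplot import DateScale, LinearScale, Axis, Figure
    dates = np.arange('2020-01-01', '2020-01-11', dtype='datetime64[D]')
    ohlc_values = [[10, 12, 9, 11]] * len(dates)  # open, high, low, close
    x_sc, y_sc = DateScale(), LinearScale()
    ohlc = OHLC(x=dates, y=ohlc_values, marker='candle',
                scales={'x': x_sc, 'y': y_sc})
    return Figure(marks=[ohlc],
                  axes=[Axis(scale=x_sc),
                        Axis(scale=y_sc, orientation='vertical')])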
@register_mark('bqplot.Pie')
class Pie(Mark):
"""Piechart mark.
Attributes
----------
colors: list of colors (default: CATEGORY10)
list of colors for the slices.
stroke: color (default: 'white')
stroke color for the marker
opacities: list of floats (default: [])
Opacities for the slices of the Pie mark. Defaults to 1 when the list
is too short, or the element of the list is set to None.
sort: bool (default: False)
sort the pie slices by descending sizes
x: Float (default: 0.5) or Date
horizontal position of the pie center, in data coordinates or in figure
coordinates
y: Float (default: 0.5)
        vertical position of the pie center, in data coordinates or in figure
coordinates
radius: Float
radius of the pie, in pixels
inner_radius: Float
inner radius of the pie, in pixels
start_angle: Float (default: 0.0)
start angle of the pie (from top), in degrees
end_angle: Float (default: 360.0)
end angle of the pie (from top), in degrees
display_labels: {'none', 'inside', 'outside'} (default: 'inside')
label display options
display_values: bool (default: False)
if True show values along with labels
    values_format: string (default: '.1f')
        format for displaying values
    label_color: Color or None (default: None)
        color of the labels
    font_size: string (default: '12px')
        label font size in px, em or ex
font_weight: {'bold', 'normal', 'bolder'} (default: 'normal')
label font weight
Data Attributes
sizes: numpy.ndarray (default: [])
proportions of the pie slices
color: numpy.ndarray or None (default: None)
color of the data points. Defaults to colors when not provided.
Notes
-----
The fields which can be passed to the default tooltip are:
    All the data attributes
    index: index of the slice being hovered on
"""
# Mark decoration
icon = 'fa-pie-chart'
name = 'Pie chart'
# Scaled attributes
sizes = Array([]).tag(sync=True, rtype='Number', **array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
color = Array(None, allow_none=True).tag(sync=True,
scaled=True,
rtype='Color',
atype='bqplot.ColorAxis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
# Other attributes
x = (Float(0.5) | Date() | Unicode()).tag(sync=True)
y = (Float(0.5) | Date() | Unicode()).tag(sync=True)
scales_metadata = Dict({'color': {'dimension': 'color'}}).tag(sync=True)
sort = Bool().tag(sync=True)
colors = List(trait=Color(default_value=None, allow_none=True),
default_value=CATEGORY10).tag(sync=True,
display_name='Colors')
stroke = Color(None, allow_none=True).tag(sync=True)
opacities = List(trait=Float(1.0, min=0, max=1, allow_none=True))\
.tag(sync=True, display_name='Opacities')
radius = Float(180.0, min=0.0, max=float('inf')).tag(sync=True)
inner_radius = Float(0.1, min=0.0, max=float('inf')).tag(sync=True)
start_angle = Float().tag(sync=True)
end_angle = Float(360.0).tag(sync=True)
display_labels = Enum(['none', 'inside', 'outside'],
default_value='inside').tag(sync=True)
display_values = Bool(False).tag(sync=True)
values_format = Unicode(default_value='.1f').tag(sync=True)
label_color = Color(None, allow_none=True).tag(sync=True)
font_size = Unicode(default_value='12px').tag(sync=True)
font_weight = Enum(['bold', 'normal', 'bolder'],
default_value='normal').tag(sync=True)
_view_name = Unicode('Pie').tag(sync=True)
_model_name = Unicode('PieModel').tag(sync=True)
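# Illustrative usage sketch (added here for clarity, not part of the original
# source): a pie needs no scales unless a color scale is used; the sizes are
# made-up proportions and all names are hypothetical.
def _example_pie_figure():
    from bqplot import Figure
    pie = Pie(sizes=[10, 25, 65], display_labels='outside',
              display_values=True, values_format='.0f',
              colors=['steelblue', 'orange', 'seagreen'])
    return Figure(marks=[pie])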
def topo_load(name):
with open(os.path.join(os.path.split(os.path.realpath(__file__))[0],
name)) as data_file:
data = json.load(data_file)
return data
@register_mark('bqplot.Map')
class Map(Mark):
"""Map mark.
Attributes
----------
colors: Dict (default: {})
default colors for items of the map when no color data is passed.
The dictionary should be indexed by the id of the element and have
the corresponding colors as values. The key `default_color`
controls the items for which no color is specified.
selected_styles: Dict (default: {'selected_fill': 'Red',
'selected_stroke': None, 'selected_stroke_width': 2.0})
Dictionary containing the styles for selected subunits
hovered_styles: Dict (default: {'hovered_fill': 'Orange',
'hovered_stroke': None, 'hovered_stroke_width': 2.0})
Dictionary containing the styles for hovered subunits
hover_highlight: bool (default: True)
boolean to control if the map should be aware of which country is being
hovered on.
map_data: dict (default: topo_load("map_data/WorldMap.json"))
a topojson-formatted dictionary with the objects to map under the key
'subunits'.
Data Attributes
color: Dict or None (default: None)
dictionary containing the data associated with every country for the
color scale
"""
# Mark decoration
icon = 'fa-globe'
name = 'Map'
# Scaled attributes
color = Dict(allow_none=True).tag(sync=True, scaled=True, rtype='Color',
atype='bqplot.ColorAxis')
# Other attributes
scales_metadata = Dict({'color': {'dimension': 'color'}}).tag(sync=True)
hover_highlight = Bool(True).tag(sync=True)
hovered_styles = Dict({
'hovered_fill': 'Orange',
'hovered_stroke': None,
'hovered_stroke_width': 2.0}, allow_none=True).tag(sync=True)
stroke_color = Color(default_value=None, allow_none=True).tag(sync=True)
colors = Dict().tag(sync=True, display_name='Colors')
scales_metadata = Dict({'color': {'dimension': 'color'},
'projection': {'dimension': 'geo'}}).tag(sync=True)
selected_styles = Dict({
'selected_fill': 'Red',
'selected_stroke': None,
'selected_stroke_width': 2.0
}).tag(sync=True)
map_data = Dict(topo_load('map_data/WorldMap.json')).tag(sync=True)
_view_name = Unicode('Map').tag(sync=True)
_model_name = Unicode('MapModel').tag(sync=True)
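# Illustrative usage sketch (added here for clarity, not part of the original
# source): the Map mark expects a geographical 'projection' scale; Mercator is
# one possible choice in bqplot. Names are hypothetical.
def _example_map_figure():
    from bqplot import Mercator, Figure
    sc_geo = Mercator()
    world = Map(scales={'projection': sc_geo},
                colors={'default_color': 'lightgray'})
    return Figure(marks=[world])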
@register_mark('bqplot.GridHeatMap')
class GridHeatMap(Mark):
"""GridHeatMap mark.
Alignment: The tiles can be aligned so that the data matches either the
start, the end or the midpoints of the tiles. This is controlled by the
align attribute.
Suppose the data passed is a m-by-n matrix. If the scale for the rows is
Ordinal, then alignment is by default the mid points. For a non-ordinal
scale, the data cannot be aligned to the mid points of the rectangles.
If it is not ordinal, then two cases arise. If the number of rows passed
is m, then align attribute can be used. If the number of rows passed
is m+1, then the data are the boundaries of the m rectangles.
If rows and columns are not passed, and scales for them are also
not passed, then ordinal scales are generated for the rows and columns.
Attributes
----------
    row_align: Enum(['start', 'end'])
        This is only valid if the number of entries in `row` exactly matches
        the number of rows in `color` and the `row_scale` is not
        `OrdinalScale`. `start` aligns the row values passed with the start
        of the tiles and `end` aligns the row values with the end of the
        tiles.
    column_align: Enum(['start', 'end'])
        This is only valid if the number of entries in `column` exactly
        matches the number of columns in `color` and the `column_scale` is
        not `OrdinalScale`. `start` aligns the column values passed with
        the start of the tiles and `end` aligns the column values with the
        end of the tiles.
anchor_style: dict (default: {})
Controls the style for the element which serves as the anchor during
selection.
display_format: string (default: None)
format for displaying values. If None, then values are not displayed
font_style: dict
CSS style for the text of each cell
Data Attributes
color: numpy.ndarray or None (default: None)
color of the data points (2d array). The number of elements in
        this array corresponds to the number of cells created in the heatmap.
row: numpy.ndarray or None (default: None)
labels for the rows of the `color` array passed. The length of
this can be no more than 1 away from the number of rows in `color`.
This is a scaled attribute and can be used to affect the height of the
cells as the entries of `row` can indicate the start or the end points
of the cells. Refer to the property `row_align`.
If this property is None, then a uniformly spaced grid is generated in
the row direction.
column: numpy.ndarray or None (default: None)
labels for the columns of the `color` array passed. The length of
this can be no more than 1 away from the number of columns in `color`
This is a scaled attribute and can be used to affect the width of the
cells as the entries of `column` can indicate the start or the
end points of the cells. Refer to the property `column_align`.
If this property is None, then a uniformly spaced grid is generated in
the column direction.
"""
# Scaled attributes
row = Array(None, allow_none=True).tag(sync=True, scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
column = Array(None, allow_none=True).tag(sync=True, scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
color = Array(None, allow_none=True).tag(sync=True, scaled=True,
rtype='Color',
atype='bqplot.ColorAxis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 2))
# Other attributes
scales_metadata = Dict({
'row': {'orientation': 'vertical', 'dimension': 'y'},
'column': {'orientation': 'horizontal', 'dimension': 'x'},
'color': {'dimension': 'color'}
}).tag(sync=True)
row_align = Enum(['start', 'end'], default_value='start').tag(sync=True)
column_align = Enum(['start', 'end'], default_value='start').tag(sync=True)
null_color = Color('black', allow_none=True).tag(sync=True)
stroke = Color('black', allow_none=True).tag(sync=True)
opacity = Float(1.0, min=0.2, max=1).tag(sync=True, display_name='Opacity')
anchor_style = Dict().tag(sync=True)
display_format = Unicode(default_value=None, allow_none=True)\
.tag(sync=True)
font_style = Dict().tag(sync=True)
def __init__(self, **kwargs):
# Adding scales in case they are not passed too.
scales = kwargs.pop('scales', {})
        if scales.get('row', None) is None:
            row_scale = OrdinalScale(reverse=True)
            scales['row'] = row_scale
        if scales.get('column', None) is None:
            column_scale = OrdinalScale()
            scales['column'] = column_scale
kwargs['scales'] = scales
super(GridHeatMap, self).__init__(**kwargs)
@validate('row')
def _validate_row(self, proposal):
row = proposal.value
if row is None:
return row
color = np.asarray(self.color)
n_rows = color.shape[0]
if len(row) != n_rows and len(row) != n_rows + 1 and len(row) != n_rows - 1:
raise TraitError('row must be an array of size color.shape[0]')
return row
@validate('column')
def _validate_column(self, proposal):
column = proposal.value
if column is None:
return column
color = np.asarray(self.color)
n_columns = color.shape[1]
if len(column) != n_columns and len(column) != n_columns + 1 and len(column) != n_columns - 1:
raise TraitError('column must be an array of size color.shape[1]')
return column
_view_name = Unicode('GridHeatMap').tag(sync=True)
_model_name = Unicode('GridHeatMapModel').tag(sync=True)
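# Illustrative usage sketch (added here for clarity, not part of the original
# source): row/column ordinal scales are generated automatically when not
# passed, so only a color scale is provided; names are hypothetical.
def _example_grid_heat_map_figure():
    import numpy as np
    from bqplot import ColorScale, Figure
    grid = GridHeatMap(color=np.random.rand(5, 8),
                       scales={'color': ColorScale()},
                       display_format='.2f')
    return Figure(marks=[grid])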
@register_mark('bqplot.HeatMap')
class HeatMap(Mark):
"""HeatMap mark.
Attributes
----------
Data Attributes
color: numpy.ndarray or None (default: None)
color of the data points (2d array).
x: numpy.ndarray or None (default: None)
labels for the columns of the `color` array passed. The length of
this has to be the number of columns in `color`.
This is a scaled attribute.
y: numpy.ndarray or None (default: None)
labels for the rows of the `color` array passed. The length of this has
to be the number of rows in `color`.
This is a scaled attribute.
"""
# Scaled attributes
x = Array(None, allow_none=True).tag(sync=True, scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
y = Array(None, allow_none=True).tag(sync=True, scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
color = Array(None, allow_none=True).tag(sync=True, scaled=True,
rtype='Color',
atype='bqplot.ColorAxis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(2, 2))
# Other attributes
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'},
'color': {'dimension': 'color'}
}).tag(sync=True)
null_color = Color('black', allow_none=True).tag(sync=True)
def __init__(self, **kwargs):
data = kwargs['color']
kwargs.setdefault('x', range(data.shape[1]))
kwargs.setdefault('y', range(data.shape[0]))
scales = kwargs.pop('scales', {})
# Adding default x and y data if they are not passed.
# Adding scales in case they are not passed too.
        if scales.get('x', None) is None:
            x_scale = LinearScale()
            scales['x'] = x_scale
        if scales.get('y', None) is None:
            y_scale = LinearScale()
            scales['y'] = y_scale
kwargs['scales'] = scales
super(HeatMap, self).__init__(**kwargs)
_view_name = Unicode('HeatMap').tag(sync=True)
_model_name = Unicode('HeatMapModel').tag(sync=True)
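# Illustrative usage sketch (added here for clarity, not part of the original
# source): x and y default to the column/row indices and to linear scales when
# not provided, so only the color scale is passed; names are hypothetical.
def _example_heat_map_figure():
    import numpy as np
    from bqplot import ColorScale, Figure
    heat = HeatMap(color=np.random.rand(20, 30),
                   scales={'color': ColorScale()})
    return Figure(marks=[heat])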
@register_mark('bqplot.Graph')
class Graph(Mark):
"""Graph with nodes and links.
Attributes
----------
node_data: List
list of node attributes for the graph
link_matrix: numpy.ndarray of shape(len(nodes), len(nodes))
link data passed as 2d matrix
link_data: List
list of link attributes for the graph
charge: int (default: -600)
charge of force layout. Will be ignored when x and y data attributes
are set
static: bool (default: False)
whether the graph is static or not
link_distance: float (default: 100)
link distance in pixels between nodes. Will be ignored when x and y
data attributes are set
link_type: {'arc', 'line', 'slant_line'} (default: 'arc')
Enum representing link type
directed: bool (default: True)
directed or undirected graph
highlight_links: bool (default: True)
highlights incoming and outgoing links when hovered on a node
colors: list (default: CATEGORY10)
list of node colors
Data Attributes
x: numpy.ndarray (default: [])
abscissas of the node data points (1d array)
y: numpy.ndarray (default: [])
ordinates of the node data points (1d array)
color: numpy.ndarray or None (default: None)
color of the node data points (1d array).
link_color: numpy.ndarray of shape(len(nodes), len(nodes))
link data passed as 2d matrix
"""
charge = Int(-600).tag(sync=True)
static = Bool(False).tag(sync=True)
link_distance = Float(100).tag(sync=True)
node_data = List().tag(sync=True)
link_data = List().tag(sync=True)
link_matrix = Array([]).tag(sync=True, rtype='Number',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 2))
link_type = Enum(['arc', 'line', 'slant_line'],
default_value='arc').tag(sync=True)
directed = Bool(True).tag(sync=True)
colors = List(trait=Color(default_value=None, allow_none=True),
default_value=CATEGORY10).tag(sync=True,
display_name='Colors')
interactions = Dict({'hover': 'tooltip', 'click': 'select'}).tag(sync=True)
highlight_links = Bool(True).tag(sync=True)
# Scaled attributes
x = Array([], allow_none=True).tag(sync=True,
scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_dimension_bounds(1, 1))
y = Array([], allow_none=True).tag(sync=True,
scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_dimension_bounds(1, 1))
color = Array(None, allow_none=True).tag(sync=True,
scaled=True,
rtype='Color',
atype='bqplot.ColorAxis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 1))
link_color = Array([]).tag(sync=True, rtype='Color',
atype='bqplot.ColorAxis',
**array_serialization)\
.valid(array_squeeze, array_dimension_bounds(1, 2))
hovered_style = Dict().tag(sync=True)
unhovered_style = Dict().tag(sync=True)
hovered_point = Int(None, allow_none=True).tag(sync=True)
# Other attributes
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'},
'color': {'dimension': 'color'},
'link_color': {'dimension': 'link_color'}
}).tag(sync=True)
_model_name = Unicode('GraphModel').tag(sync=True)
_view_name = Unicode('Graph').tag(sync=True)
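# Illustrative usage sketch (added here for clarity, not part of the original
# source): a small directed graph laid out by the force simulation, since no
# x/y data attributes are set; names are hypothetical.
def _example_graph_figure():
    from bqplot import Figure
    node_data = [{'label': 'A'}, {'label': 'B'}, {'label': 'C'}]
    link_data = [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]
    graph = Graph(node_data=node_data, link_data=link_data,
                  directed=True, link_type='line', charge=-300)
    return Figure(marks=[graph])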
@register_mark('bqplot.Image')
class Image(Mark):
"""Image mark, based on the ipywidgets image
If no scales are passed, uses the parent Figure scales.
Attributes
----------
image: Instance of ipywidgets.Image
Image to be displayed
Data Attributes
x: tuple (default: (0, 1))
abscissas of the left and right-hand side of the image
in the format (x0, x1)
y: tuple (default: (0, 1))
ordinates of the bottom and top side of the image
in the format (y0, y1)
"""
_view_name = Unicode('Image').tag(sync=True)
_model_name = Unicode('ImageModel').tag(sync=True)
image = Instance(widgets.Image).tag(sync=True, **widget_serialization)
pixelated = Bool(True).tag(sync=True)
x = Array(default_value=(0, 1)).tag(sync=True, scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, shape(2))
y = Array(default_value=(0, 1)).tag(sync=True, scaled=True,
rtype='Number',
atype='bqplot.Axis',
**array_serialization)\
.valid(array_squeeze, shape(2))
scales_metadata = Dict({
'x': {'orientation': 'horizontal', 'dimension': 'x'},
'y': {'orientation': 'vertical', 'dimension': 'y'},
}).tag(sync=True)
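# Illustrative usage sketch (added here for clarity, not part of the original
# source): wraps an ipywidgets Image and stretches it over the unit square of
# two linear scales; 'example.png' is a hypothetical file.
def _example_image_figure():
    import ipywidgets as widgets
    from bqplot import LinearScale, Axis, Figure
    with open('example.png', 'rb') as f:
        ipyimage = widgets.Image(value=f.read(), format='png')
    x_sc, y_sc = LinearScale(), LinearScale()
    image_mark = Image(image=ipyimage, x=(0, 1), y=(0, 1),
                       scales={'x': x_sc, 'y': y_sc})
    return Figure(marks=[image_mark],
                  axes=[Axis(scale=x_sc),
                        Axis(scale=y_sc, orientation='vertical')])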
# app.py
__author__ = "Jose Herrerias"
__version__ = "0.1.0"
__email__ = "herreriasjose@gmail.com"
__status__ = "Test"
import datetime
import json
import logging
import pickle
import re
import sqlite3
import spacy
import time
from geopy.geocoders import Nominatim
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def geolocate(geolocator, entity):
time.sleep(.25)
try:
location = geolocator.geocode(entity,timeout=None)
if location:
return (location.latitude, location.longitude)
return None, None
    except Exception:
time.sleep(2)
logger.info("Exception getting location.")
return None, None
def remove_urls(entity):
try:
pattern = re.compile(r'(http|https)://[\w\-]+(\.[\w\-]+)+\S*',re.IGNORECASE)
result = re.subn(pattern, r'',entity)
return result[0]
except Exception as e:
logger.info("Exception cleaning text")
return "None"
def remove_punctuation(entity):
return re.sub(r'[^\w\s]','',entity)
def read_db(db):
db = sqlite3.connect(db)
cursor = db.cursor()
cursor.execute("SELECT created_at, text FROM tweets")
data_in_tuples = cursor.fetchall()
data_in_a_dict = {}
for i in range(len(data_in_tuples)):
data_in_a_dict[data_in_tuples[i][0]] = data_in_tuples[i][1]
nlp = spacy.load('en')
dates_and_entities = []
# Only the last 2000 Tweets.
for key in sorted(data_in_a_dict)[-2000:]:
doc = nlp(data_in_a_dict[key])
ents = []
for ent in doc.ents:
ents.append([ent.label_, ent.text])
dates_and_entities.append({key:ents})
logger.info("All the entities extracted...")
# Since it takes a while, here we make a backup of the entities extracted.
with open('dates_and_entities.pkl','wb') as f:
pickle.dump(dates_and_entities,f)
logger.info("Entities saved...")
def clean_db():
with open("dates_and_entities.pkl","rb") as f:
dates_and_entities = pickle.load(f)
geolocator = Nominatim()
list_of_jsons = []
for elem in dates_and_entities:
for k,v in elem.items():
entities = [(e[1]) for e in v if e[0] == 'GPE']
entities = [(remove_urls(e)) for e in entities]
entities = [(remove_punctuation(e)) for e in entities]
for entity in entities:
lat, lon = geolocate(geolocator,entity)
                # Dismiss all the entities without latitude or longitude.
if (lat and lon):
list_of_jsons.append(json.dumps({'created_at':int(k),'entity':entity,'lat':lat,'lon':lon}))
file_name = 'entities_dataset.json'
all_the_entities = ",".join(list_of_jsons)
with open(file_name,'w') as f:
f.write('[')
f.write(all_the_entities)
f.write(']')
logger.info("Done!")
if __name__ == '__main__':
    # Note that the scraping code used to create the DB is not included in this package.
read_db('realDonaldTrump.db')
clean_db()
# Packs/CortexXDR/Scripts/XDRSyncScript/XDRSyncScript_test.py
from CommonServerPython import *
import copy
import json
import XDRSyncScript as xdr_script
from XDRSyncScript import ASSIGNED_USER_MAIL_XDR_FIELD, MODIFICATION_TIME_XDR_FIELD, MANUAL_SEVERITY_XDR_FIELD, \
SEVERITY_XDR_FIELD
INCIDENT_IN_DEMISTO = {
"sourceInstance": "Palo Alto Networks Cortext XDR IR_instance_1",
"occurred": "2019-05-30T14:32:22.398+03:00",
"closeReason": "",
"modified": "2019-06-02T11:15:09.323251+03:00",
"CustomFields": {
"xdrincidentid": "697567",
"xdrurl": "http://example.com/incident-view/697567",
"xdrdescription": "WildFire Malware detected on host HostNameFFM8VIP9",
"xdralertcount": 1,
"xdrstatus": "new",
"xdrassignedusermail": "",
"xdrassigneduserprettyname": "",
"xdrmodificationtime": "2019-06-02T11:15:09.323251+03:00",
"xdralerts": [
{
"category": "WildFirePostDetection",
"action_pretty": "Detected (Reported)",
"description": "Suspicious executable detected",
"severity": "high",
"host_ip": "8.8.8.8",
"source": "Traps",
"alert_id": "50820",
"host_name": "HostNameFFM8VIP9",
"detection_timestamp": 1559215835437,
"action": "REPORTED",
"user_name": "N/A",
"name": "WildFire Malware"
}
],
"xdrfileartifacts": [
{
"file_signature_status": "SIGNATURE_UNAVAILABLE",
"is_process": None,
"file_name": "LCTGSK7IML.docx",
"file_wildfire_verdict": "UNKNOWN",
"alert_count": 1,
"is_malicious": None,
"is_manual": None,
"file_signature_vendor_name": None,
"type": "HASH",
"file_sha256": "384654fa409c7a500a4a843d33a005c9d670d4845d3a9e096efc8b00ad05a621"
}
],
"xdrnetworkartifacts": []
},
"severity": 1,
"name": "#697567 - WildFire Malware detected on host HostNameFFM8VIP9",
"created": "2019-06-02T11:13:54.674006+03:00",
"sourceBrand": "Palo Alto Networks Cortext XDR IR",
}
OLD_INCIDENT_IN_DEMISTO = {
"sourceInstance": "Palo Alto Networks Cortext XDR IR_instance_1",
"occurred": "2019-05-30T14:32:22.398+03:00",
"closeReason": "",
"modified": "2019-06-02T11:15:09.323251+03:00",
"CustomFields": {
"xdrincidentid": "697567",
"xdrurl": "http://example.com/incident-view/697567",
"xdrdescription": "WildFire Malware detected on host HostNameFFM8VIP9",
"xdralertcount": 1,
"xdrstatus": "new",
"xdrassignedusermail": "",
"xdrassigneduserprettyname": "",
"xdralerts": [
{
"category": "WildFirePostDetection",
"action_pretty": "Detected (Reported)",
"description": "Suspicious executable detected",
"severity": "high",
"host_ip": "8.8.8.8",
"source": "Traps",
"alert_id": "50820",
"host_name": "HostNameFFM8VIP9",
"detection_timestamp": 1559215835437,
"action": "REPORTED",
"user_name": "N/A",
"name": "WildFire Malware"
}
],
"xdrfileartifacts": [
{
"file_signature_status": "SIGNATURE_UNAVAILABLE",
"is_process": None,
"file_name": "LCTGSK7IML.docx",
"file_wildfire_verdict": "UNKNOWN",
"alert_count": 1,
"is_malicious": None,
"is_manual": None,
"file_signature_vendor_name": None,
"type": "HASH",
"file_sha256": "384654fa409c7a500a4a843d33a005c9d670d4845d3a9e096efc8b00ad05a621"
}
],
"xdrnetworkartifacts": []
},
"labels": [
{
"type": "modification_time",
"value": 1559463309323,
}
],
"severity": 1,
"name": "#697567 - WildFire Malware detected on host HostNameFFM8VIP9",
"created": "2019-06-02T11:13:54.674006+03:00",
"sourceBrand": "Palo Alto Networks Cortext XDR IR",
}
INCIDENT_FROM_XDR = {
"host_count": 1,
"manual_severity": None,
"xdr_url": "http://example.com/incident-view/697567",
"assigned_user_pretty_name": None,
"alert_count": 1,
"med_severity_alert_count": 0,
"detection_time": None,
"user_count": 1,
"severity": "low",
"alerts": [
{
"category": "WildFirePostDetection",
"action_pretty": "Detected (Reported)",
"description": "Suspicious executable detected",
"severity": "high",
"host_ip": "8.8.8.8",
"source": "Traps",
"alert_id": "50820",
"host_name": "HostNameFFM8VIP9",
"detection_timestamp": 1559215835437,
"action": "REPORTED",
"user_name": "N/A",
"name": "WildFire Malware"
}
],
"low_severity_alert_count": 0,
"status": "new",
"description": "WildFire Malware detected on host HostNameFFM8VIP9",
"resolve_comment": None,
"creation_time": 1559215942398,
"modification_time": 1559215942398,
"network_artifacts": [],
"file_artifacts": [
{
"file_signature_status": "SIGNATURE_UNAVAILABLE",
"is_process": None,
"file_name": "LCTGSK7IML.docx",
"file_wildfire_verdict": "UNKNOWN",
"alert_count": 1,
"is_malicious": None,
"is_manual": None,
"file_signature_vendor_name": None,
"type": "HASH",
"file_sha256": "384654fa409c7a500a4a843d33a005c9d670d4845d3a9e096efc8b00ad05a621"
}
],
"manual_description": None,
"incident_id": "697567",
"notes": None,
"assigned_user_mail": None,
"high_severity_alert_count": 1
}
INCIDENT_FROM_XDR_RAW_RESPONSE = {
'incident': INCIDENT_FROM_XDR
}
def test_compare_incident_in_demisto_vs_xdr_context___incident_not_modified():
"""
Given
- incident in xdr which already in context
- incident in demisto
When
- nothing has changed
Then
- compare function returns
is_modified=False
"""
incident_id = "100"
fields_mapping = {
"status": "xdrstatus",
"severity": "severity"
}
incident_in_demisto = copy.deepcopy(INCIDENT_IN_DEMISTO)
xdr_incident_in_context = copy.deepcopy(INCIDENT_FROM_XDR)
xdr_incident_in_context['severity'] = 1
is_modified, update_args = xdr_script.compare_incident_in_demisto_vs_xdr_context(incident_in_demisto,
xdr_incident_in_context,
incident_id,
fields_mapping)
assert not is_modified
def test_compare_incident_in_demisto_vs_xdr_context___status_was_modified():
"""
Given
- incident in xdr which already in context
- incident in demisto
When
- xdrstatus field in demisto changed to closed
Then
- compare function returns
is_modified=True
update_args contains status
"""
incident_id = "100"
fields_mapping = {
"status": "xdrstatus",
"severity": "severity"
}
incident_in_demisto = copy.deepcopy(INCIDENT_IN_DEMISTO)
incident_in_demisto["CustomFields"]["xdrstatus"] = "closed"
xdr_incident_in_context = copy.deepcopy(INCIDENT_FROM_XDR)
xdr_incident_in_context['severity'] = 1
is_modified, update_args = xdr_script.compare_incident_in_demisto_vs_xdr_context(incident_in_demisto,
xdr_incident_in_context,
incident_id,
fields_mapping)
assert is_modified
assert {
"incident_id": "100",
"status": "closed"
} == update_args
def test_compare_incident_in_demisto_vs_xdr_context___severity_was_modified():
"""
Given
- incident in xdr which already in context
- incident in demisto
When
- severity field in demisto changed to 3 (high)
Then
- compare function returns
is_modified=True
update_args contains manual_severity
"""
incident_id = "100"
fields_mapping = {
"status": "xdrstatus",
"severity": "severity"
}
incident_in_demisto = copy.deepcopy(INCIDENT_IN_DEMISTO)
incident_in_demisto["severity"] = 3
xdr_incident_in_context = copy.deepcopy(INCIDENT_FROM_XDR)
is_modified, update_args = xdr_script.compare_incident_in_demisto_vs_xdr_context(incident_in_demisto,
xdr_incident_in_context,
incident_id,
fields_mapping)
assert is_modified
assert {
"incident_id": "100",
"manual_severity": "high"
} == update_args
def test_compare_incident_in_demisto_vs_xdr_context___status_and_severity_was_modified():
"""
Given
- incident in xdr which already in context
- incident in demisto
When
- severity field in demisto changed
- xdrstatus field in demisto changed
Then
- compare function returns
is_modified=True
update_args contains manual_severity and status
"""
incident_id = "100"
fields_mapping = {
"status": "xdrstatus",
"severity": "severity"
}
incident_in_demisto = copy.deepcopy(INCIDENT_IN_DEMISTO)
incident_in_demisto["severity"] = 3
incident_in_demisto["CustomFields"]["xdrstatus"] = "closed"
xdr_incident_in_context = copy.deepcopy(INCIDENT_FROM_XDR)
is_modified, update_args = xdr_script.compare_incident_in_demisto_vs_xdr_context(incident_in_demisto,
xdr_incident_in_context,
incident_id,
fields_mapping)
assert is_modified
assert {
"incident_id": "100",
"manual_severity": "high",
"status": "closed"
} == update_args
def test_compare_incident_latest_xdr_incident_with_older_xdr_in_context____when_nothing_changed():
"""
Given
- incident from xdr - latest
- incident from xdr - older
- fields_mapping:
status: xdrstatus,
severity: xdrseverity,
manual_severity: severity
When
- nothing changed
Then
- ensure compare returns is_modified=False
"""
fields_mapping = {
"status": "xdrstatus",
"severity": "severity"
}
incident_in_xdr_latest = copy.deepcopy(INCIDENT_FROM_XDR)
incident_from_xdr_in_context = copy.deepcopy(INCIDENT_FROM_XDR)
incident_from_xdr_in_context['severity'] = 1
is_modified, update_args = xdr_script.compare_incident_in_xdr_vs_previous_xdr_in_context(
incident_in_xdr_latest,
incident_from_xdr_in_context,
fields_mapping)
assert not is_modified
def test_compare_incident_latest_xdr_incident_with_older_xdr_in_context____when_status_changed():
"""
Given
- incident from xdr - latest
- incident from xdr - older
- fields_mapping:
status: xdrstatus,
severity: xdrseverity,
manual_severity: severity
When
- status changed from new to under_investigation
Then
- ensure compare returns is_modified=True
- ensure compare returns update_args contains xdrstatus=under_investigation
"""
fields_mapping = {
"status": "xdrstatus",
"severity": "severity",
}
incident_in_xdr_latest = copy.deepcopy(INCIDENT_FROM_XDR)
incident_in_xdr_latest["status"] = "under_investigation"
incident_in_xdr_latest["modification_time"] += 100
incident_from_xdr_in_context = copy.deepcopy(INCIDENT_FROM_XDR)
incident_from_xdr_in_context['severity'] = 1
is_modified, update_args = xdr_script.compare_incident_in_xdr_vs_previous_xdr_in_context(
incident_in_xdr_latest,
incident_from_xdr_in_context,
fields_mapping)
assert is_modified
assert {
"xdrstatus": "under_investigation",
} == update_args
def test_compare_incident_latest_xdr_incident_with_older_xdr_in_context____when_manual_severity_changed():
"""
Given
- incident from xdr - latest
- incident from xdr - older
- fields_mapping:
status: xdrstatus,
severity: xdrseverity,
manual_severity: severity
When
- manual_severity changed from None to medium
Then
- ensure compare returns is_modified=True
- ensure compare returns update_args contains severity=medium
"""
fields_mapping = {
"status": "xdrstatus",
"manual_severity": "severity"
}
incident_in_xdr_latest = copy.deepcopy(INCIDENT_FROM_XDR)
incident_in_xdr_latest["manual_severity"] = "medium"
incident_in_xdr_latest["modification_time"] += 100
incident_from_xdr_in_context = copy.deepcopy(INCIDENT_FROM_XDR)
incident_from_xdr_in_context['severity'] = 1
is_modified, update_args = xdr_script.compare_incident_in_xdr_vs_previous_xdr_in_context(
incident_in_xdr_latest,
incident_from_xdr_in_context,
fields_mapping)
assert is_modified
assert {
"severity": "medium",
} == update_args
def test_compare_incident_latest_xdr_incident_with_older_xdr_in_context____when_status_and_severity_changed():
"""
Given
- incident from xdr - latest
- incident from xdr - older
- fields_mapping:
status: xdrstatus,
severity: xdrseverity,
manual_severity: severity
When
- manual_severity changed from None to medium
- status changed from new to under_investigation
Then
- ensure compare returns is_modified=True
    - ensure compare returns update_args containing severity=medium and
      xdrstatus=under_investigation
"""
fields_mapping = {
"status": "xdrstatus",
"severity": "xdrseverity",
"manual_severity": "severity"
}
incident_in_xdr_latest = copy.deepcopy(INCIDENT_FROM_XDR)
incident_in_xdr_latest["manual_severity"] = "medium"
incident_in_xdr_latest["status"] = "under_investigation"
incident_in_xdr_latest["modification_time"] += 100
incident_from_xdr_in_context = copy.deepcopy(INCIDENT_FROM_XDR)
incident_from_xdr_in_context['severity'] = 1
is_modified, update_args = xdr_script.compare_incident_in_xdr_vs_previous_xdr_in_context(
incident_in_xdr_latest,
incident_from_xdr_in_context,
fields_mapping)
assert is_modified
assert {
"severity": "medium",
"xdrstatus": "under_investigation"
} == update_args
def test_args_to_str_1():
xdr_incident = copy.deepcopy(INCIDENT_FROM_XDR)
args = {
"incident_id": "11",
"assigned_user_mail": "xdrassigneduser",
"status": "xdrstatus",
"severity": "xdrseverity",
"playbook_to_run": "XDR Demo",
"first": "true"
}
actual = xdr_script.args_to_str(args, xdr_incident)
expected = 'incident_id=`11` assigned_user_mail=`xdrassigneduser` status=`xdrstatus` severity=`xdrseverity` ' \
'playbook_to_run=`XDR Demo` first=`false` xdr_incident_from_previous_run=`{}` '\
.format(json.dumps(xdr_incident))
assert expected == actual
def test_args_to_str_2():
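    """
    Given
    - an xdr incident
    - args with first=false and a previous xdr_incident_from_previous_run value
    When
    - args_to_str is called
    Then
    - ensure xdr_incident_from_previous_run is replaced with the serialized incident
    """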
xdr_incident = copy.deepcopy(INCIDENT_FROM_XDR)
args = {
"incident_id": "11",
"assigned_user_mail": "xdrassigneduser",
"status": "xdrstatus",
"severity": "xdrseverity",
"playbook_to_run": "XDR Demo",
"first": "false",
"xdr_incident_from_previous_run": "some previous value"
}
actual = xdr_script.args_to_str(args, xdr_incident)
expected = 'incident_id=`11` assigned_user_mail=`xdrassigneduser` status=`xdrstatus` severity=`xdrseverity` ' \
'playbook_to_run=`XDR Demo` first=`false` xdr_incident_from_previous_run=`{}` '\
.format(json.dumps(xdr_incident))
assert expected == actual
def test_compare_incident_in_demisto_when_the_severity_is_unknown():
"""
Given
- incident in demisto
- incident from xdr - older
- fields_mapping:
severity: severity
When
- severity in demisto is unknown
Then
- ensure severity is not updated in XDR
"""
incident_id = "100"
fields_mapping = {
"severity": "severity"
}
incident_in_demisto = copy.deepcopy(INCIDENT_IN_DEMISTO)
incident_in_demisto["severity"] = 0
xdr_incident_in_context = copy.deepcopy(INCIDENT_FROM_XDR)
is_modified, update_args = xdr_script.compare_incident_in_demisto_vs_xdr_context(incident_in_demisto,
xdr_incident_in_context,
incident_id,
fields_mapping)
assert is_modified is False
assert {} == update_args
def test_fix_bug_19669(mocker, capfd):
"""
bug fix https://github.com/demisto/etc/issues/19669
The script was throwing `local variable 'latest_incident_in_xdr' referenced before assignment`
Given
- xdr script
When
- script executed and xdr_incident_sync raised an exception
Then
    - the xdr_script should not fail with the runtime error `local variable 'latest_incident_in_xdr'
      referenced before assignment`
- the script should return error entry with message `Raised exception`
"""
import XDRSyncScript as xdr_script
import demistomock as demisto
import sys
mocker.patch.object(xdr_script, 'xdr_incident_sync', side_effect=Exception('Raised exception'))
mocker.patch.object(demisto, 'results')
    # mocking sys.exit makes sure that return_error doesn't stop the test - bad practice, but we have no choice for now
mocker.patch.object(sys, 'exit')
mocker.patch.object(demisto, 'executeCommand', return_value=[{
'Contents': {
'id': '1000'
},
'Type': entryTypes['note'],
'Format': formats['json']
}])
args = {
'interval': '1'
}
    with capfd.disabled():  # prevents the test from failing when demisto.error writes to stdout via print
xdr_script.main(args)
assert demisto.results.call_count == 1
# call_args is tuple (args list, kwargs). we only need the first one
results = demisto.results.call_args[0]
assert len(results) == 1
assert results[0]['Type'] == entryTypes['error']
assert results[0]['Contents'] == 'Raised exception'
def create_test_incident(no_assignee=False, severity=None):
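    """
    Build matching demisto and XDR test incidents from the module fixtures.
    no_assignee clears the assignee fields, severity ('high'/'medium'/'low') sets
    the severity on both sides. Returns (demisto_incident,
    xdr_incident_from_previous_run, xdr_incident), where the previous-run copy
    has the alerts/file_artifacts/network_artifacts keys removed.
    """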
xdr_incident = copy.deepcopy(INCIDENT_FROM_XDR)
demisto_incident = copy.deepcopy(INCIDENT_IN_DEMISTO)
if no_assignee:
xdr_incident['assigned_user_pretty_name'] = None
xdr_incident[ASSIGNED_USER_MAIL_XDR_FIELD] = None
demisto_incident['xdrassignedusermail'] = ''
demisto_incident['xdrassigneduserprettyname'] = ''
if severity:
xdr_incident[SEVERITY_XDR_FIELD] = severity
# 3=high, 2=medium, 1=low
demisto_incident['severity'] = {
'high': 3,
'medium': 2,
'low': 1
}[severity]
xdr_incident_from_previous_run = copy.deepcopy(xdr_incident)
if 'alerts' in xdr_incident_from_previous_run:
del xdr_incident_from_previous_run['alerts']
if 'file_artifacts' in xdr_incident_from_previous_run:
del xdr_incident_from_previous_run['file_artifacts']
if 'network_artifacts' in xdr_incident_from_previous_run:
del xdr_incident_from_previous_run['network_artifacts']
return demisto_incident, xdr_incident_from_previous_run, xdr_incident
def get_execute_command_call(mocked_execute_command, script_name):
"""
Returns:
is_called - True means script was called via demisto.executeCommand
script_args - The arguments that demisto.executeCommand was called with
"""
if mocked_execute_command.call_count == 0:
return False, None
for call_args in mocked_execute_command.call_args_list:
if call_args[0][0] == script_name:
return True, call_args[0][1]
return False, None
def test_incident_was_modified_in_xdr(mocker):
"""
    Given
    - incident in demisto
    - incident in xdr
    When
    - incident assignee in xdr is updated by the user to be foo@test.com
    - XDRSyncScript executed
    Then
    - ensure incident assignee in demisto is updated to be foo@test.com
    - ensure current playbook was re-executed
    - ensure XDRSyncScript is scheduled to be executed in the next interval with
      xdr_incident_from_previous_run having assignee foo@test.com
"""
import XDRSyncScript as xdr_script
import demistomock as demisto
# - incident in demisto
# - incident in xdr
demisto_incident, xdr_incident_from_previous_run, xdr_incident_latest = create_test_incident(no_assignee=True)
# - incident assignee in xdr is updated by the user to be foo@test.com
xdr_incident_latest[ASSIGNED_USER_MAIL_XDR_FIELD] = 'foo@test.com'
xdr_incident_latest[MODIFICATION_TIME_XDR_FIELD] = xdr_incident_from_previous_run[MODIFICATION_TIME_XDR_FIELD] + 100
mocker.patch.object(demisto, 'incidents', return_value=[demisto_incident])
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'executeCommand', return_value=[{
'Contents': {
'incident': xdr_incident_latest,
'alerts': {
'data': xdr_incident_latest['alerts']
},
'file_artifacts': {
'data': xdr_incident_latest['file_artifacts']
},
'network_artifacts': {
'data': xdr_incident_latest['network_artifacts']
}
},
'HumanReadable': 'nla',
'Type': entryTypes['note'],
'Format': formats['json']
}])
args = {
'interval': '1',
'verbose': 'true',
'first': 'false',
ASSIGNED_USER_MAIL_XDR_FIELD: 'xdrassignedusermail',
'xdr_alerts': 'xdralerts',
'xdr_file_artifacts': 'xdrfileartifacts',
'xdr_network_artifacts': 'xdrnetworkartifacts',
'xdr_incident_from_previous_run': json.dumps(xdr_incident_from_previous_run)
}
xdr_script.main(args)
# - ensure incident assignee in demisto is updated to be foo@test.com
is_called, set_incident_args = get_execute_command_call(demisto.executeCommand, 'setIncident')
assert is_called is True
assert set_incident_args['xdrassignedusermail'] == 'foo@test.com'
# - ensure current playbook was re-executed
is_playbook_executed, _ = get_execute_command_call(demisto.executeCommand, 'setPlaybook')
assert is_playbook_executed is True
    # - ensure XDRSyncScript is scheduled to be executed in the next interval with
# xdr_incident_from_previous_run has assignee foo@test.com
is_called, scheduled_command_args = get_execute_command_call(demisto.executeCommand, 'ScheduleCommand')
assert is_called is True
scheduled_command = scheduled_command_args['command']
assert '"assigned_user_mail": "foo@test.com"' in scheduled_command
def test_incident_was_modified_in_demisto(mocker):
"""
    Given
    - incident in demisto and in XDR with low severity
    When
    - incident severity in Demisto is updated by the user to be "high"
    - XDRSyncScript executed
    Then
    - ensure incident severity in XDR is updated to be high
    - ensure playbook is NOT executed
    - ensure XDRSyncScript is scheduled to be executed in the next interval with
      xdr_incident_from_previous_run having severity=high
"""
import XDRSyncScript as xdr_script
import demistomock as demisto
# - incident in demisto
# - incident in xdr
demisto_incident, xdr_incident_from_previous_run, xdr_incident_latest = create_test_incident(severity='low')
# - incident severity in Demisto is updated by the user to be "high"
demisto_incident['severity'] = 3
# - XDRSyncScript executed
mocker.patch.object(demisto, 'incidents', return_value=[demisto_incident])
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'executeCommand', return_value=[{
'Contents': {
'incident': xdr_incident_latest,
'alerts': {
'data': xdr_incident_latest['alerts']
},
'file_artifacts': {
'data': xdr_incident_latest['file_artifacts']
},
'network_artifacts': {
'data': xdr_incident_latest['network_artifacts']
}
},
'HumanReadable': 'nla',
'Type': entryTypes['note'],
'Format': formats['json']
}])
args = {
'interval': '1',
'verbose': 'true',
'first': 'false',
SEVERITY_XDR_FIELD: 'severity',
'xdr_alerts': 'xdralerts',
'xdr_file_artifacts': 'xdrfileartifacts',
'xdr_network_artifacts': 'xdrnetworkartifacts',
'xdr_incident_from_previous_run': json.dumps(xdr_incident_from_previous_run)
}
xdr_script.main(args)
# - ensure incident severity in XDR is updated to be high
is_called, xdr_update_args = get_execute_command_call(demisto.executeCommand, 'xdr-update-incident')
assert is_called is True
assert xdr_update_args[MANUAL_SEVERITY_XDR_FIELD] == 'high'
# - ensure playbook is NOT executed
is_playbook_executed, _ = get_execute_command_call(demisto.executeCommand, 'setPlaybook')
assert not is_playbook_executed
    # - ensure XDRSyncScript is scheduled to be executed in the next interval with
# xdr_incident_from_previous_run has severity=high
is_called, scheduled_command_args = get_execute_command_call(demisto.executeCommand, 'ScheduleCommand')
assert is_called is True
EXPECTED_INCIDENT = {
'incident_id': '697567',
'manual_severity': None,
'assigned_user_mail': None,
'high_severity_alert_count': None,
'host_count': None,
'xdr_url': 'http://example.com/incident-view/697567',
'assigned_user_pretty_name': '',
'alert_count': 1,
'med_severity_alert_count': None,
    'user_count': None,
    'severity': 1,
'low_severity_alert_count': None,
'status': 'new',
'description': 'WildFire Malware detected on host HostNameFFM8VIP9',
'resolve_comment': None,
'notes': None,
'modification_time': 1559463309323
}
def test_create_incident_from_saved_data_without_extra_data():
"""
Given
- incident in demisto
- fields_mapping:
status: xdrstatus,
severity: xdrseverity,
manual_severity: severity
- include_extra_data = False
When
- creating an incident object from the context incident
Then
- ensure date fields are parsed correctly
- ensure all relevant fields are present
"""
fields_mapping = {
"alert_count": "xdralertcount",
"assigned_user_mail": "xdrassigneduseremail",
"assigned_user_pretty_name": "xdrassigneduserprettyname",
"description": "xdrdescription",
"high_severity_alert_count": "xdrhighseverityalertcount",
"host_count": "xdrhostcount",
"incident_id": "10",
"low_severity_alert_count": "xdrlowseverityalertcount",
"manual_severity": "xdrmanualseverity",
"med_severity_alert_count": "xdrmediumseverityalertcount",
"modification_time": "xdrmodificationtime",
"notes": "xdrnotes",
"resolve_comment": "xdrresolvecomment",
"severity": "severity",
"status": "xdrstatus",
"user_count": "xdrusercount",
"xdr_url": "xdrurl"
}
incident_from_context = copy.deepcopy(INCIDENT_IN_DEMISTO)
created_incident = xdr_script.create_incident_from_saved_data(incident_from_context, fields_mapping)
assert created_incident == EXPECTED_INCIDENT
EXPECTED_INCIDENT_EXTRA_DATA = {
"xdralerts": [
{
"category": "WildFirePostDetection",
"action_pretty": "Detected (Reported)",
"description": "Suspicious executable detected",
"severity": "high",
"host_ip": "8.8.8.8",
"source": "Traps",
"alert_id": "50820",
"host_name": "HostNameFFM8VIP9",
"detection_timestamp": 1559215835437,
"action": "REPORTED",
"user_name": "N/A",
"name": "WildFire Malware"
}
],
"xdrfileartifacts": [
{
"file_signature_status": "SIGNATURE_UNAVAILABLE",
"is_process": None,
"file_name": "LCTGSK7IML.docx",
"file_wildfire_verdict": "UNKNOWN",
"alert_count": 1,
"is_malicious": None,
"is_manual": None,
"file_signature_vendor_name": None,
"type": "HASH",
"file_sha256": "384654fa409c7a500a4a843d33a005c9d670d4845d3a9e096efc8b00ad05a621"
}
],
"xdrnetworkartifacts": []
}
def test_create_incident_from_saved_data_with_extra_data():
"""
Given
- incident in demisto
- fields_mapping:
status: xdrstatus,
severity: xdrseverity,
manual_severity: severity
- include_extra_data = True
When
- creating an incident object from the context incident
Then
- ensure date fields are parsed correctly
- ensure all relevant fields are present
"""
fields_mapping = {
"status": "xdrstatus",
"severity": "severity"
}
incident_from_context = copy.deepcopy(INCIDENT_IN_DEMISTO)
created_incident = xdr_script.create_incident_from_saved_data(incident_from_context, fields_mapping, True)
assert created_incident == EXPECTED_INCIDENT_EXTRA_DATA
def test_create_incident_from_saved_data_without_extra_data_old_incident():
"""
Given
- an old incident in demisto (which means that 'xdrmodificationtime' is not mapped but present in 'labels')
- fields_mapping:
{
"alert_count": "xdralertcount",
"assigned_user_mail": "xdrassigneduseremail",
"assigned_user_pretty_name": "xdrassigneduserprettyname",
"description": "xdrdescription",
"high_severity_alert_count": "xdrhighseverityalertcount",
"host_count": "xdrhostcount",
"incident_id": "10",
"low_severity_alert_count": "xdrlowseverityalertcount",
"manual_severity": "xdrmanualseverity",
"med_severity_alert_count": "xdrmediumseverityalertcount",
"modification_time": "xdrmodificationtime",
"notes": "xdrnotes",
"resolve_comment": "xdrresolvecomment",
"severity": "severity",
"status": "xdrstatus",
"user_count": "xdrusercount",
"xdr_url": "xdrurl"
}
- include_extra_data = False
When
- creating an incident object from the context incident
Then
- ensure date fields are parsed correctly
- ensure all relevant fields are present
"""
fields_mapping = {
"alert_count": "xdralertcount",
"assigned_user_mail": "xdrassigneduseremail",
"assigned_user_pretty_name": "xdrassigneduserprettyname",
"description": "xdrdescription",
"high_severity_alert_count": "xdrhighseverityalertcount",
"host_count": "xdrhostcount",
"incident_id": "10",
"low_severity_alert_count": "xdrlowseverityalertcount",
"manual_severity": "xdrmanualseverity",
"med_severity_alert_count": "xdrmediumseverityalertcount",
"modification_time": "xdrmodificationtime",
"notes": "xdrnotes",
"resolve_comment": "xdrresolvecomment",
"severity": "severity",
"status": "xdrstatus",
"user_count": "xdrusercount",
"xdr_url": "xdrurl"
}
incident_from_context = copy.deepcopy(OLD_INCIDENT_IN_DEMISTO)
created_incident = xdr_script.create_incident_from_saved_data(incident_from_context, fields_mapping)
assert created_incident == EXPECTED_INCIDENT
def test_create_incident_from_saved_data_old_incident_no_modification_time():
"""
Given
- an old incident in demisto (which means that 'xdrmodificationtime' is not mapped and not in 'labels')
- fields_mapping:
{
"alert_count": "xdralertcount",
"assigned_user_mail": "xdrassigneduseremail",
"assigned_user_pretty_name": "xdrassigneduserprettyname",
"description": "xdrdescription",
"high_severity_alert_count": "xdrhighseverityalertcount",
"host_count": "xdrhostcount",
"incident_id": "10",
"low_severity_alert_count": "xdrlowseverityalertcount",
"manual_severity": "xdrmanualseverity",
"med_severity_alert_count": "xdrmediumseverityalertcount",
"modification_time": "xdrmodificationtime",
"notes": "xdrnotes",
"resolve_comment": "xdrresolvecomment",
"severity": "severity",
"status": "xdrstatus",
"user_count": "xdrusercount",
"xdr_url": "xdrurl"
}
- include_extra_data = False
When
- creating an incident object from the context incident
Then
- ensure date fields are parsed correctly
- ensure all relevant fields are present
"""
fields_mapping = {
"alert_count": "xdralertcount",
"assigned_user_mail": "xdrassigneduseremail",
"assigned_user_pretty_name": "xdrassigneduserprettyname",
"description": "xdrdescription",
"high_severity_alert_count": "xdrhighseverityalertcount",
"host_count": "xdrhostcount",
"incident_id": "10",
"low_severity_alert_count": "xdrlowseverityalertcount",
"manual_severity": "xdrmanualseverity",
"med_severity_alert_count": "xdrmediumseverityalertcount",
"modification_time": "xdrmodificationtime",
"notes": "xdrnotes",
"resolve_comment": "xdrresolvecomment",
"severity": "severity",
"status": "xdrstatus",
"user_count": "xdrusercount",
"xdr_url": "xdrurl"
}
EXPECTED_INCIDENT['modification_time'] = 0
incident_from_context = copy.deepcopy(OLD_INCIDENT_IN_DEMISTO)
incident_from_context["labels"] = []
created_incident = xdr_script.create_incident_from_saved_data(incident_from_context, fields_mapping)
assert created_incident['modification_time'] == 0
assert created_incident == EXPECTED_INCIDENT
| 33.362949
| 120
| 0.6359
|
e2fe4903ce988a00434a1f1123e6a351b714827c
| 2,294
|
py
|
Python
|
hand2.py
|
JiemeZen/gestureController
|
ee066f87c0113341f6bc65bcc558bb8a22698ac8
|
[
"MIT"
] | 1
|
2022-02-28T05:05:37.000Z
|
2022-02-28T05:05:37.000Z
|
hand2.py
|
JiemeZen/gestureController
|
ee066f87c0113341f6bc65bcc558bb8a22698ac8
|
[
"MIT"
] | null | null | null |
hand2.py
|
JiemeZen/gestureController
|
ee066f87c0113341f6bc65bcc558bb8a22698ac8
|
[
"MIT"
] | null | null | null |
import mediapipe as mp
import cv2
import numpy as np
import uuid
import os
import time
# Grabbing the Holistic Model from Mediapipe and
# Initializing the Model
mp_holistic = mp.solutions.holistic
holistic_model = mp_holistic.Holistic(
min_detection_confidence=0.5,
min_tracking_confidence=0.5
)
# Initializing the drawing utils for drawing the facial landmarks on image
mp_drawing = mp.solutions.drawing_utils
# (0) in VideoCapture is used to connect to your computer's default camera
capture = cv2.VideoCapture(0)
# Initializing current time and previous time for calculating the FPS
previousTime = 0
currentTime = 0
while capture.isOpened():
# capture frame by frame
ret, frame = capture.read()
# resizing the frame for better view
frame = cv2.resize(frame, (800, 600))
    # Converting the frame from BGR to RGB
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Making predictions using holistic model
# To improve performance, optionally mark the image as not writable to
# pass by reference.
image.flags.writable = False
results = holistic_model.process(image)
image.flags.writable = True
# Converting back the RGB image to BGR
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# Drawing the Facial Landmarks
mp_drawing.draw_landmarks(
image,
results.face_landmarks,
mp_holistic.FACE_CONNECTIONS,
mp_drawing.DrawingSpec(
color=(255,0,255),
thickness=1,
circle_radius=1
),
mp_drawing.DrawingSpec(
color=(0,255,255),
thickness=1,
circle_radius=1
)
)
# Drawing Right hand Land Marks
mp_drawing.draw_landmarks(
image,
results.right_hand_landmarks,
mp_holistic.HAND_CONNECTIONS
)
# Drawing Left hand Land Marks
mp_drawing.draw_landmarks(
image,
results.left_hand_landmarks,
mp_holistic.HAND_CONNECTIONS
)
# Calculating the FPS
currentTime = time.time()
fps = 1 / (currentTime-previousTime)
previousTime = currentTime
# Displaying FPS on the image
cv2.putText(image, str(int(fps))+" FPS", (10, 70), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 2)
# Display the resulting image
cv2.imshow("Facial and Hand Landmarks", image)
# Enter key 'q' to break the loop
if cv2.waitKey(5) & 0xFF == ord('q'):
break
# When all the process is done
# Release the capture and destroy all windows
capture.release()
cv2.destroyAllWindows()
| 23.895833
| 94
| 0.757193
|
390edd4379662f301b3f30237b797caca412286a
| 2,581
|
py
|
Python
|
cnc/hal.py
|
UncleRus/PyCNC
|
dfb710f4a516bd39d30b902f78409310a1baf676
|
[
"MIT"
] | null | null | null |
cnc/hal.py
|
UncleRus/PyCNC
|
dfb710f4a516bd39d30b902f78409310a1baf676
|
[
"MIT"
] | null | null | null |
cnc/hal.py
|
UncleRus/PyCNC
|
dfb710f4a516bd39d30b902f78409310a1baf676
|
[
"MIT"
] | 1
|
2021-09-26T16:16:16.000Z
|
2021-09-26T16:16:16.000Z
|
# This implementation allows using different hardware.
# The imported module contains functions for hardware access for some board/SoC.
# List of HAL methods that should be implemented in each module:
# def init():
# """ Initialize GPIO pins and machine itself, including calibration if
# needed. Do not return till all procedure is completed.
# """
# logging.info("initialize hal")
# do_something()
#
#
# def spindle_control(percent):
# """ Spindle control implementation.
# :param percent: Spindle speed in percent. 0 turns spindle off.
# """
# logging.info("spindle control: {}%".format(percent))
# do_something()
#
#
# def move_linear(delta, velocity):
# """ Move head to specified distance with specified speed.
# :param delta: Coordinated object, delta position in mm
# :param velocity: velocity in mm per min
# """
# do_something()
#
#
# def move_circular(delta, radius, plane, velocity, direction):
# """ Move with circular interpolation.
# :param delta: finish position delta from the beginning, must be on
# circle on specified plane. Zero means full circle.
# :param radius: vector to center of circle.
# :param plane: plane to interpolate.
# :param velocity: velocity in mm per min.
# :param direction: clockwise or counterclockwise.
# """
# do_something()
#
#
# def join():
# """ Wait till motors work.
# """
# do_something()
#
#
# def deinit():
# """ De-initialise hal, stop any hardware.
# """
# do_something()
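#
# A minimal sketch of a hardware module satisfying this interface. The
# logging-only bodies below are illustrative placeholders, not part of this
# project; a real module would drive actual GPIO/steppers.
#
# import logging
#
# def init():
#     logging.info("dummy hal initialized")
#
# def spindle_control(percent):
#     logging.info("dummy spindle control: {}%".format(percent))
#
# def move_linear(delta, velocity):
#     logging.info("dummy linear move {} at {} mm/min".format(delta, velocity))
#
# def move_circular(delta, radius, plane, velocity, direction):
#     logging.info("dummy circular move {} around {}".format(delta, radius))
#
# def join():
#     pass
#
# def deinit():
#     logging.info("dummy hal deinitialized")
#
# A module like this, imported below instead of cnc.hal_virtual, would pass the
# checks at the end of this file.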
# check which module to import
try:
from cnc.hal_raspberry.hal import *
except ImportError:
print("----- Hardware not detected, using virtual environment -----")
print("----- Use M111 command to enable more detailed debug -----")
from cnc.hal_virtual import *
# check that all methods that are needed are implemented
if 'init' not in locals():
raise NotImplementedError("hal.init() not implemented")
if 'spindle_control' not in locals():
raise NotImplementedError("hal.spindle_control() not implemented")
if 'move_linear' not in locals():
raise NotImplementedError("hal.move_linear() not implemented")
if 'move_circular' not in locals():
raise NotImplementedError("hal.move_circular() not implemented")
if 'join' not in locals():
raise NotImplementedError("hal.join() not implemented")
if 'deinit' not in locals():
raise NotImplementedError("hal.deinit() not implemented")
| 34.878378
| 78
| 0.650136
|
252937609897076f21efb57f7b801c0c2450870e
| 31,261
|
py
|
Python
|
tests/test_sessionmanager.py
|
fantomfp/python-omemo
|
43244e37bae31b3d1591474c73e948652c1c0e91
|
[
"MIT"
] | null | null | null |
tests/test_sessionmanager.py
|
fantomfp/python-omemo
|
43244e37bae31b3d1591474c73e948652c1c0e91
|
[
"MIT"
] | null | null | null |
tests/test_sessionmanager.py
|
fantomfp/python-omemo
|
43244e37bae31b3d1591474c73e948652c1c0e91
|
[
"MIT"
] | null | null | null |
import pytest
import cProfile
import logging
import os
import time
logging.basicConfig(level = logging.DEBUG)
import omemo
from omemo import SessionManager
from omemo.exceptions import *
from omemo_backend_signal import BACKEND as SignalBackend
from asyncinmemorystorage import AsyncInMemoryStorage
from syncinmemorystorage import SyncInMemoryStorage
from deletingotpkpolicy import DeletingOTPKPolicy
from keepingotpkpolicy import KeepingOTPKPolicy
from example_data import *
from example_data import (
ALICE_BARE_JID as A_JID,
BOB_BARE_JID as B_JID,
CHARLIE_BARE_JID as C_JID,
DAVE_BARE_JID as D_JID,
ALICE_DEVICE_ID as A_DID,
BOB_DEVICE_ID as B_DID,
CHARLIE_DEVICE_ID as C_DID,
DAVE_DEVICE_ID as D_DID,
ALICE_DEVICE_IDS as A_DIDS,
BOB_DEVICE_IDS as B_DIDS,
CHARLIE_DEVICE_IDS as C_DIDS,
DAVE_DEVICE_IDS as D_DIDS
)
def assertPromiseFulfilled(promise):
assert isinstance(promise, omemo.promise.Promise)
while not promise.done: time.sleep(.01)
assert promise.fulfilled
return promise.value
def assertPromiseFulfilledOrRaise(promise):
assert isinstance(promise, omemo.promise.Promise)
while not promise.done: time.sleep(.01)
if promise.fulfilled:
return promise.value
raise promise.reason
def assertPromiseRejected(promise):
assert isinstance(promise, omemo.promise.Promise)
while not promise.done: time.sleep(.01)
assert promise.rejected
return promise.reason
def overrideOwnData(st_sync, st_async, jid, did):
    # A mutable flag lets the async callback signal completion to this scope.
    done = [False]
    def cb(success, value):
        assert success
        done[0] = True
    st_sync.storeOwnData(None, jid, did)
    st_async.storeOwnData(cb, jid, did)
    while not done[0]: time.sleep(.01)
def getDevices(sm_sync, sm_async, jid, inactive, active):
inactive = set(inactive)
active = set(active)
devices_sync = sm_sync.getDevices(jid)
devices_async = assertPromiseFulfilled(sm_async.getDevices(jid))
assert set(devices_sync ["inactive"].keys()) == inactive
assert set(devices_async["inactive"].keys()) == inactive
assert devices_sync ["active"] == active
assert devices_async["active"] == active
def newDeviceList(sm_sync, sm_async, jid, devices):
sm_sync.newDeviceList(jid, devices)
assertPromiseFulfilled(sm_async.newDeviceList(jid, devices))
def createSessionManagers(st_sync = None, st_async = None, expect = None):
if st_sync == None:
st_sync = SyncInMemoryStorage()
if st_async == None:
st_async = AsyncInMemoryStorage()
try:
sm_sync = SessionManager.create(
st_sync,
DeletingOTPKPolicy,
SignalBackend,
A_JID,
A_DID
)
except Exception as e:
assert expect != None
assert isinstance(e, expect)
sm_async_promise = SessionManager.create(
st_async,
DeletingOTPKPolicy,
SignalBackend,
A_JID,
A_DID
)
if expect == None:
sm_async = assertPromiseFulfilled(sm_async_promise)
else:
assert isinstance(assertPromiseRejected(sm_async_promise), expect)
if expect == None:
assert isinstance(sm_sync, SessionManager)
assert isinstance(sm_async, SessionManager)
return st_sync, sm_sync, st_async, sm_async
def createOtherSessionManagers(jid, dids, other_dids, otpk_policy = None):
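    """
    Create a sync and an async SessionManager for every device id in dids (all
    belonging to jid), publish the device lists in other_dids to each of them,
    and return two dicts mapping device id -> SessionManager (sync, async).
    """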
if otpk_policy == None:
otpk_policy = DeletingOTPKPolicy
sms_sync = {}
sms_async = {}
for did in dids:
st_sync = SyncInMemoryStorage()
st_async = AsyncInMemoryStorage()
sm_sync = SessionManager.create(st_sync, otpk_policy, SignalBackend, jid, did)
sm_async = assertPromiseFulfilled(SessionManager.create(
st_async,
otpk_policy,
SignalBackend,
jid,
did
))
assert isinstance(sm_sync, SessionManager)
assert isinstance(sm_async, SessionManager)
for other_jid in other_dids:
newDeviceList(sm_sync, sm_async, other_jid, other_dids[other_jid])
sms_sync[did] = sm_sync
sms_async[did] = sm_async
return sms_sync, sms_async
def trust(sm_sync, sm_async, sms_sync, sms_async, jid_to_trust, devices_to_trust):
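    """
    Make sm_sync/sm_async trust the given devices of jid_to_trust, using the
    identity keys from the corresponding sms_sync/sms_async SessionManagers.
    devices_to_trust may be a list of device ids or a single device id; in the
    single-device case (detected via the TypeError raised when iterating it)
    sms_sync/sms_async are single SessionManagers instead of dicts.
    """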
try:
for device in devices_to_trust:
ik_sync = sms_sync [device].public_bundle.ik
ik_async = sms_async[device].public_bundle.ik
sm_sync.setTrust(jid_to_trust, device, ik_sync, True)
assertPromiseFulfilled(sm_async.setTrust(
jid_to_trust,
device,
ik_async,
True
))
except TypeError:
ik_sync = sms_sync .public_bundle.ik
ik_async = sms_async.public_bundle.ik
sm_sync.setTrust(jid_to_trust, devices_to_trust, ik_sync, True)
assertPromiseFulfilled(sm_async.setTrust(
jid_to_trust,
devices_to_trust,
ik_async,
True
))
def distrust(sm_sync, sm_async, sms_sync, sms_async, jid_to_trust, devices_to_trust):
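    # Same list-or-single-device handling as trust(), but marks the devices as untrusted.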
try:
for device in devices_to_trust:
ik_sync = sms_sync [device].public_bundle.ik
ik_async = sms_async[device].public_bundle.ik
sm_sync.setTrust(jid_to_trust, device, ik_sync, False)
assertPromiseFulfilled(sm_async.setTrust(
jid_to_trust,
device,
ik_async,
False
))
except TypeError:
ik_sync = sms_sync .public_bundle.ik
ik_async = sms_async.public_bundle.ik
sm_sync.setTrust(jid_to_trust, devices_to_trust, ik_sync, False)
assertPromiseFulfilled(sm_async.setTrust(
jid_to_trust,
devices_to_trust,
ik_async,
False
))
def messageEncryption(
pass_bundles = None,
trust_devices = None,
pass_devices = True,
expect_problems = None,
expected_problems = None,
trust_alice = True,
allow_untrusted_decryption = False,
expect_untrusted_decryption = None
):
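    """
    Helper for the Alice -> Bob encryption tests: Alice encrypts one message to
    B_JID and each of Bob's devices tries to decrypt it. pass_devices,
    pass_bundles and trust_devices control whether Alice knows Bob's device
    list, which bundles she receives and which devices she trusts;
    expect_problems is forwarded to encryptMessage and expected_problems lists
    the exceptions the encryption is expected to report. trust_alice,
    allow_untrusted_decryption and expect_untrusted_decryption control the
    trust handling on the decrypting side.
    """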
if pass_bundles == None:
pass_bundles = set(B_DIDS)
else:
pass_bundles = set(pass_bundles)
if trust_devices == None:
trust_devices = set(B_DIDS)
else:
trust_devices = set(trust_devices)
if expect_problems == None:
expect_problems = set()
else:
expect_problems = set(expect_problems)
st_sync, sm_sync, st_async, sm_async = createSessionManagers()
b_sms_sync, b_sms_async = createOtherSessionManagers(
B_JID,
B_DIDS,
{ A_JID: [ A_DID ] }
)
if pass_devices:
newDeviceList(sm_sync, sm_async, B_JID, B_DIDS)
trust(sm_sync, sm_async, b_sms_sync, b_sms_async, B_JID, trust_devices)
if trust_alice:
for b_did in B_DIDS:
trust(b_sms_sync[b_did], b_sms_async[b_did], sm_sync, sm_async, A_JID, A_DID)
bundles_sync = {
did: b_sms_sync[did].public_bundle
for did in B_DIDS
if did in pass_bundles
}
bundles_async = {
did: b_sms_async[did].public_bundle
for did in B_DIDS
if did in pass_bundles
}
problems_sync = []
problems_async = []
msg = "single message".encode("UTF-8")
try:
encrypted_sync = sm_sync.encryptMessage(
[ B_JID ],
msg,
{ B_JID: bundles_sync },
{ B_JID: expect_problems }
)
except EncryptionProblemsException as e:
problems_sync = e.problems
try:
encrypted_async = assertPromiseFulfilledOrRaise(sm_async.encryptMessage(
[ B_JID ],
msg,
{ B_JID: bundles_async },
{ B_JID: expect_problems }
))
except EncryptionProblemsException as e:
problems_async = e.problems
if expected_problems == None:
successes_sync = set(encrypted_sync ["keys"][B_JID].keys())
successes_async = set(encrypted_async["keys"][B_JID].keys())
expected_successes = set(B_DIDS) - expect_problems
assert expected_successes == successes_sync == successes_async
for did in expected_successes:
try:
# Check that the pre_key flag is set correctly
expect_pre_key = did in bundles_sync
assert encrypted_sync["keys"][B_JID][did]["pre_key"] == expect_pre_key
decrypted_sync = b_sms_sync[did].decryptMessage(
A_JID,
A_DID,
encrypted_sync["iv"],
encrypted_sync["keys"][B_JID][did]["data"],
encrypted_sync["keys"][B_JID][did]["pre_key"],
encrypted_sync["payload"],
allow_untrusted = allow_untrusted_decryption
)
assert expect_untrusted_decryption == None
            except TrustException as e:
                assert expect_untrusted_decryption
                assert e == TrustException(
A_JID,
A_DID,
sm_sync.public_bundle.ik,
expect_untrusted_decryption
)
try:
# Check that the pre_key flag is set correctly
expect_pre_key = did in bundles_async
assert encrypted_async["keys"][B_JID][did]["pre_key"] == expect_pre_key
decrypted_async = assertPromiseFulfilledOrRaise(
b_sms_async[did].decryptMessage(
A_JID,
A_DID,
encrypted_async["iv"],
encrypted_async["keys"][B_JID][did]["data"],
encrypted_async["keys"][B_JID][did]["pre_key"],
encrypted_async["payload"],
allow_untrusted = allow_untrusted_decryption
)
)
assert expect_untrusted_decryption == None
except TrustException as e:
assert expect_untrusted_decryption
assert e == TrustException(
A_JID,
A_DID,
sm_async.public_bundle.ik,
expect_untrusted_decryption
)
if expect_untrusted_decryption == None:
assert decrypted_sync == decrypted_async == msg
else:
assert len(problems_sync) == len(problems_async) == len(expected_problems)
zipped = zip(problems_sync, problems_async, expected_problems)
for problem_sync, problem_async, problem_expected in zipped:
if isinstance(problem_expected, TrustException):
problem_expected_sync = TrustException(
problem_expected.bare_jid,
problem_expected.device,
sm_sync.public_bundle.ik
if problem_expected.bare_jid == A_JID else
b_sms_sync[problem_expected.device].public_bundle.ik,
problem_expected.problem
)
problem_expected_async = TrustException(
problem_expected.bare_jid,
problem_expected.device,
sm_async.public_bundle.ik
if problem_expected.bare_jid == A_JID else
b_sms_async[problem_expected.device].public_bundle.ik,
problem_expected.problem
)
assert problem_sync == problem_expected_sync
assert problem_async == problem_expected_async
else:
assert problem_sync == problem_async == problem_expected
def test_create():
st_sync, _, st_async, _ = createSessionManagers()
# Create using the same storage with the same information
createSessionManagers(st_sync, st_async)
# Replace the device id
overrideOwnData(st_sync, st_async, A_JID, B_DID)
# This time, the create call should raise an InconsistentInfoException
createSessionManagers(st_sync, st_async, InconsistentInfoException)
# Replace the jid
overrideOwnData(st_sync, st_async, B_JID, A_DID)
# This time, the create call should raise an InconsistentInfoException
createSessionManagers(st_sync, st_async, InconsistentInfoException)
# Replace both the device id and the jid
overrideOwnData(st_sync, st_async, B_JID, B_DID)
# This time, the create call should raise an InconsistentInfoException
createSessionManagers(st_sync, st_async, InconsistentInfoException)
# Go back to the original data
overrideOwnData(st_sync, st_async, A_JID, A_DID)
# Create using the same storage with the same information
createSessionManagers(st_sync, st_async)
def test_bundle_serialization():
_, sm_sync, _, sm_async = createSessionManagers()
bundle_sync = sm_sync.public_bundle
bundle_async = sm_async.public_bundle
sb = SignalBackend
ex = omemo.ExtendedPublicBundle
assert ex.parse(sb, **bundle_sync.serialize(sb)) == bundle_sync
assert ex.parse(sb, **bundle_async.serialize(sb)) == bundle_async
def test_deviceList():
_, sm_sync, _, sm_async = createSessionManagers()
getDevices(sm_sync, sm_async, None, [], [ A_DID ])
getDevices(sm_sync, sm_async, A_JID, [], [ A_DID ])
newDeviceList(sm_sync, sm_async, A_JID, A_DIDS)
getDevices(sm_sync, sm_async, A_JID, [], A_DIDS)
newDeviceList(sm_sync, sm_async, A_JID, A_DIDS[:2])
getDevices(sm_sync, sm_async, A_JID, A_DIDS[2:], A_DIDS[:2])
newDeviceList(sm_sync, sm_async, A_JID, [])
getDevices(sm_sync, sm_async, A_JID, set(A_DIDS) - set([ A_DID ]), [ A_DID ])
def test_messageEncryption():
messageEncryption()
# This test was added due to a report that the SessionManager behaves incorrectly when
# passing an empty string in the list of recipients while encrypting. This behaviour could
# not be reproduced.
def test_messageEncryption_emptyStringRecipient():
# Create multiple SessionManagers for the same JID and make their device lists known
sms_sync, sms_async = createOtherSessionManagers(
A_JID,
A_DIDS,
{ A_JID: A_DIDS }
)
# Use the first SessionManager for the active part of the test
sm_sync = sms_sync[A_DIDS[0]]
sm_async = sms_async[A_DIDS[0]]
# Make the SM trust all other devices
for a_did in A_DIDS[1:]:
trust(sm_sync, sm_async, sms_sync[a_did], sms_async[a_did], A_JID, a_did)
# Get the bundles of all devices
bundles_sync = { did: sms_sync[did].public_bundle for did in A_DIDS }
bundles_async = { did: sms_async[did].public_bundle for did in A_DIDS }
msg = "single message".encode("UTF-8")
# Encrypt the message, passing an array containing an empty string as the list of
# recipients. Make sure that a NoDevicesException is thrown for the empty string.
try:
encrypted_sync = sm_sync.encryptMessage(
[ "" ],
msg,
{ A_JID: bundles_sync }
)
except EncryptionProblemsException as e:
assert len(e.problems) == 1
assert isinstance(e.problems[0], NoDevicesException)
assert e.problems[0].bare_jid == ""
try:
encrypted_async = assertPromiseFulfilledOrRaise(sm_async.encryptMessage(
[ "" ],
msg,
{ A_JID: bundles_async }
))
except EncryptionProblemsException as e:
assert len(e.problems) == 1
assert isinstance(e.problems[0], NoDevicesException)
assert e.problems[0].bare_jid == ""
def test_messageEncryption_missingBundle():
messageEncryption(pass_bundles = B_DIDS[:2], expected_problems = [
MissingBundleException(B_JID, B_DIDS[2])
])
def test_messageEncryption_allBundlesMissing():
messageEncryption(pass_bundles = [], expected_problems = [
MissingBundleException(B_JID, B_DIDS[0]),
MissingBundleException(B_JID, B_DIDS[1]),
MissingBundleException(B_JID, B_DIDS[2]),
NoEligibleDevicesException(B_JID)
])
def test_messageEncryption_untrustedDevice():
messageEncryption(trust_devices = B_DIDS[:2], expected_problems = [
TrustException(B_JID, B_DIDS[2], "placeholder", "undecided") # TODO
])
def test_messageEncryption_noTrustedDevices():
messageEncryption(trust_devices = [], expected_problems = [
TrustException(B_JID, B_DIDS[0], "placeholder", "undecided"), # TODO
TrustException(B_JID, B_DIDS[1], "placeholder", "undecided"), # TODO
TrustException(B_JID, B_DIDS[2], "placeholder", "undecided"), # TODO
NoEligibleDevicesException(B_JID)
])
def test_messageEncryption_noDevices():
messageEncryption(pass_devices = False, expected_problems = [
NoDevicesException(B_JID)
])
def test_messageEncryption_expectProblems():
messageEncryption(
pass_bundles = B_DIDS[:2],
trust_devices = B_DIDS[1:],
expected_problems = [
MissingBundleException(B_JID, B_DIDS[2]),
TrustException(B_JID, B_DIDS[0], "placeholder", "undecided") # TODO
]
)
messageEncryption(
pass_bundles = B_DIDS[:2],
trust_devices = B_DIDS[1:],
expect_problems = [ B_DIDS[0], B_DIDS[2] ]
)
def test_ratchetForwardingMessage():
_, sm_sync, _, sm_async = createSessionManagers()
b_sms_sync, b_sms_async = createOtherSessionManagers(
B_JID,
[ B_DID ],
{ A_JID: [ A_DID ] }
)
newDeviceList(sm_sync, sm_async, B_JID, [ B_DID ])
# This should not require trusting the devices.
#trust(sm_sync, sm_async, b_sms_sync, b_sms_async, B_JID, [ B_DID ])
b_sm_sync = b_sms_sync [B_DID]
b_sm_async = b_sms_async[B_DID]
encrypted_sync = sm_sync.encryptRatchetForwardingMessage(
[ B_JID ],
{ B_JID: { B_DID: b_sm_sync.public_bundle } }
)
encrypted_async = assertPromiseFulfilled(sm_async.encryptRatchetForwardingMessage(
[ B_JID ],
{ B_JID: { B_DID: b_sm_async.public_bundle } }
))
b_sm_sync.decryptRatchetForwardingMessage(
A_JID,
A_DID,
encrypted_sync["iv"],
encrypted_sync["keys"][B_JID][B_DID]["data"],
encrypted_sync["keys"][B_JID][B_DID]["pre_key"],
allow_untrusted = True
)
assertPromiseFulfilledOrRaise(b_sm_async.decryptRatchetForwardingMessage(
A_JID,
A_DID,
encrypted_async["iv"],
encrypted_async["keys"][B_JID][B_DID]["data"],
encrypted_async["keys"][B_JID][B_DID]["pre_key"],
allow_untrusted = True
))
def test_messageDecryption_noTrust():
messageEncryption(trust_alice = False, expect_untrusted_decryption = "undecided")
def test_messageDecryption_noTrust_allowUntrusted():
messageEncryption(trust_alice = False, allow_untrusted_decryption = True)
def test_messageDecryption_noSession():
_, sm_sync, _, sm_async = createSessionManagers()
b_sms_sync, b_sms_async = createOtherSessionManagers(
B_JID,
[ B_DID ],
{ A_JID: [ A_DID ] }
)
newDeviceList(sm_sync, sm_async, B_JID, [ B_DID ])
trust(sm_sync, sm_async, b_sms_sync, b_sms_async, B_JID, [ B_DID ])
b_sm_sync = b_sms_sync [B_DID]
b_sm_async = b_sms_async[B_DID]
sm_sync.encryptMessage(
[ B_JID ],
"first message".encode("UTF-8"),
{ B_JID: { B_DID: b_sm_sync.public_bundle } }
)
assertPromiseFulfilled(sm_async.encryptMessage(
[ B_JID ],
"first message".encode("UTF-8"),
{ B_JID: { B_DID: b_sm_async.public_bundle } }
))
encrypted_sync = sm_sync.encryptMessage(
[ B_JID ],
"second message".encode("UTF-8")
)
encrypted_async = assertPromiseFulfilled(sm_async.encryptMessage(
[ B_JID ],
"second message".encode("UTF-8")
))
try:
decrypted_sync = b_sm_sync.decryptMessage(
A_JID,
A_DID,
encrypted_sync["iv"],
encrypted_sync["keys"][B_JID][B_DID]["data"],
encrypted_sync["keys"][B_JID][B_DID]["pre_key"],
encrypted_sync["payload"]
)
assert False
except NoSessionException as e:
assert e == NoSessionException(A_JID, A_DID)
try:
decrypted_async = assertPromiseFulfilledOrRaise(b_sm_async.decryptMessage(
A_JID,
A_DID,
encrypted_async["iv"],
encrypted_async["keys"][B_JID][B_DID]["data"],
encrypted_async["keys"][B_JID][B_DID]["pre_key"],
encrypted_async["payload"]
))
assert False
except NoSessionException as e:
assert e == NoSessionException(A_JID, A_DID)
def otpkPolicyTest(otpk_policy, expect_exception):
_, sm_sync, _, sm_async = createSessionManagers()
b_sms_sync, b_sms_async = createOtherSessionManagers(
B_JID,
[ B_DID ],
{ A_JID: [ A_DID ] },
otpk_policy = otpk_policy
)
newDeviceList(sm_sync, sm_async, B_JID, [ B_DID ])
b_sm_sync = b_sms_sync [B_DID]
b_sm_async = b_sms_async[B_DID]
trust(sm_sync, sm_async, b_sms_sync, b_sms_async, B_JID, [ B_DID ])
trust(b_sm_sync, b_sm_async, sm_sync, sm_async, A_JID, A_DID)
pre_key_message_sync = sm_sync.encryptMessage(
[ B_JID ],
"first message".encode("UTF-8"),
{ B_JID: { B_DID: b_sm_sync.public_bundle } }
)
pre_key_message_async = assertPromiseFulfilled(sm_async.encryptMessage(
[ B_JID ],
"first message".encode("UTF-8"),
{ B_JID: { B_DID: b_sm_async.public_bundle } }
))
params_sync = [
A_JID,
A_DID,
pre_key_message_sync["iv"],
pre_key_message_sync["keys"][B_JID][B_DID]["data"],
pre_key_message_sync["keys"][B_JID][B_DID]["pre_key"],
pre_key_message_sync["payload"]
]
params_async = [
A_JID,
A_DID,
pre_key_message_async["iv"],
pre_key_message_async["keys"][B_JID][B_DID]["data"],
pre_key_message_async["keys"][B_JID][B_DID]["pre_key"],
pre_key_message_async["payload"]
]
b_sm_sync.decryptMessage(*params_sync)
assertPromiseFulfilled(b_sm_async.decryptMessage(*params_async))
try:
b_sm_sync.decryptMessage(*params_sync)
assert not expect_exception
except KeyExchangeException as e:
assert expect_exception
assert e == KeyExchangeException(A_JID, A_DID, "unused")
try:
assertPromiseFulfilledOrRaise(b_sm_async.decryptMessage(*params_async))
assert not expect_exception
except KeyExchangeException as e:
assert expect_exception
assert e == KeyExchangeException(A_JID, A_DID, "unused")
def test_otpkPolicy_deleting():
otpkPolicyTest(DeletingOTPKPolicy, True)
def test_otpkPolicy_keeping():
otpkPolicyTest(KeepingOTPKPolicy, False)
def test_trustRetrieval():
_, sm_sync, _, sm_async = createSessionManagers()
b_sms_sync, b_sms_async = createOtherSessionManagers(
B_JID,
[ B_DID ],
{ A_JID: [ A_DID ] }
)
newDeviceList(sm_sync, sm_async, B_JID, [ B_DID ])
assert sm_sync.getTrustForDevice(B_JID, B_DID) == None
assert assertPromiseFulfilled(sm_async.getTrustForDevice(B_JID, B_DID)) == None
trust(sm_sync, sm_async, b_sms_sync, b_sms_async, B_JID, [ B_DID ])
assert sm_sync.getTrustForDevice(B_JID, B_DID) == {
"key": b_sms_sync[B_DID].public_bundle.ik,
"trusted": True
}
assert assertPromiseFulfilled(sm_async.getTrustForDevice(B_JID, B_DID)) == {
"key": b_sms_async[B_DID].public_bundle.ik,
"trusted": True
}
distrust(sm_sync, sm_async, b_sms_sync, b_sms_async, B_JID, [ B_DID ])
assert sm_sync.getTrustForDevice(B_JID, B_DID) == {
"key": b_sms_sync[B_DID].public_bundle.ik,
"trusted": False
}
assert assertPromiseFulfilled(sm_async.getTrustForDevice(B_JID, B_DID)) == {
"key": b_sms_async[B_DID].public_bundle.ik,
"trusted": False
}
assert sm_sync.getTrustForJID(B_JID) == {
"active": {
B_DID: {
"key": b_sms_sync[B_DID].public_bundle.ik,
"trusted": False
}
},
"inactive": {}
}
assert assertPromiseFulfilled(sm_async.getTrustForJID(B_JID)) == {
"active": {
B_DID: {
"key": b_sms_async[B_DID].public_bundle.ik,
"trusted": False
}
},
"inactive": {}
}
def test_serialization():
st_sync, sm_sync, st_async, sm_async = createSessionManagers()
b_sms_sync, b_sms_async = createOtherSessionManagers(
B_JID,
[ B_DID ],
{ A_JID: [ A_DID ] }
)
newDeviceList(sm_sync, sm_async, B_JID, [ B_DID ])
trust(sm_sync, sm_async, b_sms_sync, b_sms_async, B_JID, [ B_DID ])
b_sm_sync = b_sms_sync [B_DID]
b_sm_async = b_sms_async[B_DID]
encrypted_sync = sm_sync.encryptRatchetForwardingMessage(
[ B_JID ],
{ B_JID: { B_DID: b_sm_sync.public_bundle } }
)
encrypted_async = assertPromiseFulfilled(sm_async.encryptRatchetForwardingMessage(
[ B_JID ],
{ B_JID: { B_DID: b_sm_async.public_bundle } }
))
b_sm_sync.decryptRatchetForwardingMessage(
A_JID,
A_DID,
encrypted_sync["iv"],
encrypted_sync["keys"][B_JID][B_DID]["data"],
encrypted_sync["keys"][B_JID][B_DID]["pre_key"],
allow_untrusted = True
)
assertPromiseFulfilledOrRaise(b_sm_async.decryptRatchetForwardingMessage(
A_JID,
A_DID,
encrypted_async["iv"],
encrypted_async["keys"][B_JID][B_DID]["data"],
encrypted_async["keys"][B_JID][B_DID]["pre_key"],
allow_untrusted = True
))
# After this code is done, there is an updated state and a session in the cache.
# Create new SessionManagers using the storage of the old one and check, whether the
# state and the session are still usable.
_, sm_sync, _, sm_async = createSessionManagers(
st_sync = st_sync,
st_async = st_async
)
encrypted_sync = sm_sync.encryptRatchetForwardingMessage(
[ B_JID ],
{ B_JID: { B_DID: b_sm_sync.public_bundle } }
)
encrypted_async = assertPromiseFulfilled(sm_async.encryptRatchetForwardingMessage(
[ B_JID ],
{ B_JID: { B_DID: b_sm_async.public_bundle } }
))
b_sm_sync.decryptRatchetForwardingMessage(
A_JID,
A_DID,
encrypted_sync["iv"],
encrypted_sync["keys"][B_JID][B_DID]["data"],
encrypted_sync["keys"][B_JID][B_DID]["pre_key"],
allow_untrusted = True
)
assertPromiseFulfilledOrRaise(b_sm_async.decryptRatchetForwardingMessage(
A_JID,
A_DID,
encrypted_async["iv"],
encrypted_async["keys"][B_JID][B_DID]["data"],
encrypted_async["keys"][B_JID][B_DID]["pre_key"],
allow_untrusted = True
))
def test_stresstest_sync():
# Create 100 random JIDs with 10 random devices each
devices = {}
main_jid = None
main_did = None
while len(devices) < 100:
jid = generateRandomJID()
if main_jid == None:
main_jid = jid
devices[jid] = set()
while len(devices[jid]) < 10:
did = omemo.util.generateDeviceID(devices[jid])
if main_did == None:
main_did = did
devices[jid].add(did)
sms = {}
for jid in devices:
sms[jid] = {}
for did in devices[jid]:
# Create a SessionManager for that jid+did
sms[jid][did] = SessionManager.create(
SyncInMemoryStorage(),
DeletingOTPKPolicy,
SignalBackend,
jid,
did
)
bundles = {}
for jid in devices:
bundles[jid] = {}
for did in devices[jid]:
bundles[jid][did] = sms[jid][did].public_bundle
main = sms[main_jid][main_did]
# Tell the main SessionManager about all of the other jids and devices
for jid in devices:
main.newDeviceList(jid, devices[jid])
# Tell the main SessionManager to trust all other jids and devices
for jid in devices:
for did in devices[jid]:
main.setTrust(jid, did, sms[jid][did].public_bundle.ik, True)
cProfile.runctx("""
main.encryptMessage(
list(devices.keys()),
"This is a stresstest!".encode("UTF-8"),
bundles = bundles
)
""", {}, {
"main": main,
"devices": devices,
"bundles": bundles
})
# If the code reaches this point, the stress test has passed
assert True
def test_stresstest_async():
# Create 100 random JIDs with 10 random devices each
devices = {}
main_jid = None
main_did = None
while len(devices) < 100:
jid = generateRandomJID()
if main_jid == None:
main_jid = jid
devices[jid] = set()
while len(devices[jid]) < 10:
did = omemo.util.generateDeviceID(devices[jid])
if main_did == None:
main_did = did
devices[jid].add(did)
sms = {}
for jid in devices:
sms[jid] = {}
for did in devices[jid]:
# Create a SessionManager for that jid+did
sms[jid][did] = assertPromiseFulfilled(SessionManager.create(
AsyncInMemoryStorage(),
DeletingOTPKPolicy,
SignalBackend,
jid,
did
))
bundles = {}
for jid in devices:
bundles[jid] = {}
for did in devices[jid]:
bundles[jid][did] = sms[jid][did].public_bundle
main = sms[main_jid][main_did]
# Tell the main SessionManager about all of the other jids and devices
for jid in devices:
assertPromiseFulfilled(main.newDeviceList(jid, devices[jid]))
# Tell the main SessionManager to trust all other jids and devices
for jid in devices:
for did in devices[jid]:
main.setTrust(jid, did, sms[jid][did].public_bundle.ik, True)
cProfile.runctx("""
assertPromiseFulfilledOrRaise(main.encryptMessage(
list(devices.keys()),
"This is a stresstest!".encode("UTF-8"),
bundles = bundles
))
""", {
"assertPromiseFulfilledOrRaise": assertPromiseFulfilledOrRaise
}, {
"main": main,
"devices": devices,
"bundles": bundles
})
# If the code reaches this point, the stress test has passed
assert True
def charFromByte(c):
try:
c = ord(c)
except TypeError:
pass
c %= 26
c += ord('a')
return chr(c)
def generateRandomJID():
bytes = os.urandom(16)
return "{}@{}.im".format(
"".join(map(charFromByte, bytes[:8])),
"".join(map(charFromByte, bytes[8:]))
)
# TODO
# Default OTPKPolicy
# KeyExchangeExceptions during encryptMessage
# Inactive device cleanup
# Whole JID deletion
# resetTrust method
# encryptKeyTransportMessage
| 30.409533
| 90
| 0.627331
|
8f79173c22e818aaf95dff3a4f8955ebc3f7b348
| 4,111
|
py
|
Python
|
test/functional/feature_notifications.py
|
QuarterCoin/QuaterCoin-Wallet
|
bf6bf8ec8a2907e1fa29305df389e0ae7156e544
|
[
"MIT"
] | null | null | null |
test/functional/feature_notifications.py
|
QuarterCoin/QuaterCoin-Wallet
|
bf6bf8ec8a2907e1fa29305df389e0ae7156e544
|
[
"MIT"
] | null | null | null |
test/functional/feature_notifications.py
|
QuarterCoin/QuaterCoin-Wallet
|
bf6bf8ec8a2907e1fa29305df389e0ae7156e544
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Quartercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.test_framework import QuartercoinTestFramework
from test_framework.util import assert_equal, wait_until, connect_nodes_bi
class NotificationsTest(QuartercoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt")
self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt")
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [["-blockversion=2",
"-alertnotify=echo %%s >> %s" % self.alert_filename,
"-blocknotify=echo %%s >> %s" % self.block_filename],
["-blockversion=211",
"-rescan",
"-walletnotify=echo %%s >> %s" % self.tx_filename]]
super().setup_network()
def run_test(self):
self.log.info("test -blocknotify")
block_count = 10
blocks = self.nodes[1].generate(block_count)
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated blocks hashes
with open(self.block_filename, 'r') as f:
assert_equal(sorted(blocks), sorted(f.read().splitlines()))
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
os.remove(self.tx_filename)
self.log.info("test -walletnotify after rescan")
# restart node to rescan to force wallet notifications
self.restart_node(1)
connect_nodes_bi(self.nodes, 0, 1)
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
# Mine another 41 up-version blocks. -alertnotify should trigger on the 51st.
self.log.info("test -alertnotify")
self.nodes[1].generate(41)
self.sync_all()
# Give quartercoind 10 seconds to write the alert notification
wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10)
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text = f.read()
# Mine more up-version blocks, should not get more alerts:
self.nodes[1].generate(2)
self.sync_all()
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text2 = f.read()
self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
assert_equal(alert_text, alert_text2)
if __name__ == '__main__':
NotificationsTest().main()
| 46.191011
| 138
| 0.662126
|
beaf1d7f5050214fc20a220341da3989c69f3191
| 55
|
py
|
Python
|
conftest.py
|
ssebastianj/pywebtasks
|
7596558ab806de2b4a48a01c6d324f21f22bdd0f
|
[
"MIT"
] | 7
|
2015-05-25T22:15:04.000Z
|
2020-03-14T14:11:11.000Z
|
conftest.py
|
ssebastianj/pywebtasks
|
7596558ab806de2b4a48a01c6d324f21f22bdd0f
|
[
"MIT"
] | null | null | null |
conftest.py
|
ssebastianj/pywebtasks
|
7596558ab806de2b4a48a01c6d324f21f22bdd0f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
collect_ignore = ["setup.py"]
| 13.75
| 29
| 0.563636
|
7fdad6eb7d7ee02360fc26e3fc206911aba22fda
| 7,437
|
py
|
Python
|
python/paddle/tensor/stat.py
|
slf12/Paddle
|
fa43d74a3a16ac696db5dc893c9a7b1c6913dc85
|
[
"Apache-2.0"
] | 1
|
2020-05-02T00:00:20.000Z
|
2020-05-02T00:00:20.000Z
|
python/paddle/tensor/stat.py
|
slf12/Paddle
|
fa43d74a3a16ac696db5dc893c9a7b1c6913dc85
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/tensor/stat.py
|
slf12/Paddle
|
fa43d74a3a16ac696db5dc893c9a7b1c6913dc85
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define statistical functions of a tensor
from ..fluid.layers import mean #DEFINE_ALIAS
from ..fluid.layers import reduce_mean #DEFINE_ALIAS
__all__ = ['mean', 'reduce_mean', 'std', 'var']
import numpy as np
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import in_dygraph_mode
from ..fluid import layers
from .search import where
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
"""
Computes the variance of the input Variable's elements along the specified
axis.
Args:
input (Variable): The input Variable to be computed variance, with data
type float32 and float64 supported.
axis (list|int, optional): The axis along which the variance is computed.
If `None`, compute the variance over all elements of :attr:`input`
and return a Variable with a single element, otherwise it must be in
the range :math:`[-rank(input), rank(input))`. If :math:`axis[i] < 0`,
the axis to compute is :math:`rank(input) + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimensions in
the output Variable. The dimensions in :attr:`axis` will be squeezed
and the result Variable will have :attr:`len(axis)` fewer dimensions
than the :attr:`input` unless :attr:`keepdim` is true, default False.
unbiased (bool, optional): Whether to compute variance via the unbiased
estimator, in which the divisor used in the computation is
:math:`N - 1`, where :math:`N` represents the number of elements
along :attr:`axis`, otherwise the divisor is :math:`N`. Default True.
out (Variable, optional): Alternate output Variable to store the result
variance. Default None.
name (str, optional): The name for this layer. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`. Default None.
Returns:
Variable: The result variance with the same dtype as :attr:`input`.
If :attr:`out = None`, returns a new Variable containing the
variance, otherwise returns a reference to the output Variable.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid.dygraph as dg
a = np.array([[1.0, 2.0], [3.0, 4.0]]).astype("float32")
with dg.guard():
data = dg.to_variable(a)
variance = paddle.var(data, axis=[1])
print(variance.numpy())
# [0.5 0.5]
"""
dtype = convert_dtype(input.dtype)
if dtype not in ["float32", "float64"]:
raise ValueError("Layer tensor.var() only supports floating-point "
"dtypes, but received {}.".format(dtype))
    rank = len(input.shape)
    # Normalize axis: accept a single int, a list of ints, or None (all axes).
    if isinstance(axis, int):
        axis = [axis]
    axes = axis if axis != None and axis != [] else range(rank)
    axes = [e if e >= 0 else e + rank for e in axes]
inp_shape = input.shape if in_dygraph_mode() else layers.shape(input)
mean = layers.reduce_mean(input, dim=axis, keep_dim=True, name=name)
tmp = layers.reduce_mean(
(input - mean)**2, dim=axis, keep_dim=keepdim, name=name)
if unbiased:
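        # Unbiased (Bessel-corrected) estimate: scale the biased variance by
        # n / (n - 1), where n is the number of reduced elements; the factor
        # falls back to 0 when n == 1.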
n = 1
for i in axes:
n *= inp_shape[i]
if not in_dygraph_mode():
n = layers.cast(n, dtype)
zero_const = layers.fill_constant(shape=[1], dtype=dtype, value=0.0)
factor = where(n > 1.0, n / (n - 1.0), zero_const)
else:
factor = n / (n - 1.0) if n > 1.0 else 0.0
tmp *= factor
if out:
layers.assign(input=tmp, output=out)
return out
else:
return tmp
def std(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
"""
    Computes the standard deviation of the input Variable's elements along the
    specified axis.
    Args:
        input (Variable): The input Variable whose standard deviation is
            computed; data types float32 and float64 are supported.
        axis (list|int, optional): The axis along which the standard deviation
            is computed. If `None`, compute the standard deviation over all
            elements of :attr:`input` and return a Variable with a single
            element, otherwise it must be in the range
            :math:`[-rank(input), rank(input))`. If :math:`axis[i] < 0`,
            the axis to compute is :math:`rank(input) + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimensions in
            the output Variable. The dimensions in :attr:`axis` will be squeezed
            and the result Variable will have :attr:`len(axis)` fewer dimensions
            than the :attr:`input` unless :attr:`keepdim` is true, default False.
        unbiased (bool, optional): Whether to compute the standard deviation via
            the unbiased estimator, in which the divisor used in the computation
            is :math:`N - 1`, where :math:`N` represents the number of elements
            along :attr:`axis`, otherwise the divisor is :math:`N`. Default True.
        out (Variable, optional): Alternate output Variable to store the result
            standard deviation. Default None.
        name (str, optional): The name for this layer. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`. Default None.
    Returns:
        Variable: The result standard deviation with the same dtype as
            :attr:`input`. If :attr:`out = None`, returns a new Variable
            containing the standard deviation, otherwise returns a reference
            to the output Variable.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
paddle.std(x) # [0.28252685]
paddle.std(x, axis=[0]) # [0.0707107, 0.07071075, 0.07071064, 0.1414217]
paddle.std(x, axis=[-1]) # [0.30956957, 0.29439208]
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'std')
tmp = var(input, axis=axis, keepdim=keepdim, unbiased=unbiased, name=name)
tmp = layers.sqrt(tmp)
if out is not None:
layers.assign(input=tmp, output=out)
return out
else:
return tmp
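# A companion sketch for std (same assumptions and the same `a`/`data` as the
# var sketch above): with unbiased=True the result should agree with
# numpy.std(..., ddof=1).
#
#     with dg.guard():
#         data = dg.to_variable(a)
#         print(paddle.std(data, axis=[1]).numpy())   # ~[0.7071 0.7071]
#     print(np.std(a, axis=1, ddof=1))                # [0.7071068 0.7071068]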
| 46.773585
| 96
| 0.631303
|
15a5661fbe5fe7f39236ae5a800e37694d635e03
| 658
|
py
|
Python
|
aws-pentesting-with-python/user.py
|
qodirovshohijahon/dates-in-uzbek
|
8ac4adb1624a9735ea85819714249ca41a041bce
|
[
"CNRI-Python"
] | null | null | null |
aws-pentesting-with-python/user.py
|
qodirovshohijahon/dates-in-uzbek
|
8ac4adb1624a9735ea85819714249ca41a041bce
|
[
"CNRI-Python"
] | null | null | null |
aws-pentesting-with-python/user.py
|
qodirovshohijahon/dates-in-uzbek
|
8ac4adb1624a9735ea85819714249ca41a041bce
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/env python3
# get account authorization details using boto3 and aws-shell (aws-cli)
import boto3
import json
import sys
import os
import pprint
from color import color
# read AWS credentials from the environment (as used by aws-shell)
AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"]
AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"]
def list_users():
iam = boto3.client("iam")
paginator = iam.get_paginator('list_users')
for response in paginator.paginate():
for user in response["Users"]:
print(f"{color.BOLD}Username:{color.END} {user['UserName']}, {color.BOLD}Arn:{color.END} {user['Arn']}")
print(user)
list_users()
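# Note: boto3 already resolves AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY from
# the environment, so the module-level reads above are informational. If the
# credentials ever need to be passed explicitly (e.g. to test a second key
# pair), a minimal sketch could look like this; `make_iam_client` is a
# hypothetical helper, the keyword arguments are standard boto3 ones:
def make_iam_client(access_key=AWS_ACCESS_KEY_ID, secret_key=AWS_SECRET_ACCESS_KEY):
    """Build an IAM client with explicit credentials instead of the default chain."""
    return boto3.client(
        "iam",
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )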
| 26.32
| 116
| 0.709726
|
c482f715629b3ce5dcb8b6a0232fff4b0db095a3
| 642
|
py
|
Python
|
yahoo_news_jp.py
|
ingyunson/personal_assistant
|
798f2e81aef58853828a9f77d01f6f2888237c2a
|
[
"MIT"
] | null | null | null |
yahoo_news_jp.py
|
ingyunson/personal_assistant
|
798f2e81aef58853828a9f77d01f6f2888237c2a
|
[
"MIT"
] | null | null | null |
yahoo_news_jp.py
|
ingyunson/personal_assistant
|
798f2e81aef58853828a9f77d01f6f2888237c2a
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup as bs
url = 'https://news.yahoo.co.jp/'
news = requests.get(url)
html = news.text
soup = bs(html, 'lxml')
headline_title = []
headline_url = []
news_head = soup.select('#epTabTop > ul.topics > li.topTpi > div > h1 > a')
headline_title.append(news_head[0].text.replace("写真",""))
headline_url.append(news_head[0].get('href'))
for i in range(2,9):
news_info = soup.select('#epTabTop > ul.topics > li:nth-of-type({0}) > div > p > a'.format(i))
headline_title.append(news_info[0].text.replace("写真",""))
headline_url.append(news_info[0].get('href'))
print(headline_title, headline_url)
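# The CSS selectors above are tied to Yahoo! News Japan's markup at the time
# of writing and raise IndexError as soon as the page layout changes. A more
# defensive variant (same libraries; `collect_topics` is a hypothetical helper)
# skips slots whose selector no longer matches:
def collect_topics(soup, first=2, last=9):
    titles, urls = [], []
    for i in range(first, last):
        nodes = soup.select(
            '#epTabTop > ul.topics > li:nth-of-type({0}) > div > p > a'.format(i))
        if not nodes:  # selector no longer matches; skip this slot
            continue
        titles.append(nodes[0].text.replace("写真", ""))
        urls.append(nodes[0].get('href'))
    return titles, urls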
| 30.571429
| 98
| 0.690031
|
60192d228495af3bd28104561f9439323e4faac8
| 1,010
|
py
|
Python
|
LineMe/utils.py
|
HevLfreis/LineMe
|
11456ca14d1ae61af65372df6b0cb5e1a1c03748
|
[
"PostgreSQL",
"MIT"
] | 6
|
2016-05-16T07:59:31.000Z
|
2018-08-19T17:51:18.000Z
|
LineMe/utils.py
|
HevLfreis/LineMe
|
11456ca14d1ae61af65372df6b0cb5e1a1c03748
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
LineMe/utils.py
|
HevLfreis/LineMe
|
11456ca14d1ae61af65372df6b0cb5e1a1c03748
|
[
"PostgreSQL",
"MIT"
] | 5
|
2016-05-16T08:00:20.000Z
|
2021-08-13T01:41:18.000Z
|
#!/usr/bin/env python
# coding: utf-8
# created by hevlhayt@foxmail.com
# Date: 2016/10/26
# Time: 19:09
import hashlib
import re
from LineMe.settings import DEPLOYED_LANGUAGE
def get_template_dir(appname):
lang = DEPLOYED_LANGUAGE
if lang == 'zh-cn':
return appname + '/zh_cn/'
else:
return appname + '/us_en/'
def logger_join(*args, **kwargs):
if not args:
return ''
else:
str_arg = ' '.join([str(arg) for arg in args])
if not kwargs:
return str_arg
else:
return str_arg + ' ' + \
' '.join([k.upper()+':'+str(v).replace('\n', '') for k, v in kwargs.items() if v is not None])
def md5(s):
    if isinstance(s, (str, unicode)):
m = hashlib.md5()
m.update(s)
return m.hexdigest()
else:
return ''
def input_filter(arg):
    if arg and isinstance(arg, (str, unicode)):
return re.sub(ur"[^a-zA-Z0-9\u4e00-\u9fa5]", '', arg)
else:
return None
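# This module targets Python 2 (``unicode`` and ``ur""`` literals above). If it
# were ported to Python 3, md5() would have to encode text before hashing; a
# minimal sketch of such a port (md5_py3 is a hypothetical name, not part of
# the original module):
#
#     import hashlib
#
#     def md5_py3(s, encoding='utf-8'):
#         if isinstance(s, str):
#             s = s.encode(encoding)
#         if isinstance(s, bytes):
#             return hashlib.md5(s).hexdigest()
#         return ''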
| 21.489362
| 113
| 0.556436
|
90922494ea9d058a94d66e2a883e605a86077cc0
| 69
|
py
|
Python
|
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotMaterial_t.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotMaterial_t.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotMaterial_t.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class dotMaterial_t(object):
# no doc
aMaterialString = None
| 17.25
| 28
| 0.695652
|
759d36adb4e2c375e08fd2cd584596b2d2346e2e
| 1,269
|
py
|
Python
|
poco/scripts/insert_raw_logs.py
|
sunliwen/poco
|
a4b8c4ede63711eea42a444fb9d922c350855364
|
[
"MIT"
] | null | null | null |
poco/scripts/insert_raw_logs.py
|
sunliwen/poco
|
a4b8c4ede63711eea42a444fb9d922c350855364
|
[
"MIT"
] | 7
|
2019-03-22T06:26:39.000Z
|
2021-06-10T19:36:06.000Z
|
poco/scripts/insert_raw_logs.py
|
sunliwen/poco
|
a4b8c4ede63711eea42a444fb9d922c350855364
|
[
"MIT"
] | 1
|
2017-10-25T03:43:51.000Z
|
2017-10-25T03:43:51.000Z
|
import json
import datetime
import sys
import time
from pymongo.errors import ConnectionFailure
from recommender.tasks import _write_log
def run(site_id, file_path):
answer = raw_input("Do you really want to insert raw_logs from %s to site: %s (enter 'yes' to continue)" % (file_path, site_id))
if answer == "yes":
cnt = 0
f = open(file_path, "r")
for line in f.readlines():
#if cnt < 38137:
# cnt += 1
# continue
line = line.strip()
try:
raw_log = json.loads(line)
except ValueError:
print "Invalid raw_log line:", line
                sys.exit(0)
#print raw_log; sys.exit(0)
raw_log["created_on"] = datetime.datetime.strptime(raw_log["created_on"], "%Y-%m-%d %H:%M:%S")
try:
_write_log(site_id, raw_log, is_update_visitor_cache=False)
except ConnectionFailure:
print "Failed to insert:", raw_log
print cnt
                sys.exit(0)
cnt += 1
if (cnt % 100) == 0:
print cnt
time.sleep(0.2)
else:
print "Exit without action."
sys.exit(0)
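# Typical invocation of this Python 2 maintenance script (hypothetical site id
# and log path, and assuming the scripts package is importable as poco.scripts):
#
#     from poco.scripts import insert_raw_logs
#     insert_raw_logs.run("example_site", "/tmp/raw_logs.jsonl")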
| 32.538462
| 132
| 0.527975
|
e084249bc0d8fee296864d21f07e751288ecd0f5
| 1,006
|
py
|
Python
|
mailchimp/urls.py
|
Wirzi/django-mailchimp
|
fe34b83fa05aede06c294ac5861cb3b983b49bde
|
[
"BSD-3-Clause"
] | null | null | null |
mailchimp/urls.py
|
Wirzi/django-mailchimp
|
fe34b83fa05aede06c294ac5861cb3b983b49bde
|
[
"BSD-3-Clause"
] | null | null | null |
mailchimp/urls.py
|
Wirzi/django-mailchimp
|
fe34b83fa05aede06c294ac5861cb3b983b49bde
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import *
from mailchimp.settings import VIEWS_INFO, VIEWS_OVERVIEW, VIEWS_SCHEDULE_OBJECT, VIEWS_TEST_OBJECT
from mailchimp.views import webhook, dequeue, cancel, test_real
urlpatterns = patterns('',
url(r'^$', VIEWS_OVERVIEW, name='mailchimp_overview', kwargs={'page':'1'}),
url(r'^(?P<page>\d+)/$', VIEWS_OVERVIEW, name='mailchimp_overview'),
url(r'^send/(?P<content_type>\d+)/(?P<pk>\d+)/$', VIEWS_SCHEDULE_OBJECT, name='mailchimp_schedule_for_object'),
url(r'^test/(?P<content_type>\d+)/(?P<pk>\d+)/$', VIEWS_TEST_OBJECT, name='mailchimp_test_for_object'),
url(r'^test/(?P<content_type>\d+)/(?P<pk>\d+)/real/$', test_real, name='mailchimp_real_test_for_object'),
url(r'^info/(?P<campaign_id>\w+)/$', VIEWS_INFO, name='mailchimp_campaign_info'),
url(r'^dequeue/(?P<id>\d+)/', dequeue, name='mailchimp_dequeue'),
url(r'^cancel/(?P<id>\d+)/', cancel, name='mailchimp_cancel'),
url(r'^webhook/(?P<key>\w+)/', webhook, name='mailchimp_webhook'),
)
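# django.conf.urls.patterns() was removed in Django 1.10, so this module only
# works on old Django releases. On a modern Django the same routes could be
# written with re_path; a sketch, assuming the imported view callables are
# unchanged:
#
#     from django.urls import re_path
#
#     urlpatterns = [
#         re_path(r'^$', VIEWS_OVERVIEW, name='mailchimp_overview', kwargs={'page': '1'}),
#         re_path(r'^(?P<page>\d+)/$', VIEWS_OVERVIEW, name='mailchimp_overview'),
#         re_path(r'^webhook/(?P<key>\w+)/', webhook, name='mailchimp_webhook'),
#         # ... remaining routes follow the same pattern.
#     ]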
| 62.875
| 115
| 0.687873
|
927de2582cd1816352e25de8f25451dfb9f819fe
| 23
|
py
|
Python
|
nbdev/__init__.py
|
tcapelle/nbdev
|
79ad686f4831fa25447e366b7bac66957675c53f
|
[
"Apache-2.0"
] | null | null | null |
nbdev/__init__.py
|
tcapelle/nbdev
|
79ad686f4831fa25447e366b7bac66957675c53f
|
[
"Apache-2.0"
] | null | null | null |
nbdev/__init__.py
|
tcapelle/nbdev
|
79ad686f4831fa25447e366b7bac66957675c53f
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.2.18"
| 11.5
| 22
| 0.652174
|
64e9054b224e15574b4bbac19ed8fcb2f3fed55c
| 358
|
py
|
Python
|
tests/pep542_testfile.py
|
aroberge/nonstandard
|
b415e4b5360a44ee2e1927a2b5b45c4d74ca9803
|
[
"MIT"
] | 2
|
2017-04-30T23:27:06.000Z
|
2017-05-01T18:30:54.000Z
|
tests/pep542_testfile.py
|
aroberge/nonstandard
|
b415e4b5360a44ee2e1927a2b5b45c4d74ca9803
|
[
"MIT"
] | null | null | null |
tests/pep542_testfile.py
|
aroberge/nonstandard
|
b415e4b5360a44ee2e1927a2b5b45c4d74ca9803
|
[
"MIT"
] | null | null | null |
from __nonstandard__ import pep542
def test_pep542():
class MyClass:
pass
def MyClass.square(self, x):
return x**2
my_instance = MyClass()
def my_instance.out():
return 42
assert my_instance.out() == 42
assert my_instance.square(3) == 9
if __name__ == "__main__":
test_pep542()
print("Success.")
| 17.047619
| 37
| 0.617318
|