Dataset schema (one row per source file):

| column | dtype | stats |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 3–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3–972 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1–191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24–24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24–24, nullable |
| max_issues_repo_path | string | length 3–972 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1–116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24–24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24–24, nullable |
| max_forks_repo_path | string | length 3–972 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1–105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24–24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24–24, nullable |
| content | string | length 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |

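The records that follow can be loaded and filtered with standard tooling once the dump is stored in a row-oriented format. The snippet below is a minimal sketch under assumptions: `code_sample.parquet` is a hypothetical local file name, not part of this dump, and the real storage layout may differ. It selects Python files from repositories with at least one star and prints a few of the schema's columns.

```python
import pandas as pd

# Hypothetical local dump of the rows below; the real storage layout may differ.
df = pd.read_parquet("code_sample.parquet")

# Keep Python files coming from repositories with at least one star.
python_files = df[(df["ext"] == "py") & (df["max_stars_count"].fillna(0) >= 1)]

# Print a few of the schema's columns for each selected file.
for _, row in python_files.iterrows():
    print(
        row["max_stars_repo_name"],
        row["max_stars_repo_path"],
        f"size={row['size']}",
        f"avg_line_length={row['avg_line_length']:.2f}",
        f"alphanum_fraction={row['alphanum_fraction']:.3f}",
    )
```
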
hexsha: 638027c0eadc956d2dd934c655aead93cd6a42bf | size: 893 | ext: py | lang: Python
max_stars: path setup.py | repo flych3r/producer-consumer-service | head 2b9d87a058f7ac82cee3d7767f772a3a2c1fe8db | licenses ["MIT"] | count 1 | events 2022-01-16T10:49:42.000Z to 2022-01-16T10:49:42.000Z
max_issues: path setup.py | repo flych3r/producer-consumer-service | head 2b9d87a058f7ac82cee3d7767f772a3a2c1fe8db | licenses ["MIT"] | count null | events null
max_forks: path setup.py | repo flych3r/producer-consumer-service | head 2b9d87a058f7ac82cee3d7767f772a3a2c1fe8db | licenses ["MIT"] | count 1 | events 2022-03-09T10:58:24.000Z to 2022-03-09T10:58:24.000Z
content:
from setuptools import find_packages, setup
with open('requirements.txt') as f:
DEPENDENCIES = [dep.strip() for dep in f.readlines()]
LICENSE = 'MIT License'
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
]
if LICENSE:
CLASSIFIERS.append(f'License :: OSI Approved :: {LICENSE}')
print(DEPENDENCIES)
setup(
name='app',
version='0.1.0',
author='Matheus Xavier',
author_email='matheus.sampaio011@gmail.com',
license=LICENSE,
python_requires='>=3.7',
description='A Producer and a Consumer service that will \
be connected through a Queue',
long_description_content_type='text/markdown',
url='consumer-producer-service',
packages=find_packages(),
classifiers=CLASSIFIERS,
install_requires=DEPENDENCIES,
include_package_data=True
)
avg_line_length: 26.264706 | max_line_length: 63 | alphanum_fraction: 0.692049

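The three trailing figures on each record (avg_line_length, max_line_length, alphanum_fraction) are simple functions of the content column. The sketch below is an illustrative reconstruction under assumed conventions (newlines count toward the average, and the fraction is taken over all characters); the dataset's actual extraction pipeline may differ slightly.

```python
def content_stats(content: str) -> dict:
    """Recompute the trailing per-file columns from a raw `content` string.

    Illustrative only: the dataset's actual pipeline may use different
    conventions (newline handling, bytes vs. characters), so small
    discrepancies with the published figures are possible.
    """
    lines = content.splitlines() or [""]
    alnum = sum(ch.isalnum() for ch in content)
    return {
        "size": len(content.encode("utf-8")),          # file size in bytes
        "avg_line_length": len(content) / len(lines),  # newlines count toward the average here
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": alnum / len(content) if content else 0.0,
    }
```
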
hexsha: e5956e99bab59a9e13d23122cbc3b2093546e27b | size: 1,098 | ext: py | lang: Python
max_stars: path src/bobbit/modules/mock.py | repo sebmaster36/bobbit | head 7c9106a5c1d30f6ea64dc4ada2458f626f94b047 | licenses ["MIT"] | count 10 | events 2020-05-20T20:28:01.000Z to 2022-02-15T06:08:17.000Z
max_issues: path src/bobbit/modules/mock.py | repo sebmaster36/bobbit | head 7c9106a5c1d30f6ea64dc4ada2458f626f94b047 | licenses ["MIT"] | count 28 | events 2020-05-20T20:39:32.000Z to 2021-12-31T16:37:05.000Z
max_forks: path src/bobbit/modules/mock.py | repo sebmaster36/bobbit | head 7c9106a5c1d30f6ea64dc4ada2458f626f94b047 | licenses ["MIT"] | count 19 | events 2020-05-27T23:47:11.000Z to 2022-03-04T04:11:12.000Z
content:
# mock.py
# Metadata
NAME = 'mock'
ENABLE = True
PATTERN = r'^!mock (?P<phrase>.*)'
USAGE = '''Usage: !mock <phrase|nick>
Given a phrase, this translates the phrase into a mocking spongebob phrase.
Example:
> !mock it should work on slack and irc
iT ShOuLd wOrK On sLaCk aNd iRc
Alternatively, given a nick, this translates the last message from the user
into a mocking spongebob phrase.
Example:
> !mock AndroidKitKat
I LoVe aPpLe
'''
# Command
async def mock(bot, message, phrase):
if phrase in bot.users:
try:
history = bot.history.search(message.channel, nick=phrase, limit=1, reverse=True)
phrase = list(history)[0].body
except IndexError:
pass
phrase = phrase.lower().rstrip()
response = ''
for count, letter in enumerate(phrase):
if count % 2:
letter = letter.upper()
response += letter
return message.with_body(response)
# Register
def register(bot):
return (
('command', PATTERN, mock),
)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
avg_line_length: 22.408163 | max_line_length: 93 | alphanum_fraction: 0.631148

hexsha: 3d42ab9012bf1221c6bd0924b866dca98dc6e69c | size: 565 | ext: py | lang: Python
max_stars: path setup.py | repo conradbez/streamlit-node-graph | head a29b8a28bc272c41d9a39dea5d57171615a6b43c | licenses ["MIT"] | count null | events null
max_issues: path setup.py | repo conradbez/streamlit-node-graph | head a29b8a28bc272c41d9a39dea5d57171615a6b43c | licenses ["MIT"] | count null | events null
max_forks: path setup.py | repo conradbez/streamlit-node-graph | head a29b8a28bc272c41d9a39dea5d57171615a6b43c | licenses ["MIT"] | count null | events null
content:
import setuptools
setuptools.setup(
name="streamlit-node-graph",
version="0.0.7",
author="",
author_email="",
description="",
long_description="",
long_description_content_type="text/plain",
url="",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[],
python_requires=">=3.6",
install_requires=[
# By definition, a Custom Component depends on Streamlit.
# If your component has other Python dependencies, list
# them here.
"streamlit >= 0.63",
],
)
avg_line_length: 24.565217 | max_line_length: 65 | alphanum_fraction: 0.635398

hexsha: 7178756a0ee688001fb892fb1d48f2aed040bfcd | size: 197 | ext: py | lang: Python
max_stars: path mmpy_bot/plugins/ping.py | repo whoo/mmpy_bot | head 0ec39d44eff5cd474fcfc5a596910fd26c5c0a9d | licenses ["MIT"] | count 1 | events 2020-04-21T16:23:26.000Z to 2020-04-21T16:23:26.000Z
max_issues: path mmpy_bot/plugins/ping.py | repo whoo/mmpy_bot | head 0ec39d44eff5cd474fcfc5a596910fd26c5c0a9d | licenses ["MIT"] | count 6 | events 2018-06-05T16:09:16.000Z to 2018-08-26T00:26:04.000Z
max_forks: path mmpy_bot/plugins/ping.py | repo whoo/mmpy_bot | head 0ec39d44eff5cd474fcfc5a596910fd26c5c0a9d | licenses ["MIT"] | count 1 | events 2021-03-05T20:11:17.000Z to 2021-03-05T20:11:17.000Z
content:
# -*- coding: utf-8 -*-
import re
from mmpy_bot.bot import respond_to
@respond_to('^ping$', re.IGNORECASE)
def ping_reply(message):
message.reply('pong')
ping_reply.__doc__ = "Send pong"
avg_line_length: 14.071429 | max_line_length: 36 | alphanum_fraction: 0.690355

hexsha: 676e179a69163d02a9250af2d7947529edd58239 | size: 240 | ext: py | lang: Python
max_stars: path cultureplatform/forum/forms.py | repo michaelroudnitski/cultureplatform | head 38a68faa541cd1b043ec0c0f98323b2fb7623d14 | licenses ["Apache-2.0"] | count null | events null
max_issues: path cultureplatform/forum/forms.py | repo michaelroudnitski/cultureplatform | head 38a68faa541cd1b043ec0c0f98323b2fb7623d14 | licenses ["Apache-2.0"] | count null | events null
max_forks: path cultureplatform/forum/forms.py | repo michaelroudnitski/cultureplatform | head 38a68faa541cd1b043ec0c0f98323b2fb7623d14 | licenses ["Apache-2.0"] | count null | events null
content:
from django import forms
from django.forms import ModelForm
from .models import Forum
class NewForumForm(ModelForm):
""" inherits the Forum model and casts as a form """
class Meta:
model = Forum
fields = ['title',]
avg_line_length: 26.666667 | max_line_length: 56 | alphanum_fraction: 0.683333

hexsha: 6576ad21a81521db92f89009753b2c4420df2979 | size: 3,498 | ext: py | lang: Python
max_stars: path dfetch/commands/update.py | repo jgeudens/dfetch | head da9fb65a805a8eaf96ebde265b3f294080df3465 | licenses ["MIT"] | count 11 | events 2020-10-14T14:51:02.000Z to 2022-02-07T18:40:43.000Z
max_issues: path dfetch/commands/update.py | repo jgeudens/dfetch | head da9fb65a805a8eaf96ebde265b3f294080df3465 | licenses ["MIT"] | count 138 | events 2020-11-02T21:18:40.000Z to 2022-03-31T20:44:08.000Z
max_forks: path dfetch/commands/update.py | repo jgeudens/dfetch | head da9fb65a805a8eaf96ebde265b3f294080df3465 | licenses ["MIT"] | count 5 | events 2020-10-31T12:35:04.000Z to 2022-01-27T12:51:55.000Z
content:
"""Update is the main functionality of dfetch.
You can add Projects to your :ref:`Manifest` and update will fetch the version specified.
It tries to determine what kind of vcs it is: git, svn or something else.
.. uml:: /static/uml/update.puml
Child-manifests
~~~~~~~~~~~~~~~
It is possible that projects have manifests of their own.
After the projects of the main manifest are fetched,
*Dfetch* will look for new manifests and update these as well following the same logic as above.
If you don't want this, you can prevent *Dfetch* from
checking child-manifests with ``--non-recursive``.
.. note:: Any name or destination clashes are currently up to the user.
"""
import argparse
import os
from typing import List
import dfetch.commands.command
import dfetch.manifest.manifest
import dfetch.manifest.project
import dfetch.manifest.validate
import dfetch.project.git
import dfetch.project.svn
from dfetch.log import get_logger
from dfetch.manifest.project import ProjectEntry
from dfetch.util.util import catch_runtime_exceptions, in_directory
logger = get_logger(__name__)
class Update(dfetch.commands.command.Command):
"""Update all modules from the manifest.
Verifies the manifest and checks all dependencies if updates are available.
"""
@staticmethod
def create_menu(subparsers: "argparse._SubParsersAction") -> None:
"""Add the menu for the update action."""
parser = dfetch.commands.command.Command.parser(subparsers, Update)
parser.add_argument(
"-N",
"--non-recursive",
action="store_true",
help="Don't recursively check for child manifests.",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
help="Always perform update, ignoring version check or local changes.",
)
parser.add_argument(
"projects",
metavar="<project>",
type=str,
nargs="*",
help="Specific project(s) to update",
)
def __call__(self, args: argparse.Namespace) -> None:
"""Perform the update."""
manifest, path = dfetch.manifest.manifest.get_manifest()
exceptions: List[str] = []
with in_directory(os.path.dirname(path)):
for project in manifest.selected_projects(args.projects):
with catch_runtime_exceptions(exceptions) as exceptions:
dfetch.project.make(project).update(force=args.force)
if not args.non_recursive and os.path.isdir(project.destination):
with in_directory(project.destination):
exceptions += Update.__update_child_manifests(
project, path, force=args.force
)
if exceptions:
raise RuntimeError("\n".join(exceptions))
@staticmethod
def __update_child_manifests(
project: ProjectEntry, path: str, force: bool = False
) -> List[str]:
exceptions: List[str] = []
for (
childmanifest,
childpath,
) in dfetch.manifest.manifest.get_childmanifests(project, skip=[path]):
with in_directory(os.path.dirname(childpath)):
for childproject in childmanifest.projects:
with catch_runtime_exceptions(exceptions) as exceptions:
dfetch.project.make(childproject).update(force)
return exceptions
avg_line_length: 34.633663 | max_line_length: 96 | alphanum_fraction: 0.644082

hexsha: f15ac40822df80131972ca404d5acbe64a55fb1e | size: 58,508 | ext: py | lang: Python
max_stars: path tensorflow2/tf2cv/models/model_store.py | repo tucan9389/imgclsmob | head cf01fc242ce466a425de7779076ea023bf0148bc | licenses ["MIT"] | count 1 | events 2020-04-10T16:02:19.000Z to 2020-04-10T16:02:19.000Z
max_issues: path tensorflow2/tf2cv/models/model_store.py | repo tucan9389/imgclsmob | head cf01fc242ce466a425de7779076ea023bf0148bc | licenses ["MIT"] | count null | events null
max_forks: path tensorflow2/tf2cv/models/model_store.py | repo tucan9389/imgclsmob | head cf01fc242ce466a425de7779076ea023bf0148bc | licenses ["MIT"] | count 1 | events 2020-12-10T18:44:27.000Z to 2020-12-10T18:44:27.000Z
content:
"""
Model store which provides pretrained models.
"""
__all__ = ['get_model_file']
import os
import zipfile
import logging
import hashlib
_model_sha1 = {name: (error, checksum, repo_release_tag, ds, scale) for
name, error, checksum, repo_release_tag, ds, scale in [
('alexnet', '1789', 'ecc4bb4e46e05dde17809978d2900f4fe14ea590', 'v0.0.422', 'in1k', 0.875),
('alexnetb', '1859', '9e390537e070ee42c5deeb6c456f81c991efbb49', 'v0.0.422', 'in1k', 0.875),
('zfnet', '1717', '9500db3008e9ca8bc8f8de8101ec760e5ac8c05a', 'v0.0.422', 'in1k', 0.875),
('zfnetb', '1480', '47533f6a367312c8b2f56202aeae0be366013116', 'v0.0.422', 'in1k', 0.875),
('vgg11', '1017', 'c20556f4179e9311f28baa310702b6ea9265fee8', 'v0.0.422', 'in1k', 0.875),
('vgg13', '0951', '9fa609fcb5cb44caf2737d13c0accc07cdea0c9d', 'v0.0.422', 'in1k', 0.875),
('vgg16', '0834', 'ce78831f5d0640bd2fd619ba7d8d5027e62eb4f2', 'v0.0.422', 'in1k', 0.875),
('vgg19', '0768', 'ec5ac0baa5d49c041af48e67d34d1a89f1a72e7f', 'v0.0.422', 'in1k', 0.875),
('bn_vgg11', '0936', 'ef31b86687e83d413cb9c95c9ead657c3de9f21b', 'v0.0.422', 'in1k', 0.875),
('bn_vgg13', '0887', '2cccc7252ab4798fd9a6c3ce9d0b59717c47e40b', 'v0.0.422', 'in1k', 0.875),
('bn_vgg16', '0759', '1ca9dee8ef41ed84a216636d3c21380988ea1bf8', 'v0.0.422', 'in1k', 0.875),
('bn_vgg19', '0688', '81d25be84932c1c2848cabd4533423e3fd2cdbec', 'v0.0.422', 'in1k', 0.875),
('bn_vgg11b', '0975', 'aeaccfdc4a655d895e280165cf5be856472ca91f', 'v0.0.422', 'in1k', 0.875),
('bn_vgg13b', '1019', '1102ffb7817ff11a8db85f1b9b8519b100da26a0', 'v0.0.422', 'in1k', 0.875),
('bn_vgg16b', '0862', '137178f78ace3943333a98d980dd88b4746e66af', 'v0.0.422', 'in1k', 0.875),
('bn_vgg19b', '0817', 'cd68a741183cbbab52562c4b7330d721e8ffa739', 'v0.0.422', 'in1k', 0.875),
('bninception', '0865', '4cab3cce0eb1b79b872b189f5b0d9e4bb20f5ff4', 'v0.0.423', 'in1k', 0.875),
('resnet10', '1390', '9e787f637312e04d3ec85136bf0ceca50acf8c80', 'v0.0.422', 'in1k', 0.875),
('resnet12', '1301', '8bc41d1b1da87463857bb5ca03fe252ef03116ad', 'v0.0.422', 'in1k', 0.875),
('resnet14', '1224', '7573d98872e622ef74e036c8a436a39ab75e9378', 'v0.0.422', 'in1k', 0.875),
('resnetbc14b', '1115', '5f30b7985b5a57d34909f3db08c52dfe1da065ac', 'v0.0.422', 'in1k', 0.875),
('resnet16', '1088', '14ce0d64680c3fe52f43b407a00d1a23b6cfd81c', 'v0.0.422', 'in1k', 0.875),
('resnet18_wd4', '1745', '6e80041645de7ccbe156ce5bc3cbde909cee6b41', 'v0.0.422', 'in1k', 0.875),
('resnet18_wd2', '1283', '85a7caff1b2f8e355a1b8cb559e836d5b0c22d12', 'v0.0.422', 'in1k', 0.875),
('resnet18_w3d4', '1067', 'c1735b7de29016779c95e8e1481e5ded955b2b63', 'v0.0.422', 'in1k', 0.875),
('resnet18', '0956', '6645845a7614afd265e997223d38e00433f00182', 'v0.0.422', 'in1k', 0.875),
('resnet26', '0837', 'a8f20f7194cdfcb6fd514a8dc9546105fd7a562a', 'v0.0.422', 'in1k', 0.875),
('resnetbc26b', '0757', 'd70a2cadfb648f4c528704f1b9983f35af94de6f', 'v0.0.422', 'in1k', 0.875),
('resnet34', '0744', '7f7d70e7780e24b4cb60cefc895198cdb2b94665', 'v0.0.422', 'in1k', 0.875),
('resnetbc38b', '0677', '75e405a71f7227de5abb6a3c3c44d807b5963c44', 'v0.0.422', 'in1k', 0.875),
('resnet50', '0604', '728800bf57bd49f79671399fd4fd2b7fe9883f07', 'v0.0.422', 'in1k', 0.875),
('resnet50b', '0614', 'b2a49da61dce6309c75e77226bb047b43247da24', 'v0.0.422', 'in1k', 0.875),
('resnet101', '0601', 'b6befeb4c8cf6d72d9c325c22df72ac792b51706', 'v0.0.422', 'in1k', 0.875),
('resnet101b', '0511', 'e3076227a06b394aebcce6260c4afc665224c987', 'v0.0.422', 'in1k', 0.875),
('resnet152', '0534', '2d8e394abcb9d35d2a853bb4dacb58460ff13551', 'v0.0.422', 'in1k', 0.875),
('resnet152b', '0480', 'b77f1e2c9158cc49deba2cf60b8a8e8d6605d654', 'v0.0.422', 'in1k', 0.875),
('preresnet10', '1402', '541bf0e17a576b1676069563a1ed0de0fde4090f', 'v0.0.422', 'in1k', 0.875),
('preresnet12', '1320', '349c0df4a835699bdb045bedc3d38a7747cd21d4', 'v0.0.422', 'in1k', 0.875),
('preresnet14', '1224', '194b876203e467fbad2ccd2e03b90a79bfec8dac', 'v0.0.422', 'in1k', 0.875),
('preresnetbc14b', '1152', 'bc4e06ff3df99e7ffa0b2bdafa224796fa46f5a9', 'v0.0.422', 'in1k', 0.875),
('preresnet16', '1080', 'e00c40ee6d211f553bff0274771e5461150c69f4', 'v0.0.422', 'in1k', 0.875),
('preresnet18_wd4', '1780', '6ac7bc592983ced18c863f203db80bbd30e87a0b', 'v0.0.422', 'in1k', 0.875),
('preresnet18_wd2', '1314', '0c0528c8ae4943aa68ba0298209f2ed418e4f644', 'v0.0.422', 'in1k', 0.875),
('preresnet18_w3d4', '1070', '056b46c6e8ee2c86ebee560efea81dd43bbd5de6', 'v0.0.422', 'in1k', 0.875),
('preresnet18', '0955', '621ead9297b93673ec1c040e091efff9142313b5', 'v0.0.422', 'in1k', 0.875),
('preresnet26', '0837', '1a92a73217b1611c27b0c7082a018328264a65ff', 'v0.0.422', 'in1k', 0.875),
('preresnetbc26b', '0788', '1f737cd6c173ed8e5d9a8a69b35e1cf696ba622e', 'v0.0.422', 'in1k', 0.875),
('preresnet34', '0754', '3cc5ae1481512a8b206fb96ac8b632bcc5ee2db9', 'v0.0.422', 'in1k', 0.875),
('preresnetbc38b', '0636', '3396b49b5d20e7d362f9bd8879c00a21e8d67df1', 'v0.0.422', 'in1k', 0.875),
('preresnet50', '0625', '208605629d347a64b9a354f5ad7f441f736eb418', 'v0.0.422', 'in1k', 0.875),
('preresnet50b', '0634', '711227b1a93dd721dd3e37709456acfde969ba18', 'v0.0.422', 'in1k', 0.875),
('preresnet101', '0573', 'd45ea488f72fb99af1c46e4064b12c5014a7b626', 'v0.0.422', 'in1k', 0.875),
('preresnet101b', '0539', '54d23aff956752be614c2ba66d8bff5477cf0367', 'v0.0.422', 'in1k', 0.875),
('preresnet152', '0532', '0ad4b58f2365028db9216f1e080898284328cc3e', 'v0.0.422', 'in1k', 0.875),
('preresnet152b', '0500', '119062d97d30f6636905c824c6d1b4e21be2c3f2', 'v0.0.422', 'in1k', 0.875),
('preresnet200b', '0563', '2f9c761d78714c33d3b260add782e3851b0078f4', 'v0.0.422', 'in1k', 0.875),
('preresnet269b', '0557', '7003b3c4a1dea496f915750b4411cc67042a111d', 'v0.0.422', 'in1k', 0.875),
('resnext14_16x4d', '1222', 'bff90c1d3dbde7ea4a6972bbacb619e252d344ea', 'v0.0.422', 'in1k', 0.875),
('resnext14_32x2d', '1247', '06aa6709cfb4cf23793eb0eee5d5fce42cfcb9cb', 'v0.0.422', 'in1k', 0.875),
('resnext14_32x4d', '1115', '3acdaec14a6c74284c03bc79ed47e9ecb394e652', 'v0.0.422', 'in1k', 0.875),
('resnext26_32x2d', '0851', '827791ccefaef07e5837f8fb1dae8733c871c029', 'v0.0.422', 'in1k', 0.875),
('resnext26_32x4d', '0718', '4f05525e34b9aeb82db2339f714b25055d94657b', 'v0.0.422', 'in1k', 0.875),
('resnext50_32x4d', '0547', '45234d14f0e80700afc5c61e1bd148d848d8d089', 'v0.0.422', 'in1k', 0.875),
('resnext101_32x4d', '0494', '3990ddd1e776c7e90625db9a8f683e1d6a6fb301', 'v0.0.422', 'in1k', 0.875),
('resnext101_64x4d', '0484', 'f8cf1580943cf3c6d6019f2fcc44f8adb857cb20', 'v0.0.422', 'in1k', 0.875),
('seresnet10', '1332', '33a592e1497d37a427920c1408be908ba28d2a6d', 'v0.0.422', 'in1k', 0.875),
('seresnet18', '0921', '46c847abfdbd82c41a096e385163f21ae29ee200', 'v0.0.422', 'in1k', 0.875),
('seresnet26', '0807', '5178b3b1ea71bb118ffcc5d471f782f4ae6150d4', 'v0.0.422', 'in1k', 0.875),
('seresnetbc26b', '0684', '1460a381603c880f24fb0a42bfb6b79b850e2b28', 'v0.0.422', 'in1k', 0.875),
('seresnetbc38b', '0575', '18fcfcc1fee078382ad957e0f7d139ff596732e7', 'v0.0.422', 'in1k', 0.875),
('seresnet50', '0560', 'f1b84c8de0d25bbd4e92fcaefd9dd5012fa74bc4', 'v0.0.441', 'in1k', 0.875),
('seresnet50b', '0533', '256002c3b489d5b685ee1ab6b62303d7768c5816', 'v0.0.422', 'in1k', 0.875),
('seresnet101', '0589', '2a22ba87f5b0d56d51063898161d4c42cac45325', 'v0.0.422', 'in1k', 0.875),
('seresnet101b', '0464', 'a10be1d25d3112825e7b77277d6c56eb276dc799', 'v0.0.460', 'in1k', 0.875),
('seresnet152', '0576', '8023259a13a53aa0a72d9df6468721314e702872', 'v0.0.422', 'in1k', 0.875),
('sepreresnet10', '1309', 'af20d06c486dc97cff0f6d9bc52a7c7458040514', 'v0.0.422', 'in1k', 0.875),
('sepreresnet18', '0940', 'fe403280f68a5dfa93366437b9ff37ce3a419cf8', 'v0.0.422', 'in1k', 0.875),
('sepreresnetbc26b', '0640', 'a72bf8765efb1024bdd33eebe9920fd3e22d0bd6', 'v0.0.422', 'in1k', 0.875),
('sepreresnetbc38b', '0567', '17d10c63f096db1b7bfb59b6c6ffe14b9c669676', 'v0.0.422', 'in1k', 0.875),
('seresnext50_32x4d', '0509', '4244900a583098a5fb6c174c834f44a7471305c2', 'v0.0.422', 'in1k', 0.875),
('seresnext101_32x4d', '0459', '13a9b2fd699a3e25ee18d93a408dbaf3dee74428', 'v0.0.422', 'in1k', 0.875),
('seresnext101_64x4d', '0465', 'ec0a3b132256c8a7d0f92c45775d201a456f25fb', 'v0.0.422', 'in1k', 0.875),
('senet16', '0805', 'f5f576568d02a572be5276b0b64e71ce4d1c4531', 'v0.0.422', 'in1k', 0.875),
('senet28', '0590', '667d56873564cc22b2f10478d5f3d55cda580c61', 'v0.0.422', 'in1k', 0.875),
('senet154', '0466', 'f1b79a9bf0f7073bacf534d846c03d1b71dc404b', 'v0.0.422', 'in1k', 0.875),
('ibn_resnet50', '0668', '4c72a071e13235ccea0db3d932db8ec5f691e155', 'v0.0.427', 'in1k', 0.875),
('ibn_resnet101', '0584', '2c2c4993de8b8d79a66a62a1dbf682e552eb16c1', 'v0.0.427', 'in1k', 0.875),
('ibnb_resnet50', '0695', '7178cc50d166fa2d2474b5110aaea7fcd41bd8ca', 'v0.0.427', 'in1k', 0.875),
('ibn_resnext101_32x4d', '0564', 'c149beb5a735b75d35a728f0f0054514899e9f8b', 'v0.0.427', 'in1k', 0.875),
('ibn_densenet121', '0749', '009d1919ec097777b9ffb3c1c4ff7802e0158201', 'v0.0.427', 'in1k', 0.875),
('ibn_densenet169', '0684', '7152d6ccf07babca362df603d45b09fd37ca6744', 'v0.0.427', 'in1k', 0.875),
('airnet50_1x64d_r2', '0623', '6940f0e553a65c1beb4b769e31685cdde59359b8', 'v0.0.423', 'in1k', 0.875),
('airnet50_1x64d_r16', '0650', 'b7bb86623e680f08a39828894052099cc5198842', 'v0.0.423', 'in1k', 0.875),
('airnext50_32x4d_r2', '0572', 'fa8e40ab400cd8507a02606db72d270382482ecf', 'v0.0.423', 'in1k', 0.875),
('bam_resnet50', '0697', '3a4101c80ee21a615835f954c5ca67a959978554', 'v0.0.424', 'in1k', 0.875),
('cbam_resnet50', '0639', '1d0bdb0e36545428975df6dcb32bac876934744c', 'v0.0.429', 'in1k', 0.875),
('pyramidnet101_a360', '0651', '9db84918734d8fe916664ecef49df0a0c0168530', 'v0.0.423', 'in1k', 0.875),
('diracnet18v2', '1113', '4d687b749342d23996d078a0984fd6affe63e47c', 'v0.0.429', 'in1k', 0.875),
('diracnet34v2', '0950', '161d97fda4104be091e918ea24c903bfffdc9b8d', 'v0.0.429', 'in1k', 0.875),
('densenet121', '0684', 'e9196a9c93534ca7b71ef136e5cc27f240370481', 'v0.0.422', 'in1k', 0.875),
('densenet161', '0591', '78224027b390f943b30130a7921ded2887776a77', 'v0.0.432', 'in1k', 0.875),
('densenet169', '0606', 'f708dc3310008e59814745ffc22ddf829fb2d25a', 'v0.0.422', 'in1k', 0.875),
('densenet201', '0591', '450c656858d693932253b486069690fe727f6f89', 'v0.0.426', 'in1k', 0.875),
('peleenet', '1129', 'e1c3cdea31e2c683d71f808765963c2fffcd672e', 'v0.0.429', 'in1k', 0.875),
('wrn50_2', '0614', 'bea17aa953afed82540c509d7c2964d602fcb2af', 'v0.0.423', 'in1k', 0.875),
('drnc26', '0788', '571eb2dc632b9aecd2726507847412e4e2d3149b', 'v0.0.425', 'in1k', 0.875),
('drnc42', '0693', '52dd60289e5d9cd8eeb66786eb31b9bd5b1b0b36', 'v0.0.425', 'in1k', 0.875),
('drnc58', '0626', 'e5c7be8922e6c9e60661d0aa88618f5b28961289', 'v0.0.425', 'in1k', 0.875),
('drnd22', '0848', '42f7a37bc912979db496fff8b808f724b4712974', 'v0.0.425', 'in1k', 0.875),
('drnd38', '0737', 'a110827559aa831a3b2b9a2b032c8adbc47769e5', 'v0.0.425', 'in1k', 0.875),
('drnd54', '0626', 'cb792485021c6f946e28cc3e72674e5a1286b9da', 'v0.0.425', 'in1k', 0.875),
('drnd105', '0583', '80eb9ec2efd053d2f1e73d08911208c5d787e7cf', 'v0.0.425', 'in1k', 0.875),
('dpn68', '0658', '5b70b7b86c33c3dfb04f5fa189e5d501e8804499', 'v0.0.427', 'in1k', 0.875),
('dpn98', '0528', '6883ec37bc83f092101511a4e46702f1587f970e', 'v0.0.427', 'in1k', 0.875),
('dpn131', '0524', '971af47c5c45175a9999002849d4bb5e47fa99f3', 'v0.0.427', 'in1k', 0.875),
('darknet_tiny', '1745', 'd30be41aad15edf40dfed0bbf53d0e68c520f9f3', 'v0.0.422', 'in1k', 0.875),
('darknet_ref', '1671', 'b4991f6b58ae95118aa9ea84cae4a27e328196b5', 'v0.0.422', 'in1k', 0.875),
('darknet53', '0558', '4a63ab3005e5138445da5fac4247c460de02a41b', 'v0.0.422', 'in1k', 0.875),
('bagnet9', '3553', '43eb57dcbbce90287d0c3158457077fcc6a4c5ef', 'v0.0.424', 'in1k', 0.875),
('bagnet17', '2154', '8a31e34793f4ebc9c7585f531dab1b47b3befc0d', 'v0.0.424', 'in1k', 0.875),
('bagnet33', '1497', 'ef600c89aacdd881c2c5483defa9cb220286d31b', 'v0.0.424', 'in1k', 0.875),
('dla34', '0823', '9232e3e7c299c2e83a49e5372affee2f19226518', 'v0.0.427', 'in1k', 0.875),
('dla46c', '1287', 'dfcae3b549121205008235fd7e59793b394f8998', 'v0.0.427', 'in1k', 0.875),
('dla46xc', '1229', 'a858beca359f41cfe836cec6d30b01ba98109d06', 'v0.0.427', 'in1k', 0.875),
('dla60', '0711', '7375fcfd8ec94bfd6587ef49d52e4f2dcefc0296', 'v0.0.427', 'in1k', 0.875),
('dla60x', '0621', '3c5941dbfdf66b879c02901282e8500288bc6498', 'v0.0.427', 'in1k', 0.875),
('dla60xc', '1075', 'a7850f0307de77fcce42afdbb7070776b7c219ca', 'v0.0.427', 'in1k', 0.875),
('dla102', '0643', '2be886b250ba9ea8721e8bdba62b4e32d33e19e4', 'v0.0.427', 'in1k', 0.875),
('dla102x', '0602', '46640eec0179abf109951d865f5c397024cf9297', 'v0.0.427', 'in1k', 0.875),
('dla102x2', '0553', '06c930313e017f2ef9596d9259f0029d399f563a', 'v0.0.427', 'in1k', 0.875),
('dla169', '0590', 'e010166d75cd6603a94f006f0dbf5a4d9185bf07', 'v0.0.427', 'in1k', 0.875),
('hrnet_w18_small_v1', '0974', '8db99936134de71bf0700e2855b5caef30a95298', 'v0.0.428', 'in1k', 0.875),
('hrnet_w18_small_v2', '0805', 'fcb8e21898d1dd5ace4587f33e1d5e9c335369e5', 'v0.0.428', 'in1k', 0.875),
('hrnetv2_w18', '0686', '71c614d725ecfca2506ccf5d71723796cc7ae275', 'v0.0.428', 'in1k', 0.875),
('hrnetv2_w30', '0606', '4883e3451691d7d14a3d7d3572aecc21f3aa8454', 'v0.0.428', 'in1k', 0.875),
('hrnetv2_w32', '0607', 'ef949840f95a1cd82bc7ad8795929c795058d78b', 'v0.0.428', 'in1k', 0.875),
('hrnetv2_w40', '0573', '29cece1c277ee70a91a373f3c5cb266f6a1af9e3', 'v0.0.428', 'in1k', 0.875),
('hrnetv2_w44', '0595', 'a4e4781ca1c32fc98beed3167832601ca51266c9', 'v0.0.428', 'in1k', 0.875),
('hrnetv2_w48', '0581', '3af4ed57e2c7dab91794f933f7e8105320935d31', 'v0.0.428', 'in1k', 0.875),
('hrnetv2_w64', '0553', 'aede8def2f12173f640f85187b531c5218615d92', 'v0.0.428', 'in1k', 0.875),
('vovnet39', '0825', '49cbcdc62cc7815a4bc76e508da8feee0f802e00', 'v0.0.431', 'in1k', 0.875),
('vovnet57', '0812', '0977958aceb28a7481a230e0ba52750d43e5b152', 'v0.0.431', 'in1k', 0.875),
('selecsls42b', '0676', '0d785bec0c31aee57e1d267900ae1a942a665fcb', 'v0.0.430', 'in1k', 0.875),
('selecsls60', '0630', 'a799a0e5ddcc3991808bd8d98a83a3e717ee87e4', 'v0.0.430', 'in1k', 0.875),
('selecsls60b', '0604', 'bc9c43191043382b38e3be5893d1d8316ca401e9', 'v0.0.430', 'in1k', 0.875),
('hardnet39ds', '1003', '4971cd5a76946293a137d78032ee024f0258c979', 'v0.0.435', 'in1k', 0.875),
('hardnet68ds', '0845', 'dd35f3f91bfe55c354d4aac2b5830c3a744741ed', 'v0.0.435', 'in1k', 0.875),
('hardnet68', '0740', '9ea05e3973dddb52b970872fc3ed76fa32d10731', 'v0.0.435', 'in1k', 0.875),
('hardnet85', '0644', '7892e2215c2d1c32996be09a724c8125c8c49572', 'v0.0.435', 'in1k', 0.875),
('squeezenet_v1_0', '1760', 'd13ba73265325f21eb34e782989a7269cad406c6', 'v0.0.422', 'in1k', 0.875),
('squeezenet_v1_1', '1742', '95b614487f1f0572bd0dba18e0fc6d63df3a6bfc', 'v0.0.422', 'in1k', 0.875),
('squeezeresnet_v1_0', '1783', 'db620d998257c84fd6d5e80bba48cc1022febda3', 'v0.0.422', 'in1k', 0.875),
('squeezeresnet_v1_1', '1789', '13d6bc6bd85adf83ef55325443495feb07c5788f', 'v0.0.422', 'in1k', 0.875),
('sqnxt23_w1', '1861', '379975ebe54b180f52349c3737b17ea7b2613953', 'v0.0.422', 'in1k', 0.875),
('sqnxt23v5_w1', '1762', '153b4ce73714d2ecdca294efb365ab9c026e2f41', 'v0.0.422', 'in1k', 0.875),
('sqnxt23_w3d2', '1334', 'a2ba956cfeed0b4bbfc37776c6a1cd5ca13d9345', 'v0.0.422', 'in1k', 0.875),
('sqnxt23v5_w3d2', '1284', '72efaa710f0f1645cb220cb9950b3660299f2bed', 'v0.0.422', 'in1k', 0.875),
('sqnxt23_w2', '1069', 'f43dee19c527460f9815fc4e5eeeaef99fae4df3', 'v0.0.422', 'in1k', 0.875),
('sqnxt23v5_w2', '1026', 'da80c6407a4c18be31bcdd08356666942a9ef2b4', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g1_wd4', '3681', '04a9e2d4ada22b3d317e2fc8b7d4ec11865c414f', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g3_wd4', '3618', 'c9aad0f08d129726bbc19219c9773b38cf38825e', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g1_wd2', '2236', '082db702c422d8bce12d4d79228de56f088a420d', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g3_wd2', '2059', 'e3aefeeb36c20e325d0c7fe46afc60484167609d', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g1_w3d4', '1679', 'a1cc5da3a288299a33353f697ed0297328dc3e95', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g3_w3d4', '1611', '89546a05f499f0fdf96dade0f3db430f92c5920d', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g1_w1', '1348', '52ddb20fd7ff288ae30a17757efda4653c09d5ca', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g2_w1', '1333', '2a8ba6928e6fac05a5fe8911a9a175268eb18382', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g3_w1', '1326', 'daaec8b84572023c1352e11830d296724123408e', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g4_w1', '1313', '35dbd6b9fb8bc3e97367ea210abbd61da407f226', 'v0.0.422', 'in1k', 0.875),
('shufflenet_g8_w1', '1322', '449fb27659101a2cf0a87c90e33f4632d1c5e9f2', 'v0.0.422', 'in1k', 0.875),
('shufflenetv2_wd2', '1843', 'd492d721d3167cd64ab1c2a1f33f3ca5f6dec7c3', 'v0.0.422', 'in1k', 0.875),
('shufflenetv2_w1', '1135', 'dae13ee9f24c89cd1ea12a58fb90b967223c8e2e', 'v0.0.422', 'in1k', 0.875),
('shufflenetv2_w3d2', '0923', 'ea615baab737fca3a3d90303844b4a2922ea2c62', 'v0.0.422', 'in1k', 0.875),
('shufflenetv2_w2', '0821', '6ccac868f595e4618ca7e5f67f7c113f021ffad4', 'v0.0.422', 'in1k', 0.875),
('shufflenetv2b_wd2', '1784', 'd5644a6ab8fcb6ff04f30a2eb862ebd2de92b94c', 'v0.0.422', 'in1k', 0.875),
('shufflenetv2b_w1', '1104', 'b7db0ca041e996ee76fec7f126dc39c4e5120e82', 'v0.0.422', 'in1k', 0.875),
('shufflenetv2b_w3d2', '0877', '9efb13f7d795d63c8fbee736622b9f1940dd5dd5', 'v0.0.422', 'in1k', 0.875),
('shufflenetv2b_w2', '0808', 'ba5c7ddcd8f7da3719f5d1de71d5fd30130d59d9', 'v0.0.422', 'in1k', 0.875),
('menet108_8x1_g3', '2039', '1a8cfc9296011cd994eb48e75e24c33ecf6580f5', 'v0.0.422', 'in1k', 0.875),
('menet128_8x1_g4', '1918', '7fb59f0a8d3e1f490c26546dfe93ea29ebd79c2b', 'v0.0.422', 'in1k', 0.875),
('menet160_8x1_g8', '2034', '3cf9eb2aa2d4e067aa49ce32e7a41e9db5262493', 'v0.0.422', 'in1k', 0.875),
('menet228_12x1_g3', '1291', '21bd19bf0adb73b10cb04ccce8688f119467a114', 'v0.0.422', 'in1k', 0.875),
('menet256_12x1_g4', '1217', 'd9f2e10e6402e5ee2aec485da07da72edf25f790', 'v0.0.422', 'in1k', 0.875),
('menet348_12x1_g3', '0937', 'cee7691c710f5c453b63ef9e8c3e15e699b004bb', 'v0.0.422', 'in1k', 0.875),
('menet352_12x1_g8', '1167', '54a916bcc3920c6ef24243c8c73604b25d728a6d', 'v0.0.422', 'in1k', 0.875),
('menet456_24x1_g3', '0779', '2a70b14bd17e8d4692f15f2f8e9d181e7d95b971', 'v0.0.422', 'in1k', 0.875),
('mobilenet_wd4', '2213', 'ad04596aa730e5bb4429115df70504c5a7dd5969', 'v0.0.422', 'in1k', 0.875),
('mobilenet_wd2', '1333', '01395e1b9e2a54065aafcc8b4c419644e7f6a655', 'v0.0.422', 'in1k', 0.875),
('mobilenet_w3d4', '1051', '7832561b956f0d763b002fbd9f2f880bbb712885', 'v0.0.422', 'in1k', 0.875),
('mobilenet_w1', '0866', '6939232b46fb98c8a9209d66368d630bb50941ed', 'v0.0.422', 'in1k', 0.875),
('fdmobilenet_wd4', '3062', '36aa16df43b344f42d6318cc840a81702951a033', 'v0.0.422', 'in1k', 0.875),
('fdmobilenet_wd2', '1977', '34541b84660b4e812830620c5d48df7c7a142078', 'v0.0.422', 'in1k', 0.875),
('fdmobilenet_w3d4', '1597', '0123c0313194a3094ec006f757d93f59aad73c2b', 'v0.0.422', 'in1k', 0.875),
('fdmobilenet_w1', '1312', 'fa99fb8d728f66f68464221e049a33cd2b8bfc6a', 'v0.0.422', 'in1k', 0.875),
('mobilenetv2_wd4', '2413', 'c3705f55b0df68919fba7ed79204c5651f6f71b1', 'v0.0.422', 'in1k', 0.875),
('mobilenetv2_wd2', '1446', 'b0c9a98b85b579ba77c17d228ace399809c6ab43', 'v0.0.422', 'in1k', 0.875),
('mobilenetv2_w3d4', '1044', 'e122c73eae885d204bc2ba46fb013a9da5cb282f', 'v0.0.422', 'in1k', 0.875),
('mobilenetv2_w1', '0863', 'b32cede3b68f40f2ed0552dcdf238c70f82e5705', 'v0.0.422', 'in1k', 0.875),
('mobilenetv2b_wd4', '2505', '4079f654160fcd7ce37212c2d2da424f22913a70', 'v0.0.453', 'in1k', 0.875),
('mobilenetv2b_wd2', '1473', '129cfd918a4c4ab1b2544bc9ae24ed03f97f00ee', 'v0.0.453', 'in1k', 0.875),
('mobilenetv2b_w3d4', '1152', 'fa93741abc02bf3feb9e276d26dcfc6249a7b0a4', 'v0.0.453', 'in1k', 0.875),
('mobilenetv2b_w1', '0943', '97f1b6763b054d821e3422c7b4faf990005ec2c0', 'v0.0.453', 'in1k', 0.875),
('mobilenetv3_large_w1', '0769', 'f66596ae10c8eaa1ea3cb79b2645bd93f946b059', 'v0.0.422', 'in1k', 0.875),
('igcv3_wd4', '2828', '309359dc5a0cd0439f2be5f629534aa3bdf2b4f9', 'v0.0.422', 'in1k', 0.875),
('igcv3_wd2', '1701', 'b952333ab2024f879d4bb9895331a617f2b957b5', 'v0.0.422', 'in1k', 0.875),
('igcv3_w3d4', '1100', '00294c7b1ab9dddf7ab2cef3e7ec0a627bd67b29', 'v0.0.422', 'in1k', 0.875),
('igcv3_w1', '0899', 'a0cb775dd5bb2c13dce35a21d6fd53a783959702', 'v0.0.422', 'in1k', 0.875),
('mnasnet_b1', '0802', '763d6849142ce86f46cb7ec4c003ccf15542d6eb', 'v0.0.422', 'in1k', 0.875),
('mnasnet_a1', '0756', '8e0f49481a3473b9457d0987c9c6f7e51ff57576', 'v0.0.422', 'in1k', 0.875),
('proxylessnas_cpu', '0751', '47e1431680e115462835e73ec21dec8b6e88eb13', 'v0.0.424', 'in1k', 0.875),
('proxylessnas_gpu', '0726', 'd536cb3e27a47a4a18aa8e230ebe6b4a8f748910', 'v0.0.424', 'in1k', 0.875),
('proxylessnas_mobile', '0783', 'da8cdb80c5bd618258c657ebd8506e1342eaeb0d', 'v0.0.424', 'in1k', 0.875),
('proxylessnas_mobile14', '0653', '478b58cdb6c94007f786ec06a9e71a8dbc14507f', 'v0.0.424', 'in1k', 0.875),
('fbnet_cb', '0784', 'acd12097da630e4bf9051d138f04c7e9535e58c1', 'v0.0.428', 'in1k', 0.875),
('xception', '0558', 'b95b50510de4e39e2ddf759e69501a7470787c00', 'v0.0.423', 'in1k', 0.875),
('inceptionv3', '0563', 'b0094c1c279551394aa5c9709003c567324dcd70', 'v0.0.427', 'in1k', 0.875),
('inceptionv4', '0541', 'c1fa5642c0218e89fbe3effb233bffeb24672ba9', 'v0.0.428', 'in1k', 0.875),
('inceptionresnetv2', '0495', '3e2cc5456bb14fbdaec55006430278970ab64050', 'v0.0.428', 'in1k', 0.875),
('polynet', '0451', 'e752c86bbde4f5ce07ab6d079673a62a7565acf7', 'v0.0.428', 'in1k', 0.875),
('nasnet_4a1056', '0833', '9710e638693fa52538b268e767706210bf37d667', 'v0.0.428', 'in1k', 0.875),
('nasnet_6a4032', '0427', '1f0d2198bffb71386290b9b4e2058af2610574d8', 'v0.0.428', 'in1k', 0.875),
('pnasnet5large', '0427', '90e804af249c36f5f4435eb58ee0f32debefb320', 'v0.0.428', 'in1k', 0.875),
('spnasnet', '0873', 'a38a57a3d582ec4e227405924b84928587ea362f', 'v0.0.427', 'in1k', 0.875),
('efficientnet_b0', '0725', 'fc13925b2b95f5469aba2bb7b8472fdbabd663c3', 'v0.0.427', 'in1k', 0.875),
('efficientnet_b1', '0630', '82e0c512dc557ccb4eb3fbdabf48106988251d6d', 'v0.0.427', 'in1k', 0.882),
('efficientnet_b0b', '0668', '771272448df362b9637c7edf94292ab2c9676314', 'v0.0.429', 'in1k', 0.875),
('efficientnet_b1b', '0577', 'b294ee16111847f37129ff069f9911f76a2233d4', 'v0.0.429', 'in1k', 0.882),
('efficientnet_b2b', '0530', '55bcdc5d03493a581c3a3778b5ee6c08142718b4', 'v0.0.429', 'in1k', 0.890),
('efficientnet_b3b', '0469', 'b8210e1ac4f331b25b95c4a6d30e4b024d84ceb3', 'v0.0.429', 'in1k', 0.904),
('efficientnet_b4b', '0399', '5e35e9c56c3a0f705a44a38087e2084a25ee0a2e', 'v0.0.429', 'in1k', 0.922),
('efficientnet_b5b', '0343', '0ed0c69daa1d75e2da35f49ddea6bcfa0383727f', 'v0.0.429', 'in1k', 0.934),
('efficientnet_b6b', '0312', 'faf631041f84b19668eb207201ec13b2d405e702', 'v0.0.429', 'in1k', 0.942),
('efficientnet_b7b', '0315', '4024912ec1499b559de26b2ee7d7be1c2a3e53cf', 'v0.0.429', 'in1k', 0.949),
('efficientnet_b0c', '0646', '2bd0e2af1d275ab2046002719305bf517137f6df', 'v0.0.433', 'in1k', 0.875),
('efficientnet_b1c', '0582', 'a760b325d867a5aa4093ae69d68e8df04ed7730b', 'v0.0.433', 'in1k', 0.882),
('efficientnet_b2c', '0533', 'ea6ca9cf3c5179ad3927d7c3386c1c18c7183e24', 'v0.0.433', 'in1k', 0.890),
('efficientnet_b3c', '0464', '1c8fced86bc52d3d97fdce3750180d6b694f53c6', 'v0.0.433', 'in1k', 0.904),
('efficientnet_b4c', '0390', 'dc4379eac0dc4144260a270d4eb4ea3835394703', 'v0.0.433', 'in1k', 0.922),
('efficientnet_b5c', '0310', '80258ef75ea1b068b6ccf66420b8dd346c0bcdaa', 'v0.0.433', 'in1k', 0.934),
('efficientnet_b6c', '0286', '285f830add2ce100c6ab035f2a0caf49a33308ad', 'v0.0.433', 'in1k', 0.942),
('efficientnet_b7c', '0276', '1ffad4eca775d49ba48a0aa168a9c81649dab5b1', 'v0.0.433', 'in1k', 0.949),
('efficientnet_b8c', '0270', 'aa691b94070f49e2b7f3a0ac11bc5ddbdb18b1f6', 'v0.0.433', 'in1k', 0.954),
('efficientnet_edge_small_b', '0642', '1c03bb7355c6ab14374520743cc56e1ee22e773b', 'v0.0.434', 'in1k', 0.875),
('efficientnet_edge_medium_b', '0565', '73153b188d8b79cd8cc0ab45991561499df87838', 'v0.0.434', 'in1k', 0.882),
('efficientnet_edge_large_b', '0496', 'd72edce103b4bdac37afeabec281f1aedc9632bc', 'v0.0.434', 'in1k', 0.904),
('mixnet_s', '0737', 'd68d63f1914beeaec4e068c0dbd1defe09c7ffb6', 'v0.0.427', 'in1k', 0.875),
('mixnet_m', '0679', 'f74eab6c0ed1bc453453f433bce1cdde2d3e6bda', 'v0.0.427', 'in1k', 0.875),
('mixnet_l', '0601', '5c2ccc0c906ae29985043dc590317133c0be3376', 'v0.0.427', 'in1k', 0.875),
('resneta50b', '0543', 'ba99aca40e3c117e94980b0d6786910eeae7b9ee', 'v0.0.452', 'in1k', 0.875),
('resneta101b', '0490', 'd6dfa5240ecfa2c11f851535c9e2dafdc3bf016f', 'v0.0.452', 'in1k', 0.875),
('resneta152b', '0465', 'a54b896fcef292ad3e5d6d1290e83cb760d97084', 'v0.0.452', 'in1k', 0.875),
('resnetd50b', '0549', '1c84294f68b78dc58e07496495be0f8ecd2f14e3', 'v0.0.447', 'in1k', 0.875),
('resnetd101b', '0459', '7cce7f1357a3de297f7000b33f505dc67c38fb96', 'v0.0.447', 'in1k', 0.875),
('resnetd152b', '0468', '4673f64c71cf438eeafc890b5a138e301437bf90', 'v0.0.447', 'in1k', 0.875),
('resnet20_cifar10', '0597', '451230e98c5da3cd24e364b76995cdf5bdd36b73', 'v0.0.438', 'cf', 0.0),
('resnet20_cifar100', '2964', '5fa28f78b6b33f507f6b79a41f7fca07f681e4a5', 'v0.0.438', 'cf', 0.0),
('resnet20_svhn', '0343', '3480eec0f2781350815d07aa57bb821ecadc8b69', 'v0.0.438', 'cf', 0.0),
('resnet56_cifar10', '0452', 'a39ad94af7aad7adf21f41436cb8d86a948c7e90', 'v0.0.438', 'cf', 0.0),
('resnet56_cifar100', '2488', '8e413ab97ce41f96e02888776bc9ec71df49d909', 'v0.0.438', 'cf', 0.0),
('resnet56_svhn', '0275', '5acc55374dab36f2ebe70948393112fad83c4b17', 'v0.0.438', 'cf', 0.0),
('resnet110_cifar10', '0369', 'c625643a3c10909cdfc6c955418f0fca174b8d01', 'v0.0.438', 'cf', 0.0),
('resnet110_cifar100', '2280', 'c248211b354f7058b3066c5fb4ad87b2d0bdb6a0', 'v0.0.438', 'cf', 0.0),
('resnet110_svhn', '0245', 'a07e849f5e3233ef458072a30d8cc04ae84ff054', 'v0.0.438', 'cf', 0.0),
('resnet164bn_cifar10', '0368', 'cf08cca79ac123304add47b3aaba11cb4c46a25b', 'v0.0.438', 'cf', 0.0),
('resnet164bn_cifar100', '2044', '1ba347905fe05d922c9ec5ba876611b6393c6c99', 'v0.0.438', 'cf', 0.0),
('resnet164bn_svhn', '0242', '1bfa8083c38c89c19a4e0b53f714876705624fa7', 'v0.0.438', 'cf', 0.0),
('resnet272bn_cifar10', '0333', 'c8b0a926aeba2cdd404454bb22a731a3aed5996c', 'v0.0.438', 'cf', 0.0),
('resnet272bn_cifar100', '2007', '5357e0df7431ce2fb41f748fa04454f5a7055d1c', 'v0.0.438', 'cf', 0.0),
('resnet272bn_svhn', '0243', 'e2a8e35588d6375815a9b633f66e019a393553f7', 'v0.0.438', 'cf', 0.0),
('resnet542bn_cifar10', '0343', 'c31829d4c5845f9604e1a0f5aec938f03fcc05c3', 'v0.0.438', 'cf', 0.0),
('resnet542bn_cifar100', '1932', '2db913a6e6e577a366e2ab30030b9e976a388008', 'v0.0.438', 'cf', 0.0),
('resnet542bn_svhn', '0234', '0d6759e722dd536b2ce16ef856b6926fba023c6d', 'v0.0.438', 'cf', 0.0),
('resnet1001_cifar10', '0328', '552ab287f0a8224ae960a4ec0b4aed0f309e6641', 'v0.0.438', 'cf', 0.0),
('resnet1001_cifar100', '1979', '75c8acac55fce2dfc5c3f56cd10dd0467e56ffd2', 'v0.0.438', 'cf', 0.0),
('resnet1001_svhn', '0241', 'c9a01550d011abc9e6bc14df63952715a88a506a', 'v0.0.438', 'cf', 0.0),
('resnet1202_cifar10', '0353', '3559a9431d3ddd3ef1ee24bf2baa1b7184a21108', 'v0.0.438', 'cf', 0.0),
('resnet1202_cifar100', '2156', '28fcf78635c21d23b018d70a812eeae2ae24ad39', 'v0.0.438', 'cf', 0.0),
('preresnet20_cifar10', '0651', 'd3e7771e923032393bb6fa88d62625f3da64d9fe', 'v0.0.439', 'cf', 0.0),
('preresnet20_cifar100', '3022', '447255f8c6ad79dc42a2644438e35bc39fdeed36', 'v0.0.439', 'cf', 0.0),
('preresnet20_svhn', '0322', '6dcae6129ca6839c35a1ae9b3d69c4d41591811d', 'v0.0.439', 'cf', 0.0),
('preresnet56_cifar10', '0449', 'b4bfdaa8eaa4370899d1fb0c3c360158cf3fa3f4', 'v0.0.439', 'cf', 0.0),
('preresnet56_cifar100', '2505', '180fc2081f3c694b0c3db2948cb05e06f1070ee2', 'v0.0.439', 'cf', 0.0),
('preresnet56_svhn', '0280', '6e074c73832de7afcb8e61405b2eb62bc969d35f', 'v0.0.439', 'cf', 0.0),
('preresnet110_cifar10', '0386', '287a4b0cdd424fdf29d862b411f556f3d8f57f98', 'v0.0.439', 'cf', 0.0),
('preresnet110_cifar100', '2267', 'ab677c09518f0b7aae855153fc820811bd530c28', 'v0.0.439', 'cf', 0.0),
('preresnet110_svhn', '0279', '226a0b342145852f4289630f6fd82d2c90f38e01', 'v0.0.439', 'cf', 0.0),
('preresnet164bn_cifar10', '0364', '29a459fad0f60028b48f1908970d3947728d76b0', 'v0.0.439', 'cf', 0.0),
('preresnet164bn_cifar100', '2018', 'c764970119e627e5c88fe3c7cb6a7d36cd7f29d0', 'v0.0.439', 'cf', 0.0),
('preresnet164bn_svhn', '0258', '2307c36f351e22d9bf0240fdcf5b5651dce03e57', 'v0.0.439', 'cf', 0.0),
('preresnet272bn_cifar10', '0325', '5bacdc955e8d800e08d6513a6ecd21ce79da6c84', 'v0.0.439', 'cf', 0.0),
('preresnet272bn_cifar100', '1963', '22e0919886949484354b5a18f6c87ab5aa33b61a', 'v0.0.439', 'cf', 0.0),
('preresnet272bn_svhn', '0234', '3451d5fbc8dfecf2da2e624319f0e0068091f358', 'v0.0.439', 'cf', 0.0),
('preresnet542bn_cifar10', '0314', 'd8324d47e327c92f3557db4ba806071041a56f69', 'v0.0.439', 'cf', 0.0),
('preresnet542bn_cifar100', '1871', '703875c6827c83e26e05cd3e516b5a3234d01747', 'v0.0.439', 'cf', 0.0),
('preresnet542bn_svhn', '0236', '5ca0759231c9a045df4ef40a47d8b81e624664f8', 'v0.0.439', 'cf', 0.0),
('preresnet1001_cifar10', '0265', '978844c1315a0a3f6261393bcc954cecb85c199a', 'v0.0.439', 'cf', 0.0),
('preresnet1001_cifar100', '1841', '7481e79c54d9a32d163c740eb53310c6a5f40b01', 'v0.0.439', 'cf', 0.0),
('preresnet1202_cifar10', '0339', 'ab04c456454c933245d91f36942166d45393a8bc', 'v0.0.439', 'cf', 0.0),
('resnext20_1x64d_cifar10', '0433', 'e0ab86674852a3c78f4a600e9e8ca50a06ff0bb9', 'v0.0.440', 'cf', 0.0),
('resnext20_1x64d_cifar100', '2197', '413945af9f271e173bb2085de38d65e98905f304', 'v0.0.440', 'cf', 0.0),
('resnext20_1x64d_svhn', '0298', '105736c8c2cb1bf8a4ac4538ccd7e139501095d6', 'v0.0.440', 'cf', 0.0),
('resnext20_2x32d_cifar10', '0453', '7aa966dd0803c3f731d0f858125baedca245cf86', 'v0.0.440', 'cf', 0.0),
('resnext20_2x32d_cifar100', '2255', 'bf34e56aea7d21fca0b99c14558d6b06aab1f94f', 'v0.0.440', 'cf', 0.0),
('resnext20_2x32d_svhn', '0296', 'b61e1395c12285ca0c765f3ddbfd8a5c4d252536', 'v0.0.440', 'cf', 0.0),
('resnext20_2x64d_cifar10', '0403', '367377ed36b429753d727369cba42db281b40443', 'v0.0.440', 'cf', 0.0),
('resnext20_2x64d_cifar100', '2060', '6eef33bcb44c73dfdfe51036f5d647b5eba286c5', 'v0.0.440', 'cf', 0.0),
('resnext20_2x64d_svhn', '0283', 'dedfbac24ad3e67c55609b79da689e01ad6ba759', 'v0.0.440', 'cf', 0.0),
('resnext20_4x16d_cifar10', '0470', '333e834da705f54958887ce7a34335b0e71fcfad', 'v0.0.440', 'cf', 0.0),
('resnext20_4x16d_cifar100', '2304', 'fa8d4e06a0455f49da492377be9fe90140795629', 'v0.0.440', 'cf', 0.0),
('resnext20_4x16d_svhn', '0317', 'cab6d9fd851d47f11863075e83dd699cddb21571', 'v0.0.440', 'cf', 0.0),
('resnext20_4x32d_cifar10', '0373', 'e4aa1b0dade046bbfc872f4c84ac5fe3bcbeda11', 'v0.0.440', 'cf', 0.0),
('resnext20_4x32d_cifar100', '2131', 'edabd5da34edfba348b8f1712bbb0dc3ce6c5a82', 'v0.0.440', 'cf', 0.0),
('resnext20_4x32d_svhn', '0298', '82b75cbb31f2ea3497548a19fdf1f5fb0531527c', 'v0.0.440', 'cf', 0.0),
('resnext20_8x8d_cifar10', '0466', '1dbd9f5e45f120c697d128558b4d263f2ac94f0e', 'v0.0.440', 'cf', 0.0),
('resnext20_8x8d_cifar100', '2282', '51922108355f86cb0131826715cef9e81513e399', 'v0.0.440', 'cf', 0.0),
('resnext20_8x8d_svhn', '0318', '6ef55252a46d6106a160d87da107a1293cbce654', 'v0.0.440', 'cf', 0.0),
('resnext20_8x16d_cifar10', '0404', '5329db5f6066a73e085805ab40969af31a43e4f7', 'v0.0.440', 'cf', 0.0),
('resnext20_8x16d_cifar100', '2172', '3665fda790f0164078ffd6403e022a0ba8186c47', 'v0.0.440', 'cf', 0.0),
('resnext20_8x16d_svhn', '0301', 'd1a547e4514e6338934b26c473061b49c669c632', 'v0.0.440', 'cf', 0.0),
('resnext20_16x4d_cifar10', '0404', 'c671993585f1cc878941475e87c266c8a1895ca8', 'v0.0.440', 'cf', 0.0),
('resnext20_16x4d_cifar100', '2282', 'e800aabb6ea23a0555d2ac5a1856d7d289a46bca', 'v0.0.440', 'cf', 0.0),
('resnext20_16x4d_svhn', '0321', '77a670a80e976b173272614cf9416e904f1defde', 'v0.0.440', 'cf', 0.0),
('resnext20_16x8d_cifar10', '0394', 'cf7c675c52499a714fb3391c0240c265d6f1bb01', 'v0.0.440', 'cf', 0.0),
('resnext20_16x8d_cifar100', '2173', '0a33029811f76f93e79b83bf6cb19d74711c2e5b', 'v0.0.440', 'cf', 0.0),
('resnext20_16x8d_svhn', '0293', '4ebac2762e92f1c12b28e3012c171333a63706e1', 'v0.0.440', 'cf', 0.0),
('resnext20_32x2d_cifar10', '0461', 'b05d34915134060c39ea4f6b9e356b539a1e147b', 'v0.0.440', 'cf', 0.0),
('resnext20_32x2d_cifar100', '2322', '2def8cc21fe9057a63aee6aef2c718720fd90230', 'v0.0.440', 'cf', 0.0),
('resnext20_32x2d_svhn', '0327', '0c099194b551bf0d72a0028a13a94a7ca277473b', 'v0.0.440', 'cf', 0.0),
('resnext20_32x4d_cifar10', '0420', '6011e9e91f901ab98107e451149065524d2acc30', 'v0.0.440', 'cf', 0.0),
('resnext20_32x4d_cifar100', '2213', '9508c15dddd01d0064938023904c6c23ad901da5', 'v0.0.440', 'cf', 0.0),
('resnext20_32x4d_svhn', '0309', 'c8a843e1a0ce40fe2f42e3406e671e9a0df55d82', 'v0.0.440', 'cf', 0.0),
('resnext20_64x1d_cifar10', '0493', 'a13300cea5f2c626c096ac1fbf9f707a6da46f0b', 'v0.0.440', 'cf', 0.0),
('resnext20_64x1d_cifar100', '2353', '91695baa3caba28fa7507b3ffa0629048e01aa6e', 'v0.0.440', 'cf', 0.0),
('resnext20_64x1d_svhn', '0342', 'a3bad459c16926727190d1875ae90e709d50145e', 'v0.0.440', 'cf', 0.0),
('resnext20_64x2d_cifar10', '0438', '3846d7a7ecea5fe4da1d0895da05b675b84e23d7', 'v0.0.440', 'cf', 0.0),
('resnext20_64x2d_cifar100', '2235', 'e4a559ccaba13da694828aca7f83bafc9e364dcd', 'v0.0.440', 'cf', 0.0),
('resnext20_64x2d_svhn', '0314', 'c755e25d61534ec355c2da1a458dc5772d1f790e', 'v0.0.440', 'cf', 0.0),
('resnext29_16x64d_cifar10', '0241', '712e474493fd9f504010ca0a8eb10a94431bffdb', 'v0.0.440', 'cf', 0.0),
('resnext29_16x64d_cifar100', '1693', '2df09272ed462101da32619e652074f8c1f3ec23', 'v0.0.440', 'cf', 0.0),
('resnext29_16x64d_svhn', '0268', 'c929fadabc9bd8c2b2e97d4e2703ec2fba31032b', 'v0.0.440', 'cf', 0.0),
('resnext29_32x4d_cifar10', '0315', '5ed2e0f0945e138c3aa0c9acc0c5fd08f2d840cd', 'v0.0.440', 'cf', 0.0),
('resnext29_32x4d_cifar100', '1950', 'e99791392f0930372efefbe0a54304230ac4cc90', 'v0.0.440', 'cf', 0.0),
('resnext29_32x4d_svhn', '0280', 'de6cba99c40a882e98d2ef002cc14d799f5bf8bc', 'v0.0.440', 'cf', 0.0),
('resnext56_1x64d_cifar10', '0287', '5da5fe18fdf2b55977266631e2eb4b7913e7d591', 'v0.0.440', 'cf', 0.0),
('resnext56_1x64d_cifar100', '1825', '727009516efca454a34a3e310608b45d4c9a4020', 'v0.0.440', 'cf', 0.0),
('resnext56_1x64d_svhn', '0242', 'dd7ac31ee1f1a0ffcd3049fc056e8e705cae93f0', 'v0.0.440', 'cf', 0.0),
('resnext56_2x32d_cifar10', '0301', '54d6f2df3a903cb23978cd674495ab1e8894ab09', 'v0.0.440', 'cf', 0.0),
('resnext56_2x32d_cifar100', '1786', '6639c30dd1bc152736c21c9de27823d0ce3b367c', 'v0.0.440', 'cf', 0.0),
('resnext56_2x32d_svhn', '0246', '61524d8aff0534121257ec5b8b65647cbdafda7f', 'v0.0.440', 'cf', 0.0),
('resnext56_4x16d_cifar10', '0311', '766ab89fccd5b2675d5d42a9372346fd7bf45b77', 'v0.0.440', 'cf', 0.0),
('resnext56_4x16d_cifar100', '1809', '61b41c3b953a4a7198dec6a379f789030a998e42', 'v0.0.440', 'cf', 0.0),
('resnext56_4x16d_svhn', '0244', 'b7ab24694a0c1f635fbb2b2e4130272b5e75b6bc', 'v0.0.440', 'cf', 0.0),
('resnext56_8x8d_cifar10', '0307', '685eab396974992f71402533be96229cdc3eb751', 'v0.0.440', 'cf', 0.0),
('resnext56_8x8d_cifar100', '1806', 'f3f80382faa7baadaef4e09fedb924b4d5deac78', 'v0.0.440', 'cf', 0.0),
('resnext56_8x8d_svhn', '0247', '85692d770f3bab690dc9aa57b4e3d9aa728121e9', 'v0.0.440', 'cf', 0.0),
('resnext56_16x4d_cifar10', '0312', '930e5d5baf62d2fe4e48afe7dbd928079fd5531a', 'v0.0.440', 'cf', 0.0),
('resnext56_16x4d_cifar100', '1824', '667ba1835c3db07e54ad4dfbc6ea99a0b12afd78', 'v0.0.440', 'cf', 0.0),
('resnext56_16x4d_svhn', '0256', '86f327a9652e79a4a38c0d6ebc9fda8f0a6c3ea4', 'v0.0.440', 'cf', 0.0),
('resnext56_32x2d_cifar10', '0314', '9e387e2e6c769802fbf7a911b67d2c490e14db85', 'v0.0.440', 'cf', 0.0),
('resnext56_32x2d_cifar100', '1860', '7a236896b7f00913f8a0846d39382d87bc56214c', 'v0.0.440', 'cf', 0.0),
('resnext56_32x2d_svhn', '0253', 'b93a0535890a340774a190fab2a521696b134600', 'v0.0.440', 'cf', 0.0),
('resnext56_64x1d_cifar10', '0341', 'bc7469474a3cf31622186aa86c0c837b9c05563a', 'v0.0.440', 'cf', 0.0),
('resnext56_64x1d_cifar100', '1816', '06c6c7a0bb97cd67360e624dd9ca3193969c3e06', 'v0.0.440', 'cf', 0.0),
('resnext56_64x1d_svhn', '0255', '9e9e3cc2bf26b8c691b5b2b12fb3908dd999f870', 'v0.0.440', 'cf', 0.0),
('resnext272_1x64d_cifar10', '0255', '6efe448a89da1340dca7158d12a0355d1b2d2d75', 'v0.0.440', 'cf', 0.0),
('resnext272_1x64d_cifar100', '1911', 'e9275c944ff841c29316a2728068a6162af39488', 'v0.0.440', 'cf', 0.0),
('resnext272_1x64d_svhn', '0234', '4d348e9ec9d261318d1264c61f4817de612797e4', 'v0.0.440', 'cf', 0.0),
('resnext272_2x32d_cifar10', '0274', '4e35f99476d34225bd07ed2f4274ed021fb635f3', 'v0.0.440', 'cf', 0.0),
('resnext272_2x32d_cifar100', '1834', '274ef60797974e3d7290644861facefa983bc7f2', 'v0.0.440', 'cf', 0.0),
('resnext272_2x32d_svhn', '0244', 'f792396540a630a0d51932f9c7557e5d96ddb66c', 'v0.0.440', 'cf', 0.0),
('seresnet20_cifar10', '0601', '2f392e4a48cffe1ff96b92ca28fd0f020e9d89aa', 'v0.0.442', 'cf', 0.0),
('seresnet20_cifar100', '2854', '598b585838afb8907e76c6e9af2b92417f5eeb08', 'v0.0.442', 'cf', 0.0),
('seresnet20_svhn', '0323', 'ef43ce80cc226dff6d7c0fd120daaa89fe353392', 'v0.0.442', 'cf', 0.0),
('seresnet56_cifar10', '0413', '0224e930258e0567cf18bd1b0f5ae8ffd85d6231', 'v0.0.442', 'cf', 0.0),
('seresnet56_cifar100', '2294', '9c86ec999dac74831ab3918682c1753fde447187', 'v0.0.442', 'cf', 0.0),
('seresnet56_svhn', '0264', 'a8fcc570f6ab95d188148f0070f714c052bcf0f3', 'v0.0.442', 'cf', 0.0),
('seresnet110_cifar10', '0363', '4c28f93f8fe23a216aba5cb80af8412023b42cdb', 'v0.0.442', 'cf', 0.0),
('seresnet110_cifar100', '2086', '6435b022d058e62f95bbd2bb6447cd76f0a14316', 'v0.0.442', 'cf', 0.0),
('seresnet110_svhn', '0235', '57751ac70c94c9bbe95a1229af30b5471db498b1', 'v0.0.442', 'cf', 0.0),
('seresnet164bn_cifar10', '0339', '64d051543b02cb26fb6a22220ad35bb5b80243e3', 'v0.0.442', 'cf', 0.0),
('seresnet164bn_cifar100', '1995', '121a777aa64b7249a9483baa1e8a677a7c9587df', 'v0.0.442', 'cf', 0.0),
('seresnet164bn_svhn', '0245', 'a19e2e88575459f35303a058e486a944e34f8379', 'v0.0.442', 'cf', 0.0),
('seresnet272bn_cifar10', '0339', 'baa561b6c4449558a11900ae24780d6fcdd9efdf', 'v0.0.442', 'cf', 0.0),
('seresnet272bn_cifar100', '1907', 'a29e50de59aac03cff1d657ce0653a02246c39dc', 'v0.0.442', 'cf', 0.0),
('seresnet272bn_svhn', '0238', '918ee0dea7a956bca36d23459e822488e3a0659e', 'v0.0.442', 'cf', 0.0),
('seresnet542bn_cifar10', '0347', 'e95ebdb9b79f4955731147c078e1607dd174ffe9', 'v0.0.442', 'cf', 0.0),
('seresnet542bn_cifar100', '1887', 'ddc4d5c89d56a0c560e5174194db071fcb960d81', 'v0.0.442', 'cf', 0.0),
('seresnet542bn_svhn', '0226', '5ec784aabe3030f519ca22821b7a58a30e0bf179', 'v0.0.442', 'cf', 0.0),
('sepreresnet20_cifar10', '0618', '22217b323af922b720bc044bce9556b0dde18d97', 'v0.0.443', 'cf', 0.0),
('sepreresnet20_cifar100', '2831', 'e8dab8b87dbe512dfabd7cdbaff9b08be81fb36b', 'v0.0.443', 'cf', 0.0),
('sepreresnet20_svhn', '0324', 'e7dbcc9678dfa8ce0b2699de601699d29a5cb868', 'v0.0.443', 'cf', 0.0),
('sepreresnet56_cifar10', '0451', '32637db56c6fed2a3d66778ee3335527f2d8e25d', 'v0.0.443', 'cf', 0.0),
('sepreresnet56_cifar100', '2305', 'aea4d90bc7fd0eb8f433e376d1aba8e3c0d1ac55', 'v0.0.443', 'cf', 0.0),
('sepreresnet56_svhn', '0271', 'ea024196ca9bd0ff331e8d8d3da376aecf9ea0c1', 'v0.0.443', 'cf', 0.0),
('sepreresnet110_cifar10', '0454', 'e317c56922fbf1cec478e46e49d6edd3c4ae3b03', 'v0.0.443', 'cf', 0.0),
('sepreresnet110_cifar100', '2261', '19a8d4a1563f8fb61c63a5c577f40f3363efec00', 'v0.0.443', 'cf', 0.0),
('sepreresnet110_svhn', '0259', '6291c548277580f90ed0e22845f06eb7b022f8f9', 'v0.0.443', 'cf', 0.0),
('sepreresnet164bn_cifar10', '0373', '253c0430d6e8d2ba9c4c5526beed3b2e90573fe4', 'v0.0.443', 'cf', 0.0),
('sepreresnet164bn_cifar100', '2005', '9c3ed25062e52a23f73600c1a0f99064f89b4a47', 'v0.0.443', 'cf', 0.0),
('sepreresnet164bn_svhn', '0256', 'c89523226a8a010459ebec9c48d940773946e7bf', 'v0.0.443', 'cf', 0.0),
('sepreresnet272bn_cifar10', '0339', '1ca0bed3b3ae20d55322fa2f75057edb744fb63d', 'v0.0.443', 'cf', 0.0),
('sepreresnet272bn_cifar100', '1913', 'eb75217f625dbc97af737e5878a9eab28fdf3b03', 'v0.0.443', 'cf', 0.0),
('sepreresnet272bn_svhn', '0249', '0a778e9d68f6921463563ef84054969221809aef', 'v0.0.443', 'cf', 0.0),
('sepreresnet542bn_cifar10', '0309', '7764e8bddba21c75b8f8d4775093721d859f850c', 'v0.0.443', 'cf', 0.0),
('sepreresnet542bn_cifar100', '1945', '969d2bf0a8d213757486e18c180ba14058e08eac', 'v0.0.443', 'cf', 0.0),
('sepreresnet542bn_svhn', '0247', '8e2427367762cf20b67b407e2a1ec8479b0ad41c', 'v0.0.443', 'cf', 0.0),
('pyramidnet110_a48_cifar10', '0372', '3b6ab16073fb0ff438d4376d320be9b119aee362', 'v0.0.444', 'cf', 0.0),
('pyramidnet110_a48_cifar100', '2095', '3490690ae62adc4b91dc29ba06f9dc2abf272fce', 'v0.0.444', 'cf', 0.0),
('pyramidnet110_a48_svhn', '0247', '1582739049630e1665b577781ccca1e65f961749', 'v0.0.444', 'cf', 0.0),
('pyramidnet110_a84_cifar10', '0298', 'bf303f3414123bdf79cb23d3316dd171df74f5d4', 'v0.0.444', 'cf', 0.0),
('pyramidnet110_a84_cifar100', '1887', '85789d68d11ad663a53ed921ce6fb28a98248874', 'v0.0.444', 'cf', 0.0),
('pyramidnet110_a84_svhn', '0243', 'aacb5f882c7810181c0d4de061c2a76dfbf4925b', 'v0.0.444', 'cf', 0.0),
('pyramidnet110_a270_cifar10', '0251', '983d99830e7bb23ca0123ec47dfa05143eb8a37e', 'v0.0.444', 'cf', 0.0),
('pyramidnet110_a270_cifar100', '1710', 'cc58021f2406c3593a51f62d03fea714d0649036', 'v0.0.444', 'cf', 0.0),
('pyramidnet110_a270_svhn', '0238', 'b8742320795657a0b51d35226c2e14fc76acac11', 'v0.0.444', 'cf', 0.0),
('pyramidnet164_a270_bn_cifar10', '0242', 'aa879193cd4730fd06430b494c11497121fad2df', 'v0.0.444', 'cf', 0.0),
('pyramidnet164_a270_bn_cifar100', '1670', '25ddf056b681987c1db76b60a08a1e1a7830a51e', 'v0.0.444', 'cf', 0.0),
('pyramidnet164_a270_bn_svhn', '0234', '94bb4029e52688f7616d5fd680acacf7c6e3cd4e', 'v0.0.444', 'cf', 0.0),
('pyramidnet200_a240_bn_cifar10', '0244', 'c269bf7d485a13a9beed9c0aade75ff959584ef9', 'v0.0.444', 'cf', 0.0),
('pyramidnet200_a240_bn_cifar100', '1609', 'd2b1682287b6047477c3efd322f305957bb393ef', 'v0.0.444', 'cf', 0.0),
('pyramidnet200_a240_bn_svhn', '0232', '77f2380c1fd77abb80b830e0d44f2986fde28ec9', 'v0.0.444', 'cf', 0.0),
('pyramidnet236_a220_bn_cifar10', '0247', '26aac5d0938a96902484f0a51f7f3440551c9c96', 'v0.0.444', 'cf', 0.0),
('pyramidnet236_a220_bn_cifar100', '1634', '37d5b197d45c3985ad3a9ba346f148e63cd271fb', 'v0.0.444', 'cf', 0.0),
('pyramidnet236_a220_bn_svhn', '0235', '6a9a8b0a5fbcce177c8b4449ad138b6f3a94f2bb', 'v0.0.444', 'cf', 0.0),
('pyramidnet272_a200_bn_cifar10', '0239', 'b57f64f1964798fac3d62fd796c87df8132cf18c', 'v0.0.444', 'cf', 0.0),
('pyramidnet272_a200_bn_cifar100', '1619', '5c233384141f7700da643c53f4245d2f0d00ded7', 'v0.0.444', 'cf', 0.0),
('pyramidnet272_a200_bn_svhn', '0240', '0a389e2f1811af7cacc2a27b6df748a7c46d951a', 'v0.0.444', 'cf', 0.0),
('densenet40_k12_cifar10', '0561', 'e6e20ebfcc60330050d4c1eb94d03d8fadb738df', 'v0.0.445', 'cf', 0.0),
('densenet40_k12_cifar100', '2490', 'ef38ff655136f7921e785836c659be7f1d11424d', 'v0.0.445', 'cf', 0.0),
('densenet40_k12_svhn', '0305', '7d5860ae4c8f912a4374e6214720d13ad52f3ffb', 'v0.0.445', 'cf', 0.0),
('densenet40_k12_bc_cifar10', '0643', '58950791713ee0ec19f6e1bc6e6e3731fc4a9484', 'v0.0.445', 'cf', 0.0),
('densenet40_k12_bc_cifar100', '2841', 'c7fbb0f4e74cafbd0e329597e63fbc81682c8e90', 'v0.0.445', 'cf', 0.0),
('densenet40_k12_bc_svhn', '0320', '77fd3ddf577ba336f7eac64f0ac6afaabbb25fd1', 'v0.0.445', 'cf', 0.0),
('densenet40_k24_bc_cifar10', '0452', '61a7fe9c0654161991da1e4eb1e0286d451d8cec', 'v0.0.445', 'cf', 0.0),
('densenet40_k24_bc_cifar100', '2267', 'b3878e8252d7ae1c53b6d2b5c6f77a857c281e9b', 'v0.0.445', 'cf', 0.0),
('densenet40_k24_bc_svhn', '0290', 'b8a231f7cd23b122bb8d9afe362c6de2663c1241', 'v0.0.445', 'cf', 0.0),
('densenet40_k36_bc_cifar10', '0404', 'ce27624f5701f020d2feff0e88e69da07b0ef958', 'v0.0.445', 'cf', 0.0),
('densenet40_k36_bc_cifar100', '2050', '045ae83a5ee3d1a85864cadadeb537242138c2d8', 'v0.0.445', 'cf', 0.0),
('densenet40_k36_bc_svhn', '0260', 'a176dcf180f086d88bbf4ff028b084bf02394a35', 'v0.0.445', 'cf', 0.0),
('densenet100_k12_cifar10', '0366', 'fc483c0bdd58e5013a3910f939334d5f40c65438', 'v0.0.445', 'cf', 0.0),
('densenet100_k12_cifar100', '1965', '4f0083d6698d42165c8b326c1e4beda6d9679796', 'v0.0.445', 'cf', 0.0),
('densenet100_k12_svhn', '0260', 'e810c38067bf34dc679caaeb4021623f2277b6b8', 'v0.0.445', 'cf', 0.0),
('densenet100_k24_cifar10', '0313', '7f9ee9b3787c2540c4448f424c504f0509000234', 'v0.0.445', 'cf', 0.0),
('densenet100_k24_cifar100', '1808', 'b0842c59c00f14df58d0f8bbac8348837e30e751', 'v0.0.445', 'cf', 0.0),
('densenet100_k12_bc_cifar10', '0416', '66beb8fc89f7e40d2b529e0f3270549324b5b784', 'v0.0.445', 'cf', 0.0),
('densenet100_k12_bc_cifar100', '2119', 'c1b857d51eb582eee8dbd7250d05871e40a7f4c4', 'v0.0.445', 'cf', 0.0),
('densenet190_k40_bc_cifar10', '0252', '9cc5cfcbef9425227370ac8c6404cfc1e3edbf55', 'v0.0.445', 'cf', 0.0),
('densenet250_k24_bc_cifar10', '0267', '3217a1b3c61afc9d08bc4b43bff4aac103da0012', 'v0.0.445', 'cf', 0.0),
('densenet250_k24_bc_cifar100', '1739', '02d967b564c48b25117aac6cd7b095fd5d30d4d5', 'v0.0.445', 'cf', 0.0),
('resnet10_cub', '2758', '1a6846b3854d1942997d7082e94b330ddce3db19', 'v0.0.446', 'cub', 0.0),
('resnet12_cub', '2668', '03c8073655ae51f21ceed7d7f86f9ed6169fc310', 'v0.0.446', 'cub', 0.0),
('resnet14_cub', '2435', '24b0bfebaa0d1b4442fa63a659d22de8ff594118', 'v0.0.446', 'cub', 0.0),
('resnet16_cub', '2328', '81cc8192c880c687175d636a0339e16463c61627', 'v0.0.446', 'cub', 0.0),
('resnet18_cub', '2335', '198bdc26bbfaad777ea6d494c41b9d66a493aac7', 'v0.0.446', 'cub', 0.0),
('resnet26_cub', '2264', '545967849063af9b5ec55a5cf339f5897f394e85', 'v0.0.446', 'cub', 0.0),
('seresnet10_cub', '2749', '484fc1661dda247db32dd6a54b88dc156da5156c', 'v0.0.446', 'cub', 0.0),
('seresnet12_cub', '2611', '0e5b4e23f30add924f8cad41704cb335a36b2049', 'v0.0.446', 'cub', 0.0),
('seresnet14_cub', '2375', '56c268728f7343aa1410cb2f046860c34428b123', 'v0.0.446', 'cub', 0.0),
('seresnet16_cub', '2321', 'ed3ead791be4af44aa1202f0dbf4b26fdb770963', 'v0.0.446', 'cub', 0.0),
('seresnet18_cub', '2309', 'f699f05f2a2ce41dae01d5d6c180ec2569356f0a', 'v0.0.446', 'cub', 0.0),
('seresnet26_cub', '2258', 'c02ba47493bc9185a7fb06584e23b5a740082e77', 'v0.0.446', 'cub', 0.0),
('mobilenet_w1_cub', '2346', 'b8f24c14b9ed9629efb161510547e30c4a37edc2', 'v0.0.446', 'cub', 0.0),
('proxylessnas_mobile_cub', '2202', '73ceed5a6a3f870b306da0c48318d969e53d6340', 'v0.0.446', 'cub', 0.0),
('pspnet_resnetd101b_voc', '7599', 'fbe47bfce77b8c9cab3c9c5913f6a42c04cce946', 'v0.0.448', 'voc', 0.0),
('pspnet_resnetd50b_ade20k', '2712', 'f4fadf0b3f5a39e1ab070736d792bd9259c0d371', 'v0.0.450', 'voc', 0.0),
('pspnet_resnetd101b_ade20k', '3259', 'ac8569f44bd646ee8875d2b3eae0ab54c72c4904', 'v0.0.450', 'voc', 0.0),
('pspnet_resnetd101b_coco', '5438', 'b64ff2dcde6d3f989c45cec2a021d3769f4cb9eb', 'v0.0.451', 'voc', 0.0),
('pspnet_resnetd101b_cityscapes', '5760', '6dc20af68e9de31b663469b170e75cb016bd3a1f', 'v0.0.449', 'cs', 0.0),
('deeplabv3_resnetd101b_voc', '7560', 'e261b6fd9c4878c41bfa088777ea53fcddb4fa51', 'v0.0.448', 'voc', 0.0),
('deeplabv3_resnetd152b_voc', '7791', '72038caba5f552c77d08ad768bda004643f1c53e', 'v0.0.448', 'voc', 0.0),
('deeplabv3_resnetd50b_ade20k', '3172', '2ba069a73d81d6b2ceaf7f2c57f2fe3dd673b78b', 'v0.0.450', 'voc', 0.0),
('deeplabv3_resnetd101b_ade20k', '3488', '08c90933a65061a56e3b22e9c143340a98455075', 'v0.0.450', 'voc', 0.0),
('deeplabv3_resnetd101b_coco', '5865', '39525a1333ebf12ca32578f32831b3e5b22a887a', 'v0.0.451', 'voc', 0.0),
('deeplabv3_resnetd152b_coco', '6067', 'f4dabc62dc8209e7a9adf0dceef97837b06b21c9', 'v0.0.451', 'voc', 0.0),
('fcn8sd_resnetd101b_voc', '8039', 'e140349ce60ad3943b535efb081b3e9c2a58f6e9', 'v0.0.448', 'voc', 0.0),
('fcn8sd_resnetd50b_ade20k', '3310', 'd440f859bad1c84790aa1c3e1c0addc21b171d4a', 'v0.0.450', 'voc', 0.0),
('fcn8sd_resnetd101b_ade20k', '3550', '970d968a1fb44670993b065c1603a6a7c0bd57a1', 'v0.0.450', 'voc', 0.0),
('fcn8sd_resnetd101b_coco', '5968', '69c001b3875c5399dfc1281eb5a051bafef40e4b', 'v0.0.451', 'voc', 0.0),
('icnet_resnetd50b_cityscapes', '6060', '1e53e1d1724e61cc740cfbc818ca6e14015185ef', 'v0.0.457', 'cs', 0.0),
('alphapose_fastseresnet101b_coco', '7415', 'd1f0464a0f2c520d8690d49d09fe1426b0ab3eab', 'v0.0.454', 'cocohpe', 0.0),
('simplepose_resnet18_coco', '6631', '4d907c70a6f3ccaba321c05406ce038351e0c67f', 'v0.0.455', 'cocohpe', 0.0),
('simplepose_resnet50b_coco', '7102', '74506b66735333e3deab5908d309d3ec04c94861', 'v0.0.455', 'cocohpe', 0.0),
('simplepose_resnet101b_coco', '7244', '6f9e08d6afa08e83176e8e04f7566e255265e080', 'v0.0.455', 'cocohpe', 0.0),
('simplepose_resnet152b_coco', '7253', 'c018fb87bb8e5f5d8d6daa6a922869b2f36481cf', 'v0.0.455', 'cocohpe', 0.0),
('simplepose_resneta50b_coco', '7170', 'c9ddc1c90ddac88b1f64eb962e1bda87887668a5', 'v0.0.455', 'cocohpe', 0.0),
('simplepose_resneta101b_coco', '7297', '6db62b714be632359020c972bedb459e5210820f', 'v0.0.455', 'cocohpe', 0.0),
('simplepose_resneta152b_coco', '7344', 'f65954b9df20bf9fa64a9791563729fa51983cf5', 'v0.0.455', 'cocohpe', 0.0),
('simplepose_mobile_resnet18_coco', '6625', '8f3e5cc4c6af306c23f0882887d7b36ee0b1079a', 'v0.0.456', 'cocohpe', 0.0), # noqa
('simplepose_mobile_resnet50b_coco', '7110', 'e8f61fdaf7aacbe58d006129943988ae95c9aef3', 'v0.0.456', 'cocohpe', 0.0), # noqa
('simplepose_mobile_mobilenet_w1_coco', '6410', '27c918b95148b87944eec36ac422bf18792513ae', 'v0.0.456', 'cocohpe', 0.0), # noqa
('simplepose_mobile_mobilenetv2b_w1_coco', '6374', '4bcc3462fb2af46ed6daed78d15920a274e58051', 'v0.0.456', 'cocohpe', 0.0), # noqa
('simplepose_mobile_mobilenetv3_small_w1_coco', '5434', '1cfee871467e99e7af23e5135bb9a4765f010a05', 'v0.0.456', 'cocohpe', 0.0), # noqa
('simplepose_mobile_mobilenetv3_large_w1_coco', '6367', '8c8583fbe6d60355c232a10b5de8a455a38ba073', 'v0.0.456', 'cocohpe', 0.0), # noqa
('lwopenpose2d_mobilenet_cmupan_coco', '3999', '626b66cb1d36d0721b59d5acaa8d08d7690ea830', 'v0.0.458', 'cocohpe', 0.0), # noqa
('lwopenpose3d_mobilenet_cmupan_coco', '3999', 'df9b1c5f667deb93a87f69479ce92093e7c9f3b6', 'v0.0.458', 'cocohpe', 0.0), # noqa
('ibppose_coco', '6487', '79500f3d5dd990fd63544e3e3ca65f0382b06e44', 'v0.0.459', 'cocohpe', 0.0),
]}
imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob'
def get_model_name_suffix_data(model_name):
if model_name not in _model_sha1:
raise ValueError("Pretrained model for {name} is not available.".format(name=model_name))
error, sha1_hash, repo_release_tag, ds, scale = _model_sha1[model_name]
return error, sha1_hash, repo_release_tag
def get_model_file(model_name,
local_model_store_dir_path=os.path.join("~", ".tensorflow", "models")):
"""
    Return the location of the pretrained model file on the local file system. This function will download from the
    online model zoo when the model cannot be found locally or its hash does not match. The root directory will be created if it doesn't exist.
Parameters
----------
model_name : str
Name of the model.
local_model_store_dir_path : str, default $TENSORFLOW_HOME/models
Location for keeping the model parameters.
Returns
-------
file_path
Path to the requested pretrained model file.
"""
error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name)
short_sha1 = sha1_hash[:8]
file_name = "{name}-{error}-{short_sha1}.tf2.h5".format(
name=model_name,
error=error,
short_sha1=short_sha1)
local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path)
file_path = os.path.join(local_model_store_dir_path, file_name)
if os.path.exists(file_path):
if _check_sha1(file_path, sha1_hash):
return file_path
else:
logging.warning("Mismatch in the content of model file detected. Downloading again.")
else:
logging.info("Model file not found. Downloading to {}.".format(file_path))
if not os.path.exists(local_model_store_dir_path):
os.makedirs(local_model_store_dir_path)
zip_file_path = file_path + ".zip"
_download(
url="{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip".format(
repo_url=imgclsmob_repo_url,
repo_release_tag=repo_release_tag,
file_name=file_name),
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(local_model_store_dir_path)
os.remove(zip_file_path)
if _check_sha1(file_path, sha1_hash):
return file_path
else:
raise ValueError("Downloaded file has different hash. Please try again.")
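# Illustrative usage (not part of the original module; the model name below is one of the
# entries in the `_model_sha1` table above and the cache directory is the default):
#
#     weights_path = get_model_file("resnet10_cub")
#     # resolves to ~/.tensorflow/models/resnet10_cub-2758-1a6846b3.tf2.h5,
#     # downloading and unzipping the release asset on first use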
def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
"""
    Download a given URL.
Parameters
----------
url : str
URL to download
path : str, optional
        Destination path to store the downloaded file. By default it is stored in the
        current directory with the same name as in the URL.
overwrite : bool, optional
Whether to overwrite destination file if already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries : integer, default 5
        The number of times to attempt the download in case of failure or non-200 return codes
verify_ssl : bool, default True
Verify SSL certificates.
Returns
-------
str
The file path of the downloaded file.
"""
import warnings
try:
import requests
except ImportError:
class requests_failed_to_import(object):
pass
requests = requests_failed_to_import
if path is None:
fname = url.split("/")[-1]
# Empty filenames are invalid
assert fname, "Can't construct file-name from this URL. Please set the `path` option manually."
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split("/")[-1])
else:
fname = path
assert retries >= 0, "Number of retries should be at least 0"
if not verify_ssl:
warnings.warn(
"Unverified HTTPS request is being made (verify_ssl=False). "
"Adding certificate verification is strongly advised.")
if overwrite or not os.path.exists(fname) or (sha1_hash and not _check_sha1(fname, sha1_hash)):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname)
while retries + 1 > 0:
            # Disable pylint too-broad Exception warning
# pylint: disable=W0703
try:
print("Downloading {} from {}...".format(fname, url))
r = requests.get(url, stream=True, verify=verify_ssl)
if r.status_code != 200:
raise RuntimeError("Failed downloading url {}".format(url))
with open(fname, "wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if sha1_hash and not _check_sha1(fname, sha1_hash):
raise UserWarning("File {} is downloaded but the content hash does not match."
" The repo may be outdated or download may be incomplete. "
"If the `repo_url` is overridden, consider switching to "
"the default repo.".format(fname))
break
except Exception as e:
retries -= 1
if retries <= 0:
raise e
else:
print("download failed, retrying, {} attempt{} left"
.format(retries, "s" if retries > 1 else ""))
return fname
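# Usage sketch (illustrative; assumes `requests` is installed and the URL/path are placeholders):
# download with retries, optionally verifying a known sha1.
#
#     fname = _download(
#         url="https://example.com/archive.zip",
#         path="/tmp/archive.zip",
#         sha1_hash=None,  # pass a hex digest to skip re-downloading a valid file
#         retries=3)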
def _check_sha1(filename, sha1_hash):
"""
Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, "rb") as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash
| 85.28863
| 140
| 0.693153
|
81ab95abbb5c997a4fc4ad2b394eef7b81910e12
| 919
|
py
|
Python
|
backend/app/app/api/api_v1/report/__init__.py
|
jimorsm/vue-element-admin-fastapi
|
3ffc7dc3d2be988e544f339af466538cb0708d25
|
[
"MIT"
] | null | null | null |
backend/app/app/api/api_v1/report/__init__.py
|
jimorsm/vue-element-admin-fastapi
|
3ffc7dc3d2be988e544f339af466538cb0708d25
|
[
"MIT"
] | null | null | null |
backend/app/app/api/api_v1/report/__init__.py
|
jimorsm/vue-element-admin-fastapi
|
3ffc7dc3d2be988e544f339af466538cb0708d25
|
[
"MIT"
] | null | null | null |
from typing import Any
from fastapi import APIRouter, Request, Depends
from sqlalchemy.orm import Session
from fastapi.responses import StreamingResponse
from app.api import deps
from app.api.api_v1.report.gen_report import Report
router = APIRouter()
@router.get("/report/excel_generate/{excel_name}", tags=["report"], exclude_dependencies=True)
def excel_generate(*, excel_name: str = "", request: Request, db: Session = Depends(deps.get_db)) -> Any:
"""
    Handle Excel uniformly via dynamic import: template download / data export
"""
report = Report(code=excel_name, query_params=request.query_params).module
if request.query_params.get("template", "1") == "1":
        bio = report.get_template()  # template
    else:
        bio = report.get_instance(db)  # instance
file_name = report.file_name.encode('utf-8').decode('latin1')
return StreamingResponse(bio, headers={'Content-Disposition': f'attachment; filename={file_name}.xlsx'})
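# Example request (illustrative; host, API prefix and the `sales` excel_name are assumptions):
# `template=1` streams the blank template, any other value streams the data instance
# built from the database session.
#
#     GET http://localhost:8000/report/excel_generate/sales?template=0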
| 36.76
| 108
| 0.723613
|
583218c96f655d4e3f5d49c227591c356da50513
| 2,392
|
py
|
Python
|
Notebooks/test-gdxp.py
|
henrydambanemuya/gdelt-colombia
|
65f84912df6f7a1137b685fbc0a0cc9a3e124b41
|
[
"MIT"
] | null | null | null |
Notebooks/test-gdxp.py
|
henrydambanemuya/gdelt-colombia
|
65f84912df6f7a1137b685fbc0a0cc9a3e124b41
|
[
"MIT"
] | null | null | null |
Notebooks/test-gdxp.py
|
henrydambanemuya/gdelt-colombia
|
65f84912df6f7a1137b685fbc0a0cc9a3e124b41
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 7 05:34:18 2017
@author: Administrator
"""
# Import useful libraries
import gdeltxp
import gdeltviz
import operator
import scipy as sp
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from math import isnan
from collections import Counter
from collections import OrderedDict
plt.style.use('seaborn-whitegrid')
# Declare global variables
all_events = pd.read_csv('C:/Users/Administrator/Dropbox/GDELT/all_events.csv').sort_values('SQLDATE', ascending=1)
goldstein_codes = pd.read_csv('C:/Users/Administrator/Dropbox/GDELT/goldstein_codes.csv')
event_codes = pd.read_csv('C:/Users/Administrator/Dropbox/GDELT/event_codes.csv')
events, goldstein = {}, {}
# Populate event names dictionary
for index, row in event_codes.iterrows():
events[row.CAMEOEVENTCODE] = row.EVENTDESCRIPTION
events[106] = 'unknown'
# Populate goldstein scale dictionary
for index, row in goldstein_codes.iterrows():
goldstein[int(row.CAMEOEVENTCODE)] = row.GOLDSTEINSCALE
goldstein[106] = 0
#print(all_events.columns)
#print(goldstein_codes.columns)
#print(event_codes.columns)
# Event Summary
gdeltxp.eventsSummary(all_events)
# Actor Type Codes Counts
ActorType1Codes = gdeltxp.actorType1Codes(all_events)
print(ActorType1Codes)
# Actor Type Code Pie Chart
gdeltviz.pieChart(list(ActorType1Codes.keys()), list(ActorType1Codes.values()))
# Prominent Actors
ActorNames = gdeltxp.actorNames(all_events)
print(ActorNames)
# Prominent Actors Visualization
gdeltviz.pieChart(list(ActorNames.keys()), list(ActorNames.values()))
dates = sorted([key for key in Counter(all_events['SQLDATE']).keys()])
farc1 = [all_events.loc[all_events['SQLDATE'] == date].loc[all_events['Actor1Name'] == 'FARC', 'GoldsteinScale'].sum() for date in dates] # / (len(all_events.loc[all_events['SQLDATE'] == date].loc[all_events['Actor1Name'] == 'FARC', 'GoldsteinScale'])+1)
farc2 = [all_events.loc[all_events['SQLDATE'] == date].loc[all_events['Actor2Name'] == 'FARC', 'GoldsteinScale'].sum() for date in dates] # / (len(all_events.loc[all_events['SQLDATE'] == date].loc[all_events['Actor2Name'] == 'FARC', 'GoldsteinScale'])+1)
window = 1
farc = gdeltxp.movingAverage([farc1[i] + farc2[i] for i in range(len(farc1))], window)
print(farc[:10])
| 36.8
| 255
| 0.738294
|
38ae7722bac6619235bc12b68ee36754a1686c15
| 9,853
|
py
|
Python
|
utils.py
|
statgen/bravo_data_prep
|
dd483f5e96566243f27769ecbfa41c055ab8d22b
|
[
"MIT"
] | null | null | null |
utils.py
|
statgen/bravo_data_prep
|
dd483f5e96566243f27769ecbfa41c055ab8d22b
|
[
"MIT"
] | null | null | null |
utils.py
|
statgen/bravo_data_prep
|
dd483f5e96566243f27769ecbfa41c055ab8d22b
|
[
"MIT"
] | null | null | null |
import traceback
from collections import OrderedDict
from operator import itemgetter
AF_BUCKETS = [0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]
METRICS = {
'BaseQRankSum': {},
'ClippingRankSum': {},
'DP': {'name': 'Total Depth'},
'FS': {},
'InbreedingCoeff': {},
'MQ': {'name': 'Mapping Quality'},
'MQRankSum': {},
'QD': {},
'ReadPosRankSum': {},
'VQSLOD': {},
'SVM': {'name': 'SVM Score'},
'FIBC_P': {'name': 'In-Breeding Coefficient'},
'FIBC_I': {'name': 'In-Breeding Coefficient (pop-adjusted)'},
'HWE_SLP_P': {'name': 'HWE signed log p-value'},
'HWE_SLP_I': {'name': 'HWE signed log p-value (pop-adjusted)'},
'ABE': {'name': 'Expected Allele Balance'},
'ABZ': {'name': 'Allele Balance Z-score'},
'BQZ': {'name': 'BaseQual-Allele correlation'},
'CYZ': {'name': 'Cycle-Allele correlation'},
'STZ': {'name': 'Strand-Allele correlation'},
'IOR': {'name': 'Inflated Rate of Observing other alleles (log10)'},
'NM0': {'name': 'Avg num mismatches in reads with ref alleles'},
'NM1': {'name': 'Avg num mismatches in reads with alt alleles'},
'NMZ': {'name': 'Mismatches/read-Allele correlation'},
}
for k,v in METRICS.items(): v.setdefault('name',k)
class Consequence(object):
# This is a slightly modified version of VEP's recommendations - see http://useast.ensembl.org/info/genome/variation/predicted_data.html#consequences
# The ordering of the LoF variants is from snpEff's recommendations - see http://snpeff.sourceforge.net/VCFannotationformat_v1.0.pdf
# To find all variants that are used, run:
# mongo --eval 'db.variants.distinct("vep_annotations.Consequence").forEach(printjson)' topmed | tr -d '",' | tr "&" "\n" | sort -u
_lof_csqs = [
"transcript_ablation",
"frameshift_variant",
"stop_gained",
"stop_lost",
"start_lost",
"splice_acceptor_variant",
"splice_donor_variant",
"transcript_amplification",
]
_missense_csqs = [
"inframe_insertion",
"inframe_deletion",
"missense_variant",
"protein_altering_variant",
]
_synonymous_csqs = [
"splice_region_variant",
"incomplete_terminal_codon_variant",
"stop_retained_variant",
"synonymous_variant",
]
_other_csqs = [
"coding_sequence_variant",
"mature_miRNA_variant",
"5_prime_UTR_variant",
"3_prime_UTR_variant",
"non_coding_transcript_exon_variant",
"intron_variant",
"NMD_transcript_variant",
"non_coding_transcript_variant",
"upstream_gene_variant",
"downstream_gene_variant",
"TFBS_ablation",
"TFBS_amplification",
"TF_binding_site_variant",
"regulatory_region_ablation",
"regulatory_region_amplification",
"feature_elongation",
"regulatory_region_variant",
"feature_truncation",
"intergenic_variant",
]
csqs = _lof_csqs + _missense_csqs + _synonymous_csqs + _other_csqs
assert len(csqs) == len(set(csqs)) # No dupes!
csqidxs = {csq:i for i,csq in enumerate(csqs)}
as_obj = {
'order':csqs,
'n_lof':len(_lof_csqs), # todo: instead use `last_lof_csqidx`, likewise below
'n_lof_mis':len(_lof_csqs)+len(_missense_csqs),
'n_lof_mis_syn':len(_lof_csqs)+len(_missense_csqs)+len(_synonymous_csqs),
}
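# For example (illustrative): severity comparisons use these indices, e.g.
#
#     Consequence.csqidxs["stop_gained"]  # == 2
#     Consequence.csqidxs["stop_gained"] < Consequence.as_obj["n_lof"]  # True -> counts as LoF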
class Xpos:
CHROMOSOME_STRINGS = [str(x) for x in range(1, 22+1)] + ['X', 'Y', 'M']
CHROMOSOME_STRING_TO_NUMBER = {chrom: idx+1 for idx,chrom in enumerate(CHROMOSOME_STRINGS) }
CHROMOSOME_NUMBER_TO_STRING = {chrom_num: chrom for chrom,chrom_num in CHROMOSOME_STRING_TO_NUMBER.items()}
@staticmethod
def from_chrom_pos(chrom, pos):
if chrom.startswith('chr'): chrom = chrom[3:]
return Xpos.CHROMOSOME_STRING_TO_NUMBER[chrom] * int(1e9) + pos
@staticmethod
def to_chrom_pos(xpos):
pos = xpos % int(1e9)
        chrom = Xpos.CHROMOSOME_NUMBER_TO_STRING[int(xpos) // int(1e9)]
return (chrom, pos)
@staticmethod
def to_pos(xpos):
return xpos % int(1e9)
@staticmethod
def check_chrom(chrom):
if chrom.startswith('chr'): chrom = chrom[3:]
return chrom in Xpos.CHROMOSOME_STRING_TO_NUMBER
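# Illustrative round trip (not in the original file): chromosome 2 at position 1234
# encodes to 2_000_001_234 and decodes back via integer division.
#
#     xpos = Xpos.from_chrom_pos("chr2", 1234)   # 2000001234
#     chrom, pos = Xpos.to_chrom_pos(xpos)       # ("2", 1234)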
class ConsequenceDrilldown(object):
@staticmethod
def from_variant(variant):
"""
Returns something like {"frameshift": {"ENSG00001234": [{"SYMBOL": "APOL1", "Gene": "ENSG00001234", "Feature": "ENST00002345", ...}]}}
"""
if 'vep_annotations' not in variant:
return {}
consequences_drilldown = OrderedDict()
for annotation in variant['vep_annotations']:
consequences_drilldown.setdefault(Consequence.csqs[annotation['worst_csqidx']], {}).setdefault(annotation['Gene'], []).append(annotation)
# Sort the consequences
for csq in consequences_drilldown:
for gene in consequences_drilldown[csq]:
consequences_drilldown[csq][gene] = sorted(consequences_drilldown[csq][gene], key=lambda ann: (ann.get('HGVS'), ann.get('Feature')))
return consequences_drilldown
@staticmethod
def split_into_two_columns(consequences):
'''
Try to make two columns of similar height, but with the first a little taller.
Returns the names of the consequences (ie, the keys), but not the values (because that'd be a pain to use).
'''
if len(consequences) == 0:
return ([], [])
elif len(consequences) == 1:
            return (list(consequences.keys()), [])
consequence_heights = [0]
        for annotations in list(consequences.values())[0].values():
consequence_heights[0] += len(annotations) # The number of annotations in this gene (because all are shown in the first consequence)
# TODO: check for the other things displayed in variant_details.html
        for csq in list(consequences.values())[1:]:
consequence_heights.append(len(csq)) # The number of genes in this consequence (because annotations are collapsed in these consequences)
index = ConsequenceDrilldown._get_midpoint_index(consequence_heights)
        keys = list(consequences.keys())
        return (keys[:index],
                keys[index:])
@staticmethod
def _get_midpoint_index(lst):
'''
for test_lst in [[1], [1,2,3], [3,1,1], [3,1,1,1], [3,1,1,1,1]]:
index = get_midpoint_index(test_lst)
assert 0 < index <= len(test_lst)
assert sum(test_lst[:index]) >= sum(test_lst[index:])
assert sum(test_lst[:index-1]) < sum(test_lst[index-1:])
'''
half = sum(lst) / 2.0
acc = 0
for index, num in enumerate(lst):
if acc >= half:
return index
acc += num
return len(lst)
@staticmethod
def get_top_gene_and_HGVSs(consequences_drilldown):
"""Returns something like ("APOL1", ["Gly70Ter", "Gly88Ter"])"""
if not consequences_drilldown:
return None, []
        gene_drilldowns_for_top_csq = list(consequences_drilldown.values())[0]
if len(gene_drilldowns_for_top_csq) != 1: # we need exactly one gene
return None, []
        annotation_drilldowns_for_top_csq = list(gene_drilldowns_for_top_csq.values())[0]
        gene_symbol_for_top_csq = annotation_drilldowns_for_top_csq[0].get('SYMBOL') or list(gene_drilldowns_for_top_csq.keys())[0]
HGVSs_for_top_csq = sorted({ann['HGVS'] for ann in annotation_drilldowns_for_top_csq if ann.get('HGVS')})
return gene_symbol_for_top_csq, sorted(HGVSs_for_top_csq)
class defaultdict_that_passes_key_to_default_factory(dict):
"A class like collections.defaultdict, but where the default_factory takes the missing key as an argument."
def __init__(self, default_factory):
self._default_factory = default_factory
super(defaultdict_that_passes_key_to_default_factory, self).__init__()
def __missing__(self, key):
value = self[key] = self._default_factory(key)
return value
def indent_pprint(obj):
import pprint
    print('\n'.join('####' + line for line in pprint.pformat(obj).split('\n')))
def mkdict(*dicts, **ret):
for d in dicts: ret.update({k:True for k in d} if isinstance(d, (set,list)) else d)
return ret
def clamp(num, min_value, max_value):
return max(min_value, min(max_value, num))
def sortedgroupby(iterable, key):
from itertools import groupby
return groupby(sorted(iterable, key=key), key=key)
def histogram_from_counter(counter, num_bins=10, bin_range=None):
from math import floor
if bin_range is None:
        bin_range = (min(counter.keys()), max(counter.keys()))
bin_width = float(bin_range[1] - bin_range[0]) / num_bins
if bin_width == 0:
        only_key = next(iter(counter.keys()))
        print(f"Warning: metric always had the value {only_key}")
        return {'left_edges': [only_key-1, only_key, only_key+1], 'mids': [only_key-1, only_key, only_key+1], 'counts': [0, counter[only_key], 0]}
bin_left_edges = [bin_range[0] + bin_width * i for i in range(num_bins)]
bin_counts = [0]*num_bins
    for key, count in counter.items():
bin_i = (key - bin_range[0]) / bin_width
try:
bin_i = int(floor(bin_i))
except:
print(f"error on: {bin_i} {key} {bin_range[0]} {bin_range[1]} {bin_width}")
raise
bin_i = clamp(bin_i, min_value=0, max_value=num_bins-1)
bin_counts[bin_i] += count
bin_mids = [left_edge + bin_width/2.0 for left_edge in bin_left_edges]
return {'left_edges': bin_left_edges, 'mids': bin_mids, 'counts': bin_counts}
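# Quick sketch of the bucketing above (illustrative, using collections.Counter):
#
#     from collections import Counter
#     h = histogram_from_counter(Counter([1, 2, 2, 9]), num_bins=4)
#     # h['left_edges'] == [1.0, 3.0, 5.0, 7.0] and h['counts'] == [3, 0, 0, 1]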
| 43.214912
| 153
| 0.639907
|
90f938d31f8d98b67f38e14438ad5a0ce83897ba
| 5,817
|
py
|
Python
|
examples/titanic/assets/algo_random_forest/algo.py
|
cupcicm/substra
|
19eeec1dda02cce0e10ef6ed285636e974a6e77a
|
[
"Apache-2.0"
] | null | null | null |
examples/titanic/assets/algo_random_forest/algo.py
|
cupcicm/substra
|
19eeec1dda02cce0e10ef6ed285636e974a6e77a
|
[
"Apache-2.0"
] | null | null | null |
examples/titanic/assets/algo_random_forest/algo.py
|
cupcicm/substra
|
19eeec1dda02cce0e10ef6ed285636e974a6e77a
|
[
"Apache-2.0"
] | null | null | null |
import re
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
import numpy as np
import substratools as tools
class Algo(tools.algo.Algo):
def _normalize_X(self, X):
# Relatives
X['relatives'] = X['SibSp'] + X['Parch']
X.loc[X['relatives'] > 0, 'not_alone'] = 0
X.loc[X['relatives'] == 0, 'not_alone'] = 1
X['not_alone'] = X['not_alone'].astype(int)
# Passenger ID
X = X.drop(['PassengerId'], axis=1)
# Cabin
deck = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "U": 8}
X['Cabin'] = X['Cabin'].fillna("U0")
X['Deck'] = X['Cabin'].map(
lambda x: re.compile("([a-zA-Z]+)").search(x).group())
X['Deck'] = X['Deck'].map(deck)
X['Deck'] = X['Deck'].fillna(0)
X['Deck'] = X['Deck'].astype(int)
X = X.drop(['Cabin'], axis=1)
# Age
mean = X["Age"].mean()
std = X["Age"].std()
is_null = X["Age"].isnull().sum()
# compute random numbers between the mean, std and is_null
rand_age = np.random.randint(mean - std, mean + std, size=is_null)
# fill NaN values in Age column with random values generated
age_slice = X["Age"].copy()
age_slice[np.isnan(age_slice)] = rand_age
X["Age"] = age_slice
X["Age"] = X["Age"].astype(int)
# make Age into a category
X['Age'] = X['Age'].astype(int)
X.loc[X['Age'] <= 11, 'Age'] = 0
X.loc[(X['Age'] > 11) & (X['Age'] <= 18), 'Age'] = 1
X.loc[(X['Age'] > 18) & (X['Age'] <= 22), 'Age'] = 2
X.loc[(X['Age'] > 22) & (X['Age'] <= 27), 'Age'] = 3
X.loc[(X['Age'] > 27) & (X['Age'] <= 33), 'Age'] = 4
X.loc[(X['Age'] > 33) & (X['Age'] <= 40), 'Age'] = 5
X.loc[(X['Age'] > 40) & (X['Age'] <= 66), 'Age'] = 6
X.loc[X['Age'] > 66, 'Age'] = 6
# create Age_Class feature
X['Age_Class'] = X['Age'] * X['Pclass']
# Embarked
ports = {"S": 0, "C": 1, "Q": 2}
X['Embarked'] = X['Embarked'].fillna('S')
X['Embarked'] = X['Embarked'].map(ports)
# Fare
X['Fare'] = X['Fare'].fillna(0)
X['Fare'] = X['Fare'].astype(int)
# make Fare into a category
X.loc[X['Fare'] <= 7.91, 'Fare'] = 0
X.loc[(X['Fare'] > 7.91) & (X['Fare'] <= 14.454), 'Fare'] = 1
X.loc[(X['Fare'] > 14.454) & (X['Fare'] <= 31), 'Fare'] = 2
X.loc[(X['Fare'] > 31) & (X['Fare'] <= 99), 'Fare'] = 3
X.loc[(X['Fare'] > 99) & (X['Fare'] <= 250), 'Fare'] = 4
X.loc[X['Fare'] > 250, 'Fare'] = 5
X['Fare'] = X['Fare'].astype(int)
# create Fare_Per_Person feature
X['Fare_Per_Person'] = X['Fare'] / (X['relatives'] + 1)
X['Fare_Per_Person'] = X['Fare_Per_Person'].astype(int)
# Name
titles = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
# extract titles
        X['Title'] = X.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
# replace titles with a more common title or as Rare
X['Title'] = X['Title'].replace(
['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr',
'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
X['Title'] = X['Title'].replace('Mlle', 'Miss')
X['Title'] = X['Title'].replace('Ms', 'Miss')
X['Title'] = X['Title'].replace('Mme', 'Mrs')
# convert titles into numbers
X['Title'] = X['Title'].map(titles)
# filling NaN with 0, to get safe
X['Title'] = X['Title'].fillna(0)
X = X.drop(['Name'], axis=1)
# Sex
genders = {"male": 0, "female": 1}
X['Sex'] = X['Sex'].map(genders)
# Ticket
X = X.drop(['Ticket'], axis=1)
# Drop non relevant features
X = X.drop("not_alone", axis=1)
X = X.drop("Parch", axis=1)
return X
def _predict_pandas(self, model, X):
y_pred = model.predict(X)
return pd.DataFrame(columns=['Survived'], data=y_pred)
def train(self, X, y, models, rank):
X = self._normalize_X(X)
# the following RFC hyperparameters were determined using:
# >>> param_grid = {"criterion": ["gini", "entropy"], "min_samples_leaf": [1, 5, 10, 25, 50, 70],
# "min_samples_split": [2, 4, 10, 12, 16, 18, 25, 35],
# "n_estimators": [100, 400, 700, 1000, 1500]}
# >>> rf = RandomForestClassifier(n_estimators=100, max_features='auto', oob_score=True,
# random_state=1, n_jobs=-1)
        # >>> clf = GridSearchCV(estimator=rf, param_grid=param_grid, n_jobs=-1)
# Random Forest
random_forest = RandomForestClassifier(criterion="gini",
min_samples_leaf=1,
min_samples_split=10,
n_estimators=100,
max_features='auto',
oob_score=True,
random_state=1,
n_jobs=-1)
random_forest.fit(X, y)
y_pred = self._predict_pandas(random_forest, X)
return y_pred, random_forest
def predict(self, X, model):
X = self._normalize_X(X)
return self._predict_pandas(model, X)
def load_model(self, path):
with open(path, 'rb') as f:
return pickle.load(f)
def save_model(self, model, path):
with open(path, 'wb') as f:
pickle.dump(model, f)
if __name__ == '__main__':
tools.algo.execute(Algo())
| 37.772727
| 105
| 0.480832
|
181ed57e3eb39153ad141aa8f03aeb15ee7f7127
| 510
|
py
|
Python
|
idManager/view/authentication_view.py
|
lgarciasbr/idm-api
|
3517d29d55eb2a06fb5b4b21359b6cf6d11529a0
|
[
"Apache-2.0"
] | 2
|
2018-01-14T22:43:43.000Z
|
2018-01-14T22:43:48.000Z
|
idManager/view/authentication_view.py
|
lgarciasbr/idm-api
|
3517d29d55eb2a06fb5b4b21359b6cf6d11529a0
|
[
"Apache-2.0"
] | null | null | null |
idManager/view/authentication_view.py
|
lgarciasbr/idm-api
|
3517d29d55eb2a06fb5b4b21359b6cf6d11529a0
|
[
"Apache-2.0"
] | null | null | null |
from flask import jsonify
def auth_login(http_status_code, message, token):
view = jsonify({'status_code': http_status_code, 'message': message, '_token': token})
return view
def auth_is_valid(http_status_code, message, token):
view = jsonify({'status_code': http_status_code, 'message': message, '_token': token})
return view
def auth_logout(http_status_code, message, token):
view = jsonify({'status_code': http_status_code, 'message': message, '_token': token})
return view
| 25.5
| 90
| 0.721569
|
1f9ff1929c0bc02e2a5782ebe815a7b05d5833dc
| 7,176
|
py
|
Python
|
alephnull/sources/test_source.py
|
Python3pkg/AlephNull
|
70c522573fe1416c9f4972c9d0078a9b96de0c57
|
[
"Apache-2.0"
] | 1
|
2021-05-16T21:10:41.000Z
|
2021-05-16T21:10:41.000Z
|
alephnull/sources/test_source.py
|
Python3pkg/AlephNull
|
70c522573fe1416c9f4972c9d0078a9b96de0c57
|
[
"Apache-2.0"
] | null | null | null |
alephnull/sources/test_source.py
|
Python3pkg/AlephNull
|
70c522573fe1416c9f4972c9d0078a9b96de0c57
|
[
"Apache-2.0"
] | 1
|
2021-04-02T19:01:11.000Z
|
2021-04-02T19:01:11.000Z
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A source to be used in testing.
"""
import pytz
from itertools import cycle
from datetime import datetime, timedelta
import numpy as np
from alephnull.protocol import (
Event,
DATASOURCE_TYPE
)
from alephnull.gens.utils import hash_args
from alephnull.utils.tradingcalendar import trading_days
def create_trade(sid, price, amount, datetime, source_id="test_factory"):
trade = Event()
trade.source_id = source_id
trade.type = DATASOURCE_TYPE.TRADE
trade.sid = sid
trade.dt = datetime
trade.price = price
trade.close_price = price
trade.open_price = price
trade.low = price * .95
trade.high = price * 1.05
trade.volume = amount
return trade
def date_gen(start=datetime(2006, 6, 6, 12, tzinfo=pytz.utc),
delta=timedelta(minutes=1),
count=100,
repeats=None):
"""
Utility to generate a stream of dates.
"""
one_day = timedelta(days=1)
cur = start
if delta == one_day:
# if we are producing daily timestamps, we
# use midnight
cur = cur.replace(hour=0, minute=0, second=0,
microsecond=0)
# yield count trade events, all on trading days, and
# during trading hours.
# NB: Being inside of trading hours is currently dependent upon the
# count parameter being less than the number of trading minutes in a day
for i in range(count):
if repeats:
for j in range(repeats):
yield cur
else:
yield cur
cur = cur + delta
cur_midnight = cur.replace(hour=0, minute=0, second=0, microsecond=0)
# skip over any non-trading days
while cur_midnight not in trading_days:
cur = cur + one_day
cur_midnight = cur.replace(hour=0, minute=0, second=0,
microsecond=0)
cur = cur.replace(day=cur_midnight.day)
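# For example (illustrative only): three daily timestamps landing on trading days,
# starting from a known trading day.
#
#     list(date_gen(start=datetime(2006, 6, 6, tzinfo=pytz.utc),
#                   delta=timedelta(days=1), count=3))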
def mock_prices(count):
"""
Utility to generate a stream of mock prices. By default
    cycles through values from 1.0 to 10.0.
"""
return (float(i % 10) + 1.0 for i in range(count))
def mock_volumes(count):
"""
Utility to generate a set of volumes. By default cycles
    through values from 100 to 950, incrementing by 50.
"""
return ((i * 50) % 900 + 100 for i in range(count))
class SpecificEquityTrades(object):
"""
Yields all events in event_list that match the given sid_filter.
If no event_list is specified, generates an internal stream of events
to filter. Returns all events if filter is None.
Configuration options:
count : integer representing number of trades
sids : list of values representing simulated internal sids
start : start date
delta : timedelta between internal events
    filter : optional collection of sids; only events whose sid is in it are kept
"""
def __init__(self, *args, **kwargs):
# We shouldn't get any positional arguments.
assert len(args) == 0
# Default to None for event_list and filter.
self.event_list = kwargs.get('event_list')
self.filter = kwargs.get('filter')
if self.event_list is not None:
# If event_list is provided, extract parameters from there
# This isn't really clean and ultimately I think this
# class should serve a single purpose (either take an
# event_list or autocreate events).
self.count = kwargs.get('count', len(self.event_list))
self.sids = kwargs.get(
'sids',
np.unique([event.sid for event in self.event_list]).tolist())
self.start = kwargs.get('start', self.event_list[0].dt)
            self.end = kwargs.get('end', self.event_list[-1].dt)
self.delta = kwargs.get(
'delta',
self.event_list[1].dt - self.event_list[0].dt)
self.concurrent = kwargs.get('concurrent', False)
else:
# Unpack config dictionary with default values.
self.count = kwargs.get('count', 500)
self.sids = kwargs.get('sids', [1, 2])
self.start = kwargs.get(
'start',
datetime(2008, 6, 6, 15, tzinfo=pytz.utc))
self.delta = kwargs.get(
'delta',
timedelta(minutes=1))
self.concurrent = kwargs.get('concurrent', False)
# Hash_value for downstream sorting.
self.arg_string = hash_args(*args, **kwargs)
self.generator = self.create_fresh_generator()
def __iter__(self):
return self
def __next__(self):
return next(self.generator)
def rewind(self):
self.generator = self.create_fresh_generator()
def get_hash(self):
return self.__class__.__name__ + "-" + self.arg_string
def update_source_id(self, gen):
for event in gen:
event.source_id = self.get_hash()
yield event
def create_fresh_generator(self):
if self.event_list:
event_gen = (event for event in self.event_list)
unfiltered = self.update_source_id(event_gen)
# Set up iterators for each expected field.
else:
if self.concurrent:
# in this context the count is the number of
# trades per sid, not the total.
dates = date_gen(
count=self.count,
start=self.start,
delta=self.delta,
repeats=len(self.sids),
)
else:
dates = date_gen(
count=self.count,
start=self.start,
delta=self.delta
)
prices = mock_prices(self.count)
volumes = mock_volumes(self.count)
sids = cycle(self.sids)
# Combine the iterators into a single iterator of arguments
arg_gen = zip(sids, prices, volumes, dates)
# Convert argument packages into events.
unfiltered = (create_trade(*args, source_id=self.get_hash())
for args in arg_gen)
# If we specified a sid filter, filter out elements that don't
# match the filter.
if self.filter:
filtered = filter(
lambda event: event.sid in self.filter, unfiltered)
# Otherwise just use all events.
else:
filtered = unfiltered
# Return the filtered event stream.
return filtered
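# Minimal usage sketch (not part of the original module): five generated trades
# cycling over two sids.
#
#     source = SpecificEquityTrades(count=5, sids=[1, 2])
#     for event in source:
#         print(event.sid, event.price, event.volume, event.dt)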
| 31.893333
| 77
| 0.60131
|
6b339f25d2315186b02e4e3cb9e27f2f8e930848
| 9,087
|
py
|
Python
|
bitbots_animation_server/src/bitbots_animation_server/animation_node.py
|
bit-bots/bitbots_motion
|
7bdf35eba88773cc71759b25fae201d2accd573d
|
[
"MIT"
] | 3
|
2020-05-30T07:04:33.000Z
|
2021-08-07T07:41:27.000Z
|
bitbots_animation_server/src/bitbots_animation_server/animation_node.py
|
bit-bots/bitbots_motion
|
7bdf35eba88773cc71759b25fae201d2accd573d
|
[
"MIT"
] | 149
|
2018-12-18T12:49:56.000Z
|
2022-01-06T10:51:32.000Z
|
bitbots_animation_server/src/bitbots_animation_server/animation_node.py
|
bit-bots/bitbots_motion
|
7bdf35eba88773cc71759b25fae201d2accd573d
|
[
"MIT"
] | 4
|
2019-07-28T11:25:02.000Z
|
2021-12-06T19:04:18.000Z
|
#!/usr/bin/env python3
import json
import time
import actionlib
import traceback
import numpy as np
import rospy
from humanoid_league_msgs.msg import PlayAnimationResult, PlayAnimationFeedback
from humanoid_league_msgs.msg import PlayAnimationAction as PlayAction
from humanoid_league_msgs.msg import Animation as AnimationMsg
from trajectory_msgs.msg import JointTrajectoryPoint, JointTrajectory
from bitbots_animation_server.animation import parse
from sensor_msgs.msg import Imu, JointState
from bitbots_animation_server.resource_manager import find_all_animations_by_name
from humanoid_league_msgs.msg import RobotControlState
from bitbots_animation_server.spline_animator import SplineAnimator
from bitbots_ros_patches.rate import Rate
class AnimationNode:
def __init__(self):
"""Starts a simple action server and waits for requests."""
# currently we set log level to info since the action server is spamming too much
log_level = rospy.INFO if rospy.get_param("debug_active", False) else rospy.INFO
rospy.init_node("animation", log_level=log_level, anonymous=False)
if not rospy.get_param("simulation_active"):
rospy.on_shutdown(self.on_shutdown_hook)
rospy.logdebug("Starting Animation Server")
server = PlayAnimationAction(rospy.get_name())
rospy.spin()
def on_shutdown_hook(self):
        # we got an external shutdown; still wait a bit, since we probably want to do a shutdown animation
rospy.sleep(5)
class PlayAnimationAction(object):
_feedback = PlayAnimationFeedback
_result = PlayAnimationResult
def __init__(self, name):
self.current_joint_states = None
self.action_name = name
self.hcm_state = 0
self.current_animation = None
self.animation_cache = {}
all_animations = find_all_animations_by_name()
for animation_name, animation_file in all_animations.items():
try:
with open(animation_file) as fp:
self.animation_cache[animation_name] = parse(json.load(fp))
except IOError:
rospy.logerr("Animation '%s' could not be loaded" % animation_name)
except ValueError:
rospy.logerr(
"Animation '%s' had a ValueError. Probably there is a syntax error in the animation file. "
"See traceback" % animation_name)
traceback.print_exc()
# predefined messages for performance
self.anim_msg = AnimationMsg()
# AnimationMsg takes a JointTrajectory message to also be able to process trajectories. To keep this
# functionality, we use this message type, even though we only need a single joint goal in this case.
self.traj_msg = JointTrajectory()
self.traj_point = JointTrajectoryPoint()
rospy.Subscriber("joint_states", JointState, self.update_current_pose, queue_size=1)
rospy.Subscriber("robot_state", RobotControlState, self.update_hcm_state, queue_size=1)
self.hcm_publisher = rospy.Publisher("animation", AnimationMsg, queue_size=1)
self._as = actionlib.SimpleActionServer(self.action_name, PlayAction,
execute_cb=self.execute_cb, auto_start=False)
self._as.start()
def execute_cb(self, goal):
""" This is called, when someone calls the animation action"""
first = True
self.current_animation = goal.animation
# publish info to the console for the user
rospy.loginfo("Request to play animation %s", goal.animation)
if self.hcm_state != 0 and not goal.hcm: # 0 means controllable
# we cant play an animation right now
# but we send a request, so that we may can soon
self.send_animation_request()
rospy.loginfo("HCM not controllable. Only sent request to make it come controllable.")
self._as.set_aborted(text="HCM not controllable. Will now become controllable. Try again later.")
return
animator = self.get_animation_splines(self.current_animation)
# start animation
rate = Rate(500)
while not rospy.is_shutdown() and animator:
# first check if we have another goal
self.check_for_new_goal()
new_goal = self._as.current_goal.goal.goal.animation
# if there is a new goal, calculate new splines and reset the time
if new_goal != self.current_animation:
self.current_animation = new_goal
animator = self.get_animation_splines(self.current_animation)
first = True
# if we're here we want to play the next keyframe, cause there is no other goal
# compute next pose
t = rospy.get_time() - animator.get_start_time()
pose = animator.get_positions_rad(t)
if pose is None:
# see walking node reset
# animation is finished
# tell it to the hcm
self.send_animation(False, True, goal.hcm, None, None)
self._as.publish_feedback(PlayAnimationFeedback(percent_done=100))
# we give a positive result
self._as.set_succeeded(PlayAnimationResult(True))
return
self.send_animation(first, False, goal.hcm, pose, animator.get_torque(t))
first = False # we have sent the first frame, all frames after this can't be the first
perc_done = int(((rospy.get_time() - animator.get_start_time()) / animator.get_duration()) * 100)
perc_done = max(0, min(perc_done, 100))
self._as.publish_feedback(PlayAnimationFeedback(percent_done=perc_done))
try:
# catch exception of moving backwards in time, when restarting simulator
rate.sleep()
except rospy.exceptions.ROSTimeMovedBackwardsException:
rospy.logwarn("We moved backwards in time. This is probably because the simulation was reset.")
except rospy.exceptions.ROSInterruptException:
exit()
def get_animation_splines(self, animation_name):
if animation_name not in self.animation_cache:
rospy.logerr("Animation '%s' not found" % animation_name)
self._as.set_aborted(False, "Animation not found")
return
parsed_animation = self.animation_cache[animation_name]
return SplineAnimator(parsed_animation, self.current_joint_states)
def check_for_new_goal(self):
if self._as.is_new_goal_available():
next_goal = self._as.next_goal
if not next_goal or not next_goal.get_goal():
return
rospy.logdebug("New goal: " + next_goal.get_goal().animation)
if next_goal.get_goal().hcm:
rospy.logdebug("Accepted hcm animation %s", next_goal.get_goal().animation)
# cancel old stuff and restart
self._as.current_goal.set_aborted()
self._as.accept_new_goal()
else:
# can't run this animation now
self._as.next_goal.set_rejected()
# delete the next goal to make sure, that we can accept something else
self._as.next_goal = None
rospy.logwarn("Couldn't start non hcm animation because another one is already running.")
def update_current_pose(self, msg):
"""Gets the current motor positions and updates the representing pose accordingly."""
self.current_joint_states = msg
def update_hcm_state(self, msg):
self.hcm_state = msg.state
def send_animation_request(self):
self.anim_msg.request = True
self.anim_msg.header.stamp = rospy.Time.now()
self.hcm_publisher.publish(self.anim_msg)
def send_animation(self, first, last, hcm, pose, torque):
self.anim_msg.request = False
self.anim_msg.first = first
self.anim_msg.last = last
self.anim_msg.hcm = hcm
if pose is not None:
self.traj_msg.joint_names = []
self.traj_msg.points = [JointTrajectoryPoint()]
# We are only using a single point in the trajectory message, since we only want to send a single joint goal
self.traj_msg.points[0].positions = []
self.traj_msg.points[0].effort = []
for joint in pose:
self.traj_msg.joint_names.append(joint)
self.traj_msg.points[0].positions.append(pose[joint])
if torque:
# 1 and 2 should be mapped to 1
self.traj_msg.points[0].effort.append(np.clip((torque[joint]), 0, 1))
self.anim_msg.position = self.traj_msg
self.anim_msg.header.stamp = rospy.Time.now()
self.hcm_publisher.publish(self.anim_msg)
if __name__ == "__main__":
rospy.logdebug("starting animation node")
animation = AnimationNode()
| 44.985149
| 120
| 0.652801
|
57b97df33665a61498fde35d95241a5cc574e373
| 3,701
|
py
|
Python
|
samples/samplenetconf/cmds/show_ctrl_yangmodel.py
|
gaberger/pybvc
|
bf546c4595a1a6282fca084865c5a0e69194030f
|
[
"BSD-3-Clause"
] | null | null | null |
samples/samplenetconf/cmds/show_ctrl_yangmodel.py
|
gaberger/pybvc
|
bf546c4595a1a6282fca084865c5a0e69194030f
|
[
"BSD-3-Clause"
] | 1
|
2021-03-26T00:46:31.000Z
|
2021-03-26T00:46:31.000Z
|
samples/samplenetconf/cmds/show_ctrl_yangmodel.py
|
gaberger/pybvc
|
bf546c4595a1a6282fca084865c5a0e69194030f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
"""
Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import sys
import getopt
from pybvc.controller.controller import Controller
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
def usage(myname):
print(' Usage: %s -i <identifier> -v <version>' % myname)
sys.exit()
if __name__ == "__main__":
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) == False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
except:
print ("Failed to get Controller device attributes")
exit(0)
model_identifier = None
model_version = None
if(len(sys.argv) == 1):
print(" Error: missing arguments")
usage(sys.argv[0])
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv,"i:v:h",["identifier=","version=","help"])
    except getopt.GetoptError as e:
print(" Error: %s" % e.msg)
usage(sys.argv[0])
for opt, arg in opts:
if opt in ("-h", "--help"):
usage(sys.argv[0])
elif opt in ("-i", "--identifier"):
model_identifier = arg
elif opt in ("-v", "--version"):
model_version = arg
else:
print("Error: failed to parse option %s" % opt)
usage(sys.argv[0])
if(model_identifier == None) or (model_version == None):
print("Error: incomplete command")
usage(sys.argv[0])
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
print ("<<< 'Controller': %s" % (ctrlIpAddr))
result = ctrl.get_schema("controller-config", model_identifier, model_version)
status = result.get_status()
if(status.eq(STATUS.OK) == True):
print "YANG model definition:"
schema = result.get_data()
print schema.encode('utf-8', 'replace')
else:
print ("\n")
print ("!!!Failed, reason: %s" % status.brief().lower())
print ("%s" % status.detailed())
exit(0)
print ("\n")
| 32.182609
| 82
| 0.673872
|
d3d82815a654bd2783a8710cd4e3980324a894be
| 7,829
|
py
|
Python
|
cmoon/src/detect.py
|
Cmoon-cyl/ros-module
|
f026bbdde1193fd96eb9c50e1602ca4a9de90310
|
[
"MIT"
] | 3
|
2021-08-28T18:40:33.000Z
|
2021-12-13T02:19:47.000Z
|
cmoon/src/detect.py
|
Cmoon-cyl/ros-module
|
f026bbdde1193fd96eb9c50e1602ca4a9de90310
|
[
"MIT"
] | null | null | null |
cmoon/src/detect.py
|
Cmoon-cyl/ros-module
|
f026bbdde1193fd96eb9c50e1602ca4a9de90310
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
import sys
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import argparse
import os
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
from utils.torch_utils import select_device, load_classifier, time_synchronized
def detect(save_img=False):
out, source, weights, view_img, save_txt, imgsz = \
opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')
# Initialize
set_logging()
device = select_device(opt.device)
if os.path.exists(out): # output dir
shutil.rmtree(out) # delete dir
os.makedirs(out) # make new dir
half = device.type != 'cpu' # half precision only supported on CUDA
    # load the model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']) # load weights
modelc.to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
else:
p, s, im0 = path, '', im0s
save_path = str(Path(out) / Path(p).name)
txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += '%g %ss, ' % (n, names[int(c)]) # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, conf, *xywh) if opt.save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line) + '\n') % line)
if save_img or view_img: # Add bbox to image
label = '%s %.2f' % (names[int(cls)], conf)
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
# Print time (inference + NMS)
print('%sDone. (%.3fs)' % (s, t2 - t1))
# Stream results
if view_img:
cv2.imshow(p, im0)
if cv2.waitKey(1) == ord('q'): # q to quit
raise StopIteration
# Save results (image with detections)
if save_img:
if dataset.mode == 'images':
cv2.imwrite(save_path, im0)
else:
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = 'mp4v' # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
print('Results saved to %s' % Path(out))
print('Done. (%.3fs)' % (time.time() - t0))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-dir', type=str, default='inference/output', help='directory to save results')
    parser.add_argument('--classes', nargs='+', type=int, default=[0],
help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
opt = parser.parse_args()
print(opt)
with torch.no_grad():
if opt.update: # update all models (to fix SourceChangeWarning)
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
detect()
strip_optimizer(opt.weights)
else:
detect()
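# Example invocation (illustrative; weights/source paths are placeholders):
#
#     python3 detect.py --weights yolov5s.pt --source inference/images --view-img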
| 43.73743
| 119
| 0.588325
|
06ae9a95a9f1648df551ab990f2b314f5f65ed91
| 1,332
|
py
|
Python
|
nonebot/default_config.py
|
Hieuzest/nonebot
|
7418c4ac174d3b4fa7078a528bff3283d6999ec5
|
[
"MIT"
] | 1
|
2021-01-21T09:46:32.000Z
|
2021-01-21T09:46:32.000Z
|
nonebot/default_config.py
|
BillYang2016/nonebot
|
515b7dbf44ffa8326c9ae9948e4c261d89f699a0
|
[
"MIT"
] | null | null | null |
nonebot/default_config.py
|
BillYang2016/nonebot
|
515b7dbf44ffa8326c9ae9948e4c261d89f699a0
|
[
"MIT"
] | 1
|
2021-08-03T08:50:06.000Z
|
2021-08-03T08:50:06.000Z
|
"""
Default configurations.
Any derived configurations must import everything from this module
at the very beginning of their code, and then set their own value
to override the default one.
For example:
>>> from nonebot.default_config import *
>>> PORT = 9090
>>> DEBUG = False
>>> SUPERUSERS.add(123456)
>>> NICKNAME = '小明'
"""
from datetime import timedelta
from typing import Collection, Union, Iterable, Pattern, Optional, Dict, Any
from .typing import Expression_T
API_ROOT: str = ''
ACCESS_TOKEN: str = ''
SECRET: str = ''
HOST: str = '127.0.0.1'
PORT: int = 8080
DEBUG: bool = True
SUPERUSERS: Collection[int] = set()
NICKNAME: Union[str, Iterable[str]] = ''
COMMAND_START: Iterable[Union[str, Pattern]] = {'/', '!', '/', '!'}
COMMAND_SEP: Iterable[Union[str, Pattern]] = {'/', '.'}
SESSION_EXPIRE_TIMEOUT: Optional[timedelta] = timedelta(minutes=5)
SESSION_RUN_TIMEOUT: Optional[timedelta] = None
SESSION_RUNNING_EXPRESSION: Expression_T = '您有命令正在执行,请稍后再试'  # "A command is already running, please try again later"
SHORT_MESSAGE_MAX_LENGTH: int = 50
DEFAULT_VALIDATION_FAILURE_EXPRESSION: Expression_T = '您的输入不符合要求,请重新输入'  # "Your input does not meet the requirements, please re-enter"
MAX_VALIDATION_FAILURES: int = 3
TOO_MANY_VALIDATION_FAILURES_EXPRESSION: Expression_T = \
    '您输入错误太多次啦,如需重试,请重新触发本功能'  # "Too many invalid inputs; to retry, please trigger this feature again"
SESSION_CANCEL_EXPRESSION: Expression_T = '好的'  # "OK"
APSCHEDULER_CONFIG: Dict[str, Any] = {'apscheduler.timezone': 'Asia/Shanghai'}
| 27.183673
| 78
| 0.742492
|
f50436a5e84190fada4c96ee939bb0f6f56ef0c4
| 100
|
py
|
Python
|
venv/lib/python2.7/UserDict.py
|
IdeasBlockLT/emem
|
a3f6e1950e9a074fbb696728778b22d6f523c3df
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/UserDict.py
|
IdeasBlockLT/emem
|
a3f6e1950e9a074fbb696728778b22d6f523c3df
|
[
"MIT"
] | 9
|
2019-12-04T23:15:54.000Z
|
2022-02-10T11:05:43.000Z
|
venv/lib/python2.7/UserDict.py
|
edbolivar/perfectpair
|
c165cff40353c602fe0dc418375b90e9b25de674
|
[
"MIT"
] | null | null | null |
/usr/local/Cellar/python@2/2.7.16/Frameworks/Python.framework/Versions/2.7/lib/python2.7/UserDict.py
| 100
| 100
| 0.81
|
187a148176437703af871944bf5e1b79df3d7f9c
| 21,771
|
py
|
Python
|
qlib/model/trainer.py
|
LogCreative/qlib
|
da48f42f3f35bbbbe9c00c23831a80409a4a13ab
|
[
"MIT"
] | 2
|
2021-06-12T20:48:26.000Z
|
2021-06-25T02:26:09.000Z
|
qlib/model/trainer.py
|
LogCreative/qlib
|
da48f42f3f35bbbbe9c00c23831a80409a4a13ab
|
[
"MIT"
] | 1
|
2022-03-10T03:57:50.000Z
|
2022-03-10T03:57:50.000Z
|
qlib/model/trainer.py
|
LogCreative/qlib
|
da48f42f3f35bbbbe9c00c23831a80409a4a13ab
|
[
"MIT"
] | 1
|
2022-02-22T03:09:56.000Z
|
2022-02-22T03:09:56.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
The Trainer will train a list of tasks and return a list of model recorders.
There are two steps in each Trainer including ``train``(make model recorder) and ``end_train``(modify model recorder).
This is a concept called ``DelayTrainer``, which can be used in online simulation for parallel training.
In ``DelayTrainer``, the first step only saves some necessary info to the model recorders, and the second step, which is finished at the end, can perform concurrent and time-consuming operations such as model fitting.
``Qlib`` offers two kinds of Trainer: ``TrainerR`` is the simplest way, and ``TrainerRM`` is based on TaskManager to help manage the task lifecycle automatically.
"""
import socket
import time
import re
from typing import Callable, List
from tqdm.auto import tqdm
from qlib.data.dataset import Dataset
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import flatten_dict, get_callable_kwargs, init_instance_by_config, auto_filter_kwargs, fill_placeholder
from qlib.workflow import R
from qlib.workflow.record_temp import SignalRecord
from qlib.workflow.recorder import Recorder
from qlib.workflow.task.manage import TaskManager, run_task
from qlib.data.dataset.weight import Reweighter
def _log_task_info(task_config: dict):
R.log_params(**flatten_dict(task_config))
R.save_objects(**{"task": task_config}) # keep the original format and datatype
R.set_tags(**{"hostname": socket.gethostname()})
def _exe_task(task_config: dict):
rec = R.get_recorder()
# model & dataset initiation
model: Model = init_instance_by_config(task_config["model"])
dataset: Dataset = init_instance_by_config(task_config["dataset"])
reweighter: Reweighter = task_config.get("reweighter", None)
# model training
auto_filter_kwargs(model.fit)(dataset, reweighter=reweighter)
R.save_objects(**{"params.pkl": model})
# this dataset is saved for online inference. So the concrete data should not be dumped
dataset.config(dump_all=False, recursive=True)
R.save_objects(**{"dataset": dataset})
# fill placeholders
placeholder_value = {"<MODEL>": model, "<DATASET>": dataset}
task_config = fill_placeholder(task_config, placeholder_value)
# generate records: prediction, backtest, and analysis
records = task_config.get("record", [])
if isinstance(records, dict): # prevent only one dict
records = [records]
for record in records:
# Some recorders require the parameters `model` and `dataset`.
# Try to pass them in to the initialization function automatically
# to make defining the task easier.
r = init_instance_by_config(
record,
recorder=rec,
default_module="qlib.workflow.record_temp",
try_kwargs={"model": model, "dataset": dataset},
)
r.generate()
def begin_task_train(task_config: dict, experiment_name: str, recorder_name: str = None) -> Recorder:
"""
Begin task training to start a recorder and save the task config.
Args:
task_config (dict): the config of a task
experiment_name (str): the name of experiment
recorder_name (str): the given name will be the recorder name. None for using rid.
Returns:
Recorder: the model recorder
"""
with R.start(experiment_name=experiment_name, recorder_name=recorder_name):
_log_task_info(task_config)
return R.get_recorder()
def end_task_train(rec: Recorder, experiment_name: str) -> Recorder:
"""
Finish task training with real model fitting and saving.
Args:
rec (Recorder): the recorder will be resumed
experiment_name (str): the name of experiment
Returns:
Recorder: the model recorder
"""
with R.start(experiment_name=experiment_name, recorder_id=rec.info["id"], resume=True):
task_config = R.load_object("task")
_exe_task(task_config)
return rec
def task_train(task_config: dict, experiment_name: str, recorder_name: str = None) -> Recorder:
"""
Task based training, will be divided into two steps.
Parameters
----------
task_config : dict
The config of a task.
experiment_name: str
The name of experiment
recorder_name: str
The name of recorder
Returns
----------
Recorder: The instance of the recorder
"""
with R.start(experiment_name=experiment_name, recorder_name=recorder_name):
_log_task_info(task_config)
_exe_task(task_config)
return R.get_recorder()
class Trainer:
"""
The trainer can train a list of models.
There are Trainer and DelayTrainer, which can be distinguished by when they finish the real training.
"""
def __init__(self):
self.delay = False
def train(self, tasks: list, *args, **kwargs) -> list:
"""
Given a list of task definitions, begin training, and return the models.
For Trainer, it finishes real training in this method.
For DelayTrainer, it only does some preparation in this method.
Args:
tasks: a list of tasks
Returns:
list: a list of models
"""
raise NotImplementedError(f"Please implement the `train` method.")
def end_train(self, models: list, *args, **kwargs) -> list:
"""
Given a list of models, finish any remaining work at the end of training if needed.
The models may be Recorder, txt file, database, and so on.
For Trainer, it does some finishing touches in this method.
For DelayTrainer, it finishes real training in this method.
Args:
models: a list of models
Returns:
list: a list of models
"""
# do nothing if you finished all work in `train` method
return models
def is_delay(self) -> bool:
"""
Whether the Trainer delays finishing `end_train`.
Returns:
bool: if DelayTrainer
"""
return self.delay
def __call__(self, *args, **kwargs) -> list:
return self.end_train(self.train(*args, **kwargs))
def has_worker(self) -> bool:
"""
Some trainers have a backend worker to support parallel training.
This method tells whether the worker is enabled.
Returns
-------
bool:
if the worker is enabled
"""
return False
def worker(self):
"""
start the worker
Raises
------
NotImplementedError:
If the worker is not supported
"""
raise NotImplementedError(f"Please implement the `worker` method")
class TrainerR(Trainer):
"""
Trainer based on (R)ecorder.
It will train a list of tasks and return a list of model recorders in a linear way.
Assumption: models were defined by `task` and the results will be saved to `Recorder`.
"""
# These tags will help you distinguish whether the Recorder has finished training
STATUS_KEY = "train_status"
STATUS_BEGIN = "begin_task_train"
STATUS_END = "end_task_train"
def __init__(self, experiment_name: str = None, train_func: Callable = task_train):
"""
Init TrainerR.
Args:
experiment_name (str, optional): the default name of experiment.
train_func (Callable, optional): default training method. Defaults to `task_train`.
"""
super().__init__()
self.experiment_name = experiment_name
self.train_func = train_func
def train(self, tasks: list, train_func: Callable = None, experiment_name: str = None, **kwargs) -> List[Recorder]:
"""
Given a list of `task`s, return a list of trained Recorders. The order is guaranteed.
Args:
tasks (list): a list of definitions based on `task` dict
train_func (Callable): the training method which needs at least `tasks` and `experiment_name`. None for the default training method.
experiment_name (str): the experiment name; None to use the default name.
kwargs: the params for train_func.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(tasks, dict):
tasks = [tasks]
if len(tasks) == 0:
return []
if train_func is None:
train_func = self.train_func
if experiment_name is None:
experiment_name = self.experiment_name
recs = []
for task in tqdm(tasks, desc="train tasks"):
rec = train_func(task, experiment_name, **kwargs)
rec.set_tags(**{self.STATUS_KEY: self.STATUS_BEGIN})
recs.append(rec)
return recs
def end_train(self, models: list, **kwargs) -> List[Recorder]:
"""
Set STATUS_END tag to the recorders.
Args:
models (list): a list of trained recorders.
Returns:
List[Recorder]: the same list as the param.
"""
if isinstance(models, Recorder):
models = [models]
for rec in models:
rec.set_tags(**{self.STATUS_KEY: self.STATUS_END})
return models
class DelayTrainerR(TrainerR):
"""
A delayed implementation based on TrainerR, which means the `train` method may only do some preparation and the `end_train` method does the real model fitting.
"""
def __init__(self, experiment_name: str = None, train_func=begin_task_train, end_train_func=end_task_train):
"""
Init DelayTrainerR.
Args:
experiment_name (str): the default name of experiment.
train_func (Callable, optional): default train method. Defaults to `begin_task_train`.
end_train_func (Callable, optional): default end_train method. Defaults to `end_task_train`.
"""
super().__init__(experiment_name, train_func)
self.end_train_func = end_train_func
self.delay = True
def end_train(self, models, end_train_func=None, experiment_name: str = None, **kwargs) -> List[Recorder]:
"""
Given a list of Recorders, return a list of trained Recorders.
This class will finish real data loading and model fitting.
Args:
models (list): a list of Recorder, the tasks have been saved to them
end_train_func (Callable, optional): the end_train method which needs at least `recorder`s and `experiment_name`. Defaults to None for using self.end_train_func.
experiment_name (str): the experiment name; None to use the default name.
kwargs: the params for end_train_func.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(models, Recorder):
models = [models]
if end_train_func is None:
end_train_func = self.end_train_func
if experiment_name is None:
experiment_name = self.experiment_name
for rec in models:
if rec.list_tags()[self.STATUS_KEY] == self.STATUS_END:
continue
end_train_func(rec, experiment_name, **kwargs)
rec.set_tags(**{self.STATUS_KEY: self.STATUS_END})
return models
class TrainerRM(Trainer):
"""
Trainer based on (R)ecorder and Task(M)anager.
It can train a list of tasks and return a list of model recorders in a multiprocessing way.
Assumption: `task` will be saved to TaskManager and `task` will be fetched and trained from TaskManager
"""
# These tags will help you distinguish whether the Recorder has finished training
STATUS_KEY = "train_status"
STATUS_BEGIN = "begin_task_train"
STATUS_END = "end_task_train"
# This tag is the _id in TaskManager to distinguish tasks.
TM_ID = "_id in TaskManager"
def __init__(
self, experiment_name: str = None, task_pool: str = None, train_func=task_train, skip_run_task: bool = False
):
"""
Init TrainerRM.
Args:
experiment_name (str): the default name of experiment.
task_pool (str): task pool name in TaskManager; None to use the same name as experiment_name.
train_func (Callable, optional): default training method. Defaults to `task_train`.
skip_run_task (bool):
If skip_run_task == True, do not call run_task here; the tasks will only be run by the worker.
Otherwise, run_task is called directly.
"""
super().__init__()
self.experiment_name = experiment_name
self.task_pool = task_pool
self.train_func = train_func
self.skip_run_task = skip_run_task
def train(
self,
tasks: list,
train_func: Callable = None,
experiment_name: str = None,
before_status: str = TaskManager.STATUS_WAITING,
after_status: str = TaskManager.STATUS_DONE,
**kwargs,
) -> List[Recorder]:
"""
Given a list of `task`s, return a list of trained Recorders. The order is guaranteed.
This method defaults to a single process, but TaskManager offers a great way to parallelize training.
Users can customize their train_func to realize multiple processes or even multiple machines.
Args:
tasks (list): a list of definitions based on `task` dict
train_func (Callable): the training method which needs at least `task`s and `experiment_name`. None for the default training method.
experiment_name (str): the experiment name; None to use the default name.
before_status (str): tasks in before_status will be fetched and trained. Can be STATUS_WAITING, STATUS_PART_DONE.
after_status (str): tasks will become after_status after being trained. Can be STATUS_WAITING, STATUS_PART_DONE.
kwargs: the params for train_func.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(tasks, dict):
tasks = [tasks]
if len(tasks) == 0:
return []
if train_func is None:
train_func = self.train_func
if experiment_name is None:
experiment_name = self.experiment_name
task_pool = self.task_pool
if task_pool is None:
task_pool = experiment_name
tm = TaskManager(task_pool=task_pool)
_id_list = tm.create_task(tasks) # all tasks will be saved to MongoDB
query = {"_id": {"$in": _id_list}}
if not self.skip_run_task:
run_task(
train_func,
task_pool,
query=query, # only train these tasks
experiment_name=experiment_name,
before_status=before_status,
after_status=after_status,
**kwargs,
)
if not self.is_delay():
tm.wait(query=query)
recs = []
for _id in _id_list:
rec = tm.re_query(_id)["res"]
rec.set_tags(**{self.STATUS_KEY: self.STATUS_BEGIN})
rec.set_tags(**{self.TM_ID: _id})
recs.append(rec)
return recs
def end_train(self, recs: list, **kwargs) -> List[Recorder]:
"""
Set STATUS_END tag to the recorders.
Args:
recs (list): a list of trained recorders.
Returns:
List[Recorder]: the same list as the param.
"""
if isinstance(recs, Recorder):
recs = [recs]
for rec in recs:
rec.set_tags(**{self.STATUS_KEY: self.STATUS_END})
return recs
def worker(
self,
train_func: Callable = None,
experiment_name: str = None,
):
"""
The multiprocessing method for `train`. It can share the same task_pool with `train` and can run in another process or on other machines.
Args:
train_func (Callable): the training method which needs at least `task`s and `experiment_name`. None for the default training method.
experiment_name (str): the experiment name; None to use the default name.
"""
if train_func is None:
train_func = self.train_func
if experiment_name is None:
experiment_name = self.experiment_name
task_pool = self.task_pool
if task_pool is None:
task_pool = experiment_name
run_task(train_func, task_pool=task_pool, experiment_name=experiment_name)
def has_worker(self) -> bool:
return True
class DelayTrainerRM(TrainerRM):
"""
A delayed implementation based on TrainerRM, which means the `train` method may only do some preparation and the `end_train` method does the real model fitting.
"""
def __init__(
self,
experiment_name: str = None,
task_pool: str = None,
train_func=begin_task_train,
end_train_func=end_task_train,
skip_run_task: bool = False,
):
"""
Init DelayTrainerRM.
Args:
experiment_name (str): the default name of experiment.
task_pool (str): task pool name in TaskManager; None to use the same name as experiment_name.
train_func (Callable, optional): default train method. Defaults to `begin_task_train`.
end_train_func (Callable, optional): default end_train method. Defaults to `end_task_train`.
skip_run_task (bool):
If skip_run_task == True, do not call run_task here; the tasks will only be run by the worker.
Otherwise, run_task is called directly.
E.g. start the trainer on a CPU VM and then wait for the tasks to be finished on GPU VMs.
"""
super().__init__(experiment_name, task_pool, train_func)
self.end_train_func = end_train_func
self.delay = True
self.skip_run_task = skip_run_task
def train(self, tasks: list, train_func=None, experiment_name: str = None, **kwargs) -> List[Recorder]:
"""
Same as `train` of TrainerRM, after_status will be STATUS_PART_DONE.
Args:
tasks (list): a list of definitions based on the `task` dict
train_func (Callable): the train method, which needs at least `task`s and `experiment_name`. Defaults to None for using self.train_func.
experiment_name (str): the experiment name; None to use the default name.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(tasks, dict):
tasks = [tasks]
if len(tasks) == 0:
return []
_skip_run_task = self.skip_run_task
self.skip_run_task = False # The task preparation can't be skipped
res = super().train(
tasks,
train_func=train_func,
experiment_name=experiment_name,
after_status=TaskManager.STATUS_PART_DONE,
**kwargs,
)
self.skip_run_task = _skip_run_task
return res
def end_train(self, recs, end_train_func=None, experiment_name: str = None, **kwargs) -> List[Recorder]:
"""
Given a list of Recorders, return a list of trained Recorders.
This class will finish real data loading and model fitting.
Args:
recs (list): a list of Recorder, the tasks have been saved to them.
end_train_func (Callable, optional): the end_train method, which needs at least `recorder`s and `experiment_name`. Defaults to None for using self.end_train_func.
experiment_name (str): the experiment name; None to use the default name.
kwargs: the params for end_train_func.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(recs, Recorder):
recs = [recs]
if end_train_func is None:
end_train_func = self.end_train_func
if experiment_name is None:
experiment_name = self.experiment_name
task_pool = self.task_pool
if task_pool is None:
task_pool = experiment_name
_id_list = []
for rec in recs:
_id_list.append(rec.list_tags()[self.TM_ID])
query = {"_id": {"$in": _id_list}}
if not self.skip_run_task:
run_task(
end_train_func,
task_pool,
query=query, # only train these tasks
experiment_name=experiment_name,
before_status=TaskManager.STATUS_PART_DONE,
**kwargs,
)
TaskManager(task_pool=task_pool).wait(query=query)
for rec in recs:
rec.set_tags(**{self.STATUS_KEY: self.STATUS_END})
return recs
def worker(self, end_train_func=None, experiment_name: str = None):
"""
The multiprocessing method for `end_train`. It can share the same task_pool with `end_train` and can run in another process or on other machines.
Args:
end_train_func (Callable, optional): the end_train method, which needs at least `recorder`s and `experiment_name`. Defaults to None for using self.end_train_func.
experiment_name (str): the experiment name; None to use the default name.
"""
if end_train_func is None:
end_train_func = self.end_train_func
if experiment_name is None:
experiment_name = self.experiment_name
task_pool = self.task_pool
if task_pool is None:
task_pool = experiment_name
run_task(
end_train_func,
task_pool=task_pool,
experiment_name=experiment_name,
before_status=TaskManager.STATUS_PART_DONE,
)
def has_worker(self) -> bool:
return True
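# --- Illustrative sketch (not part of the original qlib source) ---
# A minimal sketch of the two-step flow described in the module docstring,
# contrasting TrainerR (real fitting happens in `train`) with DelayTrainerR
# (fitting is deferred to `end_train`). The task config below is a hypothetical
# placeholder; a real one needs complete model/dataset configs and an
# initialized qlib environment with an MLflow-backed experiment.
if __name__ == "__main__":
    example_task = {
        "model": {"class": "LGBModel", "module_path": "qlib.contrib.model.gbdt"},
        "dataset": {"class": "DatasetH", "module_path": "qlib.data.dataset"},
        "record": [],
    }  # hypothetical task config; a real one also needs handler/segment kwargs

    trainer = TrainerR(experiment_name="trainer_example")
    recs = trainer.train([example_task])   # real fitting happens here
    recs = trainer.end_train(recs)         # only tags the recorders

    delay_trainer = DelayTrainerR(experiment_name="trainer_example")
    recs = delay_trainer.train([example_task])  # only logs the task info
    recs = delay_trainer.end_train(recs)        # real fitting happens here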
| 37.02551
| 221
| 0.636167
|
e42d2a07a18171eec8c9a065e0e645a99d95aa7b
| 719
|
py
|
Python
|
osisoft/pidevclub/piwebapi/web_id/web_id_string_type.py
|
inselbuch/pwap2
|
4ded0a62b241d9354f39ce87f3411fe9708317e3
|
[
"Apache-2.0"
] | 3
|
2019-05-16T15:44:09.000Z
|
2020-11-25T22:28:31.000Z
|
osisoft/pidevclub/piwebapi/web_id/web_id_string_type.py
|
inselbuch/pwap2
|
4ded0a62b241d9354f39ce87f3411fe9708317e3
|
[
"Apache-2.0"
] | null | null | null |
osisoft/pidevclub/piwebapi/web_id/web_id_string_type.py
|
inselbuch/pwap2
|
4ded0a62b241d9354f39ce87f3411fe9708317e3
|
[
"Apache-2.0"
] | 8
|
2019-03-15T10:20:57.000Z
|
2021-05-20T13:06:37.000Z
|
# coding: utf-8
"""
Copyright 2018 OSIsoft, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from enum import Enum
class WebIdStringType(Enum):
OneGuid = 1
TwoGuids = 2
ThreeGuids = 3
| 27.653846
| 74
| 0.721836
|
9fc4e4101b694e8f833d7d36f5f7268f75b9ff81
| 3,819
|
py
|
Python
|
lib/var_stack.py
|
hyche/openbmc-test-automation
|
1e656463b8db4fc55dc1a2bf7650d1bca845f958
|
[
"Apache-2.0"
] | null | null | null |
lib/var_stack.py
|
hyche/openbmc-test-automation
|
1e656463b8db4fc55dc1a2bf7650d1bca845f958
|
[
"Apache-2.0"
] | null | null | null |
lib/var_stack.py
|
hyche/openbmc-test-automation
|
1e656463b8db4fc55dc1a2bf7650d1bca845f958
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
r"""
Define the var_stack class.
"""
import sys
import collections
try:
from robot.utils import DotDict
except ImportError:
pass
import gen_print as gp
class var_stack:
r"""
Define the variable stack class.
An object of this class can be used to push variable name/variable value
pairs which may be popped off the stack at a later time. The most obvious
use for this is for saving variables that are to be restored later.
Example code:
save_stack = var_stack('save_stack')
var1 = "johnson"
save_stack.push(var1)
var1 = "smith"
...
var1 = save_stack.pop('var1')
# var1 has now been restored to the value "johnson".
Example use:
var1 = "mike"
save_stack.push(var1)
var1 = "james"
save_stack.push(var1)
save_stack.print_obj()
# The print-out of the object would then look like this:
save_stack:
stack_dict:
[var1]:
[var1][0]: mike
[var1][1]: james
# Continuing with this code...
var1 = save_stack.pop('var1')
save_stack.print_obj()
# The print-out of the object would then look like this:
save_stack:
stack_dict:
[var1]:
[var1][0]: mike
"""
def __init__(self,
obj_name='var_stack'):
r"""
Initialize a new object of this class type.
Description of argument(s):
obj_name The name of the object. This is useful
for printing out the object.
"""
self.__obj_name = obj_name
# Create a stack dictionary.
try:
self.__stack_dict = collections.OrderedDict()
except AttributeError:
self.__stack_dict = DotDict()
def sprint_obj(self):
r"""
sprint the fields of this object. This would normally be for debug
purposes.
"""
buffer = ""
buffer += self.__obj_name + ":\n"
indent = 2
buffer += gp.sprint_varx('stack_dict', self.__stack_dict, 1, indent)
return buffer
def print_obj(self):
r"""
print the fields of this object to stdout. This would normally be for
debug purposes.
"""
sys.stdout.write(self.sprint_obj())
def push(self,
var_value,
var_name=""):
r"""
push the var_name/var_value pair onto the stack.
Description of argument(s):
var_value The value being pushed.
var_name The name of the variable containing the
value to be pushed. This parameter is
normally unnecessary as this function can
figure out the var_name. This is provided
for Robot callers. In this scenario, we
are unable to get the variable name
ourselves.
"""
if var_name == "":
# The caller has not passed a var_name so we will try to figure
# it out.
stack_frame_ix = 2
var_name = gp.get_arg_name(0, 1, stack_frame_ix)
if var_name in self.__stack_dict:
self.__stack_dict[var_name].append(var_value)
else:
self.__stack_dict[var_name] = [var_value]
def pop(self,
var_name=""):
r"""
Pop the value for the given var_name from the stack and return it.
Description of argument(s):
var_name The name of the variable whose value is to
be popped.
"""
return self.__stack_dict[var_name].pop()
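# --- Illustrative sketch (not part of the original library) ---
# A small runnable demonstration of push/pop, passing var_name explicitly so
# the gen_print-based variable-name detection is not exercised. Running it
# still requires the gen_print module, which is imported at the top of this
# file.
if __name__ == "__main__":
    save_stack = var_stack('save_stack')
    save_stack.push("johnson", var_name="var1")
    save_stack.push("smith", var_name="var1")
    save_stack.print_obj()
    print(save_stack.pop('var1'))  # prints "smith" (last value pushed)
    print(save_stack.pop('var1'))  # prints "johnson"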
| 26.520833
| 78
| 0.547264
|
a1681cf073ec401205eb9645b9ce165c01168aff
| 1,893
|
py
|
Python
|
utils/writer.py
|
tony-rsa/Few-shot-Font-Generation-with-Localized-Style-Representations-and-Factorization-AAAI-2021-
|
33322e72fb5054ab5348f12d986059263a05d5ce
|
[
"MIT"
] | 98
|
2020-09-24T01:05:19.000Z
|
2022-03-04T16:13:42.000Z
|
utils/writer.py
|
tony-rsa/Few-shot-Font-Generation-with-Localized-Style-Representations-and-Factorization-AAAI-2021-
|
33322e72fb5054ab5348f12d986059263a05d5ce
|
[
"MIT"
] | 26
|
2020-09-24T07:36:37.000Z
|
2022-02-08T12:36:49.000Z
|
utils/writer.py
|
tony-rsa/Few-shot-Font-Generation-with-Localized-Style-Representations-and-Factorization-AAAI-2021-
|
33322e72fb5054ab5348f12d986059263a05d5ce
|
[
"MIT"
] | 20
|
2020-09-24T02:29:42.000Z
|
2022-01-23T15:35:28.000Z
|
"""
LF-Font
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
from pathlib import Path
import torch.nn.functional as F
from . import save_tensor_to_image
class Writer:
def add_scalars(self, tag_scalar_dic, global_step):
raise NotImplementedError()
def add_image(self, tag, img_tensor, global_step):
raise NotImplementedError()
class DiskWriter(Writer):
def __init__(self, img_path, scale=None):
self.img_dir = Path(img_path)
self.img_dir.mkdir(parents=True, exist_ok=True)
self.scale = scale
def add_scalars(self, tag_scalar_dic, global_step):
pass
# raise Exception("DiskWriter supports add_image only")
def add_image(self, tag, img_tensor, global_step):
path = self.img_dir / "{:07d}-{}.png".format(global_step, tag)
save_tensor_to_image(img_tensor, path, self.scale)
class TBWriter(Writer):
def __init__(self, dir_path, scale=None):
from torch.utils.tensorboard import SummaryWriter
self.writer = SummaryWriter(dir_path, flush_secs=30)
self.scale = scale
def add_scalars(self, tag_scalar_dic, global_step):
for tag, scalar in tag_scalar_dic.items():
self.writer.add_scalar(tag, scalar, global_step)
def add_image(self, tag, img_tensor, global_step):
if self.scale:
img_tensor = F.interpolate(
img_tensor.unsqueeze(0), scale_factor=self.scale, mode='bilinear',
align_corners=False
).squeeze(0)
self.writer.add_image(tag, img_tensor, global_step)
class TBDiskWriter(TBWriter):
def __init__(self, dir_path, img_path, scale=None):
super().__init__(dir_path)
self._disk_writer = DiskWriter(img_path, scale)
def add_image(self, tag, img_tensor, global_step):
return self._disk_writer.add_image(tag, img_tensor, global_step)
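# --- Illustrative sketch (not part of the original LF-Font source) ---
# How the writer abstraction is typically wired up: the same add_scalars /
# add_image calls work against a TensorBoard sink, a disk sink, or both via
# TBDiskWriter. The paths and the random CHW tensor below are assumptions for
# illustration only and presume save_tensor_to_image accepts such a tensor.
if __name__ == "__main__":
    import torch
    writer = TBDiskWriter("runs/example", "runs/example/images", scale=2)
    writer.add_scalars({"loss/gen": 0.5, "loss/disc": 0.7}, global_step=100)
    writer.add_image("sample", torch.rand(3, 64, 64), global_step=100)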
| 31.55
| 82
| 0.683043
|
3406049335d99d644917125e8b716c8c4bbe412b
| 4,174
|
py
|
Python
|
tic-tac-toe.py
|
RuTh-git/Tic-tac-toe-project
|
8a7f7720b91ca7f519c5fd66925ef154aa938142
|
[
"MIT"
] | null | null | null |
tic-tac-toe.py
|
RuTh-git/Tic-tac-toe-project
|
8a7f7720b91ca7f519c5fd66925ef154aa938142
|
[
"MIT"
] | null | null | null |
tic-tac-toe.py
|
RuTh-git/Tic-tac-toe-project
|
8a7f7720b91ca7f519c5fd66925ef154aa938142
|
[
"MIT"
] | null | null | null |
# -------Global Variables---------
# Game board
board =["-","-","-",
"-","-","-",
"-","-","-",]
# If game is still going
game_still_going = True
# Who won? Or tie?
winner = None
# Whose turn is it
current_player = "X"
# Display board
def display_board():
print("\n")
print(board[0] + " | " + board[1] + " | " + board[2])
print(board[3] + " | " + board[4] + " | " + board[5])
print(board[6] + " | " + board[7] + " | " + board[8])
print("\n")
# Play a game of tic tac toe
def play_game():
# Display initial board
display_board()
# While the game is still going
while game_still_going:
# handle a single turn of an arbitrary player
handle_turn(current_player)
# check if the game has ended
check_if_game_over()
# Flip to the other player
flip_player()
# The game has ended
if winner == "X" or winner == "O":
print(winner + " won.")
elif winner == None:
print("Tie.")
# Handle a single turn of an arbitrary player
def handle_turn(player):
print(player + "'s turn.")
print("\n")
position = input("Choose a position from 1-9: ")
valid = False
while not valid:
while position not in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
position = input("Choose a position from 1-9: ")
position = int(position) - 1
if board[position] == "-":
valid = True
else:
print("You can't go there. Go again.")
print("\n")
board[position] = player
display_board()
def check_if_game_over():
check_for_winner()
check_if_tie()
def check_for_winner():
# Set up global Variables
global winner
# check rows
row_winner = check_rows()
# check columns
column_winner = check_columns()
# check diagonals
diagonal_winner = check_diagonals()
if row_winner:
winner = row_winner
elif column_winner:
winner = column_winner
elif diagonal_winner:
winner = diagonal_winner
else:
winner = None
return
def check_rows():
# Set up global variables
global game_still_going
# check if any of the rows have all the same value (and is not empty)
row_1 = board[0] == board[1] == board[2] != "-"
row_2 = board[3] == board[4] == board[5] != "-"
row_3 = board[6] == board[7] == board[8] != "-"
# If any row does have a match, flag that there is a win
if row_1 or row_2 or row_3:
game_still_going = False
# Return the winner (X or O)
if row_1:
return board[0]
elif row_2:
return board[3]
elif row_3:
return board[6]
return
def check_columns():
# Set up global variables
global game_still_going
# check if any of the columns have all the same value (and is not empty)
column_1 = board[0] == board[3] == board[6] != "-"
column_2 = board[1] == board[4] == board[7] != "-"
column_3 = board[2] == board[5] == board[8] != "-"
# If any column does have a match, flag that there is a win
if column_1 or column_2 or column_3:
game_still_going = False
# Return the winner (X or O)
if column_1:
return board[0]
elif column_2:
return board[1]
elif column_3:
return board[2]
return
def check_diagonals():
# Set up global variables
global game_still_going
# check if any of the diagonals have all the same value (and are not empty)
diagonal_1 = board[0] == board[4] == board[8] != "-"
diagonal_2 = board[6] == board[4] == board[2] != "-"
# If any diagonal does have a match, flag that there is a win
if diagonal_1 or diagonal_2:
game_still_going = False
# Return the winner (X or O)
if diagonal_1:
return board[0]
elif diagonal_2:
return board[6]
return
def check_if_tie():
global game_still_going
if "-" not in board:
game_still_going = False
return
def flip_player():
# global variables we need
global current_player
# if the current player was x, then change it to O
if current_player == "X":
current_player = "O"
# If the current player was O, then change it to X
elif current_player == "O":
current_player = "X"
return
play_game()
# board
# display board
# play game
# handle turn
# check win
# check rows
# check columns
# check diagonals
# check tie
# flip player
| 20.766169
| 74
| 0.626977
|
7c1c9db0b17eae1fd6b696637fe2e3e7cb6f427f
| 6,234
|
py
|
Python
|
ongeza/main.py
|
reubano/bump
|
0473bc49cd3b58dd1f4b87ac63ea5184c99bd9d5
|
[
"MIT"
] | 40
|
2015-12-31T17:00:01.000Z
|
2020-06-05T20:59:27.000Z
|
ongeza/main.py
|
reubano/bump
|
0473bc49cd3b58dd1f4b87ac63ea5184c99bd9d5
|
[
"MIT"
] | 7
|
2016-01-12T13:51:59.000Z
|
2018-06-10T16:18:30.000Z
|
ongeza/main.py
|
reubano/bump
|
0473bc49cd3b58dd1f4b87ac63ea5184c99bd9d5
|
[
"MIT"
] | 5
|
2016-02-05T01:45:01.000Z
|
2020-06-24T08:57:23.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
""" An automated way to follow the Semantic Versioning Specification """
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
import ongeza
from sys import exit
from os import getcwd, path as p
from argparse import RawTextHelpFormatter, ArgumentParser
from builtins import * # noqa pylint: disable=unused-import
from . import Project, version_is_valid, TRAVIS
CURDIR = None if TRAVIS else p.abspath(getcwd())
parser = ArgumentParser(
description=(
"description: ongeza makes following the Semantic Versioning "
"Specification a breeze.\nIf called with no options, ongeza will "
"print the current git repository's tag version.\nIf <dir> is not "
"specified, the current dir is used."),
prog='ongeza', usage='%(prog)s [options] <dir>',
formatter_class=RawTextHelpFormatter)
group = parser.add_mutually_exclusive_group()
group.add_argument(
'-t', '--type', dest='ongeza_type', action='store', metavar='TYPE',
choices=['m', 'n', 'p', 'major', 'minor', 'patch'],
help=(
"version bump type, must be one of:\n"
" m or major: [x].0.0\n"
" n or minor: x.[y].0\n"
" p or patch: x.y.[z]"))
group.add_argument(
'-s', '--set', dest='new_version', action='store', metavar='VERSION',
help='set arbitrary version number')
parser.add_argument(
dest='dir', nargs='?', default=CURDIR,
help='the project directory (default: %s).\n\n' % CURDIR)
parser.add_argument(
'-S', '--skip-commit', action='store_true', help='skip committing version'
' bumped files')
parser.add_argument(
'-T', '--tag', action='store_true', help='create git tag at HEAD with the'
' bumped version number')
parser.add_argument(
'-p', '--push', action='store_true', help='push to the remote origin')
parser.add_argument(
'-a', '--stash', action='store_true', help='stash uncommitted changes')
parser.add_argument(
'-f', '--tag-format', action='store', metavar='FORMAT',
default=ongeza.DEFAULT_TAG_FMT, help='git tag format')
parser.add_argument(
'-F', '--tag-msg-format', action='store', metavar='FORMAT',
default=ongeza.DEFAULT_TAG_MSG_FMT, help='git tag message format')
parser.add_argument(
'-c', '--commit-msg-format', action='store', metavar='FORMAT',
default=ongeza.DEFAULT_COMMIT_MSG_FMT, help='git commit message format')
parser.add_argument(
'-g', '--sign', action='store_true',
help='make a GPG-signed tag (implies `--tag`)')
parser.add_argument(
'-i', '--file', action='store', help='the versioned file')
parser.add_argument(
'-v', '--version', help="Show version and exit.", action='store_true',
default=False)
parser.add_argument(
'-V', '--verbose', action='store_true',
help='increase output verbosity')
args = parser.parse_args()
def prelim_check(project):
result = True
if args.version:
project.logger.info('ongeza v%s', ongeza.__version__)
elif project.version and not args.ongeza_type and not args.new_version:
project.logger.info('Current version: {0.version}'.format(project))
elif not any([project.version, args.ongeza_type, args.new_version]):
project.logger.info('No valid versions found.')
else:
result = False
return result
def ongeza_project(project):
if project.is_dirty and not args.stash:
error = (
"Can't bump the version with uncommitted changes. Please "
"commit your changes or stash the following files and try "
"again. Optionally, run with '-a' option to auto stash these "
"files. Dirty files:\n%s" % "\n".join(project.dirty_files))
raise RuntimeError(error)
elif project.is_dirty:
project.logger.info("Stashing changes...\n")
project.stash()
if args.new_version and version_is_valid(args.new_version):
new_version = args.new_version
elif args.new_version:
msg = "Invalid version: '{0.version}'. Please use x.y.z format."
raise RuntimeError(msg.format(args))
elif project.version and args.ongeza_type:
new_version = project.ongeza(args.ongeza_type)
else:
error = "No git tags found, please run with '-s and -T' options"
raise RuntimeError(error)
return new_version
def cleanup(project, new_version):
msg = "Couldn't find a version to bump."
if project.bumped and not args.skip_commit:
message = args.commit_msg_format.format(version=new_version)
project.add(project.dirty_files)
project.commit(message)
if args.stash and project.stash_count:
project.unstash()
if project.bumped and (args.tag or args.sign):
message = args.tag_msg_format.format(version=new_version)
tag_text = args.tag_format.format(version=new_version)
project.tag(message, tag_text, sign=args.sign)
elif args.tag:
raise RuntimeError("%s Nothing to tag." % msg)
if project.bumped and args.push:
project.push()
elif args.push:
raise RuntimeError("%s Nothing to push." % msg)
def set_versions(project, new_version):
# in some cases, e.g., single file python modules, the versioned file
# can't be predetermined and we must do a 2nd search over all files
for wave in [1, 2]:
project.set_versions(new_version, wave)
if project.bumped:
msg = 'Bumped from version %s to %s.'
project.logger.info(msg, project.version, new_version)
break
else:
msg = "Couldn't find version '{0.version}' in any files."
raise RuntimeError(msg.format(project))
def run():
project = Project(args.dir, args.file, verbose=args.verbose)
if prelim_check(project):
exit(0)
try:
new_version = ongeza_project(project)
set_versions(project, new_version)
except RuntimeError as err:
project.logger.error(err)
exit(1)
try:
cleanup(project, new_version)
except RuntimeError as err:
project.logger.error(err)
exit(1)
exit(0)
if __name__ == "__main__":
run()
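# Hedged usage illustration (not part of the original module): typical command
# lines, assuming the installed `ongeza` console script dispatches to run()
# above. All flags shown are defined by the parser in this file.
#
#   ongeza                  # print the current git tag version
#   ongeza -t minor         # bump x.[y].0 and commit the changed files
#   ongeza -s 1.2.3 -T      # set an explicit version and create a git tag
#   ongeza -t patch -T -p   # bump x.y.[z], tag HEAD, and push to origin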
| 31.806122
| 78
| 0.659769
|
5abbf8e6ff8847971330035478442d874011f79f
| 1,410
|
py
|
Python
|
Medium/994. Rotting Oranges/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | 3
|
2020-05-09T12:55:09.000Z
|
2022-03-11T18:56:05.000Z
|
Medium/994. Rotting Oranges/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | null | null | null |
Medium/994. Rotting Oranges/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | 1
|
2022-03-11T18:56:16.000Z
|
2022-03-11T18:56:16.000Z
|
# 994. Rotting Oranges
# Runtime: 56 ms, faster than 50.05% of Python3 online submissions for Rotting Oranges.
# Memory Usage: 14.1 MB, less than 88.54% of Python3 online submissions for Rotting Oranges.
from collections import deque
class Solution:
def orangesRotting(self, grid: list[list[int]]) -> int:
Fresh, Rotten = 1, 2
que = deque()
fresh_num = 0
for row in range(len(grid)):
for col in range(len(grid[0])):
if grid[row][col] == Rotten:
que.append((row, col))
elif grid[row][col] == Fresh:
fresh_num += 1
que.append((-1, -1))
time = -1
dirs = ((-1, 0), (1, 0), (0, 1), (0, -1))
while que:
row, col = que.popleft()
if row < 0:
time += 1
if que:
que.append((-1, -1))
else:
for dir in dirs:
next_row, next_col = row + dir[0], col + dir[1]
if 0 <= next_row and next_row < len(grid) and 0 <= next_col and next_col < len(grid[0]):
if grid[next_row][next_col] == Fresh:
grid[next_row][next_col] = Rotten
que.append((next_row, next_col))
fresh_num -= 1
return time if fresh_num == 0 else -1
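# --- Illustrative check (not part of the original solution file) ---
# Running the solver on the classic example grid: the single rotten orange at
# (0, 0) needs 4 minutes to reach every fresh orange.
if __name__ == "__main__":
    example_grid = [[2, 1, 1],
                    [1, 1, 0],
                    [0, 1, 1]]
    print(Solution().orangesRotting(example_grid))  # expected output: 4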
| 34.390244
| 108
| 0.473759
|
0b4d7dbdc379bdbacda4112d2aa6969ec796308e
| 1,837
|
py
|
Python
|
news/models.py
|
Avneet5/news_agg
|
20fd1715002209d6411ec1e05c05fc1ed4005afe
|
[
"MIT"
] | null | null | null |
news/models.py
|
Avneet5/news_agg
|
20fd1715002209d6411ec1e05c05fc1ed4005afe
|
[
"MIT"
] | null | null | null |
news/models.py
|
Avneet5/news_agg
|
20fd1715002209d6411ec1e05c05fc1ed4005afe
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
import datetime
from users.models import Author
class Topic(models.Model):
name = models.CharField(max_length=100, unique=True)
def __repr__(self):
return self.name
def __str__(self):
return self.name
class Article(models.Model):
headline = models.CharField(max_length=100)
location = models.CharField(max_length=255, blank=True, null=True)
publish_date = models.DateField(default=datetime.date.today)
byline = models.CharField(max_length=150, blank=True)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
image_url = models.URLField(max_length=200)
content = models.TextField(max_length=3000)
article_topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
keywords = models.CharField(max_length=255)
def __repr__(self):
return self.headline
def __str__(self):
return self.headline
class Comment(models.Model):
comment_by = models.ForeignKey(User, on_delete=models.CASCADE)
date_posted = models.DateTimeField(default=timezone.now)  # pass the callable so the default is evaluated on each save, not once at import time
article_id = models.ForeignKey(Article, on_delete=models.CASCADE)
content = models.CharField(max_length=200)
class Tag(models.Model):
article_id = models.ForeignKey(Article, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
class View(models.Model):
article_id = models.ForeignKey(Article, on_delete=models.CASCADE)
user_id = models.ForeignKey(User, on_delete=models.CASCADE)
def __repr__(self):
return str(self.user_id) + str(self.article_id)
def __str__(self):
return str(self.user_id) + " viewed " + str(self.article_id)
# class Article_URL(models.Model):
# url = models.URLField(max_length=250)
| 29.629032
| 70
| 0.732172
|
a5640507ff92353768feef4a4a298dac106f9f26
| 22,231
|
py
|
Python
|
test/functional/wallet_multiwallet.py
|
blinkhash/blinkhash-core
|
e05662019c2fa4cb2dc3736f38e48492712c23b1
|
[
"MIT"
] | 3
|
2021-07-27T16:59:47.000Z
|
2021-12-31T20:55:46.000Z
|
test/functional/wallet_multiwallet.py
|
blinkhash/blinkhash-core
|
e05662019c2fa4cb2dc3736f38e48492712c23b1
|
[
"MIT"
] | null | null | null |
test/functional/wallet_multiwallet.py
|
blinkhash/blinkhash-core
|
e05662019c2fa4cb2dc3736f38e48492712c23b1
|
[
"MIT"
] | 1
|
2021-12-31T12:58:23.000Z
|
2021-12-31T12:58:23.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a blinkhashd node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
while True:
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(BlinkhashTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
self.generatetoaddress(node, nblocks=1, address=w5.getnewaddress(), sync_fun=self.no_op)
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 5000)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = f"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another instance of {self.config['environment']['PACKAGE_NAME']}?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
self.generatetoaddress(node, nblocks=1, address=wallets[0].getnewaddress(), sync_fun=self.no_op)
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 5000 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
self.generatetoaddress(node, nblocks=COINBASE_MATURITY + 1, address=w1.getnewaddress(), sync_fun=self.no_op)
assert_equal(w1.getbalance(), 10000)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
self.generatetoaddress(node, nblocks=1, address=w1.getnewaddress(), sync_fun=self.no_op)
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, f"Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another instance of {self.config['environment']['PACKAGE_NAME']}?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2")
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Unload w1 again, this time providing the wallet name twice
self.nodes[0].loadwallet("w1")
assert 'w1' in self.nodes[0].listwallets()
w1.unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
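    # Swap in an empty wallet file; the backed-up address should no longer belong to the wallet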
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
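    # Restore the backup; the address belongs to the wallet again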
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
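# Unloading the wallet on node0 releases the .walletlock, so node1 can now load it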
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
| 51.820513
| 273
| 0.67154
|
abcf8eea5dd499f3501146abc9e504d059970d18
| 90,337
|
py
|
Python
|
myenv/lib/python3.7/site-packages/google/protobuf/unittest_custom_options_pb2.py
|
theCydonian/AudioEyes
|
3dece4529b31e6c63771c4358457962999bda3b4
|
[
"MIT"
] | 4,768
|
2015-01-08T04:45:33.000Z
|
2022-03-28T07:32:59.000Z
|
myenv/lib/python3.7/site-packages/google/protobuf/unittest_custom_options_pb2.py
|
theCydonian/AudioEyes
|
3dece4529b31e6c63771c4358457962999bda3b4
|
[
"MIT"
] | 2,599
|
2015-01-06T21:51:28.000Z
|
2022-03-30T12:40:09.000Z
|
venv/Lib/site-packages/google/protobuf/unittest_custom_options_pb2.py
|
Ammar-Raneez/Craigslist_Scraper
|
4d8ef7d65f6cb4bbc7a461828ab02ec9e3006f71
|
[
"MIT"
] | 878
|
2015-01-10T00:03:30.000Z
|
2022-03-31T22:54:15.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/unittest_custom_options.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import service as _service
from google.protobuf import service_reflection
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/unittest_custom_options.proto',
package='protobuf_unittest',
syntax='proto2',
serialized_options=b'\200\001\001\210\001\001\220\001\001\360\350\301\035\352\255\300\345$\372\354\205;p\010d\022\016FileAnnotation\032\026\022\024NestedFileAnnotation\"\036\372\354\205;\031\022\027FileExtensionAnnotation*$\013\020\366\353\256\007\032\033\n\031EmbeddedMessageSetElement\014',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n-google/protobuf/unittest_custom_options.proto\x12\x11protobuf_unittest\x1a google/protobuf/descriptor.proto\"\xbf\x01\n\x1cTestMessageWithCustomOptions\x12\x1e\n\x06\x66ield1\x18\x01 \x01(\tB\x0e\x08\x01\xc1\xe0\xc3\x1d-\xe1u\n\x02\x00\x00\x00\x12\x15\n\x0boneof_field\x18\x02 \x01(\x05H\x00\";\n\x06\x41nEnum\x12\x0f\n\x0b\x41NENUM_VAL1\x10\x01\x12\x16\n\x0b\x41NENUM_VAL2\x10\x02\x1a\x05\xb0\x86\xfa\x05{\x1a\x08\xc5\xf6\xc9\x1d\xeb\xfc\xff\xff:\x10\x08\x00\xe0\xe9\xc2\x1d\xc8\xff\xff\xff\xff\xff\xff\xff\xff\x01\x42\x19\n\x07\x41nOneof\x12\x0e\xf8\xac\xc3\x1d\x9d\xff\xff\xff\xff\xff\xff\xff\xff\x01\"\x18\n\x16\x43ustomOptionFooRequest\"\x19\n\x17\x43ustomOptionFooResponse\"\x1e\n\x1c\x43ustomOptionFooClientMessage\"\x1e\n\x1c\x43ustomOptionFooServerMessage\"m\n\x1a\x44ummyMessageContainingEnum\"O\n\x0cTestEnumType\x12\x1a\n\x16TEST_OPTION_ENUM_TYPE1\x10\x16\x12#\n\x16TEST_OPTION_ENUM_TYPE2\x10\xe9\xff\xff\xff\xff\xff\xff\xff\xff\x01\"!\n\x1f\x44ummyMessageInvalidAsOptionType\"\x8a\x01\n\x1c\x43ustomOptionMinIntegerValues:j\xd0\xde\xb2\x1d\x00\xe8\xc6\xb2\x1d\x80\x80\x80\x80\xf8\xff\xff\xff\xff\x01\xb0\xbc\xb2\x1d\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01\x80\x93\xb2\x1d\x00\xf8\xf5\xb0\x1d\x00\x80\xc4\xb0\x1d\xff\xff\xff\xff\x0f\xf8\x97\xb0\x1d\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x9d\xf5\xaf\x1d\x00\x00\x00\x00\x91\xee\xaf\x1d\x00\x00\x00\x00\x00\x00\x00\x00\xad\x8d\xaf\x1d\x00\x00\x00\x80\x99\xd6\xa8\x1d\x00\x00\x00\x00\x00\x00\x00\x80\"\x91\x01\n\x1c\x43ustomOptionMaxIntegerValues:q\xd0\xde\xb2\x1d\x01\xe8\xc6\xb2\x1d\xff\xff\xff\xff\x07\xb0\xbc\xb2\x1d\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x80\x93\xb2\x1d\xff\xff\xff\xff\x0f\xf8\xf5\xb0\x1d\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\xc4\xb0\x1d\xfe\xff\xff\xff\x0f\xf8\x97\xb0\x1d\xfe\xff\xff\xff\xff\xff\xff\xff\xff\x01\x9d\xf5\xaf\x1d\xff\xff\xff\xff\x91\xee\xaf\x1d\xff\xff\xff\xff\xff\xff\xff\xff\xad\x8d\xaf\x1d\xff\xff\xff\x7f\x99\xd6\xa8\x1d\xff\xff\xff\xff\xff\xff\xff\x7f\"n\n\x17\x43ustomOptionOtherValues:S\xe8\xc6\xb2\x1d\x9c\xff\xff\xff\xff\xff\xff\xff\xff\x01\xf5\xdf\xa3\x1d\xe7\x87\x45\x41\xe9\xdc\xa2\x1d\xfbY\x8c\x42\xca\xc0\xf3?\xaa\xdc\xa2\x1d\x0eHello, \"World\"\xb2\xd9\xa2\x1d\x0bHello\x00World\x88\xd9\xa2\x1d\xe9\xff\xff\xff\xff\xff\xff\xff\xff\x01\"4\n\x1cSettingRealsFromPositiveInts:\x14\xf5\xdf\xa3\x1d\x00\x00@A\xe9\xdc\xa2\x1d\x00\x00\x00\x00\x00@c@\"4\n\x1cSettingRealsFromNegativeInts:\x14\xf5\xdf\xa3\x1d\x00\x00@\xc1\xe9\xdc\xa2\x1d\x00\x00\x00\x00\x00@c\xc0\"U\n\x12\x43omplexOptionType1\x12\x0b\n\x03\x66oo\x18\x01 \x01(\x05\x12\x0c\n\x04\x66oo2\x18\x02 \x01(\x05\x12\x0c\n\x04\x66oo3\x18\x03 \x01(\x05\x12\x0c\n\x04\x66oo4\x18\x04 \x03(\x05*\x08\x08\x64\x10\x80\x80\x80\x80\x02\"\x8b\x03\n\x12\x43omplexOptionType2\x12\x32\n\x03\x62\x61r\x18\x01 \x01(\x0b\x32%.protobuf_unittest.ComplexOptionType1\x12\x0b\n\x03\x62\x61z\x18\x02 \x01(\x05\x12\x46\n\x04\x66red\x18\x03 \x01(\x0b\x32\x38.protobuf_unittest.ComplexOptionType2.ComplexOptionType4\x12H\n\x06\x62\x61rney\x18\x04 \x03(\x0b\x32\x38.protobuf_unittest.ComplexOptionType2.ComplexOptionType4\x1a\x97\x01\n\x12\x43omplexOptionType4\x12\r\n\x05waldo\x18\x01 \x01(\x05\x32r\n\x0c\x63omplex_opt4\x12\x1f.google.protobuf.MessageOptions\x18\x8a\xf5\xd1\x03 \x01(\x0b\x32\x38.protobuf_unittest.ComplexOptionType2.ComplexOptionType4*\x08\x08\x64\x10\x80\x80\x80\x80\x02\"\x9c\x01\n\x12\x43omplexOptionType3\x12\x0b\n\x03qux\x18\x01 \x01(\x05\x12T\n\x12\x63omplexoptiontype5\x18\x02 
\x01(\n28.protobuf_unittest.ComplexOptionType3.ComplexOptionType5\x1a#\n\x12\x43omplexOptionType5\x12\r\n\x05plugh\x18\x03 \x01(\x05\"\x1f\n\x0b\x43omplexOpt6\x12\x10\n\x05xyzzy\x18\xdf\xbf\xcf\x03 \x01(\x05\"\xf1\x01\n\x15VariousComplexOptions:\xd7\x01\xa2\xe2\x95\x1d\x02\x08*\xa2\xe2\x95\x1d\x06\xd8\x85\x9e\x1d\xc4\x02\xa2\xe2\x95\x1d\x08\x92\xf5\x9d\x1d\x03\x08\xec\x06\xa2\xe2\x95\x1d\x02 c\xa2\xe2\x95\x1d\x02 X\xaa\xfd\x90\x1d\x03\x10\xdb\x07\xaa\xfd\x90\x1d\x06\xf8\xe6\x97\x1d\x8e\x05\xaa\xfd\x90\x1d\x05\n\x03\x08\xe7\x05\xaa\xfd\x90\x1d\x08\n\x06\xd8\x85\x9e\x1d\xcf\x0f\xaa\xfd\x90\x1d\n\n\x08\x92\xf5\x9d\x1d\x03\x08\xd8\x0f\xaa\xfd\x90\x1d\x08\xc2\xac\x97\x1d\x03\x08\xe5\x05\xaa\xfd\x90\x1d\x0b\xc2\xac\x97\x1d\x06\xd8\x85\x9e\x1d\xce\x0f\xaa\xfd\x90\x1d\r\xc2\xac\x97\x1d\x08\x92\xf5\x9d\x1d\x03\x08\xc9\x10\xd2\xa8\x8f\x1d\x03\x08\xb3\x0f\xaa\xfd\x90\x1d\x05\x1a\x03\x08\xc1\x02\xaa\xfd\x90\x1d\x04\"\x02\x08\x65\xaa\xfd\x90\x1d\x05\"\x03\x08\xd4\x01\xfa\xde\x90\x1d\x02\x08\t\xfa\xde\x90\x1d\x04\x13\x18\x16\x14\xe3\xdc\xfc\x1c\xf8\xfd\xfb\x1c\x18\xe4\xdc\xfc\x1c\"#\n\x13\x41ggregateMessageSet*\x08\x08\x04\x10\xff\xff\xff\xff\x07:\x02\x08\x01\"\xa0\x01\n\x1a\x41ggregateMessageSetElement\x12\t\n\x01s\x18\x01 \x01(\t2w\n\x15message_set_extension\x12&.protobuf_unittest.AggregateMessageSet\x18\xf6\xeb\xae\x07 \x01(\x0b\x32-.protobuf_unittest.AggregateMessageSetElement\"\xfd\x01\n\tAggregate\x12\t\n\x01i\x18\x01 \x01(\x05\x12\t\n\x01s\x18\x02 \x01(\t\x12)\n\x03sub\x18\x03 \x01(\x0b\x32\x1c.protobuf_unittest.Aggregate\x12*\n\x04\x66ile\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x34\n\x04mset\x18\x05 \x01(\x0b\x32&.protobuf_unittest.AggregateMessageSet2M\n\x06nested\x12\x1c.google.protobuf.FileOptions\x18\xa7\xd1\xb0\x07 \x01(\x0b\x32\x1c.protobuf_unittest.Aggregate\"Y\n\x10\x41ggregateMessage\x12)\n\tfieldname\x18\x01 \x01(\x05\x42\x16\xf2\xa1\x87;\x11\x12\x0f\x46ieldAnnotation:\x1a\xc2\xd1\x86;\x15\x08\x65\x12\x11MessageAnnotation\"\xc9\x01\n\x10NestedOptionType\x1a;\n\rNestedMessage\x12\"\n\x0cnested_field\x18\x01 \x01(\x05\x42\x0c\xc1\xe0\xc3\x1d\xea\x03\x00\x00\x00\x00\x00\x00:\x06\xe0\xe9\xc2\x1d\xe9\x07\"5\n\nNestedEnum\x12\x1d\n\x11NESTED_ENUM_VALUE\x10\x01\x1a\x06\xb0\x86\xfa\x05\xec\x07\x1a\x08\xc5\xf6\xc9\x1d\xeb\x03\x00\x00\x32\x41\n\x10nested_extension\x12\x1c.google.protobuf.FileOptions\x18\xfd\xf8\xe2\x03 \x01(\x05\x42\x06\xc8\x8b\xca\x1d\xed\x07\"d\n\rOldOptionType\x12\x38\n\x05value\x18\x01 \x02(\x0e\x32).protobuf_unittest.OldOptionType.TestEnum\"\x19\n\x08TestEnum\x12\r\n\tOLD_VALUE\x10\x00\"s\n\rNewOptionType\x12\x38\n\x05value\x18\x01 
\x02(\x0e\x32).protobuf_unittest.NewOptionType.TestEnum\"(\n\x08TestEnum\x12\r\n\tOLD_VALUE\x10\x00\x12\r\n\tNEW_VALUE\x10\x01\"-\n!TestMessageWithRequiredEnumOption:\x08\xfa\xe8\xfc\x94\x03\x02\x08\x00*6\n\nMethodOpt1\x12\x13\n\x0fMETHODOPT1_VAL1\x10\x01\x12\x13\n\x0fMETHODOPT1_VAL2\x10\x02*M\n\rAggregateEnum\x12%\n\x05VALUE\x10\x01\x1a\x1a\xca\xfc\x89;\x15\x12\x13\x45numValueAnnotation\x1a\x15\x92\x95\x88;\x10\x12\x0e\x45numAnnotation2\x8e\x01\n\x1cTestServiceWithCustomOptions\x12\x63\n\x03\x46oo\x12).protobuf_unittest.CustomOptionFooRequest\x1a*.protobuf_unittest.CustomOptionFooResponse\"\x05\xe0\xfa\x8c\x1e\x02\x1a\t\x90\xb2\x8b\x1e\xd3\xdb\x80\xcbI2\x99\x01\n\x10\x41ggregateService\x12k\n\x06Method\x12#.protobuf_unittest.AggregateMessage\x1a#.protobuf_unittest.AggregateMessage\"\x17\xca\xc8\x96;\x12\x12\x10MethodAnnotation\x1a\x18\xca\xfb\x8e;\x13\x12\x11ServiceAnnotation:2\n\tfile_opt1\x12\x1c.google.protobuf.FileOptions\x18\x8e\x9d\xd8\x03 \x01(\x04:8\n\x0cmessage_opt1\x12\x1f.google.protobuf.MessageOptions\x18\x9c\xad\xd8\x03 \x01(\x05:4\n\nfield_opt1\x12\x1d.google.protobuf.FieldOptions\x18\x88\xbc\xd8\x03 \x01(\x06:8\n\nfield_opt2\x12\x1d.google.protobuf.FieldOptions\x18\xb9\xa1\xd9\x03 \x01(\x05:\x02\x34\x32:4\n\noneof_opt1\x12\x1d.google.protobuf.OneofOptions\x18\xcf\xb5\xd8\x03 \x01(\x05:2\n\tenum_opt1\x12\x1c.google.protobuf.EnumOptions\x18\xe8\x9e\xd9\x03 \x01(\x0f:<\n\x0f\x65num_value_opt1\x12!.google.protobuf.EnumValueOptions\x18\xe6\xa0_ \x01(\x05:8\n\x0cservice_opt1\x12\x1f.google.protobuf.ServiceOptions\x18\xa2\xb6\xe1\x03 \x01(\x12:U\n\x0bmethod_opt1\x12\x1e.google.protobuf.MethodOptions\x18\xac\xcf\xe1\x03 \x01(\x0e\x32\x1d.protobuf_unittest.MethodOpt1:4\n\x08\x62ool_opt\x12\x1f.google.protobuf.MessageOptions\x18\xea\xab\xd6\x03 \x01(\x08:5\n\tint32_opt\x12\x1f.google.protobuf.MessageOptions\x18\xed\xa8\xd6\x03 \x01(\x05:5\n\tint64_opt\x12\x1f.google.protobuf.MessageOptions\x18\xc6\xa7\xd6\x03 \x01(\x03:6\n\nuint32_opt\x12\x1f.google.protobuf.MessageOptions\x18\xb0\xa2\xd6\x03 \x01(\r:6\n\nuint64_opt\x12\x1f.google.protobuf.MessageOptions\x18\xdf\x8e\xd6\x03 \x01(\x04:6\n\nsint32_opt\x12\x1f.google.protobuf.MessageOptions\x18\xc0\x88\xd6\x03 \x01(\x11:6\n\nsint64_opt\x12\x1f.google.protobuf.MessageOptions\x18\xff\x82\xd6\x03 \x01(\x12:7\n\x0b\x66ixed32_opt\x12\x1f.google.protobuf.MessageOptions\x18\xd3\xfe\xd5\x03 \x01(\x07:7\n\x0b\x66ixed64_opt\x12\x1f.google.protobuf.MessageOptions\x18\xe2\xfd\xd5\x03 \x01(\x06:8\n\x0csfixed32_opt\x12\x1f.google.protobuf.MessageOptions\x18\xd5\xf1\xd5\x03 \x01(\x0f:8\n\x0csfixed64_opt\x12\x1f.google.protobuf.MessageOptions\x18\xe3\x8a\xd5\x03 \x01(\x10:5\n\tfloat_opt\x12\x1f.google.protobuf.MessageOptions\x18\xfe\xbb\xd4\x03 \x01(\x02:6\n\ndouble_opt\x12\x1f.google.protobuf.MessageOptions\x18\xcd\xab\xd4\x03 \x01(\x01:6\n\nstring_opt\x12\x1f.google.protobuf.MessageOptions\x18\xc5\xab\xd4\x03 \x01(\t:5\n\tbytes_opt\x12\x1f.google.protobuf.MessageOptions\x18\x96\xab\xd4\x03 \x01(\x0c:p\n\x08\x65num_opt\x12\x1f.google.protobuf.MessageOptions\x18\x91\xab\xd4\x03 \x01(\x0e\x32:.protobuf_unittest.DummyMessageContainingEnum.TestEnumType:p\n\x10message_type_opt\x12\x1f.google.protobuf.MessageOptions\x18\xaf\xf2\xd3\x03 \x01(\x0b\x32\x32.protobuf_unittest.DummyMessageInvalidAsOptionType:6\n\x04quux\x12%.protobuf_unittest.ComplexOptionType1\x18\xdb\xe0\xd3\x03 \x01(\x05:^\n\x05\x63orge\x12%.protobuf_unittest.ComplexOptionType1\x18\xd2\xde\xd3\x03 
\x01(\x0b\x32%.protobuf_unittest.ComplexOptionType3:8\n\x06grault\x12%.protobuf_unittest.ComplexOptionType2\x18\xef\xfc\xd2\x03 \x01(\x05:_\n\x06garply\x12%.protobuf_unittest.ComplexOptionType2\x18\xc8\xf5\xd2\x03 \x01(\x0b\x32%.protobuf_unittest.ComplexOptionType1:_\n\x0c\x63omplex_opt1\x12\x1f.google.protobuf.MessageOptions\x18\xa4\xdc\xd2\x03 \x01(\x0b\x32%.protobuf_unittest.ComplexOptionType1:_\n\x0c\x63omplex_opt2\x12\x1f.google.protobuf.MessageOptions\x18\xd5\x8f\xd2\x03 \x01(\x0b\x32%.protobuf_unittest.ComplexOptionType2:_\n\x0c\x63omplex_opt3\x12\x1f.google.protobuf.MessageOptions\x18\xef\x8b\xd2\x03 \x01(\x0b\x32%.protobuf_unittest.ComplexOptionType3:W\n\x0b\x63omplexopt6\x12\x1f.google.protobuf.MessageOptions\x18\xcc\xcb\xcf\x03 \x01(\n2\x1e.protobuf_unittest.ComplexOpt6:N\n\x07\x66ileopt\x12\x1c.google.protobuf.FileOptions\x18\xcf\xdd\xb0\x07 \x01(\x0b\x32\x1c.protobuf_unittest.Aggregate:P\n\x06msgopt\x12\x1f.google.protobuf.MessageOptions\x18\x98\xea\xb0\x07 \x01(\x0b\x32\x1c.protobuf_unittest.Aggregate:P\n\x08\x66ieldopt\x12\x1d.google.protobuf.FieldOptions\x18\x9e\xf4\xb0\x07 \x01(\x0b\x32\x1c.protobuf_unittest.Aggregate:N\n\x07\x65numopt\x12\x1c.google.protobuf.EnumOptions\x18\xd2\x82\xb1\x07 \x01(\x0b\x32\x1c.protobuf_unittest.Aggregate:V\n\nenumvalopt\x12!.google.protobuf.EnumValueOptions\x18\xc9\x9f\xb1\x07 \x01(\x0b\x32\x1c.protobuf_unittest.Aggregate:T\n\nserviceopt\x12\x1f.google.protobuf.ServiceOptions\x18\xb9\xef\xb1\x07 \x01(\x0b\x32\x1c.protobuf_unittest.Aggregate:R\n\tmethodopt\x12\x1e.google.protobuf.MethodOptions\x18\x89\xe9\xb2\x07 \x01(\x0b\x32\x1c.protobuf_unittest.Aggregate:_\n\x11required_enum_opt\x12\x1f.google.protobuf.MessageOptions\x18\x8f\xcd\xcf\x32 \x01(\x0b\x32 .protobuf_unittest.OldOptionTypeB\x87\x01\x80\x01\x01\x88\x01\x01\x90\x01\x01\xf0\xe8\xc1\x1d\xea\xad\xc0\xe5$\xfa\xec\x85;p\x08\x64\x12\x0e\x46ileAnnotation\x1a\x16\x12\x14NestedFileAnnotation\"\x1e\xfa\xec\x85;\x19\x12\x17\x46ileExtensionAnnotation*$\x0b\x10\xf6\xeb\xae\x07\x1a\x1b\n\x19\x45mbeddedMessageSetElement\x0c'
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
_METHODOPT1 = _descriptor.EnumDescriptor(
name='MethodOpt1',
full_name='protobuf_unittest.MethodOpt1',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='METHODOPT1_VAL1', index=0, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='METHODOPT1_VAL2', index=1, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=3006,
serialized_end=3060,
)
_sym_db.RegisterEnumDescriptor(_METHODOPT1)
MethodOpt1 = enum_type_wrapper.EnumTypeWrapper(_METHODOPT1)
_AGGREGATEENUM = _descriptor.EnumDescriptor(
name='AggregateEnum',
full_name='protobuf_unittest.AggregateEnum',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='VALUE', index=0, number=1,
serialized_options=b'\312\374\211;\025\022\023EnumValueAnnotation',
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=b'\222\225\210;\020\022\016EnumAnnotation',
serialized_start=3062,
serialized_end=3139,
)
_sym_db.RegisterEnumDescriptor(_AGGREGATEENUM)
AggregateEnum = enum_type_wrapper.EnumTypeWrapper(_AGGREGATEENUM)
METHODOPT1_VAL1 = 1
METHODOPT1_VAL2 = 2
VALUE = 1
FILE_OPT1_FIELD_NUMBER = 7736974
file_opt1 = _descriptor.FieldDescriptor(
name='file_opt1', full_name='protobuf_unittest.file_opt1', index=0,
number=7736974, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
MESSAGE_OPT1_FIELD_NUMBER = 7739036
message_opt1 = _descriptor.FieldDescriptor(
name='message_opt1', full_name='protobuf_unittest.message_opt1', index=1,
number=7739036, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
FIELD_OPT1_FIELD_NUMBER = 7740936
field_opt1 = _descriptor.FieldDescriptor(
name='field_opt1', full_name='protobuf_unittest.field_opt1', index=2,
number=7740936, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
FIELD_OPT2_FIELD_NUMBER = 7753913
field_opt2 = _descriptor.FieldDescriptor(
name='field_opt2', full_name='protobuf_unittest.field_opt2', index=3,
number=7753913, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=42,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
ONEOF_OPT1_FIELD_NUMBER = 7740111
oneof_opt1 = _descriptor.FieldDescriptor(
name='oneof_opt1', full_name='protobuf_unittest.oneof_opt1', index=4,
number=7740111, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
ENUM_OPT1_FIELD_NUMBER = 7753576
enum_opt1 = _descriptor.FieldDescriptor(
name='enum_opt1', full_name='protobuf_unittest.enum_opt1', index=5,
number=7753576, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
ENUM_VALUE_OPT1_FIELD_NUMBER = 1560678
enum_value_opt1 = _descriptor.FieldDescriptor(
name='enum_value_opt1', full_name='protobuf_unittest.enum_value_opt1', index=6,
number=1560678, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
SERVICE_OPT1_FIELD_NUMBER = 7887650
service_opt1 = _descriptor.FieldDescriptor(
name='service_opt1', full_name='protobuf_unittest.service_opt1', index=7,
number=7887650, type=18, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
METHOD_OPT1_FIELD_NUMBER = 7890860
method_opt1 = _descriptor.FieldDescriptor(
name='method_opt1', full_name='protobuf_unittest.method_opt1', index=8,
number=7890860, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
BOOL_OPT_FIELD_NUMBER = 7706090
bool_opt = _descriptor.FieldDescriptor(
name='bool_opt', full_name='protobuf_unittest.bool_opt', index=9,
number=7706090, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
INT32_OPT_FIELD_NUMBER = 7705709
int32_opt = _descriptor.FieldDescriptor(
name='int32_opt', full_name='protobuf_unittest.int32_opt', index=10,
number=7705709, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
INT64_OPT_FIELD_NUMBER = 7705542
int64_opt = _descriptor.FieldDescriptor(
name='int64_opt', full_name='protobuf_unittest.int64_opt', index=11,
number=7705542, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
UINT32_OPT_FIELD_NUMBER = 7704880
uint32_opt = _descriptor.FieldDescriptor(
name='uint32_opt', full_name='protobuf_unittest.uint32_opt', index=12,
number=7704880, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
UINT64_OPT_FIELD_NUMBER = 7702367
uint64_opt = _descriptor.FieldDescriptor(
name='uint64_opt', full_name='protobuf_unittest.uint64_opt', index=13,
number=7702367, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
SINT32_OPT_FIELD_NUMBER = 7701568
sint32_opt = _descriptor.FieldDescriptor(
name='sint32_opt', full_name='protobuf_unittest.sint32_opt', index=14,
number=7701568, type=17, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
SINT64_OPT_FIELD_NUMBER = 7700863
sint64_opt = _descriptor.FieldDescriptor(
name='sint64_opt', full_name='protobuf_unittest.sint64_opt', index=15,
number=7700863, type=18, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
FIXED32_OPT_FIELD_NUMBER = 7700307
fixed32_opt = _descriptor.FieldDescriptor(
name='fixed32_opt', full_name='protobuf_unittest.fixed32_opt', index=16,
number=7700307, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
FIXED64_OPT_FIELD_NUMBER = 7700194
fixed64_opt = _descriptor.FieldDescriptor(
name='fixed64_opt', full_name='protobuf_unittest.fixed64_opt', index=17,
number=7700194, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
SFIXED32_OPT_FIELD_NUMBER = 7698645
sfixed32_opt = _descriptor.FieldDescriptor(
name='sfixed32_opt', full_name='protobuf_unittest.sfixed32_opt', index=18,
number=7698645, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
SFIXED64_OPT_FIELD_NUMBER = 7685475
sfixed64_opt = _descriptor.FieldDescriptor(
name='sfixed64_opt', full_name='protobuf_unittest.sfixed64_opt', index=19,
number=7685475, type=16, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
FLOAT_OPT_FIELD_NUMBER = 7675390
float_opt = _descriptor.FieldDescriptor(
name='float_opt', full_name='protobuf_unittest.float_opt', index=20,
number=7675390, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
DOUBLE_OPT_FIELD_NUMBER = 7673293
double_opt = _descriptor.FieldDescriptor(
name='double_opt', full_name='protobuf_unittest.double_opt', index=21,
number=7673293, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
STRING_OPT_FIELD_NUMBER = 7673285
string_opt = _descriptor.FieldDescriptor(
name='string_opt', full_name='protobuf_unittest.string_opt', index=22,
number=7673285, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
BYTES_OPT_FIELD_NUMBER = 7673238
bytes_opt = _descriptor.FieldDescriptor(
name='bytes_opt', full_name='protobuf_unittest.bytes_opt', index=23,
number=7673238, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
ENUM_OPT_FIELD_NUMBER = 7673233
enum_opt = _descriptor.FieldDescriptor(
name='enum_opt', full_name='protobuf_unittest.enum_opt', index=24,
number=7673233, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=22,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
MESSAGE_TYPE_OPT_FIELD_NUMBER = 7665967
message_type_opt = _descriptor.FieldDescriptor(
name='message_type_opt', full_name='protobuf_unittest.message_type_opt', index=25,
number=7665967, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
QUUX_FIELD_NUMBER = 7663707
quux = _descriptor.FieldDescriptor(
name='quux', full_name='protobuf_unittest.quux', index=26,
number=7663707, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
CORGE_FIELD_NUMBER = 7663442
corge = _descriptor.FieldDescriptor(
name='corge', full_name='protobuf_unittest.corge', index=27,
number=7663442, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
GRAULT_FIELD_NUMBER = 7650927
grault = _descriptor.FieldDescriptor(
name='grault', full_name='protobuf_unittest.grault', index=28,
number=7650927, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
GARPLY_FIELD_NUMBER = 7649992
garply = _descriptor.FieldDescriptor(
name='garply', full_name='protobuf_unittest.garply', index=29,
number=7649992, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
COMPLEX_OPT1_FIELD_NUMBER = 7646756
complex_opt1 = _descriptor.FieldDescriptor(
name='complex_opt1', full_name='protobuf_unittest.complex_opt1', index=30,
number=7646756, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
COMPLEX_OPT2_FIELD_NUMBER = 7636949
complex_opt2 = _descriptor.FieldDescriptor(
name='complex_opt2', full_name='protobuf_unittest.complex_opt2', index=31,
number=7636949, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
COMPLEX_OPT3_FIELD_NUMBER = 7636463
complex_opt3 = _descriptor.FieldDescriptor(
name='complex_opt3', full_name='protobuf_unittest.complex_opt3', index=32,
number=7636463, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
COMPLEXOPT6_FIELD_NUMBER = 7595468
complexopt6 = _descriptor.FieldDescriptor(
name='complexopt6', full_name='protobuf_unittest.complexopt6', index=33,
number=7595468, type=10, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
FILEOPT_FIELD_NUMBER = 15478479
fileopt = _descriptor.FieldDescriptor(
name='fileopt', full_name='protobuf_unittest.fileopt', index=34,
number=15478479, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
MSGOPT_FIELD_NUMBER = 15480088
msgopt = _descriptor.FieldDescriptor(
name='msgopt', full_name='protobuf_unittest.msgopt', index=35,
number=15480088, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
FIELDOPT_FIELD_NUMBER = 15481374
fieldopt = _descriptor.FieldDescriptor(
name='fieldopt', full_name='protobuf_unittest.fieldopt', index=36,
number=15481374, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
ENUMOPT_FIELD_NUMBER = 15483218
enumopt = _descriptor.FieldDescriptor(
name='enumopt', full_name='protobuf_unittest.enumopt', index=37,
number=15483218, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
ENUMVALOPT_FIELD_NUMBER = 15486921
enumvalopt = _descriptor.FieldDescriptor(
name='enumvalopt', full_name='protobuf_unittest.enumvalopt', index=38,
number=15486921, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
SERVICEOPT_FIELD_NUMBER = 15497145
serviceopt = _descriptor.FieldDescriptor(
name='serviceopt', full_name='protobuf_unittest.serviceopt', index=39,
number=15497145, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
METHODOPT_FIELD_NUMBER = 15512713
methodopt = _descriptor.FieldDescriptor(
name='methodopt', full_name='protobuf_unittest.methodopt', index=40,
number=15512713, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
REQUIRED_ENUM_OPT_FIELD_NUMBER = 106161807
required_enum_opt = _descriptor.FieldDescriptor(
name='required_enum_opt', full_name='protobuf_unittest.required_enum_opt', index=41,
number=106161807, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_TESTMESSAGEWITHCUSTOMOPTIONS_ANENUM = _descriptor.EnumDescriptor(
name='AnEnum',
full_name='protobuf_unittest.TestMessageWithCustomOptions.AnEnum',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ANENUM_VAL1', index=0, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ANENUM_VAL2', index=1, number=2,
serialized_options=b'\260\206\372\005{',
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=b'\305\366\311\035\353\374\377\377',
serialized_start=190,
serialized_end=249,
)
_sym_db.RegisterEnumDescriptor(_TESTMESSAGEWITHCUSTOMOPTIONS_ANENUM)
_DUMMYMESSAGECONTAININGENUM_TESTENUMTYPE = _descriptor.EnumDescriptor(
name='TestEnumType',
full_name='protobuf_unittest.DummyMessageContainingEnum.TestEnumType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='TEST_OPTION_ENUM_TYPE1', index=0, number=22,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TEST_OPTION_ENUM_TYPE2', index=1, number=-23,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=443,
serialized_end=522,
)
_sym_db.RegisterEnumDescriptor(_DUMMYMESSAGECONTAININGENUM_TESTENUMTYPE)
_NESTEDOPTIONTYPE_NESTEDENUM = _descriptor.EnumDescriptor(
name='NestedEnum',
full_name='protobuf_unittest.NestedOptionType.NestedEnum',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NESTED_ENUM_VALUE', index=0, number=1,
serialized_options=b'\260\206\372\005\354\007',
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=b'\305\366\311\035\353\003\000\000',
serialized_start=2618,
serialized_end=2671,
)
_sym_db.RegisterEnumDescriptor(_NESTEDOPTIONTYPE_NESTEDENUM)
_OLDOPTIONTYPE_TESTENUM = _descriptor.EnumDescriptor(
name='TestEnum',
full_name='protobuf_unittest.OldOptionType.TestEnum',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OLD_VALUE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2815,
serialized_end=2840,
)
_sym_db.RegisterEnumDescriptor(_OLDOPTIONTYPE_TESTENUM)
_NEWOPTIONTYPE_TESTENUM = _descriptor.EnumDescriptor(
name='TestEnum',
full_name='protobuf_unittest.NewOptionType.TestEnum',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OLD_VALUE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='NEW_VALUE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2917,
serialized_end=2957,
)
_sym_db.RegisterEnumDescriptor(_NEWOPTIONTYPE_TESTENUM)
_TESTMESSAGEWITHCUSTOMOPTIONS = _descriptor.Descriptor(
name='TestMessageWithCustomOptions',
full_name='protobuf_unittest.TestMessageWithCustomOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='field1', full_name='protobuf_unittest.TestMessageWithCustomOptions.field1', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\010\001\301\340\303\035-\341u\n\002\000\000\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='oneof_field', full_name='protobuf_unittest.TestMessageWithCustomOptions.oneof_field', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_TESTMESSAGEWITHCUSTOMOPTIONS_ANENUM,
],
serialized_options=b'\010\000\340\351\302\035\310\377\377\377\377\377\377\377\377\001',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='AnOneof', full_name='protobuf_unittest.TestMessageWithCustomOptions.AnOneof',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370\254\303\035\235\377\377\377\377\377\377\377\377\001'),
],
serialized_start=103,
serialized_end=294,
)
_CUSTOMOPTIONFOOREQUEST = _descriptor.Descriptor(
name='CustomOptionFooRequest',
full_name='protobuf_unittest.CustomOptionFooRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=296,
serialized_end=320,
)
_CUSTOMOPTIONFOORESPONSE = _descriptor.Descriptor(
name='CustomOptionFooResponse',
full_name='protobuf_unittest.CustomOptionFooResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=322,
serialized_end=347,
)
_CUSTOMOPTIONFOOCLIENTMESSAGE = _descriptor.Descriptor(
name='CustomOptionFooClientMessage',
full_name='protobuf_unittest.CustomOptionFooClientMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=349,
serialized_end=379,
)
_CUSTOMOPTIONFOOSERVERMESSAGE = _descriptor.Descriptor(
name='CustomOptionFooServerMessage',
full_name='protobuf_unittest.CustomOptionFooServerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=381,
serialized_end=411,
)
_DUMMYMESSAGECONTAININGENUM = _descriptor.Descriptor(
name='DummyMessageContainingEnum',
full_name='protobuf_unittest.DummyMessageContainingEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_DUMMYMESSAGECONTAININGENUM_TESTENUMTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=413,
serialized_end=522,
)
_DUMMYMESSAGEINVALIDASOPTIONTYPE = _descriptor.Descriptor(
name='DummyMessageInvalidAsOptionType',
full_name='protobuf_unittest.DummyMessageInvalidAsOptionType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=524,
serialized_end=557,
)
_CUSTOMOPTIONMININTEGERVALUES = _descriptor.Descriptor(
name='CustomOptionMinIntegerValues',
full_name='protobuf_unittest.CustomOptionMinIntegerValues',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\320\336\262\035\000\350\306\262\035\200\200\200\200\370\377\377\377\377\001\260\274\262\035\200\200\200\200\200\200\200\200\200\001\200\223\262\035\000\370\365\260\035\000\200\304\260\035\377\377\377\377\017\370\227\260\035\377\377\377\377\377\377\377\377\377\001\235\365\257\035\000\000\000\000\221\356\257\035\000\000\000\000\000\000\000\000\255\215\257\035\000\000\000\200\231\326\250\035\000\000\000\000\000\000\000\200',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=560,
serialized_end=698,
)
_CUSTOMOPTIONMAXINTEGERVALUES = _descriptor.Descriptor(
name='CustomOptionMaxIntegerValues',
full_name='protobuf_unittest.CustomOptionMaxIntegerValues',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\320\336\262\035\001\350\306\262\035\377\377\377\377\007\260\274\262\035\377\377\377\377\377\377\377\377\177\200\223\262\035\377\377\377\377\017\370\365\260\035\377\377\377\377\377\377\377\377\377\001\200\304\260\035\376\377\377\377\017\370\227\260\035\376\377\377\377\377\377\377\377\377\001\235\365\257\035\377\377\377\377\221\356\257\035\377\377\377\377\377\377\377\377\255\215\257\035\377\377\377\177\231\326\250\035\377\377\377\377\377\377\377\177',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=701,
serialized_end=846,
)
_CUSTOMOPTIONOTHERVALUES = _descriptor.Descriptor(
name='CustomOptionOtherValues',
full_name='protobuf_unittest.CustomOptionOtherValues',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\350\306\262\035\234\377\377\377\377\377\377\377\377\001\365\337\243\035\347\207EA\351\334\242\035\373Y\214B\312\300\363?\252\334\242\035\016Hello, \"World\"\262\331\242\035\013Hello\000World\210\331\242\035\351\377\377\377\377\377\377\377\377\001',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=848,
serialized_end=958,
)
_SETTINGREALSFROMPOSITIVEINTS = _descriptor.Descriptor(
name='SettingRealsFromPositiveInts',
full_name='protobuf_unittest.SettingRealsFromPositiveInts',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\365\337\243\035\000\000@A\351\334\242\035\000\000\000\000\000@c@',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=960,
serialized_end=1012,
)
_SETTINGREALSFROMNEGATIVEINTS = _descriptor.Descriptor(
name='SettingRealsFromNegativeInts',
full_name='protobuf_unittest.SettingRealsFromNegativeInts',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\365\337\243\035\000\000@\301\351\334\242\035\000\000\000\000\000@c\300',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1014,
serialized_end=1066,
)
_COMPLEXOPTIONTYPE1 = _descriptor.Descriptor(
name='ComplexOptionType1',
full_name='protobuf_unittest.ComplexOptionType1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='foo', full_name='protobuf_unittest.ComplexOptionType1.foo', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='foo2', full_name='protobuf_unittest.ComplexOptionType1.foo2', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='foo3', full_name='protobuf_unittest.ComplexOptionType1.foo3', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='foo4', full_name='protobuf_unittest.ComplexOptionType1.foo4', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(100, 536870912), ],
oneofs=[
],
serialized_start=1068,
serialized_end=1153,
)
_COMPLEXOPTIONTYPE2_COMPLEXOPTIONTYPE4 = _descriptor.Descriptor(
name='ComplexOptionType4',
full_name='protobuf_unittest.ComplexOptionType2.ComplexOptionType4',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='waldo', full_name='protobuf_unittest.ComplexOptionType2.ComplexOptionType4.waldo', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
_descriptor.FieldDescriptor(
name='complex_opt4', full_name='protobuf_unittest.ComplexOptionType2.ComplexOptionType4.complex_opt4', index=0,
number=7633546, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1390,
serialized_end=1541,
)
_COMPLEXOPTIONTYPE2 = _descriptor.Descriptor(
name='ComplexOptionType2',
full_name='protobuf_unittest.ComplexOptionType2',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='bar', full_name='protobuf_unittest.ComplexOptionType2.bar', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='baz', full_name='protobuf_unittest.ComplexOptionType2.baz', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fred', full_name='protobuf_unittest.ComplexOptionType2.fred', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='barney', full_name='protobuf_unittest.ComplexOptionType2.barney', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_COMPLEXOPTIONTYPE2_COMPLEXOPTIONTYPE4, ],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(100, 536870912), ],
oneofs=[
],
serialized_start=1156,
serialized_end=1551,
)
_COMPLEXOPTIONTYPE3_COMPLEXOPTIONTYPE5 = _descriptor.Descriptor(
name='ComplexOptionType5',
full_name='protobuf_unittest.ComplexOptionType3.ComplexOptionType5',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='plugh', full_name='protobuf_unittest.ComplexOptionType3.ComplexOptionType5.plugh', index=0,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1675,
serialized_end=1710,
)
_COMPLEXOPTIONTYPE3 = _descriptor.Descriptor(
name='ComplexOptionType3',
full_name='protobuf_unittest.ComplexOptionType3',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='qux', full_name='protobuf_unittest.ComplexOptionType3.qux', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='complexoptiontype5', full_name='protobuf_unittest.ComplexOptionType3.complexoptiontype5', index=1,
number=2, type=10, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_COMPLEXOPTIONTYPE3_COMPLEXOPTIONTYPE5, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1554,
serialized_end=1710,
)
_COMPLEXOPT6 = _descriptor.Descriptor(
name='ComplexOpt6',
full_name='protobuf_unittest.ComplexOpt6',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='xyzzy', full_name='protobuf_unittest.ComplexOpt6.xyzzy', index=0,
number=7593951, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1712,
serialized_end=1743,
)
_VARIOUSCOMPLEXOPTIONS = _descriptor.Descriptor(
name='VariousComplexOptions',
full_name='protobuf_unittest.VariousComplexOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\242\342\225\035\002\010*\242\342\225\035\006\330\205\236\035\304\002\242\342\225\035\010\222\365\235\035\003\010\354\006\242\342\225\035\002 c\242\342\225\035\002 X\252\375\220\035\003\020\333\007\252\375\220\035\006\370\346\227\035\216\005\252\375\220\035\005\n\003\010\347\005\252\375\220\035\010\n\006\330\205\236\035\317\017\252\375\220\035\n\n\010\222\365\235\035\003\010\330\017\252\375\220\035\010\302\254\227\035\003\010\345\005\252\375\220\035\013\302\254\227\035\006\330\205\236\035\316\017\252\375\220\035\r\302\254\227\035\010\222\365\235\035\003\010\311\020\322\250\217\035\003\010\263\017\252\375\220\035\005\032\003\010\301\002\252\375\220\035\004\"\002\010e\252\375\220\035\005\"\003\010\324\001\372\336\220\035\002\010\t\372\336\220\035\004\023\030\026\024\343\334\374\034\370\375\373\034\030\344\334\374\034',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1746,
serialized_end=1987,
)
_AGGREGATEMESSAGESET = _descriptor.Descriptor(
name='AggregateMessageSet',
full_name='protobuf_unittest.AggregateMessageSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\010\001',
is_extendable=True,
syntax='proto2',
extension_ranges=[(4, 2147483647), ],
oneofs=[
],
serialized_start=1989,
serialized_end=2024,
)
_AGGREGATEMESSAGESETELEMENT = _descriptor.Descriptor(
name='AggregateMessageSetElement',
full_name='protobuf_unittest.AggregateMessageSetElement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='s', full_name='protobuf_unittest.AggregateMessageSetElement.s', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
_descriptor.FieldDescriptor(
name='message_set_extension', full_name='protobuf_unittest.AggregateMessageSetElement.message_set_extension', index=0,
number=15447542, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2027,
serialized_end=2187,
)
_AGGREGATE = _descriptor.Descriptor(
name='Aggregate',
full_name='protobuf_unittest.Aggregate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='i', full_name='protobuf_unittest.Aggregate.i', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='s', full_name='protobuf_unittest.Aggregate.s', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sub', full_name='protobuf_unittest.Aggregate.sub', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='file', full_name='protobuf_unittest.Aggregate.file', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mset', full_name='protobuf_unittest.Aggregate.mset', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
_descriptor.FieldDescriptor(
name='nested', full_name='protobuf_unittest.Aggregate.nested', index=0,
number=15476903, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2190,
serialized_end=2443,
)
_AGGREGATEMESSAGE = _descriptor.Descriptor(
name='AggregateMessage',
full_name='protobuf_unittest.AggregateMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='fieldname', full_name='protobuf_unittest.AggregateMessage.fieldname', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\362\241\207;\021\022\017FieldAnnotation', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\302\321\206;\025\010e\022\021MessageAnnotation',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2445,
serialized_end=2534,
)
_NESTEDOPTIONTYPE_NESTEDMESSAGE = _descriptor.Descriptor(
name='NestedMessage',
full_name='protobuf_unittest.NestedOptionType.NestedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='nested_field', full_name='protobuf_unittest.NestedOptionType.NestedMessage.nested_field', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\301\340\303\035\352\003\000\000\000\000\000\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\340\351\302\035\351\007',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2557,
serialized_end=2616,
)
_NESTEDOPTIONTYPE = _descriptor.Descriptor(
name='NestedOptionType',
full_name='protobuf_unittest.NestedOptionType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
_descriptor.FieldDescriptor(
name='nested_extension', full_name='protobuf_unittest.NestedOptionType.nested_extension', index=0,
number=7912573, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=b'\310\213\312\035\355\007', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
nested_types=[_NESTEDOPTIONTYPE_NESTEDMESSAGE, ],
enum_types=[
_NESTEDOPTIONTYPE_NESTEDENUM,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2537,
serialized_end=2738,
)
_OLDOPTIONTYPE = _descriptor.Descriptor(
name='OldOptionType',
full_name='protobuf_unittest.OldOptionType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='protobuf_unittest.OldOptionType.value', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_OLDOPTIONTYPE_TESTENUM,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2740,
serialized_end=2840,
)
_NEWOPTIONTYPE = _descriptor.Descriptor(
name='NewOptionType',
full_name='protobuf_unittest.NewOptionType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='protobuf_unittest.NewOptionType.value', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_NEWOPTIONTYPE_TESTENUM,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2842,
serialized_end=2957,
)
_TESTMESSAGEWITHREQUIREDENUMOPTION = _descriptor.Descriptor(
name='TestMessageWithRequiredEnumOption',
full_name='protobuf_unittest.TestMessageWithRequiredEnumOption',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\372\350\374\224\003\002\010\000',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2959,
serialized_end=3004,
)
_TESTMESSAGEWITHCUSTOMOPTIONS_ANENUM.containing_type = _TESTMESSAGEWITHCUSTOMOPTIONS
_TESTMESSAGEWITHCUSTOMOPTIONS.oneofs_by_name['AnOneof'].fields.append(
_TESTMESSAGEWITHCUSTOMOPTIONS.fields_by_name['oneof_field'])
_TESTMESSAGEWITHCUSTOMOPTIONS.fields_by_name['oneof_field'].containing_oneof = _TESTMESSAGEWITHCUSTOMOPTIONS.oneofs_by_name['AnOneof']
_DUMMYMESSAGECONTAININGENUM_TESTENUMTYPE.containing_type = _DUMMYMESSAGECONTAININGENUM
_COMPLEXOPTIONTYPE2_COMPLEXOPTIONTYPE4.containing_type = _COMPLEXOPTIONTYPE2
_COMPLEXOPTIONTYPE2.fields_by_name['bar'].message_type = _COMPLEXOPTIONTYPE1
_COMPLEXOPTIONTYPE2.fields_by_name['fred'].message_type = _COMPLEXOPTIONTYPE2_COMPLEXOPTIONTYPE4
_COMPLEXOPTIONTYPE2.fields_by_name['barney'].message_type = _COMPLEXOPTIONTYPE2_COMPLEXOPTIONTYPE4
_COMPLEXOPTIONTYPE3_COMPLEXOPTIONTYPE5.containing_type = _COMPLEXOPTIONTYPE3
_COMPLEXOPTIONTYPE3.fields_by_name['complexoptiontype5'].message_type = _COMPLEXOPTIONTYPE3_COMPLEXOPTIONTYPE5
_AGGREGATE.fields_by_name['sub'].message_type = _AGGREGATE
_AGGREGATE.fields_by_name['file'].message_type = google_dot_protobuf_dot_descriptor__pb2._FILEOPTIONS
_AGGREGATE.fields_by_name['mset'].message_type = _AGGREGATEMESSAGESET
_NESTEDOPTIONTYPE_NESTEDMESSAGE.containing_type = _NESTEDOPTIONTYPE
_NESTEDOPTIONTYPE_NESTEDENUM.containing_type = _NESTEDOPTIONTYPE
_OLDOPTIONTYPE.fields_by_name['value'].enum_type = _OLDOPTIONTYPE_TESTENUM
_OLDOPTIONTYPE_TESTENUM.containing_type = _OLDOPTIONTYPE
_NEWOPTIONTYPE.fields_by_name['value'].enum_type = _NEWOPTIONTYPE_TESTENUM
_NEWOPTIONTYPE_TESTENUM.containing_type = _NEWOPTIONTYPE
DESCRIPTOR.message_types_by_name['TestMessageWithCustomOptions'] = _TESTMESSAGEWITHCUSTOMOPTIONS
DESCRIPTOR.message_types_by_name['CustomOptionFooRequest'] = _CUSTOMOPTIONFOOREQUEST
DESCRIPTOR.message_types_by_name['CustomOptionFooResponse'] = _CUSTOMOPTIONFOORESPONSE
DESCRIPTOR.message_types_by_name['CustomOptionFooClientMessage'] = _CUSTOMOPTIONFOOCLIENTMESSAGE
DESCRIPTOR.message_types_by_name['CustomOptionFooServerMessage'] = _CUSTOMOPTIONFOOSERVERMESSAGE
DESCRIPTOR.message_types_by_name['DummyMessageContainingEnum'] = _DUMMYMESSAGECONTAININGENUM
DESCRIPTOR.message_types_by_name['DummyMessageInvalidAsOptionType'] = _DUMMYMESSAGEINVALIDASOPTIONTYPE
DESCRIPTOR.message_types_by_name['CustomOptionMinIntegerValues'] = _CUSTOMOPTIONMININTEGERVALUES
DESCRIPTOR.message_types_by_name['CustomOptionMaxIntegerValues'] = _CUSTOMOPTIONMAXINTEGERVALUES
DESCRIPTOR.message_types_by_name['CustomOptionOtherValues'] = _CUSTOMOPTIONOTHERVALUES
DESCRIPTOR.message_types_by_name['SettingRealsFromPositiveInts'] = _SETTINGREALSFROMPOSITIVEINTS
DESCRIPTOR.message_types_by_name['SettingRealsFromNegativeInts'] = _SETTINGREALSFROMNEGATIVEINTS
DESCRIPTOR.message_types_by_name['ComplexOptionType1'] = _COMPLEXOPTIONTYPE1
DESCRIPTOR.message_types_by_name['ComplexOptionType2'] = _COMPLEXOPTIONTYPE2
DESCRIPTOR.message_types_by_name['ComplexOptionType3'] = _COMPLEXOPTIONTYPE3
DESCRIPTOR.message_types_by_name['ComplexOpt6'] = _COMPLEXOPT6
DESCRIPTOR.message_types_by_name['VariousComplexOptions'] = _VARIOUSCOMPLEXOPTIONS
DESCRIPTOR.message_types_by_name['AggregateMessageSet'] = _AGGREGATEMESSAGESET
DESCRIPTOR.message_types_by_name['AggregateMessageSetElement'] = _AGGREGATEMESSAGESETELEMENT
DESCRIPTOR.message_types_by_name['Aggregate'] = _AGGREGATE
DESCRIPTOR.message_types_by_name['AggregateMessage'] = _AGGREGATEMESSAGE
DESCRIPTOR.message_types_by_name['NestedOptionType'] = _NESTEDOPTIONTYPE
DESCRIPTOR.message_types_by_name['OldOptionType'] = _OLDOPTIONTYPE
DESCRIPTOR.message_types_by_name['NewOptionType'] = _NEWOPTIONTYPE
DESCRIPTOR.message_types_by_name['TestMessageWithRequiredEnumOption'] = _TESTMESSAGEWITHREQUIREDENUMOPTION
DESCRIPTOR.enum_types_by_name['MethodOpt1'] = _METHODOPT1
DESCRIPTOR.enum_types_by_name['AggregateEnum'] = _AGGREGATEENUM
DESCRIPTOR.extensions_by_name['file_opt1'] = file_opt1
DESCRIPTOR.extensions_by_name['message_opt1'] = message_opt1
DESCRIPTOR.extensions_by_name['field_opt1'] = field_opt1
DESCRIPTOR.extensions_by_name['field_opt2'] = field_opt2
DESCRIPTOR.extensions_by_name['oneof_opt1'] = oneof_opt1
DESCRIPTOR.extensions_by_name['enum_opt1'] = enum_opt1
DESCRIPTOR.extensions_by_name['enum_value_opt1'] = enum_value_opt1
DESCRIPTOR.extensions_by_name['service_opt1'] = service_opt1
DESCRIPTOR.extensions_by_name['method_opt1'] = method_opt1
DESCRIPTOR.extensions_by_name['bool_opt'] = bool_opt
DESCRIPTOR.extensions_by_name['int32_opt'] = int32_opt
DESCRIPTOR.extensions_by_name['int64_opt'] = int64_opt
DESCRIPTOR.extensions_by_name['uint32_opt'] = uint32_opt
DESCRIPTOR.extensions_by_name['uint64_opt'] = uint64_opt
DESCRIPTOR.extensions_by_name['sint32_opt'] = sint32_opt
DESCRIPTOR.extensions_by_name['sint64_opt'] = sint64_opt
DESCRIPTOR.extensions_by_name['fixed32_opt'] = fixed32_opt
DESCRIPTOR.extensions_by_name['fixed64_opt'] = fixed64_opt
DESCRIPTOR.extensions_by_name['sfixed32_opt'] = sfixed32_opt
DESCRIPTOR.extensions_by_name['sfixed64_opt'] = sfixed64_opt
DESCRIPTOR.extensions_by_name['float_opt'] = float_opt
DESCRIPTOR.extensions_by_name['double_opt'] = double_opt
DESCRIPTOR.extensions_by_name['string_opt'] = string_opt
DESCRIPTOR.extensions_by_name['bytes_opt'] = bytes_opt
DESCRIPTOR.extensions_by_name['enum_opt'] = enum_opt
DESCRIPTOR.extensions_by_name['message_type_opt'] = message_type_opt
DESCRIPTOR.extensions_by_name['quux'] = quux
DESCRIPTOR.extensions_by_name['corge'] = corge
DESCRIPTOR.extensions_by_name['grault'] = grault
DESCRIPTOR.extensions_by_name['garply'] = garply
DESCRIPTOR.extensions_by_name['complex_opt1'] = complex_opt1
DESCRIPTOR.extensions_by_name['complex_opt2'] = complex_opt2
DESCRIPTOR.extensions_by_name['complex_opt3'] = complex_opt3
DESCRIPTOR.extensions_by_name['complexopt6'] = complexopt6
DESCRIPTOR.extensions_by_name['fileopt'] = fileopt
DESCRIPTOR.extensions_by_name['msgopt'] = msgopt
DESCRIPTOR.extensions_by_name['fieldopt'] = fieldopt
DESCRIPTOR.extensions_by_name['enumopt'] = enumopt
DESCRIPTOR.extensions_by_name['enumvalopt'] = enumvalopt
DESCRIPTOR.extensions_by_name['serviceopt'] = serviceopt
DESCRIPTOR.extensions_by_name['methodopt'] = methodopt
DESCRIPTOR.extensions_by_name['required_enum_opt'] = required_enum_opt
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TestMessageWithCustomOptions = _reflection.GeneratedProtocolMessageType('TestMessageWithCustomOptions', (_message.Message,), {
'DESCRIPTOR' : _TESTMESSAGEWITHCUSTOMOPTIONS,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.TestMessageWithCustomOptions)
})
_sym_db.RegisterMessage(TestMessageWithCustomOptions)
CustomOptionFooRequest = _reflection.GeneratedProtocolMessageType('CustomOptionFooRequest', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMOPTIONFOOREQUEST,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.CustomOptionFooRequest)
})
_sym_db.RegisterMessage(CustomOptionFooRequest)
CustomOptionFooResponse = _reflection.GeneratedProtocolMessageType('CustomOptionFooResponse', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMOPTIONFOORESPONSE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.CustomOptionFooResponse)
})
_sym_db.RegisterMessage(CustomOptionFooResponse)
CustomOptionFooClientMessage = _reflection.GeneratedProtocolMessageType('CustomOptionFooClientMessage', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMOPTIONFOOCLIENTMESSAGE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.CustomOptionFooClientMessage)
})
_sym_db.RegisterMessage(CustomOptionFooClientMessage)
CustomOptionFooServerMessage = _reflection.GeneratedProtocolMessageType('CustomOptionFooServerMessage', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMOPTIONFOOSERVERMESSAGE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.CustomOptionFooServerMessage)
})
_sym_db.RegisterMessage(CustomOptionFooServerMessage)
DummyMessageContainingEnum = _reflection.GeneratedProtocolMessageType('DummyMessageContainingEnum', (_message.Message,), {
'DESCRIPTOR' : _DUMMYMESSAGECONTAININGENUM,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.DummyMessageContainingEnum)
})
_sym_db.RegisterMessage(DummyMessageContainingEnum)
DummyMessageInvalidAsOptionType = _reflection.GeneratedProtocolMessageType('DummyMessageInvalidAsOptionType', (_message.Message,), {
'DESCRIPTOR' : _DUMMYMESSAGEINVALIDASOPTIONTYPE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.DummyMessageInvalidAsOptionType)
})
_sym_db.RegisterMessage(DummyMessageInvalidAsOptionType)
CustomOptionMinIntegerValues = _reflection.GeneratedProtocolMessageType('CustomOptionMinIntegerValues', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMOPTIONMININTEGERVALUES,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.CustomOptionMinIntegerValues)
})
_sym_db.RegisterMessage(CustomOptionMinIntegerValues)
CustomOptionMaxIntegerValues = _reflection.GeneratedProtocolMessageType('CustomOptionMaxIntegerValues', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMOPTIONMAXINTEGERVALUES,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.CustomOptionMaxIntegerValues)
})
_sym_db.RegisterMessage(CustomOptionMaxIntegerValues)
CustomOptionOtherValues = _reflection.GeneratedProtocolMessageType('CustomOptionOtherValues', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMOPTIONOTHERVALUES,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.CustomOptionOtherValues)
})
_sym_db.RegisterMessage(CustomOptionOtherValues)
SettingRealsFromPositiveInts = _reflection.GeneratedProtocolMessageType('SettingRealsFromPositiveInts', (_message.Message,), {
'DESCRIPTOR' : _SETTINGREALSFROMPOSITIVEINTS,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.SettingRealsFromPositiveInts)
})
_sym_db.RegisterMessage(SettingRealsFromPositiveInts)
SettingRealsFromNegativeInts = _reflection.GeneratedProtocolMessageType('SettingRealsFromNegativeInts', (_message.Message,), {
'DESCRIPTOR' : _SETTINGREALSFROMNEGATIVEINTS,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.SettingRealsFromNegativeInts)
})
_sym_db.RegisterMessage(SettingRealsFromNegativeInts)
ComplexOptionType1 = _reflection.GeneratedProtocolMessageType('ComplexOptionType1', (_message.Message,), {
'DESCRIPTOR' : _COMPLEXOPTIONTYPE1,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.ComplexOptionType1)
})
_sym_db.RegisterMessage(ComplexOptionType1)
ComplexOptionType2 = _reflection.GeneratedProtocolMessageType('ComplexOptionType2', (_message.Message,), {
'ComplexOptionType4' : _reflection.GeneratedProtocolMessageType('ComplexOptionType4', (_message.Message,), {
'DESCRIPTOR' : _COMPLEXOPTIONTYPE2_COMPLEXOPTIONTYPE4,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.ComplexOptionType2.ComplexOptionType4)
})
,
'DESCRIPTOR' : _COMPLEXOPTIONTYPE2,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.ComplexOptionType2)
})
_sym_db.RegisterMessage(ComplexOptionType2)
_sym_db.RegisterMessage(ComplexOptionType2.ComplexOptionType4)
ComplexOptionType3 = _reflection.GeneratedProtocolMessageType('ComplexOptionType3', (_message.Message,), {
'ComplexOptionType5' : _reflection.GeneratedProtocolMessageType('ComplexOptionType5', (_message.Message,), {
'DESCRIPTOR' : _COMPLEXOPTIONTYPE3_COMPLEXOPTIONTYPE5,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.ComplexOptionType3.ComplexOptionType5)
})
,
'DESCRIPTOR' : _COMPLEXOPTIONTYPE3,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.ComplexOptionType3)
})
_sym_db.RegisterMessage(ComplexOptionType3)
_sym_db.RegisterMessage(ComplexOptionType3.ComplexOptionType5)
ComplexOpt6 = _reflection.GeneratedProtocolMessageType('ComplexOpt6', (_message.Message,), {
'DESCRIPTOR' : _COMPLEXOPT6,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.ComplexOpt6)
})
_sym_db.RegisterMessage(ComplexOpt6)
VariousComplexOptions = _reflection.GeneratedProtocolMessageType('VariousComplexOptions', (_message.Message,), {
'DESCRIPTOR' : _VARIOUSCOMPLEXOPTIONS,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.VariousComplexOptions)
})
_sym_db.RegisterMessage(VariousComplexOptions)
AggregateMessageSet = _reflection.GeneratedProtocolMessageType('AggregateMessageSet', (_message.Message,), {
'DESCRIPTOR' : _AGGREGATEMESSAGESET,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.AggregateMessageSet)
})
_sym_db.RegisterMessage(AggregateMessageSet)
AggregateMessageSetElement = _reflection.GeneratedProtocolMessageType('AggregateMessageSetElement', (_message.Message,), {
'DESCRIPTOR' : _AGGREGATEMESSAGESETELEMENT,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.AggregateMessageSetElement)
})
_sym_db.RegisterMessage(AggregateMessageSetElement)
Aggregate = _reflection.GeneratedProtocolMessageType('Aggregate', (_message.Message,), {
'DESCRIPTOR' : _AGGREGATE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.Aggregate)
})
_sym_db.RegisterMessage(Aggregate)
AggregateMessage = _reflection.GeneratedProtocolMessageType('AggregateMessage', (_message.Message,), {
'DESCRIPTOR' : _AGGREGATEMESSAGE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.AggregateMessage)
})
_sym_db.RegisterMessage(AggregateMessage)
NestedOptionType = _reflection.GeneratedProtocolMessageType('NestedOptionType', (_message.Message,), {
'NestedMessage' : _reflection.GeneratedProtocolMessageType('NestedMessage', (_message.Message,), {
'DESCRIPTOR' : _NESTEDOPTIONTYPE_NESTEDMESSAGE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.NestedOptionType.NestedMessage)
})
,
'DESCRIPTOR' : _NESTEDOPTIONTYPE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.NestedOptionType)
})
_sym_db.RegisterMessage(NestedOptionType)
_sym_db.RegisterMessage(NestedOptionType.NestedMessage)
OldOptionType = _reflection.GeneratedProtocolMessageType('OldOptionType', (_message.Message,), {
'DESCRIPTOR' : _OLDOPTIONTYPE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.OldOptionType)
})
_sym_db.RegisterMessage(OldOptionType)
NewOptionType = _reflection.GeneratedProtocolMessageType('NewOptionType', (_message.Message,), {
'DESCRIPTOR' : _NEWOPTIONTYPE,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.NewOptionType)
})
_sym_db.RegisterMessage(NewOptionType)
TestMessageWithRequiredEnumOption = _reflection.GeneratedProtocolMessageType('TestMessageWithRequiredEnumOption', (_message.Message,), {
'DESCRIPTOR' : _TESTMESSAGEWITHREQUIREDENUMOPTION,
'__module__' : 'google.protobuf.unittest_custom_options_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.TestMessageWithRequiredEnumOption)
})
_sym_db.RegisterMessage(TestMessageWithRequiredEnumOption)
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(file_opt1)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(message_opt1)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(field_opt1)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(field_opt2)
google_dot_protobuf_dot_descriptor__pb2.OneofOptions.RegisterExtension(oneof_opt1)
google_dot_protobuf_dot_descriptor__pb2.EnumOptions.RegisterExtension(enum_opt1)
google_dot_protobuf_dot_descriptor__pb2.EnumValueOptions.RegisterExtension(enum_value_opt1)
google_dot_protobuf_dot_descriptor__pb2.ServiceOptions.RegisterExtension(service_opt1)
method_opt1.enum_type = _METHODOPT1
google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(method_opt1)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(bool_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(int32_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(int64_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(uint32_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(uint64_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(sint32_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(sint64_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(fixed32_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(fixed64_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(sfixed32_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(sfixed64_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(float_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(double_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(string_opt)
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(bytes_opt)
enum_opt.enum_type = _DUMMYMESSAGECONTAININGENUM_TESTENUMTYPE
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(enum_opt)
message_type_opt.message_type = _DUMMYMESSAGEINVALIDASOPTIONTYPE
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(message_type_opt)
ComplexOptionType1.RegisterExtension(quux)
corge.message_type = _COMPLEXOPTIONTYPE3
ComplexOptionType1.RegisterExtension(corge)
ComplexOptionType2.RegisterExtension(grault)
garply.message_type = _COMPLEXOPTIONTYPE1
ComplexOptionType2.RegisterExtension(garply)
complex_opt1.message_type = _COMPLEXOPTIONTYPE1
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(complex_opt1)
complex_opt2.message_type = _COMPLEXOPTIONTYPE2
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(complex_opt2)
complex_opt3.message_type = _COMPLEXOPTIONTYPE3
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(complex_opt3)
complexopt6.message_type = _COMPLEXOPT6
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(complexopt6)
fileopt.message_type = _AGGREGATE
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(fileopt)
msgopt.message_type = _AGGREGATE
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(msgopt)
fieldopt.message_type = _AGGREGATE
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(fieldopt)
enumopt.message_type = _AGGREGATE
google_dot_protobuf_dot_descriptor__pb2.EnumOptions.RegisterExtension(enumopt)
enumvalopt.message_type = _AGGREGATE
google_dot_protobuf_dot_descriptor__pb2.EnumValueOptions.RegisterExtension(enumvalopt)
serviceopt.message_type = _AGGREGATE
google_dot_protobuf_dot_descriptor__pb2.ServiceOptions.RegisterExtension(serviceopt)
methodopt.message_type = _AGGREGATE
google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(methodopt)
required_enum_opt.message_type = _OLDOPTIONTYPE
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(required_enum_opt)
_COMPLEXOPTIONTYPE2_COMPLEXOPTIONTYPE4.extensions_by_name['complex_opt4'].message_type = _COMPLEXOPTIONTYPE2_COMPLEXOPTIONTYPE4
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(_COMPLEXOPTIONTYPE2_COMPLEXOPTIONTYPE4.extensions_by_name['complex_opt4'])
_AGGREGATEMESSAGESETELEMENT.extensions_by_name['message_set_extension'].message_type = _AGGREGATEMESSAGESETELEMENT
AggregateMessageSet.RegisterExtension(_AGGREGATEMESSAGESETELEMENT.extensions_by_name['message_set_extension'])
_AGGREGATE.extensions_by_name['nested'].message_type = _AGGREGATE
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(_AGGREGATE.extensions_by_name['nested'])
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(_NESTEDOPTIONTYPE.extensions_by_name['nested_extension'])
DESCRIPTOR._options = None
_AGGREGATEENUM._options = None
_AGGREGATEENUM.values_by_name["VALUE"]._options = None
_TESTMESSAGEWITHCUSTOMOPTIONS.oneofs_by_name['AnOneof']._options = None
_TESTMESSAGEWITHCUSTOMOPTIONS_ANENUM._options = None
_TESTMESSAGEWITHCUSTOMOPTIONS_ANENUM.values_by_name["ANENUM_VAL2"]._options = None
_TESTMESSAGEWITHCUSTOMOPTIONS.fields_by_name['field1']._options = None
_TESTMESSAGEWITHCUSTOMOPTIONS._options = None
_CUSTOMOPTIONMININTEGERVALUES._options = None
_CUSTOMOPTIONMAXINTEGERVALUES._options = None
_CUSTOMOPTIONOTHERVALUES._options = None
_SETTINGREALSFROMPOSITIVEINTS._options = None
_SETTINGREALSFROMNEGATIVEINTS._options = None
_VARIOUSCOMPLEXOPTIONS._options = None
_AGGREGATEMESSAGESET._options = None
_AGGREGATEMESSAGE.fields_by_name['fieldname']._options = None
_AGGREGATEMESSAGE._options = None
_NESTEDOPTIONTYPE_NESTEDMESSAGE.fields_by_name['nested_field']._options = None
_NESTEDOPTIONTYPE_NESTEDMESSAGE._options = None
_NESTEDOPTIONTYPE_NESTEDENUM._options = None
_NESTEDOPTIONTYPE_NESTEDENUM.values_by_name["NESTED_ENUM_VALUE"]._options = None
_NESTEDOPTIONTYPE.extensions_by_name['nested_extension']._options = None
_TESTMESSAGEWITHREQUIREDENUMOPTION._options = None
_TESTSERVICEWITHCUSTOMOPTIONS = _descriptor.ServiceDescriptor(
name='TestServiceWithCustomOptions',
full_name='protobuf_unittest.TestServiceWithCustomOptions',
file=DESCRIPTOR,
index=0,
serialized_options=b'\220\262\213\036\323\333\200\313I',
create_key=_descriptor._internal_create_key,
serialized_start=3142,
serialized_end=3284,
methods=[
_descriptor.MethodDescriptor(
name='Foo',
full_name='protobuf_unittest.TestServiceWithCustomOptions.Foo',
index=0,
containing_service=None,
input_type=_CUSTOMOPTIONFOOREQUEST,
output_type=_CUSTOMOPTIONFOORESPONSE,
serialized_options=b'\340\372\214\036\002',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_TESTSERVICEWITHCUSTOMOPTIONS)
DESCRIPTOR.services_by_name['TestServiceWithCustomOptions'] = _TESTSERVICEWITHCUSTOMOPTIONS
_AGGREGATESERVICE = _descriptor.ServiceDescriptor(
name='AggregateService',
full_name='protobuf_unittest.AggregateService',
file=DESCRIPTOR,
index=1,
serialized_options=b'\312\373\216;\023\022\021ServiceAnnotation',
create_key=_descriptor._internal_create_key,
serialized_start=3287,
serialized_end=3440,
methods=[
_descriptor.MethodDescriptor(
name='Method',
full_name='protobuf_unittest.AggregateService.Method',
index=0,
containing_service=None,
input_type=_AGGREGATEMESSAGE,
output_type=_AGGREGATEMESSAGE,
serialized_options=b'\312\310\226;\022\022\020MethodAnnotation',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_AGGREGATESERVICE)
DESCRIPTOR.services_by_name['AggregateService'] = _AGGREGATESERVICE
TestServiceWithCustomOptions = service_reflection.GeneratedServiceType('TestServiceWithCustomOptions', (_service.Service,), dict(
DESCRIPTOR = _TESTSERVICEWITHCUSTOMOPTIONS,
__module__ = 'google.protobuf.unittest_custom_options_pb2'
))
TestServiceWithCustomOptions_Stub = service_reflection.GeneratedServiceStubType('TestServiceWithCustomOptions_Stub', (TestServiceWithCustomOptions,), dict(
DESCRIPTOR = _TESTSERVICEWITHCUSTOMOPTIONS,
__module__ = 'google.protobuf.unittest_custom_options_pb2'
))
AggregateService = service_reflection.GeneratedServiceType('AggregateService', (_service.Service,), dict(
DESCRIPTOR = _AGGREGATESERVICE,
__module__ = 'google.protobuf.unittest_custom_options_pb2'
))
AggregateService_Stub = service_reflection.GeneratedServiceStubType('AggregateService_Stub', (AggregateService,), dict(
DESCRIPTOR = _AGGREGATESERVICE,
__module__ = 'google.protobuf.unittest_custom_options_pb2'
))
# @@protoc_insertion_point(module_scope)
| 47.59589
| 11,592
| 0.801012
|
cb2e0f0ccf935d5273651b23d7facc7c376e6711
| 3,347
|
py
|
Python
|
dist.py
|
kwadrat/s_dist
|
a0c8e6a0420bcc5f15fbbdf4ccbea9d9afd97902
|
[
"MIT"
] | null | null | null |
dist.py
|
kwadrat/s_dist
|
a0c8e6a0420bcc5f15fbbdf4ccbea9d9afd97902
|
[
"MIT"
] | null | null | null |
dist.py
|
kwadrat/s_dist
|
a0c8e6a0420bcc5f15fbbdf4ccbea9d9afd97902
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sys
import unittest
def thresh(value):
return (6 * (value + 100) / 50)
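# A quick worked example of thresh() (added note; the values are taken from
# TestDist.test_ninth below): thresh(15) == 6 * (15 + 100) / 50 == 13.8
# and thresh(16) == 6 * 116 / 50 == 13.92.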
class Box:
def __init__(self):
'''
Box:
'''
self.ls = []
def add(self, value, tm=None):
'''
Box:
        Add another value to the list
'''
if tm is not None and tm > thresh(value):
self.ls = [value]
else:
if value not in self.ls:
self.ls.append(value)
if len(self.ls) > 2:
if (
abs(self.ls[2] - self.ls[0])
> abs(self.ls[2] - self.ls[1])):
del self.ls[0]
else:
del self.ls[1]
def get(self):
'''
Box:
        Compute the average value of the list
'''
if self.ls:
return sum(self.ls) / (len(self.ls))
else:
return None
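# Minimal usage sketch for Box (added note, mirroring TestDist.test_something
# below rather than being part of the original module):
#
#     obk = Box()
#     obk.add(10, 0)   # 0 does not exceed thresh(10) == 13.2, so 10 is appended
#     obk.add(12)
#     obk.get()        # -> 11.0, the mean of [10, 12]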
class TestDist(unittest.TestCase):
def test_something(self):
'''
TestDist:
'''
obk = Box()
obk.add(10, 0)
self.assertEqual(obk.get(), 10)
obk.add(12)
self.assertEqual(obk.get(), 11)
def test_second(self):
'''
TestDist:
'''
obk = Box()
obk.add(20)
obk.add(30)
self.assertEqual(obk.get(), 25)
obk.add(40)
self.assertEqual(obk.get(), 35)
def test_third(self):
'''
TestDist:
        Skip the more distant element (i.e. the second one)
'''
obk = Box()
obk.add(2)
obk.add(10)
obk.add(0)
self.assertEqual(obk.get(), 1)
def test_four(self):
'''
TestDist:
        If the element is already the first item on the
        list, ignore the new element.
'''
obk = Box()
obk.add(2)
obk.add(10)
obk.add(2)
self.assertEqual(obk.get(), 6)
def test_fifth(self):
'''
TestDist:
        If the element is already anywhere on the
        list, ignore the new element.
'''
obk = Box()
obk.add(2)
obk.add(10)
obk.add(10)
self.assertEqual(obk.get(), 6)
def test_sixth(self):
'''
TestDist:
        Discard all elements if the time is
        greater than 10 seconds
'''
obk = Box()
obk.add(2)
obk.add(10)
obk.add(30, 12)
self.assertEqual(obk.get(), 20)
def test_seventh(self):
'''
TestDist:
        Handle an empty list
'''
obk = Box()
self.assertEqual(obk.get(), None)
def test_eighth(self):
'''
TestDist:
'''
for i in range(30, 0, -1):
print(i)
obk = Box()
obk.add(2)
obk.add(10)
obk.add(30, i)
self.assertEqual(obk.get(), 30)
def test_ninth(self):
'''
TestDist:
'''
self.assertEqual(thresh(15), 13.8)
self.assertEqual(thresh(16), 13.92)
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == 'test':
unittest.main(argv=sys.argv[:1])
else:
for i in range(0, 30):
print('%s %s' % (i, thresh(i)))
| 22.165563
| 60
| 0.45115
|
834518c45369bd86c1419ef0546e1a751f74c5ec
| 6,868
|
py
|
Python
|
test/integration/ggrc/integrations/test_asmt_sync_job.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/ggrc/integrations/test_asmt_sync_job.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/ggrc/integrations/test_asmt_sync_job.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration test for Assessment object sync cron job."""
from datetime import datetime
import ddt
import mock
from ggrc import settings
from ggrc.models import all_models
from ggrc.integrations.synchronization_jobs import assessment_sync_job
from ggrc.integrations.synchronization_jobs import sync_utils
from ggrc.integrations import constants
from integration import ggrc
from integration.ggrc.models import factories
@ddt.ddt
@mock.patch.object(settings, "ISSUE_TRACKER_ENABLED", True)
@mock.patch('ggrc.integrations.issues.Client.update_issue',
return_value=mock.MagicMock())
class TestAsmtSyncJob(ggrc.TestCase):
"""Test cron job for sync Assessment object attributes."""
@staticmethod
def _create_asmt(people_sync_enabled):
"""Helper function creating assessment and audit."""
with factories.single_commit():
asmt = factories.AssessmentFactory()
factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=asmt.audit,
people_sync_enabled=people_sync_enabled,
**TestAsmtSyncJob._issuetracker_data()
)
factories.IssueTrackerIssueFactory(
enabled=True,
issue_tracked_obj=asmt,
due_date=datetime.utcnow(),
**TestAsmtSyncJob._issuetracker_data()
)
return asmt
@staticmethod
def _issuetracker_data():
"""Helper function returning default issue tracker settings."""
return dict(
component_id=constants.DEFAULT_ISSUETRACKER_VALUES["component_id"],
hotlist_id=constants.DEFAULT_ISSUETRACKER_VALUES["hotlist_id"],
issue_type=constants.DEFAULT_ISSUETRACKER_VALUES["issue_type"],
issue_priority=constants.DEFAULT_ISSUETRACKER_VALUES["issue_priority"],
issue_severity=constants.DEFAULT_ISSUETRACKER_VALUES["issue_severity"],
)
@staticmethod
def _to_issuetrakcer_repr(asmt):
"""Return issue tracker representation of assessment."""
return {
asmt.issuetracker_issue.issue_id: dict(
component_id=int(asmt.issuetracker_issue.component_id),
status=asmt.status,
type=asmt.issuetracker_issue.issue_type,
priority=asmt.issuetracker_issue.issue_priority,
severity=asmt.issuetracker_issue.issue_severity,
reporter=asmt.issuetracker_issue.reporter or "",
assignee=asmt.issuetracker_issue.assignee or "",
verifier=asmt.issuetracker_issue.assignee or "",
ccs=asmt.issuetracker_issue.cc_list or [],
),
}
@staticmethod
def _construct_expected_upd_call(current_repr, new_audit_captains=(),
new_asmt_assignees=(),
people_sync_enabled=False):
"""Return expected args for client update_issue call."""
issue_id, = current_repr.keys()
body = dict(current_repr[issue_id])
new_audit_captains = {a.email for a in new_audit_captains}
new_asmt_assignees = {a.email for a in new_asmt_assignees}
if people_sync_enabled:
if new_audit_captains:
body["reporter"] = min(new_audit_captains)
if new_asmt_assignees:
body["assignee"] = min(new_asmt_assignees)
body["verifier"] = body["assignee"]
body["ccs"] = list(
(new_audit_captains | new_asmt_assignees) -
{body["reporter"], body["assignee"]}
)
body["status"] = constants.STATUSES_MAPPING.get(body["status"])
return str(issue_id), body
@ddt.data(True, False)
def test_assignee_people_sync(self, people_sync_enabled, update_issue_mock):
"""Test sync of Assignees when people_sync_enabled is on/off."""
asmt = self._create_asmt(people_sync_enabled=people_sync_enabled)
issuetracker_repr = self._to_issuetrakcer_repr(asmt)
with factories.single_commit():
assignee_1 = factories.PersonFactory()
assignee_2 = factories.PersonFactory()
expected_upd_args = self._construct_expected_upd_call(
current_repr=issuetracker_repr,
new_asmt_assignees=(assignee_1, assignee_2),
people_sync_enabled=people_sync_enabled,
)
asmt.add_person_with_role_name(assignee_1, "Assignees")
asmt.add_person_with_role_name(assignee_2, "Assignees")
with mock.patch.object(sync_utils, "iter_issue_batches",
return_value=[issuetracker_repr]):
assessment_sync_job.sync_assessment_attributes()
update_issue_mock.assert_called_once_with(*expected_upd_args)
@ddt.data(True, False)
def test_captains_people_sync_on(self, people_sync_enabled,
update_issue_mock):
"""Test sync of Audit Captain when people_sync_enabled is on/off."""
asmt = self._create_asmt(people_sync_enabled=people_sync_enabled)
issuetracker_repr = self._to_issuetrakcer_repr(asmt)
with factories.single_commit():
audit_captain_1 = factories.PersonFactory()
audit_captain_2 = factories.PersonFactory()
expected_upd_args = self._construct_expected_upd_call(
current_repr=issuetracker_repr,
new_audit_captains=(audit_captain_1, audit_captain_2),
people_sync_enabled=people_sync_enabled,
)
asmt.audit.add_person_with_role_name(audit_captain_1, "Audit Captains")
asmt.audit.add_person_with_role_name(audit_captain_2, "Audit Captains")
with mock.patch.object(sync_utils, "iter_issue_batches",
return_value=[issuetracker_repr]):
assessment_sync_job.sync_assessment_attributes()
update_issue_mock.assert_called_once_with(*expected_upd_args)
def test_empty_due_date_sync(self, update_issue_mock):
"""Test adding empty due_date in Issue"""
due_date = None
with factories.single_commit():
assmt = self._create_asmt(True)
assmt.start_date = due_date
issue = assmt.issuetracker_issue
issuetracker_issue_id = issue.id
iti = self._to_issuetrakcer_repr(assmt)
iti[assmt.issuetracker_issue.issue_id].update({
"custom_fields": [{
constants.CustomFields.DUE_DATE:
issue.due_date.strftime("%Y-%m-%d")
}],
})
batches = [iti]
with mock.patch.object(
sync_utils,
"iter_issue_batches",
return_value=batches
):
assessment_sync_job.sync_assessment_attributes()
issue_id = iti.keys()[0]
payload = iti[issue_id]
payload["custom_fields"] = [{
'display_string': 'Due Date',
'type': 'DATE',
'name': 'Due Date',
'value': None,
}]
payload["status"] = 'ASSIGNED'
update_issue_mock.assert_called_once_with(issue_id, payload)
issue = all_models.IssuetrackerIssue.query.get(issuetracker_issue_id)
self.assertIsNone(issue.due_date)
| 38.155556
| 79
| 0.703698
|
078476690ad123517df847f294bf38e86d99b3a3
| 3,009
|
py
|
Python
|
modules/lex_bot_importer.py
|
adamhamden/lex-bot
|
3c21b8d60607950c707b97ff5ba8491d40e31592
|
[
"MIT"
] | null | null | null |
modules/lex_bot_importer.py
|
adamhamden/lex-bot
|
3c21b8d60607950c707b97ff5ba8491d40e31592
|
[
"MIT"
] | null | null | null |
modules/lex_bot_importer.py
|
adamhamden/lex-bot
|
3c21b8d60607950c707b97ff5ba8491d40e31592
|
[
"MIT"
] | null | null | null |
import boto3
import pprint
import json
import os
class LexBotImporter:
def __init__(self):
self.client = boto3.client('lex-models')
def import_bot(self, bot_file=None, file_path=None):
bot_data = self._parse_bot_file(bot_file, file_path)
self._construct_bot(bot_data)
@staticmethod
def _parse_bot_file(bot_file=None, file_path=None):
if bot_file is None:
raise RuntimeError("ERROR: No bot file was provided")
if file_path is None:
file_path = os.path.dirname(os.path.abspath(__file__))
print("No filepath provided, using current file path")
filename = os.path.join(file_path, bot_file)
try:
with open(filename) as f:
bot_data = json.load(f)
except FileNotFoundError:
print("ERROR: Bot file was not found")
return
print("Successfully parsed {}".format(bot_file))
return bot_data
def _construct_bot(self, bot_data):
self._import_slot_types(bot_data)
self._import_intents(bot_data)
self._import_bot_configurations(bot_data)
def _import_slot_types(self, bot_data):
for slot in bot_data['resource']['slotTypes']:
del slot['version']
# check if slot exists
try:
response = self.client.get_slot_type(name=slot['name'], version='$LATEST')
slot['checksum'] = response['checksum']
except self.client.exceptions.NotFoundException:
pass
self.client.put_slot_type(**slot)
print("Successfully imported slot type {}".format(slot['name']))
def _import_intents(self, bot_data):
for intent in bot_data['resource']['intents']:
del intent['version']
# check if intent exists
try:
response = self.client.get_intent(name=intent['name'], version='$LATEST')
intent['checksum'] = response['checksum']
except self.client.exceptions.NotFoundException:
pass
self.client.put_intent(**intent)
print("Successfully imported intent {}".format(intent['name']))
def _import_bot_configurations(self, bot_data):
bot = bot_data['resource']
del bot['version']
del bot['slotTypes']
# check if bot exists
try:
response = self.client.get_bot(name=bot['name'], versionOrAlias='$LATEST')
bot['checksum'] = response['checksum']
except self.client.exceptions.NotFoundException:
pass
intent_list = []
for intent in bot_data['resource']['intents']:
intent_list.append({'intentName': intent['name'], 'intentVersion':'$LATEST'})
bot['intents'] = intent_list
response = self.client.put_bot(**bot)
print("Successfully imported bot {}".format(bot['name']))
pprint.pprint(response)
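# Hypothetical usage sketch (the bot file name and path below are placeholders,
# not taken from the repository). import_bot() parses the exported JSON and
# then recreates slot types, intents and the bot definition, in that order:
#
#     importer = LexBotImporter()
#     importer.import_bot(bot_file="my_bot.json", file_path="/path/to/exports")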
| 27.605505
| 90
| 0.600199
|
09d98d61117490010bd9092f0a3d521efac6767e
| 793
|
py
|
Python
|
gen_locale.py
|
iida-hayato/factorio-not-included
|
4350c02bd301646245733a5cb37bb446f24950b9
|
[
"MIT"
] | 3
|
2021-02-06T01:58:24.000Z
|
2021-12-23T03:51:44.000Z
|
gen_locale.py
|
iida-hayato/factorio-not-included
|
4350c02bd301646245733a5cb37bb446f24950b9
|
[
"MIT"
] | null | null | null |
gen_locale.py
|
iida-hayato/factorio-not-included
|
4350c02bd301646245733a5cb37bb446f24950b9
|
[
"MIT"
] | null | null | null |
import re
def gen_with(type,path=''):
if path == '':
path = f'prototypes/{type}.lua'
gen_from_file(type, path)
def gen_from_file(type, path):
with open(path) as f:
lineList = f.readlines()
for line in lineList:
target = r'^ *name = '
if re.search(target, line):
new_line = re.findall(r'name = "(.*)"',line)
name = new_line[0]
print(f'{name}={name}')
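# Illustrative example of the loop above (the prototype name is made up):
# a Lua line such as `name = "stone-furnace"` matches the regex and is
# printed as the locale entry `stone-furnace=stone-furnace`.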
print(f'[item-name]')
gen_with('item','prototypes/entity-item.lua')
gen_with('item')
print(f'[fluid-name]')
gen_with('fluid','prototypes/fluids.lua')
print(f'[entity-name]')
gen_with('entity')
print(f'[recipe-name]')
gen_with('recipe')
gen_with('recipe','prototypes/entity-recipe.lua')
print(f'[technology-name]')
gen_with('tech')
| 25.580645
| 60
| 0.596469
|
1716751a27d8f123957b12b8474c789ae4568729
| 115
|
py
|
Python
|
module1-web-application-development-with-flask/APP/__init__.py
|
lucguittard/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
79c2c8ec02a673b135da1d012747fee82d50ce35
|
[
"MIT"
] | 1
|
2020-05-28T21:56:57.000Z
|
2020-05-28T21:56:57.000Z
|
module1-web-application-development-with-flask/APP/__init__.py
|
lucguittard/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
79c2c8ec02a673b135da1d012747fee82d50ce35
|
[
"MIT"
] | 4
|
2021-06-02T00:41:19.000Z
|
2022-03-12T00:07:13.000Z
|
module1-web-application-development-with-flask/APP/__init__.py
|
lucguittard/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
79c2c8ec02a673b135da1d012747fee82d50ce35
|
[
"MIT"
] | null | null | null |
"""Entry point for twitoff flask app"""
from .app import create_app
APP = create_app #no relation to folder name
| 23
| 45
| 0.747826
|
612d37597f0f5c95f38a16dade99500c6223ed3d
| 2,689
|
py
|
Python
|
test/_utils/_common_utils_for_test.py
|
Nayef211/data
|
66b7ac07f75c45f1cc6aed71423fdb5d29a9648f
|
[
"BSD-3-Clause"
] | null | null | null |
test/_utils/_common_utils_for_test.py
|
Nayef211/data
|
66b7ac07f75c45f1cc6aed71423fdb5d29a9648f
|
[
"BSD-3-Clause"
] | null | null | null |
test/_utils/_common_utils_for_test.py
|
Nayef211/data
|
66b7ac07f75c45f1cc6aed71423fdb5d29a9648f
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import tempfile
from torch.utils.data import IterDataPipe
from typing import Any, List, Tuple, TypeVar
T_co = TypeVar("T_co", covariant=True)
class IDP_NoLen(IterDataPipe):
def __init__(self, input_dp):
super().__init__()
self.input_dp = input_dp
def __iter__(self):
for i in self.input_dp:
yield i
def get_name(path_and_stream):
return os.path.basename(path_and_stream[0]), path_and_stream[1]
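# Example (illustrative path): get_name(("/tmp/data/1.txt", stream)) returns
# ("1.txt", stream).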
# Given a DataPipe and integer n, iterate the DataPipe for n elements and store the elements into a list
# Then, reset the DataPipe and return a tuple of two lists
# 1. A list of elements yielded before the reset
# 2. A list of all elements of the DataPipe after the reset
def reset_after_n_next_calls(datapipe: IterDataPipe[T_co], n: int) -> Tuple[List[T_co], List[T_co]]:
it = iter(datapipe)
res_before_reset = []
for _ in range(n):
res_before_reset.append(next(it))
return res_before_reset, list(datapipe)
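# Illustrative call (the datapipe here is hypothetical): for a datapipe that
# yields 0..4 and n=2, this returns ([0, 1], [0, 1, 2, 3, 4]) -- the two
# elements seen before the reset, followed by a full pass over the datapipe.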
def create_temp_dir_and_files() -> List[Any]:
# The temp dir and files within it will be released and deleted in tearDown().
# Adding `noqa: P201` to avoid mypy's warning on not releasing the dir handle within this function.
temp_dir = tempfile.TemporaryDirectory() # noqa: P201
temp_dir_path = temp_dir.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, prefix="1", suffix=".txt") as f:
temp_file1_name = f.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, prefix="2", suffix=".byte") as f:
temp_file2_name = f.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, prefix="3", suffix=".empty") as f:
temp_file3_name = f.name
with open(temp_file1_name, "w") as f1:
f1.write("0123456789abcdef")
with open(temp_file2_name, "wb") as f2:
f2.write(b"0123456789abcdef")
temp_sub_dir = tempfile.TemporaryDirectory(dir=temp_dir_path) # noqa: P201
temp_sub_dir_path = temp_sub_dir.name
with tempfile.NamedTemporaryFile(dir=temp_sub_dir_path, delete=False, prefix="4", suffix=".txt") as f:
temp_sub_file1_name = f.name
with tempfile.NamedTemporaryFile(dir=temp_sub_dir_path, delete=False, prefix="5", suffix=".byte") as f:
temp_sub_file2_name = f.name
with open(temp_sub_file1_name, "w") as f1:
f1.write("0123456789abcdef")
with open(temp_sub_file2_name, "wb") as f2:
f2.write(b"0123456789abcdef")
return [
(temp_dir, temp_file1_name, temp_file2_name, temp_file3_name),
(temp_sub_dir, temp_sub_file1_name, temp_sub_file2_name),
]
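# Usage sketch (added; the attribute names are illustrative, not from this
# module). Callers are expected to keep the two TemporaryDirectory handles
# alive until tearDown, e.g.:
#
#     def setUp(self):
#         ret = create_temp_dir_and_files()
#         self.temp_dir, *self.temp_files = ret[0]
#         self.temp_sub_dir, *self.temp_sub_files = ret[1]
#
#     def tearDown(self):
#         self.temp_sub_dir.cleanup()
#         self.temp_dir.cleanup()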
| 37.347222
| 107
| 0.708814
|
7c9ddf2c7258d54413713fd8792353fb1ae0f5d4
| 9,562
|
py
|
Python
|
heat/tests/test_sahara_cluster.py
|
pshchelo/heat
|
6cf94a3ece89d77b839f61292e5f023c3f192c82
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/test_sahara_cluster.py
|
pshchelo/heat
|
6cf94a3ece89d77b839f61292e5f023c3f192c82
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/test_sahara_cluster.py
|
pshchelo/heat
|
6cf94a3ece89d77b839f61292e5f023c3f192c82
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import neutron
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import sahara_cluster as sc
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
cluster_stack_template = """
heat_template_version: 2013-05-23
description: Hadoop Cluster by Sahara
resources:
super-cluster:
type: OS::Sahara::Cluster
properties:
name: super-cluster
plugin_name: vanilla
hadoop_version: 2.3.0
cluster_template_id: some_cluster_template_id
default_image_id: some_image
key_name: admin
neutron_management_network: some_network
"""
class FakeCluster(object):
def __init__(self, status='Active'):
self.status = status
self.id = "some_id"
self.name = "super-cluster"
self.info = {"HDFS": {"NameNode": "hdfs://hostname:port",
"Web UI": "http://host_ip:port"}}
class SaharaClusterTest(common.HeatTestCase):
def setUp(self):
super(SaharaClusterTest, self).setUp()
self.patchobject(sc.constraints.CustomConstraint, '_is_valid'
).return_value = True
self.patchobject(glance.GlanceClientPlugin, 'get_image_id'
).return_value = 'some_image_id'
self.patchobject(neutron.NeutronClientPlugin, '_create')
self.patchobject(neutron.NeutronClientPlugin, 'find_neutron_resource'
).return_value = 'some_network_id'
self.sahara_mock = mock.MagicMock()
self.patchobject(sahara.SaharaClientPlugin, '_create'
).return_value = self.sahara_mock
self.cl_mgr = self.sahara_mock.clusters
self.fake_cl = FakeCluster()
self.t = template_format.parse(cluster_stack_template)
def _init_cluster(self, template):
self.stack = utils.parse_stack(template)
cluster = self.stack['super-cluster']
return cluster
def _create_cluster(self, template):
cluster = self._init_cluster(template)
self.cl_mgr.create.return_value = self.fake_cl
self.cl_mgr.get.return_value = self.fake_cl
scheduler.TaskRunner(cluster.create)()
self.assertEqual((cluster.CREATE, cluster.COMPLETE),
cluster.state)
self.assertEqual(self.fake_cl.id, cluster.resource_id)
return cluster
def test_cluster_create(self):
self._create_cluster(self.t)
expected_args = ('super-cluster', 'vanilla', '2.3.0')
expected_kwargs = {'cluster_template_id': 'some_cluster_template_id',
'user_keypair_id': 'admin',
'default_image_id': 'some_image_id',
'net_id': 'some_network_id'}
self.cl_mgr.create.assert_called_once_with(*expected_args,
**expected_kwargs)
self.cl_mgr.get.assert_called_once_with(self.fake_cl.id)
def test_cluster_delete(self):
cluster = self._create_cluster(self.t)
self.cl_mgr.get.side_effect = [
self.fake_cl,
sahara.sahara_base.APIException(error_code=404)]
self.cl_mgr.get.reset_mock()
scheduler.TaskRunner(cluster.delete)()
self.assertEqual((cluster.DELETE, cluster.COMPLETE),
cluster.state)
self.cl_mgr.delete.assert_called_once_with(self.fake_cl.id)
self.assertEqual(2, self.cl_mgr.get.call_count)
def test_cluster_create_fails(self):
cfg.CONF.set_override('action_retry_limit', 0)
cluster = self._init_cluster(self.t)
self.cl_mgr.create.return_value = self.fake_cl
self.cl_mgr.get.return_value = FakeCluster(status='Error')
create_task = scheduler.TaskRunner(cluster.create)
ex = self.assertRaises(exception.ResourceFailure, create_task)
expected = 'ResourceInError: Went to status Error due to "Unknown"'
self.assertEqual(expected, six.text_type(ex))
def test_cluster_delete_fails(self):
cluster = self._create_cluster(self.t)
self.cl_mgr.delete.side_effect = sahara.sahara_base.APIException()
delete_task = scheduler.TaskRunner(cluster.delete)
ex = self.assertRaises(exception.ResourceFailure, delete_task)
expected = "APIException: None"
self.assertEqual(expected, six.text_type(ex))
self.cl_mgr.delete.assert_called_once_with(self.fake_cl.id)
def test_cluster_not_found_in_delete(self):
cluster = self._create_cluster(self.t)
self.cl_mgr.delete.side_effect = sahara.sahara_base.APIException(
error_code=404)
scheduler.TaskRunner(cluster.delete)()
self.cl_mgr.delete.assert_called_once_with(self.fake_cl.id)
def test_cluster_check_delete_complete_error(self):
cluster = self._create_cluster(self.t)
self.cl_mgr.get.side_effect = [
self.fake_cl,
sahara.sahara_base.APIException()]
self.cl_mgr.get.reset_mock()
delete_task = scheduler.TaskRunner(cluster.delete)
ex = self.assertRaises(exception.ResourceFailure, delete_task)
expected = "APIException: None"
self.assertEqual(expected, six.text_type(ex))
self.cl_mgr.delete.assert_called_once_with(self.fake_cl.id)
self.assertEqual(2, self.cl_mgr.get.call_count)
def test_cluster_delete_cluster_in_error(self):
cluster = self._create_cluster(self.t)
self.cl_mgr.get.side_effect = [
self.fake_cl,
FakeCluster(status='Error')]
self.cl_mgr.get.reset_mock()
delete_task = scheduler.TaskRunner(cluster.delete)
ex = self.assertRaises(exception.ResourceFailure, delete_task)
expected = 'ResourceInError: Went to status Error due to "Unknown"'
self.assertEqual(expected, six.text_type(ex))
self.cl_mgr.delete.assert_called_once_with(self.fake_cl.id)
self.assertEqual(2, self.cl_mgr.get.call_count)
def test_cluster_resolve_attribute(self):
cluster = self._create_cluster(self.t)
self.cl_mgr.get.reset_mock()
self.assertEqual(self.fake_cl.info,
cluster._resolve_attribute('info'))
self.assertEqual(self.fake_cl.status,
cluster._resolve_attribute('status'))
self.assertEqual(2, self.cl_mgr.get.call_count)
def test_cluster_resource_mapping(self):
cluster = self._init_cluster(self.t)
mapping = sc.resource_mapping()
self.assertEqual(1, len(mapping))
self.assertEqual(sc.SaharaCluster,
mapping['OS::Sahara::Cluster'])
self.assertIsInstance(cluster, sc.SaharaCluster)
def test_cluster_create_no_image_anywhere_fails(self):
self.t['resources']['super-cluster']['properties'].pop(
'default_image_id')
self.sahara_mock.cluster_templates.get.return_value = mock.Mock(
default_image_id=None)
cluster = self._init_cluster(self.t)
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(cluster.create))
self.assertIsInstance(ex.exc, exception.StackValidationFailed)
self.assertIn("image must be provided: "
"Referenced cluster template some_cluster_template_id "
"has no default_image_id defined.",
six.text_type(ex.message))
def test_cluster_validate_no_network_on_neutron_fails(self):
self.t['resources']['super-cluster']['properties'].pop(
'neutron_management_network')
cluster = self._init_cluster(self.t)
self.patchobject(cluster, 'is_using_neutron', return_value=True)
ex = self.assertRaises(exception.StackValidationFailed,
cluster.validate)
self.assertEqual("neutron_management_network must be provided",
six.text_type(ex))
def test_validation_error_for_deprecated_properties(self):
tmpl = '''
heat_template_version: 2013-05-23
description: Hadoop Cluster by Sahara
resources:
super-cluster:
type: OS::Sahara::Cluster
properties:
name: super-cluster
plugin_name: vanilla
hadoop_version: 2.3.0
cluster_template_id: some_cluster_template_id
image: some_image
default_image_id: test_image_id
key_name: admin
neutron_management_network: some_network
'''
ct = self._init_cluster(template_format.parse(tmpl))
ex = self.assertRaises(exception.ResourcePropertyConflict, ct.validate)
msg = 'Cannot define the following properties at the same time: '
self.assertIn(msg, six.text_type(ex))
| 42.123348
| 79
| 0.673395
|
8325bf8dc69d83238be966171dcdbe3035fdca9c
| 37,395
|
py
|
Python
|
tensorflow/targetDirectory/lib/python3.7/site-packages/tensorflow/python/ops/gen_state_ops.py
|
amyhxqin/heartbit
|
ebb67349e90654e275760d081b80b343bd2f45eb
|
[
"MIT"
] | null | null | null |
tensorflow/targetDirectory/lib/python3.7/site-packages/tensorflow/python/ops/gen_state_ops.py
|
amyhxqin/heartbit
|
ebb67349e90654e275760d081b80b343bd2f45eb
|
[
"MIT"
] | null | null | null |
tensorflow/targetDirectory/lib/python3.7/site-packages/tensorflow/python/ops/gen_state_ops.py
|
amyhxqin/heartbit
|
ebb67349e90654e275760d081b80b343bd2f45eb
|
[
"MIT"
] | null | null | null |
"""Python wrappers around Brain.
This file is MACHINE GENERATED! Do not edit.
"""
import collections
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import op_def_library
_assign_outputs = ["output_ref"]
def assign(ref, value, validate_shape=None, use_locking=None, name=None):
r"""Update 'ref' by assigning 'value' to it.
This operation outputs "ref" after the assignment is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
value: A `Tensor`. Must have the same type as `ref`.
The value to be assigned to the variable.
validate_shape: An optional `bool`. Defaults to `True`.
If true, the operation will validate that the shape
of 'value' matches the shape of the Tensor being assigned to. If false,
'ref' will take on the shape of 'value'.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been reset.
"""
result = _op_def_lib.apply_op("Assign", ref=ref, value=value,
validate_shape=validate_shape,
use_locking=use_locking, name=name)
return result
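# Illustrative usage sketch (not part of the generated op definitions). In
# TF 1.x graph mode, `assign` is normally reached through the public
# `tf.assign` wrapper; `tf` and `sess` below are assumed to exist in the
# caller's code, not in this module:
#
#   v = tf.Variable([1.0, 2.0])
#   update = tf.assign(v, [3.0, 4.0])          # returns the updated ref
#   sess.run(tf.global_variables_initializer())
#   sess.run(update)                            # -> [3.0, 4.0]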
_assign_add_outputs = ["output_ref"]
def assign_add(ref, value, use_locking=None, name=None):
r"""Update 'ref' by adding 'value' to it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
The value to be added to the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the addition will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
result = _op_def_lib.apply_op("AssignAdd", ref=ref, value=value,
use_locking=use_locking, name=name)
return result
_assign_sub_outputs = ["output_ref"]
def assign_sub(ref, value, use_locking=None, name=None):
r"""Update 'ref' by subtracting 'value' from it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
      The value to be subtracted from the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
result = _op_def_lib.apply_op("AssignSub", ref=ref, value=value,
use_locking=use_locking, name=name)
return result
_count_up_to_outputs = ["output"]
def count_up_to(ref, limit, name=None):
r"""Increments 'ref' until it reaches 'limit'.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `int32`, `int64`.
Should be from a scalar `Variable` node.
limit: An `int`.
If incrementing ref would bring it above limit, instead generates an
'OutOfRange' error.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `ref`.
A copy of the input before increment. If nothing else modifies the
input, the values produced will all be distinct.
"""
result = _op_def_lib.apply_op("CountUpTo", ref=ref, limit=limit, name=name)
return result
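# Illustrative usage sketch (assumed caller-side code, not part of this
# generated module): `count_up_to` returns the value before the increment and
# raises an OutOfRange error once `limit` is reached:
#
#   counter = tf.Variable(0, dtype=tf.int32)
#   next_val = tf.count_up_to(counter, limit=3)
#   sess.run(next_val)   # -> 0, then 1, then 2, then raises OutOfRangeError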
__destroy_temporary_variable_outputs = ["value"]
def _destroy_temporary_variable(ref, var_name, name=None):
r"""Destroys the temporary variable and returns its final value.
Sets output to the value of the Tensor pointed to by 'ref', then destroys
the temporary variable called 'var_name'.
All other uses of 'ref' *must* have executed before this op.
This is typically achieved by chaining the ref through each assign op, or by
using control dependencies.
Outputs the final value of the tensor pointed to by 'ref'.
Args:
ref: A mutable `Tensor`. A reference to the temporary variable tensor.
var_name: A `string`.
Name of the temporary variable, usually the name of the matching
'TemporaryVariable' op.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `ref`.
"""
result = _op_def_lib.apply_op("DestroyTemporaryVariable", ref=ref,
var_name=var_name, name=name)
return result
_is_variable_initialized_outputs = ["is_initialized"]
def is_variable_initialized(ref, name=None):
r"""Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
result = _op_def_lib.apply_op("IsVariableInitialized", ref=ref, name=name)
return result
_scatter_add_outputs = ["output_ref"]
def scatter_add(ref, indices, updates, use_locking=None, name=None):
r"""Adds sparse updates to a variable reference.
This operation computes
# Scalar indices
ref[indices, ...] += updates[...]
# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="../../images/ScatterAdd.png" alt>
</div>
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to add to `ref`.
use_locking: An optional `bool`. Defaults to `False`.
If True, the addition will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
result = _op_def_lib.apply_op("ScatterAdd", ref=ref, indices=indices,
updates=updates, use_locking=use_locking,
name=name)
return result
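# Illustrative usage sketch (assumed caller-side code, not part of this
# generated module): duplicate indices accumulate, which is what makes
# `scatter_add` suitable for accumulation-style updates:
#
#   v = tf.Variable([0.0, 0.0, 0.0])
#   op = tf.scatter_add(v, indices=[0, 0, 2], updates=[1.0, 2.0, 5.0])
#   sess.run(op)   # -> [3.0, 0.0, 5.0]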
_scatter_div_outputs = ["output_ref"]
def scatter_div(ref, indices, updates, use_locking=None, name=None):
r"""Divides a variable reference by sparse updates.
This operation computes
# Scalar indices
ref[indices, ...] /= updates[...]
# Vector indices (for each i)
ref[indices[i], ...] /= updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions divide.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of values that `ref` is divided by.
use_locking: An optional `bool`. Defaults to `False`.
If True, the operation will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
result = _op_def_lib.apply_op("ScatterDiv", ref=ref, indices=indices,
updates=updates, use_locking=use_locking,
name=name)
return result
_scatter_mul_outputs = ["output_ref"]
def scatter_mul(ref, indices, updates, use_locking=None, name=None):
r"""Multiplies sparse updates into a variable reference.
This operation computes
# Scalar indices
ref[indices, ...] *= updates[...]
# Vector indices (for each i)
ref[indices[i], ...] *= updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions multiply.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to multiply to `ref`.
use_locking: An optional `bool`. Defaults to `False`.
If True, the operation will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
result = _op_def_lib.apply_op("ScatterMul", ref=ref, indices=indices,
updates=updates, use_locking=use_locking,
name=name)
return result
_scatter_nd_add_outputs = ["output_ref"]
def scatter_nd_add(ref, indices, updates, use_locking=None, name=None):
r"""Applies sparse addition between `updates` and individual values or slices
within a given variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
  For example, say we want to add 4 scattered elements to a rank-1 tensor with
  8 elements. In Python, that addition would look like this:
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
add = tf.scatter_nd_add(ref, indices, updates)
with tf.Session() as sess:
        print(sess.run(add))
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to
slices.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
A mutable Tensor. Should be from a Variable node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A Tensor. Must be one of the following types: int32, int64.
A tensor of indices into ref.
updates: A `Tensor`. Must have the same type as `ref`.
A Tensor. Must have the same type as ref. A tensor of updated values
to add to ref.
use_locking: An optional `bool`. Defaults to `False`.
      If True, the addition will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
Same as ref. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
result = _op_def_lib.apply_op("ScatterNdAdd", ref=ref, indices=indices,
updates=updates, use_locking=use_locking,
name=name)
return result
_scatter_nd_sub_outputs = ["output_ref"]
def scatter_nd_sub(ref, indices, updates, use_locking=None, name=None):
r"""Applies sparse subtraction between `updates` and individual values or slices
within a given variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to subtract 4 scattered elements from a rank-1 tensor
with 8 elements. In Python, that subtraction would look like this:
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
sub = tf.scatter_nd_sub(ref, indices, updates)
with tf.Session() as sess:
        print(sess.run(sub))
The resulting update to ref would look like this:
[1, -9, 3, -6, -4, 6, 7, -4]
See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to
slices.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
A mutable Tensor. Should be from a Variable node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A Tensor. Must be one of the following types: int32, int64.
A tensor of indices into ref.
updates: A `Tensor`. Must have the same type as `ref`.
A Tensor. Must have the same type as ref. A tensor of updated values
to subtract from ref.
use_locking: An optional `bool`. Defaults to `False`.
      If True, the subtraction will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
Same as ref. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
result = _op_def_lib.apply_op("ScatterNdSub", ref=ref, indices=indices,
updates=updates, use_locking=use_locking,
name=name)
return result
_scatter_nd_update_outputs = ["output_ref"]
def scatter_nd_update(ref, indices, updates, use_locking=None, name=None):
r"""Applies sparse `updates` to individual values or slices within a given
variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
  For example, say we want to update 4 scattered elements of a rank-1 tensor
  with 8 elements. In Python, that update would look like this:
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
update = tf.scatter_nd_update(ref, indices, updates)
with tf.Session() as sess:
        print(sess.run(update))
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to
slices.
Args:
ref: A mutable `Tensor`. A mutable Tensor. Should be from a Variable node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A Tensor. Must be one of the following types: int32, int64.
A tensor of indices into ref.
updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `True`.
      If True, the assignment will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
Same as ref. Returned as a convenience for operations that want to
use the updated values after the update is done.
"""
result = _op_def_lib.apply_op("ScatterNdUpdate", ref=ref, indices=indices,
updates=updates, use_locking=use_locking,
name=name)
return result
_scatter_sub_outputs = ["output_ref"]
def scatter_sub(ref, indices, updates, use_locking=None, name=None):
r"""Subtracts sparse updates to a variable reference.
# Scalar indices
ref[indices, ...] -= updates[...]
# Vector indices (for each i)
ref[indices[i], ...] -= updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their (negated) contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="../../images/ScatterSub.png" alt>
</div>
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to subtract from `ref`.
use_locking: An optional `bool`. Defaults to `False`.
If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
result = _op_def_lib.apply_op("ScatterSub", ref=ref, indices=indices,
updates=updates, use_locking=use_locking,
name=name)
return result
_scatter_update_outputs = ["output_ref"]
def scatter_update(ref, indices, updates, use_locking=None, name=None):
r"""Applies sparse updates to a variable reference.
This operation computes
# Scalar indices
ref[indices, ...] = updates[...]
# Vector indices (for each i)
ref[indices[i], ...] = updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
  If values in `ref` are to be updated more than once, because there are
  duplicate entries in `indices`, the order in which the updates happen
  for each value is undefined.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="../../images/ScatterUpdate.png" alt>
</div>
Args:
ref: A mutable `Tensor`. Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
result = _op_def_lib.apply_op("ScatterUpdate", ref=ref, indices=indices,
updates=updates, use_locking=use_locking,
name=name)
return result
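# Illustrative usage sketch (assumed caller-side code, not part of this
# generated module): unlike `scatter_add`, duplicate indices here overwrite,
# and the order of those overwrites is undefined:
#
#   v = tf.Variable([10, 20, 30, 40])
#   op = tf.scatter_update(v, indices=[1, 3], updates=[99, 77])
#   sess.run(op)   # -> [10, 99, 30, 77]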
__temporary_variable_outputs = ["ref"]
def _temporary_variable(shape, dtype, var_name=None, name=None):
r"""Returns a tensor that may be mutated, but only persists within a single step.
This is an experimental op for internal use only and it is possible to use this
op in unsafe ways. DO NOT USE unless you fully understand the risks.
It is the caller's responsibility to ensure that 'ref' is eventually passed to a
matching 'DestroyTemporaryVariable' op after all other uses have completed.
Outputs a ref to the tensor state so it may be read or modified.
E.g.
var = state_ops._temporary_variable([1, 2], types.float_)
var_name = var.op.name
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = state_ops._destroy_temporary_variable(var, var_name=var_name)
Args:
shape: A `tf.TensorShape` or list of `ints`.
The shape of the variable tensor.
dtype: A `tf.DType`. The type of elements in the variable tensor.
var_name: An optional `string`. Defaults to `""`.
Overrides the name used for the temporary variable resource. Default
value is the name of the 'TemporaryVariable' op (which is guaranteed unique).
name: A name for the operation (optional).
Returns:
A mutable `Tensor` of type `dtype`. A reference to the variable tensor.
"""
result = _op_def_lib.apply_op("TemporaryVariable", shape=shape, dtype=dtype,
var_name=var_name, name=name)
return result
__variable_outputs = ["ref"]
def _variable(shape, dtype, container=None, shared_name=None, name=None):
r"""Holds state in the form of a tensor that persists across steps.
Outputs a ref to the tensor state so it may be read or modified.
TODO(zhifengc/mrry): Adds a pointer to a more detail document
about sharing states in tensorflow.
Args:
shape: A `tf.TensorShape` or list of `ints`.
The shape of the variable tensor.
dtype: A `tf.DType`. The type of elements in the variable tensor.
container: An optional `string`. Defaults to `""`.
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
name: A name for the operation (optional).
Returns:
A mutable `Tensor` of type `dtype`. A reference to the variable tensor.
"""
result = _op_def_lib.apply_op("Variable", shape=shape, dtype=dtype,
container=container, shared_name=shared_name,
name=name)
return result
def _InitOpDefLibrary():
op_list = op_def_pb2.OpList()
text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
op_def_registry.register_op_list(op_list)
op_def_lib = op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
_InitOpDefLibrary.op_list_ascii = """op {
name: "Assign"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "value"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
attr {
name: "validate_shape"
type: "bool"
default_value {
b: true
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: true
}
}
allows_uninitialized_input: true
}
op {
name: "AssignAdd"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "value"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "AssignSub"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "value"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "CountUpTo"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "limit"
type: "int"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "DestroyTemporaryVariable"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
output_arg {
name: "value"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "var_name"
type: "string"
}
}
op {
name: "IsVariableInitialized"
input_arg {
name: "ref"
type_attr: "dtype"
is_ref: true
}
output_arg {
name: "is_initialized"
type: DT_BOOL
}
attr {
name: "dtype"
type: "type"
}
allows_uninitialized_input: true
}
op {
name: "ScatterAdd"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ScatterDiv"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ScatterMul"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ScatterNdAdd"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ScatterNdSub"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ScatterNdUpdate"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: true
}
}
}
op {
name: "ScatterSub"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ScatterUpdate"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: true
}
}
}
op {
name: "TemporaryVariable"
output_arg {
name: "ref"
type_attr: "dtype"
is_ref: true
}
attr {
name: "shape"
type: "shape"
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "var_name"
type: "string"
default_value {
s: ""
}
}
is_stateful: true
}
op {
name: "Variable"
output_arg {
name: "ref"
type_attr: "dtype"
is_ref: true
}
attr {
name: "shape"
type: "shape"
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "container"
type: "string"
default_value {
s: ""
}
}
attr {
name: "shared_name"
type: "string"
default_value {
s: ""
}
}
is_stateful: true
}
"""
_op_def_lib = _InitOpDefLibrary()
| 26.844939
| 204
| 0.621955
|
450e327d6bbd23bfdb940bfaa4a7447da4eca54b
| 4,840
|
py
|
Python
|
monai/data/png_saver.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
monai/data/png_saver.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
monai/data/png_saver.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Union
import numpy as np
import torch
from monai.data.png_writer import write_png
from monai.data.utils import create_file_basename
from monai.utils import InterpolateMode
class PNGSaver:
"""
    Save data as a png file; it supports a single data item or a batch of data.
    Typically, the data is a segmentation prediction: call `save` for a single item
    or `save_batch` to save a batch of data together. If no meta data is provided,
    an index starting from 0 is used as the filename prefix.
"""
def __init__(
self,
output_dir: str = "./",
output_postfix: str = "seg",
output_ext: str = ".png",
resample: bool = True,
mode: Union[InterpolateMode, str] = InterpolateMode.NEAREST,
scale: Optional[int] = None,
) -> None:
"""
Args:
output_dir: output image directory.
output_postfix: a string appended to all output file names.
output_ext: output file extension name.
            resample: whether to resample and resize the data if ``spatial_shape`` is provided in the metadata.
mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``"nearest"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling
[0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling.
"""
self.output_dir = output_dir
self.output_postfix = output_postfix
self.output_ext = output_ext
self.resample = resample
self.mode: InterpolateMode = InterpolateMode(mode)
self.scale = scale
self._data_index = 0
def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None:
"""
Save data into a png file.
The meta_data could optionally have the following keys:
- ``'filename_or_obj'`` -- for output file name creation, corresponding to filename or object.
- ``'spatial_shape'`` -- for data output shape.
If meta_data is None, use the default index (starting from 0) as the filename.
Args:
            data: target data content to be saved as a png format file.
                The data is assumed to be channel-first with spatial shape (C, H, W),
                where C must be 1, 3 or 4.
meta_data: the meta data information corresponding to the data.
Raises:
            ValueError: When the number of ``data`` channels is not one of [1, 3, 4].
See Also
:py:meth:`monai.data.png_writer.write_png`
"""
filename = meta_data["filename_or_obj"] if meta_data else str(self._data_index)
self._data_index += 1
spatial_shape = meta_data.get("spatial_shape", None) if meta_data and self.resample else None
if torch.is_tensor(data):
data = data.detach().cpu().numpy()
filename = create_file_basename(self.output_postfix, filename, self.output_dir)
filename = f"{filename}{self.output_ext}"
if data.shape[0] == 1:
data = data.squeeze(0)
elif 2 < data.shape[0] < 5:
data = np.moveaxis(data, 0, -1)
else:
raise ValueError(f"Unsupported number of channels: {data.shape[0]}, available options are [1, 3, 4]")
write_png(
data,
file_name=filename,
output_spatial_shape=spatial_shape,
mode=self.mode,
scale=self.scale,
)
def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None:
"""Save a batch of data into png format files.
Args:
            batch_data: target batch data content to be saved into png format files.
            meta_data: every key-value pair in the meta_data corresponds to a batch of data.
"""
for i, data in enumerate(batch_data): # save a batch of files
self.save(data, {k: meta_data[k][i] for k in meta_data} if meta_data else None)
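# Illustrative usage sketch (not part of the original module); the file name
# below is an assumption and the exact output path depends on
# ``create_file_basename``:
#
#   saver = PNGSaver(output_dir="./out", output_postfix="seg", scale=255)
#   pred = torch.zeros(1, 64, 64)  # (C, H, W) with C == 1
#   saver.save(pred, meta_data={"filename_or_obj": "case_001.nii"})
#   # roughly -> ./out/case_001/case_001_seg.png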
| 40.672269
| 113
| 0.633058
|
0cbaf33ea7ad3e657b9515399301c39989a2f22a
| 82,616
|
py
|
Python
|
test/sql/test_selectable.py
|
gujun4990/sqlalchemy
|
057bae2295feb86529a04f09cd2f3d4c2c6d88a8
|
[
"MIT"
] | 1
|
2018-04-02T18:41:52.000Z
|
2018-04-02T18:41:52.000Z
|
test/sql/test_selectable.py
|
gujun4990/sqlalchemy
|
057bae2295feb86529a04f09cd2f3d4c2c6d88a8
|
[
"MIT"
] | null | null | null |
test/sql/test_selectable.py
|
gujun4990/sqlalchemy
|
057bae2295feb86529a04f09cd2f3d4c2c6d88a8
|
[
"MIT"
] | 3
|
2017-09-26T13:59:24.000Z
|
2020-12-04T17:51:54.000Z
|
"""Test various algorithmic properties of selectables."""
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, is_
from sqlalchemy import *
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \
AssertsExecutionResults
from sqlalchemy.sql import elements
from sqlalchemy import testing
from sqlalchemy.sql import util as sql_util, visitors, expression
from sqlalchemy import exc
from sqlalchemy.sql import table, column, null
from sqlalchemy import util
from sqlalchemy.schema import Column, Table, MetaData
metadata = MetaData()
table1 = Table('table1', metadata,
Column('col1', Integer, primary_key=True),
Column('col2', String(20)),
Column('col3', Integer),
Column('colx', Integer),
)
table2 = Table('table2', metadata,
Column('col1', Integer, primary_key=True),
Column('col2', Integer, ForeignKey('table1.col1')),
Column('col3', String(20)),
Column('coly', Integer),
)
keyed = Table('keyed', metadata,
Column('x', Integer, key='colx'),
Column('y', Integer, key='coly'),
Column('z', Integer),
)
class SelectableTest(
fixtures.TestBase,
AssertsExecutionResults,
AssertsCompiledSQL):
__dialect__ = 'default'
def test_indirect_correspondence_on_labels(self):
# this test depends upon 'distance' to
# get the right result
# same column three times
s = select([table1.c.col1.label('c2'), table1.c.col1,
table1.c.col1.label('c1')])
# this tests the same thing as
# test_direct_correspondence_on_labels below -
# that the presence of label() affects the 'distance'
assert s.corresponding_column(table1.c.col1) is s.c.col1
assert s.corresponding_column(s.c.col1) is s.c.col1
assert s.corresponding_column(s.c.c1) is s.c.c1
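    # Illustrative sketch (not one of the original tests): corresponding_column()
    # resolves a column of an originating selectable to its proxy on a derived
    # selectable, e.g. (assumed usage):
    #
    #   s = select([table1.c.col1.label('c1')]).alias('a')
    #   s.corresponding_column(table1.c.col1) is s.c.c1   # -> True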
def test_labeled_subquery_twice(self):
scalar_select = select([table1.c.col1]).label('foo')
s1 = select([scalar_select])
s2 = select([scalar_select, scalar_select])
eq_(
s1.c.foo.proxy_set,
set([s1.c.foo, scalar_select, scalar_select.element])
)
eq_(
s2.c.foo.proxy_set,
set([s2.c.foo, scalar_select, scalar_select.element])
)
assert s1.corresponding_column(scalar_select) is s1.c.foo
assert s2.corresponding_column(scalar_select) is s2.c.foo
def test_label_grouped_still_corresponds(self):
label = select([table1.c.col1]).label('foo')
label2 = label.self_group()
s1 = select([label])
s2 = select([label2])
assert s1.corresponding_column(label) is s1.c.foo
assert s2.corresponding_column(label) is s2.c.foo
def test_direct_correspondence_on_labels(self):
# this test depends on labels being part
# of the proxy set to get the right result
l1, l2 = table1.c.col1.label('foo'), table1.c.col1.label('bar')
sel = select([l1, l2])
sel2 = sel.alias()
assert sel2.corresponding_column(l1) is sel2.c.foo
assert sel2.corresponding_column(l2) is sel2.c.bar
sel2 = select([table1.c.col1.label('foo'), table1.c.col2.label('bar')])
sel3 = sel.union(sel2).alias()
assert sel3.corresponding_column(l1) is sel3.c.foo
assert sel3.corresponding_column(l2) is sel3.c.bar
def test_keyed_gen(self):
s = select([keyed])
eq_(s.c.colx.key, 'colx')
eq_(s.c.colx.name, 'x')
assert s.corresponding_column(keyed.c.colx) is s.c.colx
assert s.corresponding_column(keyed.c.coly) is s.c.coly
assert s.corresponding_column(keyed.c.z) is s.c.z
sel2 = s.alias()
assert sel2.corresponding_column(keyed.c.colx) is sel2.c.colx
assert sel2.corresponding_column(keyed.c.coly) is sel2.c.coly
assert sel2.corresponding_column(keyed.c.z) is sel2.c.z
def test_keyed_label_gen(self):
s = select([keyed]).apply_labels()
assert s.corresponding_column(keyed.c.colx) is s.c.keyed_colx
assert s.corresponding_column(keyed.c.coly) is s.c.keyed_coly
assert s.corresponding_column(keyed.c.z) is s.c.keyed_z
sel2 = s.alias()
assert sel2.corresponding_column(keyed.c.colx) is sel2.c.keyed_colx
assert sel2.corresponding_column(keyed.c.coly) is sel2.c.keyed_coly
assert sel2.corresponding_column(keyed.c.z) is sel2.c.keyed_z
def test_keyed_c_collection_upper(self):
c = Column('foo', Integer, key='bar')
t = Table('t', MetaData(), c)
is_(t.c.bar, c)
def test_keyed_c_collection_lower(self):
c = column('foo')
c.key = 'bar'
t = table('t', c)
is_(t.c.bar, c)
def test_clone_c_proxy_key_upper(self):
c = Column('foo', Integer, key='bar')
t = Table('t', MetaData(), c)
s = select([t])._clone()
assert c in s.c.bar.proxy_set
def test_clone_c_proxy_key_lower(self):
c = column('foo')
c.key = 'bar'
t = table('t', c)
s = select([t])._clone()
assert c in s.c.bar.proxy_set
def test_no_error_on_unsupported_expr_key(self):
from sqlalchemy.sql.expression import BinaryExpression
def myop(x, y):
pass
t = table('t', column('x'), column('y'))
expr = BinaryExpression(t.c.x, t.c.y, myop)
s = select([t, expr])
eq_(
s.c.keys(),
['x', 'y', expr.anon_label]
)
def test_cloned_intersection(self):
t1 = table('t1', column('x'))
t2 = table('t2', column('x'))
s1 = t1.select()
s2 = t2.select()
s3 = t1.select()
s1c1 = s1._clone()
s1c2 = s1._clone()
s2c1 = s2._clone()
s3c1 = s3._clone()
eq_(
expression._cloned_intersection(
[s1c1, s3c1], [s2c1, s1c2]
),
set([s1c1])
)
def test_cloned_difference(self):
t1 = table('t1', column('x'))
t2 = table('t2', column('x'))
s1 = t1.select()
s2 = t2.select()
s3 = t1.select()
s1c1 = s1._clone()
s1c2 = s1._clone()
s2c1 = s2._clone()
s2c2 = s2._clone()
s3c1 = s3._clone()
eq_(
expression._cloned_difference(
[s1c1, s2c1, s3c1], [s2c1, s1c2]
),
set([s3c1])
)
def test_distance_on_aliases(self):
a1 = table1.alias('a1')
for s in (select([a1, table1], use_labels=True),
select([table1, a1], use_labels=True)):
assert s.corresponding_column(table1.c.col1) \
is s.c.table1_col1
assert s.corresponding_column(a1.c.col1) is s.c.a1_col1
def test_join_against_self(self):
jj = select([table1.c.col1.label('bar_col1')])
jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1)
# test column directly against itself
assert jjj.corresponding_column(jjj.c.table1_col1) \
is jjj.c.table1_col1
assert jjj.corresponding_column(jj.c.bar_col1) is jjj.c.bar_col1
# test alias of the join
j2 = jjj.alias('foo')
assert j2.corresponding_column(table1.c.col1) \
is j2.c.table1_col1
def test_clone_append_column(self):
sel = select([literal_column('1').label('a')])
eq_(list(sel.c.keys()), ['a'])
cloned = visitors.ReplacingCloningVisitor().traverse(sel)
cloned.append_column(literal_column('2').label('b'))
cloned.append_column(func.foo())
eq_(list(cloned.c.keys()), ['a', 'b', 'foo()'])
def test_append_column_after_replace_selectable(self):
basesel = select([literal_column('1').label('a')])
tojoin = select([
literal_column('1').label('a'),
literal_column('2').label('b')
])
basefrom = basesel.alias('basefrom')
joinfrom = tojoin.alias('joinfrom')
sel = select([basefrom.c.a])
replaced = sel.replace_selectable(
basefrom,
basefrom.join(joinfrom, basefrom.c.a == joinfrom.c.a)
)
self.assert_compile(
replaced,
"SELECT basefrom.a FROM (SELECT 1 AS a) AS basefrom "
"JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom "
"ON basefrom.a = joinfrom.a"
)
replaced.append_column(joinfrom.c.b)
self.assert_compile(
replaced,
"SELECT basefrom.a, joinfrom.b FROM (SELECT 1 AS a) AS basefrom "
"JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom "
"ON basefrom.a = joinfrom.a"
)
def test_against_cloned_non_table(self):
# test that corresponding column digs across
# clone boundaries with anonymous labeled elements
col = func.count().label('foo')
sel = select([col])
sel2 = visitors.ReplacingCloningVisitor().traverse(sel)
assert sel2.corresponding_column(col) is sel2.c.foo
sel3 = visitors.ReplacingCloningVisitor().traverse(sel2)
assert sel3.corresponding_column(col) is sel3.c.foo
def test_with_only_generative(self):
s1 = table1.select().as_scalar()
self.assert_compile(
s1.with_only_columns([s1]),
"SELECT (SELECT table1.col1, table1.col2, "
"table1.col3, table1.colx FROM table1) AS anon_1"
)
def test_type_coerce_preserve_subq(self):
class MyType(TypeDecorator):
impl = Integer
stmt = select([type_coerce(column('x'), MyType).label('foo')])
stmt2 = stmt.select()
assert isinstance(stmt._raw_columns[0].type, MyType)
assert isinstance(stmt.c.foo.type, MyType)
assert isinstance(stmt2.c.foo.type, MyType)
def test_select_on_table(self):
sel = select([table1, table2], use_labels=True)
assert sel.corresponding_column(table1.c.col1) \
is sel.c.table1_col1
assert sel.corresponding_column(
table1.c.col1,
require_embedded=True) is sel.c.table1_col1
assert table1.corresponding_column(sel.c.table1_col1) \
is table1.c.col1
assert table1.corresponding_column(sel.c.table1_col1,
require_embedded=True) is None
def test_join_against_join(self):
j = outerjoin(table1, table2, table1.c.col1 == table2.c.col2)
jj = select([table1.c.col1.label('bar_col1')],
from_obj=[j]).alias('foo')
jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1)
assert jjj.corresponding_column(jjj.c.table1_col1) \
is jjj.c.table1_col1
j2 = jjj.alias('foo')
assert j2.corresponding_column(jjj.c.table1_col1) \
is j2.c.table1_col1
assert jjj.corresponding_column(jj.c.bar_col1) is jj.c.bar_col1
def test_table_alias(self):
a = table1.alias('a')
j = join(a, table2)
criterion = a.c.col1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_alias_handles_column_context(self):
# not quite a use case yet but this is expected to become
# prominent w/ Postgresql's tuple functions
stmt = select([table1.c.col1, table1.c.col2])
a = stmt.alias('a')
self.assert_compile(
select([func.foo(a)]),
"SELECT foo(SELECT table1.col1, table1.col2 FROM table1) "
"AS foo_1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2 FROM table1) "
"AS a"
)
def test_union(self):
# tests that we can correspond a column in a Select statement
# with a certain Table, against a column in a Union where one of
# its underlying Selects matches to that same Table
u = select([table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label('coly')]).union(select([table2.c.col1,
table2.c.col2,
table2.c.col3,
null().label('colx'),
table2.c.coly]))
s1 = table1.select(use_labels=True)
s2 = table2.select(use_labels=True)
assert u.corresponding_column(s1.c.table1_col2) is u.c.col2
assert u.corresponding_column(s2.c.table2_col2) is u.c.col2
def test_union_precedence(self):
# conflicting column correspondence should be resolved based on
# the order of the select()s in the union
s1 = select([table1.c.col1, table1.c.col2])
s2 = select([table1.c.col2, table1.c.col1])
s3 = select([table1.c.col3, table1.c.colx])
s4 = select([table1.c.colx, table1.c.col3])
u1 = union(s1, s2)
assert u1.corresponding_column(table1.c.col1) is u1.c.col1
assert u1.corresponding_column(table1.c.col2) is u1.c.col2
u1 = union(s1, s2, s3, s4)
assert u1.corresponding_column(table1.c.col1) is u1.c.col1
assert u1.corresponding_column(table1.c.col2) is u1.c.col2
assert u1.corresponding_column(table1.c.colx) is u1.c.col2
assert u1.corresponding_column(table1.c.col3) is u1.c.col1
def test_singular_union(self):
u = union(select([table1.c.col1, table1.c.col2, table1.c.col3]),
select([table1.c.col1, table1.c.col2, table1.c.col3]))
u = union(select([table1.c.col1, table1.c.col2, table1.c.col3]))
assert u.c.col1 is not None
assert u.c.col2 is not None
assert u.c.col3 is not None
def test_alias_union(self):
# same as testunion, except its an alias of the union
u = select([table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label('coly')]).union(
select([table2.c.col1, table2.c.col2, table2.c.col3,
null().label('colx'), table2.c.coly])
).alias('analias')
s1 = table1.select(use_labels=True)
s2 = table2.select(use_labels=True)
assert u.corresponding_column(s1.c.table1_col2) is u.c.col2
assert u.corresponding_column(s2.c.table2_col2) is u.c.col2
assert u.corresponding_column(s2.c.table2_coly) is u.c.coly
assert s2.corresponding_column(u.c.coly) is s2.c.table2_coly
def test_union_of_alias(self):
s1 = select([table1.c.col1, table1.c.col2])
s2 = select([table1.c.col1, table1.c.col2]).alias()
u1 = union(s1, s2)
assert u1.corresponding_column(s1.c.col1) is u1.c.col1
assert u1.corresponding_column(s2.c.col1) is u1.c.col1
u2 = union(s2, s1)
assert u2.corresponding_column(s1.c.col1) is u2.c.col1
assert u2.corresponding_column(s2.c.col1) is u2.c.col1
def test_union_of_text(self):
s1 = select([table1.c.col1, table1.c.col2])
s2 = text("select col1, col2 from foo").columns(
column('col1'), column('col2'))
u1 = union(s1, s2)
assert u1.corresponding_column(s1.c.col1) is u1.c.col1
assert u1.corresponding_column(s2.c.col1) is u1.c.col1
u2 = union(s2, s1)
assert u2.corresponding_column(s1.c.col1) is u2.c.col1
assert u2.corresponding_column(s2.c.col1) is u2.c.col1
@testing.emits_warning("Column 'col1'")
def test_union_dupe_keys(self):
s1 = select([table1.c.col1, table1.c.col2, table2.c.col1])
s2 = select([table2.c.col1, table2.c.col2, table2.c.col3])
u1 = union(s1, s2)
assert u1.corresponding_column(
s1.c._all_columns[0]) is u1.c._all_columns[0]
assert u1.corresponding_column(s2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(s1.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col3) is u1.c._all_columns[2]
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[2]
assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
@testing.emits_warning("Column 'col1'")
def test_union_alias_dupe_keys(self):
s1 = select([table1.c.col1, table1.c.col2, table2.c.col1]).alias()
s2 = select([table2.c.col1, table2.c.col2, table2.c.col3])
u1 = union(s1, s2)
assert u1.corresponding_column(
s1.c._all_columns[0]) is u1.c._all_columns[0]
assert u1.corresponding_column(s2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(s1.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col3) is u1.c._all_columns[2]
        # this differs from the non-alias test because table2.c.col1 is
        # closer to s2.c.col1 than it is to s1.c.col1.
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
@testing.emits_warning("Column 'col1'")
def test_union_alias_dupe_keys_grouped(self):
s1 = select([table1.c.col1, table1.c.col2, table2.c.col1]).\
limit(1).alias()
s2 = select([table2.c.col1, table2.c.col2, table2.c.col3]).limit(1)
u1 = union(s1, s2)
assert u1.corresponding_column(
s1.c._all_columns[0]) is u1.c._all_columns[0]
assert u1.corresponding_column(s2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(s1.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col2) is u1.c.col2
assert u1.corresponding_column(s2.c.col3) is u1.c._all_columns[2]
        # this differs from the non-alias test because table2.c.col1 is
        # closer to s2.c.col1 than it is to s1.c.col1.
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
def test_select_union(self):
# like testaliasunion, but off a Select off the union.
u = select([table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label('coly')]).union(
select([table2.c.col1, table2.c.col2, table2.c.col3,
null().label('colx'), table2.c.coly])
).alias('analias')
s = select([u])
s1 = table1.select(use_labels=True)
s2 = table2.select(use_labels=True)
assert s.corresponding_column(s1.c.table1_col2) is s.c.col2
assert s.corresponding_column(s2.c.table2_col2) is s.c.col2
def test_union_against_join(self):
# same as testunion, except its an alias of the union
u = select([table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label('coly')]).union(
select([table2.c.col1, table2.c.col2, table2.c.col3,
null().label('colx'), table2.c.coly])
).alias('analias')
j1 = table1.join(table2)
assert u.corresponding_column(j1.c.table1_colx) is u.c.colx
assert j1.corresponding_column(u.c.colx) is j1.c.table1_colx
def test_join(self):
a = join(table1, table2)
print(str(a.select(use_labels=True)))
b = table2.alias('b')
j = join(a, b)
print(str(j))
criterion = a.c.table1_col1 == b.c.col2
self.assert_(criterion.compare(j.onclause))
def test_select_alias(self):
a = table1.select().alias('a')
j = join(a, table2)
criterion = a.c.col1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_select_labels(self):
a = table1.select(use_labels=True)
j = join(a, table2)
criterion = a.c.table1_col1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_scalar_cloned_comparator(self):
sel = select([table1.c.col1]).as_scalar()
expr = sel == table1.c.col1
sel2 = visitors.ReplacingCloningVisitor().traverse(sel)
expr2 = sel2 == table1.c.col1
is_(expr2.left, sel2)
def test_column_labels(self):
a = select([table1.c.col1.label('acol1'),
table1.c.col2.label('acol2'),
table1.c.col3.label('acol3')])
j = join(a, table2)
criterion = a.c.acol1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_labeled_select_correspoinding(self):
l1 = select([func.max(table1.c.col1)]).label('foo')
s = select([l1])
eq_(s.corresponding_column(l1), s.c.foo)
s = select([table1.c.col1, l1])
eq_(s.corresponding_column(l1), s.c.foo)
def test_select_alias_labels(self):
a = table2.select(use_labels=True).alias('a')
j = join(a, table1)
criterion = table1.c.col1 == a.c.table2_col2
self.assert_(criterion.compare(j.onclause))
def test_table_joined_to_select_of_table(self):
metadata = MetaData()
a = Table('a', metadata,
Column('id', Integer, primary_key=True))
j2 = select([a.c.id.label('aid')]).alias('bar')
j3 = a.join(j2, j2.c.aid == a.c.id)
j4 = select([j3]).alias('foo')
assert j4.corresponding_column(j2.c.aid) is j4.c.aid
assert j4.corresponding_column(a.c.id) is j4.c.id
def test_two_metadata_join_raises(self):
m = MetaData()
m2 = MetaData()
t1 = Table('t1', m, Column('id', Integer), Column('id2', Integer))
t2 = Table('t2', m, Column('id', Integer, ForeignKey('t1.id')))
t3 = Table('t3', m2, Column('id', Integer, ForeignKey('t1.id2')))
s = select([t2, t3], use_labels=True)
assert_raises(exc.NoReferencedTableError, s.join, t1)
def test_multi_label_chain_naming_col(self):
# See [ticket:2167] for this one.
l1 = table1.c.col1.label('a')
l2 = select([l1]).label('b')
s = select([l2])
assert s.c.b is not None
self.assert_compile(
s.select(),
"SELECT b FROM (SELECT (SELECT table1.col1 AS a FROM table1) AS b)"
)
s2 = select([s.label('c')])
self.assert_compile(
s2.select(),
"SELECT c FROM (SELECT (SELECT ("
"SELECT table1.col1 AS a FROM table1) AS b) AS c)"
)
def test_self_referential_select_raises(self):
t = table('t', column('x'))
s = select([t])
s.append_whereclause(s.c.x > 5)
assert_raises_message(
exc.InvalidRequestError,
r"select\(\) construct refers to itself as a FROM",
s.compile
)
def test_unusual_column_elements_text(self):
"""test that .c excludes text()."""
s = select([table1.c.col1, text("foo")])
eq_(
list(s.c),
[s.c.col1]
)
def test_unusual_column_elements_clauselist(self):
"""Test that raw ClauseList is expanded into .c."""
from sqlalchemy.sql.expression import ClauseList
s = select([table1.c.col1, ClauseList(table1.c.col2, table1.c.col3)])
eq_(
list(s.c),
[s.c.col1, s.c.col2, s.c.col3]
)
def test_unusual_column_elements_boolean_clauselist(self):
"""test that BooleanClauseList is placed as single element in .c."""
c2 = and_(table1.c.col2 == 5, table1.c.col3 == 4)
s = select([table1.c.col1, c2])
eq_(
list(s.c),
[s.c.col1, s.corresponding_column(c2)]
)
def test_from_list_deferred_constructor(self):
c1 = Column('c1', Integer)
c2 = Column('c2', Integer)
s = select([c1])
t = Table('t', MetaData(), c1, c2)
eq_(c1._from_objects, [t])
eq_(c2._from_objects, [t])
self.assert_compile(select([c1]),
"SELECT t.c1 FROM t")
self.assert_compile(select([c2]),
"SELECT t.c2 FROM t")
def test_from_list_deferred_whereclause(self):
c1 = Column('c1', Integer)
c2 = Column('c2', Integer)
s = select([c1]).where(c1 == 5)
t = Table('t', MetaData(), c1, c2)
eq_(c1._from_objects, [t])
eq_(c2._from_objects, [t])
self.assert_compile(select([c1]),
"SELECT t.c1 FROM t")
self.assert_compile(select([c2]),
"SELECT t.c2 FROM t")
def test_from_list_deferred_fromlist(self):
m = MetaData()
t1 = Table('t1', m, Column('x', Integer))
c1 = Column('c1', Integer)
s = select([c1]).where(c1 == 5).select_from(t1)
t2 = Table('t2', MetaData(), c1)
eq_(c1._from_objects, [t2])
self.assert_compile(select([c1]),
"SELECT t2.c1 FROM t2")
def test_from_list_deferred_cloning(self):
c1 = Column('c1', Integer)
c2 = Column('c2', Integer)
s = select([c1])
s2 = select([c2])
s3 = sql_util.ClauseAdapter(s).traverse(s2)
Table('t', MetaData(), c1, c2)
self.assert_compile(
s3,
"SELECT t.c2 FROM t"
)
def test_from_list_with_columns(self):
table1 = table('t1', column('a'))
table2 = table('t2', column('b'))
s1 = select([table1.c.a, table2.c.b])
self.assert_compile(s1,
"SELECT t1.a, t2.b FROM t1, t2"
)
s2 = s1.with_only_columns([table2.c.b])
self.assert_compile(s2,
"SELECT t2.b FROM t2"
)
s3 = sql_util.ClauseAdapter(table1).traverse(s1)
self.assert_compile(s3,
"SELECT t1.a, t2.b FROM t1, t2"
)
s4 = s3.with_only_columns([table2.c.b])
self.assert_compile(s4,
"SELECT t2.b FROM t2"
)
def test_from_list_warning_against_existing(self):
c1 = Column('c1', Integer)
s = select([c1])
# force a compile.
self.assert_compile(
s,
"SELECT c1"
)
Table('t', MetaData(), c1)
self.assert_compile(
s,
"SELECT t.c1 FROM t"
)
def test_from_list_recovers_after_warning(self):
c1 = Column('c1', Integer)
c2 = Column('c2', Integer)
s = select([c1])
# force a compile.
eq_(str(s), "SELECT c1")
@testing.emits_warning()
def go():
return Table('t', MetaData(), c1, c2)
t = go()
eq_(c1._from_objects, [t])
eq_(c2._from_objects, [t])
# 's' has been baked. Can't afford
# not caching select._froms.
# hopefully the warning will clue the user
self.assert_compile(s, "SELECT t.c1 FROM t")
self.assert_compile(select([c1]), "SELECT t.c1 FROM t")
self.assert_compile(select([c2]), "SELECT t.c2 FROM t")
def test_label_gen_resets_on_table(self):
c1 = Column('c1', Integer)
eq_(c1._label, "c1")
Table('t1', MetaData(), c1)
eq_(c1._label, "t1_c1")
class RefreshForNewColTest(fixtures.TestBase):
def test_join_uninit(self):
a = table('a', column('x'))
b = table('b', column('y'))
j = a.join(b, a.c.x == b.c.y)
q = column('q')
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_join_init(self):
a = table('a', column('x'))
b = table('b', column('y'))
j = a.join(b, a.c.x == b.c.y)
j.c
q = column('q')
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_join_samename_init(self):
a = table('a', column('x'))
b = table('b', column('y'))
j = a.join(b, a.c.x == b.c.y)
j.c
q = column('x')
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_x is q
def test_select_samename_init(self):
a = table('a', column('x'))
b = table('b', column('y'))
s = select([a, b]).apply_labels()
s.c
q = column('x')
b.append_column(q)
s._refresh_for_new_column(q)
assert q in s.c.b_x.proxy_set
def test_aliased_select_samename_uninit(self):
a = table('a', column('x'))
b = table('b', column('y'))
s = select([a, b]).apply_labels().alias()
q = column('x')
b.append_column(q)
s._refresh_for_new_column(q)
assert q in s.c.b_x.proxy_set
def test_aliased_select_samename_init(self):
a = table('a', column('x'))
b = table('b', column('y'))
s = select([a, b]).apply_labels().alias()
s.c
q = column('x')
b.append_column(q)
s._refresh_for_new_column(q)
assert q in s.c.b_x.proxy_set
def test_aliased_select_irrelevant(self):
a = table('a', column('x'))
b = table('b', column('y'))
c = table('c', column('z'))
s = select([a, b]).apply_labels().alias()
s.c
q = column('x')
c.append_column(q)
s._refresh_for_new_column(q)
assert 'c_x' not in s.c
def test_aliased_select_no_cols_clause(self):
a = table('a', column('x'))
s = select([a.c.x]).apply_labels().alias()
s.c
q = column('q')
a.append_column(q)
s._refresh_for_new_column(q)
assert 'a_q' not in s.c
def test_union_uninit(self):
a = table('a', column('x'))
s1 = select([a])
s2 = select([a])
s3 = s1.union(s2)
q = column('q')
a.append_column(q)
s3._refresh_for_new_column(q)
assert a.c.q in s3.c.q.proxy_set
def test_union_init_raises(self):
a = table('a', column('x'))
s1 = select([a])
s2 = select([a])
s3 = s1.union(s2)
s3.c
q = column('q')
a.append_column(q)
assert_raises_message(
NotImplementedError,
"CompoundSelect constructs don't support addition of "
"columns to underlying selectables",
s3._refresh_for_new_column, q
)
def test_nested_join_uninit(self):
a = table('a', column('x'))
b = table('b', column('y'))
c = table('c', column('z'))
j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z)
q = column('q')
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_nested_join_init(self):
a = table('a', column('x'))
b = table('b', column('y'))
c = table('c', column('z'))
j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z)
j.c
q = column('q')
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_fk_table(self):
m = MetaData()
fk = ForeignKey('x.id')
Table('x', m, Column('id', Integer))
a = Table('a', m, Column('x', Integer, fk))
a.c
q = Column('q', Integer)
a.append_column(q)
a._refresh_for_new_column(q)
eq_(a.foreign_keys, set([fk]))
fk2 = ForeignKey('g.id')
p = Column('p', Integer, fk2)
a.append_column(p)
a._refresh_for_new_column(p)
eq_(a.foreign_keys, set([fk, fk2]))
def test_fk_join(self):
m = MetaData()
fk = ForeignKey('x.id')
Table('x', m, Column('id', Integer))
a = Table('a', m, Column('x', Integer, fk))
b = Table('b', m, Column('y', Integer))
j = a.join(b, a.c.x == b.c.y)
j.c
q = Column('q', Integer)
b.append_column(q)
j._refresh_for_new_column(q)
eq_(j.foreign_keys, set([fk]))
fk2 = ForeignKey('g.id')
p = Column('p', Integer, fk2)
b.append_column(p)
j._refresh_for_new_column(p)
eq_(j.foreign_keys, set([fk, fk2]))
class AnonLabelTest(fixtures.TestBase):
"""Test behaviors fixed by [ticket:2168]."""
def test_anon_labels_named_column(self):
c1 = column('x')
assert c1.label(None) is not c1
eq_(str(select([c1.label(None)])), "SELECT x AS x_1")
def test_anon_labels_literal_column(self):
c1 = literal_column('x')
assert c1.label(None) is not c1
eq_(str(select([c1.label(None)])), "SELECT x AS x_1")
def test_anon_labels_func(self):
c1 = func.count('*')
assert c1.label(None) is not c1
eq_(str(select([c1])), "SELECT count(:count_2) AS count_1")
c2 = select([c1]).compile()
eq_(str(select([c1.label(None)])), "SELECT count(:count_2) AS count_1")
def test_named_labels_named_column(self):
c1 = column('x')
eq_(str(select([c1.label('y')])), "SELECT x AS y")
def test_named_labels_literal_column(self):
c1 = literal_column('x')
eq_(str(select([c1.label('y')])), "SELECT x AS y")
class JoinAliasingTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_flat_ok_on_non_join(self):
a = table('a', column('a'))
s = a.select()
self.assert_compile(
s.alias(flat=True).select(),
"SELECT anon_1.a FROM (SELECT a.a AS a FROM a) AS anon_1"
)
def test_join_alias(self):
a = table('a', column('a'))
b = table('b', column('b'))
self.assert_compile(
a.join(b, a.c.a == b.c.b).alias(),
"SELECT a.a AS a_a, b.b AS b_b FROM a JOIN b ON a.a = b.b"
)
def test_join_standalone_alias(self):
a = table('a', column('a'))
b = table('b', column('b'))
self.assert_compile(
alias(a.join(b, a.c.a == b.c.b)),
"SELECT a.a AS a_a, b.b AS b_b FROM a JOIN b ON a.a = b.b"
)
def test_join_alias_flat(self):
a = table('a', column('a'))
b = table('b', column('b'))
self.assert_compile(
a.join(b, a.c.a == b.c.b).alias(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b"
)
def test_join_standalone_alias_flat(self):
a = table('a', column('a'))
b = table('b', column('b'))
self.assert_compile(
alias(a.join(b, a.c.a == b.c.b), flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b"
)
def test_composed_join_alias_flat(self):
a = table('a', column('a'))
b = table('b', column('b'))
c = table('c', column('c'))
d = table('d', column('d'))
j1 = a.join(b, a.c.a == b.c.b)
j2 = c.join(d, c.c.c == d.c.d)
self.assert_compile(
j1.join(j2, b.c.b == c.c.c).alias(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b JOIN "
"(c AS c_1 JOIN d AS d_1 ON c_1.c = d_1.d) ON b_1.b = c_1.c"
)
def test_composed_join_alias(self):
a = table('a', column('a'))
b = table('b', column('b'))
c = table('c', column('c'))
d = table('d', column('d'))
j1 = a.join(b, a.c.a == b.c.b)
j2 = c.join(d, c.c.c == d.c.d)
self.assert_compile(
select([j1.join(j2, b.c.b == c.c.c).alias()]),
"SELECT anon_1.a_a, anon_1.b_b, anon_1.c_c, anon_1.d_d "
"FROM (SELECT a.a AS a_a, b.b AS b_b, c.c AS c_c, d.d AS d_d "
"FROM a JOIN b ON a.a = b.b "
"JOIN (c JOIN d ON c.c = d.d) ON b.b = c.c) AS anon_1"
)
class JoinConditionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_join_condition(self):
m = MetaData()
t1 = Table('t1', m, Column('id', Integer))
t2 = Table('t2', m,
Column('id', Integer),
Column('t1id', ForeignKey('t1.id')))
t3 = Table('t3', m,
Column('id', Integer),
Column('t1id', ForeignKey('t1.id')),
Column('t2id', ForeignKey('t2.id')))
t4 = Table('t4', m, Column('id', Integer),
Column('t2id', ForeignKey('t2.id')))
t5 = Table('t5', m,
Column('t1id1', ForeignKey('t1.id')),
Column('t1id2', ForeignKey('t1.id')),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
for (left, right, a_subset, expected) in [
(t1, t2, None, t1.c.id == t2.c.t1id),
(t1t2, t3, t2, t1t2.c.t2_id == t3.c.t2id),
(t2t3, t1, t3, t1.c.id == t3.c.t1id),
(t2t3, t4, None, t2t3.c.t2_id == t4.c.t2id),
(t2t3, t4, t3, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, None, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t1, t2t3.c.t2_id == t4.c.t2id),
(t1t2, t2t3, t2, t1t2.c.t2_id == t2t3.c.t3_t2id),
]:
assert expected.compare(
sql_util.join_condition(
left,
right,
a_subset=a_subset))
# these are ambiguous, or have no joins
for left, right, a_subset in [
(t1t2, t3, None),
(t2t3, t1, None),
(t1, t4, None),
(t1t2, t2t3, None),
(t5, t1, None),
(t5.select(use_labels=True), t1, None)
]:
assert_raises(
exc.ArgumentError,
sql_util.join_condition,
left, right, a_subset=a_subset
)
als = t2t3.alias()
# test join's behavior, including natural
for left, right, expected in [
(t1, t2, t1.c.id == t2.c.t1id),
(t1t2, t3, t1t2.c.t2_id == t3.c.t2id),
(t2t3, t1, t1.c.id == t3.c.t1id),
(t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
(t1t2, als, t1t2.c.t2_id == als.c.t3_t2id)
]:
assert expected.compare(
left.join(right).onclause
)
# these are right-nested joins
j = t1t2.join(t2t3)
assert j.onclause.compare(t2.c.id == t3.c.t2id)
self.assert_compile(
j, "t1 JOIN t2 ON t1.id = t2.t1id JOIN "
"(t2 JOIN t3 ON t2.id = t3.t2id) ON t2.id = t3.t2id")
st2t3 = t2t3.select(use_labels=True)
j = t1t2.join(st2t3)
assert j.onclause.compare(t2.c.id == st2t3.c.t3_t2id)
self.assert_compile(
j, "t1 JOIN t2 ON t1.id = t2.t1id JOIN "
"(SELECT t2.id AS t2_id, t2.t1id AS t2_t1id, "
"t3.id AS t3_id, t3.t1id AS t3_t1id, t3.t2id AS t3_t2id "
"FROM t2 JOIN t3 ON t2.id = t3.t2id) ON t2.id = t3_t2id")
def test_join_multiple_equiv_fks(self):
m = MetaData()
t1 = Table('t1', m,
Column('id', Integer, primary_key=True)
)
t2 = Table(
't2',
m,
Column(
't1id',
Integer,
ForeignKey('t1.id'),
ForeignKey('t1.id')))
assert sql_util.join_condition(t1, t2).compare(t1.c.id == t2.c.t1id)
def test_join_cond_no_such_unrelated_table(self):
m = MetaData()
        # bounding the "good" column with two "bad" ones is done to
        # get coverage of the "continue" statements
        # in the loop...
t1 = Table('t1', m,
Column('y', Integer, ForeignKey('t22.id')),
Column('x', Integer, ForeignKey('t2.id')),
Column('q', Integer, ForeignKey('t22.id')),
)
t2 = Table('t2', m, Column('id', Integer))
assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)
def test_join_cond_no_such_unrelated_column(self):
m = MetaData()
t1 = Table('t1', m, Column('x', Integer, ForeignKey('t2.id')),
Column('y', Integer, ForeignKey('t3.q')))
t2 = Table('t2', m, Column('id', Integer))
Table('t3', m, Column('id', Integer))
assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)
def test_join_cond_no_such_related_table(self):
m1 = MetaData()
m2 = MetaData()
t1 = Table('t1', m1, Column('x', Integer, ForeignKey('t2.id')))
t2 = Table('t2', m2, Column('id', Integer))
assert_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't1.x' could not find "
"table 't2' with which to generate a foreign key to "
"target column 'id'",
sql_util.join_condition, t1, t2
)
assert_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't1.x' could not find "
"table 't2' with which to generate a foreign key to "
"target column 'id'",
sql_util.join_condition, t2, t1
)
def test_join_cond_no_such_related_column(self):
m = MetaData()
t1 = Table('t1', m, Column('x', Integer, ForeignKey('t2.q')))
t2 = Table('t2', m, Column('id', Integer))
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for "
"ForeignKey 't2.q' on table 't1': "
"table 't2' has no column named 'q'",
sql_util.join_condition, t1, t2
)
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for "
"ForeignKey 't2.q' on table 't1': "
"table 't2' has no column named 'q'",
sql_util.join_condition, t2, t1
)
class PrimaryKeyTest(fixtures.TestBase, AssertsExecutionResults):
def test_join_pk_collapse_implicit(self):
"""test that redundant columns in a join get 'collapsed' into a
minimal primary key, which is the root column along a chain of
foreign key relationships."""
meta = MetaData()
a = Table('a', meta, Column('id', Integer, primary_key=True))
b = Table('b', meta, Column('id', Integer, ForeignKey('a.id'),
primary_key=True))
c = Table('c', meta, Column('id', Integer, ForeignKey('b.id'),
primary_key=True))
d = Table('d', meta, Column('id', Integer, ForeignKey('c.id'),
primary_key=True))
assert c.c.id.references(b.c.id)
assert not d.c.id.references(a.c.id)
assert list(a.join(b).primary_key) == [a.c.id]
assert list(b.join(c).primary_key) == [b.c.id]
assert list(a.join(b).join(c).primary_key) == [a.c.id]
assert list(b.join(c).join(d).primary_key) == [b.c.id]
assert list(d.join(c).join(b).primary_key) == [b.c.id]
assert list(a.join(b).join(c).join(d).primary_key) == [a.c.id]
def test_join_pk_collapse_explicit(self):
"""test that redundant columns in a join get 'collapsed' into a
minimal primary key, which is the root column along a chain of
explicit join conditions."""
meta = MetaData()
a = Table('a', meta, Column('id', Integer, primary_key=True),
Column('x', Integer))
b = Table('b', meta, Column('id', Integer, ForeignKey('a.id'),
primary_key=True), Column('x', Integer))
c = Table('c', meta, Column('id', Integer, ForeignKey('b.id'),
primary_key=True), Column('x', Integer))
d = Table('d', meta, Column('id', Integer, ForeignKey('c.id'),
primary_key=True), Column('x', Integer))
print(list(a.join(b, a.c.x == b.c.id).primary_key))
assert list(a.join(b, a.c.x == b.c.id).primary_key) == [a.c.id]
assert list(b.join(c, b.c.x == c.c.id).primary_key) == [b.c.id]
assert list(a.join(b).join(c, c.c.id == b.c.x).primary_key) \
== [a.c.id]
assert list(b.join(c, c.c.x == b.c.id).join(d).primary_key) \
== [b.c.id]
assert list(b.join(c, c.c.id == b.c.x).join(d).primary_key) \
== [b.c.id]
assert list(
d.join(
b,
d.c.id == b.c.id).join(
c,
b.c.id == c.c.x).primary_key) == [
b.c.id]
assert list(a.join(b).join(c, c.c.id
== b.c.x).join(d).primary_key) == [a.c.id]
assert list(a.join(b, and_(a.c.id == b.c.id, a.c.x
== b.c.id)).primary_key) == [a.c.id]
def test_init_doesnt_blowitaway(self):
meta = MetaData()
a = Table('a', meta,
Column('id', Integer, primary_key=True),
Column('x', Integer))
b = Table('b', meta,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('x', Integer))
j = a.join(b)
assert list(j.primary_key) == [a.c.id]
j.foreign_keys
assert list(j.primary_key) == [a.c.id]
def test_non_column_clause(self):
meta = MetaData()
a = Table('a', meta,
Column('id', Integer, primary_key=True),
Column('x', Integer))
b = Table('b', meta,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('x', Integer, primary_key=True))
j = a.join(b, and_(a.c.id == b.c.id, b.c.x == 5))
assert str(j) == "a JOIN b ON a.id = b.id AND b.x = :x_1", str(j)
assert list(j.primary_key) == [a.c.id, b.c.x]
def test_onclause_direction(self):
metadata = MetaData()
employee = Table('Employee', metadata,
Column('name', String(100)),
Column('id', Integer, primary_key=True),
)
engineer = Table('Engineer', metadata,
Column('id', Integer,
ForeignKey('Employee.id'), primary_key=True))
eq_(util.column_set(employee.join(engineer, employee.c.id
== engineer.c.id).primary_key),
util.column_set([employee.c.id]))
eq_(util.column_set(employee.join(engineer, engineer.c.id
== employee.c.id).primary_key),
util.column_set([employee.c.id]))
class ReduceTest(fixtures.TestBase, AssertsExecutionResults):
def test_reduce(self):
meta = MetaData()
t1 = Table('t1', meta,
Column('t1id', Integer, primary_key=True),
Column('t1data', String(30)))
t2 = Table(
't2',
meta,
Column(
't2id',
Integer,
ForeignKey('t1.t1id'),
primary_key=True),
Column(
't2data',
String(30)))
t3 = Table(
't3',
meta,
Column(
't3id',
Integer,
ForeignKey('t2.t2id'),
primary_key=True),
Column(
't3data',
String(30)))
eq_(util.column_set(sql_util.reduce_columns([
t1.c.t1id,
t1.c.t1data,
t2.c.t2id,
t2.c.t2data,
t3.c.t3id,
t3.c.t3data,
])), util.column_set([t1.c.t1id, t1.c.t1data, t2.c.t2data,
t3.c.t3data]))
def test_reduce_selectable(self):
metadata = MetaData()
engineers = Table('engineers', metadata,
Column('engineer_id', Integer, primary_key=True),
Column('engineer_name', String(50)))
managers = Table('managers', metadata,
Column('manager_id', Integer, primary_key=True),
Column('manager_name', String(50)))
s = select([engineers,
managers]).where(engineers.c.engineer_name
== managers.c.manager_name)
eq_(util.column_set(sql_util.reduce_columns(list(s.c), s)),
util.column_set([s.c.engineer_id, s.c.engineer_name,
s.c.manager_id]))
def test_reduce_generation(self):
m = MetaData()
t1 = Table('t1', m, Column('x', Integer, primary_key=True),
Column('y', Integer))
t2 = Table('t2', m, Column('z', Integer, ForeignKey('t1.x')),
Column('q', Integer))
s1 = select([t1, t2])
s2 = s1.reduce_columns(only_synonyms=False)
eq_(
set(s2.inner_columns),
set([t1.c.x, t1.c.y, t2.c.q])
)
s2 = s1.reduce_columns()
eq_(
set(s2.inner_columns),
set([t1.c.x, t1.c.y, t2.c.z, t2.c.q])
)
def test_reduce_only_synonym_fk(self):
m = MetaData()
t1 = Table('t1', m, Column('x', Integer, primary_key=True),
Column('y', Integer))
t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x')),
Column('q', Integer, ForeignKey('t1.y')))
s1 = select([t1, t2])
s1 = s1.reduce_columns(only_synonyms=True)
eq_(
set(s1.c),
set([s1.c.x, s1.c.y, s1.c.q])
)
def test_reduce_only_synonym_lineage(self):
m = MetaData()
t1 = Table('t1', m, Column('x', Integer, primary_key=True),
Column('y', Integer),
Column('z', Integer)
)
# test that the first appearance in the columns clause
# wins - t1 is first, t1.c.x wins
s1 = select([t1])
s2 = select([t1, s1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().inner_columns),
set([t1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z])
)
# reverse order, s1.c.x wins
s1 = select([t1])
s2 = select([s1, t1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().inner_columns),
set([s1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z])
)
def test_reduce_aliased_join(self):
metadata = MetaData()
people = Table(
'people', metadata, Column(
'person_id', Integer, Sequence(
'person_id_seq', optional=True), primary_key=True), Column(
'name', String(50)), Column(
'type', String(30)))
engineers = Table(
'engineers',
metadata,
Column('person_id', Integer, ForeignKey('people.person_id'
), primary_key=True),
Column('status', String(30)),
Column('engineer_name', String(50)),
Column('primary_language', String(50)),
)
managers = Table(
'managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)),
Column('manager_name', String(50)))
pjoin = \
people.outerjoin(engineers).outerjoin(managers).\
select(use_labels=True).alias('pjoin'
)
eq_(util.column_set(sql_util.reduce_columns(
[pjoin.c.people_person_id, pjoin.c.engineers_person_id,
pjoin.c.managers_person_id])),
util.column_set([pjoin.c.people_person_id]))
def test_reduce_aliased_union(self):
metadata = MetaData()
item_table = Table(
'item',
metadata,
Column(
'id',
Integer,
ForeignKey('base_item.id'),
primary_key=True),
Column(
'dummy',
Integer,
default=0))
base_item_table = Table(
'base_item', metadata, Column(
'id', Integer, primary_key=True), Column(
'child_name', String(255), default=None))
from sqlalchemy.orm.util import polymorphic_union
item_join = polymorphic_union({
'BaseItem':
base_item_table.select(
base_item_table.c.child_name
== 'BaseItem'),
'Item': base_item_table.join(item_table)},
None, 'item_join')
eq_(util.column_set(sql_util.reduce_columns([item_join.c.id,
item_join.c.dummy,
item_join.c.child_name])),
util.column_set([item_join.c.id,
item_join.c.dummy,
item_join.c.child_name]))
def test_reduce_aliased_union_2(self):
metadata = MetaData()
page_table = Table('page', metadata, Column('id', Integer,
primary_key=True))
magazine_page_table = Table('magazine_page', metadata,
Column('page_id', Integer,
ForeignKey('page.id'),
primary_key=True))
classified_page_table = Table(
'classified_page',
metadata,
Column(
'magazine_page_id',
Integer,
ForeignKey('magazine_page.page_id'),
primary_key=True))
# this is essentially the union formed by the ORM's
# polymorphic_union function. we define two versions with
# different ordering of selects.
#
# the first selectable has the "real" column
# classified_page.magazine_page_id
pjoin = union(
select([
page_table.c.id,
magazine_page_table.c.page_id,
classified_page_table.c.magazine_page_id
]).
select_from(
page_table.join(magazine_page_table).
join(classified_page_table)),
select([
page_table.c.id,
magazine_page_table.c.page_id,
cast(null(), Integer).label('magazine_page_id')
]).
select_from(page_table.join(magazine_page_table))
).alias('pjoin')
eq_(util.column_set(sql_util.reduce_columns(
[pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id])),
util.column_set([pjoin.c.id]))
# the first selectable has a CAST, which is a placeholder for
# classified_page.magazine_page_id in the second selectable.
# reduce_columns needs to take into account all foreign keys
# derived from pjoin.c.magazine_page_id. the UNION construct
# currently makes the external column look like that of the
# first selectable only.
pjoin = union(select([
page_table.c.id,
magazine_page_table.c.page_id,
cast(null(), Integer).label('magazine_page_id')
]).
select_from(page_table.join(magazine_page_table)),
select([
page_table.c.id,
magazine_page_table.c.page_id,
classified_page_table.c.magazine_page_id
]).
select_from(page_table.join(magazine_page_table).
join(classified_page_table))
).alias('pjoin')
eq_(util.column_set(sql_util.reduce_columns(
[pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id])),
util.column_set([pjoin.c.id]))
class DerivedTest(fixtures.TestBase, AssertsExecutionResults):
def test_table(self):
meta = MetaData()
t1 = Table('t1', meta, Column('c1', Integer, primary_key=True),
Column('c2', String(30)))
t2 = Table('t2', meta, Column('c1', Integer, primary_key=True),
Column('c2', String(30)))
assert t1.is_derived_from(t1)
assert not t2.is_derived_from(t1)
def test_alias(self):
meta = MetaData()
t1 = Table('t1', meta, Column('c1', Integer, primary_key=True),
Column('c2', String(30)))
t2 = Table('t2', meta, Column('c1', Integer, primary_key=True),
Column('c2', String(30)))
assert t1.alias().is_derived_from(t1)
assert not t2.alias().is_derived_from(t1)
assert not t1.is_derived_from(t1.alias())
assert not t1.is_derived_from(t2.alias())
def test_select(self):
meta = MetaData()
t1 = Table('t1', meta, Column('c1', Integer, primary_key=True),
Column('c2', String(30)))
t2 = Table('t2', meta, Column('c1', Integer, primary_key=True),
Column('c2', String(30)))
assert t1.select().is_derived_from(t1)
assert not t2.select().is_derived_from(t1)
assert select([t1, t2]).is_derived_from(t1)
assert t1.select().alias('foo').is_derived_from(t1)
assert select([t1, t2]).alias('foo').is_derived_from(t1)
assert not t2.select().alias('foo').is_derived_from(t1)
class AnnotationsTest(fixtures.TestBase):
def test_hashing(self):
t = table('t', column('x'))
a = t.alias()
s = t.select()
s2 = a.select()
for obj in [
t,
t.c.x,
a,
s,
s2,
t.c.x > 1,
(t.c.x > 1).label(None)
]:
annot = obj._annotate({})
eq_(set([obj]), set([annot]))
def test_compare(self):
t = table('t', column('x'), column('y'))
x_a = t.c.x._annotate({})
assert t.c.x.compare(x_a)
assert x_a.compare(t.c.x)
assert not x_a.compare(t.c.y)
assert not t.c.y.compare(x_a)
assert (t.c.x == 5).compare(x_a == 5)
assert not (t.c.y == 5).compare(x_a == 5)
s = select([t])
x_p = s.c.x
assert not x_a.compare(x_p)
assert not t.c.x.compare(x_p)
x_p_a = x_p._annotate({})
assert x_p_a.compare(x_p)
assert x_p.compare(x_p_a)
assert not x_p_a.compare(x_a)
def test_late_name_add(self):
from sqlalchemy.schema import Column
c1 = Column(Integer)
c1_a = c1._annotate({"foo": "bar"})
c1.name = 'somename'
eq_(c1_a.name, 'somename')
def test_late_table_add(self):
c1 = Column("foo", Integer)
c1_a = c1._annotate({"foo": "bar"})
t = Table('t', MetaData(), c1)
is_(c1_a.table, t)
def test_basic_attrs(self):
t = Table('t', MetaData(),
Column('x', Integer, info={'q': 'p'}),
Column('y', Integer, key='q'))
x_a = t.c.x._annotate({})
y_a = t.c.q._annotate({})
t.c.x.info['z'] = 'h'
eq_(y_a.key, 'q')
is_(x_a.table, t)
eq_(x_a.info, {'q': 'p', 'z': 'h'})
eq_(t.c.x.anon_label, x_a.anon_label)
def test_custom_constructions(self):
from sqlalchemy.schema import Column
class MyColumn(Column):
def __init__(self):
Column.__init__(self, 'foo', Integer)
_constructor = Column
t1 = Table('t1', MetaData(), MyColumn())
s1 = t1.select()
assert isinstance(t1.c.foo, MyColumn)
assert isinstance(s1.c.foo, Column)
annot_1 = t1.c.foo._annotate({})
s2 = select([annot_1])
assert isinstance(s2.c.foo, Column)
annot_2 = s1._annotate({})
assert isinstance(annot_2.c.foo, Column)
def test_custom_construction_correct_anno_subclass(self):
# [ticket:2918]
from sqlalchemy.schema import Column
from sqlalchemy.sql.elements import AnnotatedColumnElement
class MyColumn(Column):
pass
assert isinstance(
MyColumn('x', Integer)._annotate({"foo": "bar"}),
AnnotatedColumnElement)
def test_custom_construction_correct_anno_expr(self):
# [ticket:2918]
from sqlalchemy.schema import Column
class MyColumn(Column):
pass
col = MyColumn('x', Integer)
binary_1 = col == 5
col_anno = MyColumn('x', Integer)._annotate({"foo": "bar"})
binary_2 = col_anno == 5
eq_(binary_2.left._annotations, {"foo": "bar"})
def test_annotated_corresponding_column(self):
table1 = table('table1', column("col1"))
s1 = select([table1.c.col1])
t1 = s1._annotate({})
t2 = s1
# t1 needs to share the same _make_proxy() columns as t2, even
# though it's annotated. otherwise paths will diverge once they
# are corresponded against "inner" below.
assert t1.c is t2.c
assert t1.c.col1 is t2.c.col1
inner = select([s1])
assert inner.corresponding_column(
t2.c.col1,
require_embedded=False) is inner.corresponding_column(
t2.c.col1,
require_embedded=True) is inner.c.col1
assert inner.corresponding_column(
t1.c.col1,
require_embedded=False) is inner.corresponding_column(
t1.c.col1,
require_embedded=True) is inner.c.col1
def test_annotated_visit(self):
table1 = table('table1', column("col1"), column("col2"))
bin = table1.c.col1 == bindparam('foo', value=None)
assert str(bin) == "table1.col1 = :foo"
def visit_binary(b):
b.right = table1.c.col2
b2 = visitors.cloned_traverse(bin, {}, {'binary': visit_binary})
assert str(b2) == "table1.col1 = table1.col2"
b3 = visitors.cloned_traverse(bin._annotate({}), {}, {'binary':
visit_binary})
assert str(b3) == 'table1.col1 = table1.col2'
def visit_binary(b):
b.left = bindparam('bar')
b4 = visitors.cloned_traverse(b2, {}, {'binary': visit_binary})
assert str(b4) == ":bar = table1.col2"
b5 = visitors.cloned_traverse(b3, {}, {'binary': visit_binary})
assert str(b5) == ":bar = table1.col2"
def test_label_accessors(self):
t1 = table('t1', column('c1'))
l1 = t1.c.c1.label(None)
is_(l1._order_by_label_element, l1)
l1a = l1._annotate({"foo": "bar"})
is_(l1a._order_by_label_element, l1a)
def test_annotate_aliased(self):
t1 = table('t1', column('c1'))
s = select([(t1.c.c1 + 3).label('bat')])
a = s.alias()
a = sql_util._deep_annotate(a, {'foo': 'bar'})
eq_(a._annotations['foo'], 'bar')
eq_(a.element._annotations['foo'], 'bar')
def test_annotate_expressions(self):
table1 = table('table1', column('col1'), column('col2'))
for expr, expected in [(table1.c.col1, 'table1.col1'),
(table1.c.col1 == 5,
'table1.col1 = :col1_1'),
(table1.c.col1.in_([2, 3, 4]),
'table1.col1 IN (:col1_1, :col1_2, '
':col1_3)')]:
eq_(str(expr), expected)
eq_(str(expr._annotate({})), expected)
eq_(str(sql_util._deep_annotate(expr, {})), expected)
eq_(str(sql_util._deep_annotate(
expr, {}, exclude=[table1.c.col1])), expected)
def test_deannotate(self):
table1 = table('table1', column("col1"), column("col2"))
bin = table1.c.col1 == bindparam('foo', value=None)
b2 = sql_util._deep_annotate(bin, {'_orm_adapt': True})
b3 = sql_util._deep_deannotate(b2)
b4 = sql_util._deep_deannotate(bin)
for elem in (b2._annotations, b2.left._annotations):
assert '_orm_adapt' in elem
for elem in b3._annotations, b3.left._annotations, \
b4._annotations, b4.left._annotations:
assert elem == {}
assert b2.left is not bin.left
assert b3.left is not b2.left and b2.left is not bin.left
assert b4.left is bin.left # since column is immutable
# deannotate copies the element
assert bin.right is not b2.right and b2.right is not b3.right \
and b3.right is not b4.right
def test_annotate_unique_traversal(self):
"""test that items are copied only once during
annotate, deannotate traversal
#2453 - however note this was modified by
#1401, and it's likely that re49563072578
is helping us with the str() comparison
case now, as deannotate is making
clones again in some cases.
"""
table1 = table('table1', column('x'))
table2 = table('table2', column('y'))
a1 = table1.alias()
s = select([a1.c.x]).select_from(
a1.join(table2, a1.c.x == table2.c.y)
)
for sel in (
sql_util._deep_deannotate(s),
visitors.cloned_traverse(s, {}, {}),
visitors.replacement_traverse(s, {}, lambda x: None)
):
# the columns clause isn't changed at all
assert sel._raw_columns[0].table is a1
assert sel._froms[0] is sel._froms[1].left
eq_(str(s), str(sel))
# when we are modifying annotations sets only
# partially, each element is copied unconditionally
# when encountered.
for sel in (
sql_util._deep_deannotate(s, {"foo": "bar"}),
sql_util._deep_annotate(s, {'foo': 'bar'}),
):
assert sel._froms[0] is not sel._froms[1].left
# but things still work out due to
# re49563072578
eq_(str(s), str(sel))
def test_annotate_varied_annot_same_col(self):
"""test two instances of the same column with different annotations
preserving them when deep_annotate is run on them.
"""
t1 = table('table1', column("col1"), column("col2"))
s = select([t1.c.col1._annotate({"foo": "bar"})])
s2 = select([t1.c.col1._annotate({"bat": "hoho"})])
s3 = s.union(s2)
sel = sql_util._deep_annotate(s3, {"new": "thing"})
eq_(
sel.selects[0]._raw_columns[0]._annotations,
{"foo": "bar", "new": "thing"}
)
eq_(
sel.selects[1]._raw_columns[0]._annotations,
{"bat": "hoho", "new": "thing"}
)
def test_deannotate_2(self):
table1 = table('table1', column("col1"), column("col2"))
j = table1.c.col1._annotate({"remote": True}) == \
table1.c.col2._annotate({"local": True})
j2 = sql_util._deep_deannotate(j)
eq_(
j.left._annotations, {"remote": True}
)
eq_(
j2.left._annotations, {}
)
def test_deannotate_3(self):
table1 = table('table1', column("col1"), column("col2"),
column("col3"), column("col4"))
j = and_(
table1.c.col1._annotate({"remote": True}) ==
table1.c.col2._annotate({"local": True}),
table1.c.col3._annotate({"remote": True}) ==
table1.c.col4._annotate({"local": True})
)
j2 = sql_util._deep_deannotate(j)
eq_(
j.clauses[0].left._annotations, {"remote": True}
)
eq_(
j2.clauses[0].left._annotations, {}
)
def test_annotate_fromlist_preservation(self):
"""test the FROM list in select still works
even when multiple annotate runs have created
copies of the same selectable
#2453, continued
"""
table1 = table('table1', column('x'))
table2 = table('table2', column('y'))
a1 = table1.alias()
s = select([a1.c.x]).select_from(
a1.join(table2, a1.c.x == table2.c.y)
)
assert_s = select([select([s])])
for fn in (
sql_util._deep_deannotate,
lambda s: sql_util._deep_annotate(s, {'foo': 'bar'}),
lambda s: visitors.cloned_traverse(s, {}, {}),
lambda s: visitors.replacement_traverse(s, {}, lambda x: None)
):
sel = fn(select([fn(select([fn(s)]))]))
eq_(str(assert_s), str(sel))
def test_bind_unique_test(self):
table('t', column('a'), column('b'))
b = bindparam("bind", value="x", unique=True)
# the annotation of "b" should render the
# same. The "unique" test in compiler should
# also pass, [ticket:2425]
eq_(str(or_(b, b._annotate({"foo": "bar"}))),
":bind_1 OR :bind_1")
def test_comparators_cleaned_out_construction(self):
c = column('a')
comp1 = c.comparator
c1 = c._annotate({"foo": "bar"})
comp2 = c1.comparator
assert comp1 is not comp2
def test_comparators_cleaned_out_reannotate(self):
c = column('a')
c1 = c._annotate({"foo": "bar"})
comp1 = c1.comparator
c2 = c1._annotate({"bat": "hoho"})
comp2 = c2.comparator
assert comp1 is not comp2
def test_comparator_cleanout_integration(self):
c = column('a')
c1 = c._annotate({"foo": "bar"})
comp1 = c1.comparator
c2 = c1._annotate({"bat": "hoho"})
comp2 = c2.comparator
assert (c2 == 5).left._annotations == {"foo": "bar", "bat": "hoho"}
class ReprTest(fixtures.TestBase):
def test_ensure_repr_elements(self):
for obj in [
elements.Cast(1, 2),
elements.TypeClause(String()),
elements.ColumnClause('x'),
elements.BindParameter('q'),
elements.Null(),
elements.True_(),
elements.False_(),
elements.ClauseList(),
elements.BooleanClauseList.and_(),
elements.Tuple(),
elements.Case([]),
elements.Extract('foo', column('x')),
elements.UnaryExpression(column('x')),
elements.Grouping(column('x')),
elements.Over(func.foo()),
elements.Label('q', column('x')),
]:
repr(obj)
class WithLabelsTest(fixtures.TestBase):
def _assert_labels_warning(self, s):
assert_raises_message(
exc.SAWarning,
r"replaced by Column.*, which has the same key",
lambda: s.c
)
def _assert_result_keys(self, s, keys):
compiled = s.compile()
eq_(set(compiled._create_result_map()), set(keys))
def _assert_subq_result_keys(self, s, keys):
compiled = s.select().compile()
eq_(set(compiled._create_result_map()), set(keys))
def _names_overlap(self):
m = MetaData()
t1 = Table('t1', m, Column('x', Integer))
t2 = Table('t2', m, Column('x', Integer))
return select([t1, t2])
def test_names_overlap_nolabel(self):
sel = self._names_overlap()
self._assert_labels_warning(sel)
self._assert_result_keys(sel, ['x'])
def test_names_overlap_label(self):
sel = self._names_overlap().apply_labels()
eq_(
list(sel.c.keys()),
['t1_x', 't2_x']
)
self._assert_result_keys(sel, ['t1_x', 't2_x'])
def _names_overlap_keys_dont(self):
m = MetaData()
t1 = Table('t1', m, Column('x', Integer, key='a'))
t2 = Table('t2', m, Column('x', Integer, key='b'))
return select([t1, t2])
def test_names_overlap_keys_dont_nolabel(self):
sel = self._names_overlap_keys_dont()
eq_(
list(sel.c.keys()),
['a', 'b']
)
self._assert_result_keys(sel, ['x'])
def test_names_overlap_keys_dont_label(self):
sel = self._names_overlap_keys_dont().apply_labels()
eq_(
list(sel.c.keys()),
['t1_a', 't2_b']
)
self._assert_result_keys(sel, ['t1_x', 't2_x'])
def _labels_overlap(self):
m = MetaData()
t1 = Table('t', m, Column('x_id', Integer))
t2 = Table('t_x', m, Column('id', Integer))
return select([t1, t2])
def test_labels_overlap_nolabel(self):
sel = self._labels_overlap()
eq_(
list(sel.c.keys()),
['x_id', 'id']
)
self._assert_result_keys(sel, ['x_id', 'id'])
def test_labels_overlap_label(self):
sel = self._labels_overlap().apply_labels()
t2 = sel.froms[1]
eq_(
list(sel.c.keys()),
['t_x_id', t2.c.id.anon_label]
)
self._assert_result_keys(sel, ['t_x_id', 'id_1'])
self._assert_subq_result_keys(sel, ['t_x_id', 'id_1'])
def _labels_overlap_keylabels_dont(self):
m = MetaData()
t1 = Table('t', m, Column('x_id', Integer, key='a'))
t2 = Table('t_x', m, Column('id', Integer, key='b'))
return select([t1, t2])
def test_labels_overlap_keylabels_dont_nolabel(self):
sel = self._labels_overlap_keylabels_dont()
eq_(list(sel.c.keys()), ['a', 'b'])
self._assert_result_keys(sel, ['x_id', 'id'])
def test_labels_overlap_keylabels_dont_label(self):
sel = self._labels_overlap_keylabels_dont().apply_labels()
eq_(list(sel.c.keys()), ['t_a', 't_x_b'])
self._assert_result_keys(sel, ['t_x_id', 'id_1'])
def _keylabels_overlap_labels_dont(self):
m = MetaData()
t1 = Table('t', m, Column('a', Integer, key='x_id'))
t2 = Table('t_x', m, Column('b', Integer, key='id'))
return select([t1, t2])
def test_keylabels_overlap_labels_dont_nolabel(self):
sel = self._keylabels_overlap_labels_dont()
eq_(list(sel.c.keys()), ['x_id', 'id'])
self._assert_result_keys(sel, ['a', 'b'])
def test_keylabels_overlap_labels_dont_label(self):
sel = self._keylabels_overlap_labels_dont().apply_labels()
t2 = sel.froms[1]
eq_(list(sel.c.keys()), ['t_x_id', t2.c.id.anon_label])
self._assert_result_keys(sel, ['t_a', 't_x_b'])
self._assert_subq_result_keys(sel, ['t_a', 't_x_b'])
def _keylabels_overlap_labels_overlap(self):
m = MetaData()
t1 = Table('t', m, Column('x_id', Integer, key='x_a'))
t2 = Table('t_x', m, Column('id', Integer, key='a'))
return select([t1, t2])
def test_keylabels_overlap_labels_overlap_nolabel(self):
sel = self._keylabels_overlap_labels_overlap()
eq_(list(sel.c.keys()), ['x_a', 'a'])
self._assert_result_keys(sel, ['x_id', 'id'])
self._assert_subq_result_keys(sel, ['x_id', 'id'])
def test_keylabels_overlap_labels_overlap_label(self):
sel = self._keylabels_overlap_labels_overlap().apply_labels()
t2 = sel.froms[1]
eq_(list(sel.c.keys()), ['t_x_a', t2.c.a.anon_label])
self._assert_result_keys(sel, ['t_x_id', 'id_1'])
self._assert_subq_result_keys(sel, ['t_x_id', 'id_1'])
def _keys_overlap_names_dont(self):
m = MetaData()
t1 = Table('t1', m, Column('a', Integer, key='x'))
t2 = Table('t2', m, Column('b', Integer, key='x'))
return select([t1, t2])
def test_keys_overlap_names_dont_nolabel(self):
sel = self._keys_overlap_names_dont()
self._assert_labels_warning(sel)
self._assert_result_keys(sel, ['a', 'b'])
def test_keys_overlap_names_dont_label(self):
sel = self._keys_overlap_names_dont().apply_labels()
eq_(
list(sel.c.keys()),
['t1_x', 't2_x']
)
self._assert_result_keys(sel, ['t1_a', 't2_b'])
class ResultMapTest(fixtures.TestBase):
def _fixture(self):
m = MetaData()
t = Table('t', m, Column('x', Integer), Column('y', Integer))
return t
def _mapping(self, stmt):
compiled = stmt.compile()
return dict(
(elem, key)
for key, elements in compiled._create_result_map().items()
for elem in elements[1]
)
def test_select_label_alt_name(self):
t = self._fixture()
l1, l2 = t.c.x.label('a'), t.c.y.label('b')
s = select([l1, l2])
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_alias_label_alt_name(self):
t = self._fixture()
l1, l2 = t.c.x.label('a'), t.c.y.label('b')
s = select([l1, l2]).alias()
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_alias_column(self):
t = self._fixture()
x, y = t.c.x, t.c.y
s = select([x, y]).alias()
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_alias_column_apply_labels(self):
t = self._fixture()
x, y = t.c.x, t.c.y
s = select([x, y]).apply_labels().alias()
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_table_alias_column(self):
t = self._fixture()
x, y = t.c.x, t.c.y
ta = t.alias()
s = select([ta.c.x, ta.c.y])
mapping = self._mapping(s)
assert x not in mapping
def test_select_label_alt_name_table_alias_column(self):
t = self._fixture()
x, y = t.c.x, t.c.y
ta = t.alias()
l1, l2 = ta.c.x.label('a'), ta.c.y.label('b')
s = select([l1, l2])
mapping = self._mapping(s)
assert x not in mapping
assert l1 in mapping
assert ta.c.x not in mapping
def test_column_subquery_exists(self):
t = self._fixture()
s = exists().where(t.c.x == 5).select()
mapping = self._mapping(s)
assert t.c.x not in mapping
eq_(
[type(entry[-1]) for entry in s.compile()._result_columns],
[Boolean]
)
def test_plain_exists(self):
expr = exists([1])
eq_(type(expr.type), Boolean)
eq_(
[type(entry[-1]) for
entry in select([expr]).compile()._result_columns],
[Boolean]
)
def test_plain_exists_negate(self):
expr = ~exists([1])
eq_(type(expr.type), Boolean)
eq_(
[type(entry[-1]) for
entry in select([expr]).compile()._result_columns],
[Boolean]
)
def test_plain_exists_double_negate(self):
expr = ~(~exists([1]))
eq_(type(expr.type), Boolean)
eq_(
[type(entry[-1]) for
entry in select([expr]).compile()._result_columns],
[Boolean]
)
def test_column_subquery_plain(self):
t = self._fixture()
s1 = select([t.c.x]).where(t.c.x > 5).as_scalar()
s2 = select([s1])
mapping = self._mapping(s2)
assert t.c.x not in mapping
assert s1 in mapping
eq_(
[type(entry[-1]) for entry in s2.compile()._result_columns],
[Integer]
)
def test_unary_boolean(self):
s1 = select([not_(True)], use_labels=True)
eq_(
[type(entry[-1]) for entry in s1.compile()._result_columns],
[Boolean]
)
class ForUpdateTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def _assert_legacy(self, leg, read=False, nowait=False):
t = table('t', column('c'))
s1 = select([t], for_update=leg)
if leg is False:
assert s1._for_update_arg is None
assert s1.for_update is None
else:
eq_(
s1._for_update_arg.read, read
)
eq_(
s1._for_update_arg.nowait, nowait
)
eq_(s1.for_update, leg)
def test_false_legacy(self):
self._assert_legacy(False)
def test_plain_true_legacy(self):
self._assert_legacy(True)
def test_read_legacy(self):
self._assert_legacy("read", read=True)
def test_nowait_legacy(self):
self._assert_legacy("nowait", nowait=True)
def test_read_nowait_legacy(self):
self._assert_legacy("read_nowait", read=True, nowait=True)
def test_legacy_setter(self):
t = table('t', column('c'))
s = select([t])
s.for_update = 'nowait'
eq_(s._for_update_arg.nowait, True)
def test_basic_clone(self):
t = table('t', column('c'))
s = select([t]).with_for_update(read=True, of=t.c.c)
s2 = visitors.ReplacingCloningVisitor().traverse(s)
assert s2._for_update_arg is not s._for_update_arg
eq_(s2._for_update_arg.read, True)
eq_(s2._for_update_arg.of, [t.c.c])
self.assert_compile(s2,
"SELECT t.c FROM t FOR SHARE OF t",
dialect="postgresql")
def test_adapt(self):
t = table('t', column('c'))
s = select([t]).with_for_update(read=True, of=t.c.c)
a = t.alias()
s2 = sql_util.ClauseAdapter(a).traverse(s)
eq_(s2._for_update_arg.of, [a.c.c])
self.assert_compile(s2,
"SELECT t_1.c FROM t AS t_1 FOR SHARE OF t_1",
dialect="postgresql")
| 34.683459 | 79 | 0.545439 |
| edd4b889aaca2071b7d1eff6a77d3d199ebb8ec3 | 4,925 | py | Python | install/core/python/tank_vendor/shotgun_api3/lib/mockgun/schema.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | ["MIT"] | null | null | null | install/core/python/tank_vendor/shotgun_api3/lib/mockgun/schema.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | ["MIT"] | null | null | null | install/core/python/tank_vendor/shotgun_api3/lib/mockgun/schema.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | ["MIT"] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z |
"""
-----------------------------------------------------------------------------
Copyright (c) 2009-2017, Shotgun Software Inc
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the Shotgun Software Inc nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----------------------------------------------------------------------------
"""
import cPickle as pickle
import os
import copy
from .errors import MockgunError
class SchemaFactory(object):
"""
    Allows a pickled schema to be instantiated.
"""
_schema_entity_cache = None
_schema_entity_cache_path = None
_schema_cache = None
_schema_cache_path = None
@classmethod
def get_schemas(cls, schema_path, schema_entity_path):
"""
Retrieves the schemas from disk.
:param str schema_path: Path to the schema.
:param str schema_entity_path: Path to the entities schema.
:returns: Pair of dictionaries holding the schema and entities schema.
:rtype: tuple
"""
if not os.path.exists(schema_path):
raise MockgunError("Cannot locate Mockgun schema file '%s'!" % schema_path)
if not os.path.exists(schema_entity_path):
raise MockgunError("Cannot locate Mockgun schema file '%s'!" % schema_entity_path)
# Poor man's attempt at a cache. All of our use cases deal with a single pair of files
# for the duration of the unit tests, so keep a cache for both inputs. We don't want
# to deal with ever growing caches anyway. Just having this simple cache has shown
# speed increases of up to 500% for Toolkit unit tests alone.
if schema_path != cls._schema_cache_path:
cls._schema_cache = cls._read_file(schema_path)
cls._schema_cache_path = schema_path
if schema_entity_path != cls._schema_entity_cache_path:
cls._schema_entity_cache = cls._read_file(schema_entity_path)
cls._schema_entity_cache_path = schema_entity_path
return cls._schema_cache, cls._schema_entity_cache
@classmethod
def _read_file(cls, path):
fh = open(path, "rb")
try:
return pickle.load(fh)
finally:
fh.close()
# Highest protocol that Python 2.4 supports, which is the earliest version of Python we support.
# Actually, this is the same version that Python 2.7 supports at the moment!
_HIGHEST_24_PICKLE_PROTOCOL = 2
# ----------------------------------------------------------------------------
# Utility methods
def generate_schema(shotgun, schema_file_path, schema_entity_file_path):
"""
Helper method for mockgun.
    Generates the schema files needed by the mocker by connecting to a real Shotgun
    site and downloading its schema information. Once the generated schema
    files are passed to mockgun, it will mimic that site's schema structure.
    :param shotgun: Shotgun API connection to the site whose schema should be downloaded
:param schema_file_path: Path where to write the main schema file to
:param schema_entity_file_path: Path where to write the entity schema file to
"""
schema = shotgun.schema_read()
fh = open(schema_file_path, "wb")
try:
pickle.dump(schema, fh, protocol=_HIGHEST_24_PICKLE_PROTOCOL)
finally:
fh.close()
schema_entity = shotgun.schema_entity_read()
fh = open(schema_entity_file_path, "wb")
try:
pickle.dump(schema_entity, fh, protocol=_HIGHEST_24_PICKLE_PROTOCOL)
finally:
fh.close()
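# Illustrative usage sketch, not part of the original module. The site URL,
# script credentials and pickle paths below are hypothetical:
#
#   import shotgun_api3
#   sg = shotgun_api3.Shotgun("https://mysite.shotgunstudio.com",
#                             script_name="mockgun_setup", api_key="<secret>")
#   generate_schema(sg, "/tmp/schema.pickle", "/tmp/schema_entity.pickle")
#   schema, schema_entity = SchemaFactory.get_schemas(
#       "/tmp/schema.pickle", "/tmp/schema_entity.pickle")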
| 39.4 | 96 | 0.690355 |
| 83b67e5c4e02f696592fff9ac77e90d9986bdd00 | 271 | py | Python | Python3/server.py | MatYoshr/Alibaba-FC-CustomRuntime-Sample | 06c12d3547d660fa65b9966e2d8f42a3d7367932 | ["MIT"] | null | null | null | Python3/server.py | MatYoshr/Alibaba-FC-CustomRuntime-Sample | 06c12d3547d660fa65b9966e2d8f42a3d7367932 | ["MIT"] | null | null | null | Python3/server.py | MatYoshr/Alibaba-FC-CustomRuntime-Sample | 06c12d3547d660fa65b9966e2d8f42a3d7367932 | ["MIT"] | null | null | null |
from flask import Flask
import os
import sys
app = Flask(__name__)
@app.route('/invoke', methods=['GET','POST'])
def hello_world():
return sys.version
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=os.environ.get("FC_SERVER_PORT", "9000"))
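# Illustrative check, not part of the original file. Assuming the app above is
# running locally on the default port 9000, the custom-runtime handler can be
# exercised with:
#
#   curl http://127.0.0.1:9000/invoke
#
# which returns the interpreter version string produced by hello_world().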
| 22.583333 | 86 | 0.686347 |
| c2f769e79df9d4280287b3a4c70aec83088110a2 | 399 | py | Python | backend/ownly_29896/wsgi.py | crowdbotics-apps/ownly-29896 | 31f5f8da6607479c7931f69cdd7e3e29a6858719 | ["FTL", "AML", "RSA-MD"] | null | null | null | backend/ownly_29896/wsgi.py | crowdbotics-apps/ownly-29896 | 31f5f8da6607479c7931f69cdd7e3e29a6858719 | ["FTL", "AML", "RSA-MD"] | 18 | 2021-08-29T18:20:38.000Z | 2022-01-09T17:44:40.000Z | backend/ownly_29896/wsgi.py | crowdbotics-apps/ownly-29896 | 31f5f8da6607479c7931f69cdd7e3e29a6858719 | ["FTL", "AML", "RSA-MD"] | null | null | null |
"""
WSGI config for ownly_29896 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ownly_29896.settings')
application = get_wsgi_application()
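# Illustrative deployment sketch, not part of the original file. A WSGI server
# (gunicorn is a hypothetical choice here) would load the callable above with:
#
#   gunicorn ownly_29896.wsgi:application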
| 23.470588 | 78 | 0.789474 |
| 1742d00e98016b7987bab1bdb32140ee303406f3 | 1,632 | py | Python | socket/port_killer.py | robotx-school/eurobot-2022 | c5a6bfba92191fe0643e1691175fd0facf9fbf99 | ["MIT"] | 1 | 2022-03-23T12:03:16.000Z | 2022-03-23T12:03:16.000Z | socket/port_killer.py | robotx-school/eurobot-2022 | c5a6bfba92191fe0643e1691175fd0facf9fbf99 | ["MIT"] | null | null | null | socket/port_killer.py | robotx-school/eurobot-2022 | c5a6bfba92191fe0643e1691175fd0facf9fbf99 | ["MIT"] | null | null | null |
import os
import subprocess
import sys
def KillPort(port):
try:
port = int(port)
cmd = 'lsof -t -i:{0}'.format(port)
pid = None
try:
pid = subprocess.check_output(cmd, shell=True)
except Exception as e:
print("No process running on port {} by current user. Checking if root is running the proecess".format(port))
if pid is None:
cmd = 'sudo lsof -t -i:{0}'.format(port)
pid = subprocess.check_output(cmd, shell=True)
pids = pid.decode().split("\n")
pids_int = []
for pid in pids:
if pid:
pid = int(pid)
pids_int.append(pid)
except ValueError as e:
print(e)
return -1
except Exception as e:
print("No process found running on port {0}.".format(port))
return -1
for pid in pids_int:
processTypeCmd = 'ps -p {0} -o comm='.format(pid)
processType = subprocess.check_output(processTypeCmd, shell=True, text=True).rstrip('\n')
userCmd = 'ps -o user= -p {}'.format(pid)
user = subprocess.check_output(userCmd, shell=True, text=True).rstrip('\n')
if user.lower() == "root":
killCmd = 'sudo kill -9 {0}'.format(pid)
else:
killCmd = 'kill -9 {0}'.format(pid)
isKilled = os.system(killCmd)
if isKilled == 0:
print("Port {0} is free. Processs {1} killed successfully".format(port, pid))
else:
print("Cannot free port {0}.Failed to kill process {1}, err code:{2}".format(port, pid, isKilled))
| 36.266667 | 121 | 0.553309 |
| 8c51da32a0619b8f4256c36fa1da731423738730 | 6,317 | py | Python | utils/contour.py | euCanSHare/dicom2nitfi | 1d036b4d197b63430a97f7ace19d00a771a599a3 | ["MIT"] | null | null | null | utils/contour.py | euCanSHare/dicom2nitfi | 1d036b4d197b63430a97f7ace19d00a771a599a3 | ["MIT"] | null | null | null | utils/contour.py | euCanSHare/dicom2nitfi | 1d036b4d197b63430a97f7ace19d00a771a599a3 | ["MIT"] | null | null | null |
import os
import cv2
import glob
import pickle
import numpy as np
from utils.parse_cvi42 import parse as parse_cvi
def parseContours(patient_dir, new_dir):
"""
Find and parse contours from cvi42 files.
Returns true if files were found and false otherwise.
"""
# Obtain cvi42wsx or cvi42ws file
files = list(glob.iglob(os.path.join(patient_dir, '*.cvi42ws*')))
if len(files) != 0:
cvi42_file = files[0]
print('cvi42 xml file is', cvi42_file)
# Parse file
parse_cvi(cvi42_file, new_dir)
return True
return False
def getContour(contour_pickle, X, Y):
'''
Construct contour from points in pickle file and return
in given dimensions.
'''
# The image annotation by default upsamples the image and then
# annotate on the upsampled image.
up = 4
# Check whether there is a corresponding contour file for this dicom
if os.path.exists(contour_pickle):
with open(contour_pickle, 'rb') as f:
contours = pickle.load(f)
# Labels
# short axis
lv_endo = 1
lv_epi = 2
rv_endo = 3
papil = 4
enh_ref_myo = 6
ref_myo = 7
excl_enh = 10
no_reflow = 20
# Long axis
la_endo = 4
ra_endo = 5
# Fill the contours in order
# RV endocardium first, then LV epicardium,
# then LV endocardium, then RA and LA.
#
# Issue: there is a problem in very rare cases,
# e.g. eid 2485225, 2700750, 2862965, 2912168,
# where LV epicardial contour is not a closed contour. This problem
# can only be solved if we could have a better definition of contours.
        # Thanks to Elena Lukaschuk and Stefan Piechnik for pointing this out.
# We skip the last point in the contours from cvi, otherwise
# the polygon may present problems when closing.
print('----------->', contours.keys())
ordered_contours = []
if 'sarvendocardialContour' in contours:
ordered_contours += [(contours['sarvendocardialContour'], rv_endo)]
if 'larvendocardialContour' in contours:
ordered_contours += [(contours['larvendocardialContour'][:-1], rv_endo)]
if 'saepicardialContour' in contours:
ordered_contours += [(contours['saepicardialContour'], lv_epi)]
if 'saepicardialOpenContour' in contours:
ordered_contours += [(contours['saepicardialOpenContour'], lv_epi)]
# Close LV epicardium in long axis by taking the closest
# points to the endocardium contour
if 'laendocardialContour' in contours:
aux = contours['laepicardialContour'].copy()
start_closest = min(contours['laendocardialContour'], key=lambda x: np.linalg.norm(x-aux[0]))
aux = np.concatenate(([start_closest], aux))
end_closest = min(contours['laendocardialContour'], key=lambda x: np.linalg.norm(x-aux[-1]))
aux = np.concatenate((aux, [end_closest]))
contours['laepicardialContour'] = aux
if 'laepicardialContour' in contours:
ordered_contours += [(contours['laepicardialContour'][:-1], lv_epi)]
if 'laepicardialOpenContour' in contours:
ordered_contours += [(contours['laepicardialOpenContour'], lv_epi)]
if 'saendocardialContour' in contours:
ordered_contours += [(contours['saendocardialContour'], lv_endo)]
if 'laendocardialContour' in contours:
ordered_contours += [(contours['laendocardialContour'][:-1], lv_endo)]
if 'saendocardialOpenContour' in contours:
ordered_contours += [(contours['saendocardialOpenContour'], lv_endo)]
if 'laendocardialOpenContour' in contours:
ordered_contours += [(contours['laendocardialOpenContour'][:-1], lv_endo)]
if 'saEnhancementReferenceMyoContour' in contours:
ordered_contours += [(contours['saEnhancementReferenceMyoContour'], enh_ref_myo)]
if 'saReferenceMyoContour' in contours:
ordered_contours += [(contours['saReferenceMyoContour'], ref_myo)]
if 'excludeEnhancementAreaContour' in contours:
ordered_contours += [(contours['excludeEnhancementAreaContour'], excl_enh)]
if 'noReflowAreaContour' in contours:
ordered_contours += [(contours['noReflowAreaContour'], no_reflow)]
if 'laraContour' in contours:
ordered_contours += [(contours['laraContour'], ra_endo)]
if 'lalaContour' in contours:
ordered_contours += [(contours['lalaContour'], la_endo)]
# if 'sapapilMuscContour' in contours:
# ordered_contours += [(contours['sapapilMuscContour'], papil)]
# cv2.fillPoly requires the contour coordinates to be integers.
# However, the contour coordinates are floating point number since
# they are drawn on an upsampled image by 4 times.
# We multiply it by 4 to be an integer. Then we perform fillPoly on
# the upsampled image as cvi42 does. This leads to a consistent volume
# measurement as cvi2. If we perform fillPoly on the original image, the
# volumes are often over-estimated by 5~10%.
# We found that it also looks better to fill polygons on the upsampled
# space and then downsample the label map than fill on the original image.
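# (Illustrative sketch, not part of the original code: with up = 4, a contour
# point at (12.3, 45.6) becomes pixel (49, 182) on the upsampled grid, and the
# final lab_up[::up, ::up] keeps every 4th row/column to bring the label map
# back to the original resolution.)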
lab_up = np.zeros((Y * up, X * up))
for c, l in ordered_contours:
coord = np.round(c * up).astype(int)  # np.int is deprecated in newer NumPy; the builtin int is equivalent here
# Remove outlier points in contours.
# For some unknown reason, some outlier points appear.
# b = np.linalg.norm(coord - np.mean(coord, axis=0), axis=1)
# coord = coord[(b < np.mean(b) + 3*np.std(b))&(b > np.mean(b) - 3*np.std(b))]
cv2.fillPoly(lab_up, [coord], l)
return lab_up[::up, ::up].transpose(), lab_up.transpose()
| 44.485915
| 109
| 0.602343
|
92b97812e5d9a15a7d41e70452c013e92056a3d0
| 7,082
|
py
|
Python
|
p3/views/profile.py
|
malemburg/epcon
|
1edec493ac1258950dcabdc9f9ee8b97c24f96c5
|
[
"BSD-2-Clause"
] | null | null | null |
p3/views/profile.py
|
malemburg/epcon
|
1edec493ac1258950dcabdc9f9ee8b97c24f96c5
|
[
"BSD-2-Clause"
] | null | null | null |
p3/views/profile.py
|
malemburg/epcon
|
1edec493ac1258950dcabdc9f9ee8b97c24f96c5
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: UTF-8 -*-
import os.path
import logging
from assopy import models as amodels
from assopy.views import render_to_json
from conference import models as cmodels
from conference.views import profile_access, json_dumps
from django import http
from django import forms
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.shortcuts import get_object_or_404, redirect, render
from email_template import utils
from p3 import dataaccess
from p3 import forms as p3forms
from p3 import models
log = logging.getLogger('p3.views')
@profile_access
def p3_profile(request, slug, profile=None, full_access=False, format_='html'):
if format_ == 'json':
pdata = dataaccess.profile_data(profile.user_id)
from conference.templatetags.conference import markdown2
pdata['bio'] = markdown2(pdata['bio'], "smarty-pants,code-color")
return http.HttpResponse(
json_dumps(pdata),
content_type='text/javascript')
tpl = 'conference/profile_publicdata_form.html'
if request.method == 'POST':
section = request.POST.get('section')
if section == 'public-data':
fc = p3forms.P3ProfilePublicDataForm
tpl = 'conference/profile_publicdata_form.html'
elif section == 'bio':
fc = p3forms.P3ProfileBioForm
tpl = 'conference/profile_bio_form.html'
elif section == 'visibility':
fc = p3forms.P3ProfileVisibilityForm
tpl = 'conference/profile_visibility_form.html'
elif section == 'picture':
fc = p3forms.P3ProfilePictureForm
tpl = 'conference/profile_picture_form.html'
else:
fc = p3forms.P3ProfileForm
form = fc(instance=profile, data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
else:
form = p3forms.P3ProfileForm(instance=profile)
ctx = {
'form': form,
'full_access': full_access,
'profile': profile,
}
return render(request, tpl, ctx)
def p3_profile_avatar(request, slug):
p = get_object_or_404(cmodels.AttendeeProfile, slug=slug).p3_profile
from urllib2 import urlopen
try:
img = urlopen(p.profile_image_url())
except Exception:
import p3
from django.conf import settings
path = os.path.join(os.path.dirname(p3.__file__), 'static', settings.P3_ANONYMOUS_AVATAR)
img = file(path)
ct = 'image/jpg'
else:
headers = img.info()
ct = headers.get('content-type')
return http.HttpResponse(img.read(), content_type=ct)
@login_required
@render_to_json
def p3_profile_message(request, slug):
if request.method != 'POST':
return http.HttpResponseNotAllowed(('POST',))
class MessageForm(forms.Form):
subject = forms.CharField()
message = forms.CharField()
f = MessageForm(data=request.POST)
if f.is_valid():
data = f.cleaned_data
profile = get_object_or_404(cmodels.AttendeeProfile, slug=slug)
try:
profile.p3_profile.send_user_message(request.user, data['subject'], data['message'])
except ValueError as e:
return http.HttpResponseBadRequest(str(e))
return "OK"
return f.errors
@login_required
def p3_account_data(request):
ctx = {}
if request.method == 'POST':
profile = cmodels.AttendeeProfile.objects.getOrCreateForUser(request.user)
form = p3forms.P3ProfilePersonalDataForm(instance=profile, data=request.POST)
ctx['pform'] = form
if form.is_valid():
form.save()
data = form.cleaned_data
request.user.first_name = data['first_name']
request.user.last_name = data['last_name']
request.user.save()
if profile.slug[0] == '-':
slug = cmodels.AttendeeProfile.objects.findSlugForUser(request.user)
if slug and slug[0] != '-':
profile.slug = slug
profile.save()
return render(request, "assopy/profile_personal_data.html", ctx)
@transaction.atomic
def OTCHandler_E(request, token):
user = token.user
models.TicketConference.objects\
.filter(assigned_to=user.email)\
.update(assigned_to=token.payload)
user.email = token.payload
user.save()
log.info('"%s" has verified the new email "%s"', user.username, user.email)
return redirect('assopy-profile')
@login_required
def p3_account_email(request):
if request.method == 'POST':
form = p3forms.P3ProfileEmailContactForm(data=request.POST, user=request.user)
if form.is_valid():
email = form.cleaned_data['email']
if email != request.user.email:
log.info(
'requested an email change from "%s" to "%s" for the user "%s"',
request.user.email,
email,
request.user.username,)
utils.email(
'verify-account',
ctx={
'user': request.user,
'token': amodels.Token.objects.create(ctype='e', user=request.user, payload=email),
},
to=[email]
).send()
else:
form = p3forms.P3ProfileEmailContactForm(initial={'email': request.user.email})
ctx = {
'pform': form,
}
return render(request, "assopy/profile_email_contact.html", ctx)
@login_required
def p3_account_spam_control(request):
ctx = {}
if request.method == 'POST':
profile = cmodels.AttendeeProfile.objects.getOrCreateForUser(request.user)
form = p3forms.P3ProfileSpamControlForm(instance=profile.p3_profile, data=request.POST)
if form.is_valid():
form.save()
return render(request, "assopy/profile_spam_control.html", ctx)
def connect_profile_to_assopy(backend, user, response, *args, **kwargs):
""" CB to be filled in the python-social-auth pipeline in order to
verify if user is a new user and (if not) assopy and conference
profiles are created.
For more details about the reason for adding this method look at
assopy.views.janrain_token that should be doing the same but for a
janrain backend instead of python-social-auth.
Params: Refer to http://python-social-auth.readthedocs.org/en/latest/pipeline.html
for more details
"""
# TODO: `email` is not used anywhere
if backend.name.startswith('google'):
email = kwargs['details']['email']
try:
# check if assopy user have already been created for this user
asso_user = user.assopy_user
except amodels.User.DoesNotExist:
# create it if not...
log.debug('the current user "%s" will become an assopy user', user)
asso_user = amodels.User(user=user)
asso_user.save()
# same for conference profile...
profile = cmodels.AttendeeProfile.objects.getOrCreateForUser(user)
| 36.694301
| 107
| 0.641768
|
bd2162e1db4a0f5c9bc1322145d5cf37e9f20061
| 474
|
py
|
Python
|
lab03/tests/q5_4.py
|
ucsb-ds/ds1-f20-content
|
25f62c7a597b98da436ca39631761c1f3feccfdd
|
[
"MIT"
] | 2
|
2020-10-14T12:43:18.000Z
|
2021-01-06T18:06:16.000Z
|
lab03/tests/q5_4.py
|
ucsb-int5/int5-f19-notebooks
|
5b3d1ee6964d9357f211f4706787403ec5a3079c
|
[
"MIT"
] | 3
|
2019-12-14T06:20:14.000Z
|
2019-12-14T07:12:33.000Z
|
lab03/tests/q5_4.py
|
ucsb-int5/int5-f19-notebooks
|
5b3d1ee6964d9357f211f4706787403ec5a3079c
|
[
"MIT"
] | 3
|
2019-10-02T18:36:06.000Z
|
2019-12-03T18:16:45.000Z
|
test = {
'name': '',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> abs(average_20th_century_rating - 8.2783625730994146) < 1e-5
True
>>> abs(average_21st_century_rating - 8.2379746835443033) < 1e-5
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| 18.96
| 74
| 0.411392
|
754615df952793c6737430ba77e8fb0443cd059f
| 5,276
|
py
|
Python
|
venv/Lib/site-packages/Token/generated/provider/models/proxy_create_payment_request.py
|
The-Fragment/FragmentFembot
|
bca0027b423753eb162590e8fd440a2c1e65d133
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/Token/generated/provider/models/proxy_create_payment_request.py
|
The-Fragment/FragmentFembot
|
bca0027b423753eb162590e8fd440a2c1e65d133
|
[
"MIT"
] | 5
|
2020-06-06T00:40:42.000Z
|
2021-06-10T22:36:12.000Z
|
venv/Lib/site-packages/Token/generated/provider/models/proxy_create_payment_request.py
|
The-Fragment/FragmentFembot
|
bca0027b423753eb162590e8fd440a2c1e65d133
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class ProxyCreatePaymentRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ProxyCreatePaymentRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'payee_authority': 'Authority',
'description': 'str',
'amount': 'Money',
'route': 'Route'
}
self.attribute_map = {
'payee_authority': 'payeeAuthority',
'description': 'description',
'amount': 'amount',
'route': 'route'
}
self._payee_authority = None
self._description = None
self._amount = None
self._route = None
@property
def payee_authority(self):
"""
Gets the payee_authority of this ProxyCreatePaymentRequest.
:return: The payee_authority of this ProxyCreatePaymentRequest.
:rtype: Authority
"""
return self._payee_authority
@payee_authority.setter
def payee_authority(self, payee_authority):
"""
Sets the payee_authority of this ProxyCreatePaymentRequest.
:param payee_authority: The payee_authority of this ProxyCreatePaymentRequest.
:type: Authority
"""
self._payee_authority = payee_authority
@property
def description(self):
"""
Gets the description of this ProxyCreatePaymentRequest.
:return: The description of this ProxyCreatePaymentRequest.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this ProxyCreatePaymentRequest.
:param description: The description of this ProxyCreatePaymentRequest.
:type: str
"""
self._description = description
@property
def amount(self):
"""
Gets the amount of this ProxyCreatePaymentRequest.
:return: The amount of this ProxyCreatePaymentRequest.
:rtype: Money
"""
return self._amount
@amount.setter
def amount(self, amount):
"""
Sets the amount of this ProxyCreatePaymentRequest.
:param amount: The amount of this ProxyCreatePaymentRequest.
:type: Money
"""
self._amount = amount
@property
def route(self):
"""
Gets the route of this ProxyCreatePaymentRequest.
:return: The route of this ProxyCreatePaymentRequest.
:rtype: Route
"""
return self._route
@route.setter
def route(self, route):
"""
Sets the route of this ProxyCreatePaymentRequest.
:param route: The route of this ProxyCreatePaymentRequest.
:type: Route
"""
self._route = route
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 26.918367
| 86
| 0.582449
|
5d7e96b177e4541bf6724ee500a084cc1811ebd9
| 1,768
|
py
|
Python
|
brownian/wiener.py
|
rmorshea/quantfi
|
bb29364fe19c20ab2fe77263c474b147c795a058
|
[
"MIT"
] | null | null | null |
brownian/wiener.py
|
rmorshea/quantfi
|
bb29364fe19c20ab2fe77263c474b147c795a058
|
[
"MIT"
] | null | null | null |
brownian/wiener.py
|
rmorshea/quantfi
|
bb29364fe19c20ab2fe77263c474b147c795a058
|
[
"MIT"
] | null | null | null |
import numpy as np
from math import erfc
from random import random
from random import gauss
def w_series(n, dt, t_init=0, w_init=0.0):
"""Returns one realization of a Wiener process with n steps of length dt.
The time and Wiener series can be initialized using t_init and w_init respectively.
"""
n+=1
t_series = np.arange(t_init,(n-0.1)*dt,dt)
h = t_series[1]-t_series[0]
z = np.random.normal(0.0,1.0,n)
dw = np.sqrt(h)*z
dw[0] = w_init
w_series = dw.cumsum()
return t_series, w_series
def raise_res(T, W, c, mu=0, sigma=1):
'''Increase the resolution of a Wiener series by a factor of c.
Returns a more finely resolved Wiener series and its associated time series.
T = the given Time series.
W = the associated Wiener series.
c = Scaling factor (integer greater than 1).
mu = Mean of W's underlying normal distribution.
sigma = Standard deviation of W's underlying normal distribution.
'''
dT = T[1]-T[0]
dt = float(T[1]-T[0])/c
t_series = []
w_series = []
for i in range(len(T)-1):
t = T[i]
w_t = W[i]
t_next = T[i+1]
w_next = W[i+1]
t_series.append(t)
w_series.append(w_t)
for j in range(c-1):
t+=dt
dW = (w_next-w_t)
drawfrm_cum = np.sqrt(2)*np.sqrt(t_next-t)*sigma*erfc(random())
if np.sqrt(2)*np.sqrt(t_next-t)*sigma*erfc(-2*random())<abs(dW):
w_t+=abs(gauss(0,np.sqrt(dt)*sigma))*float(dW)/abs(dW)
else:
w_t+=gauss(0,np.sqrt(dt)*sigma)
t_series.append(t)
w_series.append(w_t)
t_series.append(T[-1])
w_series.append(W[-1])
return t_series,w_series
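# Example usage (an illustrative sketch, not part of the original module; the
# argument values are arbitrary):
#   t, w = w_series(n=1000, dt=0.01)        # one Wiener path, 1000 steps of 0.01
#   t_fine, w_fine = raise_res(t, w, c=4)   # the same path refined to step 0.0025
# raise_res keeps every original (t, W) point and inserts c-1 intermediate
# samples between each consecutive pair, so the coarse path is a subset of the
# refined one.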
| 33.358491
| 87
| 0.582579
|
d033b45df446d22c451833757c02cdcecc49c230
| 399
|
py
|
Python
|
lab_manager/wsgi.py
|
edilson/lab_manager
|
e0885d0b132b4e2e45b52510758a532128aa29ea
|
[
"MIT"
] | null | null | null |
lab_manager/wsgi.py
|
edilson/lab_manager
|
e0885d0b132b4e2e45b52510758a532128aa29ea
|
[
"MIT"
] | 5
|
2021-03-19T03:19:12.000Z
|
2021-06-10T19:21:38.000Z
|
lab_manager/wsgi.py
|
edilson/lab_manager
|
e0885d0b132b4e2e45b52510758a532128aa29ea
|
[
"MIT"
] | null | null | null |
"""
WSGI config for lab_manager project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lab_manager.settings')
application = get_wsgi_application()
| 23.470588
| 78
| 0.789474
|
ac434f8aa69e24c120e0acf556b1efa746bc3c33
| 2,583
|
py
|
Python
|
sdk/edgegateway/azure-mgmt-edgegateway/setup.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2021-09-07T18:39:05.000Z
|
2021-09-07T18:39:05.000Z
|
sdk/edgegateway/azure-mgmt-edgegateway/setup.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
sdk/edgegateway/azure-mgmt-edgegateway/setup.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-edgegateway"
PACKAGE_PPRINT_NAME = "Data Box Edge / Data Box Gateway"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
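# (illustrative: a line such as  VERSION = "1.0.0b1"  in version.py/_version.py
# yields version == "1.0.0b1")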
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.6.21',
'msrestazure>=0.4.32,<2.0.0',
'azure-common~=1.1',
],
python_requires=">=3.6",
)
| 34.44
| 91
| 0.604336
|
e19cd40437838d24a1995a39a5891650832aae3a
| 1,051
|
py
|
Python
|
benchmark.py
|
alessiamarcolini/speech-to-text-benchmark
|
16962ee2391fc2725ae1fcfe91c197753d192ac8
|
[
"Apache-2.0"
] | null | null | null |
benchmark.py
|
alessiamarcolini/speech-to-text-benchmark
|
16962ee2391fc2725ae1fcfe91c197753d192ac8
|
[
"Apache-2.0"
] | null | null | null |
benchmark.py
|
alessiamarcolini/speech-to-text-benchmark
|
16962ee2391fc2725ae1fcfe91c197753d192ac8
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import editdistance
from dataset import *
from engine import *
from tqdm import tqdm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--engine_type", type=str, required=True)
args = parser.parse_args()
dataset = Dataset.create("SpeechAccentArchive")
print("loaded %s with %.2f hours of data" % (str(dataset), dataset.size_hours()))
engine = ASREngine.create(ASREngines[args.engine_type])
print("created %s engine" % str(engine))
word_error_count = 0
word_count = 0
for i in tqdm(range(dataset.size())):
path, ref_transcript = dataset.get(i)
transcript = engine.transcribe(path)
if transcript is None:
continue
ref_words = ref_transcript.strip("\n ").lower().split()
words = transcript.strip("\n ").lower().split()
word_error_count += editdistance.eval(ref_words, words)
word_count += len(ref_words)
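# The ratio printed below is the corpus-level word error rate: the summed
# word-level edit distance (substitutions, insertions and deletions) divided
# by the total number of reference words.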
print("word error rate : %.2f" % (100 * float(word_error_count) / word_count))
| 28.405405
| 85
| 0.663178
|
297eb3946d007c19017a09d690ddaeac3caebcec
| 4,154
|
py
|
Python
|
nicos/devices/notifiers/mattermost.py
|
ebadkamil/nicos
|
0355a970d627aae170c93292f08f95759c97f3b5
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12
|
2019-11-06T15:40:36.000Z
|
2022-01-01T16:23:00.000Z
|
nicos/devices/notifiers/mattermost.py
|
ebadkamil/nicos
|
0355a970d627aae170c93292f08f95759c97f3b5
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91
|
2020-08-18T09:20:26.000Z
|
2022-02-01T11:07:14.000Z
|
nicos/devices/notifiers/mattermost.py
|
ISISComputingGroup/nicos
|
94cb4d172815919481f8c6ee686f21ebb76f2068
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6
|
2020-01-11T10:52:30.000Z
|
2022-02-25T12:35:23.000Z
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <g.brandl@fz-juelich.de>
#
# *****************************************************************************
import json
import requests
from nicos.core import ConfigurationError, Param
from nicos.devices.notifiers import Notifier
from nicos.utils.credentials.keystore import nicoskeystore
class Mattermost(Notifier):
"""Mattermost notifier.
Mattermost is a group chat system similar to Slack, but open source.
To use this notifier, some Mattermost user must register an "Incoming
Webhook" on the Mattermost instance. The credid parameter should be set to
a NICOS keystore credential ID of the "secret" part of the hook URL.
Receivers can be given as channels, using the last part of the channel's
URL, or people, in the form ``@joe``.
For example, if you want to send messages via a webhook with the URL
https://chat.example.org/hooks/xsdkue8djsk
to the user "joe" and to the channel
https://chat.example.org/team/channels/nicos-notifications
you would set the following configuration::
baseurl = 'https://chat.example.org'
credid = '...' (a keystore ID with the value 'xsdkue8djsk')
receivers = ['nicos-notifications', '@joe']
The `username` parameter can be set freely; Mattermost will show "bot"
next to it to avoid spoofing actual users.
"""
parameters = {
'baseurl': Param('URL of the Mattermost instance',
type=str, mandatory=True),
'username': Param('User name to show for notifications',
type=str, mandatory=True),
'iconurl': Param('URL of an image to show next to notifications',
type=str, default=''),
'credid': Param('Credential ID in the NICOS keystore '
'for the hook ID', type=str, default='mattermost'),
}
_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
def doInit(self, mode):
secret_hookid = nicoskeystore.getCredential(self.credid)
if not secret_hookid:
raise ConfigurationError('Mattermost hook ID missing in keystore')
self._hookurl = self.baseurl + '/hooks/' + secret_hookid
def send(self, subject, body, what=None, short=None, important=True):
message = '**%s**\n\n```\n%s\n```' % (subject, body)
if important:
message = '@all ' + message
for entry in self._getAllRecipients(important):
self.log.debug('sending Mattermost message to %s', entry)
data = {'text': message, 'username': self.username,
'channel': entry}
if self.iconurl:
data['icon_url'] = self.iconurl
try:
response = requests.post(self._hookurl, headers=self._headers,
data=json.dumps(data), timeout=2)
if not response.ok:
raise ValueError(response.json()['message'])
except Exception as err:
self.log.warning('Could not send Mattermost '
'message to %s: %s', entry, err, exc=1)
| 41.128713
| 79
| 0.617236
|
a296f896f7f89db76b01f3b1738038ff9447eef0
| 25,797
|
py
|
Python
|
test/functional/p2p_sendheaders.py
|
ORO-mlm/ORO-Core
|
770e4728e1b67023f2f52da2850e058732e7583f
|
[
"MIT"
] | null | null | null |
test/functional/p2p_sendheaders.py
|
ORO-mlm/ORO-Core
|
770e4728e1b67023f2f52da2850e058732e7583f
|
[
"MIT"
] | null | null | null |
test/functional/p2p_sendheaders.py
|
ORO-mlm/ORO-Core
|
770e4728e1b67023f2f52da2850e058732e7583f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import OroTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
'''
SendHeadersTest -- test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
'''
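# Rough message flow once a peer opts in via "sendheaders" (an illustrative
# sketch of the behaviour exercised below, not part of the original test plan):
#
#   test_node --- sendheaders -----------> node0
#   (node0 mines a block)
#   test_node <-- headers([new tip]) ----- node0    # announced by header
#   test_node --- getdata([new tip]) ----> node0
#   test_node <-- block(new tip) --------- node0
#
# Without the initial "sendheaders" the announcement is an inv instead, which
# is what inv_node checks throughout.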
direct_fetch_response_time = 0.05
class BaseNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_inv = None
self.last_headers = None
self.last_block = None
self.last_getdata = None
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None
def clear_last_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_inv = None
self.last_headers = None
# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.connection.send_message(msg)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)
def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, conn, message):
self.last_headers = message
if len(message.headers):
self.block_announced = True
message.headers[-1].calc_sha256()
self.last_blockhash_announced = message.headers[-1].sha256
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_close(self, conn):
self.disconnected = True
# Test whether the last announcement we received had the
# right header or the right inv
# inv and headers should be lists of block hashes
def check_last_announcement(self, headers=None, inv=None):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
assert(wait_until(test_function, timeout=60))
with mininode_lock:
self.block_announced = False
success = True
compare_inv = []
if self.last_inv != None:
compare_inv = [x.hash for x in self.last_inv.inv]
if compare_inv != expect_inv:
success = False
hash_headers = []
if self.last_headers != None:
# treat headers as a list of block hashes
hash_headers = [ x.sha256 for x in self.last_headers.headers ]
if hash_headers != expect_headers:
success = False
self.last_inv = None
self.last_headers = None
return success
# Syncing helpers
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
assert(wait_until(test_function, timeout=timeout))
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
class SendHeadersTest(OroTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.nodes = []
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
connect_nodes(self.nodes[0], 1)
# mine count blocks and return the new tip
def mine_blocks(self, count):
# Clear out last block announcement from each p2p listener
[ x.clear_last_announcement() for x in self.p2p_connections ]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
self.sync_blocks(self.nodes, wait=0.1)
for x in self.p2p_connections:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_last_announcement()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
self.sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = InvNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
print("Part 1: headers don't start before sendheaders message...")
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip, timeout=5)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256], timeout=5)
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
print("Part 1: success!")
print("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
for b in range(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getdata([tip], timeout=5)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
inv_node.send_block_inv(tip)
# Should have received a getheaders as well!
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5)
[ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ]
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert_equal(inv_node.last_inv, None)
assert_equal(inv_node.last_headers, None)
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
print("Part 2: success!")
print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
print("Part 3: success!")
print("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
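# (presumably because the node limits blocks in flight per peer to 16 and the
# two blocks requested just above are still outstanding, leaving room for 14)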
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
print("Part 4: success!")
# Now deliver all those blocks we announced.
[ test_node.send_message(msg_block(x)) for x in blocks ]
print("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
test_node.last_getdata = None
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders(timeout=1)
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders(timeout=1)
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i%len(blocks)]])
test_node.wait_for_getheaders(timeout=1)
# Eventually this stops working.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
with mininode_lock:
test_node.last_getheaders = True
print("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert_equal(inv_node.last_getdata, None)
if __name__ == '__main__':
SendHeadersTest().main()
| 42.429276
| 116
| 0.634066
|
f4d4ef0efcf86ddc89e38ea493a020e6d1dc4d7d
| 7,195
|
py
|
Python
|
airflow/jobs/local_task_job.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 7
|
2018-11-19T12:05:13.000Z
|
2020-01-17T08:30:38.000Z
|
airflow/jobs/local_task_job.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 14
|
2019-11-22T09:24:20.000Z
|
2021-07-09T06:06:59.000Z
|
airflow/jobs/local_task_job.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-05-01T21:54:37.000Z
|
2021-05-01T21:54:37.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import signal
from typing import Optional
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.jobs.base_job import BaseJob
from airflow.models.taskinstance import TaskInstance
from airflow.stats import Stats
from airflow.task.task_runner import get_task_runner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import provide_session
from airflow.utils.state import State
class LocalTaskJob(BaseJob):
"""LocalTaskJob runs a single task instance."""
__mapper_args__ = {'polymorphic_identity': 'LocalTaskJob'}
def __init__(
self,
task_instance: TaskInstance,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
pickle_id: Optional[str] = None,
pool: Optional[str] = None,
*args,
**kwargs,
):
self.task_instance = task_instance
self.dag_id = task_instance.dag_id
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_runner = None
# terminating state is used so that a job doesn't try to
# terminate multiple times
self.terminating = False
super().__init__(*args, **kwargs)
def _execute(self):
self.task_runner = get_task_runner(self)
# pylint: disable=unused-argument
def signal_handler(signum, frame):
"""Setting kill signal handler"""
self.log.error("Received SIGTERM. Terminating subprocesses")
self.on_kill()
raise AirflowException("LocalTaskJob received SIGTERM signal")
# pylint: enable=unused-argument
signal.signal(signal.SIGTERM, signal_handler)
if not self.task_instance.check_and_change_state_before_execution(
mark_success=self.mark_success,
ignore_all_deps=self.ignore_all_deps,
ignore_depends_on_past=self.ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
ignore_ti_state=self.ignore_ti_state,
job_id=self.id,
pool=self.pool,
):
self.log.info("Task is not able to be run")
return
try:
self.task_runner.start()
heartbeat_time_limit = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
while True:
# Monitor the task to see if it's done. Wait in a syscall
# (`os.wait`) for as long as possible so we notice the
# subprocess finishing as quick as we can
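# i.e. block for at most min(heartrate, zombie threshold - 0.75 * seconds
# since the last recorded heartbeat), clamped so it is never negative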
max_wait_time = max(
0, # Make sure this value is never negative,
min(
(
heartbeat_time_limit
- (timezone.utcnow() - self.latest_heartbeat).total_seconds() * 0.75
),
self.heartrate,
),
)
return_code = self.task_runner.return_code(timeout=max_wait_time)
if return_code is not None:
self.log.info("Task exited with return code %s", return_code)
return
self.heartbeat()
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
# This can only really happen if the worker can't read the DB for a long time
time_since_last_heartbeat = (timezone.utcnow() - self.latest_heartbeat).total_seconds()
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.log.error("Heartbeat time limit exceeded!")
raise AirflowException(
"Time since last heartbeat({:.2f}s) "
"exceeded limit ({}s).".format(time_since_last_heartbeat, heartbeat_time_limit)
)
finally:
self.on_kill()
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
if ti.state == State.RUNNING:
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
if not same_hostname:
self.log.warning(
"The recorded hostname %s " "does not match this instance's hostname " "%s",
ti.hostname,
fqdn,
)
raise AirflowException("Hostname of job runner does not match")
current_pid = os.getpid()
same_process = ti.pid == current_pid
if not same_process:
self.log.warning("Recorded pid %s does not match " "the current pid %s", ti.pid, current_pid)
raise AirflowException("PID of job runner does not match")
elif self.task_runner.return_code() is None and hasattr(self.task_runner, 'process'):
self.log.warning(
"State of this instance has been externally set to %s. " "Terminating instance.", ti.state
)
if ti.state == State.FAILED and ti.task.on_failure_callback:
context = ti.get_template_context()
ti.task.on_failure_callback(context)
if ti.state == State.SUCCESS and ti.task.on_success_callback:
context = ti.get_template_context()
ti.task.on_success_callback(context)
self.task_runner.terminate()
self.terminating = True
| 39.972222
| 109
| 0.622794
|
931779bb5bed9a51d76b040868ae4716fa6a109b
| 1,318
|
py
|
Python
|
huxley/api/mixins.py
|
srisainachuri/huxley
|
7166a1423e49b506d6d5f142c748eac4e5d2314c
|
[
"BSD-3-Clause"
] | 18
|
2015-07-12T00:55:51.000Z
|
2021-12-13T15:41:06.000Z
|
huxley/api/mixins.py
|
srisainachuri/huxley
|
7166a1423e49b506d6d5f142c748eac4e5d2314c
|
[
"BSD-3-Clause"
] | 288
|
2015-01-13T23:05:09.000Z
|
2022-03-25T17:35:36.000Z
|
huxley/api/mixins.py
|
srisainachuri/huxley
|
7166a1423e49b506d6d5f142c748eac4e5d2314c
|
[
"BSD-3-Clause"
] | 47
|
2015-05-12T15:39:57.000Z
|
2022-03-30T09:12:48.000Z
|
# Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import json
from django.db import transaction
from rest_framework import status
from rest_framework.response import Response
from huxley.core.models import Delegate
class ListUpdateModelMixin(object):
"""
Update a queryset
"""
def list_update(self, request, partial=False, *args, **kwargs):
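# request.data is expected to be a list of delegate payloads, each carrying
# an "id" key, e.g. [{"id": 1, "name": "..."}, {"id": 2, "name": "..."}]
# (the field names other than "id" are illustrative).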
updates = {delegate['id']: delegate for delegate in request.data}
response_data = []
with transaction.atomic():
delegates = Delegate.objects.filter(id__in=updates.keys())
for delegate in delegates:
serializer = self.get_serializer(
instance=delegate,
data=updates[delegate.id],
partial=partial)
serializer.is_valid(raise_exception=True)
serializer.save()
response_data.append(serializer.data)
return Response(response_data, status=status.HTTP_200_OK)
def put(self, request, *args, **kwargs):
return self.list_update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.list_update(request, partial=True, *args, **kwargs)
| 32.95
| 77
| 0.650228
|
3645da2ad5b34e11dbc50baba8b592b1fe09ecdd
| 6,770
|
py
|
Python
|
test/test_oneview_id_pools_ipv4_range_facts.py
|
LaudateCorpus1/oneview-ansible
|
a1befcab3ff8d23ab7f85844eeba0d2f2c6a21e2
|
[
"Apache-2.0"
] | 108
|
2016-06-28T18:14:08.000Z
|
2022-02-21T09:16:06.000Z
|
test/test_oneview_id_pools_ipv4_range_facts.py
|
HPE-Japan-Presales/oneview-ansible
|
26eb13354333d862d9e80f07e3fe9bbe2eb59af3
|
[
"Apache-2.0"
] | 248
|
2016-07-14T12:50:17.000Z
|
2022-02-06T18:57:16.000Z
|
test/test_oneview_id_pools_ipv4_range_facts.py
|
HPE-Japan-Presales/oneview-ansible
|
26eb13354333d862d9e80f07e3fe9bbe2eb59af3
|
[
"Apache-2.0"
] | 88
|
2016-06-29T15:52:44.000Z
|
2022-03-10T12:34:41.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2021) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
import mock
from hpe_test_utils import OneViewBaseTest
from oneview_module_loader import IdPoolsIpv4RangeFactsModule
ERROR_MSG = 'Fake message error'
DEFAULT_RANGE_TEMPLATE = dict(
name='Ipv4Range',
uri='rest/range/test',
subnetUri='rest/subnet/test',
type='Range',
enabled=True,
gateway='10.10.0.1'
)
DEFAULT_NOT_RANGE_TEMPLATE = dict(
name='NOTIpv4Range',
uri='rest/range/not',
subnetUri='rest/subnet/test',
type='Range',
gateway='10.3.3.1'
)
DEFAULT_SUBNET_TEMPLATE_1 = dict(
name='Ipv4Subnet1',
uri='rest/subnet/test1',
type='Subnet',
rangeUris=['rest/range/not2', 'rest/range/not3']
)
DEFAULT_SUBNET_TEMPLATE_2 = dict(
name='Ipv4Subnet2',
uri='rest/subnet/test2',
type='Subnet',
rangeUris=['rest/range/test', 'rest/range/not4']
)
PARAMS_GET_ALL = dict(
config='config.json',
)
PARAMS_GET_ALL_FROM_SUBNET = dict(
config='config.json',
subnetUri='rest/subnet/test2'
)
PARAMS_GET_BY_NAME_AND_SUBNET_URI = dict(
config='config.json',
name="Ipv4Range",
subnetUri='rest/subnet/test2'
)
PARAMS_GET_BY_URI = dict(
config='config.json',
uri='/rest/ipv4-range/test'
)
PARAMS_GET_ALLOCATED_FRAGMENTS = dict(
config='config.json',
options=['allocatedFragments'],
uri='/rest/ipv4-range/test'
)
PARAMS_GET_SCHEMA = dict(
config='config.json',
options=['schema']
)
PARAMS_GET_FREE_FRAGMENTS = dict(
config='config.json',
options=['freeFragments'],
uri='/rest/ipv4-range/test'
)
ALL_SUBNETS = [DEFAULT_SUBNET_TEMPLATE_1.copy(), DEFAULT_SUBNET_TEMPLATE_2.copy()]
@pytest.mark.resource(TestIdPoolsIpv4RangeFactsModule='id_pools_ipv4_ranges')
class TestIdPoolsIpv4RangeFactsModule(OneViewBaseTest):
def test_should_get_all_id_pools_ipv4_ranges(self):
self.mock_ov_client.id_pools_ipv4_subnets.get_all.return_value = ALL_SUBNETS
range_1 = DEFAULT_RANGE_TEMPLATE.copy()
range_2 = DEFAULT_RANGE_TEMPLATE.copy()
range_3 = DEFAULT_RANGE_TEMPLATE.copy()
range_4 = DEFAULT_RANGE_TEMPLATE.copy()
ranges = [range_2, range_3, range_1, range_4]
self.resource.get_by_uri().data = range_1
self.mock_ansible_module.params = PARAMS_GET_ALL
IdPoolsIpv4RangeFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(id_pools_ipv4_ranges=ranges)
)
def test_should_get_all_id_pools_ipv4_ranges_from_subnet(self):
obj = mock.Mock()
obj.data = DEFAULT_SUBNET_TEMPLATE_2
self.mock_ov_client.id_pools_ipv4_subnets.get_by_uri.return_value = obj
range_1 = DEFAULT_RANGE_TEMPLATE.copy()
range_4 = DEFAULT_RANGE_TEMPLATE.copy()
ranges = [range_1, range_4]
self.resource.get_by_uri.return_value = self.resource
self.resource.data = range_1
self.mock_ansible_module.params = PARAMS_GET_ALL_FROM_SUBNET
IdPoolsIpv4RangeFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(id_pools_ipv4_ranges=ranges)
)
def test_should_get_id_pools_ipv4_range_from_subnet_and_name(self):
obj = mock.Mock()
obj.data = DEFAULT_SUBNET_TEMPLATE_2
self.mock_ov_client.id_pools_ipv4_subnets.get_by_uri.return_value = obj
range_1 = DEFAULT_RANGE_TEMPLATE.copy()
self.resource.get_by_uri.return_value = self.resource
self.resource.data = range_1
self.mock_ansible_module.params = PARAMS_GET_BY_NAME_AND_SUBNET_URI
IdPoolsIpv4RangeFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(id_pools_ipv4_ranges=[range_1])
)
def test_should_get_id_pools_ipv4_range_from_uri(self):
self.resource.get_by_uri.return_value = self.resource
self.resource.data = DEFAULT_RANGE_TEMPLATE.copy()
self.mock_ansible_module.params = PARAMS_GET_BY_URI
IdPoolsIpv4RangeFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(id_pools_ipv4_ranges=[DEFAULT_RANGE_TEMPLATE.copy()])
)
def test_should_get_id_pools_ipv4_ranges_allocated_fragments(self):
self.resource.get_by_uri().data = DEFAULT_RANGE_TEMPLATE.copy()
self.resource.get_allocated_fragments.return_value = [{'frag': 'test'}]
self.mock_ansible_module.params = PARAMS_GET_ALLOCATED_FRAGMENTS
IdPoolsIpv4RangeFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(id_pools_ipv4_ranges=[DEFAULT_RANGE_TEMPLATE.copy()],
id_pools_ipv4_ranges_allocated_fragments=[{'frag': 'test'}])
)
def test_should_get_id_pools_ipv4_ranges_schema(self):
self.resource.get_schema.return_value = [{'schema': 'schema'}]
self.mock_ansible_module.params = PARAMS_GET_SCHEMA
IdPoolsIpv4RangeFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(id_pools_ipv4_ranges_schema=[{'schema': 'schema'}],
id_pools_ipv4_ranges=[])
)
def test_should_get_id_pools_ipv4_ranges_free_fragments(self):
self.resource.get_by_uri().data = DEFAULT_RANGE_TEMPLATE.copy()
self.resource.get_free_fragments.return_value = [{'frag': 'testfree'}]
self.mock_ansible_module.params = PARAMS_GET_FREE_FRAGMENTS
IdPoolsIpv4RangeFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(id_pools_ipv4_ranges=[DEFAULT_RANGE_TEMPLATE.copy()],
id_pools_ipv4_ranges_free_fragments=[{'frag': 'testfree'}])
)
if __name__ == '__main__':
pytest.main([__file__])
| 32.705314
| 91
| 0.706499
|
b5115e230109299bbe27f9d7c8e09b58d707310d
| 5,204
|
py
|
Python
|
stable_baselines/her/utils.py
|
johannes-dornheim/stable-baselines
|
b38b6d47daa119118104c63568edc4b255a0282e
|
[
"MIT"
] | null | null | null |
stable_baselines/her/utils.py
|
johannes-dornheim/stable-baselines
|
b38b6d47daa119118104c63568edc4b255a0282e
|
[
"MIT"
] | null | null | null |
stable_baselines/her/utils.py
|
johannes-dornheim/stable-baselines
|
b38b6d47daa119118104c63568edc4b255a0282e
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import numpy as np
from gym import spaces
# Important: gym mixes up ordered and unordered keys
# and the Dict space may return a different order of keys than the actual one
KEY_ORDER = ['observation', 'achieved_goal', 'desired_goal']
class HERGoalEnvWrapper(object):
"""
A wrapper that allows using a dict observation space (coming from GoalEnv) with
the RL algorithms.
It assumes that all the spaces of the dict space are of the same type.
:param env: (gym.GoalEnv)
"""
def __init__(self, env):
super(HERGoalEnvWrapper, self).__init__()
self.env = env
self.metadata = self.env.metadata
self.action_space = env.action_space
self.spaces = list(env.observation_space.spaces.values())
self.achieved_goals = {}
# Check that all spaces are of the same type
# (current limitation of the wrapper)
space_types = [type(env.observation_space.spaces[key]) for key in KEY_ORDER]
assert len(set(space_types)) == 1, "The spaces for goal and observation"\
" must be of the same type"
if isinstance(self.spaces[0], spaces.Discrete):
self.obs_dim = 1
self.goal_dim = 1
else:
goal_space_shape = env.observation_space.spaces['achieved_goal'].shape
self.obs_dim = env.observation_space.spaces['observation'].shape[0]
self.goal_dim = goal_space_shape[0]
if len(goal_space_shape) == 2:
assert goal_space_shape[1] == 1, "Only 1D observation spaces are supported yet"
else:
assert len(goal_space_shape) == 1, "Only 1D observation spaces are supported yet"
if isinstance(self.spaces[0], spaces.MultiBinary):
total_dim = self.obs_dim + self.goal_dim # 2 * self.goal_dim
self.observation_space = spaces.MultiBinary(total_dim)
elif isinstance(self.spaces[0], spaces.Box):
lows = np.concatenate([space.low for space in np.array(self.spaces)[[0, 2]]]) # np.concatenate([space.low for space in self.spaces])
highs = np.concatenate([space.high for space in np.array(self.spaces)[[0, 2]]]) # np.concatenate([space.high for space in self.spaces])
self.observation_space = spaces.Box(lows, highs, dtype=np.float32)
elif isinstance(self.spaces[0], spaces.Discrete):
dimensions = [env.observation_space.spaces[key].n for key in ['observation', 'desired_goal']]
self.observation_space = spaces.MultiDiscrete(dimensions)
else:
raise NotImplementedError("{} space is not supported".format(type(self.spaces[0])))
def convert_dict_to_obs(self, obs_dict):
"""
:param obs_dict: (dict<np.ndarray>)
:return: (np.ndarray)
"""
# Note: in the original implementation the achieved goal is not removed from the
# observation, which keeps the transformation revertible.
# --------------------------------------------------------------------------------------------
# Here, instead, achieved goals are stored in an extra dict keyed by hash(obs, desired);
# this assumes the mapping (obs, desired) -> achieved is unique!
# --------------------------------------------------------------------------------------------
if isinstance(self.observation_space, spaces.MultiDiscrete):
# Special case for multidiscrete
obs = np.concatenate([[int(obs_dict[key])] for key in ['observation', 'desired_goal']])
else:
# obs = np.concatenate([obs_dict[key] for key in ['observation', 'desired_goal']])
# todo !!!!!!!!!! experimental relative goal
obs = np.concatenate([obs_dict['observation'], obs_dict['desired_goal'] - obs_dict['observation']])
# todo !!!!!!!!!! experimental relative goal
# obs.flags.writeable = False
# self.achieved_goals[hash(obs.data)] = obs_dict['achieved_goal']
self.achieved_goals[hash(obs.data.tobytes())] = obs_dict['achieved_goal']
return obs
def convert_obs_to_dict(self, observations):
"""
Inverse operation of convert_dict_to_obs
:param observations: (np.ndarray)
:return: (OrderedDict<np.ndarray>)
"""
return OrderedDict([
('observation', observations[:self.obs_dim]),
('achieved_goal', self.achieved_goals[hash(observations.data.tobytes())]),
('desired_goal', observations[self.obs_dim:]),
])
def step(self, action):
obs, reward, done, info = self.env.step(action)
return self.convert_dict_to_obs(obs), reward, done, info
def seed(self, seed=None):
return self.env.seed(seed)
def reset(self):
# todo test
self.achieved_goals = {}
o = self.convert_dict_to_obs(self.env.reset())
return o
def compute_reward(self, achieved_goal, desired_goal, info):
return self.env.compute_reward(achieved_goal, desired_goal, info)
def render(self, mode='human'):
return self.env.render(mode)
def close(self):
return self.env.close()
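# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): shows how the
# wrapper flattens a dict observation and restores it again. The environment
# id "FetchReach-v1" is only an assumption; any registered gym.GoalEnv works.
if __name__ == '__main__':
    import gym
    goal_env = gym.make('FetchReach-v1')  # hypothetical GoalEnv, needs gym[robotics]
    wrapped = HERGoalEnvWrapper(goal_env)
    flat_obs = wrapped.reset()
    obs_dict = wrapped.convert_obs_to_dict(flat_obs)
    print(flat_obs.shape, sorted(obs_dict.keys()))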
| 41.967742
| 147
| 0.610492
|
985c98390f7334a400ae670f14277845167e0aa9
| 178
|
py
|
Python
|
libs/__init__.py
|
SeanLee97/word2vec-test
|
68053556c7016cecc5e97dd25c28dd452b77f2e4
|
[
"MIT"
] | 1
|
2019-01-20T08:39:25.000Z
|
2019-01-20T08:39:25.000Z
|
libs/__init__.py
|
SeanLee97/word2vec-test
|
68053556c7016cecc5e97dd25c28dd452b77f2e4
|
[
"MIT"
] | null | null | null |
libs/__init__.py
|
SeanLee97/word2vec-test
|
68053556c7016cecc5e97dd25c28dd452b77f2e4
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from .gensim_word_vector import GensimWordVector
from .word_vector import WordVector
__all__ = ('GensimWordVector', 'WordVector')
| 25.428571
| 48
| 0.747191
|
cb3d8abf07cc204799323aaec16d40aa299b15c6
| 5,584
|
py
|
Python
|
pydefect/core/tests/test_supercell_calc_results.py
|
wangvei/pydefect
|
e909796c429e16982cefe549d16881039bce89e7
|
[
"MIT"
] | 1
|
2021-06-07T03:05:39.000Z
|
2021-06-07T03:05:39.000Z
|
pydefect/core/tests/test_supercell_calc_results.py
|
wangvei/pydefect
|
e909796c429e16982cefe549d16881039bce89e7
|
[
"MIT"
] | null | null | null |
pydefect/core/tests/test_supercell_calc_results.py
|
wangvei/pydefect
|
e909796c429e16982cefe549d16881039bce89e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from copy import deepcopy
import tempfile
import numpy as np
from pydefect.core.defect_entry import DefectEntry
from pydefect.core.supercell_calc_results import (
ProcarDefectProperty, SupercellCalcResults)
from pymatgen.io.vasp.outputs import Vasprun, Procar
from pymatgen.electronic_structure.core import Spin
from pydefect.util.testing import PydefectTest
from pydefect.util.tools import flatten_dict
class ProcarDefectPropertyTest(PydefectTest):
def setUp(self) -> None:
""" Va_O in the 2+ charge state in 64-atom supercells"""
# TODO: Fix the hob_index to 123 and change related values.
# The true hob_index is 123, but 124 is fine for this unit test.
hob_index = {Spin.up: 124, Spin.down: 124}
procar = self.get_object_by_name(
Procar, ["defects", "MgO", "Va_O1_2", "PROCAR"])
vasprun = self.get_object_by_name(
Vasprun, ["defects", "MgO", "Va_O1_2", "vasprun.xml"])
eigenvalues = vasprun.eigenvalues
structure = self.get_structure_by_name("MgO64atoms-Va_O1_2")
neighboring_sites = [0, 4, 16, 17, 24, 26]
self.prop = ProcarDefectProperty.analyze_procar(
hob_index=hob_index,
procar=procar,
eigenvalues=eigenvalues,
structure=structure,
neighboring_sites=neighboring_sites)
def test_band_edge_energies(self):
expected = { Spin.up: {'hob': {'top': 5.5148, 'bottom': 5.5148},
'lub': {'top': 8.6662, 'bottom': 8.6662}},
Spin.down: {'hob': {'top': 5.5148, 'bottom': 5.5148},
'lub': {'top': 8.6662, 'bottom': 8.6662}}}
self.assertEqual(expected, self.prop.band_edge_energies)
def test_orbital_character(self):
expected = \
{Spin.up: {'hob': {'top': {'Mg': {'s': 0.018, 'p': 0.036,
'd': 0.018, 'f': 0.0},
'O': {'s': 0.018, 'p': 0.216,
'd': 0.0, 'f': 0.0}},
'bottom': {'Mg': {'s': 0.018, 'p': 0.036,
'd': 0.018, 'f': 0.0},
'O': {'s': 0.018, 'p': 0.216,
'd': 0.0, 'f': 0.0}}},
'lub': {'top': {'Mg': {'s': 0.174, 'p': 0.006,
'd': 0.0, 'f': 0.0},
'O': {'s': 0.199, 'p': 0.114,
'd': 0.0, 'f': 0.0}},
'bottom': {'Mg': {'s': 0.174, 'p': 0.006,
'd': 0.0, 'f': 0.0},
'O': {'s': 0.199, 'p': 0.114,
'd': 0.0, 'f': 0.0}}}}}
expected[Spin.down] = deepcopy(expected[Spin.up])
for k1, k2, k3, k4, k5, v in flatten_dict(expected):
self.assertAlmostEqual(
v, self.prop.orbital_character[k1][k2][k3][k4][k5], 3)
def test_participation_ratio(self):
expected = { Spin.up: {'hob': 0.235294, 'lub': 0.060852},
Spin.down: {'hob': 0.235294, 'lub': 0.060852}}
for k1, k2, v in flatten_dict(expected):
self.assertAlmostEqual(v, self.prop.participation_ratio[k1][k2], 5)
class SupercellDftResultsTest(PydefectTest):
def setUp(self):
""" Va_O in the 2+ charge state in 64-atom supercells"""
self.mgO_perfect = \
SupercellCalcResults.from_vasp_files(
directory_path=self.DEFECTS_MGO_DIR / "perfect")
filepath = ["defects", "MgO", "Va_O1_2", "defect_entry.json"]
defect_entry = self.get_object_by_name(DefectEntry.load_json, filepath)
self.mgo_va_o1_2 = SupercellCalcResults.from_vasp_files(
directory_path=self.DEFECTS_MGO_DIR / "Va_O1_2",
defect_entry=defect_entry)
def test_from_vasp_files(self):
# CAUTION: When constructing a Structure object via Structure.from_file,
# velocities are not stored, so an equality check between Structure
# objects returns False. Converting the structure through the POSCAR
# file format may solve this.
# energy
expected = -399.85095628
actual = self.mgo_va_o1_2.total_energy
self.assertAlmostEqual(expected, actual, 5)
# total_magnetization
expected = 0.0
actual = self.mgo_va_o1_2.total_magnetization
self.assertAlmostEqual(expected, actual, 5)
# eigenvalue: test only a single point
expected = [-1.40215e+01, 1.0]
actual = self.mgo_va_o1_2.eigenvalues[Spin.up][0][0]
self.assertArrayAlmostEqual(expected, actual, 5)
def test_dict(self):
expected = self.mgo_va_o1_2.as_dict()
actual = SupercellCalcResults.from_dict(expected).as_dict()
self.assertEqual(expected, actual)
def test_json(self):
tmp_file = tempfile.NamedTemporaryFile()
self.mgo_va_o1_2.to_json_file(tmp_file.name)
actual = SupercellCalcResults.load_json(tmp_file.name)
np.testing.assert_equal(actual.eigenvalues[Spin.up],
self.mgo_va_o1_2.eigenvalues[Spin.up])
def test_msonable(self):
self.assertMSONable(self.mgO_perfect)
self.assertMSONable(self.mgo_va_o1_2)
| 45.032258
| 79
| 0.540294
|
036ce55119476fbe6301c7d070519569bdbad4a1
| 8,208
|
py
|
Python
|
pyartcd/pyartcd/pipelines/check_bugs.py
|
DennisPeriquet/aos-cd-jobs
|
d864953fd70b0828f74e0fe2a602a60ac6820ccb
|
[
"Apache-2.0"
] | null | null | null |
pyartcd/pyartcd/pipelines/check_bugs.py
|
DennisPeriquet/aos-cd-jobs
|
d864953fd70b0828f74e0fe2a602a60ac6820ccb
|
[
"Apache-2.0"
] | null | null | null |
pyartcd/pyartcd/pipelines/check_bugs.py
|
DennisPeriquet/aos-cd-jobs
|
d864953fd70b0828f74e0fe2a602a60ac6820ccb
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import subprocess
import concurrent
import click
import aiohttp
from pyartcd.cli import cli, click_coroutine, pass_runtime
from pyartcd.runtime import Runtime
BASE_URL = 'https://api.openshift.com/api/upgrades_info/v1/graph?arch=amd64&channel=fast'
ELLIOTT_BIN = 'elliott'
async def is_ga(version: str, session):
# 3.11 is an exception, no need to query Openshift API
if version == '3.11':
return True
url = f'{BASE_URL}-{version}'
# A release is considered GA'd if nodes are found
async with session.get(url, headers={'Accept': 'application/json'}) as response:
assert response.status == 200
response.raise_for_status()
response_body = await response.json()
nodes = response_body['nodes']
return len(nodes) > 0
def get_next_version(version: str) -> str:
major, minor = version.split('.')[:2]
return '.'.join([major, str(int(minor) + 1)])
class CheckBugsPipeline:
def __init__(self, runtime: Runtime, channel: str, versions: list, pre_releases: list) -> None:
self.runtime = runtime
self.versions = versions
self.pre_releases = pre_releases
self.logger = runtime.logger
self.applicable_versions = []
self.blockers = {}
self.regressions = {}
self.slack_client = self.initialize_slack_client(runtime, channel)
@staticmethod
def initialize_slack_client(runtime: Runtime, channel: str):
if not channel.startswith('#'):
raise ValueError('Invalid Slack channel name provided')
slack_client = runtime.new_slack_client()
slack_client.bind_channel(channel)
return slack_client
async def run(self):
# Check applicable OCP versions
await self._check_applicable_versions()
# Find blocker bugs
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for v in self.applicable_versions:
futures.append(executor.submit(self._find_blockers, v))
for f in futures:
try:
self.blockers.update(f.result())
except TypeError:
# In case no blockers have been found
pass
# Find regressions
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for v in self.applicable_versions:
futures.append(executor.submit(self._find_regressions, v))
for f in futures:
try:
self.regressions.update(f.result())
except TypeError:
# In case no regressions have been found
pass
# Notify Slack
await self._slack_report()
self.logger.info('All done!')
async def _check_applicable_versions(self):
ga_info = {}
async with aiohttp.ClientSession() as session:
tasks = []
for v in self.versions:
tasks.append(asyncio.ensure_future(is_ga(v, session)))
responses = await asyncio.gather(*tasks)
ga_info = dict(zip(self.versions, responses))
self.applicable_versions = [v for v in self.versions if ga_info.get(v, True)]
if self.applicable_versions:
self.logger.info(f'Found applicable versions: {" ".join(self.applicable_versions)}')
else:
self.logger.warning('No applicable versions found')
def _find_blockers(self, version: str):
self.logger.info(f'Checking blocker bugs for Openshift {version}')
cmd = [
ELLIOTT_BIN,
f'--group=openshift-{version}',
f'--working-dir={version}-working',
'find-bugs:blocker',
'--output=slack'
]
self.logger.info(f'Executing command: {" ".join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
errcode = process.returncode
if errcode:
self.logger.error(f'Command {cmd} failed with {errcode}: see output below')
self.logger.info(err)
return None
out = out.decode().strip().splitlines()
if not out:
self.logger.info('No blockers found for version %s', version)
return None
self.logger.info('Cmd returned: %s', out)
return {version: out}
def _find_regressions(self, version: str):
# Do nothing for 3.11
if version == '3.11':
return None
# Check pre-release
if self._next_is_prerelease(version):
self.logger.info(
'Version %s is in pre-release state: skipping regression checks for %s',
get_next_version(version), version
)
return None
self.logger.info(f'Checking possible regressions for Openshift {version}')
# Find bugs
cmd = [
ELLIOTT_BIN,
f'--group=openshift-{version}',
f'--working-dir={version}-working',
'find-bugs:sweep'
]
self.logger.info(f'Executing command: {" ".join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
errcode = process.returncode
if errcode:
self.logger.error(f'Command {cmd} failed with {errcode}: see output below')
self.logger.info(err)
return None
# First line in elliott stdout is something like "Searching for bugs..."
# Next line (if present) goes like this: "Found N bugs (M ignored):"
# Following is a list of bugs that we need to process
out = out.decode().strip().splitlines()
if len(out) < 2:
return None
bugs = out[-1].split(':')[1].split(', ')
# Verify bugs
cmd = [
ELLIOTT_BIN,
f'--group=openshift-{version}',
f'--working-dir={version}-working',
'verify-bugs',
'--output=slack'
]
cmd.extend(bugs)
self.logger.info(f'Executing command: {" ".join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, _ = process.communicate()
# If process returned 0, no regressions were found
if not process.returncode:
self.logger.info('No regressions found for version %s', version)
return None
out = out.decode().strip().splitlines()
res = {version: out} if out else None
return res
def _next_is_prerelease(self, version: str) -> bool:
return get_next_version(version) in self.pre_releases
async def _slack_report(self):
# If no issues have been found, do nothing
if not any((self.blockers, self.regressions)):
return
# Merge results
from collections import defaultdict
report = defaultdict(list)
for d in (self.blockers, self.regressions):
for k, v in d.items():
report[k].extend(v)
# Format output message
message = ':red-siren: *There are some issues to look into:*'
for k in report.keys():
message += f'\n:warning:*{k}*'
for i in report[k]:
message += f'\n{i}'
self.logger.info('Sending notification to Slack')
self.logger.debug(message)
await self.slack_client.say(message)
@cli.command('check-bugs')
@click.option('--slack_channel', required=False, default='#art-team',
help='Slack channel to be notified for failures')
@click.option('--version', required=True, multiple=True,
help='OCP version to check for blockers e.g. 4.7')
@click.option('--pre_release', required=False, multiple=True,
help='OCP versions still in pre-release state')
@pass_runtime
@click_coroutine
async def check_bugs(runtime: Runtime, slack_channel: str, version: list, pre_release: list):
pipeline = CheckBugsPipeline(runtime, channel=slack_channel, versions=version, pre_releases=pre_release)
await pipeline.run()
| 35.532468
| 108
| 0.601486
|
aecb3b2ae1460d8b77898e47bda5f36545a19365
| 3,181
|
py
|
Python
|
master/admin_migrations/0001_initial.py
|
YangWanjun/areaparking
|
b08bc9b8f8d5f602d823115263b9d040edb9f245
|
[
"Apache-2.0"
] | 1
|
2018-08-02T04:00:44.000Z
|
2018-08-02T04:00:44.000Z
|
master/admin_migrations/0001_initial.py
|
YangWanjun/areaparking
|
b08bc9b8f8d5f602d823115263b9d040edb9f245
|
[
"Apache-2.0"
] | null | null | null |
master/admin_migrations/0001_initial.py
|
YangWanjun/areaparking
|
b08bc9b8f8d5f602d823115263b9d040edb9f245
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-05 06:41
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.admin.models
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='EMailLogEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action_time', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='action time')),
('sender', models.EmailField(max_length=254, verbose_name='差出人')),
('recipient', models.CharField(max_length=1000, verbose_name='宛先')),
('cc', models.CharField(blank=True, max_length=1000, null=True, verbose_name='CC')),
('bcc', models.CharField(blank=True, max_length=1000, null=True, verbose_name='BCC')),
('title', models.CharField(max_length=50, verbose_name='件名')),
('body', models.TextField(verbose_name='メール本文')),
('attachment', models.CharField(blank=True, max_length=255, null=True, verbose_name='添付ファイル名')),
],
options={
'verbose_name': 'メール送信履歴',
'verbose_name_plural': 'メール送信履歴',
'db_table': 'ap_email_log',
'ordering': ['-action_time'],
'managed': False,
},
),
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action_time', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='action time')),
('object_id', models.TextField(blank=True, null=True, verbose_name='object id')),
('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
('change_message', models.TextField(blank=True, verbose_name='change message')),
('content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType', verbose_name='content type')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'log entry',
'verbose_name_plural': 'log entries',
'db_table': 'django_admin_log',
'ordering': ('-action_time',),
},
managers=[
('objects', django.contrib.admin.models.LogEntryManager()),
],
),
]
| 48.19697
| 181
| 0.606413
|
24e8d560f2f515675e4ca9e01399df36c8142ba2
| 18,173
|
py
|
Python
|
src/mygrad/nnet/layers/gru.py
|
kw-0/MyGrad
|
307f1bb5f2391e7f4df49fe43a7acf9d1e8ea141
|
[
"MIT"
] | 147
|
2018-07-14T01:37:35.000Z
|
2022-03-29T06:37:58.000Z
|
src/mygrad/nnet/layers/gru.py
|
kw-0/MyGrad
|
307f1bb5f2391e7f4df49fe43a7acf9d1e8ea141
|
[
"MIT"
] | 223
|
2018-05-31T14:13:18.000Z
|
2022-02-27T18:53:49.000Z
|
src/mygrad/nnet/layers/gru.py
|
kw-0/MyGrad
|
307f1bb5f2391e7f4df49fe43a7acf9d1e8ea141
|
[
"MIT"
] | 27
|
2018-06-17T14:42:05.000Z
|
2021-10-31T00:21:09.000Z
|
import weakref
from numbers import Integral
import numpy as np
from mygrad._utils import SkipGradient
from mygrad.operation_base import Operation
from mygrad.tensor_base import Tensor
try:
from numba import njit, vectorize
except ImportError: # pragma: no cover
raise ImportError(
"The package `numba` must be installed in order to access the gru."
)
@vectorize(
["float32(float32)", "float64(float64)"],
nopython=True,
)
def sig(f): # pragma: no cover
"""
Calculates a sigmoid function
"""
return 1 / (1 + np.exp(-f))
@vectorize(
["float32(float32)", "float64(float64)"],
nopython=True,
)
def d_sig(f): # pragma: no cover
"""
Calculates the derivative of a sigmoid function
"""
return f * (1 - f)
@vectorize(
["float32(float32)", "float64(float64)"],
nopython=True,
)
def d_tanh(f): # pragma: no cover
"""
Calculates the derivative of a tanh function
"""
return 1 - f ** 2
@njit
def dot(a, b):
"""
Calculates the dot product between 2 arrays
of shapes (W,X,Y) and (Y,Z), respectively
"""
return np.dot(a.reshape(-1, a.shape[-1]), b).reshape(*a.shape[:-1], b.shape[-1])
@njit
def _gru_layer(s, z, r, h, Wz, Wr, Wh):
"""Given:
S(t=0)
z = X(t) Uz + bz
r = X(t) Ur + br
h = X(t) Uh + bh
Compute Z(t), R(t), H(t), S(t) for all 1 <= t <= T
Parameters
----------
s : numpy.ndarray, shape=(T+1, N, D)
Modified in-place
z : numpy.ndarray, shape=(T, N, D)
Modified in-place
r : numpy.ndarray, shape=(T, N, D)
Modified in-place
h : numpy.ndarray, shape=(T, N, D)
Modified in-place
Wz : numpy.ndarray, shape=(D, D)
Wr : numpy.ndarray, shape=(D, D)
Wh : numpy.ndarray, shape=(D, D)"""
for n in range(len(s) - 1):
z[n] += np.dot(s[n], Wz)
z[n] = sig(z[n])
r[n] += np.dot(s[n], Wr)
r[n] = sig(r[n])
h[n] += np.dot(r[n] * s[n], Wh)
h[n] = np.tanh(h[n])
s[n + 1] = (1 - z[n]) * h[n] + z[n] * s[n]
@njit
def _gru_dLds(s, z, r, dLds, Wz, Wh, Wr, dz, dh, dr, s_h, one_z):
"""
Z_{t} = sigmoid(Uz X_{t} + Wz S_{t-1} + bz)
R_{t} = sigmoid(Ur X_{t} + Wr S_{t-1} + br)
H_{t} = tanh(Uh X_{t} + Wh (R{t} * S_{t-1}) + bh)
S_{t} = (1 - Z{t}) * H{t} + Z{t} * S_{t-1}
Returns
--------
dL / ds(t) = partial dL / ds(t+1) * ds(t+1) / ds(t)
+ partial dL / ds(t+1) * ds(t+1) / dz(t) * dz(t) / ds(t)
+ partial dL / ds(t+1) * ds(t+1) / dh(t) * dh(t) / ds(t)
+ partial dL / ds(t+1) * ds(t+1) / dh(t) * dh(t) / dr(t) * dr(t) / ds(t)
"""
dLdh = dot(dLds * one_z * dh, Wh)
out = z * dLds
out += dot(dLds * s_h * dz, Wz)
out += dLdh * r
out += dot(dLdh * s * dr, Wr)
return out
@njit
def _gru_bptt(
X, dLds, s, z, r, Wz, Wh, Wr, dz, dh, dr, s_h, one_z, bp_lim, old_dLds=None
):
Wz, Wh, Wr = Wz.T, Wh.T, Wr.T
bptt = bp_lim < len(X) - 1
if bptt: # pragma: no cover
old_dLds = np.zeros_like(dLds)
for i in range(bp_lim):
# dL(t) / ds(t) + dL(t+1) / ds(t)
if bptt: # pragma: no cover
source_index = slice(1, len(dLds) - i)
target_index = slice(None, len(dLds) - (i + 1))
dt = dLds[source_index] - old_dLds[source_index]
old_dLds = np.copy(dLds)
else: # no backprop truncation
source_index = slice(len(dLds) - (i + 1), len(dLds) - i)
target_index = slice(len(dLds) - (i + 2), len(dLds) - (i + 1))
dt = dLds[source_index]
dLds[target_index] += _gru_dLds(
s[source_index],
z[source_index],
r[source_index],
dt,
Wz,
Wh,
Wr,
dz[source_index],
dh[source_index],
dr[source_index],
s_h[source_index],
one_z[source_index],
)
def _backprop(var, grad): # pragma: no cover
if not var.constant:
if var._grad is None:
var._grad = np.asarray(grad)
else:
var._grad += grad
class GRUnit(Operation):
def __call__(
self, X, Uz, Wz, bz, Ur, Wr, br, Uh, Wh, bh, s0=None, bp_lim=None, dropout=0.0
):
if bp_lim is not None:
assert isinstance(bp_lim, Integral) and 0 <= bp_lim < len(X)
assert 0.0 <= dropout < 1.0
self._dropout = dropout
self.bp_lim = bp_lim if bp_lim is not None else len(X) - 1
self.X = X # type: Tensor # shape=(T, N, C)
self.Uz = Uz # type: Tensor # shape=(C, D)
self.Wz = Wz # type: Tensor # shape=(D, D)
self.bz = bz # type: Tensor # shape=(D,)
self.Ur = Ur # type: Tensor # shape=(C, D)
self.Wr = Wr # type: Tensor # shape=(D, D)
self.br = br # type: Tensor # shape=(D,)
self.Uh = Uh # type: Tensor # shape=(C, D)
self.Wh = Wh # type: Tensor # shape=(D, D)
self.bh = bh # type: Tensor # shape=(D,)
self.variables = (
self.X,
self.Uz,
self.Wz,
self.bz,
self.Ur,
self.Wr,
self.br,
self.Uh,
self.Wh,
self.bh,
)
self.type = max(t.dtype for t in self.variables)
T, N, C = X.shape
(D,) = bz.shape
seq = self.X.data
# t starts at 0 for S; all other sequences begin at t = 1
out = np.zeros((T + 1, N, D), dtype=self.type)
if s0 is not None:
out[0] = s0.data if isinstance(s0, Tensor) else s0
# compute all contributions to Z, R, H from the input sequence
# shape: T, N, D
z = np.tensordot(seq, self.Uz.data, [[-1], [0]]).astype(self.type, copy=False)
r = np.tensordot(seq, self.Ur.data, [[-1], [0]]).astype(self.type, copy=False)
h = np.tensordot(seq, self.Uh.data, [[-1], [0]]).astype(self.type, copy=False)
if dropout:
p = 1 - dropout
# For Uz/Ur/Uh: a dropout mask is generated for each datum and is applied uniformly across T
self._dropUz, self._dropUr, self._dropUh = (
np.random.binomial(1, p, size=(3, 1, N, D)) / p
)
self._dropWz, self._dropWr, self._dropWh = (
np.random.binomial(1, p, size=(3, D, D)) / p
)
z *= self._dropUz
r *= self._dropUr
h *= self._dropUh
Wz = (self._dropWz * self.Wz.data).astype(self.type, copy=False)
Wr = (self._dropWr * self.Wr.data).astype(self.type, copy=False)
Wh = (self._dropWh * self.Wh.data).astype(self.type, copy=False)
else:
self._dropUz, self._dropUr, self._dropUh = None, None, None
self._dropWz, self._dropWr, self._dropWh = None, None, None
Wz = self.Wz.data.astype(self.type, copy=False)
Wr = self.Wr.data.astype(self.type, copy=False)
Wh = self.Wh.data.astype(self.type, copy=False)
z += bz.data.astype(self.type, copy=False) # X Uz + bz
r += br.data.astype(self.type, copy=False) # X Ur + br
h += bh.data.astype(self.type, copy=False) # X Uh + bh
_gru_layer(out, z, r, h, Wz, Wr, Wh)
self._z = z
self._r = r
self._h = h
return out
def backward_var(self, grad, index, **kwargs):
raise SkipGradient("Gradient computed in GRU.backward()")
def backward(self, grad, *, graph, **kwargs):
hidden_seq = self._hidden_seq()
if hidden_seq is None: # pragma: no cover
assert False, "should be unreachable"
s = hidden_seq.data[:-1]
z = self._z
r = self._r
h = self._h
dLds = grad[1:].astype(self.type, copy=False)
const = {"1 - h**2": d_tanh(h), "z*(1 - z)": d_sig(z), "r*(1 - r)": d_sig(r)}
if self._dropout:
Wz = (self._dropWz * self.Wz.data).astype(self.type, copy=False)
Wr = (self._dropWr * self.Wr.data).astype(self.type, copy=False)
Wh = (self._dropWh * self.Wh.data).astype(self.type, copy=False)
else:
Wz = self.Wz.data.astype(self.type, copy=False)
Wr = self.Wr.data.astype(self.type, copy=False)
Wh = self.Wh.data.astype(self.type, copy=False)
const["s - h"] = s - h
const["1 - z"] = 1 - z
_gru_bptt(
self.X.data,
dLds,
s,
z,
r,
Wz,
Wh,
Wr,
const["z*(1 - z)"],
const["1 - h**2"],
const["r*(1 - r)"],
const["s - h"],
const["1 - z"],
self.bp_lim,
)
zgrad = dLds * const["s - h"] # dL / dz
hgrad = dLds * const["1 - z"] # dL / dh
rgrad = dot(const["1 - h**2"] * hgrad, Wh.T) * s # dL / dr
hidden_seq._grad = dLds
if not (self.Uz.constant and self.Wz.constant and self.bz.constant):
dz = zgrad * const["z*(1 - z)"]
# backprop through Wz
if not self.Wz.constant:
dWz = np.tensordot(s, dz, ([0, 1], [0, 1]))
if self._dropout:
dWz *= self._dropWz
_backprop(
self.Wz, dWz.astype(self.Wz.dtype, copy=False)
) # self.Wz.backward(dWz, **kwargs)
# backprop through bz
if not self.bz.constant:
_backprop(self.bz, dz.sum(axis=(0, 1), dtype=self.bz.dtype))
# backprop through bz
if not self.Uz.constant:
if self._dropout:
dz *= (
self._dropUz
) # IMPORTANT augmented update: this must come after Wz and bz backprop
_backprop(
self.Uz,
np.tensordot(self.X.data, dz, ([0, 1], [0, 1])).astype(
self.Uz.dtype, copy=False
),
)
if not (self.Ur.constant and self.Wr.constant and self.br.constant):
dr = rgrad * const["r*(1 - r)"]
# backprop through Wr
if not self.Wr.constant:
dWr = np.tensordot(s, dr, ([0, 1], [0, 1]))
if self._dropout:
dWr *= self._dropWr
_backprop(self.Wr, dWr.astype(self.Wr.dtype, copy=False))
# backprop through br
if not self.br.constant:
_backprop(
self.br, dr.sum(axis=(0, 1), dtype=self.br.dtype)
) # self.br.backward(dr.sum(axis=(0, 1)), **kwargs)
# backprop through Ur
if not self.Ur.constant:
if self._dropout:
dr *= (
self._dropUr
) # IMPORTANT augmented update: this must come after Wr and br backprop
_backprop(
self.Ur,
np.tensordot(self.X.data, dr, ([0, 1], [0, 1])).astype(
self.Ur.dtype, copy=False
),
)
if not (self.Uh.constant and self.Wh.constant and self.bh.constant):
dh = hgrad * const["1 - h**2"]
# backprop through Wh
if not self.Wh.constant:
dWh = np.tensordot((s * r), dh, ([0, 1], [0, 1]))
if self._dropout:
dWh *= self._dropWh
_backprop(
self.Wh, dWh.astype(self.Wh.dtype, copy=False)
) # self.Wh.backward(dWh, **kwargs)
# backprop through bh
if not self.bh.constant:
_backprop(
self.bh, dh.sum(axis=(0, 1), dtype=self.bh.dtype)
) # self.bh.backward(dh.sum(axis=(0, 1)), **kwargs)
# backprop through Uh
if not self.Uh.constant:
if self._dropout:
dh *= (
self._dropUh
) # IMPORTANT augmented update: this must come after Wh and bh backprop
_backprop(
self.Uh,
np.tensordot(self.X.data, dh, ([0, 1], [0, 1])).astype(
self.Uh.dtype, copy=False
),
)
# backprop through X
if not self.X.constant:
tmp = dLds * const["1 - z"] * const["1 - h**2"]
if not self._dropout:
dLdX = np.dot(
(dLds * const["s - h"]) * const["z*(1 - z)"], self.Uz.data.T
)
dLdX += np.dot(tmp, self.Uh.data.T)
dLdX += np.dot(
np.dot(tmp, Wh.T) * s * const["r*(1 - r)"], self.Ur.data.T
)
else:
dLdX = np.dot(
(self._dropUz * (dLds * const["s - h"]) * const["z*(1 - z)"]),
self.Uz.data.T,
)
dLdX += np.dot(self._dropUh * tmp, self.Uh.data.T)
dLdX += np.dot(
self._dropUr * (dot(tmp, Wh.T) * s * const["r*(1 - r)"]),
self.Ur.data.T,
)
_backprop(
self.X, dLdX.astype(self.X.dtype, copy=False)
) # self.X.backward(dLdX, **kwargs)
del self._z
del self._r
del self._h
super().backward(grad, graph=graph)
def gru(
X,
Uz,
Wz,
bz,
Ur,
Wr,
br,
Uh,
Wh,
bh,
s0=None,
bp_lim=None,
dropout=0.0,
constant=None,
):
r"""Performs a forward pass of sequential data through a Gated Recurrent Unit layer, returning
the 'hidden-descriptors' arrived at by utilizing the trainable parameters as follows::
Z_{t} = sigmoid(X_{t} Uz + S_{t-1} Wz + bz)
R_{t} = sigmoid(X_{t} Ur + S_{t-1} Wr + br)
H_{t} = tanh(X_{t} Uh + (R{t} * S_{t-1}) Wh + bh)
S_{t} = (1 - Z{t}) * H{t} + Z{t} * S_{t-1}
Parameters
----------
X : array_like, shape=(T, N, C)
The sequential data to be passed forward.
Uz : array_like, shape=(C, D)
The weights used to map sequential data to its hidden-descriptor representation
Wz : array_like, shape=(D, D)
The weights used to map a hidden-descriptor to a hidden-descriptor.
bz : array_like, shape=(D,)
The biases used to scale a hidden-descriptor.
Ur : array_like, shape=(C, D)
The weights used to map sequential data to its hidden-descriptor representation
Wr : array_like, shape=(D, D)
The weights used to map a hidden-descriptor to a hidden-descriptor.
br : array_like, shape=(D,)
The biases used to scale a hidden-descriptor.
Uh : array_like, shape=(C, D)
The weights used to map sequential data to its hidden-descriptor representation
Wh : array_like, shape=(D, D)
The weights used to map a hidden-descriptor to a hidden-descriptor.
bh : array_like, shape=(D,)
The biases used to scale a hidden-descriptor.
s0 : Optional[array_like], shape=(N, D)
The 'seed' hidden descriptors to feed into the RNN. If None, a Tensor
of zeros of shape (N, D) is created.
bp_lim : Optional[int]
*This feature is experimental and is currently untested*.
The (non-zero) limit of the depth of back propagation through time to be
performed. If `None` back propagation is passed back through the entire sequence.
E.g. `bp_lim=3` will propagate gradients only up to 3 steps backward through the
recursive sequence.
dropout : float (default=0.), 0 <= dropout < 1
If non-zero, the dropout scheme described in [1]_ is applied. See Notes
for more details.
constant : bool, optional (default=False)
If True, the resulting Tensor is a constant.
Returns
-------
mygrad.Tensor, shape=(T+1, N, D)
The sequence of 'hidden-descriptors' produced by the forward pass of the RNN.
Notes
-----
- :math:`T` : Sequence length
- :math:`N` : Batch size
- :math:`C` : Length of single datum
- :math:`D` : Length of 'hidden' descriptor
The GRU system of equations is given by:
.. math::
Z_{t} = \sigma(X_{t} U_z + S_{t-1} W_z + b_z)
R_{t} = \sigma(X_{t} U_r + S_{t-1} W_r + b_r)
H_{t} = \tanh(X_{t} U_h + (R_{t} * S_{t-1}) W_h + b_h)
S_{t} = (1 - Z_{t}) * H_{t} + Z_{t} * S_{t-1}
Following the dropout scheme specified in [1]_, the hidden-hidden weights (Wz/Wr/Wh)
randomly have their weights dropped prior to forward/back-prop. The input connections
(via Uz/Ur/Uh) have variational dropout ([2]_) applied to them with a common dropout
mask across all t. That is three static dropout masks, each with shape-(N,D), are
applied to
.. math::
X_{t} U_z
X_{t} U_r
X_{t} U_h
respectively, for all :math:`t`.
References
----------
.. [1] S. Merity, et. al. "Regularizing and Optimizing LSTM Language Models",
arXiv:1708.02182v1, 2017.
.. [2] Y. Gal, Z. Ghahramani "A Theoretically Grounded Application of Dropout
in Recurrent Neural Networks" arXiv:1512.05287v5, 2016."""
if s0 is not None:
if not isinstance(s0, np.ndarray) and not (
isinstance(s0, Tensor) and (constant or s0.constant)
):
raise ValueError(
"GRU does not support non-constant tensors for the initial hidden"
"state value, `s0`"
)
s = Tensor._op(
GRUnit,
X,
Uz,
Wz,
bz,
Ur,
Wr,
br,
Uh,
Wh,
bh,
op_kwargs=dict(s0=s0, bp_lim=bp_lim, dropout=dropout),
constant=constant,
)
try:
s.creator._hidden_seq = weakref.ref(s)
except AttributeError: # pragma: no cover
# `no-autodiff` mode does not record creator
pass
return s
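# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): runs a forward
# pass through a tiny GRU with randomly initialised parameters. The sizes
# T, N, C, D below are arbitrary and chosen only to exercise the shapes
# documented above.
if __name__ == '__main__':
    T, N, C, D = 5, 2, 4, 3
    rng = np.random.default_rng(0)
    X = rng.standard_normal((T, N, C))
    Uz, Ur, Uh = (rng.standard_normal((C, D)) for _ in range(3))
    Wz, Wr, Wh = (rng.standard_normal((D, D)) for _ in range(3))
    bz, br, bh = (np.zeros(D) for _ in range(3))
    hidden = gru(X, Uz, Wz, bz, Ur, Wr, br, Uh, Wh, bh)
    print(hidden.shape)  # expected: (T + 1, N, D)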
| 31.715532
| 104
| 0.504595
|
488d142b11275fb4350573ecbb2695961f425671
| 2,690
|
py
|
Python
|
latextools/shortcuts.py
|
cduck/latextools
|
8161acc88d669951b2b5e1e3e6888b9fc918b49a
|
[
"MIT"
] | 13
|
2020-06-02T22:57:13.000Z
|
2022-03-26T23:07:27.000Z
|
latextools/shortcuts.py
|
cduck/latextools
|
8161acc88d669951b2b5e1e3e6888b9fc918b49a
|
[
"MIT"
] | 3
|
2021-06-03T14:38:17.000Z
|
2022-02-28T23:05:48.000Z
|
latextools/shortcuts.py
|
cduck/latextools
|
8161acc88d669951b2b5e1e3e6888b9fc918b49a
|
[
"MIT"
] | 2
|
2020-08-19T05:44:23.000Z
|
2021-06-03T01:56:48.000Z
|
from .project import LatexProject
from .content import BasicContent
from .document import DocumentConfig, STANDALONE_CONFIG
from .common_preamble import pkg
def render_snippet(content=r'$Z\cdot Y=X$', *packages, commands=(),
lpad=0, rpad=0, tpad=0, bpad=0, pad=None,
config=STANDALONE_CONFIG):
'''Easy way to render a small snippet of Latex code.
Use `latextools.pkg` and `.cmd` for quick package and command definitions.
Returns a Pdf object. Save with `obj.save('file.pdf')`. Add to drawing
with `d.draw(obj)` (using drawSvg).
'''
if pad is not None:
lpad, bpad, rpad, tpad = (pad,) * 4
if config is None:
config = DocumentConfig('standalone')
if (lpad, bpad, rpad, tpad) != (0, 0, 0, 0):
padding = [p if isinstance(p, str) else '{}pt'.format(p)
for p in (lpad, bpad, rpad, tpad)]
border_conf = 'border={{{}}}'.format(' '.join(padding))
if (config.doc_type == 'standalone'
and not any(option.startswith('border=')
for option in config.options)):
config = DocumentConfig(
'standalone', options=(*config.options, border_conf),
packages=config.packages, commands=config.commands)
proj = LatexProject()
content = BasicContent(content, packages, commands)
proj.add_file(content.as_document(path='main.tex', config=config))
r = proj.compile_pdf(options=['-halt-on-error', '-file-line-error',
'-interaction', 'nonstopmode',
'-shell-escape'])
return r
def render_qcircuit(content=r'& \gate{X} & \qw', *packages, r=0.5, c=0.7,
const_size=False, const_row=False, const_col=False,
lpad=1, rpad=1, tpad=1, bpad=1, pad=None,
commands=(), config=None):
'''Easy way to render a qcircuit diagram.
Use `latextools.pkg` and `.cmd` for quick package and command definitions.
Returns a Pdf object. Save with `obj.save('file.pdf')`. Add to drawing
with `d.draw(obj)` (using drawSvg).
'''
if not isinstance(r, str):
r = '{}em'.format(r)
if not isinstance(c, str):
c = '{}em'.format(c)
q_conf = '@R={} @C{}'.format(r, c)
if const_row:
q_conf += ' @!R'
if const_col:
q_conf += ' @!C'
if const_size:
q_conf += ' @!'
content = '\\Qcircuit {} {{\n{}\n}}'.format(q_conf, content.strip())
return render_snippet(content,
pkg.qcircuit, *packages, lpad=lpad, rpad=rpad, tpad=tpad, bpad=bpad,
pad=pad, commands=commands, config=config)
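# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): renders a tiny
# equation and a one-qubit circuit to PDF. A working LaTeX toolchain with the
# qcircuit package is assumed to be on PATH; the file names are arbitrary.
if __name__ == '__main__':
    snippet = render_snippet(r'$e^{i\pi} + 1 = 0$', pad=4)
    snippet.save('snippet.pdf')
    circuit = render_qcircuit(r'& \gate{H} & \meter & \qw', r=0.6, c=0.8)
    circuit.save('circuit.pdf')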
| 40.757576
| 78
| 0.580669
|
8665769f5e157ef85e4bfbc7513a2880671a301e
| 2,288
|
py
|
Python
|
lcm-types/python/vectornav_lcmt.py
|
FikkleG/gallopingFaster
|
2578980f0fbb3a2aa32054bc12cab6e156f1953f
|
[
"MIT"
] | null | null | null |
lcm-types/python/vectornav_lcmt.py
|
FikkleG/gallopingFaster
|
2578980f0fbb3a2aa32054bc12cab6e156f1953f
|
[
"MIT"
] | null | null | null |
lcm-types/python/vectornav_lcmt.py
|
FikkleG/gallopingFaster
|
2578980f0fbb3a2aa32054bc12cab6e156f1953f
|
[
"MIT"
] | null | null | null |
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class vectornav_lcmt(object):
__slots__ = ["q", "w", "a"]
__typenames__ = ["float", "float", "float"]
__dimensions__ = [[4], [3], [3]]
def __init__(self):
self.q = [ 0.0 for dim0 in range(4) ]
self.w = [ 0.0 for dim0 in range(3) ]
self.a = [ 0.0 for dim0 in range(3) ]
def encode(self):
buf = BytesIO()
buf.write(vectornav_lcmt._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack('>4f', *self.q[:4]))
buf.write(struct.pack('>3f', *self.w[:3]))
buf.write(struct.pack('>3f', *self.a[:3]))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != vectornav_lcmt._get_packed_fingerprint():
raise ValueError("Decode error")
return vectornav_lcmt._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = vectornav_lcmt()
self.q = struct.unpack('>4f', buf.read(16))
self.w = struct.unpack('>3f', buf.read(12))
self.a = struct.unpack('>3f', buf.read(12))
return self
_decode_one = staticmethod(_decode_one)
def _get_hash_recursive(parents):
if vectornav_lcmt in parents: return 0
tmphash = (0xf57906decbf7ebdc) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if vectornav_lcmt._packed_fingerprint is None:
vectornav_lcmt._packed_fingerprint = struct.pack(">Q", vectornav_lcmt._get_hash_recursive([]))
return vectornav_lcmt._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
def get_hash(self):
"""Get the LCM hash of the struct"""
return struct.unpack(">Q", vectornav_lcmt._get_packed_fingerprint())[0]
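# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated file): round-trips a
# message through encode()/decode() to show the packed wire format. The
# field values are arbitrary.
if __name__ == '__main__':
    msg = vectornav_lcmt()
    msg.q = [1.0, 0.0, 0.0, 0.0]
    msg.w = [0.01, -0.02, 0.03]
    msg.a = [0.0, 0.0, -9.81]
    payload = msg.encode()
    decoded = vectornav_lcmt.decode(payload)
    print(len(payload), decoded.q, decoded.a)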
| 32.225352
| 106
| 0.63549
|
205a14f14d01200e512f8ca3e91d3ac4554cf899
| 6,698
|
py
|
Python
|
codes/gpt_query/Data/SubData_test.py
|
biswesh456/Simulated-Dialog-Generation
|
b1f12e09c3e0be274f03e66eb08402e0f681f97a
|
[
"Apache-2.0"
] | 6
|
2021-12-12T00:11:25.000Z
|
2022-03-02T23:23:58.000Z
|
codes/gpt_query/Data/SubData_test.py
|
biswesh456/Simulated-Dialog-Generation
|
b1f12e09c3e0be274f03e66eb08402e0f681f97a
|
[
"Apache-2.0"
] | null | null | null |
codes/gpt_query/Data/SubData_test.py
|
biswesh456/Simulated-Dialog-Generation
|
b1f12e09c3e0be274f03e66eb08402e0f681f97a
|
[
"Apache-2.0"
] | null | null | null |
import pickle
import json
import random
import torch
import numpy as np
import os
from tokenizers import ByteLevelBPETokenizer
class SubData_test():
def __init__(self, data_dir, vocab_size, bert_model_name, eot="EOT"):
self.eot = eot
with open(data_dir+"test.input.txt", "r") as f:
valid_contexts = f.readlines()
self.valid_contexts = [[y.strip() for y in x.strip().split(eot)] for x in valid_contexts]
with open(data_dir+"test.tgt.txt", "r") as f:
valid_responses = f.readlines()
self.valid_responses = [x.strip() + ' [SEP]' for x in valid_responses]
with open(data_dir+"test.goal.txt", "r") as f:
valid_goals = f.readlines()
self.valid_goals = [x.strip() for x in valid_goals]
with open(data_dir+"test.key.txt", "r") as f:
valid_keys = f.readlines()
self.valid_keys = [[int(y) for y in x.strip().split()] for x in valid_keys]
self.valid_keys = [[(key[k], key[k+1]) for k in range(0,len(key),2)] for key in self.valid_keys]
self.shuffle_te = np.arange(len(self.valid_contexts))
path = data_dir+"5ByteLevelBPETokenizer" + str(vocab_size)+'-'
self.tokenizer = ByteLevelBPETokenizer(vocab_file= path+"vocab.json",merges_file=path+"merges.txt",
lowercase=True)
self.tokenizer.add_special_tokens(["<pad>", "[SEP]"])
def tensorFromSentence(self, sent, maxlen):
indices = torch.Tensor(self.tokenizer.encode(sent).ids).long()
ulen = len(indices)
if ulen>maxlen:
indices = torch.cat((indices[:maxlen-1], indices[-1:]), dim=0)
ulen = maxlen
return indices, ulen
def TensorFromGoal(self, sent, maxlen, g_keys):
encoding = self.tokenizer.encode(sent)
offset = encoding.offsets
j = 0
new_keys = []
# map the key indices to new key indices after tokenisation
for start,end in g_keys:
start-=1
while j < len(offset) and j < maxlen:
if offset[j][0] == start:
new_keys.append(j)
if offset[j][1] == end:
j += 1
break
j += 1
while j < len(offset) and j < maxlen and offset[j][1] != end:
new_keys.append(j)
j += 1
if j<maxlen:
new_keys.append(j)
j += 1
break
else:
j += 1
indices = torch.Tensor(encoding.ids).long()
ulen = len(indices)
if ulen>maxlen:
indices = torch.cat((indices[:maxlen-1], indices[-1:]), dim=0)
ulen = maxlen
return indices, ulen, new_keys, len(new_keys)
def shuffle_train(self):
self.shuffle_te = np.random.permutation(len(self.valid_contexts))
def get_batch(self, batch_size=10, maxlen=50, train=True, start=-1, word=None, goallen=500):
contexts = self.valid_contexts
responses = self.valid_responses
shuffle = self.shuffle_te
goal = self.valid_goals
keys = self.valid_keys
cc_plain = []
rr_plain = []
g_plain = []
g_keys = []
for i in range(batch_size):
if word is None:
if start==-1:
ind = random.randint(0, len(contexts)-1)
else:
ind = start + i
ind = shuffle[ind]
else:
if start==-1:
x = random.randint(0, len(self.inverted_index[word])-1)
ind = self.inverted_index[word][x]
else:
x = start + i
ind = self.inverted_index[word][x]
cc = contexts[ind]
rr = responses[ind]
g = goal[ind]
k = keys[ind]
cc_plain.append(cc)
rr_plain.append(rr)
g_plain.append(g)
g_keys.append(k)
max_cutts = max([len(cc) for cc in cc_plain])
c_utts = torch.zeros(batch_size, max_cutts, maxlen).long()
c_ulens = torch.zeros(batch_size, max_cutts).long()
c_clens = torch.zeros(batch_size).long()
cind_mat = torch.zeros(batch_size, max_cutts, maxlen)
r_utts = torch.zeros(batch_size, 1, maxlen).long()
r_ulens = torch.zeros(batch_size, 1).long()
r_clens = torch.zeros(batch_size).long()
rind_mat = torch.zeros(batch_size, 1, maxlen)
g_utts = torch.zeros(batch_size, goallen).long()
g_ulens = torch.zeros(batch_size).long()
g_clens = torch.zeros(batch_size).long()
gind_mat = torch.zeros(batch_size, goallen)
keys = torch.zeros(batch_size, goallen).long()
kind_mat = torch.zeros(batch_size, goallen)
k_ulens = torch.zeros(batch_size).long()
for i,cc in enumerate(cc_plain):
for j,utt in enumerate(cc):
uinds, ulen = self.tensorFromSentence(utt, maxlen)
cind_mat[i, j, :ulen] = 1
c_utts[i,j, :ulen] = uinds
c_ulens[i,j] = ulen
c_clens[i] += 1
for i,rr in enumerate(rr_plain):
uinds, ulen = self.tensorFromSentence(rr, maxlen)
rind_mat[i, 0, :ulen] = 1
r_utts[i, 0, :ulen] = uinds
r_ulens[i, 0] = ulen
r_clens[i] = 1
for i,gg in enumerate(g_plain):
uinds, ulen, new_key, klen = self.TensorFromGoal(gg, goallen, g_keys[i])
gind_mat[i, :ulen] = 1
g_utts[i, :ulen] = uinds
g_ulens[i] = ulen
keys[i, :klen] = torch.LongTensor(new_key)
kind_mat[i, :klen] = 1
k_ulens[i] = klen
c_utts = c_utts[:,:,:c_ulens.max()]
r_utts = r_utts[:,:,:r_ulens.max()]
g_utts = g_utts[:,:g_ulens.max()]
cind_mat = cind_mat[:,:,:c_ulens.max()]
rind_mat = rind_mat[:,:,:r_ulens.max()]
gind_mat = gind_mat[:,:g_ulens.max()]
keys = keys[:,:k_ulens.max()]
kind_mat = kind_mat[:,:k_ulens.max()]
return c_utts, c_ulens, c_clens, r_utts, r_ulens, r_clens, cind_mat,\
rind_mat, gind_mat, g_utts, g_ulens, keys, kind_mat, k_ulens
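# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): the data_dir and
# vocab_size below are placeholders; the directory must contain the expected
# test.*.txt files plus the trained ByteLevelBPETokenizer vocab/merges.
if __name__ == '__main__':
    data = SubData_test('data/', vocab_size=5000, bert_model_name=None)
    batch = data.get_batch(batch_size=2, maxlen=50, start=0)
    c_utts, c_ulens, c_clens = batch[0], batch[1], batch[2]
    print(c_utts.shape, c_clens)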
| 36.601093
| 108
| 0.513288
|
598caab27782b8dbd386a798d00cab6e5bad899e
| 1,529
|
py
|
Python
|
onlineShop/Library/models.py
|
alirezaryahi/django-onlineShop
|
b36c4a37ac98977862b83f646c2303ec4bb1a6ab
|
[
"MIT"
] | null | null | null |
onlineShop/Library/models.py
|
alirezaryahi/django-onlineShop
|
b36c4a37ac98977862b83f646c2303ec4bb1a6ab
|
[
"MIT"
] | null | null | null |
onlineShop/Library/models.py
|
alirezaryahi/django-onlineShop
|
b36c4a37ac98977862b83f646c2303ec4bb1a6ab
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Category(models.Model):
title = models.CharField(max_length=200, verbose_name='عنوان')
class Meta:
verbose_name = 'موضوع'
verbose_name_plural = 'موضوع ها'
def __str__(self):
return self.title
class Author(models.Model):
first_name = models.CharField(max_length=100, verbose_name='نام')
last_name = models.CharField(max_length=100, verbose_name='نام خانوادگی')
class Meta:
verbose_name = 'نویسنده'
verbose_name_plural = 'نویسندگان'
def __str__(self):
return self.last_name
class Book(models.Model):
category = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name='موضوع')
author = models.ForeignKey(Author, on_delete=models.CASCADE, verbose_name='نویسنده')
title = models.CharField(max_length=200, verbose_name='عنوان کتاب')
description = models.TextField(verbose_name='توضیحات', null=True, blank=True)
price = models.IntegerField(default=0, verbose_name='قیمت')
image = models.ImageField(upload_to='books/', null=True, blank=True, verbose_name='تصویر')
vote = models.IntegerField(default=0)
is_exist = models.BooleanField(default=True, verbose_name='موجود')
select = models.CharField(max_length=100, default='book')
class Meta:
verbose_name = 'کتاب'
verbose_name_plural = 'کتاب ها'
ordering = ['-vote']
def __str__(self):
return self.title
| 31.854167
| 95
| 0.676913
|
6202bb3207c3920803da072ff3262aa98dbdb0d5
| 624
|
py
|
Python
|
comments/migrations/0002_auto_20210531_2230.py
|
Stepan91/utk_api
|
f917afc9019711f8d8643ebea88eed84f33c449a
|
[
"MIT"
] | null | null | null |
comments/migrations/0002_auto_20210531_2230.py
|
Stepan91/utk_api
|
f917afc9019711f8d8643ebea88eed84f33c449a
|
[
"MIT"
] | null | null | null |
comments/migrations/0002_auto_20210531_2230.py
|
Stepan91/utk_api
|
f917afc9019711f8d8643ebea88eed84f33c449a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-05-31 19:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comments', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='Адрес электронной почты'),
),
migrations.AlterField(
model_name='comment',
name='image',
field=models.ImageField(blank=True, upload_to='', verbose_name='Изображение'),
),
]
| 26
| 105
| 0.599359
|
0b2c6c537a9233c47ee653a0dbab586ac275bfbd
| 36,162
|
py
|
Python
|
operator/main.py
|
gnossen/kadalu
|
65af1ac86eb0d79f1589cbbfe82320800d6b357c
|
[
"Apache-2.0"
] | null | null | null |
operator/main.py
|
gnossen/kadalu
|
65af1ac86eb0d79f1589cbbfe82320800d6b357c
|
[
"Apache-2.0"
] | null | null | null |
operator/main.py
|
gnossen/kadalu
|
65af1ac86eb0d79f1589cbbfe82320800d6b357c
|
[
"Apache-2.0"
] | null | null | null |
"""
KaDalu Operator: Once started, deploys required CSI drivers,
bootstraps the ConfigMap and waits for the CRD update to create
Server pods
"""
import json
import logging
import os
import re
import time
import uuid
import urllib3
from jinja2 import Template
from kadalulib import execute as lib_execute
from kadalulib import logf, logging_setup, send_analytics_tracker, is_host_reachable
from kubernetes import client, config, watch
from urllib3.exceptions import (ProtocolError, NewConnectionError)
from utils import CommandError
from utils import execute as utils_execute
NAMESPACE = os.environ.get("KADALU_NAMESPACE", "kadalu")
VERSION = os.environ.get("KADALU_VERSION", "latest")
K8S_DIST = os.environ.get("K8S_DIST", "kubernetes")
KUBELET_DIR = os.environ.get("KUBELET_DIR")
VERBOSE = os.environ.get("VERBOSE", "no")
MANIFESTS_DIR = "/kadalu/templates"
KUBECTL_CMD = "/usr/bin/kubectl"
KADALU_CONFIG_MAP = "kadalu-info"
CSI_POD_PREFIX = "csi-"
STORAGE_CLASS_NAME_PREFIX = "kadalu."
# TODO: Add ThinArbiter
VALID_HOSTING_VOLUME_TYPES = ["Replica1", "Replica2", "Replica3",
"Disperse", "External"]
VALID_PV_RECLAIM_POLICY_TYPES = ["delete", "archive"]
VOLUME_TYPE_REPLICA_1 = "Replica1"
VOLUME_TYPE_REPLICA_2 = "Replica2"
VOLUME_TYPE_REPLICA_3 = "Replica3"
VOLUME_TYPE_EXTERNAL = "External"
VOLUME_TYPE_DISPERSE = "Disperse"
CREATE_CMD = "create"
APPLY_CMD = "apply"
DELETE_CMD = "delete"
def template(filename, **kwargs):
"""Substitute the template with provided fields"""
content = ""
with open(filename + ".j2") as template_file:
content = template_file.read()
if kwargs.get("render", False):
return Template(content).render(**kwargs)
return Template(content).stream(**kwargs).dump(filename)
def bricks_validation(bricks):
"""Validate Brick path and node options"""
ret = True
for idx, brick in enumerate(bricks):
if not ret:
break
if brick.get("pvc", None) is not None:
continue
if brick.get("path", None) is None and \
brick.get("device", None) is None:
logging.error(logf("Storage path/device not specified",
number=idx+1))
ret = False
if brick.get("node", None) is None:
logging.error(logf("Storage node not specified", number=idx+1))
ret = False
return ret
def validate_ext_details(obj):
"""Validate external Volume details"""
cluster = obj["spec"].get("details", None)
if not cluster:
logging.error(logf("External Cluster details not given."))
return False
valid = 0
ghosts = []
gport = 24007
if cluster.get('gluster_hosts', None):
valid += 1
hosts = cluster.get('gluster_hosts')
ghosts.extend(hosts)
if cluster.get('gluster_host', None):
valid += 1
ghosts.append(cluster.get('gluster_host'))
if cluster.get('gluster_volname', None):
valid += 1
if cluster.get('gluster_port', None):
gport = cluster.get('gluster_port', 24007)
if valid < 2:
logging.error(logf("No 'host' and 'volname' details provided."))
return False
if not is_host_reachable(ghosts, gport):
logging.error(logf("gluster server not reachable: on %s:%d" %
(ghosts, gport)))
# There may be transient network glitches at this point, so it is not
# good to fail the validation here; instead, just log it so we are
# aware this is a possible reason.
#return False
logging.debug(logf("External Storage %s successfully validated" % \
obj["metadata"].get("name", "<unknown>")))
return True
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-branches
def validate_volume_request(obj):
"""Validate the Volume request for Replica options, number of bricks etc"""
if not obj.get("spec", None):
logging.error("Storage 'spec' not specified")
return False
pv_reclaim_policy = obj["spec"].get("pvReclaimPolicy", "delete")
if pv_reclaim_policy not in VALID_PV_RECLAIM_POLICY_TYPES:
logging.error("PV Reclaim Policy not valid")
return False
voltype = obj["spec"].get("type", None)
if voltype is None:
logging.error("Storage type not specified")
return False
if voltype not in VALID_HOSTING_VOLUME_TYPES:
logging.error(logf("Invalid Storage type",
valid_types=",".join(VALID_HOSTING_VOLUME_TYPES),
provided_type=voltype))
return False
if voltype == VOLUME_TYPE_EXTERNAL:
return validate_ext_details(obj)
bricks = obj["spec"].get("storage", [])
if not bricks_validation(bricks):
return False
decommissioned = ""
subvol_bricks_count = 1
if voltype == VOLUME_TYPE_REPLICA_2:
subvol_bricks_count = 2
elif voltype == VOLUME_TYPE_REPLICA_3:
subvol_bricks_count = 3
if voltype == VOLUME_TYPE_DISPERSE:
disperse_config = obj["spec"].get("disperse", None)
if disperse_config is None:
logging.error("Disperse Volume data and redundancy "
"count is not specified")
return False
data_bricks = disperse_config.get("data", 0)
redundancy_bricks = disperse_config.get("redundancy", 0)
if data_bricks == 0 or redundancy_bricks == 0:
logging.error("Disperse Volume data or redundancy "
"count is not specified")
return False
subvol_bricks_count = data_bricks + redundancy_bricks
# redundancy must be greater than 0, and the total number
# of bricks must be greater than 2 * redundancy. This
# means that a dispersed volume must have a minimum of 3 bricks.
if subvol_bricks_count <= (2 * redundancy_bricks):
logging.error("Invalid redundancy for the Disperse Volume")
return False
# stripe_size = (bricks_count - redundancy) * 512
# Using combinations of #Bricks/redundancy that give a power
# of two for the stripe size will make the disperse volume
# perform better in most workloads because it's more typical
        # to write information in blocks that are multiples of two
# https://docs.gluster.org/en/latest/Administrator-Guide
# /Setting-Up-Volumes/#creating-dispersed-volumes
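        # Worked example (editor's illustration): with data=4 and redundancy=2,
        # subvol_bricks_count = 6, which is greater than 2 * 2, so the
        # redundancy check above passes; the stripe size would be
        # (6 - 2) * 512 = 2048 bytes, a power of two, so the optimality
        # check below also passes.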
if data_bricks % 2 != 0:
logging.error("Disperse Configuration is not Optimal")
return False
if len(bricks) % subvol_bricks_count != 0:
logging.error("Invalid number of storage directories/devices"
" specified")
return False
if subvol_bricks_count > 1:
for i in range(0, int(len(bricks) / subvol_bricks_count)):
decommissioned = ""
for k in range(0, subvol_bricks_count):
brick_idx = (i * subvol_bricks_count) + k
brick = bricks[brick_idx]
decom = brick.get("decommissioned", "")
if k == 0:
decommissioned = decom
continue
if decom != decommissioned:
logging.error(logf(
"All of distribute subvolume should be marked decommissioned",
brick=brick, brick_index=brick_idx))
return False
# If we are here, decommissioned option is properly given.
if voltype == VOLUME_TYPE_REPLICA_2:
tiebreaker = obj["spec"].get("tiebreaker", None)
if tiebreaker and (not tiebreaker.get("node", None) or
not tiebreaker.get("path", None)):
logging.error(logf("'tiebreaker' provided for replica2 "
"config is not valid"))
return False
logging.debug(logf("Storage %s successfully validated" % \
obj["metadata"].get("name", "<unknown>")))
return True
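# Illustrative sketch (editor's addition): a minimal Replica1 request that
# passes validate_volume_request(), assuming VOLUME_TYPE_REPLICA_1 == "Replica1"
# (node/device names are hypothetical):
#   {"metadata": {"name": "storage-pool-1"},
#    "spec": {"type": "Replica1",
#             "storage": [{"node": "kube1", "device": "/dev/vdc"}]}}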
def get_brick_device_dir(brick):
"""If custom file is passed as brick device then the
parent directory needs to be mounted as is
in server container"""
brick_device_dir = ""
logging.info(repr(brick))
brickdev = brick.get("device", "")
logging.info(brickdev)
if brickdev != "" and not brickdev.startswith("/dev/"):
brick_device_dir = os.path.dirname(brickdev)
return brick_device_dir
def get_brick_hostname(volname, idx, suffix=True):
"""Brick hostname is <statefulset-name>-<ordinal>.<service-name>
statefulset name is the one which is visible when the
`get pods` command is run, so the format used for that name
is "server-<volname>-<idx>". Escape dots from the
hostname from the input otherwise will become invalid name.
Service is created with name as Volume name. For example,
brick_hostname will be "server-spool1-0-0.spool1" and
server pod name will be "server-spool1-0"
"""
tmp_vol = volname.replace("-", "_")
dns_friendly_volname = re.sub(r'\W+', '', tmp_vol).replace("_", "-")
hostname = "server-%s-%d" % (dns_friendly_volname, idx)
if suffix:
return "%s-0.%s" % (hostname, volname)
return hostname
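# Illustrative examples (editor's sketch) for get_brick_hostname(), assuming a
# hypothetical storage named "spool1":
#   get_brick_hostname("spool1", 0)                -> "server-spool1-0-0.spool1"
#   get_brick_hostname("spool1", 1, suffix=False)  -> "server-spool1-1"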
def upgrade_storage_pods(core_v1_client):
"""
Upgrade the Storage pods after operator pod upgrade
"""
# Add new entry in the existing config map
configmap_data = core_v1_client.read_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE)
for key in configmap_data.data:
if ".info" not in key:
continue
volname = key.replace('.info', '')
data = json.loads(configmap_data.data[key])
logging.info(logf("config map", volname=volname, data=data))
if data['type'] == VOLUME_TYPE_EXTERNAL:
# nothing to be done for upgrade, say we are good.
logging.debug(logf(
"volume type external, nothing to upgrade",
volname=volname,
data=data))
continue
if data['type'] == VOLUME_TYPE_REPLICA_1:
# No promise of high availability, upgrade
logging.debug(logf(
"volume type Replica1, calling upgrade",
volname=volname,
data=data))
# TODO: call upgrade
        # Replica 2 and Replica 3 need to check for self-heal
# count 0 before going ahead with upgrade.
# glfsheal volname --file-path=/template/file info-summary
obj = {}
obj["metadata"] = {}
obj["spec"] = {}
obj["metadata"]["name"] = volname
obj["spec"]["type"] = data['type']
obj["spec"]["pvReclaimPolicy"] = data.get("pvReclaimPolicy", "delete")
obj["spec"]["volume_id"] = data["volume_id"]
obj["spec"]["storage"] = []
# Need this loop so below array can be constructed in the proper order
for val in data["bricks"]:
obj["spec"]["storage"].append({})
# Set Node ID for each storage device from configmap
for val in data["bricks"]:
idx = val["brick_index"]
obj["spec"]["storage"][idx]["node_id"] = val["node_id"]
obj["spec"]["storage"][idx]["path"] = val["host_brick_path"]
obj["spec"]["storage"][idx]["node"] = val["kube_hostname"]
obj["spec"]["storage"][idx]["device"] = val["brick_device"]
obj["spec"]["storage"][idx]["pvc"] = val["pvc_name"]
if data['type'] == VOLUME_TYPE_REPLICA_2:
if "tie-breaker.kadalu.io" not in data['tiebreaker']['node']:
obj["spec"]["tiebreaker"] = data['tiebreaker']
# TODO: call upgrade_pods_with_heal_check() here
deploy_server_pods(obj)
def update_config_map(core_v1_client, obj):
"""
    Volinfo of the new hosting Volume is generated and written to the ConfigMap
"""
volname = obj["metadata"]["name"]
voltype = obj["spec"]["type"]
pv_reclaim_policy = obj["spec"].get("pvReclaimPolicy", "delete")
volume_id = obj["spec"]["volume_id"]
disperse_config = obj["spec"].get("disperse", {})
data = {
"namespace": NAMESPACE,
"kadalu_version": VERSION,
"volname": volname,
"volume_id": volume_id,
"kadalu_format": obj["spec"].get("kadalu_format", "native"),
"type": voltype,
"pvReclaimPolicy" : pv_reclaim_policy,
"bricks": [],
"disperse": {
"data": disperse_config.get("data", 0),
"redundancy": disperse_config.get("redundancy", 0)
},
"options": obj["spec"].get("options", {})
}
# Add new entry in the existing config map
configmap_data = core_v1_client.read_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE)
# For each brick, add brick path and node id
bricks = obj["spec"]["storage"]
for idx, storage in enumerate(bricks):
data["bricks"].append({
"brick_path": "/bricks/%s/data/brick" % volname,
"kube_hostname": storage.get("node", ""),
"node": get_brick_hostname(volname, idx),
"node_id": storage["node_id"],
"host_brick_path": storage.get("path", ""),
"brick_device": storage.get("device", ""),
"pvc_name": storage.get("pvc", ""),
"brick_device_dir": get_brick_device_dir(storage),
"decommissioned": storage.get("decommissioned", ""),
"brick_index": idx
})
if voltype == VOLUME_TYPE_REPLICA_2:
tiebreaker = obj["spec"].get("tiebreaker", None)
if not tiebreaker:
logging.warning(logf("No 'tiebreaker' provided for replica2 "
"config. Using default tie-breaker.kadalu.io:/mnt",
volname=volname))
# Add default tiebreaker if no tie-breaker option provided
tiebreaker = {
"node": "tie-breaker.kadalu.io",
"path": "/mnt",
}
if not tiebreaker.get("port", None):
tiebreaker["port"] = 24007
data["tiebreaker"] = tiebreaker
volinfo_file = "%s.info" % volname
configmap_data.data[volinfo_file] = json.dumps(data)
core_v1_client.patch_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE, configmap_data)
logging.info(logf("Updated configmap", name=KADALU_CONFIG_MAP,
volname=volname))
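# Sketch of the entry update_config_map() writes (editor's illustration; field
# values are hypothetical, for a Replica1 storage named "spool1"):
#   configmap_data.data["spool1.info"] = JSON of
#     {"volname": "spool1", "type": "Replica1", "pvReclaimPolicy": "delete",
#      "bricks": [{"brick_path": "/bricks/spool1/data/brick",
#                  "node": "server-spool1-0-0.spool1", "brick_index": 0, ...}],
#      "disperse": {"data": 0, "redundancy": 0}, "options": {}}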
def deploy_server_pods(obj):
"""
Deploy server pods depending on type of Hosting
Volume and other options specified
"""
# Deploy server pod
volname = obj["metadata"]["name"]
voltype = obj["spec"]["type"]
pv_reclaim_policy = obj["spec"].get("pvReclaimPolicy", "delete")
docker_user = os.environ.get("DOCKER_USER", "kadalu")
shd_required = False
if voltype in (VOLUME_TYPE_REPLICA_3, VOLUME_TYPE_REPLICA_2,
VOLUME_TYPE_DISPERSE):
shd_required = True
template_args = {
"namespace": NAMESPACE,
"kadalu_version": VERSION,
"docker_user": docker_user,
"volname": volname,
"voltype": voltype,
"pvReclaimPolicy": pv_reclaim_policy,
"volume_id": obj["spec"]["volume_id"],
"shd_required": shd_required
}
# One StatefulSet per Brick
for idx, storage in enumerate(obj["spec"]["storage"]):
template_args["host_brick_path"] = storage.get("path", "")
template_args["kube_hostname"] = storage.get("node", "")
# TODO: Understand the need, and usage of suffix
template_args["serverpod_name"] = get_brick_hostname(
volname,
idx,
suffix=False
)
template_args["brick_path"] = "/bricks/%s/data/brick" % volname
template_args["brick_index"] = idx
template_args["brick_device"] = storage.get("device", "")
template_args["pvc_name"] = storage.get("pvc", "")
template_args["brick_device_dir"] = get_brick_device_dir(storage)
template_args["brick_node_id"] = storage["node_id"]
template_args["k8s_dist"] = K8S_DIST
template_args["verbose"] = VERBOSE
filename = os.path.join(MANIFESTS_DIR, "server.yaml")
template(filename, **template_args)
lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
logging.info(logf("Deployed Server pod",
volname=volname,
manifest=filename,
node=storage.get("node", "")))
def handle_external_storage_addition(core_v1_client, obj):
"""Deploy service(One service per Volume)"""
volname = obj["metadata"]["name"]
details = obj["spec"]["details"]
pv_reclaim_policy = obj["spec"].get("pvReclaimPolicy", "delete")
hosts = []
ghost = details.get("gluster_host", None)
ghosts = details.get("gluster_hosts", None)
if ghost:
hosts.append(ghost)
if ghosts:
hosts.extend(ghosts)
data = {
"volname": volname,
"volume_id": obj["spec"]["volume_id"],
"type": VOLUME_TYPE_EXTERNAL,
"pvReclaimPolicy": pv_reclaim_policy,
# CRD would set 'native' but just being cautious
"kadalu_format": obj["spec"].get("kadalu_format", "native"),
"gluster_hosts": ",".join(hosts),
"gluster_volname": details["gluster_volname"],
"gluster_options": details.get("gluster_options", ""),
}
# Add new entry in the existing config map
configmap_data = core_v1_client.read_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE)
volinfo_file = "%s.info" % volname
configmap_data.data[volinfo_file] = json.dumps(data)
core_v1_client.patch_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE, configmap_data)
logging.info(logf("Updated configmap", name=KADALU_CONFIG_MAP,
volname=volname))
filename = os.path.join(MANIFESTS_DIR, "external-storageclass.yaml")
template(filename, **data)
lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
logging.info(logf("Deployed External StorageClass", volname=volname, manifest=filename))
def handle_added(core_v1_client, obj):
"""
New Volume is requested. Update the configMap and deploy
"""
if not validate_volume_request(obj):
# TODO: Delete Custom resource
logging.debug(logf(
"validation of volume request failed",
yaml=obj
))
return
# Ignore if already deployed
volname = obj["metadata"]["name"]
pods = core_v1_client.list_namespaced_pod(NAMESPACE)
for pod in pods.items:
if pod.metadata.name.startswith("server-" + volname + "-"):
logging.debug(logf(
"Ignoring already deployed server statefulsets",
storagename=volname
))
return
# Add new entry in the existing config map
configmap_data = core_v1_client.read_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE)
if configmap_data.data.get("%s.info" % volname, None):
# Volume already exists
logging.debug(logf(
"Ignoring already updated volume config",
storagename=volname
))
return
# Generate new Volume ID
if obj["spec"].get("volume_id", None) is None:
obj["spec"]["volume_id"] = str(uuid.uuid1())
# Apply existing Volume ID to recreate storage pool from existing device/path
else:
logging.info(logf(
"Applying existing volume id",
volume_id=obj["spec"]["volume_id"]
))
voltype = obj["spec"]["type"]
if voltype == VOLUME_TYPE_EXTERNAL:
handle_external_storage_addition(core_v1_client, obj)
return
# Generate Node ID for each storage device.
for idx, _ in enumerate(obj["spec"]["storage"]):
obj["spec"]["storage"][idx]["node_id"] = "node-%d" % idx
# Storage Class
deploy_storage_class(obj)
update_config_map(core_v1_client, obj)
deploy_server_pods(obj)
filename = os.path.join(MANIFESTS_DIR, "services.yaml")
template(filename, namespace=NAMESPACE, volname=volname)
lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
logging.info(logf("Deployed Service", volname=volname, manifest=filename))
def handle_modified(core_v1_client, obj):
"""
Handle when Volume option is updated or Volume
state is changed to maintenance
"""
# TODO: Handle Volume maintenance mode
volname = obj["metadata"]["name"]
voltype = obj["spec"]["type"]
if voltype == VOLUME_TYPE_EXTERNAL:
# Modification of 'External' volume type is not supported
logging.info(logf(
"Modification of 'External' volume type is not supported",
storagename=volname
))
return
if not validate_volume_request(obj):
logging.debug(logf(
"validation of volume request failed",
yaml=obj
))
return
configmap_data = core_v1_client.read_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE)
if not configmap_data.data.get("%s.info" % volname, None):
        # Volume doesn't exist
logging.error(logf(
"Volume config not found",
storagename=volname
))
return
# Volume ID (uuid) is already generated, re-use
cfgmap = json.loads(configmap_data.data[volname + ".info"])
# Get volume-id from config map
obj["spec"]["volume_id"] = cfgmap["volume_id"]
# Set Node ID for each storage device from configmap
for idx, _ in enumerate(obj["spec"]["storage"]):
obj["spec"]["storage"][idx]["node_id"] = "node-%d" % idx
# Add new entry in the existing config map
update_config_map(core_v1_client, obj)
deploy_server_pods(obj)
filename = os.path.join(MANIFESTS_DIR, "services.yaml")
template(filename, namespace=NAMESPACE, volname=volname)
lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
logging.info(logf("Deployed Service", volname=volname, manifest=filename))
def handle_deleted(core_v1_client, obj):
"""
    If the number of PVs provisioned from that volume
    is zero, delete the respective server pods.
    If the number of PVs is not zero, wait or periodically
    check for num_pvs and delete the server pods only when it reaches zero.
"""
volname = obj["metadata"]["name"]
storage_info_data = get_configmap_data(volname)
logging.info(logf("Delete requested", volname=volname))
pv_count = get_num_pvs(storage_info_data)
if pv_count == -1:
logging.error(
logf("Storage delete failed. Failed to get PV count",
number_of_pvs=pv_count,
storage=volname))
return
if pv_count != 0:
logging.warning(
logf("Storage delete failed. Storage is not empty",
number_of_pvs=pv_count,
storage=volname))
elif pv_count == 0:
hostvol_type = storage_info_data.get("type")
        # We can't delete the external volume itself, but we can clean up its
        # StorageClass and ConfigMap.
        # Delete ConfigMap and StorageClass for both Native & External
delete_storage_class(volname, hostvol_type)
delete_config_map(core_v1_client, obj)
if hostvol_type != "External":
delete_server_pods(storage_info_data, obj)
filename = os.path.join(MANIFESTS_DIR, "services.yaml")
template(filename, namespace=NAMESPACE, volname=volname)
lib_execute(KUBECTL_CMD, DELETE_CMD, "-f", filename)
logging.info(
logf("Deleted Service", volname=volname, manifest=filename))
return
def get_configmap_data(volname):
"""
Get storage info data from kadalu configmap
"""
cmd = ["kubectl", "get", "configmap", "kadalu-info", "-nkadalu", "-ojson"]
try:
resp = utils_execute(cmd)
config_data = json.loads(resp.stdout)
data = config_data['data']
storage_name = "%s.info" % volname
storage_info_data = data[storage_name]
# Return data in 'dict' format
return json.loads(storage_info_data)
except CommandError as err:
logging.error(logf(
"Failed to get details from configmap",
error=err
))
return None
def get_num_pvs(storage_info_data):
"""
    Get the number of PVs provisioned from the
    volume requested for deletion, based on the
    configmap data.
"""
volname = storage_info_data['volname']
cmd = None
if storage_info_data.get("type") == "External":
        # We can't query the external cluster directly, so instead count the
        # existing PVs that use the external storageclass
volname = "kadalu." + volname
jpath = ('jsonpath=\'{range .items[?(@.spec.storageClassName=="%s")]}'
'{.spec.storageClassName}{"\\n"}{end}\'' % volname)
cmd = ["kubectl", "get", "pv", "-o", jpath]
else:
bricks = storage_info_data['bricks']
dbpath = "/bricks/" + volname + "/data/brick/stat.db"
query = ("select count(pvname) from pv_stats;")
cmd = [
"kubectl", "exec", "-i",
bricks[0]['node'].replace("." + volname, ""), "-c", "server",
"-nkadalu", "--", "sqlite3", dbpath, query
]
try:
resp = utils_execute(cmd)
parts = resp.stdout.strip("'").split()
if storage_info_data.get("type") == "External":
return len(parts)
pv_count = int(parts[0])
return pv_count
except CommandError as msg:
# 1. If storage is created but no PV is carved then pv_stats table is not
# created in SQLITE3
# 2. If we fail to create 'server' pod then there'll be no 'server'
# container (this'll be hit if supplied 'storageClass' is invalid)
if msg.stderr.find("no such table") != -1 or msg.stderr.find(
"container not found") != -1:
# We are good to delete server pods
return 0
logging.error(
logf("Failed to get size details of the "
"storage \"%s\"" % volname,
error=msg))
        # Return -1 to indicate the error
return -1
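# Illustrative sketch of the two query paths built by get_num_pvs() above,
# assuming a hypothetical volume named "spool1":
#   External: kubectl get pv -o jsonpath=...  (counts PVs whose
#             storageClassName equals "kadalu.spool1")
#   Native:   kubectl exec -i server-spool1-0-0 -c server -nkadalu -- \
#                 sqlite3 /bricks/spool1/data/brick/stat.db \
#                 "select count(pvname) from pv_stats;"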
def delete_server_pods(storage_info_data, obj):
"""
Delete server pods depending on type of Hosting
Volume and other options specified
"""
volname = obj["metadata"]["name"]
voltype = storage_info_data['type']
volumeid = storage_info_data['volume_id']
docker_user = os.environ.get("DOCKER_USER", "kadalu")
shd_required = False
if voltype in (VOLUME_TYPE_REPLICA_3, VOLUME_TYPE_REPLICA_2):
shd_required = True
template_args = {
"namespace": NAMESPACE,
"kadalu_version": VERSION,
"docker_user": docker_user,
"volname": volname,
"voltype": voltype,
"volume_id": volumeid,
"shd_required": shd_required
}
bricks = storage_info_data['bricks']
# Traverse all bricks from configmap
for brick in bricks:
idx = brick['brick_index']
template_args["host_brick_path"] = brick['host_brick_path']
template_args["kube_hostname"] = brick['kube_hostname']
template_args["serverpod_name"] = get_brick_hostname(
volname,
idx,
suffix=False
)
template_args["brick_path"] = "/bricks/%s/data/brick" % volname
template_args["brick_index"] = idx
template_args["brick_device"] = brick['brick_device']
template_args["pvc_name"] = brick['pvc_name']
template_args["brick_device_dir"] = brick['brick_device_dir']
template_args["brick_node_id"] = brick['node_id']
template_args["k8s_dist"] = K8S_DIST
filename = os.path.join(MANIFESTS_DIR, "server.yaml")
template(filename, **template_args)
lib_execute(KUBECTL_CMD, DELETE_CMD, "-f", filename)
logging.info(logf(
"Deleted Server pod",
volname=volname,
manifest=filename,
node=brick['node']
))
def delete_config_map(core_v1_client, obj):
"""
    Remove the Volume entry from the ConfigMap
"""
volname = obj["metadata"]["name"]
    # Remove the volume entry from the existing config map
configmap_data = core_v1_client.read_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE)
volinfo_file = "%s.info" % volname
configmap_data.data[volinfo_file] = None
core_v1_client.patch_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE, configmap_data)
logging.info(logf(
"Deleted configmap",
name=KADALU_CONFIG_MAP,
volname=volname
))
def delete_storage_class(hostvol_name, _):
"""
Deletes deployed External and Custom StorageClass
"""
sc_name = "kadalu." + hostvol_name
lib_execute(KUBECTL_CMD, DELETE_CMD, "sc", sc_name)
logging.info(logf(
"Deleted Storage class",
volname=hostvol_name
))
def watch_stream(core_v1_client, k8s_client):
"""
Watches kubernetes event stream for kadalustorages in Kadalu namespace
"""
crds = client.CustomObjectsApi(k8s_client)
k8s_watch = watch.Watch()
resource_version = ""
for event in k8s_watch.stream(crds.list_cluster_custom_object,
"kadalu-operator.storage",
"v1alpha1",
"kadalustorages",
resource_version=resource_version):
obj = event["object"]
operation = event['type']
spec = obj.get("spec")
if not spec:
continue
metadata = obj.get("metadata")
resource_version = metadata['resourceVersion']
logging.debug(logf("Event", operation=operation, object=repr(obj)))
if operation == "ADDED":
handle_added(core_v1_client, obj)
elif operation == "MODIFIED":
handle_modified(core_v1_client, obj)
elif operation == "DELETED":
handle_deleted(core_v1_client, obj)
def crd_watch(core_v1_client, k8s_client):
"""
Watches the CRD to provision new PV Hosting Volumes
"""
while True:
try:
watch_stream(core_v1_client, k8s_client)
except (ProtocolError, NewConnectionError):
# It might so happen that this'll be logged for every hit in k8s
# event stream in kadalu namespace and better to log at debug level
logging.debug(
logf(
"Watch connection broken and restarting watch on the stream"
))
time.sleep(30)
def deploy_csi_pods(core_v1_client):
"""
    Look for CSI pods; if any CSI pod is found, CSI is
    already deployed and only needs to be updated
"""
pods = core_v1_client.list_namespaced_pod(
NAMESPACE)
for pod in pods.items:
if pod.metadata.name.startswith(CSI_POD_PREFIX):
logging.info("Updating already deployed CSI pods")
# Deploy CSI Pods
api_instance = client.VersionApi().get_code()
int_api_instance_major = int(api_instance.major)
int_api_instance_minor = int(api_instance.minor)
if int_api_instance_major > 1 or int_api_instance_major == 1 and \
int_api_instance_minor >= 22:
filename = os.path.join(MANIFESTS_DIR, "csi-driver-object-v1.yaml")
template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
elif int_api_instance_major > 1 or int_api_instance_major == 1 and \
int_api_instance_minor >= 14:
filename = os.path.join(MANIFESTS_DIR, "csi-driver-object.yaml")
template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
else:
filename = os.path.join(MANIFESTS_DIR, "csi-driver-crd.yaml")
template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
filename = os.path.join(MANIFESTS_DIR, "csi.yaml")
docker_user = os.environ.get("DOCKER_USER", "kadalu")
template(filename, namespace=NAMESPACE, kadalu_version=VERSION,
docker_user=docker_user, k8s_dist=K8S_DIST,
kubelet_dir=KUBELET_DIR, verbose=VERBOSE,)
lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
logging.info(logf("Deployed CSI Pods", manifest=filename))
def deploy_config_map(core_v1_client):
"""Deploys the template configmap if not exists"""
configmaps = core_v1_client.list_namespaced_config_map(
NAMESPACE)
uid = uuid.uuid4()
upgrade = False
for item in configmaps.items:
if item.metadata.name == KADALU_CONFIG_MAP:
logging.info(logf(
"Found existing configmap. Updating",
name=item.metadata.name
))
# Don't overwrite UID info.
configmap_data = core_v1_client.read_namespaced_config_map(
KADALU_CONFIG_MAP, NAMESPACE)
if configmap_data.data.get("uid", None):
uid = configmap_data.data["uid"]
upgrade = True
# Keep the config details required to be preserved.
# Deploy Config map
filename = os.path.join(MANIFESTS_DIR, "configmap.yaml")
template(filename,
namespace=NAMESPACE,
kadalu_version=VERSION,
uid=uid)
if not upgrade:
lib_execute(KUBECTL_CMD, CREATE_CMD, "-f", filename)
logging.info(logf("ConfigMap Deployed", manifest=filename, uid=uid, upgrade=upgrade))
return uid, upgrade
def deploy_storage_class(obj):
"""Deploys the default and custom storage class for KaDalu if not exists"""
# Deploy defalut Storage Class
api_instance = client.StorageV1Api()
scs = api_instance.list_storage_class()
sc_names = []
for tmpl in os.listdir(MANIFESTS_DIR):
if tmpl.startswith("storageclass-") and tmpl.endswith(".j2"):
sc_names.append(
tmpl.replace("storageclass-", "").replace(".yaml.j2", "")
)
installed_scs = [item.metadata.name for item in scs.items]
for sc_name in sc_names:
filename = os.path.join(MANIFESTS_DIR, "storageclass-%s.yaml" % sc_name)
if sc_name in installed_scs:
logging.info(logf("StorageClass already present, continuing with Apply",
manifest=filename))
template(filename, namespace=NAMESPACE, kadalu_version=VERSION,
hostvol_name=obj["metadata"]["name"],
kadalu_format=obj["spec"].get("kadalu_format", "native"))
lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
logging.info(logf("Deployed StorageClass", manifest=filename))
def main():
"""Main"""
config.load_incluster_config()
# As per the issue https://github.com/kubernetes-client/python/issues/254
clnt = client.Configuration() #go and get a copy of the default config
clnt.verify_ssl = False #set verify_ssl to false in that config
client.Configuration.set_default(clnt) #make that config the default for all new clients
core_v1_client = client.CoreV1Api()
k8s_client = client.ApiClient()
# ConfigMap
uid, upgrade = deploy_config_map(core_v1_client)
# CSI Pods
deploy_csi_pods(core_v1_client)
if upgrade:
logging.info(logf("Upgrading to ", version=VERSION))
upgrade_storage_pods(core_v1_client)
# Send Analytics Tracker
# The information from this analytics is available for
# developers to understand and build project in a better
# way
send_analytics_tracker("operator", uid)
# Watch CRD
crd_watch(core_v1_client, k8s_client)
if __name__ == "__main__":
logging_setup()
    # This is not advised in general, but in kadalu's operator it is OK to
    # ignore these warnings because calls are made only from inside the
    # kubernetes cluster
urllib3.disable_warnings()
main()
| 35.108738
| 92
| 0.625187
|
b51bed45544a397abb8c24627599e8f655c7e754
| 1,989
|
py
|
Python
|
src/services/crud/room/api.py
|
b1team/trada
|
22ceaf4d50fe3a38ff402315c029e574773ca9e0
|
[
"MIT"
] | null | null | null |
src/services/crud/room/api.py
|
b1team/trada
|
22ceaf4d50fe3a38ff402315c029e574773ca9e0
|
[
"MIT"
] | 1
|
2021-03-12T15:16:03.000Z
|
2021-03-12T15:16:03.000Z
|
src/services/crud/room/api.py
|
b1team/trada
|
22ceaf4d50fe3a38ff402315c029e574773ca9e0
|
[
"MIT"
] | null | null | null |
from . import logic
from src.api.exceptions import room_errors, user_errors
from src.services.crud.users.logic import get_user_by_id
from src.services.crud.users.logic import get_user_id
def create_room(room_name: str, user_id: str):
if logic.get_room(room_name):
raise room_errors.ExistingError(obj=f"Room {room_name}")
user = get_user_by_id(user_id)
room = logic.create_room(room_name)
logic.invite_member(room.id, user.username, is_owner=True)
data = {
"room": room.to_dict(),
"owner": user.to_dict(),
}
return data
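# Illustrative sketch (editor's addition): create_room("general", user_id)
# creates the room, registers the creating user as its owner via
# invite_member(..., is_owner=True), and returns
#   {"room": <room dict>, "owner": <user dict>}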
def invite_member(room_id: str, member_name: str):
    member_id = get_user_id(member_name)
    if not member_id:
        raise user_errors.NotFoundError(obj=f"User {member_name}")
    try:
        member = logic.check_member_exists(room_id, member_id)
    except Exception:
        # A malformed room_id is reported as an invalid-id error
        raise room_errors.IdFormatError()
if member:
raise room_errors.ExistingError(obj=f"Member {member_name}")
return logic.invite_member(room_id, member_name)
def delete_room(room_id: str):
try:
room = logic.check_room_exists(room_id)
except:
raise room_errors.IdFormatError()
if room:
return logic.remove_room(room_id)
return False
def get_room_members(room_id: str):
return logic.room_members(room_id)
def delete_member(room_id: str, member_name: str):
try:
member_remove = logic.remove_member(room_id, member_name)
except:
return False
else:
return member_remove
def get_rooms(user_id: str):
try:
rooms = logic.get_user_room(user_id)
except:
raise room_errors.IdFormatError()
return rooms
def room_update(room_id: str, room_name: str, avatar: str):
try:
logic.check_room_exists(room_id)
except:
raise room_errors.IdFormatError()
return logic.update_room(room_id, room_name, avatar)
def members(room_id: str):
return logic.get_members(room_id)
| 25.5
| 70
| 0.69281
|
cdec89fd0bf04ac1e522b22d20cf1b5f60a13a18
| 1,180
|
py
|
Python
|
examples/httpbin/upload_test.py
|
qNone/HttpRunner
|
022b0920d235749b242ed9eee2e575bf04a56653
|
[
"Apache-2.0"
] | 1
|
2021-06-21T11:17:01.000Z
|
2021-06-21T11:17:01.000Z
|
examples/httpbin/upload_test.py
|
qNone/HttpRunner
|
022b0920d235749b242ed9eee2e575bf04a56653
|
[
"Apache-2.0"
] | null | null | null |
examples/httpbin/upload_test.py
|
qNone/HttpRunner
|
022b0920d235749b242ed9eee2e575bf04a56653
|
[
"Apache-2.0"
] | null | null | null |
# NOTE: Generated By HttpRunner v3.1.2
# FROM: upload.yml
from httprunner import HttpRunner, Config, Step, RunRequest, RunTestCase
class TestCaseUpload(HttpRunner):
config = Config("test upload file with httpbin").base_url("${get_httpbin_server()}")
teststeps = [
Step(
RunRequest("upload file")
.with_variables(
**{
"file_path": "test.env",
"m_encoder": "${multipart_encoder(file=$file_path)}",
}
)
.post("/post")
.with_headers(**{"Content-Type": "${multipart_content_type($m_encoder)}"})
.with_data("$m_encoder")
.validate()
.assert_equal("status_code", 200)
.assert_startswith("body.files.file", "UserName=test")
),
Step(
RunRequest("upload file with keyword")
.post("/post")
.upload(**{"file": "test.env"})
.validate()
.assert_equal("status_code", 200)
.assert_startswith("body.files.file", "UserName=test")
),
]
if __name__ == "__main__":
TestCaseUpload().test_start()
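# Editor's note (assumption): this generated test is normally collected by
# pytest; it can also be run via the HttpRunner CLI, e.g. `hrun upload_test.py`,
# provided the referenced test.env file and the debugtalk.py helpers
# (get_httpbin_server, multipart_encoder, multipart_content_type) are present.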
| 30.25641
| 88
| 0.534746
|
c58c97c721d9f83a5c1c1576d564800035d5b24b
| 1,068
|
py
|
Python
|
kubernetes/test/test_v1_service_port.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_service_port.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_service_port.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
    OpenAPI spec version: v1.15.6
    Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_service_port import V1ServicePort # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1ServicePort(unittest.TestCase):
"""V1ServicePort unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1ServicePort(self):
"""Test V1ServicePort"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_service_port.V1ServicePort() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.217391
| 124
| 0.694757
|
c2854046229af13bd0f5223c9c0a6c6350659982
| 1,223
|
py
|
Python
|
strings/longest_substring_without_duplication.py
|
maanavshah/coding-interview
|
4c842cdbc6870da79684635f379966d1caec2162
|
[
"MIT"
] | null | null | null |
strings/longest_substring_without_duplication.py
|
maanavshah/coding-interview
|
4c842cdbc6870da79684635f379966d1caec2162
|
[
"MIT"
] | null | null | null |
strings/longest_substring_without_duplication.py
|
maanavshah/coding-interview
|
4c842cdbc6870da79684635f379966d1caec2162
|
[
"MIT"
] | null | null | null |
# O(n * k) time | O(n) space - restart-based approach (k = longest substring length)
def longestSubstringWithoutDuplication(string):
    currentChars = {}
    currentSubstring = []
    longestSubstring = []
    maxLongest = 0
    i = 0
    while i < len(string):
        if string[i] in currentChars:
            if maxLongest < len(currentSubstring):
                maxLongest = len(currentSubstring)
                longestSubstring = currentSubstring
            # restart scanning right after the previous occurrence of the
            # duplicate character, with a fresh window
            index = currentChars[string[i]] + 1
            currentChars = {}
            currentSubstring = []
            i = index
        else:
            currentChars[string[i]] = i
            currentSubstring.append(string[i])
            i += 1
    if maxLongest < len(currentSubstring):
        longestSubstring = currentSubstring
    return ''.join(longestSubstring)
def longestSubstringWithoutDuplication(string):
startIdx = 0
longest = [0, 1]
lastSeen = dict()
for idx, s in enumerate(string):
if s in lastSeen:
startIdx = max(startIdx, lastSeen[s] + 1)
if longest[1] - longest[0] < idx + 1 - startIdx:
longest[0] = startIdx
longest[1] = idx + 1
lastSeen[s] = idx
return string[longest[0]: longest[1]]
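# Illustrative usage (editor's addition):
#   longestSubstringWithoutDuplication("clementisacap")  # -> "mentisac"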
| 32.184211
| 56
| 0.58054
|
5e4098ce87208a08c94609f13aba78d4b7963348
| 9,750
|
py
|
Python
|
behavior_regularized_offline_rl/brac/sac_agent.py
|
rmitra/google-research
|
ddc22300c4cb3223654c9a981f892dc0f6286e35
|
[
"Apache-2.0"
] | 1
|
2020-03-05T09:34:44.000Z
|
2020-03-05T09:34:44.000Z
|
behavior_regularized_offline_rl/brac/sac_agent.py
|
robot-ai-machinelearning/google-research
|
88481d10a87947ffb9305dc7665682e008b27391
|
[
"Apache-2.0"
] | null | null | null |
behavior_regularized_offline_rl/brac/sac_agent.py
|
robot-ai-machinelearning/google-research
|
88481d10a87947ffb9305dc7665682e008b27391
|
[
"Apache-2.0"
] | 1
|
2020-03-05T09:24:01.000Z
|
2020-03-05T09:24:01.000Z
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Soft Actor Critic Agent.
Based on 'Soft Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement
Learning with a Stochastic Actor' by Tuomas Haarnoja, et al.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gin
import tensorflow.compat.v1 as tf
from behavior_regularized_offline_rl.brac import agent
from behavior_regularized_offline_rl.brac import networks
from behavior_regularized_offline_rl.brac import policies
from behavior_regularized_offline_rl.brac import utils
@gin.configurable
class Agent(agent.Agent):
"""SAC Agent."""
def __init__(
self,
target_entropy=None,
ensemble_q_lambda=1.0,
**kwargs):
self._ensemble_q_lambda = ensemble_q_lambda
self._target_entropy = target_entropy
super(Agent, self).__init__(**kwargs)
def _build_fns(self):
self._agent_module = AgentModule(modules=self._modules)
if self._target_entropy is None:
self._target_entropy = - self._action_spec.shape[0]
self._q_fns = self._agent_module.q_nets
self._p_fn = self._agent_module.p_fn
self._log_alpha = self._agent_module.log_alpha
def _get_q_vars(self):
return self._agent_module.q_source_variables
def _get_p_vars(self):
return self._agent_module.p_variables
def _get_q_weight_norm(self):
weights = self._agent_module.q_source_weights
norms = []
for w in weights:
norm = tf.reduce_sum(tf.square(w))
norms.append(norm)
return tf.add_n(norms)
def _get_p_weight_norm(self):
weights = self._agent_module.p_weights
norms = []
for w in weights:
norm = tf.reduce_sum(tf.square(w))
norms.append(norm)
return tf.add_n(norms)
def ensemble_q(self, qs):
lambda_ = self._ensemble_q_lambda
return (lambda_ * tf.reduce_min(qs, axis=-1)
+ (1 - lambda_) * tf.reduce_max(qs, axis=-1))
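  # Worked example (editor's illustration): with ensemble_q_lambda = 0.75 and
  # per-critic values qs = [1.0, 3.0] along the last axis, ensemble_q returns
  # 0.75 * 1.0 + 0.25 * 3.0 = 1.5, a pessimism-weighted blend of the minimum
  # and maximum Q estimates.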
def _ensemble_q2_target(self, q2_targets):
return self.ensemble_q(q2_targets)
def _ensemble_q1(self, q1s):
return self.ensemble_q(q1s)
def _build_q_loss(self, batch):
s1 = batch['s1']
s2 = batch['s2']
a = batch['a1']
r = batch['r']
dsc = batch['dsc']
_, a2, log_pi_a2 = self._p_fn(s2)
q2_targets = []
q1_preds = []
for q_fn, q_fn_target in self._q_fns:
q2_target_ = q_fn_target(s2, a2)
q1_pred = q_fn(s1, a)
q1_preds.append(q1_pred)
q2_targets.append(q2_target_)
q2_targets = tf.stack(q2_targets, axis=-1)
q2_target = self._ensemble_q2_target(q2_targets)
v2_target = q2_target - tf.exp(self._log_alpha) * log_pi_a2
q1_target = tf.stop_gradient(r + dsc * self._discount * v2_target)
q_losses = []
for q1_pred in q1_preds:
q_loss_ = tf.reduce_mean(tf.square(q1_pred - q1_target))
q_losses.append(q_loss_)
q_loss = tf.add_n(q_losses)
q_w_norm = self._get_q_weight_norm()
norm_loss = self._weight_decays[0] * q_w_norm
loss = q_loss + norm_loss
info = collections.OrderedDict()
info['q_loss'] = q_loss
info['q_norm'] = q_w_norm
info['r_mean'] = tf.reduce_mean(r)
info['dsc_mean'] = tf.reduce_mean(dsc)
info['q1_target_mean'] = tf.reduce_mean(q1_target)
return loss, info
def _build_p_loss(self, batch):
s = batch['s1']
_, a, log_pi_a = self._p_fn(s)
q1s = []
for q_fn, _ in self._q_fns:
q1_ = q_fn(s, a)
q1s.append(q1_)
q1s = tf.stack(q1s, axis=-1)
q1 = self._ensemble_q1(q1s)
p_loss = tf.reduce_mean(tf.exp(self._log_alpha) * log_pi_a - q1)
p_w_norm = self._get_p_weight_norm()
norm_loss = self._weight_decays[1] * p_w_norm
loss = p_loss + norm_loss
info = collections.OrderedDict()
info['p_loss'] = p_loss
info['p_norm'] = p_w_norm
return loss, info
def _build_a_loss(self, batch):
s = batch['s1']
_, _, log_pi_a = self._p_fn(s)
alpha = tf.exp(self._log_alpha)
a_loss = tf.reduce_mean(alpha * (-log_pi_a - self._target_entropy))
info = collections.OrderedDict()
info['a_loss'] = a_loss
info['alpha'] = alpha
return a_loss, info
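  # Editor's note (sketch): the loss above implements the automatic entropy
  # tuning objective J(alpha) = E[alpha * (-log pi(a|s) - target_entropy)];
  # minimizing it increases alpha when the policy entropy drops below
  # target_entropy (default -|A|, see _build_fns) and decreases it otherwise.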
def _get_source_target_vars(self):
return (self._agent_module.q_source_variables,
self._agent_module.q_target_variables)
def _build_optimizers(self):
opts = self._optimizers
if len(opts) == 1:
opts = tuple([opts[0]] * 3)
elif len(opts) < 3:
raise ValueError('Bad optimizers %s.' % opts)
self._q_optimizer = utils.get_optimizer(opts[0][0])(lr=opts[0][1])
self._p_optimizer = utils.get_optimizer(opts[1][0])(lr=opts[1][1])
self._a_optimizer = utils.get_optimizer(opts[2][0])(lr=opts[2][1])
if len(self._weight_decays) == 1:
self._weight_decays = tuple([self._weight_decays[0]] * 3)
@tf.function
def _optimize_step(self, batch):
info = collections.OrderedDict()
if tf.equal(self._global_step % self._update_freq, 0):
source_vars, target_vars = self._get_source_target_vars()
self._update_target_fns(source_vars, target_vars)
q_info = self._optimize_q(batch)
p_info = self._optimize_p(batch)
a_info = self._optimize_a(batch)
info.update(p_info)
info.update(q_info)
info.update(a_info)
return info
def _optimize_q(self, batch):
vars_ = self._q_vars
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(vars_)
loss, info = self._build_q_loss(batch)
grads = tape.gradient(loss, vars_)
grads_and_vars = tuple(zip(grads, vars_))
self._q_optimizer.apply_gradients(grads_and_vars)
return info
def _optimize_p(self, batch):
vars_ = self._p_vars
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(vars_)
loss, info = self._build_p_loss(batch)
grads = tape.gradient(loss, vars_)
grads_and_vars = tuple(zip(grads, vars_))
self._p_optimizer.apply_gradients(grads_and_vars)
return info
def _optimize_a(self, batch):
vars_ = [self._log_alpha]
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(vars_)
loss, info = self._build_a_loss(batch)
grads = tape.gradient(loss, vars_)
grads_and_vars = tuple(zip(grads, vars_))
self._a_optimizer.apply_gradients(grads_and_vars)
return info
def _build_test_policies(self):
policy = policies.DeterministicSoftPolicy(
a_network=self._agent_module.p_net)
self._test_policies['main'] = policy
def _build_online_policy(self):
return policies.RandomSoftPolicy(
a_network=self._agent_module.p_net,
)
def _init_vars(self, batch):
self._build_q_loss(batch)
self._build_p_loss(batch)
self._q_vars = self._get_q_vars()
self._p_vars = self._get_p_vars()
def _build_checkpointer(self):
return tf.train.Checkpoint(
policy=self._agent_module.p_net,
agent=self._agent_module)
class AgentModule(agent.AgentModule):
"""Tensorflow modules for SAC agent."""
def _build_modules(self):
self._q_nets = []
n_q_fns = self._modules.n_q_fns
for _ in range(n_q_fns):
self._q_nets.append(
[self._modules.q_net_factory(), # Learned Q-value.
self._modules.q_net_factory(),] # Target Q-value.
)
self._p_net = self._modules.p_net_factory()
self._log_alpha = tf.Variable(0.0)
@property
def log_alpha(self):
return self._log_alpha
@property
def q_nets(self):
return self._q_nets
@property
def q_source_weights(self):
q_weights = []
for q_net, _ in self._q_nets:
q_weights += q_net.weights
return q_weights
@property
def q_target_weights(self):
q_weights = []
for _, q_net in self._q_nets:
q_weights += q_net.weights
return q_weights
@property
def q_source_variables(self):
vars_ = []
for q_net, _ in self._q_nets:
vars_ += q_net.trainable_variables
return tuple(vars_)
@property
def q_target_variables(self):
vars_ = []
for _, q_net in self._q_nets:
vars_ += q_net.trainable_variables
return tuple(vars_)
@property
def p_net(self):
return self._p_net
def p_fn(self, s):
return self._p_net(s)
@property
def p_weights(self):
return self._p_net.weights
@property
def p_variables(self):
return self._p_net.trainable_variables
def get_modules(model_params, action_spec):
"""Creates modules for Q-value and policy."""
model_params, n_q_fns = model_params
if len(model_params) == 1:
model_params = tuple([model_params[0]] * 2)
elif len(model_params) < 2:
raise ValueError('Bad model parameters %s.' % model_params)
def q_net_factory():
return networks.CriticNetwork(
fc_layer_params=model_params[0])
def p_net_factory():
return networks.ActorNetwork(
action_spec,
fc_layer_params=model_params[1])
modules = utils.Flags(
q_net_factory=q_net_factory,
p_net_factory=p_net_factory,
n_q_fns=n_q_fns,
)
return modules
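# Illustrative sketch (editor's note): model_params is expected to be a pair
# (fc_layer_params, n_q_fns); e.g. (((300, 300),), 2) builds two critics and an
# actor that all use (300, 300) hidden layers, while (((400, 300), (300, 200)), 2)
# gives the critics and the actor different layer sizes.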
class Config(agent.Config):
def _get_modules(self):
return get_modules(
self._agent_flags.model_params,
self._agent_flags.action_spec)
| 29.36747
| 74
| 0.693436
|
d74af98820a660192e4f34ec99f7b86557d38f2d
| 152
|
py
|
Python
|
t14.py
|
mahendra1904/pythod-programs
|
d4d75dac65e9795ea5728f75d90aa0b39296b25e
|
[
"bzip2-1.0.6"
] | null | null | null |
t14.py
|
mahendra1904/pythod-programs
|
d4d75dac65e9795ea5728f75d90aa0b39296b25e
|
[
"bzip2-1.0.6"
] | null | null | null |
t14.py
|
mahendra1904/pythod-programs
|
d4d75dac65e9795ea5728f75d90aa0b39296b25e
|
[
"bzip2-1.0.6"
] | null | null | null |
import statistics
tup = eval(input("Enter a tuple :-"))
sum = sum(tup)
print("Average =", sum / len( tup ))
print("Mean =", statistics.mean( tup ) )
| 19
| 40
| 0.625
|
2eda9878f981928e3eacf5ff3089d0654e08412c
| 33,496
|
py
|
Python
|
tests/jenkins.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2021-08-14T13:48:38.000Z
|
2021-08-14T13:48:38.000Z
|
tests/jenkins.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 3
|
2015-03-31T14:44:05.000Z
|
2015-06-18T19:02:24.000Z
|
tests/jenkins.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2020-01-02T09:03:24.000Z
|
2020-01-02T09:03:24.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script is used to test Salt from a Jenkins server, specifically
jenkins.saltstack.com.
This script is intended to be shell-centric!!
'''
# Import python libs
from __future__ import absolute_import, print_function
import binascii
import glob
import os
import re
import sys
import json
import time
import shutil
import optparse
import subprocess
import random
# Import Salt libs
import salt.utils.files
import salt.utils.stringutils
try:
from salt.utils.nb_popen import NonBlockingPopen
except ImportError:
# Salt not installed, or nb_popen was not yet shipped with it
SALT_LIB = os.path.abspath(
os.path.dirname(os.path.dirname(__file__))
)
if SALT_LIB not in sys.path:
sys.path.insert(0, SALT_LIB)
try:
# Let's try using the current checked out code
from salt.utils.nb_popen import NonBlockingPopen
except ImportError:
# Still an ImportError??? Let's use some "brute-force"
sys.path.insert(
0,
os.path.join(SALT_LIB, 'salt', 'utils')
)
from nb_popen import NonBlockingPopen
# Import 3rd-party libs
import yaml
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
SALT_GIT_URL = 'https://github.com/saltstack/salt.git'
def build_pillar_data(options):
'''
Build a YAML formatted string to properly pass pillar data
'''
pillar = {'test_transport': options.test_transport,
'cloud_only': options.cloud_only,
'with_coverage': options.test_without_coverage is False}
if options.test_git_commit is not None:
pillar['test_git_commit'] = options.test_git_commit
if options.test_git_url is not None:
pillar['test_git_url'] = options.test_git_url
if options.bootstrap_salt_url is not None:
pillar['bootstrap_salt_url'] = options.bootstrap_salt_url
if options.bootstrap_salt_commit is not None:
pillar['bootstrap_salt_commit'] = options.bootstrap_salt_commit
if options.package_source_dir:
pillar['package_source_dir'] = options.package_source_dir
if options.package_build_dir:
pillar['package_build_dir'] = options.package_build_dir
if options.package_artifact_dir:
pillar['package_artifact_dir'] = options.package_artifact_dir
if options.pillar:
pillar.update(dict(options.pillar))
    return yaml.dump(pillar, default_flow_style=True, indent=0, width=sys.maxsize).rstrip()
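# Illustrative sketch: for a run with test_transport='zeromq', cloud_only=False
# and coverage enabled, build_pillar_data() returns a single-line flow-style
# YAML string such as
#   "{cloud_only: false, test_transport: zeromq, with_coverage: true}"
# which is later passed verbatim as pillar="..." to the state.sls calls below.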
def build_minion_target(options, vm_name):
target = vm_name
for grain in options.grain_target:
target += ' and G@{0}'.format(grain)
if options.grain_target:
return '"{0}"'.format(target)
return target
def generate_vm_name(options):
'''
Generate a random enough vm name
'''
if 'BUILD_NUMBER' in os.environ:
random_part = 'BUILD{0:0>6}'.format(os.environ.get('BUILD_NUMBER'))
else:
        random_part = binascii.hexlify(os.urandom(3)).decode('ascii')
return '{0}-{1}-{2}'.format(options.vm_prefix, options.platform, random_part)
def delete_vm(options):
'''
Stop a VM
'''
cmd = 'salt-cloud -d {0} -y'.format(options.delete_vm)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
def echo_parseable_environment(options, parser):
'''
Echo NAME=VAL parseable output
'''
output = []
if options.platform:
name = generate_vm_name(options)
output.extend([
'JENKINS_SALTCLOUD_VM_PLATFORM={0}'.format(options.platform),
'JENKINS_SALTCLOUD_VM_NAME={0}'.format(name)
])
if options.provider:
output.append(
'JENKINS_SALTCLOUD_VM_PROVIDER={0}'.format(options.provider)
)
if options.pull_request:
# This is a Jenkins triggered Pull Request
# We need some more data about the Pull Request available to the
# environment
if HAS_REQUESTS is False:
parser.error(
'The python \'requests\' library needs to be installed'
)
headers = {}
url = 'https://api.github.com/repos/saltstack/salt/pulls/{0}'.format(options.pull_request)
github_access_token_path = os.path.join(
os.environ.get('JENKINS_HOME', os.path.expanduser('~')),
'.github_token'
)
if os.path.isfile(github_access_token_path):
with salt.utils.files.fopen(github_access_token_path) as rfh:
headers = {
'Authorization': 'token {0}'.format(rfh.read().strip())
}
http_req = requests.get(url, headers=headers)
if http_req.status_code != 200:
parser.error(
'Unable to get the pull request: {0[message]}'.format(http_req.json())
)
pr_details = http_req.json()
output.extend([
'SALT_PR_GIT_URL={0}'.format(pr_details['head']['repo']['clone_url']),
'SALT_PR_GIT_BRANCH={0}'.format(pr_details['head']['ref']),
'SALT_PR_GIT_COMMIT={0}'.format(pr_details['head']['sha']),
'SALT_PR_GIT_BASE_BRANCH={0}'.format(pr_details['base']['ref']),
])
sys.stdout.write('\n\n{0}\n\n'.format('\n'.join(output)))
sys.stdout.flush()
def download_unittest_reports(options):
print('Downloading remote unittest reports...')
sys.stdout.flush()
workspace = options.workspace
xml_reports_path = os.path.join(workspace, 'xml-test-reports')
if os.path.isdir(xml_reports_path):
shutil.rmtree(xml_reports_path)
os.makedirs(xml_reports_path)
cmds = (
'salt {0} archive.tar zcvf /tmp/xml-test-reports.tar.gz \'*.xml\' cwd=/tmp/xml-unittests-output/',
'salt {0} cp.push /tmp/xml-test-reports.tar.gz',
'mv -f /var/cache/salt/master/minions/{1}/files/tmp/xml-test-reports.tar.gz {2} && '
'tar zxvf {2}/xml-test-reports.tar.gz -C {2}/xml-test-reports && '
'rm -f {2}/xml-test-reports.tar.gz'
)
vm_name = options.download_unittest_reports
for cmd in cmds:
cmd = cmd.format(build_minion_target(options, vm_name), vm_name, workspace)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
if proc.returncode != 0:
print(
'\nFailed to execute command. Exit code: {0}'.format(
proc.returncode
)
)
time.sleep(0.25)
def download_coverage_report(options):
print('Downloading remote coverage report...')
sys.stdout.flush()
workspace = options.workspace
vm_name = options.download_coverage_report
if os.path.isfile(os.path.join(workspace, 'coverage.xml')):
os.unlink(os.path.join(workspace, 'coverage.xml'))
cmds = (
'salt {0} archive.gzip /tmp/coverage.xml',
'salt {0} cp.push /tmp/coverage.xml.gz',
'gunzip /var/cache/salt/master/minions/{1}/files/tmp/coverage.xml.gz',
'mv /var/cache/salt/master/minions/{1}/files/tmp/coverage.xml {2}'
)
for cmd in cmds:
cmd = cmd.format(build_minion_target(options, vm_name), vm_name, workspace)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
if proc.returncode != 0:
print(
'\nFailed to execute command. Exit code: {0}'.format(
proc.returncode
)
)
time.sleep(0.25)
def download_remote_logs(options):
print('Downloading remote logs...')
sys.stdout.flush()
workspace = options.workspace
vm_name = options.download_remote_logs
for fname in ('salt-runtests.log', 'minion.log'):
if os.path.isfile(os.path.join(workspace, fname)):
os.unlink(os.path.join(workspace, fname))
if not options.remote_log_path:
options.remote_log_path = [
'/tmp/salt-runtests.log',
'/var/log/salt/minion'
]
cmds = []
for remote_log in options.remote_log_path:
cmds.extend([
'salt {{0}} archive.gzip {0}'.format(remote_log),
'salt {{0}} cp.push {0}.gz'.format(remote_log),
'gunzip /var/cache/salt/master/minions/{{1}}/files{0}.gz'.format(remote_log),
'mv /var/cache/salt/master/minions/{{1}}/files{0} {{2}}/{1}'.format(
remote_log,
'{0}{1}'.format(
os.path.basename(remote_log),
'' if remote_log.endswith('.log') else '.log'
)
)
])
for cmd in cmds:
cmd = cmd.format(build_minion_target(options, vm_name), vm_name, workspace)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
if proc.returncode != 0:
print(
'\nFailed to execute command. Exit code: {0}'.format(
proc.returncode
)
)
time.sleep(0.25)
def download_packages(options):
print('Downloading packages...')
sys.stdout.flush()
workspace = options.workspace
vm_name = options.download_packages
for fglob in ('salt-*.rpm',
'salt-*.deb',
'salt-*.pkg.xz',
'salt-buildpackage.log'):
for fname in glob.glob(os.path.join(workspace, fglob)):
if os.path.isfile(fname):
os.unlink(fname)
cmds = [
('salt {{0}} archive.tar czf {0}.tar.gz sources=\'*.*\' cwd={0}'
.format(options.package_artifact_dir)),
'salt {{0}} cp.push {0}.tar.gz'.format(options.package_artifact_dir),
('tar -C {{2}} -xzf /var/cache/salt/master/minions/{{1}}/files{0}.tar.gz'
.format(options.package_artifact_dir)),
]
for cmd in cmds:
cmd = cmd.format(build_minion_target(options, vm_name), vm_name, workspace)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
if proc.returncode != 0:
print(
'\nFailed to execute command. Exit code: {0}'.format(
proc.returncode
)
)
time.sleep(0.25)
def run(opts):
'''
RUN!
'''
vm_name = os.environ.get(
'JENKINS_SALTCLOUD_VM_NAME',
generate_vm_name(opts)
)
if opts.download_remote_reports:
if opts.test_without_coverage is False:
opts.download_coverage_report = vm_name
opts.download_unittest_reports = vm_name
opts.download_packages = vm_name
if opts.bootstrap_salt_commit is not None:
if opts.bootstrap_salt_url is None:
opts.bootstrap_salt_url = 'https://github.com/saltstack/salt.git'
cmd = (
'salt-cloud -l debug'
' --script-args "-D -g {bootstrap_salt_url} -n git {1}"'
' -p {provider}_{platform} {0}'.format(
vm_name,
os.environ.get(
'SALT_MINION_BOOTSTRAP_RELEASE',
opts.bootstrap_salt_commit
),
**opts.__dict__
)
)
else:
cmd = (
'salt-cloud -l debug'
' --script-args "-D -n git {1}" -p {provider}_{platform} {0}'.format(
vm_name,
os.environ.get(
'SALT_MINION_BOOTSTRAP_RELEASE',
opts.bootstrap_salt_commit
),
**opts.__dict__
)
)
if opts.splay is not None:
# Sleep a random number of seconds
cloud_downtime = random.randint(0, opts.splay)
print('Sleeping random period before calling salt-cloud: {0}'.format(cloud_downtime))
time.sleep(cloud_downtime)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
retcode = proc.returncode
if retcode != 0:
print('Failed to bootstrap VM. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
print('VM Bootstrapped. Exit code: {0}'.format(retcode))
sys.stdout.flush()
# Sleep a random number of seconds
bootstrap_downtime = random.randint(0, opts.splay)
print('Sleeping for {0} seconds to allow the minion to breathe a little'.format(bootstrap_downtime))
sys.stdout.flush()
time.sleep(bootstrap_downtime)
if opts.bootstrap_salt_commit is not None:
# Let's find out if the installed version matches the passed in pillar
# information
print('Grabbing bootstrapped minion version information ... ')
cmd = 'salt -t 100 {0} --out json test.version'.format(build_minion_target(opts, vm_name))
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _ = proc.communicate()
retcode = proc.returncode
if retcode != 0:
print('Failed to get the bootstrapped minion version. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
outstr = salt.utils.stringutils.to_str(stdout).strip()
if not outstr:
print('Failed to get the bootstrapped minion version(no output). Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
try:
version_info = json.loads(outstr)
bootstrap_minion_version = os.environ.get(
'SALT_MINION_BOOTSTRAP_RELEASE',
opts.bootstrap_salt_commit[:7]
)
print('Minion reported salt version: {0}'.format(version_info))
if bootstrap_minion_version not in version_info[vm_name]:
print('\n\nATTENTION!!!!\n')
            print('The bootstrapped minion version commit does not contain the desired commit:')
print(
' \'{0}\' does not contain \'{1}\''.format(
version_info[vm_name],
bootstrap_minion_version
)
)
print('\n\n')
sys.stdout.flush()
#if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
# delete_vm(opts)
#sys.exit(retcode)
else:
print('matches!')
except ValueError:
print('Failed to load any JSON from \'{0}\''.format(outstr))
if opts.cloud_only:
# Run Cloud Provider tests preparation SLS
cloud_provider_downtime = random.randint(3, opts.splay)
time.sleep(cloud_provider_downtime)
cmd = (
'salt -t 900 {target} state.sls {cloud_prep_sls} pillar="{pillar}" '
'--no-color'.format(
target=build_minion_target(opts, vm_name),
cloud_prep_sls='cloud-only',
pillar=build_pillar_data(opts),
)
)
else:
# Run standard preparation SLS
standard_sls_downtime = random.randint(3, opts.splay)
time.sleep(standard_sls_downtime)
cmd = (
'salt -t 1800 {target} state.sls {prep_sls} pillar="{pillar}" '
'--no-color'.format(
target=build_minion_target(opts, vm_name),
prep_sls=opts.prep_sls,
pillar=build_pillar_data(opts),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = proc.communicate()
if stdout:
print(salt.utils.stringutils.to_str(stdout))
if stderr:
print(salt.utils.stringutils.to_str(stderr))
sys.stdout.flush()
retcode = proc.returncode
if retcode != 0:
print('Failed to execute the preparation SLS file. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
if opts.cloud_only:
cloud_provider_pillar = random.randint(3, opts.splay)
time.sleep(cloud_provider_pillar)
# Run Cloud Provider tests pillar preparation SLS
cmd = (
'salt -t 600 {target} state.sls {cloud_prep_sls} pillar="{pillar}" '
'--no-color'.format(
target=build_minion_target(opts, vm_name),
cloud_prep_sls='cloud-test-configs',
pillar=build_pillar_data(opts),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, stderr = proc.communicate()
if stdout:
# DO NOT print the state return here!
print('Cloud configuration files provisioned via pillar.')
if stderr:
print(salt.utils.stringutils.to_str(stderr))
sys.stdout.flush()
retcode = proc.returncode
if retcode != 0:
print('Failed to execute the preparation SLS file. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
if opts.prep_sls_2 is not None:
sls_2_downtime = random.randint(3, opts.splay)
time.sleep(sls_2_downtime)
# Run the 2nd preparation SLS
cmd = (
'salt -t 30 {target} state.sls {prep_sls_2} pillar="{pillar}" '
'--no-color'.format(
prep_sls_2=opts.prep_sls_2,
pillar=build_pillar_data(opts),
target=build_minion_target(opts, vm_name),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, stderr = proc.communicate()
if stdout:
print(salt.utils.stringutils.to_str(stdout))
if stderr:
print(salt.utils.stringutils.to_str(stderr))
sys.stdout.flush()
retcode = proc.returncode
if retcode != 0:
print('Failed to execute the 2nd preparation SLS file. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
# Run remote checks
if opts.test_git_url is not None:
test_git_downtime = random.randint(1, opts.splay)
time.sleep(test_git_downtime)
        # Let's find out if the cloned repository is checked out from the
# desired repository
print('Grabbing the cloned repository remotes information ... ')
cmd = 'salt -t 100 {0} --out json git.remote_get /testing'.format(build_minion_target(opts, vm_name))
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _ = proc.communicate()
retcode = proc.returncode
if retcode != 0:
print('Failed to get the cloned repository remote. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
if not stdout:
print('Failed to get the cloned repository remote(no output). Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
try:
remotes_info = json.loads(stdout.strip())
if remotes_info is None or remotes_info[vm_name] is None or opts.test_git_url not in remotes_info[vm_name]:
print('The cloned repository remote is not the desired one:')
print(' \'{0}\' is not in {1}'.format(opts.test_git_url, remotes_info))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
print('matches!')
except ValueError:
print('Failed to load any JSON from \'{0}\''.format(salt.utils.stringutils.to_str(stdout).strip()))
if opts.test_git_commit is not None:
test_git_commit_downtime = random.randint(1, opts.splay)
time.sleep(test_git_commit_downtime)
# Let's find out if the cloned repository is checked out at the desired
# commit
print('Grabbing the cloned repository commit information ... ')
cmd = 'salt -t 100 {0} --out json git.revision /testing'.format(build_minion_target(opts, vm_name))
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _ = proc.communicate()
sys.stdout.flush()
retcode = proc.returncode
if retcode != 0:
print('Failed to get the cloned repository revision. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
if not stdout:
print('Failed to get the cloned repository revision(no output). Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
try:
revision_info = json.loads(stdout.strip())
            if revision_info[vm_name][:7] != opts.test_git_commit[:7]:
print('The cloned repository commit is not the desired one:')
print(' \'{0}\' != \'{1}\''.format(revision_info[vm_name][:7], opts.test_git_commit[:7]))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
print('matches!')
except ValueError:
print('Failed to load any JSON from \'{0}\''.format(salt.utils.stringutils.to_str(stdout).strip()))
# Run tests here
test_begin_downtime = random.randint(3, opts.splay)
time.sleep(test_begin_downtime)
cmd = (
'salt -t 1800 {target} state.sls {sls} pillar="{pillar}" --no-color'.format(
sls=opts.sls,
pillar=build_pillar_data(opts),
target=build_minion_target(opts, vm_name),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = proc.communicate()
outstr = salt.utils.stringutils.to_str(stdout)
if outstr:
print(outstr)
if stderr:
print(salt.utils.stringutils.to_str(stderr))
sys.stdout.flush()
try:
match = re.search(r'Test Suite Exit Code: (?P<exitcode>[\d]+)', outstr)
retcode = int(match.group('exitcode'))
except AttributeError:
# No regex matching
retcode = 1
except ValueError:
# Not a number!?
retcode = 1
except TypeError:
# No output!?
retcode = 1
if outstr:
# Anything else, raise the exception
raise
if retcode == 0:
# Build packages
time.sleep(3)
cmd = (
'salt -t 1800 {target} state.sls buildpackage pillar="{pillar}" --no-color'.format(
pillar=build_pillar_data(opts),
target=build_minion_target(opts, vm_name),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = proc.communicate()
if stdout:
print(salt.utils.stringutils.to_str(stdout))
if stderr:
print(salt.utils.stringutils.to_str(stderr))
sys.stdout.flush()
# Grab packages and log file (or just log file if build failed)
download_packages(opts)
if opts.download_remote_reports:
# Download unittest reports
download_unittest_reports(opts)
# Download coverage report
if opts.test_without_coverage is False:
download_coverage_report(opts)
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
return retcode
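# Illustrative helper (not part of the original script): shows, in isolation, the
# "Test Suite Exit Code" extraction pattern used in run() above. The sample usage is
# hypothetical; only the regex and the fall-back-to-1 behaviour mirror the logic above.
def _example_extract_exit_code(output):
    '''Return the integer following "Test Suite Exit Code:" in output, or 1 on any failure.'''
    try:
        match = re.search(r'Test Suite Exit Code: (?P<exitcode>[\d]+)', output)
        return int(match.group('exitcode'))
    except (AttributeError, ValueError, TypeError):
        # No regex match, not a number, or no output at all
        return 1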
def parse():
'''
Parse the CLI options
'''
parser = optparse.OptionParser()
parser.add_option(
'--vm-prefix',
default=os.environ.get('JENKINS_VM_NAME_PREFIX', 'ZJENKINS'),
help='The bootstrapped machine name prefix'
)
parser.add_option(
'-w', '--workspace',
default=os.path.abspath(
os.environ.get(
'WORKSPACE',
os.path.dirname(os.path.dirname(__file__))
)
),
        help='Path to the execution workspace'
)
parser.add_option(
'--platform',
default=os.environ.get('JENKINS_SALTCLOUD_VM_PLATFORM', None),
help='The target platform, choose from:\ncent6\ncent5\nubuntu12.04')
parser.add_option(
'--provider',
default=os.environ.get('JENKINS_SALTCLOUD_VM_PROVIDER', None),
help='The vm provider')
parser.add_option(
'--bootstrap-salt-url',
default=None,
        help='The salt git repository url used to bootstrap a minion')
parser.add_option(
'--bootstrap-salt-commit',
default=None,
        help='The salt git commit used to bootstrap a minion')
parser.add_option(
'--test-git-url',
default=None,
help='The testing git repository url')
parser.add_option(
'--test-git-commit',
default=None,
help='The testing git commit to track')
parser.add_option(
'--test-transport',
default='zeromq',
choices=('zeromq', 'raet', 'tcp'),
help=('Select which transport to run the integration tests with, '
'zeromq, raet, or tcp. Default: %default')
)
parser.add_option(
'--test-without-coverage',
default=False,
action='store_true',
help='Do not generate coverage reports'
)
parser.add_option(
'--prep-sls',
default='git.salt',
help='The sls file to execute to prepare the system')
parser.add_option(
'--prep-sls-2',
default=None,
help='An optional 2nd system preparation SLS')
parser.add_option(
'--sls',
default='testrun-no-deps',
help='The final sls file to execute')
parser.add_option(
'--pillar',
action='append',
nargs=2,
help='Pillar (key, value)s to pass to the sls file. '
'Example: \'--pillar pillar_key pillar_value\'')
parser.add_option(
'--no-clean',
dest='clean',
default=True,
action='store_false',
help='Clean up the built vm')
parser.add_option(
'--echo-parseable-environment',
default=False,
action='store_true',
help='Print a parseable KEY=VAL output'
)
parser.add_option(
'--pull-request',
type=int,
help='Include the PR info only'
)
parser.add_option(
'--delete-vm',
default=None,
help='Delete a running VM'
)
parser.add_option(
'--download-remote-reports',
default=False,
action='store_true',
help='Download remote reports when running remote \'testrun\' state'
)
parser.add_option(
'--download-unittest-reports',
default=None,
help='Download the XML unittest results'
)
parser.add_option(
'--download-coverage-report',
default=None,
help='Download the XML coverage reports'
)
parser.add_option(
'--remote-log-path',
action='append',
default=[],
help='Provide additional log paths to download from remote minion'
)
parser.add_option(
'--download-remote-logs',
default=None,
help='Download remote minion and runtests log files'
)
parser.add_option(
'--grain-target',
action='append',
default=[],
help='Match minions using compound matchers, the minion ID, plus the passed grain.'
)
parser.add_option(
'--cloud-only',
default=False,
action='store_true',
help='Run the cloud provider tests only.'
)
parser.add_option(
'--build-packages',
default=True,
action='store_true',
help='Run buildpackage.py to create packages off of the git build.'
)
# These next three options are ignored if --build-packages is False
parser.add_option(
'--package-source-dir',
default='/testing',
help='Directory where the salt source code checkout is found '
'(default: %default)',
)
parser.add_option(
'--package-build-dir',
default='/tmp/salt-buildpackage',
help='Build root for automated package builds (default: %default)',
)
parser.add_option(
'--package-artifact-dir',
default='/tmp/salt-packages',
help='Location on the minion from which packages should be '
'retrieved (default: %default)',
)
    parser.add_option(
        '--splay',
        type='int',
        default=10,
        help='The number of seconds across which calls to provisioning components should be made'
    )
options, args = parser.parse_args()
if options.delete_vm is not None and not options.test_git_commit:
delete_vm(options)
parser.exit(0)
if options.download_unittest_reports is not None and not options.test_git_commit:
download_unittest_reports(options)
parser.exit(0)
if options.test_without_coverage is False:
if options.download_coverage_report is not None and not options.test_git_commit:
download_coverage_report(options)
parser.exit(0)
if options.download_remote_logs is not None and not options.test_git_commit:
download_remote_logs(options)
parser.exit(0)
if not options.platform and not options.pull_request:
parser.exit('--platform or --pull-request is required')
if not options.provider and not options.pull_request:
parser.exit('--provider or --pull-request is required')
if options.echo_parseable_environment:
echo_parseable_environment(options, parser)
parser.exit(0)
if not options.test_git_commit and not options.pull_request:
        parser.exit('--test-git-commit or --pull-request is required')
return options
if __name__ == '__main__':
exit_code = run(parse())
print('Exit Code: {0}'.format(exit_code))
sys.exit(exit_code)
| 33.230159
| 119
| 0.588637
|
845bb9e3c3300d2c59867aa9fb3dc30d73c69554
| 632
|
py
|
Python
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/survey_crm/__manifest__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/survey_crm/__manifest__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/survey_crm/__manifest__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Survey CRM',
'version': '2.0',
'category': 'Marketing',
'complexity': 'easy',
'website': 'https://www.odoo.com/page/survey',
'description': """
Survey - CRM (bridge module)
=================================================================================
This module adds a Survey mass mailing button inside the 'More' menu of lead/customer views
""",
'depends': ['crm', 'survey'],
'data': [
'views/survey_crm_views.xml',
],
'installable': True,
'auto_install': True
}
| 30.095238
| 92
| 0.53481
|
1800dcac9f0000fc4a4eb31610c510c91407390b
| 12,734
|
py
|
Python
|
pyglet/__init__.py
|
jmiller89/pyglet
|
311fe4a461e3c37a98fb1015af2a87533df58934
|
[
"BSD-3-Clause"
] | null | null | null |
pyglet/__init__.py
|
jmiller89/pyglet
|
311fe4a461e3c37a98fb1015af2a87533df58934
|
[
"BSD-3-Clause"
] | null | null | null |
pyglet/__init__.py
|
jmiller89/pyglet
|
311fe4a461e3c37a98fb1015af2a87533df58934
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2019 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""pyglet is a cross-platform games and multimedia package.
More information is available at http://www.pyglet.org
"""
import os
import sys
#: The release version
version = '2.0.dev0'
if 'sphinx' in sys.modules:
setattr(sys, 'is_pyglet_doc_run', True)
_is_pyglet_doc_run = hasattr(sys, "is_pyglet_doc_run") and sys.is_pyglet_doc_run
# Pyglet platform treats *BSD systems as Linux
compat_platform = sys.platform
if "bsd" in compat_platform:
compat_platform = "linux-compat"
_enable_optimisations = not __debug__
if getattr(sys, 'frozen', None):
_enable_optimisations = True
#: Global dict of pyglet options. To change an option from its default, you
#: must import ``pyglet`` before any sub-packages. For example::
#:
#: import pyglet
#: pyglet.options['debug_gl'] = False
#:
#: The default options can be overridden from the OS environment. The
#: corresponding environment variable for each option key is prefaced by
#: ``PYGLET_``. For example, in Bash you can set the ``debug_gl`` option with::
#:
#: PYGLET_DEBUG_GL=True; export PYGLET_DEBUG_GL
#:
#: For options requiring a tuple of values, separate each value with a comma.
#:
#: The non-development options are:
#:
#: audio
#: A sequence of the names of audio modules to attempt to load, in
#: order of preference. Valid driver names are:
#:
#: * directsound, the Windows DirectSound audio module (Windows only)
#: * pulse, the PulseAudio module (Linux only)
#: * openal, the OpenAL audio module
#: * silent, no audio
#: debug_lib
#: If True, prints the path of each dynamic library loaded.
#: debug_gl
#: If True, all calls to OpenGL functions are checked afterwards for
#: errors using ``glGetError``. This will severely impact performance,
#: but provides useful exceptions at the point of failure. By default,
#: this option is enabled if ``__debug__`` is True (i.e., if Python was not run
#: with the -O option). It is disabled by default when pyglet is "frozen"
#: within a py2exe or py2app library archive.
#: shadow_window
#: By default, pyglet creates a hidden window with a GL context when
#: pyglet.gl is imported. This allows resources to be loaded before
#: the application window is created, and permits GL objects to be
#: shared between windows even after they've been closed. You can
#: disable the creation of the shadow window by setting this option to
#: False.
#:
#: Some OpenGL driver implementations may not support shared OpenGL
#: contexts and may require disabling the shadow window (and all resources
#: must be loaded after the window using them was created). Recommended
#: for advanced developers only.
#:
#: .. versionadded:: 1.1
#: vsync
#: If set, the `pyglet.window.Window.vsync` property is ignored, and
#: this option overrides it (to either force vsync on or off). If unset,
#: or set to None, the `pyglet.window.Window.vsync` property behaves
#: as documented.
#: xsync
#: If set (the default), pyglet will attempt to synchronise the drawing of
#: double-buffered windows to the border updates of the X11 window
#: manager. This improves the appearance of the window during resize
#: operations. This option only affects double-buffered windows on
#: X11 servers supporting the Xsync extension with a window manager
#: that implements the _NET_WM_SYNC_REQUEST protocol.
#:
#: .. versionadded:: 1.1
#: search_local_libs
#: If False, pyglet won't try to search for libraries in the script
#: directory and its `lib` subdirectory. This is useful to load a local
#: library instead of the system installed version. This option is set
#: to True by default.
#:
#: .. versionadded:: 1.2
#:
options = {
'audio': ('directsound', 'openal', 'pulse', 'silent'),
'debug_font': False,
'debug_gl': not _enable_optimisations,
'debug_gl_trace': False,
'debug_gl_trace_args': False,
'debug_gl_shaders': False,
'debug_graphics_batch': False,
'debug_lib': False,
'debug_media': False,
'debug_texture': False,
'debug_trace': False,
'debug_trace_args': False,
'debug_trace_depth': 1,
'debug_trace_flush': True,
'debug_win32': False,
'debug_x11': False,
'shadow_window': True,
'vsync': None,
'xsync': True,
'xlib_fullscreen_override_redirect': False,
'search_local_libs': True,
}
_option_types = {
'audio': tuple,
'debug_font': bool,
'debug_gl': bool,
'debug_gl_trace': bool,
'debug_gl_trace_args': bool,
'debug_gl_shaders': bool,
'debug_graphics_batch': bool,
'debug_lib': bool,
'debug_media': bool,
'debug_texture': bool,
'debug_trace': bool,
'debug_trace_args': bool,
'debug_trace_depth': int,
'debug_trace_flush': bool,
'debug_win32': bool,
'debug_x11': bool,
'ffmpeg_libs_win': tuple,
'shadow_window': bool,
'vsync': bool,
'xsync': bool,
'xlib_fullscreen_override_redirect': bool,
'search_local_libs': bool,
}
def _read_environment():
"""Read defaults for options from environment"""
for key in options:
env = 'PYGLET_%s' % key.upper()
try:
value = os.environ[env]
if _option_types[key] is tuple:
options[key] = value.split(',')
elif _option_types[key] is bool:
options[key] = value in ('true', 'TRUE', 'True', '1')
elif _option_types[key] is int:
options[key] = int(value)
except KeyError:
pass
_read_environment()
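# Illustrative sketch (not part of pyglet): documents how the PYGLET_* environment
# variables consumed by _read_environment() map onto the options dict. The option keys
# below are real; the example values are hypothetical. The function is defined for
# documentation only and is never called here.
def _example_environment_override():
    # Running with PYGLET_DEBUG_GL=True and PYGLET_AUDIO=openal,silent would yield:
    #   options['debug_gl'] == True              (bool option, parsed from 'true'/'True'/'1')
    #   options['audio'] == ['openal', 'silent'] (tuple option, split on commas)
    return {key: options[key] for key in ('debug_gl', 'audio')}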
if compat_platform == 'cygwin':
# This hack pretends that the posix-like ctypes provides windows
# functionality. COM does not work with this hack, so there is no
# DirectSound support.
import ctypes
ctypes.windll = ctypes.cdll
ctypes.oledll = ctypes.cdll
ctypes.WINFUNCTYPE = ctypes.CFUNCTYPE
ctypes.HRESULT = ctypes.c_long
# Call tracing
# ------------
_trace_filename_abbreviations = {}
def _trace_repr(value, size=40):
value = repr(value)
if len(value) > size:
value = value[:size // 2 - 2] + '...' + value[-size // 2 - 1:]
return value
def _trace_frame(thread, frame, indent):
from pyglet import lib
if frame.f_code is lib._TraceFunction.__call__.__code__:
is_ctypes = True
func = frame.f_locals['self']._func
name = func.__name__
location = '[ctypes]'
else:
is_ctypes = False
code = frame.f_code
name = code.co_name
path = code.co_filename
line = code.co_firstlineno
try:
filename = _trace_filename_abbreviations[path]
except KeyError:
# Trim path down
dir = ''
path, filename = os.path.split(path)
while len(dir + filename) < 30:
filename = os.path.join(dir, filename)
path, dir = os.path.split(path)
if not dir:
filename = os.path.join('', filename)
break
else:
filename = os.path.join('...', filename)
_trace_filename_abbreviations[path] = filename
location = '(%s:%d)' % (filename, line)
if indent:
name = 'Called from %s' % name
print('[%d] %s%s %s' % (thread, indent, name, location))
if _trace_args:
if is_ctypes:
args = [_trace_repr(arg) for arg in frame.f_locals['args']]
print(' %sargs=(%s)' % (indent, ', '.join(args)))
else:
for argname in code.co_varnames[:code.co_argcount]:
try:
argvalue = _trace_repr(frame.f_locals[argname])
print(' %s%s=%s' % (indent, argname, argvalue))
except:
pass
if _trace_flush:
sys.stdout.flush()
def _thread_trace_func(thread):
def _trace_func(frame, event, arg):
if event == 'call':
indent = ''
for i in range(_trace_depth):
_trace_frame(thread, frame, indent)
indent += ' '
frame = frame.f_back
if not frame:
break
elif event == 'exception':
(exception, value, traceback) = arg
print('First chance exception raised:', repr(exception))
return _trace_func
def _install_trace():
global _trace_thread_count
sys.setprofile(_thread_trace_func(_trace_thread_count))
_trace_thread_count += 1
_trace_thread_count = 0
_trace_args = options['debug_trace_args']
_trace_depth = options['debug_trace_depth']
_trace_flush = options['debug_trace_flush']
if options['debug_trace']:
_install_trace()
# Lazy loading
# ------------
class _ModuleProxy:
_module = None
def __init__(self, name):
self.__dict__['_module_name'] = name
def __getattr__(self, name):
try:
return getattr(self._module, name)
except AttributeError:
if self._module is not None:
raise
import_name = 'pyglet.%s' % self._module_name
__import__(import_name)
module = sys.modules[import_name]
object.__setattr__(self, '_module', module)
globals()[self._module_name] = module
return getattr(module, name)
def __setattr__(self, name, value):
try:
setattr(self._module, name, value)
except AttributeError:
if self._module is not None:
raise
import_name = 'pyglet.%s' % self._module_name
__import__(import_name)
module = sys.modules[import_name]
object.__setattr__(self, '_module', module)
globals()[self._module_name] = module
setattr(module, name, value)
if True:
app = _ModuleProxy('app')
canvas = _ModuleProxy('canvas')
clock = _ModuleProxy('clock')
com = _ModuleProxy('com')
event = _ModuleProxy('event')
font = _ModuleProxy('font')
gl = _ModuleProxy('gl')
graphics = _ModuleProxy('graphics')
image = _ModuleProxy('image')
input = _ModuleProxy('input')
lib = _ModuleProxy('lib')
media = _ModuleProxy('media')
model = _ModuleProxy('model')
resource = _ModuleProxy('resource')
sprite = _ModuleProxy('sprite')
text = _ModuleProxy('text')
window = _ModuleProxy('window')
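# Illustrative note (not part of pyglet): with the proxies above, the first attribute
# access on e.g. ``clock`` falls into _ModuleProxy.__getattr__, which imports
# pyglet.clock and replaces the proxy in globals(). The function below only documents
# that behaviour and is never called.
def _example_lazy_access():
    # Before this lookup, ``clock`` is a _ModuleProxy; the attribute access triggers the
    # import and returns the real module's attribute ('pyglet.clock' in this case).
    return clock.__name__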
# Fool py2exe, py2app into including all top-level modules
# (doesn't understand lazy loading)
if False:
from . import app
from . import canvas
from . import clock
from . import com
from . import event
from . import font
from . import gl
from . import graphics
from . import input
from . import image
from . import lib
from . import media
from . import model
from . import resource
from . import sprite
from . import text
from . import window
| 33.335079
| 80
| 0.640647
|
dbcb5689c94aa07ef7ffa6a55ba3a528b9220dcf
| 7,122
|
py
|
Python
|
petra_camera/devices/dalsaproxy.py
|
yamedvedya/camera_viewer
|
9e4d213f1ffc5a32517f4cd4f67e7563819ea480
|
[
"MIT"
] | null | null | null |
petra_camera/devices/dalsaproxy.py
|
yamedvedya/camera_viewer
|
9e4d213f1ffc5a32517f4cd4f67e7563819ea480
|
[
"MIT"
] | null | null | null |
petra_camera/devices/dalsaproxy.py
|
yamedvedya/camera_viewer
|
9e4d213f1ffc5a32517f4cd4f67e7563819ea480
|
[
"MIT"
] | null | null | null |
# Created by matveyev at 01.12.2020
# ----------------------------------------------------------------------
# Author: yury.matveev@desy.de
# ----------------------------------------------------------------------
"""Dalsa camera proxy
"""
import numpy as np
import PyTango
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import os.path as ospath
from PIL import Image
from petra_camera.devices.base_camera import BaseCamera
from petra_camera.main_window import APP_NAME
logger = logging.getLogger(APP_NAME)
# ----------------------------------------------------------------------
class DalsaProxy(BaseCamera):
_settings_map = {
'max_level_limit': (None, )
}
visible_layouts = ('folder', 'source')
# ----------------------------------------------------------------------
def __init__(self, settings):
super(DalsaProxy, self).__init__(settings)
if settings.hasAttribute('folders'):
self._possible_folders = [item.strip() for item in settings.getAttribute("folders").split(';')]
else:
self._possible_folders = ['/gpfs/current/raw/', '/gpfs/commissioning/raw/']
if settings.hasAttribute('sources'):
            self._possible_sources = [item.strip() for item in settings.getAttribute("sources").split(';')]
else:
self._possible_sources = ['Event', 'Files']
self._my_event_handler = PatternMatchingEventHandler(["*.tif"], "", False, True)
self._my_event_handler.on_created = self._on_created
self._my_observer = None
self._source = self._possible_sources[0]
self.path = self._possible_folders[0]
self._last_frame = np.zeros((1, 1))
self.error_flag = False
self.error_msg = ''
self._running = False
# ----------------------------------------------------------------------
def start_acquisition(self):
if self._source == 'Event':
logger.debug(f'{self._my_name}: starting acquisition: event mode')
if self._device_proxy is None:
raise RuntimeError('No device proxy')
self._device_proxy.write_attribute("PixelFormat", "Mono16")
self._device_proxy.write_attribute("ViewingMode", 2)
self._eid = self._device_proxy.subscribe_event("Image16", PyTango.EventType.DATA_READY_EVENT,
self._on_event, [], True)
self._running = True
return True
elif self._source == 'Files':
if self.path != '':
logger.debug(f'{self._my_name}: starting acquisition: file mode')
self._my_observer = Observer()
self._my_observer.schedule(self._my_event_handler, self.path, recursive=True)
self._my_observer.start()
self._running = True
return True
else:
                raise RuntimeError('Path does not exist')
else:
raise RuntimeError('Unknown mode')
# ----------------------------------------------------------------------
def stop_acquisition(self):
if self._source == 'Event':
logger.debug(f'{self._my_name}: stopping acquisition: event mode')
self._device_proxy.unsubscribe_event(self._eid)
elif self._source == 'Files':
logger.debug(f'{self._my_name}: stopping acquisition: file mode')
self._my_observer.stop()
self._my_observer.join()
else:
raise RuntimeError('Unknown mode')
self._running = False
# ----------------------------------------------------------------------
def _on_event(self, event):
if not event.err:
logger.debug(f'{self._my_name}: new tango event')
data = event.device.read_attribute(event.attr_name.split('/')[6])
self._last_frame = np.array(data.value)[self._picture_size[0]:self._picture_size[2],
self._picture_size[1]:self._picture_size[3]]
self._new_frame_flag = True
else:
pass
# ----------------------------------------------------------------------
def _on_created(self, event):
logger.debug(f'{self._my_name}: new file system event')
self.id = ' file: {}'.format(ospath.splitext(ospath.basename(event.src_path))[0])
self._last_frame = np.array(Image.open(event.src_path))[self._picture_size[0]:self._picture_size[2],
self._picture_size[1]:self._picture_size[3]]
self._new_frame_flag = True
# ----------------------------------------------------------------------
def _set_new_path(self, path):
logger.debug(f'{self._my_name}: new file path: {path}')
need_to_restart = self._running
if self._running:
self.stop_acquisition()
self._last_frame = np.zeros((1, 1))
self._new_frame_flag = True
self.path = path
if need_to_restart:
self.start_acquisition()
# ----------------------------------------------------------------------
def get_settings(self, option, cast):
if option in ['Path', 'Source', 'possible_sources', 'possible_folders']:
logger.debug(f'{self._my_name}: setting {cast.__name__}({option}) requested')
if option == 'Path':
path = super(DalsaProxy, self).get_settings(option, cast)
if path != '':
self._set_new_path(path)
return self.path
elif option == 'Source':
source = super(DalsaProxy, self).get_settings(option, cast)
if source != '':
self._change_source(source)
return self._source
elif option == 'possible_sources':
return self._possible_sources
elif option == 'possible_folders':
return self._possible_folders
else:
return super(DalsaProxy, self).get_settings(option, cast)
# ----------------------------------------------------------------------
def save_settings(self, option, value):
if option in ['Path', 'Source']:
logger.debug(f'{self._my_name}: setting {option}: new value {value}')
if option == 'Path':
self._set_new_path(value)
elif option == 'Source':
self._change_source(value)
super(DalsaProxy, self).save_settings(option, value)
# ----------------------------------------------------------------------
def _change_source(self, source):
need_to_restart = self._running
if self._running:
self.stop_acquisition()
self._last_frame = np.zeros((1, 1))
self._new_frame_flag = True
self._source = source
if need_to_restart:
self.start_acquisition()
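# ----------------------------------------------------------------------
# Illustrative sketch (not part of this proxy): the watchdog pattern behind the
# 'Files' source above, shown standalone. The path and callback arguments are
# hypothetical; the handler/observer calls mirror start_acquisition()/stop_acquisition().
def _example_watch_folder(path, on_new_tif):
    handler = PatternMatchingEventHandler(["*.tif"], "", False, True)
    handler.on_created = on_new_tif          # called with each filesystem event
    observer = Observer()
    observer.schedule(handler, path, recursive=True)
    observer.start()                         # watches in a background thread
    return observer                          # caller stops it with observer.stop(); observer.join()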
| 33.753555
| 108
| 0.511654
|
bd8224cc12b9ca9f27ee5d178cd4bdb800490c38
| 15,556
|
py
|
Python
|
thermoplotting/kinetics/trajectories.py
|
Van-der-Ven-Group/thermoplotting
|
d826d728f406896b7a56207f3f4e9b4176de0e97
|
[
"MIT"
] | 10
|
2015-04-28T18:53:00.000Z
|
2020-09-23T13:29:07.000Z
|
thermoplotting/kinetics/trajectories.py
|
Van-der-Ven-Group/thermoplotting
|
d826d728f406896b7a56207f3f4e9b4176de0e97
|
[
"MIT"
] | 1
|
2019-05-20T19:20:24.000Z
|
2019-05-20T19:20:24.000Z
|
thermoplotting/kinetics/trajectories.py
|
goirijo/thermoplotting
|
d826d728f406896b7a56207f3f4e9b4176de0e97
|
[
"MIT"
] | 4
|
2015-08-03T18:36:46.000Z
|
2022-03-30T23:13:04.000Z
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import pandas as pd
import numpy as np
class KineticTrajectory(object):
"""A trajectory is a list of x,y,z and time coordinates for a single
atom in a kinetic Monte Carlo simulation, which has the values of that
atom after every hop that happens in the simulation. When dealing with data
for several atoms, do not use this class. Instead use KineticData."""
def __init__(self, x, y, z, t, copy=False):
"""Initialize with a list of coordinates
Parameters
----------
x : x component of coordinate
y : y component of coordinate
z : z component of coordinate
t : time elapsed for the current coordinate
copy : if True, creates copy of the data passed
"""
#Not convinced this is managing the memory the way you think, but "not copying" appears to be faster
if copy:
self._data = pd.DataFrame(data={"x": x.copy(), "y": y.copy(), "z": z.copy(), "t": t.copy()})
else:
self._data = pd.DataFrame(data={"x": x, "y": y, "z": z, "t": t})
#Add the norm of the distances
self._data["r"]=np.sqrt(np.square(self._data[["x","y","z"]]).sum(axis=1))
def x(self):
return self._data["x"]
def y(self):
return self._data["y"]
def z(self):
return self._data["z"]
def t(self):
return self._data["t"]
def r(self):
return self._data["r"]
def data(self):
return self._data
def size(self):
return len(self.t())
def as_matrix(self):
return self._data[["x","y","z","t"]].as_matrix()
def segment(self, n):
"""Split the trajectory into n independent looking
trajectories. If the number of samples is not divisible
by n, the remainder will be discarded.
Parameters
----------
n : int
Returns
-------
list[KineticTrajectory]
"""
block_size=self.size()//n
data_blocks=[self._data.loc[i*block_size:(i+1)*block_size,["x","y","z","t"]] for i in xrange(n)]
for ix,d in enumerate(data_blocks[1::]):
d-=self._data.loc[block_size*(ix+1)-1]
return [KineticTrajectory(**d) for d in data_blocks]
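# Illustrative usage (not part of the original module, Python 2 context like the rest
# of this file): builds a KineticTrajectory from a made-up cumulative random walk and
# splits it into independent-looking blocks. All numbers are hypothetical.
def _example_trajectory_segmentation(n_steps=90, n_segments=3):
    steps = np.random.rand(n_steps, 3) - 0.5        # random displacement per hop
    xyz = np.cumsum(steps, axis=0)                  # cumulative x, y, z coordinates
    t = np.cumsum(np.random.rand(n_steps))          # monotonically increasing time
    traj = KineticTrajectory(xyz[:, 0], xyz[:, 1], xyz[:, 2], t)
    return traj.segment(n_segments)                 # list of KineticTrajectory blocks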
class KineticData(object):
"""Store and retrieve kinetic Monte Carlo data by type of
species, and other conveniences. This is meant to store a single
KMC simulation from start to finish"""
def _input_sanity_raise(self,trajectories, time, occ_species):
if(trajectories.shape[0]!=len(occ_species)):
raise ValueError("There must be an xyz trajectory for each species to name")
if(trajectories.shape[1]!=len(time)):
raise ValueError("There must be as many time data points as there are coordinates for each atom")
if(trajectories.shape[2]!=3):
raise ValueError("The trajectories arrays must hold only values for the x, y, and z coordinates")
return
def _master_dataframe(self, trajectories, time, occ_species):
"""Given the constructor data, create the master DataFrame that holds
all the information about the trajectories of each atom, including what
species each one is and where it was sitting at the beginning of the
KMC simulation cell.
Parameters
----------
trajectories : list of tx3 arrays of length s as np.array
time : array of float of length t
occ_species list of str of length s
Returns
-------
pd.DataFrame
"""
#Create the labels for each atom, with the species name and the index into the starting configdof
occ_labels=[o+"({})".format(ix) for o,ix in zip(occ_species,xrange(len(occ_species)))]
#Calculate the norm of the displacements for every atom at every time step
norms=np.linalg.norm(trajectories,axis=2)
assert(len(occ_labels)==len(trajectories))
assert(len(norms)==len(trajectories))
#The concatenated numpy array now has shape[2]==4 with the norm travelled as a new value
full_trajectory_data=np.concatenate((trajectories,np.expand_dims(norms,2)),axis=2)
assert(full_trajectory_data.shape[2]==4)
#Create MultiIndex for columns, which will group x,y,z,r by atom doing the trajectory
labels0=[ix for ix,_ in enumerate(occ_labels) for i in xrange(4)]
assert(labels0[0]==labels0[3] and labels0[-1]==labels0[-4])
labels1=[i for ix,_ in enumerate(occ_labels) for i in xrange(4)]
assert(labels0[1]==labels1[-4])
col_mix=pd.MultiIndex(levels=[occ_labels,["x","y","z","r"]],labels=[labels0,labels1],names=["atomic","cart"])
#Reshape the trajectory data so that it's 2 dimensional, with the xyzr columns side by side
nats,ntime,ndim=full_trajectory_data.shape
data_digest=full_trajectory_data.transpose(0,2,1).reshape(nats*ndim,ntime).T
#Include the time into the set of data as an additional Index
time_ix=np.arange(ntime)
timed_mix=pd.MultiIndex(levels=[time_ix,time],labels=[time_ix,time_ix],names=["index","time"])
#Create the master DataFrame, this has all the things and has columns at two levels:
#by species and by trajectory. There are two index levels, sample index and time
master_frame=pd.DataFrame(data_digest,index=timed_mix,columns=col_mix)
return master_frame
def __init__(self, trajectories, time, occ_species,direct=None):
"""Initialize with a list of trajectories, the elapsed time per step,
and a list of the occupation name for each atom. Assumes all data
comes in incremental time (will not sort anything).
Internally this is a multi-indexed Pandas array, where one level
deals with the atoms, naming each "column" things like "Ni(0)", "Al(1)",
etc, to indicate the species and the index into the unrolled configuration
of the starting config, as well as the elapsed time, which is common across
every atom. The other level deals with columns of type "x", "y",
or "z" to keep track of the trajectory of each atom. The master data
should always remain in a state where Level 0 refers to the atom labels
and Level 1 refers to the trajectories
Parameters
----------
        trajectories : list of tx3 arrays of length s as np.array
time : array of float of length t
occ_species : list of str of length s
direct : pd.DataFrame, bypasses the normal construction
"""
if(direct is None):
self._input_sanity_raise(trajectories, time, occ_species)
self._master=self._master_dataframe(trajectories,time,occ_species)
else:
self._master=direct
return
def atom_cols(self, va_as_specie=False):
"""Return array of the column names for every atom.
If specified, include the vacancies as a specie.
Parameters
----------
va_as_specie : bool
Returns
-------
list
"""
everything=self._master.columns.get_level_values("atomic").unique()
if va_as_specie:
return everything
else:
return [x for x in everything if "Va" not in x]
def specie_cols(self, specie):
"""Return an array of column names that can be used to index into
every trajectory of a particular specie
Parameters
----------
specie : str
Returns
-------
list of str
"""
return [s for s in self.atom_cols() if specie in s]
def num_atoms(self,va_as_specie=False):
"""Returns total number of sites that there is data for
If specified, include the vacancies as a specie.
Parameters
----------
va_as_specie : bool
Returns
-------
int
"""
return len(self.atom_cols(va_as_specie))
def composition(self, specie, va_as_specie=False):
"""Returns the ratio of number of specie to total number of atoms
(not including vacancies unless specified)
Parameters
----------
specie : str
Returns
-------
float
"""
return len(self.specie_cols(specie))/self.num_atoms(va_as_specie)
def index_trajectory(self, index):
"""Return the x, y, z, and t values of a particular atom throughout
the simulation, specifying only the index and not the specie
Parameters
----------
atom : int
Returns
-------
pd.DataFrame with x,y,z columns and t as secondary index
"""
for a in self.atom_cols():
if "({})".format(index) in a:
return self.atomic_trajectory(a)
def atomic_trajectory(self, atom):
"""Return the x, y, z, and t values of a particular atom throughout
the simulation
Parameters
----------
atom : str (e.g. Ni(9))
Returns
-------
pd.DataFrame with x,y,z columns and t as secondary index
"""
return self._master[atom]
def specie_data(self, specie):
"""Return only the data for a particular species
Parameters
----------
specie : str
Returns
-------
pd.DataFrame
"""
return self._master[self.specie_cols(specie)]
def specie_names(self):
"""Returns the names of all species present
Returns
-------
set of str
"""
all_cols=self.atom_cols(va_as_specie=True)
return set([col.split("(")[0] for col in all_cols])
def _column_swap(self):
"""return the master data with cart over atomic
Returns
-------
DataFrame
"""
return self._master.swaplevel("atomic","cart",axis=1)
    def x(self):
        return self._column_swap()["x"]
    def y(self):
        return self._column_swap()["y"]
    def z(self):
        return self._column_swap()["z"]
    def r(self):
        return self._column_swap()["r"]
def t(self):
return self._master.index.get_level_values("time").values
def _index_at_time(self, time):
"""Return the index (row) corresponding to the data
for the instant just after (or equal to) the specified time
Parameters
----------
time : float
Returns
-------
int
"""
return self._master[self.t()>=time].index.get_level_values("index")[0]
def values_at_time(self, time):
"""Return the values of everything just below the value of
the time specified.
Parameters
----------
time : float
Returns
-------
pd.DataFrame
"""
return self._master.loc[self._index_at_time(time)]
def specie_values_at_time(self, time, specie):
"""Return the values of everything just below the value of
the time specified, but only for the desired specie
Parameters
----------
time : float
specie : str
Returns
-------
pd.DataFrame
"""
specie_dump=self.specie_data(specie)
return specie_dump.loc[self._index_at_time(time)]
def independized_measurements(self):
"""Similar to segmenting the data into multiple apparently independent run,
this routine will make every point appear to have started at t=0 and r=0.
This can be useful for data you collect where you don't sample every step,
and you'd like to keep all the "final" data points in the same array.
Returns
-------
KineticData
"""
#create copy of data and subtract out values
indep=self._master.copy()
indep.iloc[1::]=indep.iloc[1::].values-indep.iloc[0:-1]
#fix the distance
stacked=indep.stack("atomic")
stacked["r"]=np.linalg.norm(stacked[["x","y","z"]],axis=1)
indep=stacked.unstack("atomic").stack("cart").unstack("cart")
#set the time
reset_time=self._master.index.get_level_values("time").values
reset_time[1::]=reset_time[1::]-reset_time[0:-1]
indep.index.set_levels(reset_time,"time",inplace=True)
return KineticData(None,None,None,direct=indep)
def _indexed_segmentation(self, end_inds):
"""Given indexes into the sampled data, split
the master DataFrame into the specified chunks,
and reset the elapsed time and coordinates
such that each segment appears to be an independent
run
Parameters
----------
end_inds : list of int, each int is the "up to" index of each segment
Returns
-------
list of KineticData
"""
start_inds=[0]+end_inds[0:-1]
raw_segments=[self._master.iloc[ix:nx] for ix,nx in zip(start_inds,end_inds)]
# raw_segments=[self._master.iloc[seg_length*s:seg_length*(s+1)] for s in xrange(n)]
n=len(raw_segments)
#We will subtract the values of the "previous simulation", starting with
#the final segment
#These are indexes in reverse that exclude zero
rev_seg_ix=np.arange(n-1)[::-1]+1
for rix in rev_seg_ix:
raw_segments[rix]=raw_segments[rix]-raw_segments[rix-1].iloc[-1]
#The norm (r) needs to be recalculated
raw_segments[rix]=raw_segments[rix].stack("atomic")
raw_segments[rix]["r"]=np.linalg.norm(raw_segments[rix][["x","y","z"]],axis=1)
raw_segments[rix]=raw_segments[rix].unstack("atomic").stack("cart").unstack("cart")
#The time also needs to be reset
reset_time=self._master.index.get_level_values("time")-raw_segments[rix-1].index.get_level_values("time")[-1]
raw_segments[rix].index.set_levels(reset_time,"time",inplace=True)
return [KineticData(None,None,None,direct=raw) for raw in raw_segments]
def sampled_segmentation(self, n):
"""Split the data into n KineticData as if the data
had been run independently, subtracting out time and
coordinates so that they start at zero. Remainder data
is discarded.
Parameters
----------
n : int
Returns
-------
list of KineticData
"""
seg_length=len(self._master)//n
seg_inds=[seg_length*(i+1) for i in xrange(n)]
return self._indexed_segmentation(seg_inds)
def timed_segmentation(self, n):
"""Return segments of data in which equal sets
of time have elapsed
Parameters
----------
        n : int
Returns
-------
list of KineticData
"""
time_length=self.total_time()/n
time_inds=[self._index_at_time(time_length*(i+1)) for i in xrange(n)]
return self._indexed_segmentation(time_inds)
def values(self):
"""Return all the data ever
Returns
-------
pd.DataFrame
"""
return self._master
def total_time(self):
"""Returns the most amount of time elapsed
Returns
-------
float
"""
return self._master.index.get_level_values("time")[-1]
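# Illustrative usage (not part of the original module, Python 2 context like the rest
# of this file): constructs a KineticData object from made-up trajectories for two
# atoms. The shapes follow the constructor contract (s atoms x t samples x 3); all
# values are hypothetical.
def _example_kinetic_data(n_samples=50):
    occ_species = ["Ni", "Va"]                       # one label per tracked atom
    trajectories = np.cumsum(np.random.rand(len(occ_species), n_samples, 3) - 0.5, axis=1)
    time = np.cumsum(np.random.rand(n_samples))      # elapsed time per sample
    kd = KineticData(trajectories, time, occ_species)
    return kd.specie_data("Ni"), kd.composition("Ni")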
| 31.112
| 121
| 0.603304
|
2563c0dccf2c9040fca098bd58622dc6e5a18c9b
| 10,587
|
py
|
Python
|
flaskshop/dashboard/views/product.py
|
dedalgr/flask-shop
|
206c4ec75184d0bf2fbb0fe8014722a7e683b04b
|
[
"BSD-3-Clause"
] | null | null | null |
flaskshop/dashboard/views/product.py
|
dedalgr/flask-shop
|
206c4ec75184d0bf2fbb0fe8014722a7e683b04b
|
[
"BSD-3-Clause"
] | null | null | null |
flaskshop/dashboard/views/product.py
|
dedalgr/flask-shop
|
206c4ec75184d0bf2fbb0fe8014722a7e683b04b
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime
from flask import request, render_template, redirect, url_for, current_app
from flask_babel import lazy_gettext, gettext
from flaskshop.product.models import (
ProductAttribute,
ProductType,
Collection,
Product,
Category,
ProductVariant,
AttributeChoiceValue
)
from flaskshop.dashboard.forms import (
AttributeForm,
CollectionForm,
CategoryForm,
ProductTypeForm,
ProductForm,
ProductCreateForm,
VariantForm,
)
def attributes():
page = request.args.get("page", type=int, default=1)
pagination = ProductAttribute.query.paginate(page, 10)
props = {
"id": lazy_gettext("ID"),
"title": lazy_gettext("Title"),
"values_label": lazy_gettext("Value"),
"types_label": lazy_gettext("ProductType"),
}
context = {
"title": lazy_gettext("Product Attribute"),
"items": pagination.items,
"props": props,
"pagination": pagination,
"identity": gettext("attributes"),
}
return render_template("list.html", **context)
def attributes_manage(id=None):
if id:
attr = ProductAttribute.get_by_id(id)
form = AttributeForm(obj=attr)
else:
form = AttributeForm()
if form.validate_on_submit():
if not id:
attr = ProductAttribute()
attr.title = form.title.data
attr.update_types(form.types.data)
attr.update_values(form.values.data)
attr.save()
return redirect(url_for("dashboard.attributes"))
product_types = ProductType.query.all()
return render_template(
"product/attribute.html", form=form, product_types=product_types
)
def collections():
page = request.args.get("page", type=int, default=1)
pagination = Collection.query.paginate(page, 10)
props = {"id": lazy_gettext("ID"), "title": lazy_gettext("Title"), "created_at": lazy_gettext("Created At")}
context = {
"title": lazy_gettext("Product Collection"),
"items": pagination.items,
"props": props,
"pagination": pagination,
"identity": gettext("collections"),
}
return render_template("list.html", **context)
def collections_manage(id=None):
if id:
collection = Collection.get_by_id(id)
form = CollectionForm(obj=collection)
else:
form = CollectionForm()
if form.validate_on_submit():
if not id:
collection = Collection()
collection.title = form.title.data
collection.update_products(form.products.data)
image = form.bgimg_file.data
if image:
background_img = image.filename
upload_file = current_app.config["UPLOAD_DIR"] / background_img
upload_file.write_bytes(image.read())
collection.background_img = (
current_app.config["UPLOAD_FOLDER"] + "/" + background_img
)
collection.save()
return redirect(url_for("dashboard.collections"))
products = Product.query.all()
return render_template("product/collection.html", form=form, products=products)
def categories():
page = request.args.get("page", type=int, default=1)
pagination = Category.query.paginate(page, 10)
props = {
"id": lazy_gettext("ID"),
"title": lazy_gettext("Title"),
"parent": lazy_gettext("Parent"),
"created_at": lazy_gettext("Created At"),
}
context = {
"title": lazy_gettext("Product Category"),
"items": pagination.items,
"props": props,
"pagination": pagination,
"identity": gettext("categories"),
}
return render_template("list.html", **context)
def categories_manage(id=None):
if id:
category = Category.get_by_id(id)
form = CategoryForm(obj=category)
else:
form = CategoryForm()
if form.validate_on_submit():
if not id:
category = Category()
category.title = form.title.data
category.parent_id = form.parent_id.data
image = form.bgimg_file.data
if image:
background_img = image.filename
upload_file = current_app.config["UPLOAD_DIR"] / background_img
upload_file.write_bytes(image.read())
category.background_img = (
current_app.config["UPLOAD_FOLDER"] + "/" + background_img
)
category.save()
return redirect(url_for("dashboard.categories"))
parents = Category.first_level_items()
return render_template("product/category.html", form=form, parents=parents)
def product_types():
page = request.args.get("page", type=int, default=1)
pagination = ProductType.query.paginate(page, 10)
props = {
"id": lazy_gettext("ID"),
"title": lazy_gettext("Title"),
"has_variants": lazy_gettext("Has Variants"),
"is_shipping_required": lazy_gettext("Is Shipping Required"),
"created_at": lazy_gettext("Created At"),
}
context = {
"title": lazy_gettext("Product Type"),
"items": pagination.items,
"props": props,
"pagination": pagination,
"identity": gettext("product_types"),
}
return render_template("list.html", **context)
def product_types_manage(id=None):
if id:
product_type = ProductType.get_by_id(id)
form = ProductTypeForm(obj=product_type)
else:
form = ProductTypeForm()
if form.validate_on_submit():
if not id:
product_type = ProductType()
product_type.update_product_attr(form.product_attributes.data)
product_type.update_variant_attr(form.variant_attr_id.data)
del form.product_attributes
del form.variant_attr_id
form.populate_obj(product_type)
product_type.save()
return redirect(url_for("dashboard.product_types"))
attributes = ProductAttribute.query.all()
return render_template(
"product/product_type.html", form=form, attributes=attributes
)
def products():
page = request.args.get("page", type=int, default=1)
query = Product.query
on_sale = request.args.get("sale", type=int)
if on_sale is not None:
query = query.filter_by(on_sale=on_sale)
category = request.args.get("category", type=int)
if category:
query = query.filter_by(category_id=category)
title = request.args.get("title", type=str)
if title:
query = query.filter(Product.title.like(f"%{title}%"))
created_at = request.args.get("created_at", type=str)
if created_at:
start_date, end_date = created_at.split("-")
start_date = datetime.strptime(start_date.strip(), "%m/%d/%Y")
end_date = datetime.strptime(end_date.strip(), "%m/%d/%Y")
query = query.filter(Product.created_at.between(start_date, end_date))
pagination = query.paginate(page, 10)
props = {
"id": lazy_gettext("ID"),
"title": lazy_gettext("Title"),
"on_sale_human": lazy_gettext("On Sale"),
"sold_count": lazy_gettext("Sold Count"),
"price_human": lazy_gettext("Price"),
"category": lazy_gettext("Category"),
}
context = {
"items": pagination.items,
"props": props,
"pagination": pagination,
"categories": Category.query.all(),
}
return render_template("product/list.html", **context)
def product_detail(id):
product = Product.get_by_id(id)
return render_template("product/detail.html", product=product)
def _save_product(product, form):
product.update_images(form.images.data)
product.update_attributes(form.attributes.data)
del form.images
del form.attributes
form.populate_obj(product)
product.save()
return product
def product_edit(id):
product = Product.get_by_id(id)
form = ProductForm(obj=product)
if form.validate_on_submit():
_save_product(product, form)
return redirect(url_for("dashboard.product_detail", id=product.id))
categories = Category.query.all()
context = {"form": form, "categories": categories, "product": product}
return render_template("product/product_edit.html", **context)
def product_create_step1():
form = ProductCreateForm()
if form.validate_on_submit():
return redirect(
url_for(
"dashboard.product_create_step2",
product_type_id=form.product_type_id.data,
)
)
product_types = ProductType.query.all()
return render_template(
"product/product_create_step1.html", form=form, product_types=product_types
)
def product_create_step2():
form = ProductForm()
product_type_id = request.args.get("product_type_id", 1, int)
product_type = ProductType.get_by_id(product_type_id)
categories = Category.query.all()
if form.validate_on_submit():
product = Product(product_type_id=product_type_id)
product = _save_product(product, form)
#product.generate_variants()
return redirect(url_for("dashboard.product_detail", id=product.id))
return render_template(
"product/product_create_step2.html",
form=form,
product_type=product_type,
categories=categories,
)
def variant_manage(id=None):
product_type_id = request.args.get("product_type_id", 1, int)
product_type = ProductType.get_by_id(product_type_id)
if id:
variant = ProductVariant.get_by_id(id)
form = VariantForm(obj=variant)
var = form.attributes.object_data
var2 = form.attributes.data
del form.attributes
form.attributes = product_type.product_attributes[0]
form.attributes.label = product_type.product_attributes[0].title
form.attributes.data = var2
form.attributes.object_data = var
form.populate_obj(variant)
else:
form = VariantForm()
if form.validate_on_submit():
if not id:
variant = ProductVariant()
product_id = request.args.get("product_id")
if product_id:
variant.product_id = product_id
variant.title = form.title.data
variant.quantity = form.quantity.data
variant.attributes = {product_type_id: form.attributes.data[0]}
variant.sku = str(variant.product_id) + "-" + str(form.sku_id.data)
variant.save()
return redirect(url_for("dashboard.product_detail", id=variant.product_id))
return render_template("product/variant.html", form=form, product_type=product_type)
| 33.503165
| 112
| 0.648342
|
3cb2440d218ffc48aa2ada169e2f66a4d8683096
| 1,537
|
py
|
Python
|
10_Other/Python Assignments/Titanic Dataset/readme open in spyder.py
|
Arunken/PythonScripts
|
702d0a3af7a9be3311f9da0afc5285d453f15484
|
[
"Apache-2.0"
] | null | null | null |
10_Other/Python Assignments/Titanic Dataset/readme open in spyder.py
|
Arunken/PythonScripts
|
702d0a3af7a9be3311f9da0afc5285d453f15484
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:58:47.000Z
|
2021-06-02T00:58:47.000Z
|
10_Other/Python Assignments/Titanic Dataset/readme open in spyder.py
|
Arunken/PythonScripts
|
702d0a3af7a9be3311f9da0afc5285d453f15484
|
[
"Apache-2.0"
] | null | null | null |
'''
Column Description:
------------------
>> survival: Survival (0 = no; 1 = yes)
>> class: Passenger class (1 = first; 2 = second; 3 = third)
>> name: Name
>> sex: Sex
>> age: Age
>> sibsp: Number of siblings/spouses aboard
>> parch: Number of parents/children aboard
>> ticket: Ticket number
>> fare: Passenger fare
>> cabin: Cabin
>> embarked: Port of embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)
>> boat: Lifeboat (if survived)
>> body: Body number (if did not survive and body was recovered)
Q1. Analyze the dataset and find the following :
a) The number of females who survived the disaster.
b) The number of males who survived the disaster.
c) The number of children under the age of 10 who survived the disaster.
d) The number of people who survived the disaster.
e) The probability that a female survives the disaster.
f) The probability that a male survives the disaster.
g) The probability that a person from New York survives the disaster.
h) Make a plot of the percentage of people who survived in accordance with the passenger class.
i) Make a plot of the percentage of people who survived in accordance with the passenger class and gender.
j) Make a plot of the percentage of people who survived in accordance with the gender.
k) How likely would it be for a woman belonging to first class to survive as compared to a woman belonging to some other passenger class.
l) Draw your conclusions based on the findings from the above analysis.
'''
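# Illustrative sketch (not part of the assignment): one way to start on items (a), (b),
# (c) and (d) with pandas. The file name and the exact column names ('survived', 'sex',
# 'age') are assumptions based on the column description above; adjust to the real CSV.
def survival_counts(csv_path="titanic.csv"):
    import pandas as pd
    df = pd.read_csv(csv_path)
    survived = df["survived"] == 1
    females_survived = (survived & (df["sex"] == "female")).sum()   # item (a)
    males_survived = (survived & (df["sex"] == "male")).sum()       # item (b)
    children_survived = (survived & (df["age"] < 10)).sum()         # item (c)
    total_survived = survived.sum()                                 # item (d)
    return females_survived, males_survived, children_survived, total_survived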
| 41.540541
| 138
| 0.713077
|
645100d0a1372f7ed7dc1d346a17938d6eef5f0d
| 277
|
py
|
Python
|
client/utils/data_request_type.py
|
devhid/tnnl
|
72cf2b2fea8731ec01e4f17732a873539c8c367e
|
[
"MIT"
] | null | null | null |
client/utils/data_request_type.py
|
devhid/tnnl
|
72cf2b2fea8731ec01e4f17732a873539c8c367e
|
[
"MIT"
] | null | null | null |
client/utils/data_request_type.py
|
devhid/tnnl
|
72cf2b2fea8731ec01e4f17732a873539c8c367e
|
[
"MIT"
] | null | null | null |
from enum import Enum
class DataRequestType(Enum):
""" Enum that represents the type of data request that is sent. """
HEAD = 0 # sent to signify beginning of data transfer
NORMAL = 1 # contains actual file data
TAIL = 2 # sent to signify end of data transfer
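# Illustrative usage (not part of the original module): a minimal sketch of how a sender
# might frame a transfer with these request types. The chunking helper and chunk size are
# hypothetical; only the HEAD -> NORMAL... -> TAIL ordering is implied by the comments above.
def frame_transfer(data, chunk_size=1024):
    frames = [(DataRequestType.HEAD, b"")]                                   # start of transfer
    for i in range(0, len(data), chunk_size):
        frames.append((DataRequestType.NORMAL, data[i:i + chunk_size]))      # actual file data
    frames.append((DataRequestType.TAIL, b""))                               # end of transfer
    return frames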
| 34.625
| 71
| 0.703971
|
8556e6ad2ec40638ce618c789778f5b1bf8e75f1
| 211
|
py
|
Python
|
src/boot.py
|
jsayles/Thing12
|
84a67ed735adfd46ffc2cb384e7a88585e81cb86
|
[
"Apache-2.0"
] | 1
|
2020-03-29T17:06:16.000Z
|
2020-03-29T17:06:16.000Z
|
src/boot.py
|
jsayles/Thing12
|
84a67ed735adfd46ffc2cb384e7a88585e81cb86
|
[
"Apache-2.0"
] | null | null | null |
src/boot.py
|
jsayles/Thing12
|
84a67ed735adfd46ffc2cb384e7a88585e81cb86
|
[
"Apache-2.0"
] | null | null | null |
# This file is executed on every boot (including wake-boot from deepsleep)
import gc
# Disable the ESP debug statements
#import esp
#esp.osdebug(None)
# Web REPL
#import webrepl
#webrepl.start()
gc.collect()
| 16.230769
| 74
| 0.753555
|
16d23475f0797ecdd082e59427ee82f5af7de14e
| 4,793
|
py
|
Python
|
Python/klampt/model/create/moving_base_robot.py
|
joaomcm/Klampt
|
a184c885ad1d1f120511d95229e33b3da1908665
|
[
"BSD-3-Clause"
] | 238
|
2015-01-09T15:21:27.000Z
|
2022-03-30T22:48:45.000Z
|
Python/klampt/model/create/moving_base_robot.py
|
tcrapse/Klampt
|
d5a334e73f1f24ba4c606e03f49915b353799a57
|
[
"BSD-3-Clause"
] | 89
|
2015-08-26T16:56:42.000Z
|
2022-03-29T23:45:46.000Z
|
Python/klampt/model/create/moving_base_robot.py
|
tcrapse/Klampt
|
d5a334e73f1f24ba4c606e03f49915b353799a57
|
[
"BSD-3-Clause"
] | 84
|
2015-01-10T18:41:52.000Z
|
2022-03-30T03:32:50.000Z
|
"""Common code for creating and moving free-floating moving bases.
The way to do this is to add a "virtual linkage" of 3 translational DOFs
and 3 revolute DOFs. Some tuning may need to be done to the motor drivers
in order to make the controller stable.
"""
import os
from klampt.math import vectorops,so3
def make(robotfile,world,tempname="temp.rob",debug=False):
"""Converts the given fixed-base robot file into a moving base robot
and loads it into the given world.
Args:
robotfile (str): the name of a fixed-base robot file to load
world (WorldModel): a world that will contain the new robot
tempname (str, optional): a name of a temporary file containing
the moving-base robot
debug (bool, optional): if True, the robot file named by
``tempname`` is not removed from disk.
Returns:
(RobotModel): the loaded robot, stored in ``world``.
"""
_template_ = """### Boilerplate kinematics of a drivable floating (translating and rotating) cube with a robot hand mounted on it
TParent 1 0 0 0 1 0 0 0 1 0 0 0 \\
1 0 0 0 1 0 0 0 1 0 0 0 \\
1 0 0 0 1 0 0 0 1 0 0 0 \\
1 0 0 0 1 0 0 0 1 0 0 0 \\
1 0 0 0 1 0 0 0 1 0 0 0 \\
1 0 0 0 1 0 0 0 1 0 0 0
parents -1 0 1 2 3 4
axis 1 0 0 0 1 0 0 0 1 0 0 1 0 1 0 1 0 0
jointtype p p p r r r
qMin -1 -1 -1 -inf -inf -inf
qMax 1 1 1 inf inf inf
q 0 0 0 0 0 0
links "tx" "ty" "tz" "rz" "ry" "rx"
geometry "" "" "" "" "" "{TriangleMesh\\nOFF\\n8 12 0\\n0 0 0\\n0 0 1\\n0 1 0\\n0 1 1\\n1 0 0\\n1 0 1\\n1 1 0\\n1 1 1\\n3 0 1 3\\n3 0 3 2\\n3 4 6 7\\n3 4 7 5\\n3 0 4 5\\n3 0 5 1\\n3 2 3 7\\n3 2 7 6\\n3 0 2 6\\n3 0 6 4\\n3 1 5 7\\n3 1 7 3\\n}"
geomscale 1 1 1 1 1 0.01
mass 0.1 0.1 0.1 0.1 0.1 0.1
com 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
inertia 0.001 0 0 0 0.001 0 0 0 0.001 \\
0.001 0 0 0 0.001 0 0 0 0.001 \\
0.001 0 0 0 0.001 0 0 0 0.001 \\
0.001 0 0 0 0.001 0 0 0 0.001 \\
0.001 0 0 0 0.001 0 0 0 0.001 \\
0.001 0 0 0 0.001 0 0 0 0.001
torqueMax 500 500 500 50 50 50
accMax 4 4 4 4 4 4 4
velMax 2 2 2 3 3 3
joint normal 0
joint normal 1
joint normal 2
joint spin 3
joint spin 4
joint spin 5
driver normal 0
driver normal 1
driver normal 2
driver normal 3
driver normal 4
driver normal 5
servoP 5000 5000 5000 500 500 500
servoI 10 10 10 .5 .5 .5
servoD 100 100 100 10 10 10
viscousFriction 50 50 50 50 50 50
dryFriction 1 1 1 1 1 1
property sensors <sensors><ForceTorqueSensor name="base_force" link="5" hasForce="1 1 1" hasTorque="1 1 1" /></sensors>
mount 5 "%s" 1 0 0 0 1 0 0 0 1 0 0 0 as "%s"
"""
robotname = os.path.splitext(os.path.basename(robotfile))[0]
f = open(tempname,'w')
f.write(_template_ % (robotfile,robotname))
f.close()
world.loadElement(tempname)
robot = world.robot(world.numRobots()-1)
#set torques
mass = sum(robot.link(i).getMass().mass for i in range(robot.numLinks()))
inertia = 0.0
for i in range(robot.numLinks()):
m = robot.link(i).getMass()
inertia += (vectorops.normSquared(m.com)*m.mass + max(m.inertia))
tmax = robot.getTorqueMax()
tmax[0] = tmax[1] = tmax[2] = mass*9.8*5
tmax[3] = tmax[4] = tmax[5] = inertia*9.8*5
robot.setName("moving-base["+robotname+"]")
robot.setTorqueMax(tmax)
if debug:
robot.saveFile(tempname)
else:
os.remove(tempname)
return robot
def get_xform(robot):
"""For a moving base robot model, returns the current base rotation
matrix R and translation t."""
return robot.link(5).getTransform()
def set_xform(robot,R,t):
"""For a moving base robot model, set the current base rotation
matrix R and translation t. (Note: if you are controlling a robot
during simulation, use send_moving_base_xform_command)
"""
q = robot.getConfig()
for i in range(3):
q[i] = t[i]
roll,pitch,yaw = so3.rpy(R)
q[3]=yaw
q[4]=pitch
q[5]=roll
robot.setConfig(q)
def send_xform_linear(controller,R,t,dt):
"""For a moving base robot model, send a command to move to the
rotation matrix R and translation t using linear interpolation
over the duration dt.
Note: with the reflex model, can't currently set hand commands
and linear base commands simultaneously
"""
q = controller.getCommandedConfig()
for i in range(3):
q[i] = t[i]
roll,pitch,yaw = so3.rpy(R)
q[3]=yaw
q[4]=pitch
q[5]=roll
controller.setLinear(q,dt)
def send_xform_PID(controller,R,t):
"""For a moving base robot model, send a command to move to the
rotation matrix R and translation t by setting the PID setpoint
Note: with the reflex model, can't currently set hand commands
and linear base commands simultaneously
"""
q = controller.getCommandedConfig()
for i in range(3):
q[i] = t[i]
roll,pitch,yaw = so3.rpy(R)
q[3]=yaw
q[4]=pitch
q[5]=roll
v = controller.getCommandedVelocity()
controller.setPIDCommand(q,v)
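# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal workflow: convert a fixed-base robot file into a moving-base robot
# and reposition its virtual 6-DOF base. The file name "robots/gripper.rob" is
# a placeholder, not something shipped with this module.
if __name__ == '__main__':
    from klampt import WorldModel
    world = WorldModel()
    robot = make("robots/gripper.rob", world)
    # Lift the base 0.5 m and yaw it ~90 degrees about +z.
    set_xform(robot, so3.rotation((0, 0, 1), 1.5708), [0, 0, 0.5])
    R, t = get_xform(robot)
    print("base rotation:", R)
    print("base translation:", t)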
| 30.922581
| 256
| 0.665554
|
da98949abf49e2acb40829ac3fab1f07f172acd8
| 5,192
|
py
|
Python
|
examples/sardeshmukh_hoskins.py
|
njweber2/barotropy
|
2cbf9fcba82052e956c52c138f4bfefef77b6381
|
[
"MIT"
] | null | null | null |
examples/sardeshmukh_hoskins.py
|
njweber2/barotropy
|
2cbf9fcba82052e956c52c138f4bfefef77b6381
|
[
"MIT"
] | null | null | null |
examples/sardeshmukh_hoskins.py
|
njweber2/barotropy
|
2cbf9fcba82052e956c52c138f4bfefef77b6381
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from barotropy import (
LinearizedDynamics, LinearizedDiffusion, LinearizedDamping, Forcing,
NonlinearDynamics, NonlinearDiffusion, NonlinearDamping,
super_rotation, debug_plots, gaussian_blob_2d
)
from sympl import (Leapfrog, PlotFunctionMonitor, NetCDFMonitor,
get_component_aliases, get_constant, TendencyInDiagnosticsWrapper)
from datetime import timedelta
import re
import os
import numpy as np
from time import time
import spharm
Re = get_constant('planetary_radius', 'm')
Omega = get_constant('planetary_rotation_rate', 's^-1')
def main():
# ============ Adjustable Variables ============
# Integration Options
dt = timedelta(minutes=15) # timestep
    duration = '48_00:00'  # run duration ('<days>_<hours>:<mins>')
linearized = True
ncout_freq = 6 # netcdf write frequency (hours)
plot_freq = 6 # plot Monitor call frequency (hours)
ntrunc = 42 # triangular truncation for spharm (e.g., 21 --> T21)
# Diffusion Options
diff_on = True # Use diffusion?
k = 2.338e16 # Diffusion coefficient for del^4 hyperdiffusion
# Forcing Options
forcing_on = True # Apply vort. tendency forcing?
damp_ts = 14.7 # Damping timescale (in days)
# I/O Options
ncoutfile = os.path.join(os.path.dirname(__file__), 'sardeshmukh88.nc')
append_nc = False # Append to an existing netCDF file?
# ==============================================
start = time()
# Get the initial state
state = super_rotation(linearized=linearized, ntrunc=ntrunc)
# Set up the Timestepper with the desired Prognostics
if linearized:
dynamics_prog = LinearizedDynamics(ntrunc=ntrunc)
diffusion_prog = LinearizedDiffusion(k=k, ntrunc=ntrunc)
damping_prog = LinearizedDamping(tau=damp_ts)
else:
dynamics_prog = NonlinearDynamics(ntrunc=ntrunc)
diffusion_prog = NonlinearDiffusion(k=k, ntrunc=ntrunc)
damping_prog = NonlinearDamping(tau=damp_ts)
prognostics = [TendencyInDiagnosticsWrapper(dynamics_prog, 'dynamics')]
if diff_on:
prognostics.append(TendencyInDiagnosticsWrapper(diffusion_prog, 'diffusion'))
if forcing_on:
# Get our suptropical RWS forcing (from equatorial divergence)
rws, rlat, rlon = rws_from_tropical_divergence(state)
prognostics.append(TendencyInDiagnosticsWrapper(Forcing.from_numpy_array(rws, rlat, rlon, ntrunc=ntrunc,
linearized=linearized), 'forcing'))
prognostics.append(TendencyInDiagnosticsWrapper(damping_prog, 'damping'))
stepper = Leapfrog(prognostics)
# Create Monitors for plotting & storing data
plt_monitor = PlotFunctionMonitor(debug_plots.fourpanel)
if os.path.isfile(ncoutfile) and not append_nc:
os.remove(ncoutfile)
aliases = get_component_aliases(*prognostics)
nc_monitor = NetCDFMonitor(ncoutfile, write_on_store=True, aliases=aliases)
# Figure out the end date of this run
d, h, m = re.split('[_:]', duration)
end_date = state['time'] + timedelta(days=int(d), hours=int(h), minutes=int(m))
# Begin the integration loop
idate = state['time']
while state['time'] <= end_date:
# Get the state at the next timestep using our Timestepper
diagnostics, next_state = stepper(state, dt)
# Add any calculated diagnostics to our current state
state.update(diagnostics)
# Write state to netCDF every <ncout_freq> hours
fhour = (state['time'] - idate).days*24 + (state['time'] - idate).seconds/3600
if fhour % ncout_freq == 0:
print(state['time'])
nc_monitor.store(state)
# Make plot(s) every <plot_freq> hours
if fhour % plot_freq == 0:
plt_monitor.store(state)
# Advance the state to the next timestep
next_state['time'] = state['time'] + dt
state = next_state
print('TOTAL INTEGRATION TIME: {:.02f} min\n'.format((time()-start)/60.))
def rws_from_tropical_divergence(state, center=(0., 145.), amp=6e-6, width=12):
# Get desired state variables
lats = state['latitude'].values
lons = state['longitude'].values
vort_bar = state['base_atmosphere_relative_vorticity'].values
s = spharm.Spharmt(lats.shape[1], lons.shape[0], gridtype='regular', rsphere=Re)
vortb_spec = s.grdtospec(vort_bar)
ubar, vbar = s.getuv(vortb_spec, np.zeros(vortb_spec.shape))
divergence = gaussian_blob_2d(lats, lons, center, width, amp)
# Calculate the Rossby Wave Source
# Term 1
zetabar_spec, _ = s.getvrtdivspec(ubar, vbar)
zetabar = s.spectogrd(zetabar_spec) + 2 * Omega * np.sin(np.deg2rad(lats))
term1 = -zetabar * divergence
# Term 2
uchi, vchi = s.getuv(np.zeros(zetabar_spec.shape), s.grdtospec(divergence))
dzeta_dx, dzeta_dy = s.getgrad(s.grdtospec(zetabar))
term2 = - uchi * dzeta_dx - vchi * dzeta_dy
rws = term1 + term2
return rws, lats, lons
if __name__ == '__main__':
main()
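# --- Added annotation (not part of the original script) ---
# rws_from_tropical_divergence implements the barotropic Rossby wave source of
# Sardeshmukh & Hoskins (1988):
#     S = -div(v_chi * zeta_a) = -zeta_a * D  -  v_chi . grad(zeta_a),
# with zeta_a = zeta + f the absolute vorticity of the background flow, D the
# prescribed Gaussian equatorial divergence, and v_chi the divergent wind
# derived from D. In the code above, term1 is the vortex-stretching part
# (-zeta_a * D) and term2 is the advection of absolute vorticity by the
# divergent flow.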
| 39.333333
| 116
| 0.654468
|
c9ce508dfdb05569f4f212137032a7dd16e86a55
| 2,549
|
py
|
Python
|
tensorflow/examples/learn/boston.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 522
|
2016-06-08T02:15:50.000Z
|
2022-03-02T05:30:36.000Z
|
tensorflow/examples/learn/boston.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 133
|
2017-04-26T16:49:49.000Z
|
2019-10-15T11:39:26.000Z
|
tensorflow/examples/learn/boston.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 108
|
2016-06-16T15:34:05.000Z
|
2022-03-12T13:23:11.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
# Load dataset
boston = datasets.load_boston()
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = model_selection.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column('x', shape=np.array(x_train).shape[1:])]
regressor = tf.estimator.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': x_train}, y=y_train, batch_size=1, num_epochs=None, shuffle=True)
regressor.train(input_fn=train_input_fn, steps=2000)
# Predict.
x_transformed = scaler.transform(x_test)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': x_transformed}, y=y_test, num_epochs=1, shuffle=False)
predictions = regressor.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['predictions'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score_sklearn = metrics.mean_squared_error(y_predicted, y_test)
print('MSE (sklearn): {0:f}'.format(score_sklearn))
# Score with tensorflow.
scores = regressor.evaluate(input_fn=test_input_fn)
print('MSE (tensorflow): {0:f}'.format(scores['average_loss']))
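# --- Illustrative extension (added sketch, not part of the original example) ---
# Scoring one new, unscaled observation with the trained model; it reuses the
# `scaler` and `regressor` objects built in main(), so in practice this helper
# would be called from there.
def predict_one(regressor, scaler, features):
  # `features` is a raw 13-element Boston housing feature vector.
  x_new = scaler.transform(np.asarray(features, dtype=np.float64).reshape(1, -1))
  input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': x_new}, num_epochs=1, shuffle=False)
  return next(regressor.predict(input_fn=input_fn))['predictions']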
if __name__ == '__main__':
tf.app.run()
| 35.402778
| 79
| 0.75206
|
207e0a773b8923441c1b288e9e18489069f138eb
| 37,694
|
py
|
Python
|
tensorflow/contrib/layers/python/layers/feature_column_ops.py
|
RMORIOKA/tensorflow
|
6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/layers/python/layers/feature_column_ops.py
|
RMORIOKA/tensorflow
|
6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/layers/python/layers/feature_column_ops.py
|
RMORIOKA/tensorflow
|
6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to FeatureColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def _embeddings_from_arguments(column,
args,
weight_collections,
trainable,
output_rank=2):
"""Returns embeddings for a column based on the computed arguments.
Args:
column: the column name.
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
output_rank: the desired rank of the returned `Output`. Inner dimensions will
be combined to produce the desired rank.
Returns:
the embeddings.
Raises:
ValueError: if not possible to create.
"""
# pylint: disable=protected-access
input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
weight_tensor = None
if args.weight_tensor is not None:
weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
# pylint: enable=protected-access
if args.hashed:
embeddings = contrib_variables.model_variable(
name='weights',
shape=[args.vocab_size],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
return embedding_ops.hashed_embedding_lookup_sparse(
embeddings, input_tensor, args.dimension,
combiner=args.combiner, name='lookup')
if args.shared_embedding_name is not None:
shared_embedding_collection_name = (
'SHARED_EMBEDDING_COLLECTION_' + args.shared_embedding_name.upper())
graph = ops.get_default_graph()
shared_embedding_collection = (
graph.get_collection_ref(shared_embedding_collection_name))
shape = [args.vocab_size, args.dimension]
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError('Collection %s can only contain one '
'(partitioned) variable.'
% shared_embedding_collection_name)
else:
embeddings = shared_embedding_collection[0]
if embeddings.get_shape() != shape:
raise ValueError('The embedding variable with name {} already '
'exists, but its shape does not match required '
'embedding shape here. Please make sure to use '
'different shared_embedding_name for different '
'shared embeddings.'.format(
args.shared_embedding_name))
else:
embeddings = contrib_variables.model_variable(
name=args.shared_embedding_name,
shape=shape,
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
graph.add_to_collection(shared_embedding_collection_name, embeddings)
else:
embeddings = contrib_variables.model_variable(
name='weights',
shape=[args.vocab_size, args.dimension],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(embeddings, variables.Variable):
embeddings = [embeddings]
else:
embeddings = embeddings._get_variable_list() # pylint: disable=protected-access
# pylint: disable=protected-access
_maybe_restore_from_checkpoint(
column._checkpoint_path(), embeddings)
return embedding_ops.safe_embedding_lookup_sparse(
embeddings,
input_tensor,
sparse_weights=weight_tensor,
combiner=args.combiner,
name=column.name + 'weights',
max_norm=args.max_norm)
def _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank,
default_name):
"""Implementation of `input_from(_sequence)_feature_columns`."""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(scope,
default_name=default_name,
values=columns_to_tensors.values()):
output_tensors = []
transformer = _Transformer(columns_to_tensors)
if weight_collections:
weight_collections = list(set(list(weight_collections) +
[ops.GraphKeys.GLOBAL_VARIABLES]))
for column in sorted(set(feature_columns), key=lambda x: x.key):
with variable_scope.variable_scope(None,
default_name=column.name,
values=columns_to_tensors.values()):
transformed_tensor = transformer.transform(column)
try:
# pylint: disable=protected-access
arguments = column._deep_embedding_lookup_arguments(
transformed_tensor)
output_tensors.append(_embeddings_from_arguments(
column,
arguments,
weight_collections,
trainable,
output_rank=output_rank))
except NotImplementedError as ee:
try:
# pylint: disable=protected-access
output_tensors.append(column._to_dnn_input_layer(
transformed_tensor,
weight_collections,
trainable,
output_rank=output_rank))
except ValueError as e:
raise ValueError('Error creating input layer for column: {}.\n'
'{}, {}'.format(column.name, e, ee))
return array_ops.concat(output_rank - 1, output_tensors)
def input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layer style input layer builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
At the first layer of the model, this column oriented data should be converted
to a single tensor. Each feature column needs a different kind of operation
during this conversion. For example sparse features need a totally different
handling than continuous features.
Example:
```python
# Building model for training
columns_to_tensor = tf.parse_example(...)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns)
second_layer = fully_connected(inputs=first_layer, ...)
...
```
where feature_columns can be defined as follows:
```python
sparse_feature = sparse_column_with_hash_bucket(
column_name="sparse_col", ...)
sparse_feature_emb = embedding_column(sparse_id_column=sparse_feature, ...)
real_valued_feature = real_valued_column(...)
real_valued_buckets = bucketized_column(
source_column=real_valued_feature, ...)
feature_columns=[sparse_feature_emb, real_valued_buckets]
```
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived by FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
return _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=2,
default_name='input_from_feature_columns')
@experimental
def sequence_input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""Builds inputs for sequence models from `FeatureColumn`s.
See documentation for `input_from_feature_columns`. The following types of
`FeatureColumn` are permitted in `feature_columns`: `_OneHotColumn`,
`_EmbeddingColumn`, `_HashedEmbeddingColumn`, `_RealValuedColumn`,
`_DataFrameColumn`. In addition, columns in `feature_columns` may not be
constructed using any of the following: `HashedEmbeddingColumn`,
`BucketizedColumn`, `CrossedColumn`.
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived by FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
_check_supported_sequence_columns(feature_columns)
_check_forbidden_sequence_columns(feature_columns)
return _input_from_feature_columns(
columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=3,
default_name='sequence_input_from_feature_columns')
def _create_embedding_lookup(column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates variables and returns predictions for linear weights in a model.
Args:
column: the column we're working on.
columns_to_tensors: a map from column name to tensors.
embedding_lookup_arguments: arguments for embedding lookup.
num_outputs: how many outputs.
trainable: whether the variable we create is trainable.
weight_collections: weights will be placed here.
Returns:
variables: the created embeddings.
predictions: the computed predictions.
"""
with variable_scope.variable_scope(
None, default_name=column.name, values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[embedding_lookup_arguments.vocab_size, num_outputs],
dtype=dtypes.float32,
initializer=embedding_lookup_arguments.initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(variable, variables.Variable):
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
embedding_lookup_arguments.input_tensor,
sparse_weights=embedding_lookup_arguments.weight_tensor,
combiner=embedding_lookup_arguments.combiner,
name=column.name + '_weights')
return variable, predictions
def _maybe_restore_from_checkpoint(checkpoint_path, variable):
if checkpoint_path is not None:
path, tensor_name = checkpoint_path
weights_to_restore = variable
if len(variable) == 1:
weights_to_restore = variable[0]
checkpoint_utils.init_from_checkpoint(path,
{tensor_name: weights_to_restore})
def _create_joint_embedding_lookup(columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates an embedding lookup for all columns sharing a single weight."""
for arg in embedding_lookup_arguments:
assert arg.weight_tensor is None, (
'Joint sums for weighted sparse columns are not supported. '
'Please use weighted_sum_from_feature_columns instead.')
assert arg.combiner == 'sum', (
'Combiners other than sum are not supported for joint sums. '
'Please use weighted_sum_from_feature_columns instead.')
assert len(embedding_lookup_arguments) >= 1, (
'At least one column must be in the model.')
prev_size = 0
sparse_tensors = []
for a in embedding_lookup_arguments:
t = a.input_tensor
values = t.values + prev_size
prev_size += a.vocab_size
sparse_tensors.append(
sparse_tensor_py.SparseTensor(t.indices,
values,
t.shape))
sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
with variable_scope.variable_scope(
None, default_name='linear_weights', values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[prev_size, num_outputs],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(variable, variables.Variable):
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
sparse_tensor,
sparse_weights=None,
combiner='sum',
name='_weights')
return variable, predictions
def joint_weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A restricted linear prediction builder based on FeatureColumns.
As long as all feature columns are unweighted sparse columns this computes the
prediction of a linear model which stores all weights in a single variable.
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying number of outputs. Default value is 1.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A list of Variables storing the weights.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='joint_weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
transformer = _Transformer(columns_to_tensors)
embedding_lookup_arguments = []
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments.append(
column._wide_embedding_lookup_arguments(transformed_tensor)) # pylint: disable=protected-access
except NotImplementedError:
raise NotImplementedError('Real-valued columns are not supported. '
'Use weighted_sum_from_feature_columns '
'instead, or bucketize these columns.')
variable, predictions_no_bias = _create_joint_embedding_lookup(
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, variable, bias
def weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layer style linear prediction builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
This function generates weighted sum for each num_outputs. Weighted sum refers
to logits in classification problems. It refers to prediction itself for
linear regression problems.
Example:
```
# Building model for training
feature_columns = (
real_valued_column("my_feature1"),
...
)
columns_to_tensor = tf.parse_example(...)
logits = weighted_sum_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns,
num_outputs=1)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, labels)
```
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying number of outputs. Default value is 1.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A dictionary which maps feature_column to corresponding Variable.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
output_tensors = []
column_to_variable = dict()
transformer = _Transformer(columns_to_tensors)
# pylint: disable=protected-access
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments = column._wide_embedding_lookup_arguments(
transformed_tensor)
variable, predictions = _create_embedding_lookup(
column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
except NotImplementedError:
with variable_scope.variable_scope(
None,
default_name=column.name,
values=columns_to_tensors.values()):
tensor = column._to_dense_tensor(transformed_tensor)
tensor = fc._reshape_real_valued_tensor(tensor, 2, column.name)
variable = [contrib_variables.model_variable(
name='weight',
shape=[tensor.get_shape()[1], num_outputs],
initializer=init_ops.zeros_initializer,
collections=weight_collections)]
predictions = math_ops.matmul(tensor, variable[0], name='matmul')
except ValueError as ee:
raise ValueError('Error creating weighted sum for column: {}.\n'
'{}'.format(column.name, ee))
output_tensors.append(predictions)
column_to_variable[column] = variable
_log_variable(variable)
_maybe_restore_from_checkpoint(column._checkpoint_path(), variable)
# pylint: enable=protected-access
predictions_no_bias = math_ops.add_n(output_tensors)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, column_to_variable, bias
def parse_feature_columns_from_examples(serialized,
feature_columns,
name=None,
example_names=None):
"""Parses tf.Examples to extract tensors for given feature_columns.
This is a wrapper of 'tf.parse_example'.
Example:
```python
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=my_features)
# Where my_features are:
# Define features and transformations
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature, boundaries=[...])
my_features = [embedding_feature_b, real_feature_buckets, embedding_feature_a]
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping FeatureColumn to `Output` and `SparseTensor` values.
"""
check_feature_columns(feature_columns)
columns_to_tensors = parsing_ops.parse_example(
serialized=serialized,
features=fc.create_feature_spec_for_parsing(feature_columns),
name=name,
example_names=example_names)
transformer = _Transformer(columns_to_tensors)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformer.transform(column)
return columns_to_tensors
def transform_features(features, feature_columns):
"""Returns transformed features based on features columns passed in.
Example:
```python
columns_to_tensor = transform_features(features=features,
feature_columns=feature_columns)
# Where my_features are:
# Define features and transformations
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature, boundaries=[...])
feature_columns = [embedding_feature_b,
real_feature_buckets,
embedding_feature_a]
```
Args:
features: A dictionary of features.
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
Returns:
A `dict` mapping FeatureColumn to `Output` and `SparseTensor` values.
"""
check_feature_columns(feature_columns)
columns_to_tensor = features.copy()
transformer = _Transformer(columns_to_tensor)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformer.transform(column)
keys = list(columns_to_tensor.keys())
for k in keys:
if k not in feature_columns:
columns_to_tensor.pop(k)
return columns_to_tensor
def parse_feature_columns_from_sequence_examples(
serialized,
context_feature_columns,
sequence_feature_columns,
name=None,
example_name=None):
"""Parses tf.SequenceExamples to extract tensors for given `FeatureColumn`s.
Args:
serialized: A scalar (0-D Tensor) of type string, a single serialized
`SequenceExample` proto.
context_feature_columns: An iterable containing the feature columns for
context features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
sequence_feature_columns: An iterable containing the feature columns for
sequence features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
name: A name for this operation (optional).
example_name: A scalar (0-D Tensor) of type string (optional), the names of
the serialized proto.
Returns:
A tuple consisting of:
context_features: a dict mapping `FeatureColumns` from
`context_feature_columns` to their parsed `Output`s/`SparseTensor`s.
sequence_features: a dict mapping `FeatureColumns` from
`sequence_feature_columns` to their parsed `Output`s/`SparseTensor`s.
"""
# Sequence example parsing requires a single (scalar) example.
try:
serialized = array_ops.reshape(serialized, [])
except ValueError as e:
raise ValueError(
        'serialized must contain a single sequence example. Batching must be '
'done after parsing for sequence examples. Error: {}'.format(e))
if context_feature_columns is None:
context_feature_columns = []
if sequence_feature_columns is None:
sequence_feature_columns = []
check_feature_columns(context_feature_columns)
context_feature_spec = fc.create_feature_spec_for_parsing(
context_feature_columns)
check_feature_columns(sequence_feature_columns)
sequence_feature_spec = fc._create_sequence_feature_spec_for_parsing( # pylint: disable=protected-access
sequence_feature_columns, allow_missing_by_default=False)
return parsing_ops.parse_single_sequence_example(serialized,
context_feature_spec,
sequence_feature_spec,
example_name,
name)
def _log_variable(variable):
if isinstance(variable, list):
for var in variable:
      if isinstance(var, variables.Variable):
logging.info('Created variable %s, with device=%s', var.name,
var.device)
elif isinstance(variable, variables.Variable):
logging.info('Created variable %s, with device=%s', variable.name,
variable.device)
def _infer_real_valued_column_for_tensor(name, tensor):
"""Creates a real_valued_column for given tensor and name."""
if isinstance(tensor, sparse_tensor_py.SparseTensor):
raise ValueError(
'SparseTensor is not supported for auto detection. Please define '
'corresponding FeatureColumn for tensor {} {}.', name, tensor)
if not (tensor.dtype.is_integer or tensor.dtype.is_floating):
raise ValueError(
'Non integer or non floating types are not supported for auto detection'
'. Please define corresponding FeatureColumn for tensor {} {}.', name,
tensor)
shape = tensor.get_shape().as_list()
dimension = 1
for i in range(1, len(shape)):
dimension *= shape[i]
return fc.real_valued_column(name, dimension=dimension, dtype=tensor.dtype)
def infer_real_valued_columns(features):
if not isinstance(features, dict):
return [_infer_real_valued_column_for_tensor('', features)]
feature_columns = []
for key, value in features.items():
feature_columns.append(_infer_real_valued_column_for_tensor(key, value))
return feature_columns
def check_feature_columns(feature_columns):
"""Checks the validity of the set of FeatureColumns.
Args:
feature_columns: A set of instances or subclasses of FeatureColumn.
Raises:
ValueError: If there are duplicate feature column keys.
"""
seen_keys = set()
for f in feature_columns:
key = f.key
if key in seen_keys:
raise ValueError('Duplicate feature column key found for column: {}. '
'This usually means that the column is almost identical '
'to another column, and one must be discarded.'.format(
f.name))
seen_keys.add(key)
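# Illustrative sketch (added annotation, not part of the original module):
# constructing two columns that resolve to the same key trips the check above,
# e.g.
#   check_feature_columns([fc.real_valued_column('age'),
#                          fc.real_valued_column('age')])
# raises ValueError, because both columns report the same `key`.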
class _Transformer(object):
"""Handles all the transformations defined by FeatureColumn if needed.
FeatureColumn specifies how to digest an input column to the network. Some
feature columns require data transformations. This class handles those
transformations if they are not handled already.
Some features may be used in more than one place. For example, one can use a
bucketized feature by itself and a cross with it. In that case Transformer
should create only one bucketization op instead of multiple ops for each
feature column. To handle re-use of transformed columns, Transformer keeps all
previously transformed columns.
Example:
```python
sparse_feature = sparse_column_with_hash_bucket(...)
real_valued_feature = real_valued_column(...)
real_valued_buckets = bucketized_column(source_column=real_valued_feature,
...)
sparse_x_real = crossed_column(
columns=[sparse_feature, real_valued_buckets], hash_bucket_size=10000)
columns_to_tensor = tf.parse_example(...)
transformer = Transformer(columns_to_tensor)
sparse_x_real_tensor = transformer.transform(sparse_x_real)
sparse_tensor = transformer.transform(sparse_feature)
real_buckets_tensor = transformer.transform(real_valued_buckets)
```
"""
def __init__(self, columns_to_tensors):
"""Initializes transfomer.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have FeatureColumn as
a key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
Transformed features are inserted in columns_to_tensors.
"""
self._columns_to_tensors = columns_to_tensors
def transform(self, feature_column):
"""Returns a Tensor which represents given feature_column.
Args:
feature_column: An instance of FeatureColumn.
Returns:
A Tensor which represents given feature_column. It may create a new Tensor
or re-use an existing one.
Raises:
ValueError: if FeatureColumn cannot be handled by this Transformer.
"""
logging.debug('Transforming feature_column %s', feature_column)
if feature_column in self._columns_to_tensors:
# Feature_column is already transformed.
return self._columns_to_tensors[feature_column]
feature_column.insert_transformed_feature(self._columns_to_tensors)
if feature_column not in self._columns_to_tensors:
raise ValueError('Column {} is not supported.'.format(
feature_column.name))
return self._columns_to_tensors[feature_column]
def _add_variable_collection(weight_collections):
if weight_collections:
weight_collections = list(
set(list(weight_collections) + [ops.GraphKeys.GLOBAL_VARIABLES]))
return weight_collections
# TODO(jamieas): remove the following logic once all FeatureColumn types are
# supported for sequences.
# pylint: disable=protected-access
_SUPPORTED_SEQUENCE_COLUMNS = (fc._OneHotColumn,
fc._EmbeddingColumn,
fc._RealValuedColumn)
_FORBIDDEN_SEQUENCE_COLUMNS = (fc._HashedEmbeddingColumn,
fc._BucketizedColumn,
fc._CrossedColumn)
def _check_supported_sequence_columns(feature_columns):
"""Asserts `feature_columns` are in `_SUPPORTED_SEQUENCE_COLUMNS`."""
for col in feature_columns:
if not isinstance(col, _SUPPORTED_SEQUENCE_COLUMNS):
raise ValueError(
'FeatureColumn type {} is not currently supported for sequence data.'.
format(type(col).__name__))
def _get_parent_columns(feature_column):
"""Returns the tuple of `FeatureColumn`s that `feature_column` depends on."""
if isinstance(feature_column, (fc._WeightedSparseColumn,
fc._OneHotColumn,
fc._EmbeddingColumn,)):
return (feature_column.sparse_id_column,)
if isinstance(feature_column, (fc._BucketizedColumn,)):
return (feature_column.source_column,)
if isinstance(feature_column, (fc._CrossedColumn)):
return tuple(feature_column.columns)
return tuple()
def _gather_feature_columns(feature_columns):
"""Returns a list of all ancestor `FeatureColumns` of `feature_columns`."""
gathered = list(feature_columns)
i = 0
while i < len(gathered):
for column in _get_parent_columns(gathered[i]):
if column not in gathered:
gathered.append(column)
i += 1
return gathered
def _check_forbidden_sequence_columns(feature_columns):
"""Recursively cecks `feature_columns` for `_FORBIDDEN_SEQUENCE_COLUMNS`."""
all_feature_columns = _gather_feature_columns(feature_columns)
for feature_column in all_feature_columns:
if isinstance(feature_column, _FORBIDDEN_SEQUENCE_COLUMNS):
raise ValueError(
'Column {} is of type {}, which is not currently supported for '
'sequences.'.format(feature_column.name,
type(feature_column).__name__))
| 40.1
| 109
| 0.687802
|
263d1b061f95f3c72d83f1cd1095e8510615b43c
| 6,080
|
py
|
Python
|
ros/src/twist_controller/dbw_node.py
|
marcin-sielski/CarND-Capstone
|
9d305fef9a908604809d5b0056a19ce8ff2e1edd
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/dbw_node.py
|
marcin-sielski/CarND-Capstone
|
9d305fef9a908604809d5b0056a19ce8ff2e1edd
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/dbw_node.py
|
marcin-sielski/CarND-Capstone
|
9d305fef9a908604809d5b0056a19ce8ff2e1edd
|
[
"MIT"
] | 1
|
2020-01-13T20:02:31.000Z
|
2020-01-13T20:02:31.000Z
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, it's enabled all the time; in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
BrakeCmd, queue_size=1)
# TODO: Create `Controller` object
# self.controller = Controller(<Arguments you wish to provide>)
self.controller = Controller(vehicle_mass=vehicle_mass,
fuel_capacity=fuel_capacity,
brake_deadband=brake_deadband,
decel_limit=decel_limit,
accel_limit=accel_limit,
wheel_radius=wheel_radius,
wheel_base=wheel_base,
steer_ratio=steer_ratio,
max_lat_accel=max_lat_accel,
max_steer_angle=max_steer_angle)
# TODO: Subscribe to all the topics you need to
rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
self.current_vel = None
self.curr_ang_vel = None
self.dbw_enabled = None
self.linear_vel = None
self.angular_vel = None
        self.throttle = self.steering = self.brake = 0
self.loop()
def loop(self):
rate = rospy.Rate(50) # 50Hz
while not rospy.is_shutdown():
# TODO: Get predicted throttle, brake, and steering using `twist_controller`
# You should only publish the control commands if dbw is enabled
# throttle, brake, steering = self.controller.control(<current linear velocity>,
# <dbw status>,
# <proposed linear velocity>,
# <proposed angular velocity>,
# <any other argument you need>)
# if <dbw is enabled>:
# self.publish(throttle, brake, steer)
if not None in (self.current_vel, self.linear_vel, self.angular_vel):
self.throttle, self.brake, self.steering = self.controller.control(self.current_vel,
self.dbw_enabled,
self.linear_vel,
self.angular_vel)
if self.dbw_enabled:
self.publish(self.throttle, self.brake, self.steering)
rate.sleep()
def dbw_enabled_cb(self, msg):
        self.dbw_enabled = msg.data  # unwrap the std_msgs/Bool payload
def twist_cb(self, msg):
self.linear_vel = msg.twist.linear.x
self.angular_vel = msg.twist.angular.z
def velocity_cb(self, msg):
self.current_vel = msg.twist.linear.x
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
if __name__ == '__main__':
DBWNode()
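# --- Added note (not part of the original node) ---
# The dbw_enabled flag matters because PID integral error accumulates while a
# safety driver has the car. A common convention (assumed here, since the
# Controller class lives in twist_controller.py) is to reset the PID state and
# return zero commands whenever dbw is disabled, e.g.:
#
#   def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
#       if not dbw_enabled:
#           self.throttle_controller.reset()   # hypothetical PID attribute
#           return 0.0, 0.0, 0.0
#       ...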
| 44.705882
| 101
| 0.592928
|
65d9d1824cabf585def706e2aa1ff3056d2bdccc
| 10,885
|
py
|
Python
|
preprocessing/preprocessor.py
|
saams4u/BIMODAL
|
0a52d4e6eef4ad244904fb51892e948ab4a4336e
|
[
"CC-BY-4.0"
] | null | null | null |
preprocessing/preprocessor.py
|
saams4u/BIMODAL
|
0a52d4e6eef4ad244904fb51892e948ab4a4336e
|
[
"CC-BY-4.0"
] | null | null | null |
preprocessing/preprocessor.py
|
saams4u/BIMODAL
|
0a52d4e6eef4ad244904fb51892e948ab4a4336e
|
[
"CC-BY-4.0"
] | null | null | null |
"""
Implementation of all preprocessing steps
"""
import pandas as pd
import numpy as np
from rdkit import Chem
import sys
import os
np.random.seed(1)
class Preprocessor:
def __init__(self, name):
# where name is the name of the file
# List to store data
self._data = []
# If True, check after each function that all duplicates are still removed
self._duplicates_removed = False
if os.path.isfile(name + '.csv'):
self._data = pd.read_csv(name + '.csv', header=None).values[:, 0]
elif os.path.isfile(name + '.tar.xz'):
# Skip first line since empty and last line since nan
self._data = pd.read_csv(name + '.tar.xz', compression='xz', header=None).values[1:-1, 0]
elif os.path.isfile(name + '.smi.zip'):
# Skip first line since empty and last line since nan
self._data = pd.read_csv(name + '.smi.zip', compression='zip', header=None).values[1:-1, 0]
# Remove empty dimensions
self._data = np.squeeze(self._data)
return
def preprocess(self, name, aug=1, length=128):
"""
Preprocess data depending on model type
:param name: Name of the model
:param aug: Data augmentation
:return:
"""
if name == "ForwardRNN":
self.add_ending('E')
self.add_sentinel('G')
self.padding_right('A', l=length+2)
elif name == "FBRNN_fixed" or name == "BIMODAL_fixed":
self.add_middle('G')
self.add_ending('E')
self.add_sentinel('E')
self.padding_left_right('A', l=length+3)
elif name == "FBRNN_random" or name == "BIMODAL_random":
self.add_ending('E')
self.add_sentinel('E')
self.add_token_random_padding(start_token='G', pad_token='A', aug=aug, l=3+length*2)
elif name == "NADE_fixed":
            self.padding_left_right('A', l=length)
            self.add_ending('G')
            self.add_sentinel('G')
elif name == "NADE_random":
self.padding_left_right('A', l=length)
self.add_ending('G')
self.add_sentinel('G')
self.insert_missing_token(missing_token='M', aug=aug)
else:
print("CAN NOT FIND MODEL")
sys.exit()
def remove_not_valid(self):
"""Remove all SMILES not accepted by the RDKit
:return:
"""
# Store index to delete
to_delete = []
# Find not valid SMILES
for i, s in enumerate(self._data):
mol = Chem.MolFromSmiles(str(s))
if mol is None:
to_delete.append(i)
# Delete SMILES
if len(to_delete) != 0:
self._data = np.delete(self._data, to_delete)
return
def remove_duplicates(self):
"""Remove all SMILES appearing more than once
:return:
"""
self._data = np.unique(self._data)
# Set flag to always remove duplicated after an operation
self._duplicates_removed = True
return
def remove_stereochem(self):
"""Remove all token related stereochemistry
:return:
"""
# Token used for stereochemistry
stereochem_token = ['/', '@', '\\']
for t in stereochem_token:
self.remove_token(t)
# Remove possible created duplicates
if self._duplicates_removed:
self.remove_duplicates()
return
def remove_token(self, t):
"""Remove token t from all elements of data
:param t: token to remove
:return:
"""
self._data = np.array([d.replace(t, '') for d in self._data])
# Remove possible created duplicates
if self._duplicates_removed:
self.remove_duplicates()
return
def remove_salts(self):
"""Remove all salts
Non-bonded interactions are represented by '.'
We assume that the one with the largest SMILES sequence should be preserved
:return:
"""
for i, s in enumerate(self._data):
splits = s.split('.')
# Select longest part of SMILES
self._data[i] = max(splits, key=len)
# Remove possible deposits
self.remove_token('.')
# Remove possible created duplicates
if self._duplicates_removed:
self.remove_duplicates()
return
def canonicalize(self):
"""Canonicalize all SMILES from data
:return:
"""
for i, s in enumerate(self._data):
mol = Chem.MolFromSmiles(str(s))
self._data[i] = Chem.MolToSmiles(mol, isomericSmiles=True, canonical=True)
# Remove possible created duplicates
if self._duplicates_removed:
self.remove_duplicates()
return
def remove_length(self, min_len=34, max_len=128):
"""Keep only SMILES with a length between min and max
:param min_len: minimal length (-1: no minimal length)
max_len: maximal length (-1: no maximal length)
:return:
"""
# Store index to delete
to_delete = []
# Find strings longer than max
if max_len != -1:
for i, s in enumerate(self._data):
if len(s) > max_len:
to_delete.append(i)
        # Find strings shorter than min_len
        if min_len != -1:
for i, s in enumerate(self._data):
if len(s) < min_len:
to_delete.append(i)
# Remove elements
self._data = np.delete(self._data, to_delete)
return
def add_sentinel(self, token='E'):
"""Add token at the beginning of each SMILES
:param token: token to insert
:return:
"""
data = []
for i, s in enumerate(self._data):
data.append(token + s)
self._data = data
return
def add_ending(self, token='E'):
"""Add token at the end of each SMILES
:param token: token to insert
:return:
"""
data = []
for i, s in enumerate(self._data):
data.append(s + token)
self._data = data
return
def add_middle(self, token='G'):
"""Add token in the middle of each SMILES
:param token: token to insert
:return:
"""
data = []
for i, s in enumerate(self._data):
mid = len(s) // 2
data.append(s[:mid] + token + s[mid:])
self._data = data
return
def add_token_random_padding(self, start_token='G', pad_token='A', aug=5, l=0):
        '''Add start_token at n different random positions and pad so that start_token sits in the middle of the resulting sequence
        Method should be applied after add_ending
:param start_token: token introduced in the string
:param pad_token: token used for padding
        :param aug: number of augmentations per SMILES
:param l: length of the final string (if l=0 use length of longest string)
'''
# Compute length of longest string
if l == 0:
max_l = len(max(self._data, key=len)) - 1
else:
max_l = l // 2
aug_data = np.empty((self._data.size, aug)).astype(object)
for i, s in enumerate(self._data):
l = len(s)
# Choose n different position for starting token (after 0 and before l-1,
# since 0 and l-1 are special tokens for the ending (E))
r = np.random.choice(np.arange(l - 1) + 1, aug, replace=False)
# Tmp array to store augmentation of a SMILES
for j, r_j in enumerate(r):
# Added token should be located within the molecule (after 0 and before l-1,
# since 0 and l-1 are special tokens for the ending (E)
aug_data[i, j] = s[:r_j].rjust(max_l, pad_token) + start_token + s[r_j:].ljust(max_l, pad_token)
# Convert array to shape (n_samples, n_augmentation)
print(self._data.shape)
self._data = aug_data.astype(str)
def insert_missing_token(self, missing_token='M', aug=1):
"""Insert missing_token at random position and store changed and reference SMILES
:param missing_token: Token used to indicate missing value
"""
# New data array (n_samples, 2) stores correct SMILES and SMILES with missing values
data = np.empty((self._data.size, aug + 1)).astype(object)
data[:, 0] = self._data
for a in range(aug):
data[:, a + 1] = np.copy(self._data)
# Iteration over complete data
for i, s in enumerate(self._data):
# Compute length of current SMILES
l = len(s)
# Compute number of missing values between 0 and l-2 (First and last token are not replaced)
n_missing = np.random.choice(np.arange(l - 2), aug, replace=False)
for a in range(aug):
# Compute position of missing values between 1 and l-2 (First token (0) and
# last token (l-1) are not replaced)
r = np.random.choice(np.arange(l - 2) + 1, n_missing[a], replace=False)
# Insert missing values
for r_i in r:
data[i, a + 1] = data[i, a + 1][:r_i] + missing_token + data[i, a + 1][r_i + 1:]
self._data = data.astype(str)
def padding_right(self, token='A', l=0):
"""Padding of data on the right side to obtain a consistent length
:param token: token used for padding
:param l: target length (if l=0 use length of longest string)
:return: the length used for padding
"""
# Compute length of longest string if no length specified
if l == 0:
l = len(max(self._data, key=len))
# Padding of all strings in array
data = []
for i, s in enumerate(self._data):
data.append(s.ljust(l, token))
self._data = data
return l
def padding_left_right(self, token='A', l=0):
"""Padding of data on the right and left side to obtain a consistent length
:param token: token used for padding
:param l: target length (if l=0 use length of longest string)
:return: the length used for padding
"""
# Compute length of longest string
if l == 0:
l = len(max(self._data, key=len))
# Padding of all strings in array
data = []
for i, s in enumerate(self._data):
data.append(s.center(l, token))
self._data = data
return l
def save_data(self, name='data.csv'):
pd.DataFrame(self._data).to_csv(name, header=None, index=None)
return
def get_data(self):
return self._data
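# Illustrative usage sketch (added for clarity, not part of the original file).
# `SmilesDataset` is a hypothetical stand-in for the enclosing class defined
# earlier in this file; only methods defined above are chained here, and the
# example SMILES strings are arbitrary.
def _example_smiles_preprocessing():
    data = SmilesDataset(['CCO', 'c1ccccc1', 'CC(=O)O'])  # hypothetical constructor
    data.remove_length(min_len=2, max_len=120)            # drop SMILES outside the length window
    data.add_ending(token='E')                            # append the end-of-sequence token
    padded_length = data.padding_right(token='A')         # right-pad all strings to a common length
    return data.get_data(), padded_length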
| 33.804348
| 123
| 0.56362
|
2056fd5bad150e3b004d922ba326fbdff9f38aa4
| 1,719
|
py
|
Python
|
codes/Lib/site-packages/openpyxl/charts/tests/test_reference.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | null | null | null |
codes/Lib/site-packages/openpyxl/charts/tests/test_reference.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | null | null | null |
codes/Lib/site-packages/openpyxl/charts/tests/test_reference.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2010-2014 openpyxl
import pytest
@pytest.fixture
def column_of_letters(sheet, Reference):
for idx, l in enumerate("ABCDEFGHIJ", 1):
sheet.cell(row=idx, column=2).value = l
return Reference(sheet, (1, 2), (10, 2))
class TestReference:
def test_single_cell_ctor(self, cell):
assert cell.pos1 == (1, 1)
assert cell.pos2 is None
def test_range_ctor(self, cell_range):
assert cell_range.pos1 == (1, 1)
assert cell_range.pos2 == (10, 1)
def test_single_cell_ref(self, cell):
assert cell.values == [0]
assert str(cell) == "'reference'!$A$1"
def test_cell_range_ref(self, cell_range):
assert cell_range.values == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert str(cell_range) == "'reference'!$A$1:$A$10"
def test_data_type(self, cell):
with pytest.raises(ValueError):
cell.data_type = 'f'
cell.data_type = None
def test_type_inference(self, cell, cell_range, column_of_letters,
missing_values):
assert cell.values == [0]
assert cell.data_type == 'n'
assert cell_range.values == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert cell_range.data_type == 'n'
assert column_of_letters.values == list("ABCDEFGHIJ")
assert column_of_letters.data_type == "s"
assert missing_values.values == ['', '', 1, 2, 3, 4, 5, 6, 7, 8]
missing_values.values
assert missing_values.data_type == 'n'
def test_number_format(self, cell):
with pytest.raises(ValueError):
cell.number_format = 'YYYY'
cell.number_format = 'd-mmm'
assert cell.number_format == 'd-mmm'
| 30.696429
| 72
| 0.602094
|
4d4d5c44ad6e2406914f4033f8d484c061a785c8
| 6,577
|
py
|
Python
|
rcnn/tools/test_rcnn.py
|
sjiang17/keypoint-maskrcnn
|
f78562d4701a57fe1b95ba56f946e33658d7d039
|
[
"Apache-2.0"
] | 3
|
2018-12-13T09:00:01.000Z
|
2019-09-11T03:38:02.000Z
|
rcnn/tools/test_rcnn.py
|
sjiang17/keypoint-maskrcnn
|
f78562d4701a57fe1b95ba56f946e33658d7d039
|
[
"Apache-2.0"
] | null | null | null |
rcnn/tools/test_rcnn.py
|
sjiang17/keypoint-maskrcnn
|
f78562d4701a57fe1b95ba56f946e33658d7d039
|
[
"Apache-2.0"
] | 1
|
2019-12-01T09:22:36.000Z
|
2019-12-01T09:22:36.000Z
|
import os
import cPickle
import argparse
import pprint
from ..config import config, default, generate_config
from ..symbol import *
from ..dataset import *
from ..core.loader import TestLoader, SequentialLoader
from ..core.tester import Predictor, generate_proposals
from ..utils.load_model import load_param
def test_rcnn(network, dataset, image_set, root_path, dataset_path,
ctx, prefix, epoch, vis, shuffle, thresh):
# rpn generate proposal config
config.TEST.HAS_RPN = True
# print config
pprint.pprint(config)
# load symbol
if config.MASKFCN.ON:
sym = eval('get_' + network + '_maskfcn_test')(num_anchors=config.NUM_ANCHORS)
else:
sym = eval('get_' + network + '_mask_test')(num_anchors=config.NUM_ANCHORS)
sym = sym.get_internals()['mask_roi_output']
# load dataset and prepare imdb for training
imdb = eval(dataset)(image_set, root_path, dataset_path)
roidb = imdb.gt_roidb()
# (possibly) group the roidb by aspect
horizontal_inds, vertical_inds = [], []
for ind, roirec in enumerate(roidb):
if roirec['width'] > roirec['height']:
horizontal_inds.append(ind)
else:
vertical_inds.append(ind)
aspect_group = True if len(horizontal_inds) > 0 and len(vertical_inds) > 0 else False
print("aspect_group={}".format(aspect_group))
if aspect_group:
horizontal_roidb = [roidb[ind] for ind in horizontal_inds]
vertical_roidb = [roidb[ind] for ind in vertical_inds]
l1 = TestLoader(horizontal_roidb, batch_size=len(ctx), shuffle=shuffle, has_rpn=True)
l2 = TestLoader(vertical_roidb, batch_size=len(ctx), shuffle=shuffle, has_rpn=True)
test_data = SequentialLoader(iters=[l1, l2])
else:
test_data = TestLoader(roidb, batch_size=len(ctx), shuffle=shuffle, has_rpn=True)
# load model
arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=None)
# infer shape
data_shape_dict = dict(test_data.provide_data)
arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
# check parameters
for k in sym.list_arguments():
if k in data_shape_dict or 'label' in k:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(
arg_params[k].shape)
for k in sym.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(
aux_params[k].shape)
# decide maximum shape
data_names = [k[0] for k in test_data.provide_data]
label_names = None if test_data.provide_label is None else [k[0] for k in test_data.provide_label]
max_data_shape = [('data', (len(ctx), 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
# create predictor
predictor = Predictor(sym, data_names, label_names,
context=ctx, max_data_shapes=max_data_shape,
provide_data=test_data.provide_data, provide_label=test_data.provide_label,
arg_params=arg_params, aux_params=aux_params)
# start testing
imdb_boxes, original_boxes = generate_proposals(predictor, test_data, imdb, vis=vis, thresh=thresh)
if aspect_group:
# imdb_boxes = [imdb_boxes[ind] for ind in (horizontal_inds + vertical_inds)]
# original_boxes = [original_boxes[ind] for ind in (horizontal_inds + vertical_inds)]
reordered_imdb_boxes, reordered_original_boxes = [None] * len(imdb_boxes), [None] * len(imdb_boxes)
for i, orig_ind in enumerate(horizontal_inds + vertical_inds):
reordered_imdb_boxes[orig_ind] = imdb_boxes[i]
reordered_original_boxes[orig_ind] = original_boxes[i]
imdb_boxes, original_boxes = reordered_imdb_boxes, reordered_original_boxes
# save results
rpn_folder = os.path.join(imdb.root_path, 'rpn_data')
if not os.path.exists(rpn_folder):
os.mkdir(rpn_folder)
rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
with open(rpn_file, 'wb') as f:
cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
if thresh > 0:
full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')
with open(full_rpn_file, 'wb') as f:
cPickle.dump(original_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'wrote rpn proposals to {}'.format(rpn_file)
imdb.evaluate_recall(roidb, candidate_boxes=imdb_boxes)
def parse_args():
parser = argparse.ArgumentParser(description='Test a Region Proposal Network')
# general
parser.add_argument('--network', help='network name', default=default.network, type=str)
parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset)
parser.add_argument('--image_set', help='image_set name', default=default.test_image_set, type=str)
parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
# testing
parser.add_argument('--prefix', help='model to test with', default=default.rpn_prefix, type=str)
parser.add_argument('--epoch', help='model to test with', default=default.rpn_epoch, type=int)
# rpn
parser.add_argument('--gpu', help='GPU device to test with', default='0', type=str)
parser.add_argument('--vis', help='turn on visualization', action='store_true')
parser.add_argument('--thresh', help='rpn proposal threshold', default=0, type=float)
parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
args = parser.parse_args()
return args
def main():
args = parse_args()
print 'Called with argument:', args
ctx = [mx.gpu(int(gpu)) for gpu in args.gpu.split(',')]
test_rcnn(args.network, args.dataset, args.image_set, args.root_path, args.dataset_path,
ctx, args.prefix, args.epoch, args.vis, args.shuffle, args.thresh)
if __name__ == '__main__':
main()
| 43.846667
| 118
| 0.684355
|
83e6747f689b22297129a4eddfbe426c44e4d435
| 1,303
|
py
|
Python
|
python_modules/dagster/dagster/core/storage/alembic/versions/024_add_columns_start_time_and_end_time_postgres.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/storage/alembic/versions/024_add_columns_start_time_and_end_time_postgres.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/storage/alembic/versions/024_add_columns_start_time_and_end_time_postgres.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
"""Add columns start_time and end_time
Revision ID: 42add02bf976
Revises: f78059038d01
Create Date: 2021-12-20 13:41:14.924529
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy import inspect
# revision identifiers, used by Alembic.
revision = "42add02bf976"
down_revision = "f78059038d01"
branch_labels = None
depends_on = None
# pylint: disable=no-member
def upgrade():
inspector = inspect(op.get_bind())
has_tables = inspector.get_table_names()
if "runs" in has_tables:
columns = [x.get("name") for x in inspector.get_columns("runs")]
with op.batch_alter_table("runs") as batch_op:
if "start_time" not in columns:
batch_op.add_column(sa.Column("start_time", sa.Float))
if "end_time" not in columns:
batch_op.add_column(sa.Column("end_time", sa.Float))
def downgrade():
inspector = inspect(op.get_bind())
has_tables = inspector.get_table_names()
if "runs" in has_tables:
columns = [x.get("name") for x in inspector.get_columns("runs")]
with op.batch_alter_table("runs") as batch_op:
if "start_time" in columns:
batch_op.drop_column("start_time")
if "end_time" in columns:
batch_op.drop_column("end_time")
| 28.326087
| 72
| 0.663853
|
89d6ca37f625f101e7fcad5b257635540cd5dad3
| 2,908
|
py
|
Python
|
networkit/stopwatch.py
|
krzysztof-turowski/networkit
|
b0db9e30be1a7f7dcf74eaff2a013988a81973ce
|
[
"MIT"
] | null | null | null |
networkit/stopwatch.py
|
krzysztof-turowski/networkit
|
b0db9e30be1a7f7dcf74eaff2a013988a81973ce
|
[
"MIT"
] | null | null | null |
networkit/stopwatch.py
|
krzysztof-turowski/networkit
|
b0db9e30be1a7f7dcf74eaff2a013988a81973ce
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- 7oars.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import time
"""
stopwatch is a very simple Python module for measuring time.
Great for finding out how long code takes to execute.
>>> import stopwatch
>>> t = stopwatch.Timer()
>>> t.elapsed
3.8274309635162354
>>> print t
15.9507198334 sec
>>> t.stop()
30.153270959854126
>>> print t
30.1532709599 sec
Decorator exists for printing out execution times:
>>> from stopwatch import clockit
>>> @clockit
def mult(a, b):
return a * b
>>> print mult(2, 6)
mult in 1.38282775879e-05 sec
6
"""
__version__ = '0.3.1'
__author__ = 'John Paulett <http://blog.7oars.com>'
class Timer(object):
"""
Main stopwatch object, providing functionality to measure time.
"""
def __init__(self):
self.__stopped = None
self.__start = self.__time()
def stop(self):
"""
stop()
Stops the clock permanently for the instance of the Timer.
Returns the time at which the instance was stopped.
Returns
-------
float
Stop time.
"""
self.__stopped = self.__last_time()
return self.elapsed
def elapsed(self):
"""
elapsed()
The number of seconds since the current time that the Timer
object was created. If stop() was called, it is the number
of seconds from the instance creation until stop() was called.
Returns
-------
float
Elapsed time.
"""
return self.__last_time() - self.__start
elapsed = property(elapsed)
def start_time(self):
"""
start_time()
The time at which the Timer instance was created.
Returns
-------
float
Starting time.
"""
return self.__start
start_time = property(start_time)
def stop_time(self):
"""
stop_time()
The time at which stop() was called, or None if stop was
never called.
Returns
-------
float or None
Stop time.
"""
return self.__stopped
stop_time = property(stop_time)
def __last_time(self):
"""Return the current time or the time at which stop() was call,
if called at all.
"""
if self.__stopped is not None:
return self.__stopped
return self.__time()
def __time(self):
"""Wrapper for time.time() to allow unit testing.
"""
return time.time()
def __str__(self):
"""Nicely format the elapsed time
"""
return str(self.elapsed) + ' sec'
def clockit(func):
"""
clockit(func)
Function decorator that times the evaluation of *func* and prints the
execution time.
Example
-------
..code
>>> from stopwatch import clockit
>>> @clockit
def mult(a, b):
return a * b
>>> print mult(2, 6)
mult in 1.38282775879e-05 sec
"""
def new(*args, **kw):
t = Timer()
retval = func(*args, **kw)
t.stop()
print('%s in %s' % (func.__name__, t))
del t
return retval
return new
| 19.006536
| 70
| 0.662311
|
20cd834b812b212144d1b798400c9517e6bf13e8
| 391
|
py
|
Python
|
backend/website/wsgi.py
|
Abhiram-Joshi/Projectsv2
|
73416697290161dd45eb3192ed7e6275201f81c9
|
[
"MIT"
] | 13
|
2021-08-31T14:21:45.000Z
|
2021-11-08T13:14:59.000Z
|
backend/website/wsgi.py
|
Abhiram-Joshi/Projectsv2
|
73416697290161dd45eb3192ed7e6275201f81c9
|
[
"MIT"
] | 11
|
2021-08-20T19:10:40.000Z
|
2022-03-30T13:28:49.000Z
|
backend/website/wsgi.py
|
Abhiram-Joshi/Projectsv2
|
73416697290161dd45eb3192ed7e6275201f81c9
|
[
"MIT"
] | 3
|
2021-05-18T15:00:49.000Z
|
2021-08-10T06:59:28.000Z
|
"""
WSGI config for website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website.settings")
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
d1fa9a442f9fff7d44046eba89a7e7a6622c9414
| 10,385
|
py
|
Python
|
validator/tests/test_commands.py
|
s-scherrer/qa4sm
|
99fa62d5e42e5a2b81c5bad1553c8137fe4259e7
|
[
"MIT"
] | 10
|
2019-02-27T15:05:15.000Z
|
2022-03-10T21:13:40.000Z
|
validator/tests/test_commands.py
|
s-scherrer/qa4sm
|
99fa62d5e42e5a2b81c5bad1553c8137fe4259e7
|
[
"MIT"
] | 69
|
2019-07-04T23:20:17.000Z
|
2022-03-29T06:34:06.000Z
|
validator/tests/test_commands.py
|
s-scherrer/qa4sm
|
99fa62d5e42e5a2b81c5bad1553c8137fe4259e7
|
[
"MIT"
] | 10
|
2019-03-14T11:46:58.000Z
|
2022-03-25T13:06:16.000Z
|
'''
Test our custom django commands
'''
from datetime import datetime, timedelta
import logging
from unittest.mock import patch
from dateutil.tz.tz import tzlocal
from django.conf import settings
from django.core.management import call_command
from django.test import TestCase
from django.utils import timezone
from validator.models import Dataset
from validator.models import ValidationRun
from validator.tests.testutils import set_dataset_paths
from django.contrib.auth import get_user_model
User = get_user_model()
# See https://stackoverflow.com/a/6513372/
class TestCommands(TestCase):
fixtures = ['variables', 'versions', 'datasets', 'filters']
__logger = logging.getLogger(__name__)
def setUp(self):
user_data = {
'username': 'testuser',
'password': 'secret',
'email': 'noreply@awst.at',
'first_name': 'Chuck',
'last_name': 'Norris',
}
try:
self.testuser = User.objects.get(username=user_data['username'])
except User.DoesNotExist:
self.testuser = User.objects.create_user(**user_data)
set_dataset_paths()
def test_abortrunningvalidations(self):
# make sure we don't have real running validations
running_validations = ValidationRun.objects.filter(progress__range=(0, 99))
assert not running_validations
# make sure we have a fake running validation for testing
run = ValidationRun()
run.start_time = datetime.now(tzlocal())
run.progress = 50
run.save()
run_id = run.id
running_validations = ValidationRun.objects.filter(progress__range=(0, 99))
assert running_validations
# run the command
args = []
opts = {}
call_command('abortrunningvalidations', *args, **opts)
# make sure that our test validation was marked as failed
running_validations = ValidationRun.objects.filter(progress__range=(0, 99))
assert not running_validations
test_val = ValidationRun.objects.get(id=run_id)
assert test_val
assert test_val.end_time
assert test_val.progress == -1
def test_autocleanupvalidations(self):
ended_vals = ValidationRun.objects.filter(end_time__isnull=False).count()
## unexpired validation
run1 = ValidationRun()
run1.start_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS * 4)
run1.end_time = timezone.now()
run1.user = self.testuser
run1.save()
runid1 = run1.id
## 20% of warning period has passed
run2 = ValidationRun()
run2.start_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS * 4)
run2.end_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS - settings.VALIDATION_EXPIRY_WARNING_DAYS * 0.8)
run2.user = self.testuser
run2.save()
runid2 = run2.id
## 80% of warning period has passed
run3 = ValidationRun()
run3.start_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS * 4)
run3.end_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS - settings.VALIDATION_EXPIRY_WARNING_DAYS * 0.2)
run3.user = self.testuser
run3.save()
runid3 = run3.id
## just expired validation
run4 = ValidationRun()
run4.start_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS * 4)
run4.end_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS)
run4.user = self.testuser
run4.save()
runid4 = run4.id
## long expired validation
run5 = ValidationRun()
run5.start_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS * 4)
run5.end_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS * 2)
run5.user = self.testuser
run5.save()
runid5 = run5.id
# test what happens if there is no user assigned to a validation
no_user_run = ValidationRun()
no_user_run.start_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS * 4)
no_user_run.end_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS)
no_user_run.user = None
no_user_run.save()
no_user_run_id = no_user_run.id
# test what happens if there is no user assigned to a validation, but validation has been published
no_user_run_published = ValidationRun()
no_user_run_published.start_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS * 4)
no_user_run_published.end_time = timezone.now() - timedelta(days=settings.VALIDATION_EXPIRY_DAYS)
no_user_run_published.user = None
no_user_run_published.doi = '10101/101.010'
no_user_run_published.save()
no_user_run_published_id = no_user_run_published.id
ended_vals2 = ValidationRun.objects.filter(end_time__isnull=False).count()
assert ended_vals + 7 == ended_vals2
assert runid1
assert runid2
assert runid3
assert runid4
assert runid5
assert no_user_run_id
assert no_user_run_published_id
# run the command
args = []
opts = {}
call_command('autocleanupvalidations', *args, **opts)
## reload from db because the validations have been changed.
run1 = ValidationRun.objects.get(pk=runid1)
run2 = ValidationRun.objects.get(pk=runid2)
run3 = ValidationRun.objects.get(pk=runid3)
run4 = ValidationRun.objects.get(pk=runid4)
run5 = ValidationRun.objects.get(pk=runid5)
non_user_val = ValidationRun.objects.filter(pk=no_user_run_id)
no_user_run_published = ValidationRun.objects.get(pk=no_user_run_published_id)
## with the last command call, the user should have been notified about most of our test validations
## but the validations should not have been deleted yet
assert not run1.expiry_notified
assert run2.expiry_notified
assert run3.expiry_notified
assert run4.expiry_notified
assert run5.expiry_notified
assert len(non_user_val) == 0 # there should be no validation anymore, because it was already removed
assert not no_user_run_published.expiry_notified # no notification sent
## the validations may have been extended in the previous step, undo that to get them really deleted in the next call
run1.last_extended = None
run1.save()
run2.last_extended = None
run2.save()
run3.last_extended = None
run3.save()
run4.last_extended = None
run4.save()
run5.last_extended = None
run5.save()
call_command('autocleanupvalidations', *args, **opts)
## the two expired validations should be have been deleted now
ended_vals3 = ValidationRun.objects.filter(end_time__isnull=False).count()
assert ended_vals + 4 == ended_vals3
def test_setdatasetpaths(self):
new_test_path = 'new_test_path/'
new_test_path2 = 'another_test_path/'
num_changed = 0
# ensure that every second dataset has no storage path
for counter, dataset in enumerate(Dataset.objects.all().order_by('id')):
if counter % 2 == 0:
dataset.storage_path = ''
dataset.save()
num_changed += 1
self.__logger.debug('setting empty path for: ' + dataset.short_name)
## instruct the command to change only the empty paths, give no default path, and set a new path every time
user_input = [
'u',
'',
]
user_input.extend([new_test_path] * num_changed)
args = []
opts = {}
with patch('builtins.input', side_effect=user_input): ## this mocks user input for the command
# run the command
call_command('setdatasetpaths', *args, **opts)
# check that the datasets were changed correctly
for counter, dataset in enumerate(Dataset.objects.all().order_by('id')):
self.__logger.debug('checking path for ' + dataset.short_name)
if counter % 2 == 0:
assert new_test_path in dataset.storage_path
else:
assert new_test_path not in dataset.storage_path
## second round of testing!
## instruct the command to change all paths, give a default path, and accept the suggestion every time
user_input = [
'',
new_test_path2,
]
user_input.extend([''] * Dataset.objects.count())
args = []
opts = {}
with patch('builtins.input', side_effect=user_input): ## this mocks user input for the command
# run the command
call_command('setdatasetpaths', *args, **opts)
# check that the datasets were changed correctly
for counter, dataset in enumerate(Dataset.objects.all().order_by('id')):
self.__logger.debug('checking path second time for ' + dataset.short_name)
assert new_test_path2 in dataset.storage_path
## third round of testing!
## instruct the command to change all paths, give no default path, and keep the existing path (default) every time
user_input = [
'a',
'',
]
user_input.extend([''] * Dataset.objects.count())
args = []
opts = {}
with patch('builtins.input', side_effect=user_input): ## this mocks user input for the command
# run the command
call_command('setdatasetpaths', *args, **opts)
# check that the datasets were changed correctly
for counter, dataset in enumerate(Dataset.objects.all().order_by('id')):
self.__logger.debug('checking path third time for ' + dataset.short_name)
assert new_test_path2 in dataset.storage_path
assert dataset.short_name in dataset.storage_path
with patch('builtins.input', side_effect=user_input): ## this mocks user input for the command
# run the command to list the paths
call_command('getdatasetpaths', *args, **opts)
| 39.188679
| 136
| 0.651613
|
f7708d4af289e41a22fb7b11ca366ae649f96981
| 1,456
|
py
|
Python
|
tests/testInterference.py
|
KOLANICH/lazyImport.py
|
ee6574c10c941973de8f4ea3b67af3e94fc9668d
|
[
"Unlicense"
] | null | null | null |
tests/testInterference.py
|
KOLANICH/lazyImport.py
|
ee6574c10c941973de8f4ea3b67af3e94fc9668d
|
[
"Unlicense"
] | null | null | null |
tests/testInterference.py
|
KOLANICH/lazyImport.py
|
ee6574c10c941973de8f4ea3b67af3e94fc9668d
|
[
"Unlicense"
] | null | null | null |
import sys
from pathlib import Path
import unittest
thisDir = Path(__file__).parent.absolute()
sys.path.insert(0, str(thisDir.parent))
sys.path.insert(0, str(thisDir))
from ImportTimeline import ImportTimelineTestCase
class Tests(ImportTimelineTestCase):
def testInterference(self):
self.etalon = [
"from lazilyTest2 import b",
"lazilyTest2/__init__.py run",
"lazilyTest2/b.py run",
("lazilyTest1", False),
("lazilyTest1.a", False),
("lazily.lazilyTest1", True),
("lazily.lazilyTest1.a", True),
("lazily.lazilyTest1.b", False),
"from lazily.lazilyTest1 import a",
("lazilyTest1", False),
("lazilyTest1.a", False),
("lazily.lazilyTest1", True),
("lazily.lazilyTest1.a", True),
("lazily.lazilyTest1.b", False)
]
self.log("from lazilyTest2 import b")
from lazilyTest2 import b
self.assertInModulesStatus("lazilyTest1")
self.assertInModulesStatus("lazilyTest1.a")
self.assertInModulesStatus("lazily.lazilyTest1")
self.assertInModulesStatus("lazily.lazilyTest1.a")
self.assertInModulesStatus("lazily.lazilyTest1.b")
self.log("from lazily.lazilyTest1 import a")
from lazily.lazilyTest1 import a
self.assertInModulesStatus("lazilyTest1")
self.assertInModulesStatus("lazilyTest1.a")
self.assertInModulesStatus("lazily.lazilyTest1")
self.assertInModulesStatus("lazily.lazilyTest1.a")
self.assertInModulesStatus("lazily.lazilyTest1.b")
if __name__ == "__main__":
unittest.main()
| 27.471698
| 52
| 0.739698
|
d1a5657994555d96cc13ea302fd99341b8b9c6a2
| 30,644
|
py
|
Python
|
arignote/data/readers.py
|
stephen-hoover/Arignote
|
f438c929295558f3354ec07598a3a023fc4108e0
|
[
"MIT"
] | 2
|
2016-01-18T02:12:13.000Z
|
2018-07-24T01:55:20.000Z
|
arignote/data/readers.py
|
stephen-hoover/Arignote
|
f438c929295558f3354ec07598a3a023fc4108e0
|
[
"MIT"
] | 3
|
2015-07-08T13:30:33.000Z
|
2015-07-10T19:56:08.000Z
|
arignote/data/readers.py
|
stephen-hoover/Arignote
|
f438c929295558f3354ec07598a3a023fc4108e0
|
[
"MIT"
] | null | null | null |
"""
This module reads and iterates over data, making it available for training.
"""
from __future__ import division
import abc
import threading
import numpy as np
try:
import pandas as pd
except ImportError:
# No pandas; we can't read HDF5 files.
pd = None
import six
import theano
from ..util import misc
from ..util import netlog
from ..data import files
log = netlog.setup_logging("data_readers", level="INFO")
def to_data_object(data, batch_size=128, **kwargs):
"""Wrap the input in a Data object. If it has length 1, assume that it's a 1-tuple containing
an array of features. If length 2, assume the second element is the labels. Extra keyword
arguments will be passed to the `Data` constructor. The keyword arguments will be ignored
if `data` is already a Data object or is None."""
if data is None or isinstance(data, Data):
obj = data
else:
labels = kwargs.pop("labels", None)
if len(data) == 1:
features = data[0]
elif len(data) == 2:
features, labels = data
else:
features = data
obj = Data(features, labels, batch_size=batch_size, **kwargs)
return obj
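# Illustrative sketch (added for clarity, not part of the original module): how
# `to_data_object` wraps raw arrays. The synthetic features/labels below are
# placeholders; a 2-tuple is interpreted as (features, labels), while a bare
# array is treated as unlabelled features, per the docstring above.
def _example_to_data_object():
    features = np.random.rand(256, 10).astype(theano.config.floatX)
    labels = np.random.randint(0, 2, size=256)
    labelled = to_data_object((features, labels), batch_size=64)
    unlabelled = to_data_object(features, batch_size=64)
    return labelled, unlabelled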
def to_data_partitions(train, valid=0, test=0, batch_size=128, **kwargs):
"""Wrap the input in a DataWithHoldoutPartitions object.
If it has length 1, assume that it's a 1-tuple containing
an array of features. If length 2, assume the second element is the labels. Extra keyword
arguments will be passed to the `Data` constructor. The keyword arguments will be ignored
if `data` is already a DataWithHoldoutPartitions object."""
if isinstance(train, DataWithHoldoutParitions):
output = train.train, train.valid, train.test
elif isinstance(train, Data):
if valid == 0:
valid = None
if test == 0:
test = None
if ((valid is not None and not isinstance(valid, Data))
or (test is not None and not isinstance(test, Data))):
raise TypeError("If inputting training data as a `Data` object, validation and"
"test sets must also be presented as `Data` objects.")
output = (train, valid, test)
else:
train_labels = None
if len(train) == 1:
features = train[0]
elif len(train) == 2:
features, train_labels = train
else:
features = train
valid_frac, test_frac = 0, 0
if misc.is_floatlike(valid):
valid_frac = valid
valid = None
else:
valid = to_data_object(valid, batch_size=batch_size, allow_partial_batch=True, **kwargs)
if misc.is_floatlike(test):
test_frac = test
test = None
else:
test = to_data_object(test, batch_size=batch_size, allow_partial_batch=True, **kwargs)
obj = DataWithHoldoutParitions(features, labels=train_labels, valid_frac=valid_frac,
test_frac=test_frac, batch_size=batch_size, **kwargs)
if valid is None:
valid = obj.valid
if test is None:
test = obj.test
output = obj.train, valid, test
return output
def threaded_generator(generator, num_cached=10):
"""Wrap a generator in a thread, using a queue to return data.
Note that due to the Python GIL, this will not allow generators to work while other
Python code is running. If part of a program releases the GIL, however, this
wrapper can store up extra items from the generator it wraps.
Threaded generator implementation due to Jan Schlueter, https://github.com/f0k
https://github.com/Lasagne/Lasagne/issues/12#issuecomment-59494251
"""
queue = six.moves.queue.Queue(maxsize=num_cached)
sentinel = object() # guaranteed unique reference
# define producer (putting items into queue)
def producer():
for item in generator:
queue.put(item)
queue.put(sentinel)
# start producer (in a background thread)
thread = threading.Thread(target=producer)
thread.daemon = True
thread.start()
# run as consumer (read items from queue, in current thread)
item = queue.get()
while item is not sentinel:
yield item
queue.task_done()
item = queue.get()
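# Illustrative sketch (added for clarity, not part of the original module):
# prefetching batches from a generator in a background thread. `slow_batches`
# is a made-up stand-in for any I/O-bound batch producer.
def _example_threaded_generator():
    def slow_batches(n=5):
        for i in range(n):
            # In real use this step would block on disk or network I/O.
            yield np.arange(i, i + 4)
    # Up to two batches are produced ahead of the consumer.
    return [batch for batch in threaded_generator(slow_batches(), num_cached=2)]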
class Data(object):
"""
This is the base class for all data iterators suitable for use in training, and can
be used for simple data iteration.
"""
def __init__(self, features, labels=None, batch_size=128, alter_features=None,
alter_labels=None, start=0, stop=None, allow_partial_batch=False):
self.batch_size = batch_size
self.features = features
self.labels = labels
self.alter_features = alter_features
self.alter_labels = alter_labels
self.start = start
self.stop = stop
self.allow_partial_batch = allow_partial_batch
if self.batch_size is None:
raise TypeError("Batch size may not be None!")
self.n_rows = 0
self._setup()
def __len__(self):
stop = self.n_rows if (self.stop is None or self.stop > self.n_rows) else self.stop
return stop - self.start
def _setup(self):
"""Execute setup tasks, both input checking and creating derived attributes."""
# Turn non-Reader data inputs into Readers.
self.features = get_reader(self.features, labels=False)
if self.labels is not None:
self.labels = get_reader(self.labels, labels=True)
self.n_rows = len(self.features)
# For alteration, turn None into a do-nothing function.
if self.alter_features is None:
self.alter_features = lambda x: x
if self.alter_labels is None:
self.alter_labels = lambda x: x
# Check the inputs.
if self.labels is not None and len(self.features) != len(self.labels):
raise ValueError("The features have {} rows, but the labels have {} "
"rows.".format(len(self.features), len(self.labels)))
# Figure out where we're starting each section of the data as a fraction of the whole.
self.n_epochs = 0
def iter_epoch(self, num_cached=3):
for item in threaded_generator(self.iter_epoch_single(), num_cached=num_cached):
yield item
def iter_epoch_single(self):
"""Iterate through the data represented by this object.
**Yields**
A 2-tuple minibatch of (features, labels) if this object holds labels, else
a minibatch of features.
"""
# Set up the feature and label iterators.
feature_rdr = self.features.iter_epoch(batch_size=self.batch_size,
start=self.start,
stop=self.stop,
start_on_batch=True,
allow_partial=self.allow_partial_batch)
data = feature_rdr
if self.labels is not None:
label_rdr = self.labels.iter_epoch(batch_size=self.batch_size,
start=self.start,
stop=self.stop,
start_on_batch=True,
allow_partial=self.allow_partial_batch)
data = six.moves.zip(feature_rdr, label_rdr)
# Iterate over the data.
for item in data:
if self.labels is not None:
yield self.alter_features(item[0]), self.alter_labels(item[1])
else:
yield self.alter_features(item)
self.n_epochs += 1
def peek(self):
"""Return the first epoch of data."""
return next(self.iter_epoch_single())
class DataWithHoldoutParitions(object):
"""
This class partitions input data into three sections: training data, validation data,
and testing data. It uses rows of input data in the order it finds them.
The first section of the data will be used for training, the middle section for validation,
and the last section for testing.
This object will have a training set as `self.train`, a validation set (if any) as
`self.valid`, and a test set (if any) as `self.test`.
"""
def __init__(self, features, labels=None, batch_size=128, valid_frac=0.1, test_frac=0.1,
alter_features=None, alter_labels=None):
self.batch_size = batch_size
self.valid_frac = valid_frac
self.test_frac = test_frac
self.features = features
self.labels = labels
self.alter_features = alter_features
self.alter_labels = alter_labels
self.n_rows = {}
self._setup()
self._set_partitions()
def __len__(self):
return self.n_rows["all"]
def _setup(self):
"""Execute setup tasks, both input checking and creating derived attributes."""
# Turn non-Reader data inputs into Readers.
self.features = get_reader(self.features, labels=False)
if self.labels is not None:
self.labels = get_reader(self.labels, labels=True)
self.n_rows["all"] = len(self.features)
# Allow None for valid or test fractions.
if self.valid_frac is None:
self.valid_frac = 0.
if self.test_frac is None:
self.test_frac = 0.
# Check the inputs.
if self.labels is not None and len(self.features) != len(self.labels):
raise ValueError("The features have {} rows, but the labels have {} "
"rows.".format(len(self.features), len(self.labels)))
if self.valid_frac > 1 or self.valid_frac < 0:
raise ValueError("Select a validation set fraction from [0, 1).")
if self.test_frac > 1 or self.test_frac < 0:
raise ValueError("Select a test set fraction from [0, 1).")
# Figure out where we're starting each section of the data as a fraction of the whole.
self.n_epochs = {"train": 0, "test": 0, "valid": 0}
self._start_stop_frac = {"train": (0., 1 - self.valid_frac - self.test_frac),
"valid": (1 - self.valid_frac - self.test_frac, 1 - self.test_frac),
"test": (1 - self.test_frac, None)}
if self._start_stop_frac["train"][1] <= 0:
raise ValueError("A validation set of {:.2%} of the data and test set of {:.2%} of "
"the data don't leave any training "
"data.".format(self.valid_frac, self.test_frac))
# Translate the start/stop fractions into start/stop rows.
self.start_stop = {}
for key, val in self._start_stop_frac.items():
start_row = self.features.get_start_row(val[0], batch_size=self.batch_size)
# The `batch_size` input makes sure each section stops at an integer number of batches.
# Allow the test partition (the last one) to go to the end of the data.
stop_row = self.features.get_stop_row(start_row, val[1],
batch_size=(None if key == "test" else
self.batch_size))
self.start_stop[key] = (start_row, stop_row)
self.n_rows[key] = stop_row - start_row
# Record if there's data partitions we're not using.
self.using_partition = {"valid": self.valid_frac,
"test": self.test_frac,
"train": True}
def _set_partitions(self):
"""Create a `Data` object for training, testing, and validation partitions, and store
them in this instance."""
for partition_name in ["train", "test", "valid"]:
if self.using_partition[partition_name]:
partition = Data(self.features, self.labels, self.batch_size,
alter_features=self.alter_features,
alter_labels=self.alter_labels,
start=self.start_stop[partition_name][0],
stop=self.start_stop[partition_name][1],
allow_partial_batch=(partition_name != "train"))
else:
partition = None
setattr(self, partition_name, partition)
def iter_epoch(self, which="train", num_cached=3):
"""Return an iterator which steps through one epoch of the specified partition."""
if not self.using_partition[which]:
return
if which not in self.start_stop:
raise ValueError("Pick `which` from {}.".format(list(self.start_stop.keys())))
return getattr(self, which).iter_epoch(num_cached=num_cached)
def get_reader(src, labels=False):
"""Returns a Reader of the appropriate type to iterate over the given source.
If the source is an HDF5 file, we'll attempt to guess the table name.
Create the Reader manually if you have an HDF5 file with a non-inferrable table name.
"""
# If the input is a file, figure out which type.
if isinstance(src, six.string_types):
ftype = files.get_file_type(src)
else:
ftype = None
# If the input was the name of a pickle file, read from that pickle.
# If there's two things inside, then assume it's a tuple of (features, labels).
# Otherwise assume that the entire thing is what we want.
if ftype == "pkl":
data = files.read_pickle(src)
if len(data) == 2:
if labels:
log.debug("Taking the second element of data in {} as our labels.".format(src))
src = data[1]
else:
log.debug("Taking the first element of data in {} as our features.".format(src))
src = data[0]
else:
src = data
# Turn the input into a Reader, if it isn't already.
if isinstance(src, np.ndarray) or (pd is not None and isinstance(src, pd.DataFrame)):
rdr = ArrayReader(src)
elif ftype == "hdf":
if pd is None:
raise ImportError("`pandas` is required for HDF5 file reading.")
# HDF5 file input. Try to infer the proper table name.
with pd.HDFStore(src, "r") as store:
keys = [k.strip("/") for k in store.keys()]
if len(keys) == 1:
table_name = keys[0]
else:
# Assume that a table holds labels if it has one of a standard set of names.
label_keys = [k for k in keys if k in ["label", "labels", "target", "targets"]]
if labels:
if len(label_keys) == 1:
table_name = label_keys[0]
else:
raise ValueError("I could not infer the name of the table holding labels "
"in {}.".format(src))
else:
if len(keys) - len(label_keys) == 1:
table_name = [k for k in keys if k not in label_keys][0]
else:
raise ValueError("I could not infer the name of the table holding features "
"in {}.".format(src))
rdr = HDFReader(src, table_name)
elif isinstance(src, Reader):
# The input could already be a Reader, in which case we don't need to do anything.
rdr = src
else:
raise TypeError("Could not figure out what to do with data source {}.".format(src))
return rdr
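# Illustrative sketch (added for clarity, not part of the original module).
# An in-memory array is wrapped directly in an ArrayReader, while a file name
# goes through type detection and, for HDF5 files, table-name inference.
# "ratings.h5" is a placeholder path and assumes `files.get_file_type`
# recognises the extension as HDF5.
def _example_get_reader():
    feature_reader = get_reader(np.random.rand(100, 3))    # -> ArrayReader
    label_reader = get_reader("ratings.h5", labels=True)   # -> HDFReader, table inferred
    return feature_reader, label_reader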
class Reader(object):
"""
This is the abstract base class for reading data from various sources.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, data_src):
self.data_src = data_src
# Set the following attributes in the subclasses, once we know how to figure
# this information out from the input data.
self.n_rows = None
self.shape = None # This is the shape of a single row of data.
self.ndim = None # The number of dimensions of a single row of data.
self.dtype = None # This is the data type of the entire data store, not a single row (which might have mixed dtypes).
@abc.abstractmethod
def iter_epoch(self, batch_size, start=0, stop=None, start_on_batch=True, allow_partial=False):
"""Iterate through an opened data source."""
def __len__(self):
return self.n_rows
def get_start_row(self, start, batch_size=None):
"""Figure out which row iteration should start from.
Translate a fraction-of-the-file input into a row index, and shift
the start row to lie on the closest previous batch boundary, if
we're given a `batch_size`.
**Parameters**
* `start` <int or float>: Start iterating from here. May be an integer row index
or a float fraction of the total rows.
**Optional Parameters**
* `batch_size` <int|None>: If provided, shift the starting row so that it's the first row
at or before `start` which is a multiple of `batch_size`.
**Returns**
An integer row index.
**Raises**
`ValueError` if `start` is bad.
"""
# Do input checking on the `start`, and convert it to the appropriate row index if needed.
if isinstance(start, (float, np.floating)):
# Convert fractional starts to row numbers
if start >= 1. or start < 0:
raise ValueError("Fractional start locations must be in [0., 1).")
start = int(start * self.n_rows)
if start >= self.n_rows or start < 0:
# Make sure we're not out of bounds.
raise ValueError("Can't start at row {} of a {}-row array.".format(start, self.n_rows))
if batch_size is not None:
# Often we'll want to start an integer number of batches into the array.
start = (start // batch_size) * batch_size
return start
def get_stop_row(self, start, stop, batch_size=None):
"""Figure out where iteration should end.
Translate a fraction-of-the-file input into a row index, and shift
the end row to be such that iteration will cover an integer number of batches, if
we're given a `batch_size`.
**Parameters**
* `start` <int>: Start iterating from here. Used to adjust the end row so that we
reach an integer number of batches.
* `stop` <int or float>: Stop iterating here. May be an integer row index or a float
fraction of the total number of rows.
**Optional Parameters**
* `batch_size` <int|None>: If provided, shift the ending row so that it's the first row
at or before `stop` which is a multiple of `batch_size` away from `start`.
**Returns**
An integer row index.
**Raises**
`ValueError` if `stop` is bad or `start` is not an integer.
"""
# If `start` is accidentally a fraction, this won't work.
if isinstance(start, (float, np.floating)):
raise ValueError("`start` must be a number of rows, not a fraction.")
# Do input checking on the `stop`, and convert it to the appropriate row index if needed.
if stop is None:
stop = self.n_rows # Default to taking all of the available rows.
elif isinstance(stop, (float, np.floating)) or stop == 1:
if stop > 1. or stop <= 0:
raise ValueError("Fractional stop locations must be in (0., 1].")
stop = int(stop * self.n_rows)
if stop > self.n_rows or stop <= 0:
raise ValueError("Can't stop at row {} of a {}-row array.".format(stop, self.n_rows))
# Adjust the `stop` so that it's an integer number of batches from the `start`.
if batch_size is not None:
stop = ((stop - start) // batch_size) * batch_size + start
return stop
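# Worked example (added for clarity, not part of the original module) of the
# batch-boundary alignment described in the two docstrings above. ArrayReader
# is defined further down in this module; the forward reference is only
# resolved when the function is called.
def _example_start_stop_alignment():
    rdr = ArrayReader(np.zeros((1000, 4)))
    # start=0.5 of 1000 rows is row 500, snapped back to 384 = (500 // 128) * 128.
    start = rdr.get_start_row(0.5, batch_size=128)       # -> 384
    # stop=0.9 of 1000 rows is row 900, trimmed to 896 so that (stop - start)
    # is a whole number of 128-row batches: 896 = ((900 - 384) // 128) * 128 + 384.
    stop = rdr.get_stop_row(start, 0.9, batch_size=128)  # -> 896
    return start, stop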
class HDFReader(Reader):
"""
Read from an HDF5 file. We assume that the images are stored in a pandas structure which
can be cast as an array. The tables should be created appendable so that they have
all the necessary metadata. Images should be stored as either a Panel or Panel4D.
For example, you can store a single image in a row of an HDF5 table as
store = pd.HDFStore(filename)
for i_row, image in enumerate(all_my_images):
store.append("labels", labels.iloc[i_row: i_row + 1])
store.append("images", pd.Panel4D({labels.index[i_row]: image}),
axes=["labels", "major_axis", "minor_axis"], complib="zlib", complevel=9)
store.close()
Here, "labels" is a Series object containing the labels of all your images, and
"image" is a 3D array with the color axis first.
"""
def __init__(self, fname, table=None, color=None, asarray=None):
"""
* `fname` <str>: File name of an HDF5 file
* `table` <str|None>: Name of a table in `fname` which contains data. Must be supplied
if the file has more than one table.
* `color` <int|None>: Default choice for the `color` input to `iter_epochs`.
* `asarray` <bool|None>: Cast outputs to arrays? Defaults to True if the rows of
data have more than 1 dimension, and False for 1D rows.
"""
if pd is None:
raise ImportError("`pandas` is required for HDF5 file reading.")
super(HDFReader, self).__init__(fname)
self.color = color
self.filename = fname
self.table_name = table
with pd.HDFStore(self.filename, "r") as data_src:
if self.table_name is None:
if len(data_src.keys()) > 1:
raise ValueError("The HDF5 file has tables {}: which do you "
"want?".format(data_src.keys()))
else:
self.table_name = data_src.keys()[0].strip("/")
# Read the first row of data to find the shape.
# Trim the first element from the shape -- it will be 1, the number of rows we read.
# Assume that the "rows" of data are designated by the first of the index axes.
# For a Panel4D, this is "labels". For a Panel, this is "items".
self._index_name = data_src.get_storer(table).index_axes[0].name
first_row = data_src.select(table, where="{} == 0".format(self._index_name))
self.shape = first_row.shape[1:]
self.ndim = len(self.shape)
self.dtype = type(first_row)
if hasattr(first_row, "columns"):
# Support reading the header from DataFrames.
self.header = first_row.columns
else:
self.header = None
# Figure out if we should cast the output to arrays.
if asarray is None:
asarray = self.ndim > 1
self.asarray = asarray
# Figure out how many rows of data are in the table.
# Pandas stores data of > 2D in row x column format. One dimension of input data
# will be the "columns", and all the rest will be flattened into rows.
self._n_cols = data_src.get_storer(table).ncols
self.n_rows = (data_src.get_storer(table).nrows / (np.prod(self.shape) / self._n_cols))
if self.n_rows != int(self.n_rows):
raise ValueError("Table {} appears to have data of shape {}, but I failed to find the "
"correct number of rows.".format(data_src.get_storer(table),
self.shape))
self.n_rows = int(self.n_rows)
log.debug("Opened file {}. I found {} rows of data with shape "
"{}.".format(self.filename, self.n_rows, self.shape))
def iter_epoch(self, batch_size, start=0, stop=None, start_on_batch=True,
allow_partial=False, color=None):
"""
Iterate through this array, one batch at a time.
**Parameters**
* `batch_size` <int>: Number of rows of the array to return at once.
**Optional Parameters**
* `start` <int or float|0>: Start at this row. Either an integer row number, or a
fraction of the total rows. We may start at a slightly different row
if `start_on_batch` is True.
* `stop` <int or float|None>: Stop iterating when we reach this many rows. May be given
as a fraction of the total rows in the array. Will be modified so that we iterate
through an integer number of batches (unless `allow_partial` is True).
Default to stopping at the end of the array.
* `start_on_batch` <bool|True>: If True, modify the `start` row so that we begin at an
integer number of batches into the array, at or before the requested `start`.
* `allow_partial` <bool|False>: If False, every returned batch will have `batch_size` rows.
Iteration will stop at or before the requested `stop` row. If True, the final returned
batch may have fewer rows, if the requested chunk of data is not an integer number
of batches.
* `color` <int|None>: If not None, select this index from the last axis of the shared
data. For multicolor images, we expect to have shape (rows, columns, colors).
Will not work if the data are not stored as a Panel or Panel4D.
**Returns**
An iterator over portions of the array.
"""
if color is None:
color = self.color
if color is not None and self.dtype not in [pd.Panel, pd.Panel4D]:
raise ValueError("Cannot select a `color` unless reading image data.")
start = self.get_start_row(start=start, batch_size=(batch_size if start_on_batch else None))
stop = self.get_stop_row(start, stop, batch_size=(None if allow_partial else batch_size))
log.debug("Iterating through HDF5 file {} from row {} to row {} in batches "
"of {}.".format(self.filename, start, min([stop, self.n_rows]), batch_size))
# Set up the iteration.
array_maker = (lambda x: np.asarray(x, dtype=theano.config.floatX).squeeze()) \
if self.asarray else (lambda x: x)
item_size = np.prod(self.shape) / self._n_cols # A single row of data is this many "rows" in the HDF5Store.
if color is not None:
item_size /= self.shape[-1]
where_stmt = "minor_axis == {color}".format(color=color) if color is not None else None
# Iterate through the data.
with pd.HDFStore(self.filename, "r") as data_src:
for chunk in data_src.select(self.table_name,
start=start * item_size,
stop=stop * item_size,
chunksize=batch_size * item_size,
where=where_stmt):
yield array_maker(chunk)
# Code below preserved as a different way of iterating through the table.
# It's possibly less efficient, and suffers from the flaw of assuming that the
# index is integers from 0 to n_rows.
#where_stmt += "({index} >= {start} & {index} < {stop})".format(index=self._index_name,
# start="{start}",
# stop="{stop}")
#for i_start in range(start, stop, batch_size):
# i_stop = min([i_start + batch_size, stop])
# yield array_maker(self.data_src.select(
# self.table_name, where=where_stmt.format(start=i_start, stop=i_stop))).squeeze()
class ArrayReader(Reader):
"""
Read from an array which is entirely in memory.
"""
def __init__(self, array):
"""Initialize from an input array. The input may also be the file name of a
pickle which contains a single array.
"""
if isinstance(array, six.string_types):
array = files.read_pickle(array)
super(ArrayReader, self).__init__(np.asarray(array))
self.n_rows = len(array)
self.shape = array.shape[1:]
self.ndim = len(self.shape)
self.dtype = array.dtype
def iter_epoch(self, batch_size, start=0, stop=None, start_on_batch=True, allow_partial=False):
"""
Iterate through this array, one batch at a time.
**Parameters**
* `batch_size` <int>: Number of rows of the array to return at once.
**Optional Parameters**
* `start` <int or float|0>: Start at this row. Either an integer row number, or a
fraction of the total rows. We may start at a slightly different row
if `start_on_batch` is True.
* `stop` <int or float|None>: Stop iterating when we reach this many rows. May be given
as a fraction of the total rows in the array. Will be modified so that we iterate
through an integer number of batches (unless `allow_partial` is True).
Default to stopping at the end of the array.
* `start_on_batch` <bool|True>: If True, modify the `start` row so that we begin at an
integer number of batches into the array, at or before the requested `start`.
* `allow_partial` <bool|False>: If False, every returned batch will have `batch_size` rows.
Iteration will stop at or before the requested `stop` row. If True, the final returned
batch may have fewer rows, if the requested chunk of data is not an integer number
of batches.
**Returns**
An iterator over portions of the array.
"""
start = self.get_start_row(start=start, batch_size=(batch_size if start_on_batch else None))
stop = self.get_stop_row(start, stop, batch_size=(None if allow_partial else batch_size))
log.debug("Iterating through array from row {} to row {} in batches "
"of {}.".format(start, min([stop, self.n_rows]), batch_size))
for i_start in range(start, stop, batch_size):
yield self.data_src[i_start: min([i_start + batch_size, stop])]
| 43.590327
| 126
| 0.597442
|
49815985a6d7f6697cb7354450e50e63a63d4e1a
| 3,187
|
py
|
Python
|
modeller.py
|
fuxes/jiminy-modeler
|
b584a2cd8e73ec825c256546eacd0556545e7c74
|
[
"Apache-2.0"
] | 1
|
2018-06-02T03:37:20.000Z
|
2018-06-02T03:37:20.000Z
|
modeller.py
|
fuxes/jiminy-modeler
|
b584a2cd8e73ec825c256546eacd0556545e7c74
|
[
"Apache-2.0"
] | 21
|
2017-11-10T21:25:02.000Z
|
2018-04-13T14:13:56.000Z
|
modeller.py
|
fuxes/jiminy-modeler
|
b584a2cd8e73ec825c256546eacd0556545e7c74
|
[
"Apache-2.0"
] | 5
|
2017-10-19T09:46:32.000Z
|
2018-08-21T18:34:28.000Z
|
"""ALS Modeller for Project Jiminy."""
import itertools
import math
import operator
import time
import pyspark.mllib.recommendation as rec
import logger
loggers = logger.get_logger()
class Estimator:
"""Estimator class for Project Jiminy.
Used to determine Model parameters.
"""
def __init__(self, data):
self._data = data
# std bootstrap proportions for the training, validation and testing
self._sets = self._split([0.6, 0.3, 0.1])
def _split(self, proportions):
"""Split data into three random chunks."""
split = self._data.randomSplit(proportions)
return {'training': split[0], 'validation': split[1], 'test': split[2]}
def rmse(self, model):
"""Compute root mean squared error for the validation set."""
predictions = model.predictAll(
self._sets['validation'].map(lambda x: (x[0], x[1])))
predictions_rating = predictions.map(Estimator.group_ratings)
validation_rating = self._sets['validation'].map(
Estimator.group_ratings)
joined = validation_rating.join(predictions_rating)
return math.sqrt(joined.map(lambda x: (x[1][0] - x[1][1]) ** 2).mean())
@staticmethod
def group_ratings(x):
"""Return ((userId, movieId), rating)."""
return ((int(x[0]), int(x[1])), float(x[2]))
def _train(self, rank, iterations, lambda_, seed):
"""Train a model, using the given parameters."""
return rec.ALS.train(ratings=self._sets['training'],
rank=rank, seed=seed,
lambda_=lambda_,
iterations=iterations)
def run(self, ranks, lambdas, iterations):
"""Return optimal parameters from given input sets."""
rmses = []
combos = []
for parameters in itertools.product(ranks, lambdas, iterations):
rank, lambda_, iteration = parameters
loggers.info("Evaluating parameters: %s" % str(parameters))
start_time = time.time()
rmse = self.rmse(self._train(rank=rank, iterations=iteration,
lambda_=lambda_, seed=42))
elapsed_time = time.time() - start_time
loggers.info("RMSE = %f (took %f seconds)" % (rmse, elapsed_time))
rmses.append(rmse)
combos.append(parameters)
# Index of the parameter combination with the smallest RMSE.
best_index = min(enumerate(rmses), key=operator.itemgetter(1))[0]
optimal = combos[best_index]
return {
'rank': optimal[0],
'lambda': optimal[1],
'iteration': optimal[2]
}
class Trainer:
"""Train the ALS model."""
def __init__(self, data, rank, iterations, lambda_, seed):
self._data = data
self.rank = rank
self.iterations = iterations
self.lambda_ = lambda_
self.seed = seed
def train(self):
return rec.ALS.train(ratings=self._data,
rank=self.rank,
seed=self.seed,
lambda_=self.lambda_,
iterations=self.iterations)
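# Illustrative sketch (added for clarity, not part of the original module): a
# typical flow through the two classes above. `ratings_rdd` is a placeholder
# for an RDD of (user, item, rating) tuples built elsewhere, and the parameter
# grids are arbitrary examples.
def _example_model_selection(ratings_rdd):
    estimator = Estimator(ratings_rdd)
    best = estimator.run(ranks=[8, 12], lambdas=[0.01, 0.1], iterations=[10])
    trainer = Trainer(ratings_rdd,
                      rank=best['rank'],
                      iterations=best['iteration'],
                      lambda_=best['lambda'],
                      seed=42)
    return trainer.train()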
| 35.808989
| 79
| 0.571698
|
9a919c5329c8a2cc391ef49a72b9b586787144cb
| 3,581
|
py
|
Python
|
source/SpotInterruptionTriggerFunction/app.py
|
horsfieldsa/ec2-spot-interruption-dashboard
|
27d25c3243beac82627d46aca0b6c9c06ca0feb0
|
[
"MIT-0"
] | 27
|
2020-05-15T20:15:42.000Z
|
2022-03-16T04:13:46.000Z
|
source/SpotInterruptionTriggerFunction/app.py
|
horsfieldsa/ec2-spot-interruption-dashboard
|
27d25c3243beac82627d46aca0b6c9c06ca0feb0
|
[
"MIT-0"
] | 5
|
2020-05-15T22:29:30.000Z
|
2021-04-14T19:55:48.000Z
|
source/SpotInterruptionTriggerFunction/app.py
|
horsfieldsa/ec2-spot-interruption-dashboard
|
27d25c3243beac82627d46aca0b6c9c06ca0feb0
|
[
"MIT-0"
] | 10
|
2020-05-15T20:21:42.000Z
|
2022-01-25T10:27:55.000Z
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import boto3
import os
import json
import logging
from botocore.exceptions import ClientError
logger = logging.getLogger()
logger.setLevel(logging.INFO)
instance_metadata_table = boto3.resource('dynamodb').Table(os.environ['INSTANCE_METADATA_TABLE'])
def lambda_handler(event, context):
logger.info(event)
# Transform CloudWatch Event
item = {
'InstanceId': event['detail']['instance-id'],
'Region': event['region'],
'LastEventTime': event['time'],
'LastEventType': 'spot-interruption',
'State': 'none',
'Interrupted': True,
'InterruptedInstanceAction': event['detail']['instance-action'],
'InterruptionTime': event['time']
}
logger.info(item)
# Commit to DynamoDB
try:
        response = instance_metadata_table.update_item(
Key={
'InstanceId': item['InstanceId']
},
UpdateExpression="SET #Region = :Region, #LastEventTime = :LastEventTime, #LastEventType = :LastEventType, #Interrupted = :Interrupted, #InterruptedInstanceAction = :InterruptedInstanceAction, #InterruptionTime = :InterruptionTime, #EventHistory = list_append(if_not_exists(#EventHistory, :empty_list), :EventHistory)",
ExpressionAttributeNames={
'#Region' : 'Region',
'#LastEventTime' : 'LastEventTime',
'#LastEventType' : 'LastEventType',
'#Interrupted' : 'Interrupted',
'#InterruptedInstanceAction' : 'InterruptedInstanceAction',
'#InterruptionTime' : 'InterruptionTime',
'#EventHistory' : 'EventHistory'
},
ExpressionAttributeValues={
':Region': item['Region'],
':LastEventTime': item['LastEventTime'],
':LastEventType': item['LastEventType'],
':Interrupted': item['Interrupted'],
':InterruptedInstanceAction': item['InterruptedInstanceAction'],
':InterruptionTime': item['InterruptionTime'],
':EventHistory': [{
"Name": item['LastEventType'],
"Time": item['LastEventTime'],
"State": item['State']
}],
":empty_list": []
},
ReturnValues="NONE"
)
logger.info(response)
except ClientError as e:
message = 'Error updating instance in DynamoDB: {}'.format(e)
logger.info(message)
raise Exception(message)
# End
logger.info('Execution Complete')
return
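# Editorial note (not part of the original function): a hedged sketch of the
# CloudWatch event shape this handler consumes and how it could be exercised
# locally. The instance id below is made up, and INSTANCE_METADATA_TABLE must
# point at a real DynamoDB table for the update_item call to succeed.
#
# >>> sample_event = {
# ...     "version": "0",
# ...     "detail-type": "EC2 Spot Instance Interruption Warning",
# ...     "source": "aws.ec2",
# ...     "region": "us-east-1",
# ...     "time": "2021-01-01T00:00:00Z",
# ...     "detail": {"instance-id": "i-0123456789abcdef0",
# ...                "instance-action": "terminate"},
# ... }
# >>> lambda_handler(sample_event, None)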
| 41.16092 | 331 | 0.633343 |
bebd5bf8e72954dde3e59224030c1c2cff462e3a | 18,128 | py | Python | pyriemann/estimation.py | stonebig/pyRiemann | 176e766f540bd4c7846f38573165fc3d27fc69ca | ["BSD-3-Clause"] | null | null | null | pyriemann/estimation.py | stonebig/pyRiemann | 176e766f540bd4c7846f38573165fc3d27fc69ca | ["BSD-3-Clause"] | null | null | null | pyriemann/estimation.py | stonebig/pyRiemann | 176e766f540bd4c7846f38573165fc3d27fc69ca | ["BSD-3-Clause"] | null | null | null |
"""Estimation of covariance matrices."""
import numpy as np
from .spatialfilters import Xdawn
from .utils.covariance import (covariances, covariances_EP, cospectrum,
coherence)
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.covariance import shrunk_covariance
def _nextpow2(i):
"""Find next power of 2."""
n = 1
while n < i:
n *= 2
return n
class Covariances(BaseEstimator, TransformerMixin):
"""Estimation of covariance matrix.
Perform a simple covariance matrix estimation for each given trial.
Parameters
----------
estimator : string (default: 'scm')
covariance matrix estimator. For regularization consider 'lwf' or 'oas'
For the complete list of estimators, see parameter `estimator` of
:func:`pyriemann.utils.covariance.covariances`.
See Also
--------
ERPCovariances
XdawnCovariances
CospCovariances
HankelCovariances
"""
def __init__(self, estimator='scm'):
"""Init."""
self.estimator = estimator
def fit(self, X, y=None):
"""Fit.
Do nothing. For compatibility purpose.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
y : ndarray shape (n_trials,)
labels corresponding to each trial, not used.
Returns
-------
self : Covariances instance
The Covariances instance.
"""
return self
def transform(self, X):
"""Estimate covariance matrices.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
Returns
-------
covmats : ndarray, shape (n_trials, n_channels, n_channels)
ndarray of covariance matrices for each trials.
"""
covmats = covariances(X, estimator=self.estimator)
return covmats
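# Editorial note (not part of the original module): a minimal usage sketch for
# Covariances, assuming trials of shape (n_trials, n_channels, n_samples);
# the array below is synthetic.
#
# >>> import numpy as np
# >>> X = np.random.randn(10, 8, 256)          # 10 trials, 8 channels
# >>> covs = Covariances(estimator='oas').fit(X).transform(X)
# >>> covs.shape                               # -> (10, 8, 8)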
class ERPCovariances(BaseEstimator, TransformerMixin):
r"""Estimate special form covariance matrix for ERP.
Estimation of special form covariance matrix dedicated to ERP processing.
    For each class, a prototyped response is obtained by averaging across
    trials:
.. math::
\mathbf{P} = \frac{1}{N} \sum_i^N \mathbf{X}_i
    and a super trial is built by concatenating P and the trial X:
.. math::
\mathbf{\tilde{X}}_i = \left[
\begin{array}{c}
\mathbf{P} \\
\mathbf{X}_i
\end{array}
\right]
    This super trial :math:`\mathbf{\tilde{X}}_i` will be used for covariance
    estimation.
    This makes it possible to take the spatial structure of the signal into
    account, as described in [1].
Parameters
----------
classes : list of int | None (default None)
list of classes to take into account for prototype estimation.
        If None (default), all classes will be taken into account.
estimator : string (default: 'scm')
covariance matrix estimator. For regularization consider 'lwf' or 'oas'
For the complete list of estimators, see parameter `estimator` of
:func:`pyriemann.utils.covariance.covariances`.
svd : int | None (default None)
        if not None, the prototype responses will be reduced using an SVD
        with the number of components given by svd.
See Also
--------
Covariances
XdawnCovariances
CospCovariances
HankelCovariances
References
----------
    [1] A. Barachant, M. Congedo, "A Plug&Play P300 BCI Using Information
    Geometry", arXiv:1409.0107, 2014.
    [2] M. Congedo, A. Barachant, A. Andreev, "A New generation of
    Brain-Computer Interface Based on Riemannian Geometry", arXiv:1310.8115,
    2013.
[3] A. Barachant, M. Congedo, G. Van Veen, C. Jutten, "Classification de
potentiels evoques P300 par geometrie riemannienne pour les interfaces
cerveau-machine EEG", 24eme colloque GRETSI, 2013.
"""
def __init__(self, classes=None, estimator='scm', svd=None):
"""Init."""
self.classes = classes
self.estimator = estimator
self.svd = svd
if svd is not None:
if not isinstance(svd, int):
raise TypeError('svd must be None or int')
def fit(self, X, y):
"""Fit.
        Estimate the prototyped response for each class.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
y : ndarray shape (n_trials,)
labels corresponding to each trial.
Returns
-------
self : ERPCovariances instance
The ERPCovariances instance.
"""
if self.classes is not None:
classes = self.classes
else:
classes = np.unique(y)
self.P_ = []
for c in classes:
            # Prototyped response for each class
P = np.mean(X[y == c, :, :], axis=0)
# Apply svd if requested
if self.svd is not None:
U, s, V = np.linalg.svd(P)
P = np.dot(U[:, 0:self.svd].T, P)
self.P_.append(P)
self.P_ = np.concatenate(self.P_, axis=0)
return self
def transform(self, X):
"""Estimate special form covariance matrices.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
Returns
-------
covmats : ndarray, shape (n_trials, n_c, n_c)
ndarray of covariance matrices for each trials, with n_c the size
of covmats equal to n_channels * (n_classes + 1) in case svd is
None and equal to n_channels + n_classes * svd otherwise.
"""
covmats = covariances_EP(X, self.P_, estimator=self.estimator)
return covmats
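# Editorial note (not part of the original module): a hedged sketch of the
# super-trial effect described in the ERPCovariances docstring. With 2 classes
# and no svd, the covariance size should be n_channels * (n_classes + 1).
#
# >>> import numpy as np
# >>> X = np.random.randn(20, 8, 128)
# >>> y = np.array([0, 1] * 10)
# >>> erp_covs = ERPCovariances(estimator='lwf').fit(X, y).transform(X)
# >>> erp_covs.shape                           # -> (20, 24, 24)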
class XdawnCovariances(BaseEstimator, TransformerMixin):
"""Estimate special form covariance matrix for ERP combined with Xdawn.
Estimation of special form covariance matrix dedicated to ERP processing
combined with Xdawn spatial filtering. This is similar to `ERPCovariances`
but data are spatially filtered with `Xdawn`. A complete descrition of the
method is available in [1]_.
The advantage of this estimation is to reduce dimensionality of the
covariance matrices efficiently.
Parameters
----------
nfilter: int (default 4)
        number of Xdawn filters per class.
applyfilters: bool (default True)
        if set to True, spatial filters are applied to the prototypes and the
        signals. When set to False, filters are applied only to the ERP
        prototypes, allowing for better generalization across subjects and
        sessions at the expense of a dimensionality increase. In that case, the
estimation is similar to ERPCovariances with `svd=nfilter` but with
more compact prototype reduction.
classes : list of int | None (default None)
list of classes to take into account for prototype estimation.
        If None (default), all classes will be taken into account.
estimator : string (default: 'scm')
covariance matrix estimator. For regularization consider 'lwf' or 'oas'
For the complete list of estimators, see parameter `estimator` of
:func:`pyriemann.utils.covariance.covariances`.
xdawn_estimator : string (default: 'scm')
covariance matrix estimator for xdawn spatial filtering.
    baseline_cov : array, shape (n_chan, n_chan) | None (default)
baseline_covariance for xdawn. see `Xdawn`.
See Also
--------
ERPCovariances
Xdawn
References
----------
.. [1] Barachant, A. "MEG decoding using Riemannian Geometry and
Unsupervised classification", 2014
"""
def __init__(self,
nfilter=4,
applyfilters=True,
classes=None,
estimator='scm',
xdawn_estimator='scm',
baseline_cov=None):
"""Init."""
self.applyfilters = applyfilters
self.estimator = estimator
self.xdawn_estimator = xdawn_estimator
self.classes = classes
self.nfilter = nfilter
self.baseline_cov = baseline_cov
def fit(self, X, y):
"""Fit.
        Estimate spatial filters and prototyped responses for each class.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
y : ndarray shape (n_trials,)
labels corresponding to each trial.
Returns
-------
self : XdawnCovariances instance
The XdawnCovariances instance.
"""
self.Xd_ = Xdawn(
nfilter=self.nfilter,
classes=self.classes,
estimator=self.xdawn_estimator,
baseline_cov=self.baseline_cov)
self.Xd_.fit(X, y)
self.P_ = self.Xd_.evokeds_
return self
def transform(self, X):
"""Estimate xdawn covariance matrices.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
Returns
-------
covmats : ndarray, shape (n_trials, n_c, n_c)
ndarray of covariance matrices for each trials.
"""
if self.applyfilters:
X = self.Xd_.transform(X)
covmats = covariances_EP(X, self.P_, estimator=self.estimator)
return covmats
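# Editorial note (not part of the original module): a hedged usage sketch for
# XdawnCovariances. With applyfilters=True the covariance dimension is driven
# by nfilter and the number of classes rather than by n_channels, which is the
# dimensionality reduction mentioned in the docstring; exact sizes depend on
# the Xdawn implementation.
#
# >>> import numpy as np
# >>> X = np.random.randn(40, 16, 128)
# >>> y = np.array([0, 1] * 20)
# >>> xd_covs = XdawnCovariances(nfilter=4).fit(X, y).transform(X)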
###############################################################################
class CospCovariances(BaseEstimator, TransformerMixin):
"""Estimation of cospectral covariance matrix.
Co-spectral matrices are the real part of complex cross-spectral matrices
(see :func:`pyriemann.utils.covariance.cross_spectrum`), estimated as the
spectrum covariance in the frequency domain. This method returns a 4-d
    array with a cospectral covariance matrix for each trial and in each
frequency bin of the FFT.
Parameters
----------
window : int (default 128)
The length of the FFT window used for spectral estimation.
overlap : float (default 0.75)
        The percentage of overlap between windows.
fmin : float | None, (default None)
The minimal frequency to be returned.
fmax : float | None, (default None)
The maximal frequency to be returned.
fs : float | None, (default None)
The sampling frequency of the signal.
Attributes
----------
freqs_ : ndarray, shape (n_freqs,)
If transformed, the frequencies associated to cospectra.
See Also
--------
Covariances
HankelCovariances
Coherences
"""
def __init__(self, window=128, overlap=0.75, fmin=None, fmax=None,
fs=None):
"""Init."""
self.window = _nextpow2(window)
self.overlap = overlap
self.fmin = fmin
self.fmax = fmax
self.fs = fs
def fit(self, X, y=None):
"""Fit.
Do nothing. For compatibility purpose.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
y : ndarray, shape (n_trials,)
labels corresponding to each trial, not used.
Returns
-------
self : CospCovariances instance
The CospCovariances instance.
"""
return self
def transform(self, X):
"""Estimate the cospectral covariance matrices.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
Returns
-------
covmats : ndarray, shape (n_trials, n_channels, n_channels, n_freqs)
ndarray of covariance matrices for each trials and for each
frequency bin.
"""
Nt = len(X)
out = []
for i in range(Nt):
S, freqs = cospectrum(
X[i],
window=self.window,
overlap=self.overlap,
fmin=self.fmin,
fmax=self.fmax,
fs=self.fs)
out.append(S)
self.freqs_ = freqs
return np.array(out)
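# Editorial note (not part of the original module): a hedged sketch showing the
# 4-d output described above, one cospectral matrix per trial and frequency
# bin; the sampling rate and band limits below are arbitrary.
#
# >>> import numpy as np
# >>> X = np.random.randn(5, 8, 512)
# >>> cosp = CospCovariances(window=128, fs=128., fmin=1., fmax=32.)
# >>> S = cosp.fit(X).transform(X)             # shape (5, 8, 8, n_freqs)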
class Coherences(CospCovariances):
"""Estimation of coherences matrix.
    Coherence matrix estimation. This method returns a
    4-d array with a coherence matrix estimate for each trial and in each
    frequency bin of the FFT.
    The estimation of the coherence matrix is done with the matplotlib cohere
    function.
Parameters
----------
window : int (default 128)
        The length of the FFT window used for spectral estimation.
    overlap : float (default 0.75)
        The percentage of overlap between windows.
    fmin : float | None, (default None)
        The minimal frequency to be returned.
fmax : float | None, (default None)
The maximal frequency to be returned.
fs : float | None, (default None)
The sampling frequency of the signal.
See Also
--------
Covariances
HankelCovariances
CospCovariances
"""
def transform(self, X):
"""Estimate the coherences matrices.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
Returns
-------
covmats : ndarray, shape (n_trials, n_channels, n_channels, n_freqs)
ndarray of coherence matrices for each trials and for each
frequency bin.
"""
Nt, Ne, _ = X.shape
out = []
for i in range(Nt):
S = coherence(
X[i],
window=self.window,
overlap=self.overlap,
fmin=self.fmin,
fmax=self.fmax,
fs=self.fs)
out.append(S)
return np.array(out)
class HankelCovariances(BaseEstimator, TransformerMixin):
"""Estimation of covariance matrix with time delayed hankel matrices.
    This estimation is useful to capture the spectral dynamics of the signal,
    similarly to the CSSP method. It is done by concatenating time-delayed
    versions of the signal before covariance estimation.
Parameters
----------
    delays: int | list of int (default 4)
        the delays to apply for the Hankel matrices. If an int is given, a
        range of delays up to that value is used. A list of int can be given.
estimator : string (default: 'scm')
covariance matrix estimator. For regularization consider 'lwf' or 'oas'
For the complete list of estimators, see parameter `estimator` of
:func:`pyriemann.utils.covariance.covariances`.
See Also
--------
Covariances
ERPCovariances
XdawnCovariances
CospCovariances
"""
def __init__(self, delays=4, estimator='scm'):
"""Init."""
self.delays = delays
self.estimator = estimator
def fit(self, X, y=None):
"""Fit.
Do nothing. For compatibility purpose.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
y : ndarray shape (n_trials,)
labels corresponding to each trial, not used.
Returns
-------
        self : HankelCovariances instance
            The HankelCovariances instance.
"""
return self
def transform(self, X):
"""Estimate the hankel covariance matrices.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of trials.
Returns
-------
covmats : ndarray, shape (n_trials, n_channels, n_channels)
ndarray of covariance matrices for each trials.
"""
if isinstance(self.delays, int):
delays = range(1, self.delays)
else:
delays = self.delays
X2 = []
for x in X:
tmp = x
for d in delays:
tmp = np.r_[tmp, np.roll(x, d, axis=-1)]
X2.append(tmp)
X2 = np.array(X2)
covmats = covariances(X2, estimator=self.estimator)
return covmats
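# Editorial note (not part of the original module): a hedged sketch of the
# time-delay embedding above. With delays=4 (an int), delays 1..3 are rolled
# copies stacked under the original channels, so the covariance is estimated
# on 4 * n_channels rows.
#
# >>> import numpy as np
# >>> X = np.random.randn(10, 8, 256)
# >>> hk = HankelCovariances(delays=4, estimator='oas')
# >>> covs = hk.fit(X).transform(X)            # -> (10, 32, 32)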
class Shrinkage(BaseEstimator, TransformerMixin):
"""Regularization of covariance matrices by shrinkage
This transformer apply a shrinkage regularization to any covariance matrix.
It directly use the `shrunk_covariance` function from scikit learn, applied
on each trial.
Parameters
----------
shrinkage: float, (default, 0.1)
Coefficient in the convex combination used for the computation of the
        shrunk estimate. Must be between 0 and 1.
Notes
-----
.. versionadded:: 0.2.5
"""
def __init__(self, shrinkage=0.1):
"""Init."""
self.shrinkage = shrinkage
def fit(self, X, y=None):
"""Fit.
Do nothing. For compatibility purpose.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_samples)
ndarray of Target data.
y : ndarray shape (n_trials,)
Labels corresponding to each trial, not used.
Returns
-------
self : Shrinkage instance
The Shrinkage instance.
"""
return self
def transform(self, X):
"""Shrink and return the covariance matrices.
Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_channels)
ndarray of covariances matrices
Returns
-------
covmats : ndarray, shape (n_trials, n_channels, n_channels)
ndarray of covariance matrices for each trials.
"""
covmats = np.zeros_like(X)
for ii, x in enumerate(X):
covmats[ii] = shrunk_covariance(x, self.shrinkage)
return covmats
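# Editorial note (not part of the original module): scikit-learn's
# shrunk_covariance used above computes, for each trial covariance C with
# n features, (1 - shrinkage) * C + shrinkage * (trace(C) / n) * I, i.e. a
# convex combination of C with a scaled identity. A minimal sketch:
#
# >>> import numpy as np
# >>> covs = Covariances().fit_transform(np.random.randn(10, 8, 256))
# >>> shrunk = Shrinkage(shrinkage=0.1).fit(covs).transform(covs)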
| 29.963636 | 79 | 0.589365 |
4b885584ea2f63e286e80a8812a9ed659ffbfebf | 354 | py | Python | ssdcoin/simulator/simulator_constants.py | ZeDon-SP/ssdcoin-blockchain | 310b461fa43e26305771322438206d9a5fc00f7a | ["Apache-2.0"] | 7 | 2021-07-20T16:54:56.000Z | 2021-11-05T10:05:07.000Z | ssdcoin/simulator/simulator_constants.py | ZeDon-SP/ssdcoin-blockchain | 310b461fa43e26305771322438206d9a5fc00f7a | ["Apache-2.0"] | 2 | 2021-07-23T15:26:36.000Z | 2021-08-18T17:37:50.000Z | ssdcoin/simulator/simulator_constants.py | ZeDon-SP/ssdcoin-blockchain | 310b461fa43e26305771322438206d9a5fc00f7a | ["Apache-2.0"] | null | null | null |
if __name__ == "__main__":
from tests.block_tools import BlockTools, test_constants
from ssdcoin.util.default_root import DEFAULT_ROOT_PATH
# TODO: mariano: fix this with new consensus
bt = BlockTools(root_path=DEFAULT_ROOT_PATH)
new_genesis_block = bt.create_genesis_block(test_constants, b"0")
print(bytes(new_genesis_block))
| 35.4 | 69 | 0.768362 |