Dataset schema (one record per source file; ⌀ marks a nullable column):

- hexsha: string (length 40)
- size: int64 (3 to 1.03M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 3 to 972)
- max_stars_repo_name: string (length 6 to 130)
- max_stars_repo_head_hexsha: string (length 40 to 78)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k), nullable
- max_stars_repo_stars_event_min_datetime: string (length 24), nullable
- max_stars_repo_stars_event_max_datetime: string (length 24), nullable
- max_issues_repo_path: string (length 3 to 972)
- max_issues_repo_name: string (length 6 to 130)
- max_issues_repo_head_hexsha: string (length 40 to 78)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 116k), nullable
- max_issues_repo_issues_event_min_datetime: string (length 24), nullable
- max_issues_repo_issues_event_max_datetime: string (length 24), nullable
- max_forks_repo_path: string (length 3 to 972)
- max_forks_repo_name: string (length 6 to 130)
- max_forks_repo_head_hexsha: string (length 40 to 78)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k), nullable
- max_forks_repo_forks_event_min_datetime: string (length 24), nullable
- max_forks_repo_forks_event_max_datetime: string (length 24), nullable
- content: string (length 3 to 1.03M)
- avg_line_length: float64 (1.13 to 941k)
- max_line_length: int64 (2 to 941k)
- alphanum_fraction: float64 (0 to 1)
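The records below are easier to read with one field per line, followed by the stored file contents. As a rough, hedged sketch (this export does not name the dataset, so the path below is a placeholder), records with this schema could be streamed with the Hugging Face `datasets` library like this:

```python
# Sketch only: stream records that follow the schema above.
# "org/code-dataset" is a placeholder path, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)
for record in ds.take(2):
    print(record["max_stars_repo_name"], record["max_stars_repo_path"])
    print(record["size"], record["ext"], record["lang"], record["alphanum_fraction"])
    print(record["content"][:120])  # first 120 characters of the stored source file
```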
Record 1

- hexsha: e03372c545727bf02da983c175a8b95af089be73
- size: 25,226 | ext: py | lang: Python
- max_stars_repo_path: lib/googlecloudsdk/core/credentials/store.py
- max_stars_repo_name: bopopescu/Google-Cloud-SDK-1
- max_stars_repo_head_hexsha: c4683bacb2f6192d8a816932e438a0493085469b
- max_stars_repo_licenses: ["Apache-2.0"]
- max_stars_count: null | stars event min/max datetime: null
- max_issues_repo_path: lib/googlecloudsdk/core/credentials/store.py
- max_issues_repo_name: bopopescu/Google-Cloud-SDK-1
- max_issues_repo_head_hexsha: c4683bacb2f6192d8a816932e438a0493085469b
- max_issues_repo_licenses: ["Apache-2.0"]
- max_issues_count: null | issues event min/max datetime: null
- max_forks_repo_path: lib/googlecloudsdk/core/credentials/store.py
- max_forks_repo_name: bopopescu/Google-Cloud-SDK-1
- max_forks_repo_head_hexsha: c4683bacb2f6192d8a816932e438a0493085469b
- max_forks_repo_licenses: ["Apache-2.0"]
- max_forks_count: 1
- max_forks_repo_forks_event_min_datetime: 2020-07-24T20:13:29.000Z
- max_forks_repo_forks_event_max_datetime: 2020-07-24T20:13:29.000Z
- content:
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""One-line documentation for auth module.
A detailed description of auth.
"""
from __future__ import absolute_import
from __future__ import division
import datetime
import json
import os
import textwrap
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.credentials import creds
from googlecloudsdk.core.credentials import devshell as c_devshell
from googlecloudsdk.core.credentials import gce as c_gce
from googlecloudsdk.core.util import files
import httplib2
from oauth2client import client
from oauth2client.contrib import gce as oauth2client_gce
from oauth2client.contrib import reauth_errors
GOOGLE_OAUTH2_PROVIDER_AUTHORIZATION_URI = (
'https://accounts.google.com/o/oauth2/auth')
GOOGLE_OAUTH2_PROVIDER_REVOKE_URI = (
'https://accounts.google.com/o/oauth2/revoke')
GOOGLE_OAUTH2_PROVIDER_TOKEN_URI = (
'https://accounts.google.com/o/oauth2/token')
class Error(exceptions.Error):
"""Exceptions for the credentials module."""
class AuthenticationException(Error):
"""Exceptions that tell the users to run auth login."""
def __init__(self, message):
super(AuthenticationException, self).__init__(textwrap.dedent("""\
{message}
Please run:
$ gcloud auth login
to obtain new credentials, or if you have already logged in with a
different account:
$ gcloud config set account ACCOUNT
to select an already authenticated account to use.""".format(
message=message)))
class NoCredentialsForAccountException(AuthenticationException):
"""Exception for when no credentials are found for an account."""
def __init__(self, account):
super(NoCredentialsForAccountException, self).__init__(
'Your current active account [{account}] does not have any'
' valid credentials'.format(account=account))
class NoActiveAccountException(AuthenticationException):
"""Exception for when there are no valid active credentials."""
def __init__(self):
super(NoActiveAccountException, self).__init__(
'You do not currently have an active account selected.')
class TokenRefreshError(AuthenticationException,
client.AccessTokenRefreshError):
"""An exception raised when the auth tokens fail to refresh."""
def __init__(self, error):
message = ('There was a problem refreshing your current auth tokens: {0}'
.format(error))
super(TokenRefreshError, self).__init__(message)
class ReauthenticationException(Error):
"""Exceptions that tells the user to retry his command or run auth login."""
def __init__(self, message):
super(ReauthenticationException, self).__init__(textwrap.dedent("""\
{message}
Please retry your command or run:
$ gcloud auth login
To obtain new credentials.""".format(message=message)))
class TokenRefreshReauthError(ReauthenticationException):
"""An exception raised when the auth tokens fail to refresh due to reauth."""
def __init__(self, error):
message = ('There was a problem reauthenticating while refreshing your '
'current auth tokens: {0}').format(error)
super(TokenRefreshReauthError, self).__init__(message)
class InvalidCredentialFileException(Error):
"""Exception for when an external credential file could not be loaded."""
def __init__(self, f, e):
super(InvalidCredentialFileException, self).__init__(
'Failed to load credential file: [{f}]. {message}'
.format(f=f, message=str(e)))
class CredentialFileSaveError(Error):
"""An error for when we fail to save a credential file."""
pass
class FlowError(Error):
"""Exception for when something goes wrong with a web flow."""
class RevokeError(Error):
"""Exception for when there was a problem revoking."""
class StaticCredentialProviders(object):
"""Manages a list of credential providers."""
def __init__(self):
self._providers = []
def AddProvider(self, provider):
self._providers.append(provider)
def RemoveProvider(self, provider):
self._providers.remove(provider)
def GetCredentials(self, account):
for provider in self._providers:
cred = provider.GetCredentials(account)
if cred is not None:
return cred
return None
def GetAccounts(self):
accounts = set()
for provider in self._providers:
accounts |= provider.GetAccounts()
return accounts
STATIC_CREDENTIAL_PROVIDERS = StaticCredentialProviders()
class DevShellCredentialProvider(object):
"""Provides account, project and credential data for devshell env."""
def GetCredentials(self, account):
devshell_creds = c_devshell.LoadDevshellCredentials()
if devshell_creds and (devshell_creds.devshell_response.user_email ==
account):
return devshell_creds
return None
def GetAccount(self):
return c_devshell.DefaultAccount()
def GetAccounts(self):
devshell_creds = c_devshell.LoadDevshellCredentials()
if devshell_creds:
return set([devshell_creds.devshell_response.user_email])
return set()
def GetProject(self):
return c_devshell.Project()
def Register(self):
properties.VALUES.core.account.AddCallback(self.GetAccount)
properties.VALUES.core.project.AddCallback(self.GetProject)
STATIC_CREDENTIAL_PROVIDERS.AddProvider(self)
def UnRegister(self):
properties.VALUES.core.account.RemoveCallback(self.GetAccount)
properties.VALUES.core.project.RemoveCallback(self.GetProject)
STATIC_CREDENTIAL_PROVIDERS.RemoveProvider(self)
class GceCredentialProvider(object):
"""Provides account, project and credential data for gce vm env."""
def GetCredentials(self, account):
if account in c_gce.Metadata().Accounts():
return AcquireFromGCE(account)
return None
def GetAccount(self):
if properties.VALUES.core.check_gce_metadata.GetBool():
return c_gce.Metadata().DefaultAccount()
return None
def GetAccounts(self):
return set(c_gce.Metadata().Accounts())
def GetProject(self):
if properties.VALUES.core.check_gce_metadata.GetBool():
return c_gce.Metadata().Project()
return None
def Register(self):
properties.VALUES.core.account.AddCallback(self.GetAccount)
properties.VALUES.core.project.AddCallback(self.GetProject)
STATIC_CREDENTIAL_PROVIDERS.AddProvider(self)
def UnRegister(self):
properties.VALUES.core.account.RemoveCallback(self.GetAccount)
properties.VALUES.core.project.RemoveCallback(self.GetProject)
STATIC_CREDENTIAL_PROVIDERS.RemoveProvider(self)
def AvailableAccounts():
"""Get all accounts that have credentials stored for the CloudSDK.
This function will also ping the GCE metadata server to see if GCE credentials
are available.
Returns:
[str], List of the accounts.
"""
store = creds.GetCredentialStore()
accounts = store.GetAccounts() | STATIC_CREDENTIAL_PROVIDERS.GetAccounts()
return sorted(accounts)
def LoadIfEnabled():
"""Get the credentials associated with the current account.
If credentials have been disabled via properties, this will return None.
Otherwise it will load credentials like normal. If credential loading fails
for any reason (including the user not being logged in), the usual exception
is raised.
Returns:
The credentials or None. The only time None is returned is if credentials
are disabled via properties. If no credentials are present but credentials
are enabled via properties, it will be an error.
Raises:
NoActiveAccountException: If account is not provided and there is no
active account.
c_gce.CannotConnectToMetadataServerException: If the metadata server cannot
be reached.
TokenRefreshError: If the credentials fail to refresh.
TokenRefreshReauthError: If the credentials fail to refresh due to reauth.
"""
if properties.VALUES.auth.disable_credentials.GetBool():
return None
return Load()
def Load(account=None, scopes=None, prevent_refresh=False):
"""Get the credentials associated with the provided account.
This loads credentials regardless of whether credentials have been disabled
via properties. Only use this when the functionality of the caller absolutely
requires credentials (like printing out a token) vs logically requiring
credentials (like for an http request).
Args:
account: str, The account address for the credentials being fetched. If
None, the account stored in the core.account property is used.
scopes: tuple, Custom auth scopes to request. By default CLOUDSDK_SCOPES
are requested.
prevent_refresh: bool, If True, do not refresh the access token even if it
is out of date. (For use with operations that do not require a current
access token, such as credential revocation.)
Returns:
oauth2client.client.Credentials, The specified credentials.
Raises:
NoActiveAccountException: If account is not provided and there is no
active account.
NoCredentialsForAccountException: If there are no valid credentials
available for the provided or active account.
c_gce.CannotConnectToMetadataServerException: If the metadata server cannot
be reached.
TokenRefreshError: If the credentials fail to refresh.
TokenRefreshReauthError: If the credentials fail to refresh due to reauth.
"""
# If a credential file is set, just use that and ignore the active account
# and whatever is in the credential store.
cred_file_override = properties.VALUES.auth.credential_file_override.Get()
if cred_file_override:
log.info('Using alternate credentials from file: [%s]',
cred_file_override)
try:
cred = client.GoogleCredentials.from_stream(cred_file_override)
except client.Error as e:
raise InvalidCredentialFileException(cred_file_override, e)
if cred.create_scoped_required():
if scopes is None:
scopes = config.CLOUDSDK_SCOPES
cred = cred.create_scoped(scopes)
# Set token_uri after scopes since token_uri needs to be explicitly
# preserved when scopes are applied.
token_uri_override = properties.VALUES.auth.token_host.Get()
if token_uri_override:
cred_type = creds.CredentialType.FromCredentials(cred)
if cred_type in (creds.CredentialType.SERVICE_ACCOUNT,
creds.CredentialType.P12_SERVICE_ACCOUNT):
cred.token_uri = token_uri_override
# The credential override is not stored in credential store, but we still
# want to cache access tokens between invocations.
return creds.MaybeAttachAccessTokenCacheStore(cred)
if not account:
account = properties.VALUES.core.account.Get()
if not account:
raise NoActiveAccountException()
cred = STATIC_CREDENTIAL_PROVIDERS.GetCredentials(account)
if cred is not None:
return cred
store = creds.GetCredentialStore()
cred = store.Load(account)
if not cred:
raise NoCredentialsForAccountException(account)
# cred.token_expiry is in UTC time.
if (not prevent_refresh and
(not cred.token_expiry or
cred.token_expiry < cred.token_expiry.utcnow())):
Refresh(cred)
return cred
def Refresh(credentials, http_client=None):
"""Refresh credentials.
Calls credentials.refresh(), unless they're SignedJwtAssertionCredentials.
Args:
credentials: oauth2client.client.Credentials, The credentials to refresh.
http_client: httplib2.Http, The http transport to refresh with.
Raises:
TokenRefreshError: If the credentials fail to refresh.
TokenRefreshReauthError: If the credentials fail to refresh due to reauth.
"""
try:
credentials.refresh(http_client or http.Http())
except (client.AccessTokenRefreshError, httplib2.ServerNotFoundError) as e:
raise TokenRefreshError(e.message)
except reauth_errors.ReauthError as e:
raise TokenRefreshReauthError(e.message)
def Store(credentials, account=None, scopes=None):
"""Store credentials according for an account address.
Args:
credentials: oauth2client.client.Credentials, The credentials to be stored.
account: str, The account address of the account they're being stored for.
If None, the account stored in the core.account property is used.
scopes: tuple, Custom auth scopes to request. By default CLOUDSDK_SCOPES
are requested.
Raises:
NoActiveAccountException: If account is not provided and there is no
active account.
"""
cred_type = creds.CredentialType.FromCredentials(credentials)
if not cred_type.is_serializable:
return
if not account:
account = properties.VALUES.core.account.Get()
if not account:
raise NoActiveAccountException()
store = creds.GetCredentialStore()
store.Store(account, credentials)
_LegacyGenerator(account, credentials, scopes).WriteTemplate()
def ActivateCredentials(account, credentials):
"""Validates, stores and activates credentials with given account."""
Refresh(credentials)
Store(credentials, account)
properties.PersistProperty(properties.VALUES.core.account, account)
def RevokeCredentials(credentials):
credentials.revoke(http.Http())
def Revoke(account=None):
"""Revoke credentials and clean up related files.
Args:
account: str, The account address for the credentials to be revoked. If
None, the currently active account is used.
Returns:
'True' if this call revoked the account; 'False' if the account was already
revoked.
Raises:
NoActiveAccountException: If account is not provided and there is no
active account.
NoCredentialsForAccountException: If the provided account is not tied to any
known credentials.
RevokeError: If there was a more general problem revoking the account.
"""
if not account:
account = properties.VALUES.core.account.Get()
if not account:
raise NoActiveAccountException()
if account in c_gce.Metadata().Accounts():
raise RevokeError('Cannot revoke GCE-provided credentials.')
credentials = Load(account, prevent_refresh=True)
if not credentials:
raise NoCredentialsForAccountException(account)
if isinstance(credentials, c_devshell.DevshellCredentials):
raise RevokeError(
'Cannot revoke the automatically provisioned Cloud Shell credential. '
'This comes from your browser session and will not persist outside '
'of your connected Cloud Shell session.')
rv = True
try:
RevokeCredentials(credentials)
except client.TokenRevokeError as e:
if e.args[0] == 'invalid_token':
rv = False
else:
raise
store = creds.GetCredentialStore()
store.Remove(account)
_LegacyGenerator(account, credentials).Clean()
files.RmTree(config.Paths().LegacyCredentialsDir(account))
return rv
def AcquireFromWebFlow(launch_browser=True,
auth_uri=None,
token_uri=None,
scopes=None,
client_id=None,
client_secret=None):
"""Get credentials via a web flow.
Args:
launch_browser: bool, Open a new web browser window for authorization.
auth_uri: str, URI to open for authorization.
token_uri: str, URI to use for refreshing.
scopes: string or iterable of strings, scope(s) of the credentials being
requested.
client_id: str, id of the client requesting authorization
client_secret: str, client secret of the client requesting authorization
Returns:
client.Credentials, Newly acquired credentials from the web flow.
Raises:
FlowError: If there is a problem with the web flow.
"""
if auth_uri is None:
auth_uri = properties.VALUES.auth.auth_host.Get(required=True)
if token_uri is None:
token_uri = properties.VALUES.auth.token_host.Get(required=True)
if scopes is None:
scopes = config.CLOUDSDK_SCOPES
if client_id is None:
client_id = properties.VALUES.auth.client_id.Get(required=True)
if client_secret is None:
client_secret = properties.VALUES.auth.client_secret.Get(required=True)
webflow = client.OAuth2WebServerFlow(
client_id=client_id,
client_secret=client_secret,
scope=scopes,
user_agent=config.CLOUDSDK_USER_AGENT,
auth_uri=auth_uri,
token_uri=token_uri,
prompt='select_account')
return RunWebFlow(webflow, launch_browser=launch_browser)
def RunWebFlow(webflow, launch_browser=True):
"""Runs a preconfigured webflow to get an auth token.
Args:
webflow: client.OAuth2WebServerFlow, The configured flow to run.
launch_browser: bool, Open a new web browser window for authorization.
Returns:
client.Credentials, Newly acquired credentials from the web flow.
Raises:
FlowError: If there is a problem with the web flow.
"""
# pylint:disable=g-import-not-at-top, This is imported on demand for
# performance reasons.
from googlecloudsdk.core.credentials import flow
try:
cred = flow.Run(webflow, launch_browser=launch_browser, http=http.Http())
except flow.Error as e:
raise FlowError(e)
return cred
def AcquireFromToken(refresh_token,
token_uri=GOOGLE_OAUTH2_PROVIDER_TOKEN_URI,
revoke_uri=GOOGLE_OAUTH2_PROVIDER_REVOKE_URI):
"""Get credentials from an already-valid refresh token.
Args:
refresh_token: An oauth2 refresh token.
token_uri: str, URI to use for refreshing.
revoke_uri: str, URI to use for revoking.
Returns:
client.Credentials, Credentials made from the refresh token.
"""
cred = client.OAuth2Credentials(
access_token=None,
client_id=properties.VALUES.auth.client_id.Get(required=True),
client_secret=properties.VALUES.auth.client_secret.Get(required=True),
refresh_token=refresh_token,
# always start expired
token_expiry=datetime.datetime.utcnow(),
token_uri=token_uri,
user_agent=config.CLOUDSDK_USER_AGENT,
revoke_uri=revoke_uri)
return cred
def AcquireFromGCE(account=None):
"""Get credentials from a GCE metadata server.
Args:
account: str, The account name to use. If none, the default is used.
Returns:
client.Credentials, Credentials taken from the metadata server.
Raises:
c_gce.CannotConnectToMetadataServerException: If the metadata server cannot
be reached.
TokenRefreshError: If the credentials fail to refresh.
TokenRefreshReauthError: If the credentials fail to refresh due to reauth.
Error: If a non-default service account is used.
"""
default_account = c_gce.Metadata().DefaultAccount()
if account is None:
account = default_account
if account != default_account:
raise Error('Unable to use non-default GCE service accounts.')
# Metadata server does not yet provide multiple service accounts.
credentials = oauth2client_gce.AppAssertionCredentials()
Refresh(credentials)
return credentials
def SaveCredentialsAsADC(credentials, file_path):
"""Saves the credentials to the given file.
This file can be read back via
cred = client.GoogleCredentials.from_stream(file_path)
Args:
credentials: client.OAuth2Credentials, obtained from a web flow
or service account.
file_path: str, file path to store credentials to. The file will be created.
Raises:
CredentialFileSaveError: on file io errors.
"""
creds_type = creds.CredentialType.FromCredentials(credentials)
if creds_type == creds.CredentialType.P12_SERVICE_ACCOUNT:
raise CredentialFileSaveError(
'Error saving Application Default Credentials: p12 keys are not '
'supported in this format')
if creds_type == creds.CredentialType.USER_ACCOUNT:
credentials = client.GoogleCredentials(
credentials.access_token,
credentials.client_id,
credentials.client_secret,
credentials.refresh_token,
credentials.token_expiry,
credentials.token_uri,
credentials.user_agent,
credentials.revoke_uri)
try:
with files.OpenForWritingPrivate(file_path) as f:
json.dump(credentials.serialization_data, f, sort_keys=True,
indent=2, separators=(',', ': '))
except IOError as e:
log.debug(e, exc_info=True)
raise CredentialFileSaveError(
'Error saving Application Default Credentials: ' + str(e))
class _LegacyGenerator(object):
"""A class to generate the credential file for legacy tools."""
def __init__(self, account, credentials, scopes=None):
self.credentials = credentials
self.credentials_type = creds.CredentialType.FromCredentials(credentials)
if self.credentials_type == creds.CredentialType.UNKNOWN:
raise creds.UnknownCredentialsType('Unknown credentials type.')
if scopes is None:
self.scopes = config.CLOUDSDK_SCOPES
else:
self.scopes = scopes
paths = config.Paths()
# Bq file while not generated here is created for caching
# credentials, register so it is cleaned up.
self._bq_path = paths.LegacyCredentialsBqPath(account)
self._gsutil_path = paths.LegacyCredentialsGSUtilPath(account)
self._p12_key_path = paths.LegacyCredentialsP12KeyPath(account)
self._adc_path = paths.LegacyCredentialsAdcPath(account)
def Clean(self):
"""Remove the credential file."""
paths = [
self._bq_path,
self._gsutil_path,
self._p12_key_path,
self._adc_path,
]
for p in paths:
try:
os.remove(p)
except OSError:
# file did not exist, so we're already done.
pass
def WriteTemplate(self):
"""Write the credential file."""
# General credentials used by bq and gsutil.
if self.credentials_type != creds.CredentialType.P12_SERVICE_ACCOUNT:
SaveCredentialsAsADC(self.credentials, self._adc_path)
if self.credentials_type == creds.CredentialType.USER_ACCOUNT:
# We create a small .boto file for gsutil, to be put in BOTO_PATH.
# Our client_id and client_secret should accompany our refresh token;
# if a user loaded any other .boto files that specified a different
# id and secret, those would override our id and secret, causing any
# attempts to obtain an access token with our refresh token to fail.
self._WriteFileContents(
self._gsutil_path, '\n'.join([
'[OAuth2]',
'client_id = {cid}',
'client_secret = {secret}',
'',
'[Credentials]',
'gs_oauth2_refresh_token = {token}',
]).format(cid=config.CLOUDSDK_CLIENT_ID,
secret=config.CLOUDSDK_CLIENT_NOTSOSECRET,
token=self.credentials.refresh_token))
elif self.credentials_type == creds.CredentialType.SERVICE_ACCOUNT:
self._WriteFileContents(
self._gsutil_path, '\n'.join([
'[Credentials]',
'gs_service_key_file = {key_file}',
]).format(key_file=self._adc_path))
else:
raise CredentialFileSaveError(
'Unsupported credentials type {0}'.format(type(self.credentials)))
else: # P12 service account
cred = self.credentials
key = cred._private_key_pkcs12 # pylint: disable=protected-access
password = cred._private_key_password # pylint: disable=protected-access
with files.OpenForWritingPrivate(self._p12_key_path, binary=True) as pk:
pk.write(key)
# the .boto file gets some different fields
self._WriteFileContents(
self._gsutil_path, '\n'.join([
'[Credentials]',
'gs_service_client_id = {account}',
'gs_service_key_file = {key_file}',
'gs_service_key_file_password = {key_password}',
]).format(account=self.credentials.service_account_email,
key_file=self._p12_key_path,
key_password=password))
def _WriteFileContents(self, filepath, contents):
"""Writes contents to a path, ensuring mkdirs.
Args:
filepath: str, The path of the file to write.
contents: str, The contents to write to the file.
"""
full_path = os.path.realpath(os.path.expanduser(filepath))
try:
with files.OpenForWritingPrivate(full_path) as cred_file:
cred_file.write(contents)
except (OSError, IOError) as e:
raise Exception('Failed to open %s for writing: %s' % (filepath, e))
- avg_line_length: 33.860403
- max_line_length: 80
- alphanum_fraction: 0.722271
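The `store.py` module reproduced above is the Cloud SDK's internal credential store. As a hedged illustration only (this is not a supported public API, and it assumes the SDK's bundled `lib` directory is importable), its own helpers could be exercised like this:

```python
# Sketch only: uses Load, Refresh, and AvailableAccounts exactly as defined
# in the module above. Assumes google-cloud-sdk/lib is on sys.path and that
# `gcloud auth login` has already stored credentials for an account.
from googlecloudsdk.core.credentials import store as c_store

print(c_store.AvailableAccounts())   # accounts with stored credentials
creds = c_store.Load()               # credentials for the active account
c_store.Refresh(creds)               # refresh the access token if it is stale
print(bool(creds.access_token))
```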
Record 2

- hexsha: 54cf6c01143c0b4a3e9b95f89197f999c638ea3b
- size: 397 | ext: py | lang: Python
- max_stars_repo_path: school_app/wsgi.py
- max_stars_repo_name: shravakushwaha/school_system
- max_stars_repo_head_hexsha: 840f414a7a56b5d424695af811b621c18a1ec72a
- max_stars_repo_licenses: ["MIT"]
- max_stars_count: 235
- max_stars_repo_stars_event_min_datetime: 2020-05-05T21:05:22.000Z
- max_stars_repo_stars_event_max_datetime: 2022-03-27T07:41:51.000Z
- max_issues_repo_path: school_app/wsgi.py
- max_issues_repo_name: shravakushwaha/school_system
- max_issues_repo_head_hexsha: 840f414a7a56b5d424695af811b621c18a1ec72a
- max_issues_repo_licenses: ["MIT"]
- max_issues_count: 24
- max_issues_repo_issues_event_min_datetime: 2020-05-05T21:44:40.000Z
- max_issues_repo_issues_event_max_datetime: 2022-03-12T00:28:02.000Z
- max_forks_repo_path: school_app/wsgi.py
- max_forks_repo_name: shravakushwaha/school_system
- max_forks_repo_head_hexsha: 840f414a7a56b5d424695af811b621c18a1ec72a
- max_forks_repo_licenses: ["MIT"]
- max_forks_count: 133
- max_forks_repo_forks_event_min_datetime: 2020-05-05T20:53:42.000Z
- max_forks_repo_forks_event_max_datetime: 2022-03-30T00:34:56.000Z
- content:
"""
WSGI config for school_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "school_app.settings")
application = get_wsgi_application()
- avg_line_length: 23.352941
- max_line_length: 78
- alphanum_fraction: 0.788413
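The `wsgi.py` module above only exposes the WSGI callable `application`. As a usage illustration (a sketch for local testing only, assuming the `school_app` Django project is importable and configured), it can be served with the standard-library `wsgiref` server:

```python
# Sketch only: serve the `application` object defined in school_app/wsgi.py.
from wsgiref.simple_server import make_server

from school_app.wsgi import application

with make_server("127.0.0.1", 8000, application) as httpd:
    print("Serving on http://127.0.0.1:8000 ...")
    httpd.serve_forever()
```

In production a WSGI server such as Gunicorn or uWSGI would point at `school_app.wsgi:application` instead.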
Record 3

- hexsha: a8d0293d13645c65b473cb28ef3e0bfc9a6c2a82
- size: 106,124 | ext: py | lang: Python
- max_stars_repo_path: tensorflow/python/ops/nn_ops.py
- max_stars_repo_name: nuchi/tensorflow
- max_stars_repo_head_hexsha: 3bc595df1d9ea2015fa892bbd69c38ae4c4857d3
- max_stars_repo_licenses: ["Apache-2.0"]
- max_stars_count: 1
- max_stars_repo_stars_event_min_datetime: 2018-04-18T02:47:44.000Z
- max_stars_repo_stars_event_max_datetime: 2018-04-18T02:47:44.000Z
- max_issues_repo_path: tensorflow/python/ops/nn_ops.py
- max_issues_repo_name: Richardyouth/tensorflow
- max_issues_repo_head_hexsha: 3bc595df1d9ea2015fa892bbd69c38ae4c4857d3
- max_issues_repo_licenses: ["Apache-2.0"]
- max_issues_count: null | issues event min/max datetime: null
- max_forks_repo_path: tensorflow/python/ops/nn_ops.py
- max_forks_repo_name: Richardyouth/tensorflow
- max_forks_repo_head_hexsha: 3bc595df1d9ea2015fa892bbd69c38ae4c4857d3
- max_forks_repo_licenses: ["Apache-2.0"]
- max_forks_count: null | forks event min/max datetime: null
- content:
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
def _non_atrous_convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
data_format=None, # pylint: disable=redefined-builtin
strides=None,
name=None):
"""Computes sums of N-D convolutions (actually cross correlation).
It is required that 1 <= N <= 3.
This is used to implement the more generic `convolution` function, which
extends the interface of this function with a `dilation_rate` parameter.
Args:
input: Rank N+2 tensor of type T of shape
`[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
does not start with `"NC"`, or
`[batch_size, in_channels] + input_spatial_shape` if `data_format` starts
with `"NC"`.
filter: Rank N+2 tensor of type T of shape
`filter_spatial_shape + [in_channels, out_channels]`. Rank of either
`input` or `filter` must be known.
padding: Padding method to use, must be either "VALID" or "SAME".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
strides: Sequence of N positive integers, defaults to `[1] * N`.
name: Name prefix to use.
Returns:
Rank N+2 tensor of type T of shape
`[batch_size] + output_spatial_shape + [out_channels]`, where
if padding == "SAME":
output_spatial_shape = input_spatial_shape
if padding == "VALID":
output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.
Raises:
ValueError: if ranks are incompatible.
"""
with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.get_shape()
op = _NonAtrousConvolution(
input_shape,
filter_shape=filter_shape,
padding=padding,
data_format=data_format,
strides=strides,
name=scope)
return op(input, filter)
class _NonAtrousConvolution(object):
"""Helper class for _non_atrous_convolution.
Note that this class assumes that shapes of input and filter passed to
__call__ are compatible with input_shape and filter_shape passed to the
constructor.
Arguments:
input_shape: static input shape, i.e. input.get_shape().
filter_shape: static filter shape, i.e. filter.get_shape().
padding: see _non_atrous_convolution.
data_format: see _non_atrous_convolution.
strides: see _non_atrous_convolution.
name: see _non_atrous_convolution.
"""
def __init__(
self,
input_shape,
filter_shape, # pylint: disable=redefined-builtin
padding,
data_format=None,
strides=None,
name=None):
filter_shape = filter_shape.with_rank(input_shape.ndims)
self.padding = padding
self.name = name
input_shape = input_shape.with_rank(filter_shape.ndims)
if input_shape.ndims is None:
raise ValueError("Rank of convolution must be known")
if input_shape.ndims < 3 or input_shape.ndims > 5:
raise ValueError(
"`input` and `filter` must have rank at least 3 and at most 5")
conv_dims = input_shape.ndims - 2
if strides is None:
strides = [1] * conv_dims
elif len(strides) != conv_dims:
raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
conv_dims))
if conv_dims == 1:
# conv1d uses the 2-d data format names
if data_format is None:
data_format = "NWC"
elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
self.strides = strides[0]
self.data_format = data_format
self.conv_op = self._conv1d
elif conv_dims == 2:
if data_format is None or data_format == "NHWC":
data_format = "NHWC"
strides = [1] + list(strides) + [1]
elif data_format == "NCHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
self.strides = strides
self.data_format = data_format
self.conv_op = gen_nn_ops.conv2d
elif conv_dims == 3:
if data_format is None or data_format == "NDHWC":
strides = [1] + list(strides) + [1]
elif data_format == "NCDHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
% data_format)
self.strides = strides
self.data_format = data_format
self.conv_op = gen_nn_ops.conv3d
# Note that we need this adapter since argument names for conv1d don't match
# those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
# pylint: disable=redefined-builtin
def _conv1d(self, input, filter, strides, padding, data_format, name):
return conv1d(
value=input,
filters=filter,
stride=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.conv_op(
input=inp,
filter=filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
name=self.name)
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
input, # pylint: disable=redefined-builtin
dilation_rate,
padding,
op,
filter_shape=None,
spatial_dims=None,
data_format=None):
"""Performs `op` on the space-to-batch representation of `input`.
This has the effect of transforming sliding window operations into the
corresponding "atrous" operation in which the input is sampled at the
specified `dilation_rate`.
In the special case that `dilation_rate` is uniformly 1, this simply returns:
op(input, num_spatial_dims, padding)
Otherwise, it returns:
batch_to_space_nd(
op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
num_spatial_dims,
"VALID")
adjusted_dilation_rate,
adjusted_crops),
where:
adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]
defined as follows:
We first define two int64 tensors `paddings` and `crops` of shape
`[num_spatial_dims, 2]` based on the value of `padding` and the spatial
dimensions of the `input`:
If `padding = "VALID"`, then:
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate)
If `padding = "SAME"`, then:
dilated_filter_shape =
filter_shape + (filter_shape - 1) * (dilation_rate - 1)
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate,
[(dilated_filter_shape - 1) // 2,
dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])
Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
dimensions are contiguous starting at the second dimension, but the specified
`spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
`crops` in order to be usable with these operations. For a given dimension,
if the block size is 1, and both the starting and ending padding and crop
amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
which is what is needed for dimensions not part of `spatial_dims`.
Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
efficiently for any number of leading and trailing dimensions.
For 0 <= i < len(spatial_dims), we assign:
adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]
All unassigned values of `adjusted_dilation_rate` default to 1, while all
unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.
Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
`[1]*N`.
Advanced usage. Note the following optimization: A sequence of
`with_space_to_batch` operations with identical (not uniformly 1)
`dilation_rate` parameters and "VALID" padding
net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
...
net = with_space_to_batch(net, dilation_rate, "VALID", op_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "VALID")
...
result = op_k(result, num_spatial_dims, "VALID")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
`batch_to_space_nd`.
Similarly, a sequence of `with_space_to_batch` operations with identical (not
uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
dimensions
net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
...
net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "SAME")
...
result = op_k(result, num_spatial_dims, "SAME")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
Args:
input: Tensor of rank > max(spatial_dims).
dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
padding: str constant equal to "VALID" or "SAME"
op: Function that maps (input, num_spatial_dims, padding) -> output
filter_shape: If padding = "SAME", specifies the shape of the convolution
kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
If padding = "VALID", filter_shape is ignored and need not be specified.
spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
integers (which are >= 1) specifying the spatial dimensions of `input`
and output. Defaults to: `range(1, num_spatial_dims+1)`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
The output Tensor as described above, dimensions will vary based on the op
provided.
Raises:
ValueError: if `padding` is invalid or the arguments are incompatible.
ValueError: if `spatial_dims` are invalid.
"""
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
def build_op(num_spatial_dims, padding):
return lambda inp, _: op(inp, num_spatial_dims, padding)
new_op = _WithSpaceToBatch(
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
return new_op(input, None)
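# Example (hedged sketch, not from the upstream file): applying a VALID max
# pool at dilation_rate [2, 2] via `with_space_to_batch`. Shapes are assumed.
#
#   import tensorflow as tf
#
#   x = tf.random_normal([1, 8, 8, 3])
#
#   def pool_op(converted_input, num_spatial_dims, padding):
#     # `op` receives (input, num_spatial_dims, padding), per the docstring.
#     return tf.nn.pool(converted_input, window_shape=[2, 2],
#                       pooling_type="MAX", padding=padding)
#
#   y = tf.nn.with_space_to_batch(x, dilation_rate=[2, 2], padding="VALID",
#                                 op=pool_op)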
class _WithSpaceToBatch(object):
"""Helper class for with_space_to_batch.
Note that this class assumes that shapes of input and filter passed to
__call__ are compatible with input_shape and filter_shape passed to the
constructor.
Arguments
input_shape: static shape of input. i.e. input.get_shape().
dilation_rate: see with_space_to_batch
padding: see with_space_to_batch
build_op: Function that maps (num_spatial_dims, paddings) -> (function that
maps (input, filter) -> output).
filter_shape: see with_space_to_batch
spatial_dims: see with_space_to_batch
data_format: see with_space_to_batch
"""
def __init__(self,
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=None,
spatial_dims=None,
data_format=None):
"""Helper class for _with_space_to_batch."""
dilation_rate = ops.convert_to_tensor(
dilation_rate, dtypes.int32, name="dilation_rate")
try:
rate_shape = dilation_rate.get_shape().with_rank(1)
except ValueError:
raise ValueError("rate must be rank 1")
if not dilation_rate.get_shape().is_fully_defined():
raise ValueError("rate must have known shape")
num_spatial_dims = rate_shape[0].value
if data_format is not None and data_format.startswith("NC"):
starting_spatial_dim = 2
else:
starting_spatial_dim = 1
if spatial_dims is None:
spatial_dims = range(starting_spatial_dim,
num_spatial_dims + starting_spatial_dim)
orig_spatial_dims = list(spatial_dims)
spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
raise ValueError(
"spatial_dims must be a montonically increasing sequence of positive "
"integers") # pylint: disable=line-too-long
if data_format is not None and data_format.startswith("NC"):
expected_input_rank = spatial_dims[-1]
else:
expected_input_rank = spatial_dims[-1] + 1
try:
input_shape.with_rank_at_least(expected_input_rank)
except ValueError:
ValueError("input tensor must have rank %d at least" %
(expected_input_rank))
const_rate = tensor_util.constant_value(dilation_rate)
rate_or_const_rate = dilation_rate
if const_rate is not None:
rate_or_const_rate = const_rate
if np.any(const_rate < 1):
raise ValueError("dilation_rate must be positive")
if np.all(const_rate == 1):
self.call = build_op(num_spatial_dims, padding)
return
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
if filter_shape is None:
raise ValueError("filter_shape must be specified for SAME padding")
filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
const_filter_shape = tensor_util.constant_value(filter_shape)
if const_filter_shape is not None:
filter_shape = const_filter_shape
self.base_paddings = _with_space_to_batch_base_paddings(
const_filter_shape, num_spatial_dims, rate_or_const_rate)
else:
self.num_spatial_dims = num_spatial_dims
self.rate_or_const_rate = rate_or_const_rate
self.base_paddings = None
elif padding == "VALID":
self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
else:
raise ValueError("Invalid padding method %r" % padding)
self.input_shape = input_shape
self.spatial_dims = spatial_dims
self.dilation_rate = dilation_rate
self.data_format = data_format
self.op = build_op(num_spatial_dims, "VALID")
self.call = self._with_space_to_batch_call
def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin
"""Call functionality for with_space_to_batch."""
# Handle input whose shape is unknown during graph creation.
input_spatial_shape = None
input_shape = self.input_shape
spatial_dims = self.spatial_dims
if input_shape.ndims is not None:
input_shape_list = input_shape.as_list()
input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
if input_spatial_shape is None or None in input_spatial_shape:
input_shape_tensor = array_ops.shape(inp)
input_spatial_shape = array_ops.stack(
[input_shape_tensor[i] for i in spatial_dims])
base_paddings = self.base_paddings
if base_paddings is None:
# base_paddings could not be computed at build time since static filter
# shape was not fully defined.
filter_shape = array_ops.shape(filter)
base_paddings = _with_space_to_batch_base_paddings(
filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
paddings, crops = array_ops.required_space_to_batch_paddings(
input_shape=input_spatial_shape,
base_paddings=base_paddings,
block_shape=self.dilation_rate)
dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
spatial_dims)
paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
input_converted = array_ops.space_to_batch_nd(
input=inp, block_shape=dilation_rate, paddings=paddings)
result = self.op(input_converted, filter)
result_converted = array_ops.batch_to_space_nd(
input=result, block_shape=dilation_rate, crops=crops)
# Recover channel information for output shape if channels are not last.
if self.data_format is not None and self.data_format.startswith("NC"):
if not result_converted.shape[1].value:
output_shape = result_converted.shape.as_list()
output_shape[1] = filter.shape[-1]
result_converted.set_shape(output_shape)
return result_converted
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
rate_or_const_rate):
"""Helper function to compute base_paddings."""
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_spatial_shape = filter_shape[:num_spatial_dims]
dilated_filter_spatial_shape = (
filter_spatial_shape + (filter_spatial_shape - 1) *
(rate_or_const_rate - 1))
pad_extra_shape = dilated_filter_spatial_shape - 1
# When full_padding_shape is odd, we pad more at end, following the same
# convention as conv2d.
pad_extra_start = pad_extra_shape // 2
pad_extra_end = pad_extra_shape - pad_extra_start
base_paddings = array_ops.stack(
[[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])
return base_paddings
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
"""Returns an `adjusted` version of `orig` based on `spatial_dims`.
Tensor of the same type as `orig` and with shape
`[max(spatial_dims), ...]` where:
adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]
for 0 <= i < len(spatial_dims), and
adjusted[j, ...] = fill_value
for j != spatial_dims[i] - 1 for some i.
If `orig` is a constant value, then the result will be a constant value.
Args:
orig: Tensor of rank > max(spatial_dims).
fill_value: Numpy scalar (of same data type as `orig`) specifying the fill
value for non-spatial dimensions.
spatial_dims: See with_space_to_batch.
Returns:
`adjusted` tensor.
"""
fill_dims = orig.get_shape().as_list()[1:]
dtype = orig.dtype.as_numpy_dtype
parts = []
const_orig = tensor_util.constant_value(orig)
const_or_orig = const_orig if const_orig is not None else orig
prev_spatial_dim = 0
i = 0
while i < len(spatial_dims):
start_i = i
start_spatial_dim = spatial_dims[i]
if start_spatial_dim > 1:
# Fill in any gap from the previous spatial dimension (or dimension 1 if
# this is the first spatial dimension) with `fill_value`.
parts.append(
np.full(
[start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
fill_value,
dtype=dtype))
# Find the largest value of i such that:
# [spatial_dims[start_i], ..., spatial_dims[i]]
# == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
# i.e. the end of a contiguous group of spatial dimensions.
while (i + 1 < len(spatial_dims) and
spatial_dims[i + 1] == spatial_dims[i] + 1):
i += 1
parts.append(const_or_orig[start_i:i + 1])
prev_spatial_dim = spatial_dims[i]
i += 1
if const_orig is not None:
return np.concatenate(parts)
else:
return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export("nn.convolution")
def convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None):
# pylint: disable=line-too-long
"""Computes sums of N-D convolutions (actually cross-correlation).
This also supports either output striding via the optional `strides` parameter
or atrous convolution (also known as convolution with holes or dilated
convolution, based on the French word "trous" meaning holes in English) via
the optional `dilation_rate` parameter. Currently, however, output striding
is not supported for atrous convolutions.
Specifically, in the case that `data_format` does not start with "NC", given
a rank (N+2) `input` Tensor of shape
[num_batches,
input_spatial_shape[0],
...,
input_spatial_shape[N-1],
num_input_channels],
a rank (N+2) `filter` Tensor of shape
[spatial_filter_shape[0],
...,
spatial_filter_shape[N-1],
num_input_channels,
num_output_channels],
an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
specifying the filter upsampling/input downsampling rate, and an optional list
of N `strides` (defaulting [1]*N), this computes for each N-D spatial output
position (x[0], ..., x[N-1]):
```
output[b, x[0], ..., x[N-1], k] =
sum_{z[0], ..., z[N-1], q}
filter[z[0], ..., z[N-1], q, k] *
padded_input[b,
x[0]*strides[0] + dilation_rate[0]*z[0],
...,
x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
q]
```
where b is the index into the batch, k is the output channel number, q is the
input channel number, and z is the N-D spatial offset within the filter. Here,
`padded_input` is obtained by zero padding the input using an effective
spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
output striding `strides` as described in the
@{$python/nn#Convolution$comment here}.
In the case that `data_format` does start with `"NC"`, the `input` and output
(but not the `filter`) are simply transposed as follows:
convolution(input, data_format, **kwargs) =
tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
It is required that 1 <= N <= 3.
Args:
input: An N-D `Tensor` of type `T`, of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
filter: An N-D `Tensor` with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`.
padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
Defaults to [1]*N. If any value of strides is > 1, then all values of
dilation_rate must be 1.
dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter
upsampling/input downsampling rate. In the literature, the same parameter
is sometimes called `input stride` or `dilation`. The effective filter
size used for the convolution will be `spatial_filter_shape +
(spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
(dilation_rate[i]-1) zeros between consecutive elements of the original
filter in each spatial dimension i. If any value of dilation_rate is > 1,
then all values of strides must be 1.
name: Optional name for the returned tensor.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
A `Tensor` with the same type as `input` of shape
`[batch_size] + output_spatial_shape + [out_channels]`
if data_format is None or does not start with "NC", or
`[batch_size, out_channels] + output_spatial_shape`
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of `padding`.
If padding == "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding == "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] -
(spatial_filter_shape[i]-1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: If input/output depth does not match `filter` shape, if padding
is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(name, "convolution", [input, filter]) as name:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.get_shape()
op = Convolution(
input_shape,
filter_shape,
padding,
strides=strides,
dilation_rate=dilation_rate,
name=name,
data_format=data_format)
return op(input, filter)
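# Example (hedged sketch, not from the upstream file): a 2-D atrous (dilated)
# convolution in NHWC layout via `tf.nn.convolution`. Shapes are assumed.
#
#   import tensorflow as tf
#
#   images = tf.random_normal([1, 28, 28, 3])    # [batch, H, W, in_channels]
#   kernel = tf.random_normal([3, 3, 3, 16])     # [fH, fW, in_ch, out_ch]
#   out = tf.nn.convolution(images, kernel, padding="SAME",
#                           dilation_rate=[2, 2])  # strides must remain 1 here
#   # out has shape [1, 28, 28, 16] because padding is "SAME".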
class Convolution(object):
"""Helper class for convolution.
Note that this class assumes that shapes of input and filter passed to
__call__ are compatible with input_shape and filter_shape passed to the
constructor.
Arguments
input_shape: static shape of input. i.e. input.get_shape().
filter_shape: static shape of the filter. i.e. filter.get_shape().
padding: see convolution.
strides: see convolution.
dilation_rate: see convolution.
name: see convolution.
data_format: see convolution.
"""
def __init__(self,
input_shape,
filter_shape,
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None):
"""Helper function for convolution."""
num_total_dims = filter_shape.ndims
if num_total_dims is None:
num_total_dims = input_shape.ndims
if num_total_dims is None:
raise ValueError("rank of input or filter must be known")
num_spatial_dims = num_total_dims - 2
try:
input_shape.with_rank(num_spatial_dims + 2)
except ValueError:
ValueError("input tensor must have rank %d" % (num_spatial_dims + 2))
try:
filter_shape.with_rank(num_spatial_dims + 2)
except ValueError:
ValueError("filter tensor must have rank %d" % (num_spatial_dims + 2))
if data_format is None or not data_format.startswith("NC"):
input_channels_dim = input_shape[num_spatial_dims + 1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
input_channels_dim = input_shape[1]
spatial_dims = range(2, num_spatial_dims + 2)
if not input_channels_dim.is_compatible_with(
filter_shape[num_spatial_dims]):
raise ValueError(
"number of input channels does not match corresponding dimension of "
"filter, {} != {}".format(input_channels_dim,
filter_shape[num_spatial_dims]))
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
self.input_shape = input_shape
self.filter_shape = filter_shape
self.data_format = data_format
self.strides = strides
self.name = name
self.conv_op = _WithSpaceToBatch(
input_shape,
dilation_rate=dilation_rate,
padding=padding,
build_op=self._build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
def _build_op(self, _, padding):
return _NonAtrousConvolution(
self.input_shape,
filter_shape=self.filter_shape,
padding=padding,
data_format=self.data_format,
strides=self.strides,
name=self.name)
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.conv_op(inp, filter)
@tf_export("nn.pool")
def pool(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding,
dilation_rate=None,
strides=None,
name=None,
data_format=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in the
@{tf.nn.convolution$comment here}.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
See the @{tf.nn.convolution$comment here}
dilation_rate: Optional. Dilation rate. List of N ints >= 1.
Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
of strides must be 1.
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.
If any value of strides is > 1, then all values of dilation_rate must be
1.
name: Optional. Name of the op.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
input.get_shape().with_rank(num_spatial_dims + 2)
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
if padding == "SAME" and np.any(dilation_rate > 1):
raise ValueError(
"pooling with SAME padding is not implemented for dilation_rate > 1")
if np.any(strides > window_shape):
raise ValueError(
"strides > window_shape not supported due to inconsistency between "
"CPU and GPU implementations")
pooling_ops = {
("MAX", 1): max_pool,
("MAX", 2): max_pool,
("MAX", 3): max_pool3d, # pylint: disable=undefined-variable
("AVG", 1): avg_pool,
("AVG", 2): avg_pool,
("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable
}
op_key = (pooling_type, num_spatial_dims)
if op_key not in pooling_ops:
raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
op_key[0]))
if data_format is None or not data_format.startswith("NC"):
adjusted_window_shape = [1] + list(window_shape) + [1]
adjusted_strides = [1] + list(strides) + [1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
adjusted_window_shape = [1, 1] + list(window_shape)
adjusted_strides = [1, 1] + list(strides)
spatial_dims = range(2, num_spatial_dims + 2)
if num_spatial_dims == 1:
if data_format is None or data_format == "NWC":
data_format_kwargs = dict(data_format="NHWC")
elif data_format == "NCW":
data_format_kwargs = dict(data_format="NCHW")
else:
raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
adjusted_window_shape = [1] + adjusted_window_shape
adjusted_strides = [1] + adjusted_strides
else:
data_format_kwargs = dict(data_format=data_format)
def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring
if num_spatial_dims == 1:
converted_input = array_ops.expand_dims(converted_input,
spatial_dims[0])
result = pooling_ops[op_key](
converted_input,
adjusted_window_shape,
adjusted_strides,
converted_padding,
name=scope,
**data_format_kwargs)
if num_spatial_dims == 1:
result = array_ops.squeeze(result, [spatial_dims[0]])
return result
return with_space_to_batch(
input=input,
dilation_rate=dilation_rate,
padding=padding,
op=op,
spatial_dims=spatial_dims,
filter_shape=window_shape)
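# Illustrative usage sketch (not part of the original module): 2-D average
# pooling via the generic N-D `pool` API. The `_example_pool_usage` helper name
# and the tensor shapes are hypothetical, chosen only for demonstration.
def _example_pool_usage():
  x = array_ops.ones([1, 6, 6, 3])  # NHWC input: batch=1, 6x6 spatial, 3 channels
  # 2x2 average pooling with stride 2; "SAME" padding gives a 3x3 spatial output.
  return pool(input=x, window_shape=[2, 2], pooling_type="AVG",
              padding="SAME", strides=[2, 2])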
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
This function is a simpler wrapper around the more general
@{tf.nn.convolution}, and exists only for backwards compatibility. You can
use @{tf.nn.convolution} to perform 1-D, 2-D, or 3-D atrous convolution.
Computes a 2-D atrous convolution, also known as convolution with holes or
dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
parameter is equal to one, it performs regular 2-D convolution. If the `rate`
parameter is greater than one, it performs convolution with holes, sampling
the input values every `rate` pixels in the `height` and `width` dimensions.
This is equivalent to convolving the input with a set of upsampled filters,
produced by inserting `rate - 1` zeros between two consecutive values of the
filters along the `height` and `width` dimensions, hence the name atrous
convolution or convolution with holes (the French word trous means holes in
English).
More specifically:
```
output[batch, height, width, out_channel] =
sum_{dheight, dwidth, in_channel} (
filters[dheight, dwidth, in_channel, out_channel] *
value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
)
```
Atrous convolution allows us to explicitly control how densely to compute
feature responses in fully convolutional networks. Used in conjunction with
bilinear interpolation, it offers an alternative to `conv2d_transpose` in
dense prediction tasks such as semantic image segmentation, optical flow
computation, or depth estimation. It also allows us to effectively enlarge
the field of view of filters without increasing the number of parameters or
the amount of computation.
For a description of atrous convolution and how it can be used for dense
feature extraction, please see: [Semantic Image Segmentation with Deep
Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062).
The same operation is investigated further in [Multi-Scale Context Aggregation
by Dilated Convolutions](http://arxiv.org/abs/1511.07122). Previous works
that effectively use atrous convolution in different ways are, among others,
[OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks](http://arxiv.org/abs/1312.6229) and [Fast Image
Scanning with Deep Max-Pooling Convolutional Neural
Networks](http://arxiv.org/abs/1302.1700).
Atrous convolution is also closely related to the so-called noble identities
in multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
```python
atrous_conv2d(value, filters, rate, padding=padding)
```
to the following three operations:
```python
paddings = ...
net = space_to_batch(value, paddings, block_size=rate)
net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
crops = ...
net = batch_to_space(net, crops, block_size=rate)
```
  Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
  operations with identical `rate` parameters, 'SAME' `padding`, and filters
  with odd heights/widths:
```python
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
```
can be equivalently performed cheaper in terms of computation and memory as:
```python
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
```
because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
the same `block_size` cancel out when their respective `paddings` and `crops`
inputs are identical.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
    Output shape with `'VALID'` padding is:
        [batch, height - rate * (filter_height - 1),
         width - rate * (filter_width - 1), out_channels].
Output shape with `'SAME'` padding is:
[batch, height, width, out_channels].
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
return convolution(
input=value,
filter=filters,
padding=padding,
dilation_rate=np.broadcast_to(rate, (2,)),
name=name)
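# Illustrative usage sketch (not part of the original module): atrous (dilated)
# 2-D convolution with rate=2. The helper name and tensor shapes below are
# hypothetical.
def _example_atrous_conv2d_usage():
  value = array_ops.ones([1, 10, 10, 3])   # NHWC input
  filters = array_ops.ones([3, 3, 3, 8])   # 3x3 kernel, 3 in / 8 out channels
  # With "SAME" padding the output keeps the 10x10 spatial shape.
  return atrous_conv2d(value, filters, rate=2, padding="SAME")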
@tf_export("nn.conv2d_transpose")
def conv2d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the @{tf.nn.convolution$comment here}
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
if data_format not in ("NCHW", "NHWC"):
raise ValueError("data_format has to be either NCHW or NHWC.")
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 3 if data_format == "NHWC" else 1
if not value.get_shape()[axis].is_compatible_with(filter.get_shape()[3]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[3]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filter.get_shape()[2].is_compatible_with(output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[2]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
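# Illustrative usage sketch (not part of the original module): upsampling a
# 4x4 feature map to 8x8 with conv2d_transpose. The helper name and shapes are
# hypothetical.
def _example_conv2d_transpose_usage():
  value = array_ops.ones([1, 4, 4, 8])     # NHWC input with 8 channels
  kernel = array_ops.ones([3, 3, 16, 8])   # [height, width, output_channels, in_channels]
  # Stride 2 with "SAME" padding doubles the spatial dimensions.
  return conv2d_transpose(value, kernel, output_shape=[1, 8, 8, 16],
                          strides=[1, 2, 2, 1], padding="SAME")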
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
"""The transpose of `atrous_conv2d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `atrous_conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, out_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
output_shape: A 1-D `Tensor` of shape representing the output shape of the
deconvolution op.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
than one, or if the output_shape is not a tensor with 4 elements.
"""
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape()[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filters.get_shape()[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch is just the extra padding
# component.
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
# The crops argument to batch_to_space includes both padding components.
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export("nn.conv3d_transpose")
def conv3d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv3d` rather than an actual
deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the @{tf.nn.convolution$comment here}
data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv3d_transpose",
[value, filter, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 1 if data_format == "NCDHW" else 4
if not value.get_shape()[axis].is_compatible_with(filter.get_shape()[4]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[4]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):
raise ValueError("output_shape must have shape (5,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [5] if reached this point.
if not filter.get_shape()[3].is_compatible_with(output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[3]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv3d_backprop_input_v2(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)
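# Illustrative usage sketch (not part of the original module): adding a
# per-feature bias to a batch of activations. The helper name and values are
# hypothetical.
def _example_bias_add_usage():
  value = array_ops.ones([2, 5])                            # batch of 2, 5 features
  bias = ops.convert_to_tensor([0.1, 0.2, 0.3, 0.4, 0.5])   # one bias per feature
  return bias_add(value, bias)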
def bias_add_v1(value, bias, name=None):
"""Adds `bias` to `value`.
  This is a deprecated version of bias_add and will soon be removed.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add_v1(value, bias, name=name)
@tf_export("nn.crelu")
def crelu(features, name=None, axis=-1):
"""Computes Concatenated ReLU.
Concatenates a ReLU which selects only the positive part of the activation
with a ReLU which selects only the *negative* part of the activation.
Note that as a result this non-linearity doubles the depth of the activations.
Source: [Understanding and Improving Convolutional Neural Networks via
Concatenated Rectified Linear Units. W. Shang, et
al.](https://arxiv.org/abs/1603.05201)
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
axis: The axis that the output values are concatenated along. Default is -1.
Returns:
A `Tensor` with the same type as `features`.
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
c = array_ops.concat([features, -features], axis, name=name)
return gen_nn_ops.relu(c)
@tf_export("nn.relu6")
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Source: [Convolutional Deep Belief Networks on CIFAR-10. A.
Krizhevsky](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
"""
with ops.name_scope(name, "Relu6", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops.relu6(features, name=name)
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
"""Compute the Leaky ReLU activation function.
"Rectifier Nonlinearities Improve Neural Network Acoustic Models"
AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013
http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf
Args:
features: A `Tensor` representing preactivation values. Must be one of
the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
alpha: Slope of the activation function at x < 0.
name: A name for the operation (optional).
Returns:
The activation value.
"""
with ops.name_scope(name, "LeakyRelu", [features, alpha]):
features = ops.convert_to_tensor(features, name="features")
if features.dtype.is_integer:
features = math_ops.to_float(features)
alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
return math_ops.maximum(alpha * features, features)
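# Illustrative usage sketch (not part of the original module): leaky ReLU on a
# small vector; negative inputs are scaled by `alpha`. The helper name and
# values are hypothetical.
def _example_leaky_relu_usage():
  features = ops.convert_to_tensor([-2.0, 0.0, 3.0])
  return leaky_relu(features, alpha=0.2)  # -> approximately [-0.4, 0.0, 3.0]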
def _flatten_outer_dims(logits):
"""Flattens logits' outer dimensions and keep its last dimension."""
rank = array_ops.rank(logits)
last_dim_size = array_ops.slice(
array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
# Set output shape if known.
if not context.executing_eagerly():
shape = logits.get_shape()
if shape is not None and shape.dims is not None:
shape = shape.as_list()
product = 1
product_valid = True
for d in shape[:-1]:
if d is None:
product_valid = False
break
else:
product *= d
if product_valid:
output_shape = [product, shape[-1]]
output.set_shape(output_shape)
return output
def _softmax(logits, compute_op, dim=-1, name=None):
"""Helper function for softmax and log_softmax.
It reshapes and transposes the input logits into a 2-D Tensor and then invokes
the tf.nn._softmax or tf.nn._log_softmax function. The output would be
transposed and reshaped back.
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax
dim: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
dimension of `logits`.
"""
def _swap_axis(logits, dim_index, last_index, name=None):
"""Swaps logits's dim_index and last_index."""
return array_ops.transpose(
logits,
array_ops.concat([
math_ops.range(dim_index), [last_index],
math_ops.range(dim_index + 1, last_index), [dim_index]
], 0),
name=name)
logits = ops.convert_to_tensor(logits)
# We need its original shape for shape inference.
shape = logits.get_shape()
  is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
  if shape.ndims == 2 and is_last_dim:
return compute_op(logits, name=name)
# If dim is the last dimension, simply reshape the logits to a matrix and
# apply the internal softmax.
if is_last_dim:
input_shape = array_ops.shape(logits)
logits = _flatten_outer_dims(logits)
output = compute_op(logits)
output = array_ops.reshape(output, input_shape, name=name)
return output
# If dim is not the last dimension, we have to do a reshape and transpose so
# that we can still perform softmax on its last dimension.
# Swap logits' dimension of dim and its last dimension.
input_rank = array_ops.rank(logits)
dim_axis = dim % shape.ndims
logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))
shape_after_swap = array_ops.shape(logits)
# Reshape logits into a matrix.
logits = _flatten_outer_dims(logits)
# Do the actual softmax on its last dimension.
output = compute_op(logits)
# Transform back the output tensor.
output = array_ops.reshape(output, shape_after_swap)
output = _swap_axis(
output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
# Make shape inference work since reshape and transpose may erase its static
# shape.
output.set_shape(shape)
return output
@tf_export("nn.softmax")
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.softmax, axis, name)
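# Illustrative usage sketch (not part of the original module): softmax over the
# last dimension of a small logits matrix. The helper name and values are
# hypothetical.
def _example_softmax_usage():
  logits = ops.convert_to_tensor([[1.0, 2.0, 3.0],
                                  [1.0, 1.0, 1.0]])
  return softmax(logits)  # each row of the result sums to 1.0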
@tf_export("nn.log_softmax")
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.log_softmax, axis, name)
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits_v2")
def softmax_cross_entropy_with_logits_v2(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
dim=-1,
name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `dim` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through @{tf.stop_gradient}
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
dim: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
"""
_ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
logits)
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
# could break users who call this with bad labels, but disregard the bad
# results.
with ops.name_scope(name, "softmax_cross_entropy_with_logits",
[logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
convert_to_float32 = (
logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
precise_logits = math_ops.cast(
logits, dtypes.float32) if convert_to_float32 else logits
# labels and logits must be of the same type
labels = math_ops.cast(labels, precise_logits.dtype)
input_rank = array_ops.rank(precise_logits)
# For shape inference.
shape = logits.get_shape()
# Move the dim to the end if dim is not the last dimension.
    if dim != -1:
def _move_dim_to_end(tensor, dim_index, rank):
return array_ops.transpose(
tensor,
array_ops.concat([
math_ops.range(dim_index),
math_ops.range(dim_index + 1, rank), [dim_index]
], 0))
precise_logits = _move_dim_to_end(precise_logits, dim, input_rank)
labels = _move_dim_to_end(labels, dim, input_rank)
input_shape = array_ops.shape(precise_logits)
# Make precise_logits and labels into matrices.
precise_logits = _flatten_outer_dims(precise_logits)
labels = _flatten_outer_dims(labels)
# Do the actual op computation.
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# The output cost shape should be the input minus dim.
output_shape = array_ops.slice(input_shape, [0],
[math_ops.subtract(input_rank, 1)])
cost = array_ops.reshape(cost, output_shape)
# Make shape inference work since reshape and transpose may erase its static
# shape.
if not context.executing_eagerly(
) and shape is not None and shape.dims is not None:
shape = shape.as_list()
del shape[dim]
cost.set_shape(shape)
if convert_to_float32:
return math_ops.cast(cost, logits.dtype)
else:
return cost
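# Illustrative usage sketch (not part of the original module): cross entropy
# between one-hot labels and unscaled logits, passed as named arguments as the
# function requires. The helper name and values are hypothetical.
def _example_softmax_xent_v2_usage():
  labels = ops.convert_to_tensor([[0.0, 1.0, 0.0],
                                  [1.0, 0.0, 0.0]])  # valid per-row distributions
  logits = ops.convert_to_tensor([[2.0, 1.0, 0.5],
                                  [0.1, 0.2, 0.3]])  # unscaled log probabilities
  return softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits)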
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See @{tf.nn.softmax_cross_entropy_with_logits_v2}.
"""
@tf_export("nn.softmax_cross_entropy_with_logits")
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
dim=-1,
name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `dim` argument specifying the class dimension.
Backpropagation will happen only into `logits`. To calculate a cross entropy
loss that allows backpropagation into both `logits` and `labels`, see
@{tf.nn.softmax_cross_entropy_with_logits_v2}.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
dim: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
"""
_ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
logits)
with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
[logits, labels]) as name:
labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
return softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits, dim=dim, name=name)
@tf_export("nn.sparse_softmax_cross_entropy_with_logits")
def sparse_softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
  A common use case is to have logits of shape
  `[batch_size, num_classes]` and labels of shape `[batch_size]`, but higher
  dimensions are supported, in which case the `dim`-th dimension is assumed
  to be of size `num_classes`. `logits` must have the dtype of `float16`,
  `float32`, or `float64`, and `labels` must have the dtype of `int32` or
  `int64`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape
`[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
_ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
labels, logits)
# TODO(pcmurray) Raise an error when the label is not an index in
# [0, num_classes). Note: This could break users who call this with bad
# labels, but disregard the bad results.
# Reshape logits and labels to rank 2.
with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
[labels, logits]):
labels = ops.convert_to_tensor(labels)
logits = ops.convert_to_tensor(logits)
precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
logits.dtype) == dtypes.float16) else logits
# Store label shape for result later.
labels_static_shape = labels.get_shape()
labels_shape = array_ops.shape(labels)
static_shapes_fully_defined = (
labels_static_shape.is_fully_defined() and
logits.get_shape()[:-1].is_fully_defined())
if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
raise ValueError(
"Logits cannot be scalars - received shape %s." % logits.get_shape())
if logits.get_shape().ndims is not None and (
labels_static_shape.ndims is not None and
labels_static_shape.ndims != logits.get_shape().ndims - 1):
raise ValueError("Rank mismatch: Rank of labels (received %s) should "
"equal rank of logits minus 1 (received %s)." %
(labels_static_shape.ndims, logits.get_shape().ndims))
if (static_shapes_fully_defined and
labels_static_shape != logits.get_shape()[:-1]):
raise ValueError("Shape mismatch: The shape of labels (received %s) "
"should equal the shape of logits except for the last "
"dimension (received %s)." % (labels_static_shape,
logits.get_shape()))
# Check if no reshapes are required.
if logits.get_shape().ndims == 2:
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
# Perform a check of the dynamic shapes if the static shapes are not fully
# defined.
shape_checks = []
if not static_shapes_fully_defined:
shape_checks.append(
check_ops.assert_equal(
array_ops.shape(labels),
array_ops.shape(logits)[:-1]))
with ops.control_dependencies(shape_checks):
# Reshape logits to 2 dim, labels to 1 dim.
num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
labels = array_ops.reshape(labels, [-1])
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
cost = array_ops.reshape(cost, labels_shape)
cost.set_shape(labels_static_shape)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
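# Illustrative usage sketch (not part of the original module): sparse cross
# entropy where labels are integer class indices rather than distributions.
# The helper name and values are hypothetical.
def _example_sparse_softmax_xent_usage():
  labels = ops.convert_to_tensor([2, 0], dtype=dtypes.int32)  # class index per example
  logits = ops.convert_to_tensor([[0.1, 0.2, 3.0],
                                  [2.0, 0.1, 0.1]])           # shape [2, 3]
  return sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)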
@tf_export("nn.avg_pool")
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: A 1-D int Tensor of 4 elements.
The size of the window for each dimension of the input tensor.
    strides: A 1-D int Tensor of 4 elements.
The stride of the sliding window for each dimension of the
input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the @{tf.nn.convolution$comment here}
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool", [value]) as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops.avg_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.max_pool")
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the max pooling on the input.
Args:
value: A 4-D `Tensor` of the format specified by `data_format`.
ksize: A 1-D int Tensor of 4 elements. The size of the window for
each dimension of the input tensor.
strides: A 1-D int Tensor of 4 elements. The stride of the sliding
window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the @{tf.nn.convolution$comment here}
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool", [value]) as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops.max_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
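# Illustrative usage sketch (not part of the original module): 2x2 max pooling
# with stride 2 over an NHWC input. The helper name and shapes are hypothetical.
def _example_max_pool_usage():
  value = array_ops.ones([1, 4, 4, 1])
  # "VALID" padding with a 2x2 window and stride 2 yields a 2x2 spatial output.
  return max_pool(value, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                  padding="VALID")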
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
"""Calculates the compute resources needed for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
output_count = np.prod(output_shape.as_list())
return ops.OpStats(
"flops",
(output_count * filter_in_depth * filter_height * filter_width * 2))
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
"""Calculates the compute resources needed for DepthwiseConv2dNative."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@tf_export("nn.xw_plus_b")
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
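# Illustrative usage sketch (not part of the original module): a dense-layer
# pre-activation computed as matmul(x, weights) + biases. The helper name and
# shapes are hypothetical.
def _example_xw_plus_b_usage():
  x = array_ops.ones([2, 3])        # batch of 2, 3 input units
  weights = array_ops.ones([3, 4])  # 3 input units -> 4 output units
  biases = array_ops.zeros([4])
  return xw_plus_b(x, weights, biases)  # result has shape [2, 4]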
def xw_plus_b_v1(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
  This is a deprecated version that will soon be removed.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b_v1" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add_v1(mm, biases, name=name)
def _get_noise_shape(x, noise_shape):
# If noise_shape is none return immediately.
if noise_shape is None:
return array_ops.shape(x)
try:
# Best effort to figure out the intended shape.
    # If not possible, let the op handle it.
    # In eager mode the exception will show up.
noise_shape_ = tensor_shape.as_shape(noise_shape)
except (TypeError, ValueError):
return noise_shape
if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
new_dims = []
for i, dim in enumerate(x.shape.dims):
if noise_shape_.dims[i].value is None and dim.value is not None:
new_dims.append(dim.value)
else:
new_dims.append(noise_shape_.dims[i].value)
return tensor_shape.TensorShape(new_dims)
return noise_shape
@tf_export("nn.dropout")
def dropout(x, keep_prob, noise_shape=None, seed=None, name=None): # pylint: disable=invalid-name
"""Computes dropout.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A floating point tensor.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `keep_prob` is not in `(0, 1]` or if `x` is not a floating
point tensor.
"""
with ops.name_scope(name, "dropout", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if not x.dtype.is_floating:
raise ValueError("x has to be a floating point tensor since it's going to"
" be scaled. Got a %s tensor instead." % x.dtype)
if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(
keep_prob, dtype=x.dtype, name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
# Do nothing if we know keep_prob == 1
if tensor_util.constant_value(keep_prob) == 1:
return x
noise_shape = _get_noise_shape(x, noise_shape)
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob
random_tensor += random_ops.random_uniform(
noise_shape, seed=seed, dtype=x.dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = math_ops.div(x, keep_prob) * binary_tensor
if not context.executing_eagerly():
ret.set_shape(x.get_shape())
return ret
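# Illustrative usage sketch (not part of the original module): dropout that
# keeps each element with probability 0.5 and rescales kept values by
# 1 / keep_prob. The helper name and shapes are hypothetical.
def _example_dropout_usage():
  x = array_ops.ones([3, 4])
  return dropout(x, keep_prob=0.5, seed=1)  # kept entries become 2.0, dropped entries 0.0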
@tf_export("nn.top_k")
def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank=1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
Args:
input: 1-D or higher `Tensor` with last dimension at least `k`.
k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
dimension (along each row for matrices).
sorted: If true the resulting `k` elements will be sorted by the values in
descending order.
name: Optional name for the operation.
Returns:
values: The `k` largest elements along each last dimensional slice.
indices: The indices of `values` within the last dimension of `input`.
"""
return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
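# Illustrative usage sketch (not part of the original module): the two largest
# entries of a vector and their indices. The helper name and values are
# hypothetical.
def _example_top_k_usage():
  values, indices = top_k(ops.convert_to_tensor([1.0, 4.0, 2.0, 5.0]), k=2)
  return values, indices  # values ~ [5.0, 4.0], indices ~ [3, 1]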
def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin
r"""Finds values of the `n`-th order statistic for the last dmension.
If the input is a vector (rank-1), finds the entries which is the nth-smallest
value in the vector and outputs their values as scalar tensor.
For matrices (resp. higher rank input), computes the entries which is the
nth-smallest value in each row (resp. vector along the last dimension). Thus,
values.shape = input.shape[:-1]
Args:
input: 1-D or higher `Tensor` with last dimension at least `n+1`.
n: A `Tensor` of type `int32`.
0-D. Position of sorted vector to select along the last dimension (along
each row for matrices). Valid range of n is `[0, input.shape[:-1])`
reverse: An optional `bool`. Defaults to `False`.
When set to True, find the nth-largest value in the vector and vice
versa.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
The `n`-th order statistic along each last dimensional slice.
"""
return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export("nn.conv1d")
@deprecation.deprecated_arg_values(
None,
"`NCHW` for data_format is deprecated, use `NCW` instead",
warn_once=True,
data_format="NCHW")
@deprecation.deprecated_arg_values(
None,
"`NHWC` for data_format is deprecated, use `NWC` instead",
warn_once=True,
data_format="NHWC")
def conv1d(value,
filters,
stride,
padding,
use_cudnn_on_gpu=None,
data_format=None,
name=None):
r"""Computes a 1-D convolution given 3-D input and filter tensors.
Given an input tensor of shape
[batch, in_width, in_channels]
if data_format is "NWC", or
[batch, in_channels, in_width]
if data_format is "NCW",
and a filter / kernel tensor of shape
[filter_width, in_channels, out_channels], this op reshapes
the arguments to pass them to conv2d to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with "NC", a tensor of shape
[batch, in_width, in_channels]
is reshaped to
[batch, 1, in_width, in_channels],
and the filter is reshaped to
[1, filter_width, in_channels, out_channels].
The result is then reshaped back to
[batch, out_width, out_channels]
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
value: A 3D `Tensor`. Must be of type `float16` or `float32`.
filters: A 3D `Tensor`. Must have the same type as `value`.
stride: An `integer`. The number of entries by which
the filter is moved right at each step.
padding: 'SAME' or 'VALID'
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from `"NWC", "NCW"`. Defaults
to `"NWC"`, the data is stored in the order of
[batch, in_width, in_channels]. The `"NCW"` format stores
data as [batch, in_channels, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
with ops.name_scope(name, "conv1d", [value, filters]) as name:
# Reshape the input tensor to [batch, 1, in_width, in_channels]
if data_format is None or data_format == "NHWC" or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = 1
strides = [1, 1, stride, 1]
elif data_format == "NCHW" or data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = 2
strides = [1, 1, 1, stride]
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
value = array_ops.expand_dims(value, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
result = gen_nn_ops.conv2d(
value,
filters,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format)
return array_ops.squeeze(result, [spatial_start_dim])
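# Illustrative usage sketch (not part of the original module): 1-D convolution
# over a length-10 sequence with 3 input and 8 output channels. The helper name
# and shapes are hypothetical.
def _example_conv1d_usage():
  value = array_ops.ones([1, 10, 3])    # [batch, in_width, in_channels]
  filters = array_ops.ones([3, 3, 8])   # [filter_width, in_channels, out_channels]
  # With "SAME" padding and stride 1 the output width stays 10.
  return conv1d(value, filters, stride=1, padding="SAME")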
def conv1d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
stride,
padding="SAME",
data_format="NWC",
name=None):
"""The transpose of `conv1d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv1d` rather than an actual
deconvolution.
Args:
value: A 3-D `Tensor` of type `float` and shape
`[batch, in_width, in_channels]` for `NWC` data format or
`[batch, in_channels, in_width]` for `NCW` data format.
filter: A 3-D `Tensor` with the same type as `value` and shape
`[filter_width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
stride: An `integer`. The number of entries by which
the filter is moved right at each step.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NWC' and 'NCW' are supported.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv1d_transpose",
[value, filter, output_shape]) as name:
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(3)):
raise ValueError("output_shape must have shape (3,), got {}".format(
output_shape_.get_shape()))
# The format could be either NWC or NCW, map to NHWC or NCHW
if data_format is None or data_format == "NWC":
data_format_2d = "NHWC"
axis = 2
elif data_format == "NCW":
data_format_2d = "NCHW"
axis = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
if not value.get_shape()[axis].is_compatible_with(filter.get_shape()[2]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[2]))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [3] if reached this point.
if not filter.get_shape()[1].is_compatible_with(output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[1]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
# Reshape the input tensor to [batch, 1, in_width, in_channels]
if data_format_2d == "NHWC":
output_shape_ = array_ops.concat(
[output_shape_[:1], [1], output_shape_[1:]], axis=0)
spatial_start_dim = 1
strides = [1, 1, stride, 1]
else:
output_shape_ = array_ops.concat(
[output_shape_[:2], [1], output_shape_[2:]], axis=0)
spatial_start_dim = 2
strides = [1, 1, 1, stride]
value = array_ops.expand_dims(value, spatial_start_dim)
filter = array_ops.expand_dims(filter, 0) # pylint: disable=redefined-builtin
result = gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format_2d,
name=name)
return array_ops.squeeze(result, [spatial_start_dim])
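# --- Illustrative usage (not part of the original module; a minimal sketch) ---
# Assumes a TensorFlow release exposing this as `tf.nn.conv1d_transpose`; the
# helper name below is hypothetical. With "SAME" padding and stride 2, the
# output width is in_width * stride.
def _example_conv1d_transpose_usage():
  import tensorflow as tf  # assumed available in the caller's environment
  x = tf.random.normal([2, 8, 3])    # [batch, in_width, in_channels] ("NWC")
  f = tf.random.normal([4, 5, 3])    # [filter_width, output_channels, in_channels]
  y = tf.nn.conv1d_transpose(x, f, [2, 16, 5], 2, padding="SAME")
  return y                           # shape: [2, 16, 5]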
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
"""Calculates the compute resources needed for Dilation2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
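# Worked example of the statistic above (illustrative note, not original code):
# for a Dilation2D with a 3x5 filter and an output of shape [8, 32, 32, 16],
# output_count = 8 * 32 * 32 * 16 = 131072, so the reported value is
# 131072 * 3 * 5 * 2 = 3,932,160 flops (an add and a comparison per filter tap).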
@tf_export("nn.erosion2d")
def erosion2d(value, kernel, strides, rates, padding, name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - rates[1] * dy,
strides[2] * x - rates[2] * dx,
c] -
kernel[dy, dx, c]
Duality: The erosion of `value` by the `kernel` is equal to the negation of
the dilation of `-value` by the reflected `kernel`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
kernel: A `Tensor`. Must have the same type as `value`.
3-D with shape `[kernel_height, kernel_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
    ValueError: If the `value` depth does not match `kernel`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(kernel, [0, 1]),
strides=strides,
rates=rates,
padding=padding,
name=name))
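# --- Illustrative usage (not part of the original module; a minimal sketch) ---
# Uses the TF 1.x signature documented above (newer releases renamed `rates`
# to `dilations`); the helper name below is hypothetical. A flat (all-zero)
# 3x3 kernel turns the erosion into a plain sliding-window minimum.
def _example_erosion2d_usage():
  import tensorflow as tf  # assumed available in the caller's environment
  value = tf.random.normal([1, 16, 16, 2])   # [batch, height, width, depth]
  kernel = tf.zeros([3, 3, 2])               # flat structuring element
  return tf.nn.erosion2d(value, kernel, strides=[1, 1, 1, 1],
                         rates=[1, 1, 1, 1], padding="SAME")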
@tf_export("nn.in_top_k")
def in_top_k(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
prediction for the target class is among the top `k` predictions among
all predictions for example `i`. Note that the behavior of `InTopK` differs
from the `TopK` op in its handling of ties; if multiple classes have the
same prediction value and straddle the top-`k` boundary, all of those
classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`,
\\(targets_i\\) be the target class for example `i`,
\\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Args:
predictions: A `Tensor` of type `float32`.
A `batch_size` x `classes` tensor.
targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A `batch_size` vector of class ids.
k: An `int`. Number of top elements to look at for computing precision.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
"""
with ops.name_scope(name, "in_top_k"):
return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
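# --- Illustrative usage (not part of the original module; a minimal sketch) ---
# Keyword arguments are used so the call works across TF releases; the helper
# name below is hypothetical. With k=2, the first example's gold class 0
# (score 0.1) is outside the top two, while the second example's class 0
# (score 0.4) is inside them.
def _example_in_top_k_usage():
  import tensorflow as tf  # assumed available in the caller's environment
  predictions = tf.constant([[0.1, 0.8, 0.3],
                             [0.4, 0.5, 0.2]])   # batch_size x classes
  targets = tf.constant([0, 0])                  # gold class ids
  return tf.nn.in_top_k(predictions=predictions, targets=targets, k=2)  # [False, True]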
| 39.539493 | 98 | 0.682456 |
1f4b1b74e576123650bec973b1d4271bd8415a29 | 1,022 | py | Python | web/addons/payment_buckaroo/__init__.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | ["Apache-2.0"] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/addons/payment_buckaroo/__init__.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | ["MIT"] | null | null | null | odoo/addons/payment_buckaroo/__init__.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | ["MIT"] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import models
import controllers
| 42.583333 | 78 | 0.609589 |
1eb3c6c734eea8abb2d8859234449a197d36dd54 | 1,033 | py | Python | docs/source/conf.py | achillesrasquinha/fluxviz | aa0f1af34aa379d51aba45f6a87e38303a86a2f0 | ["MIT"] | 1 | 2020-05-04T04:51:47.000Z | 2020-05-04T04:51:47.000Z | docs/source/conf.py | achillesrasquinha/fluxviz | aa0f1af34aa379d51aba45f6a87e38303a86a2f0 | ["MIT"] | 2 | 2020-02-10T21:57:02.000Z | 2021-06-02T01:07:08.000Z | docs/source/conf.py | achillesrasquinha/fluxviz | aa0f1af34aa379d51aba45f6a87e38303a86a2f0 | ["MIT"] | 1 | 2020-02-12T17:17:50.000Z | 2020-02-12T17:17:50.000Z |
import sys
import os, os.path as osp
import datetime as dt
def pardir(path, level = 1):
for _ in range(level):
path = osp.dirname(path)
return path
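# Illustrative note (not in the original conf.py): pardir simply climbs `level`
# directory levels, e.g. pardir("/a/b/c/conf.py", 2) evaluates to "/a/b".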
BASEDIR = osp.abspath(pardir(__file__, 3))
DOCSDIR = osp.join(BASEDIR, "docs")
SRCDIR = osp.join(BASEDIR, "src")
NOW = dt.datetime.now()
sys.path.insert(0, BASEDIR)
import fluxviz
project = fluxviz.__name__
author = fluxviz.__author__
copyright = "%s %s" % (NOW.year, fluxviz.__author__)
version = fluxviz.__version__
release = fluxviz.__version__
source_suffix = [".rst"]
master_doc = "index"
exclude_patterns = [
osp.join(DOCSDIR, "source", "notebooks", ".ipynb_checkpoints")
]
extensions = [
"sphinx.ext.autodoc",
"nbsphinx"
]
templates_path = [
osp.join(DOCSDIR, "source", "_templates")
]
html_theme = "alabaster"
html_static_path = [
osp.join(DOCSDIR, "source", "_static")
]
html_sidebars = {
"index": ["sidebar.html"],
"**": [
"sidebar.html"
]
}
| 19.12963 | 66 | 0.621491 |
eabc5f69f472b240841ecc9fee7080050835237a | 2,277 | py | Python | core/brain/show/last/requests/reaction.py | vsilent/smarty-bot | 963cba05433be14494ba339343c9903ccab3c37d | ["MIT"] | 1 | 2016-10-08T09:01:05.000Z | 2016-10-08T09:01:05.000Z | core/brain/show/last/requests/reaction.py | vsilent/smarty-bot | 963cba05433be14494ba339343c9903ccab3c37d | ["MIT"] | 1 | 2019-09-24T09:56:52.000Z | 2019-09-24T09:56:52.000Z | core/brain/show/last/requests/reaction.py | vsilent/smarty-bot | 963cba05433be14494ba339343c9903ccab3c37d | ["MIT"] | null | null | null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author:
Description:
'''
from core.broadcast import say, bang
from core.people.person import Profile, ProfileRequest, Session
class Reaction:
"""class Reaction"""
response = ''
request = ''
def __str__(self):
return 'My new reaction'
@classmethod
def __init__(self, *args, **kwargs):
""" original request string """
# get request object
self.req_obj = kwargs.pop('req_obj')
# request word sequence
self.request = self.req_obj.get('request', '')
# request received from (julius, jabber any other resources)
self.req_from = self.req_obj.get('from', '')
self.response = ''
@classmethod
def run(self):
"""default method"""
email = None
sess = Session()
sender = self.req_obj.get('sender', '')
        # extract sender email
if sender:
email = sender.split('/')[0]
uuid = self.req_obj.pop('uuid', '')
if email:
# find user profile by primary email
profile = sess.query(Profile).filter(Profile.email == email).one()
elif uuid:
# find user profile by uuid
profile = sess.query(Profile).filter(Profile.uuid == uuid).one()
hs = sess.query(ProfileRequest).order_by('id desc').limit(10)
response = ''
requests = [h.request for h in hs]
if requests:
response = "\n".join(requests)
#########################################
# If reaction executed by jabber client #
#########################################
if self.req_from == 'jabber':
todo = {'text': response, 'jmsg': response, 'type': 'response'}
self.response = todo
#########################################
# If reaction executed by julius client #
#########################################
if self.req_from == 'julius':
bang()
todo = {'say': response, 'text': response, 'type': 'response'}
self.response = say(self.request.replace('say', '').upper())
return self.response
#n = Reaction(*{'reserved':''}, **{'req_obj':{'from':'', 'request':''}})
#n.run()
| 27.107143 | 78 | 0.508564 |
deb082caff9c21321823545e551ea94b8f3dcd9b | 22 | py | Python | my_test_proj/__init__.py | Naman-Goyal/4995-demo | b00bdfcd5f33edbcbb73eb0cf14b6129bb8583e6 | ["MIT"] | null | null | null | my_test_proj/__init__.py | Naman-Goyal/4995-demo | b00bdfcd5f33edbcbb73eb0cf14b6129bb8583e6 | ["MIT"] | null | null | null | my_test_proj/__init__.py | Naman-Goyal/4995-demo | b00bdfcd5f33edbcbb73eb0cf14b6129bb8583e6 | ["MIT"] | null | null | null
from .foo import inc
| 7.333333 | 20 | 0.727273 |
9519866a9a7051fcdfedf64c5faf8ba13ea5643f | 1,306 | py | Python | src/ui/questions.py | giansalex/repo_info_extractor | fa31940964e31c2d1aa3cdfd302539e02f70f511 | ["MIT"] | null | null | null | src/ui/questions.py | giansalex/repo_info_extractor | fa31940964e31c2d1aa3cdfd302539e02f70f511 | ["MIT"] | null | null | null | src/ui/questions.py | giansalex/repo_info_extractor | fa31940964e31c2d1aa3cdfd302539e02f70f511 | ["MIT"] | null | null | null
from whaaaaat import style_from_dict, Token, prompt, print_json, default_style, Separator
class Questions:
def ask_primary_remote_url(self, repo):
'''
        Prompts the user with the possible remote URLs and asks them to select the primary one.
'''
choices = []
for remote in repo.original_remotes:
choices.append(remote + ': ' + repo.original_remotes[remote])
questions = [
{
'type': 'list',
'name': 'remote_repo',
'message': 'Cannot find remote origin. Select which one is the primary remote URL.',
'choices': choices
}
]
return prompt(questions)
def ask_user_identity(self, repo):
choices = []
for key in repo.contributors:
choices.append({
'name': repo.contributors[key]['name'] + ' -> ' + repo.contributors[key]['email'],
})
questions = [
{
'type': 'checkbox',
'name': 'user_identity',
'message': 'The following contributors were found in the repository. Select which ones you are. (With SPACE you can select more than one)',
'choices': choices
}
]
return prompt(questions)
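# --- Illustrative usage (not part of the original module; a minimal sketch) ---
# Assumes a repo-like object exposing `original_remotes` as a dict of
# remote name -> URL, which is what `ask_primary_remote_url` reads above.
# `FakeRepo` and the example URLs are hypothetical; running this requires the
# `whaaaaat` package and an interactive terminal.
if __name__ == "__main__":
    class FakeRepo:
        original_remotes = {
            "origin": "git@example.com:user/project.git",
            "upstream": "https://example.com/org/project.git",
        }
    answers = Questions().ask_primary_remote_url(FakeRepo())
    print(answers.get("remote_repo"))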
| 33.487179 | 155 | 0.538285 |
4dc76e1bd6ddca0f450caf27139b7b7bc6d190dd | 9,392 | py | Python | spinup/run.py | Gerkinator/spinningup | a4ccfb447329e89007a36908133a3b0867b5664c | ["MIT"] | null | null | null | spinup/run.py | Gerkinator/spinningup | a4ccfb447329e89007a36908133a3b0867b5664c | ["MIT"] | null | null | null | spinup/run.py | Gerkinator/spinningup | a4ccfb447329e89007a36908133a3b0867b5664c | ["MIT"] | null | null | null
import spinup
from spinup.user_config import DEFAULT_BACKEND
from spinup.utils.run_utils import ExperimentGrid
from spinup.utils.serialization_utils import convert_json
import argparse
import gym
import json
import os, subprocess, sys
import os.path as osp
import string
import tensorflow as tf
import torch
from copy import deepcopy
from textwrap import dedent
from spinup.utils import register_custom_envs
register_custom_envs.register()
# Command line args that will go to ExperimentGrid.run, and must possess unique
# values (therefore must be treated separately).
RUN_KEYS = ['num_cpu', 'data_dir', 'datestamp']
# Command line sweetener, allowing short-form flags for common, longer flags.
SUBSTITUTIONS = {'env': 'env_name',
'hid': 'ac_kwargs:hidden_sizes',
'act': 'ac_kwargs:activation',
'cpu': 'num_cpu',
'dt': 'datestamp'}
# Only some algorithms can be parallelized (have num_cpu > 1):
MPI_COMPATIBLE_ALGOS = ['vpg', 'trpo', 'ppo']
# Algo names (used in a few places)
BASE_ALGO_NAMES = ['vpg', 'trpo', 'ppo', 'ddpg', 'td3', 'sac']
def add_with_backends(algo_list):
# helper function to build lists with backend-specific function names
algo_list_with_backends = deepcopy(algo_list)
for algo in algo_list:
algo_list_with_backends += [algo + '_tf1', algo + '_pytorch']
return algo_list_with_backends
def friendly_err(err_msg):
# add whitespace to error message to make it more readable
return '\n\n' + err_msg + '\n\n'
def parse_and_execute_grid_search(cmd, args):
"""Interprets algorithm name and cmd line args into an ExperimentGrid."""
if cmd in BASE_ALGO_NAMES:
backend = DEFAULT_BACKEND[cmd]
print('\n\nUsing default backend (%s) for %s.\n'%(backend, cmd))
cmd = cmd + '_' + backend
algo = eval('spinup.'+cmd)
# Before all else, check to see if any of the flags is 'help'.
valid_help = ['--help', '-h', 'help']
if any([arg in valid_help for arg in args]):
print('\n\nShowing docstring for spinup.'+cmd+':\n')
print(algo.__doc__)
sys.exit()
def process(arg):
# Process an arg by eval-ing it, so users can specify more
# than just strings at the command line (eg allows for
# users to give functions as args).
try:
return eval(arg)
except:
return arg
# Make first pass through args to build base arg_dict. Anything
# with a '--' in front of it is an argument flag and everything after,
# until the next flag, is a possible value.
arg_dict = dict()
for i, arg in enumerate(args):
assert i > 0 or '--' in arg, \
friendly_err("You didn't specify a first flag.")
if '--' in arg:
arg_key = arg.lstrip('-')
arg_dict[arg_key] = []
else:
arg_dict[arg_key].append(process(arg))
# Make second pass through, to catch flags that have no vals.
# Assume such flags indicate that a boolean parameter should have
# value True.
for k,v in arg_dict.items():
if len(v) == 0:
v.append(True)
# Third pass: check for user-supplied shorthands, where a key has
# the form --keyname[kn]. The thing in brackets, 'kn', is the
# shorthand. NOTE: modifying a dict while looping through its
# contents is dangerous, and breaks in 3.6+. We loop over a fixed list
# of keys to avoid this issue.
given_shorthands = dict()
fixed_keys = list(arg_dict.keys())
for k in fixed_keys:
p1, p2 = k.find('['), k.find(']')
if p1 >= 0 and p2 >= 0:
# Both '[' and ']' found, so shorthand has been given
k_new = k[:p1]
shorthand = k[p1+1:p2]
given_shorthands[k_new] = shorthand
arg_dict[k_new] = arg_dict[k]
del arg_dict[k]
# Penultimate pass: sugar. Allow some special shortcuts in arg naming,
# eg treat "env" the same as "env_name". This is super specific
# to Spinning Up implementations, and may be hard to maintain.
# These special shortcuts are described by SUBSTITUTIONS.
for special_name, true_name in SUBSTITUTIONS.items():
if special_name in arg_dict:
# swap it in arg dict
arg_dict[true_name] = arg_dict[special_name]
del arg_dict[special_name]
if special_name in given_shorthands:
# point the shortcut to the right name
given_shorthands[true_name] = given_shorthands[special_name]
del given_shorthands[special_name]
# Final pass: check for the special args that go to the 'run' command
# for an experiment grid, separate them from the arg dict, and make sure
# that they have unique values. The special args are given by RUN_KEYS.
run_kwargs = dict()
for k in RUN_KEYS:
if k in arg_dict:
val = arg_dict[k]
assert len(val) == 1, \
friendly_err("You can only provide one value for %s."%k)
run_kwargs[k] = val[0]
del arg_dict[k]
# Determine experiment name. If not given by user, will be determined
# by the algorithm name.
if 'exp_name' in arg_dict:
assert len(arg_dict['exp_name']) == 1, \
friendly_err("You can only provide one value for exp_name.")
exp_name = arg_dict['exp_name'][0]
del arg_dict['exp_name']
else:
exp_name = 'cmd_' + cmd
# Make sure that if num_cpu > 1, the algorithm being used is compatible
# with MPI.
if 'num_cpu' in run_kwargs and not(run_kwargs['num_cpu'] == 1):
assert cmd in add_with_backends(MPI_COMPATIBLE_ALGOS), \
friendly_err("This algorithm can't be run with num_cpu > 1.")
# Special handling for environment: make sure that env_name is a real,
# registered gym environment.
valid_envs = [e.id for e in list(gym.envs.registry.all())]
assert 'env_name' in arg_dict, \
friendly_err("You did not give a value for --env_name! Add one and try again.")
for env_name in arg_dict['env_name']:
err_msg = dedent("""
%s is not registered with Gym.
Recommendations:
* Check for a typo (did you include the version tag?)
* View the complete list of valid Gym environments at
https://gym.openai.com/envs/
"""%env_name)
assert env_name in valid_envs, err_msg
# Construct and execute the experiment grid.
eg = ExperimentGrid(name=exp_name)
for k,v in arg_dict.items():
eg.add(k, v, shorthand=given_shorthands.get(k))
eg.run(algo, **run_kwargs)
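# --- Illustrative note (not part of the original file; a minimal sketch) ---
# A command line such as
#     python -m spinup.run ppo --env[e] HalfCheetah-v2 --hid[h] [32,32] [64,64] --dt
# is parsed above into roughly:
#     arg_dict = {'env_name': ['HalfCheetah-v2'],
#                 'ac_kwargs:hidden_sizes': [[32, 32], [64, 64]]}
#     given_shorthands = {'env_name': 'e', 'ac_kwargs:hidden_sizes': 'h'}
#     run_kwargs = {'datestamp': True}
# and every combination of hyperparameter values becomes one experiment in the grid.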
if __name__ == '__main__':
"""
This is a wrapper allowing command-line interfaces to individual
algorithms and the plot / test_policy utilities.
For utilities, it only checks which thing to run, and calls the
appropriate file, passing all arguments through.
For algorithms, it sets up an ExperimentGrid object and uses the
ExperimentGrid run routine to execute each possible experiment.
"""
cmd = sys.argv[1] if len(sys.argv) > 1 else 'help'
valid_algos = add_with_backends(BASE_ALGO_NAMES)
valid_utils = ['plot', 'test_policy']
valid_help = ['--help', '-h', 'help']
valid_cmds = valid_algos + valid_utils + valid_help
assert cmd in valid_cmds, \
"Select an algorithm or utility which is implemented in Spinning Up."
if cmd in valid_help:
# Before all else, check to see if any of the flags is 'help'.
# List commands that are available.
str_valid_cmds = '\n\t' + '\n\t'.join(valid_algos+valid_utils)
help_msg = dedent("""
Experiment in Spinning Up from the command line with
\tpython -m spinup.run CMD [ARGS...]
where CMD is a valid command. Current valid commands are:
""") + str_valid_cmds
print(help_msg)
# Provide some useful details for algorithm running.
subs_list = ['--' + k.ljust(10) + 'for'.ljust(10) + '--' + v \
for k,v in SUBSTITUTIONS.items()]
str_valid_subs = '\n\t' + '\n\t'.join(subs_list)
special_info = dedent("""
FYI: When running an algorithm, any keyword argument to the
algorithm function can be used as a flag, eg
\tpython -m spinup.run ppo --env HalfCheetah-v2 --clip_ratio 0.1
If you need a quick refresher on valid kwargs, get the docstring
with
\tpython -m spinup.run [algo] --help
See the "Running Experiments" docs page for more details.
Also: Some common but long flags can be substituted for shorter
ones. Valid substitutions are:
""") + str_valid_subs
print(special_info)
elif cmd in valid_utils:
# Execute the correct utility file.
runfile = osp.join(osp.abspath(osp.dirname(__file__)), 'utils', cmd +'.py')
args = [sys.executable if sys.executable else 'python', runfile] + sys.argv[2:]
subprocess.check_call(args, env=os.environ)
else:
# Assume that the user plans to execute an algorithm. Run custom
# parsing on the arguments and build a grid search to execute.
args = sys.argv[2:]
parse_and_execute_grid_search(cmd, args)
| 37.269841 | 87 | 0.636286 |
537419de88fe69cb5646b5526f922469c005ab29 | 45,676 | py | Python | allennlp/models/encoder_decoders/copynet_seq2seq.py | entslscheia/allennlp | eeba62e34c8e211ed5963f830528c957f178607b | ["Apache-2.0"] | null | null | null | allennlp/models/encoder_decoders/copynet_seq2seq.py | entslscheia/allennlp | eeba62e34c8e211ed5963f830528c957f178607b | ["Apache-2.0"] | null | null | null | allennlp/models/encoder_decoders/copynet_seq2seq.py | entslscheia/allennlp | eeba62e34c8e211ed5963f830528c957f178607b | ["Apache-2.0"] | 1 | 2021-09-21T12:03:27.000Z | 2021-09-21T12:03:27.000Z |
import logging
from typing import Dict, Tuple, List, Any, Union
import numpy
from overrides import overrides
import torch
from torch.nn.modules.linear import Linear
from torch.nn.modules.rnn import LSTMCell
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Attention, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import InitializerApplicator, util
from allennlp.training.metrics import Metric, BLEU
from allennlp.nn.beam_search import BeamSearch
logger = logging.getLogger(__name__)
@Model.register("copynet_seq2seq")
class CopyNetSeq2Seq(Model):
"""
This is an implementation of `CopyNet <https://arxiv.org/pdf/1603.06393>`_.
CopyNet is a sequence-to-sequence encoder-decoder model with a copying mechanism
that can copy tokens from the source sentence into the target sentence instead of
generating all target tokens only from the target vocabulary.
It is very similar to a typical seq2seq model used in neural machine translation
tasks, for example, except that in addition to providing a "generation" score at each timestep
for the tokens in the target vocabulary, it also provides a "copy" score for each
token that appears in the source sentence. In other words, you can think of CopyNet
as a seq2seq model with a dynamic target vocabulary that changes based on the tokens
in the source sentence, allowing it to predict tokens that are out-of-vocabulary (OOV)
with respect to the actual target vocab.
Parameters
----------
vocab : ``Vocabulary``, required
Vocabulary containing source and target vocabularies.
source_embedder : ``TextFieldEmbedder``, required
Embedder for source side sequences
encoder : ``Seq2SeqEncoder``, required
The encoder of the "encoder/decoder" model
attention : ``Attention``, required
This is used to get a dynamic summary of encoder outputs at each timestep
when producing the "generation" scores for the target vocab.
beam_size : ``int``, required
Beam width to use for beam search prediction.
max_decoding_steps : ``int``, required
Maximum sequence length of target predictions.
target_embedding_dim : ``int``, optional (default = 30)
The size of the embeddings for the target vocabulary.
copy_token : ``str``, optional (default = '@COPY@')
The token used to indicate that a target token was copied from the source.
If this token is not already in your target vocabulary, it will be added.
source_namespace : ``str``, optional (default = 'source_tokens')
The namespace for the source vocabulary.
target_namespace : ``str``, optional (default = 'target_tokens')
The namespace for the target vocabulary.
tensor_based_metric : ``Metric``, optional (default = BLEU)
A metric to track on validation data that takes raw tensors when its called.
This metric must accept two arguments when called: a batched tensor
of predicted token indices, and a batched tensor of gold token indices.
token_based_metric : ``Metric``, optional (default = None)
A metric to track on validation data that takes lists of lists of tokens
as input. This metric must accept two arguments when called, both
of type `List[List[str]]`. The first is a predicted sequence for each item
in the batch and the second is a gold sequence for each item in the batch.
initializer : ``InitializerApplicator``, optional
An initialization strategy for the model weights.
"""
def __init__(
self,
vocab: Vocabulary,
source_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
attention: Attention,
beam_size: int,
max_decoding_steps: int,
target_embedding_dim: int = 30,
copy_token: str = "@COPY@",
source_namespace: str = "source_tokens",
target_namespace: str = "target_tokens",
tensor_based_metric: Metric = None,
token_based_metric: Metric = None,
initializer: InitializerApplicator = InitializerApplicator(),
) -> None:
super().__init__(vocab)
self._source_namespace = source_namespace
self._target_namespace = target_namespace
self._src_start_index = self.vocab.get_token_index(START_SYMBOL, self._source_namespace)
self._src_end_index = self.vocab.get_token_index(END_SYMBOL, self._source_namespace)
self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)
self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
self._oov_index = self.vocab.get_token_index(self.vocab._oov_token, self._target_namespace)
self._pad_index = self.vocab.get_token_index(
self.vocab._padding_token, self._target_namespace
)
self._copy_index = self.vocab.add_token_to_namespace(copy_token, self._target_namespace)
self._tensor_based_metric = tensor_based_metric or BLEU(
exclude_indices={self._pad_index, self._end_index, self._start_index}
)
self._token_based_metric = token_based_metric
self._target_vocab_size = self.vocab.get_vocab_size(self._target_namespace)
# Encoding modules.
self._source_embedder = source_embedder
self._encoder = encoder
# Decoder output dim needs to be the same as the encoder output dim since we initialize the
# hidden state of the decoder with the final hidden state of the encoder.
# We arbitrarily set the decoder's input dimension to be the same as the output dimension.
self.encoder_output_dim = self._encoder.get_output_dim()
self.decoder_output_dim = self.encoder_output_dim
self.decoder_input_dim = self.decoder_output_dim
target_vocab_size = self.vocab.get_vocab_size(self._target_namespace)
# The decoder input will be a function of the embedding of the previous predicted token,
# an attended encoder hidden state called the "attentive read", and another
# weighted sum of the encoder hidden state called the "selective read".
# While the weights for the attentive read are calculated by an `Attention` module,
# the weights for the selective read are simply the predicted probabilities
# corresponding to each token in the source sentence that matches the target
# token from the previous timestep.
self._target_embedder = Embedding(target_vocab_size, target_embedding_dim)
self._attention = attention
self._input_projection_layer = Linear(
target_embedding_dim + self.encoder_output_dim * 2, self.decoder_input_dim
)
# We then run the projected decoder input through an LSTM cell to produce
# the next hidden state.
self._decoder_cell = LSTMCell(self.decoder_input_dim, self.decoder_output_dim)
# We create a "generation" score for each token in the target vocab
# with a linear projection of the decoder hidden state.
self._output_generation_layer = Linear(self.decoder_output_dim, target_vocab_size)
# We create a "copying" score for each source token by applying a non-linearity
# (tanh) to a linear projection of the encoded hidden state for that token,
# and then taking the dot product of the result with the decoder hidden state.
self._output_copying_layer = Linear(self.encoder_output_dim, self.decoder_output_dim)
# At prediction time, we'll use a beam search to find the best target sequence.
self._beam_search = BeamSearch(
self._end_index, max_steps=max_decoding_steps, beam_size=beam_size
)
initializer(self)
@overrides
def forward(
self, # type: ignore
source_tokens: Dict[str, torch.LongTensor],
source_token_ids: torch.Tensor,
source_to_target: torch.Tensor,
metadata: List[Dict[str, Any]],
target_tokens: Dict[str, torch.LongTensor] = None,
target_token_ids: torch.Tensor = None,
) -> Dict[str, torch.Tensor]:
"""
        Make a forward pass with decoder logic for producing the entire target sequence.
Parameters
----------
source_tokens : ``Dict[str, torch.LongTensor]``, required
The output of `TextField.as_array()` applied on the source `TextField`. This will be
passed through a `TextFieldEmbedder` and then through an encoder.
source_token_ids : ``torch.Tensor``, required
Tensor containing IDs that indicate which source tokens match each other.
Has shape: `(batch_size, trimmed_source_length)`.
source_to_target : ``torch.Tensor``, required
Tensor containing vocab index of each source token with respect to the
target vocab namespace. Shape: `(batch_size, trimmed_source_length)`.
metadata : ``List[Dict[str, Any]]``, required
Metadata field that contains the original source tokens with key 'source_tokens'
and any other meta fields. When 'target_tokens' is also passed, the metadata
should also contain the original target tokens with key 'target_tokens'.
target_tokens : ``Dict[str, torch.LongTensor]``, optional (default = None)
Output of `Textfield.as_array()` applied on target `TextField`. We assume that the
target tokens are also represented as a `TextField` which must contain a "tokens"
key that uses single ids.
target_token_ids : ``torch.Tensor``, optional (default = None)
A tensor of shape `(batch_size, target_sequence_length)` which indicates which
tokens in the target sequence match tokens in the source sequence.
Returns
-------
Dict[str, torch.Tensor]
"""
state = self._encode(source_tokens)
state["source_token_ids"] = source_token_ids
state["source_to_target"] = source_to_target
if target_tokens:
state = self._init_decoder_state(state)
output_dict = self._forward_loss(target_tokens, target_token_ids, state)
else:
output_dict = {}
output_dict["metadata"] = metadata
if not self.training:
state = self._init_decoder_state(state)
predictions = self._forward_beam_search(state)
output_dict.update(predictions)
if target_tokens:
if self._tensor_based_metric is not None:
# shape: (batch_size, beam_size, max_sequence_length)
top_k_predictions = output_dict["predictions"]
# shape: (batch_size, max_predicted_sequence_length)
best_predictions = top_k_predictions[:, 0, :]
# shape: (batch_size, target_sequence_length)
gold_tokens = self._gather_extended_gold_tokens(
target_tokens["tokens"], source_token_ids, target_token_ids
)
self._tensor_based_metric(best_predictions, gold_tokens) # type: ignore
if self._token_based_metric is not None:
predicted_tokens = self._get_predicted_tokens(
output_dict["predictions"], metadata, n_best=1
)
self._token_based_metric( # type: ignore
predicted_tokens, [x["target_tokens"] for x in metadata]
)
return output_dict
def _gather_extended_gold_tokens(
self,
target_tokens: torch.Tensor,
source_token_ids: torch.Tensor,
target_token_ids: torch.Tensor,
) -> torch.LongTensor:
"""
Modify the gold target tokens relative to the extended vocabulary.
For gold targets that are OOV but were copied from the source, the OOV index
        will be changed to the index of the first occurrence in the source sentence,
offset by the size of the target vocabulary.
Parameters
----------
target_tokens : ``torch.Tensor``
Shape: `(batch_size, target_sequence_length)`.
source_token_ids : ``torch.Tensor``
Shape: `(batch_size, trimmed_source_length)`.
target_token_ids : ``torch.Tensor``
Shape: `(batch_size, target_sequence_length)`.
Returns
-------
torch.Tensor
Modified `target_tokens` with OOV indices replaced by offset index
of first match in source sentence.
"""
batch_size, target_sequence_length = target_tokens.size()
trimmed_source_length = source_token_ids.size(1)
# Only change indices for tokens that were OOV in target vocab but copied from source.
# shape: (batch_size, target_sequence_length)
oov = target_tokens == self._oov_index
# shape: (batch_size, target_sequence_length, trimmed_source_length)
expanded_source_token_ids = source_token_ids.unsqueeze(1).expand(
batch_size, target_sequence_length, trimmed_source_length
)
# shape: (batch_size, target_sequence_length, trimmed_source_length)
expanded_target_token_ids = target_token_ids.unsqueeze(-1).expand(
batch_size, target_sequence_length, trimmed_source_length
)
# shape: (batch_size, target_sequence_length, trimmed_source_length)
matches = expanded_source_token_ids == expanded_target_token_ids
# shape: (batch_size, target_sequence_length)
copied = matches.sum(-1) > 0
# shape: (batch_size, target_sequence_length)
mask = (oov & copied).long()
# shape: (batch_size, target_sequence_length)
first_match = ((matches.cumsum(-1) == 1) * matches).argmax(-1)
# shape: (batch_size, target_sequence_length)
new_target_tokens = (
target_tokens * (1 - mask) + (first_match.long() + self._target_vocab_size) * mask
)
return new_target_tokens
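    # Illustrative note (not part of the original model code): with a target
    # vocab of size 50, an OOV gold token whose first match sits at trimmed
    # source position 3 is re-indexed above to 50 + 3 = 53, so the loss can
    # credit the copy mechanism for producing it.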
def _init_decoder_state(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Initialize the encoded state to be passed to the first decoding time step.
"""
batch_size, _ = state["source_mask"].size()
# Initialize the decoder hidden state with the final output of the encoder,
# and the decoder context with zeros.
# shape: (batch_size, encoder_output_dim)
final_encoder_output = util.get_final_encoder_states(
state["encoder_outputs"], state["source_mask"], self._encoder.is_bidirectional()
)
# shape: (batch_size, decoder_output_dim)
state["decoder_hidden"] = final_encoder_output
# shape: (batch_size, decoder_output_dim)
state["decoder_context"] = state["encoder_outputs"].new_zeros(
batch_size, self.decoder_output_dim
)
return state
def _encode(self, source_tokens: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Encode source input sentences.
"""
# shape: (batch_size, max_input_sequence_length, encoder_input_dim)
embedded_input = self._source_embedder(source_tokens)
# shape: (batch_size, max_input_sequence_length)
source_mask = util.get_text_field_mask(source_tokens)
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = self._encoder(embedded_input, source_mask)
return {"source_mask": source_mask, "encoder_outputs": encoder_outputs}
def _decoder_step(
self,
last_predictions: torch.Tensor,
selective_weights: torch.Tensor,
state: Dict[str, torch.Tensor],
) -> Dict[str, torch.Tensor]:
# shape: (group_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs_mask = state["source_mask"].float()
# shape: (group_size, target_embedding_dim)
embedded_input = self._target_embedder(last_predictions)
# shape: (group_size, max_input_sequence_length)
attentive_weights = self._attention(
state["decoder_hidden"], state["encoder_outputs"], encoder_outputs_mask
)
# shape: (group_size, encoder_output_dim)
attentive_read = util.weighted_sum(state["encoder_outputs"], attentive_weights)
# shape: (group_size, encoder_output_dim)
selective_read = util.weighted_sum(state["encoder_outputs"][:, 1:-1], selective_weights)
# shape: (group_size, target_embedding_dim + encoder_output_dim * 2)
decoder_input = torch.cat((embedded_input, attentive_read, selective_read), -1)
# shape: (group_size, decoder_input_dim)
projected_decoder_input = self._input_projection_layer(decoder_input)
state["decoder_hidden"], state["decoder_context"] = self._decoder_cell(
projected_decoder_input, (state["decoder_hidden"], state["decoder_context"])
)
return state
def _get_generation_scores(self, state: Dict[str, torch.Tensor]) -> torch.Tensor:
return self._output_generation_layer(state["decoder_hidden"])
def _get_copy_scores(self, state: Dict[str, torch.Tensor]) -> torch.Tensor:
# shape: (batch_size, max_input_sequence_length - 2, encoder_output_dim)
trimmed_encoder_outputs = state["encoder_outputs"][:, 1:-1]
# shape: (batch_size, max_input_sequence_length - 2, decoder_output_dim)
copy_projection = self._output_copying_layer(trimmed_encoder_outputs)
# shape: (batch_size, max_input_sequence_length - 2, decoder_output_dim)
copy_projection = torch.tanh(copy_projection)
# shape: (batch_size, max_input_sequence_length - 2)
copy_scores = copy_projection.bmm(state["decoder_hidden"].unsqueeze(-1)).squeeze(-1)
return copy_scores
def _get_ll_contrib(
self,
generation_scores: torch.Tensor,
generation_scores_mask: torch.Tensor,
copy_scores: torch.Tensor,
target_tokens: torch.Tensor,
target_to_source: torch.Tensor,
copy_mask: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Get the log-likelihood contribution from a single timestep.
Parameters
----------
generation_scores : ``torch.Tensor``
Shape: `(batch_size, target_vocab_size)`
generation_scores_mask : ``torch.Tensor``
Shape: `(batch_size, target_vocab_size)`. This is just a tensor of 1's.
copy_scores : ``torch.Tensor``
Shape: `(batch_size, trimmed_source_length)`
target_tokens : ``torch.Tensor``
Shape: `(batch_size,)`
target_to_source : ``torch.Tensor``
Shape: `(batch_size, trimmed_source_length)`
copy_mask : ``torch.Tensor``
Shape: `(batch_size, trimmed_source_length)`
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Shape: `(batch_size,), (batch_size, max_input_sequence_length)`
"""
_, target_size = generation_scores.size()
# The point of this mask is to just mask out all source token scores
# that just represent padding. We apply the mask to the concatenation
# of the generation scores and the copy scores to normalize the scores
# correctly during the softmax.
# shape: (batch_size, target_vocab_size + trimmed_source_length)
mask = torch.cat((generation_scores_mask, copy_mask), dim=-1)
# shape: (batch_size, target_vocab_size + trimmed_source_length)
all_scores = torch.cat((generation_scores, copy_scores), dim=-1)
# Normalize generation and copy scores.
# shape: (batch_size, target_vocab_size + trimmed_source_length)
log_probs = util.masked_log_softmax(all_scores, mask)
# Calculate the log probability (`copy_log_probs`) for each token in the source sentence
# that matches the current target token. We use the sum of these copy probabilities
# for matching tokens in the source sentence to get the total probability
# for the target token. We also need to normalize the individual copy probabilities
# to create `selective_weights`, which are used in the next timestep to create
# a selective read state.
# shape: (batch_size, trimmed_source_length)
copy_log_probs = log_probs[:, target_size:] + (target_to_source.float() + 1e-45).log()
# Since `log_probs[:, target_size]` gives us the raw copy log probabilities,
# we use a non-log softmax to get the normalized non-log copy probabilities.
selective_weights = util.masked_softmax(log_probs[:, target_size:], target_to_source)
        # This mask ensures that an item in the batch has non-zero generation probabilities
# for this timestep only when the gold target token is not OOV or there are no
# matching tokens in the source sentence.
# shape: (batch_size, 1)
gen_mask = ((target_tokens != self._oov_index) | (target_to_source.sum(-1) == 0)).float()
log_gen_mask = (gen_mask + 1e-45).log().unsqueeze(-1)
# Now we get the generation score for the gold target token.
# shape: (batch_size, 1)
generation_log_probs = log_probs.gather(1, target_tokens.unsqueeze(1)) + log_gen_mask
# ... and add the copy score to get the step log likelihood.
# shape: (batch_size, 1 + trimmed_source_length)
combined_gen_and_copy = torch.cat((generation_log_probs, copy_log_probs), dim=-1)
# shape: (batch_size,)
step_log_likelihood = util.logsumexp(combined_gen_and_copy)
return step_log_likelihood, selective_weights
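    # Illustrative note (not part of the original model code): if the gold
    # token's generation log-prob is log(0.2) and two source positions copy it
    # with log-probs log(0.1) and log(0.3), the step log-likelihood computed
    # above is logsumexp of the three, i.e. log(0.2 + 0.1 + 0.3) = log(0.6).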
def _forward_loss(
self,
target_tokens: Dict[str, torch.LongTensor],
target_token_ids: torch.Tensor,
state: Dict[str, torch.Tensor],
) -> Dict[str, torch.Tensor]:
"""
Calculate the loss against gold targets.
"""
batch_size, target_sequence_length = target_tokens["tokens"].size()
# shape: (batch_size, max_input_sequence_length)
source_mask = state["source_mask"]
# The last input from the target is either padding or the end symbol.
# Either way, we don't have to process it.
num_decoding_steps = target_sequence_length - 1
# We use this to fill in the copy index when the previous input was copied.
# shape: (batch_size,)
copy_input_choices = source_mask.new_full((batch_size,), fill_value=self._copy_index)
# shape: (batch_size, trimmed_source_length)
copy_mask = source_mask[:, 1:-1].float()
# We need to keep track of the probabilities assigned to tokens in the source
# sentence that were copied during the previous timestep, since we use
# those probabilities as weights when calculating the "selective read".
# shape: (batch_size, trimmed_source_length)
selective_weights = state["decoder_hidden"].new_zeros(copy_mask.size())
# Indicates which tokens in the source sentence match the current target token.
# shape: (batch_size, trimmed_source_length)
target_to_source = state["source_token_ids"].new_zeros(copy_mask.size())
# This is just a tensor of ones which we use repeatedly in `self._get_ll_contrib`,
# so we create it once here to avoid doing it over-and-over.
generation_scores_mask = state["decoder_hidden"].new_full(
(batch_size, self._target_vocab_size), fill_value=1.0
)
step_log_likelihoods = []
for timestep in range(num_decoding_steps):
# shape: (batch_size,)
input_choices = target_tokens["tokens"][:, timestep]
# If the previous target token was copied, we use the special copy token.
# But the end target token will always be THE end token, so we know
# it was not copied.
if timestep < num_decoding_steps - 1:
# Get mask tensor indicating which instances were copied.
# shape: (batch_size,)
copied = (
(input_choices == self._oov_index) & (target_to_source.sum(-1) > 0)
).long()
# shape: (batch_size,)
input_choices = input_choices * (1 - copied) + copy_input_choices * copied
# shape: (batch_size, trimmed_source_length)
target_to_source = state["source_token_ids"] == target_token_ids[
:, timestep + 1
].unsqueeze(-1)
# Update the decoder state by taking a step through the RNN.
state = self._decoder_step(input_choices, selective_weights, state)
# Get generation scores for each token in the target vocab.
# shape: (batch_size, target_vocab_size)
generation_scores = self._get_generation_scores(state)
# Get copy scores for each token in the source sentence, excluding the start
# and end tokens.
# shape: (batch_size, trimmed_source_length)
copy_scores = self._get_copy_scores(state)
# shape: (batch_size,)
step_target_tokens = target_tokens["tokens"][:, timestep + 1]
step_log_likelihood, selective_weights = self._get_ll_contrib(
generation_scores,
generation_scores_mask,
copy_scores,
step_target_tokens,
target_to_source,
copy_mask,
)
step_log_likelihoods.append(step_log_likelihood.unsqueeze(1))
# Gather step log-likelihoods.
# shape: (batch_size, num_decoding_steps = target_sequence_length - 1)
log_likelihoods = torch.cat(step_log_likelihoods, 1)
# Get target mask to exclude likelihood contributions from timesteps after
# the END token.
# shape: (batch_size, target_sequence_length)
target_mask = util.get_text_field_mask(target_tokens)
# The first timestep is just the START token, which is not included in the likelihoods.
# shape: (batch_size, num_decoding_steps)
target_mask = target_mask[:, 1:].float()
# Sum of step log-likelihoods.
# shape: (batch_size,)
log_likelihood = (log_likelihoods * target_mask).sum(dim=-1)
# The loss is the negative log-likelihood, averaged over the batch.
loss = -log_likelihood.sum() / batch_size
return {"loss": loss}
def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
batch_size, source_length = state["source_mask"].size()
trimmed_source_length = source_length - 2
# Initialize the copy scores to zero.
state["copy_log_probs"] = (
state["decoder_hidden"].new_zeros((batch_size, trimmed_source_length)) + 1e-45
).log()
# shape: (batch_size,)
start_predictions = state["source_mask"].new_full(
(batch_size,), fill_value=self._start_index
)
# shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)
# shape (log_probabilities): (batch_size, beam_size)
all_top_k_predictions, log_probabilities = self._beam_search.search(
start_predictions, state, self.take_search_step
)
return {"predicted_log_probs": log_probabilities, "predictions": all_top_k_predictions}
def _get_input_and_selective_weights(
self, last_predictions: torch.LongTensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.LongTensor, torch.Tensor]:
"""
Get input choices for the decoder and the selective copy weights.
The decoder input choices are simply the `last_predictions`, except for
target OOV predictions that were copied from source tokens, in which case
the prediction will be changed to the COPY symbol in the target namespace.
The selective weights are just the probabilities assigned to source
tokens that were copied, normalized to sum to 1. If no source tokens were copied,
        the selective weights will be all zeros.
Parameters
----------
last_predictions : ``torch.LongTensor``
Shape: `(group_size,)`
state : ``Dict[str, torch.Tensor]``
Returns
-------
Tuple[torch.LongTensor, torch.Tensor]
`input_choices` (shape `(group_size,)`) and `selective_weights`
(shape `(group_size, trimmed_source_length)`).
"""
group_size, trimmed_source_length = state["source_to_target"].size()
# This is a mask indicating which last predictions were copied from the
        # source AND not in the target vocabulary (OOV).
# (group_size,)
only_copied_mask = (last_predictions >= self._target_vocab_size).long()
# If the last prediction was in the target vocab or OOV but not copied,
# we use that as input, otherwise we use the COPY token.
# shape: (group_size,)
copy_input_choices = only_copied_mask.new_full((group_size,), fill_value=self._copy_index)
input_choices = (
last_predictions * (1 - only_copied_mask) + copy_input_choices * only_copied_mask
)
# In order to get the `selective_weights`, we need to find out which predictions
# were copied or copied AND generated, which is the case when a prediction appears
# in both the source sentence and the target vocab. But whenever a prediction
# is in the target vocab (even if it also appeared in the source sentence),
# its index will be the corresponding target vocab index, not its index in
# the source sentence offset by the target vocab size. So we first
# use `state["source_to_target"]` to get an indicator of every source token
# that matches the predicted target token.
# shape: (group_size, trimmed_source_length)
expanded_last_predictions = last_predictions.unsqueeze(-1).expand(
group_size, trimmed_source_length
)
# shape: (group_size, trimmed_source_length)
source_copied_and_generated = (
state["source_to_target"] == expanded_last_predictions
).long()
# In order to get indicators for copied source tokens that are OOV with respect
# to the target vocab, we'll make use of `state["source_token_ids"]`.
# First we adjust predictions relative to the start of the source tokens.
# This makes sense because predictions for copied tokens are given by the index of the copied
# token in the source sentence, offset by the size of the target vocabulary.
# shape: (group_size,)
adjusted_predictions = last_predictions - self._target_vocab_size
# The adjusted indices for items that were not copied will be negative numbers,
# and therefore invalid. So we zero them out.
adjusted_predictions = adjusted_predictions * only_copied_mask
# shape: (group_size, trimmed_source_length)
source_token_ids = state["source_token_ids"]
# shape: (group_size, trimmed_source_length)
adjusted_prediction_ids = source_token_ids.gather(-1, adjusted_predictions.unsqueeze(-1))
# This mask will contain indicators for source tokens that were copied
# during the last timestep.
# shape: (group_size, trimmed_source_length)
source_only_copied = (source_token_ids == adjusted_prediction_ids).long()
# Since we zero'd-out indices for predictions that were not copied,
# we need to zero out all entries of this mask corresponding to those predictions.
source_only_copied = source_only_copied * only_copied_mask.unsqueeze(-1)
# shape: (group_size, trimmed_source_length)
mask = source_only_copied | source_copied_and_generated
# shape: (group_size, trimmed_source_length)
selective_weights = util.masked_softmax(state["copy_log_probs"], mask)
return input_choices, selective_weights
def _gather_final_log_probs(
self,
generation_log_probs: torch.Tensor,
copy_log_probs: torch.Tensor,
state: Dict[str, torch.Tensor],
) -> torch.Tensor:
"""
Combine copy probabilities with generation probabilities for matching tokens.
Parameters
----------
generation_log_probs : ``torch.Tensor``
Shape: `(group_size, target_vocab_size)`
copy_log_probs : ``torch.Tensor``
Shape: `(group_size, trimmed_source_length)`
state : ``Dict[str, torch.Tensor]``
Returns
-------
torch.Tensor
Shape: `(group_size, target_vocab_size + trimmed_source_length)`.
"""
_, trimmed_source_length = state["source_to_target"].size()
source_token_ids = state["source_token_ids"]
# shape: [(batch_size, *)]
modified_log_probs_list: List[torch.Tensor] = []
for i in range(trimmed_source_length):
# shape: (group_size,)
copy_log_probs_slice = copy_log_probs[:, i]
# `source_to_target` is a matrix of shape (group_size, trimmed_source_length)
# where element (i, j) is the vocab index of the target token that matches the jth
# source token in the ith group, if there is one, or the index of the OOV symbol otherwise.
# We'll use this to add copy scores to corresponding generation scores.
# shape: (group_size,)
source_to_target_slice = state["source_to_target"][:, i]
# The OOV index in the source_to_target_slice indicates that the source
# token is not in the target vocab, so we don't want to add that copy score
# to the OOV token.
copy_log_probs_to_add_mask = (source_to_target_slice != self._oov_index).float()
copy_log_probs_to_add = (
copy_log_probs_slice + (copy_log_probs_to_add_mask + 1e-45).log()
)
# shape: (batch_size, 1)
copy_log_probs_to_add = copy_log_probs_to_add.unsqueeze(-1)
# shape: (batch_size, 1)
selected_generation_log_probs = generation_log_probs.gather(
1, source_to_target_slice.unsqueeze(-1)
)
combined_scores = util.logsumexp(
torch.cat((selected_generation_log_probs, copy_log_probs_to_add), dim=1)
)
generation_log_probs = generation_log_probs.scatter(
-1, source_to_target_slice.unsqueeze(-1), combined_scores.unsqueeze(-1)
)
# We have to combine copy scores for duplicate source tokens so that
# we can find the overall most likely source token. So, if this is the first
            # occurrence of this particular source token, we add the log_probs from all other
            # occurrences, otherwise we zero it out since it was already accounted for.
if i < (trimmed_source_length - 1):
                # Sum copy scores from future occurrences of the source token.
# shape: (group_size, trimmed_source_length - i)
source_future_occurences = (
source_token_ids[:, (i + 1) :] == source_token_ids[:, i].unsqueeze(-1)
).float() # noqa
# shape: (group_size, trimmed_source_length - i)
future_copy_log_probs = (
copy_log_probs[:, (i + 1) :] + (source_future_occurences + 1e-45).log()
)
# shape: (group_size, 1 + trimmed_source_length - i)
combined = torch.cat(
(copy_log_probs_slice.unsqueeze(-1), future_copy_log_probs), dim=-1
)
# shape: (group_size,)
copy_log_probs_slice = util.logsumexp(combined)
if i > 0:
# Remove copy log_probs that we have already accounted for.
# shape: (group_size, i)
source_previous_occurences = source_token_ids[:, 0:i] == source_token_ids[
:, i
].unsqueeze(-1)
# shape: (group_size,)
duplicate_mask = (source_previous_occurences.sum(dim=-1) == 0).float()
copy_log_probs_slice = copy_log_probs_slice + (duplicate_mask + 1e-45).log()
# Finally, we zero-out copy scores that we added to the generation scores
# above so that we don't double-count them.
# shape: (group_size,)
left_over_copy_log_probs = (
copy_log_probs_slice + (1.0 - copy_log_probs_to_add_mask + 1e-45).log()
)
modified_log_probs_list.append(left_over_copy_log_probs.unsqueeze(-1))
modified_log_probs_list.insert(0, generation_log_probs)
# shape: (group_size, target_vocab_size + trimmed_source_length)
modified_log_probs = torch.cat(modified_log_probs_list, dim=-1)
return modified_log_probs
def take_search_step(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take step during beam search.
This function is what gets passed to the `BeamSearch.search` method. It takes
predictions from the last timestep and the current state and outputs
the log probabilities assigned to tokens for the next timestep, as well as the updated
state.
Since we are predicting tokens out of the extended vocab (target vocab + all unique
        tokens from the source sentence), this is a little more complicated than just
making a forward pass through the model. The output log probs will have
shape `(group_size, target_vocab_size + trimmed_source_length)` so that each
token in the target vocab and source sentence are assigned a probability.
Note that copy scores are assigned to each source token based on their position, not unique value.
So if a token appears more than once in the source sentence, it will have more than one score.
Further, if a source token is also part of the target vocab, its final score
will be the sum of the generation and copy scores. Therefore, in order to
get the score for all tokens in the extended vocab at this step,
        we have to combine copy scores for re-occurring source tokens and potentially
add them to the generation scores for the matching token in the target vocab, if
there is one.
So we can break down the final log probs output as the concatenation of two
matrices, A: `(group_size, target_vocab_size)`, and B: `(group_size, trimmed_source_length)`.
Matrix A contains the sum of the generation score and copy scores (possibly 0)
for each target token. Matrix B contains left-over copy scores for source tokens
that do NOT appear in the target vocab, with zeros everywhere else. But since
a source token may appear more than once in the source sentence, we also have to
sum the scores for each appearance of each unique source token. So matrix B
        actually only has non-zero values at the first occurrence of each source token
that is not in the target vocab.
Parameters
----------
last_predictions : ``torch.Tensor``
Shape: `(group_size,)`
state : ``Dict[str, torch.Tensor]``
Contains all state tensors necessary to produce generation and copy scores
for next step.
Notes
-----
`group_size` != `batch_size`. In fact, `group_size` = `batch_size * beam_size`.
"""
_, trimmed_source_length = state["source_to_target"].size()
# Get input to the decoder RNN and the selective weights. `input_choices`
# is the result of replacing target OOV tokens in `last_predictions` with the
# copy symbol. `selective_weights` consist of the normalized copy probabilities
# assigned to the source tokens that were copied. If no tokens were copied,
# it will be all zeros.
# shape: (group_size,), (group_size, trimmed_source_length)
input_choices, selective_weights = self._get_input_and_selective_weights(
last_predictions, state
)
# Update the decoder state by taking a step through the RNN.
state = self._decoder_step(input_choices, selective_weights, state)
# Get the un-normalized generation scores for each token in the target vocab.
# shape: (group_size, target_vocab_size)
generation_scores = self._get_generation_scores(state)
# Get the un-normalized copy scores for each token in the source sentence,
# excluding the start and end tokens.
# shape: (group_size, trimmed_source_length)
copy_scores = self._get_copy_scores(state)
# Concat un-normalized generation and copy scores.
# shape: (group_size, target_vocab_size + trimmed_source_length)
all_scores = torch.cat((generation_scores, copy_scores), dim=-1)
# shape: (group_size, trimmed_source_length)
copy_mask = state["source_mask"][:, 1:-1].float()
# shape: (group_size, target_vocab_size + trimmed_source_length)
mask = torch.cat(
(generation_scores.new_full(generation_scores.size(), 1.0), copy_mask), dim=-1
)
# Normalize generation and copy scores.
# shape: (group_size, target_vocab_size + trimmed_source_length)
log_probs = util.masked_log_softmax(all_scores, mask)
# shape: (group_size, target_vocab_size), (group_size, trimmed_source_length)
generation_log_probs, copy_log_probs = log_probs.split(
[self._target_vocab_size, trimmed_source_length], dim=-1
)
# Update copy_probs needed for getting the `selective_weights` at the next timestep.
state["copy_log_probs"] = copy_log_probs
# We now have normalized generation and copy scores, but to produce the final
# score for each token in the extended vocab, we have to go through and add
# the copy scores to the generation scores of matching target tokens, and sum
# the copy scores of duplicate source tokens.
# shape: (group_size, target_vocab_size + trimmed_source_length)
final_log_probs = self._gather_final_log_probs(generation_log_probs, copy_log_probs, state)
return final_log_probs, state
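    # A small illustrative sketch (not part of the model) of how the extended
    # vocabulary returned by take_search_step is laid out. Indices below
    # target_vocab_size refer to tokens generated from the target vocabulary;
    # indices at or above it refer to positions in the trimmed source sentence
    # that would be copied. The numbers below are made up.
    #
    #   target_vocab_size = 5, trimmed_source_length = 3
    #   final_log_probs.shape == (group_size, 5 + 3)
    #   predicted index 2 -> generate the vocab token with id 2
    #   predicted index 6 -> copy the source token at position 6 - 5 = 1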
def _get_predicted_tokens(
self,
predicted_indices: Union[torch.Tensor, numpy.ndarray],
batch_metadata: List[Any],
n_best: int = None,
) -> List[Union[List[List[str]], List[str]]]:
"""
Convert predicted indices into tokens.
If `n_best = 1`, the result type will be `List[List[str]]`. Otherwise the result
type will be `List[List[List[str]]]`.
"""
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
predicted_tokens: List[Union[List[List[str]], List[str]]] = []
for top_k_predictions, metadata in zip(predicted_indices, batch_metadata):
batch_predicted_tokens: List[List[str]] = []
for indices in top_k_predictions[:n_best]:
tokens: List[str] = []
indices = list(indices)
if self._end_index in indices:
indices = indices[: indices.index(self._end_index)]
for index in indices:
if index >= self._target_vocab_size:
adjusted_index = index - self._target_vocab_size
token = metadata["source_tokens"][adjusted_index]
else:
token = self.vocab.get_token_from_index(index, self._target_namespace)
tokens.append(token)
batch_predicted_tokens.append(tokens)
if n_best == 1:
predicted_tokens.append(batch_predicted_tokens[0])
else:
predicted_tokens.append(batch_predicted_tokens)
return predicted_tokens
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, Any]:
"""
Finalize predictions.
After a beam search, the predicted indices correspond to tokens in the target vocabulary
OR tokens in source sentence. Here we gather the actual tokens corresponding to
the indices.
"""
predicted_tokens = self._get_predicted_tokens(
output_dict["predictions"], output_dict["metadata"]
)
output_dict["predicted_tokens"] = predicted_tokens
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
if self._tensor_based_metric is not None:
all_metrics.update(
self._tensor_based_metric.get_metric(reset=reset) # type: ignore
)
if self._token_based_metric is not None:
all_metrics.update(self._token_based_metric.get_metric(reset=reset)) # type: ignore
return all_metrics
| 50.920847
| 106
| 0.661573
|
e47aa7aa50233a2aa9a8e9c0c1213af7149b3ad9
| 6,450
|
py
|
Python
|
code/inference.py
|
i-need-sleep/MMCoref
|
bba168760841ececd4e906b07e8153c0a92b0f49
|
[
"MIT"
] | 2
|
2021-10-29T22:06:43.000Z
|
2021-12-21T01:58:01.000Z
|
code/inference.py
|
i-need-sleep/MMCoref
|
bba168760841ececd4e906b07e8153c0a92b0f49
|
[
"MIT"
] | null | null | null |
code/inference.py
|
i-need-sleep/MMCoref
|
bba168760841ececd4e906b07e8153c0a92b0f49
|
[
"MIT"
] | 1
|
2022-03-31T09:22:24.000Z
|
2022-03-31T09:22:24.000Z
|
import os
import json
import torch
from Transformers_VQA.dataset_final import make_final_loader
from Transformers_VQA.modified_uniter import Modified_Uniter
from Transformers_VQA.modified_uniter_KBid import Modified_Uniter_KBid
from Transformers_VQA.modified_uniter_sceneseg import Modified_Uniter_sceneseg
def inference(checkpoint, model_name, test_set = 'devtest'):
BATCH_SIZE = 1
torch.manual_seed(21)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# Make loaders
loader = make_final_loader(test_set, BATCH_SIZE, rcnn=False, test=True)
# Load model
if model_name == 'base':
model = Modified_Uniter().to(device)
elif model_name == 'KBid':
model = Modified_Uniter_KBid().to(device)
elif model_name == 'sceneseg':
model = Modified_Uniter_sceneseg().to(device)
model.load_state_dict(torch.load(f'./trained/{checkpoint}.bin', map_location=device)['model_state_dict'], strict=False)
mask_stepper = torch.ones(1, 12, 512, 512).to(device)
for i in range(12):
mask_stepper[0, i, :, :] *= i+1
# Infer
out = {}
out_obj_logit = {}
n_hit = 0
n_pred_pos = 0
n_real_pos = 0
logit_out = torch.tensor([[]]).to(device)
truth_out = torch.tensor([[]]).to(device)
model.eval()
with torch.no_grad():
for batch_idx, batch in enumerate(loader):
input_ids = batch['input_ids'].to(device)
txt_seg_ids = batch['txt_seg_ids'].to(device)
vis_feats = batch['vis_feats'].to(device)
obj_embs = batch['obj_embs'].to(device)
KB_ids = batch['KB_ids'].to(device)
obj_ids = batch['obj_ids'].to(device)
pos_x = batch['pos_x'].to(device)
pos_y = batch['pos_y'].to(device)
pos_z = batch['pos_z'].to(device)
bboxes = batch['bboxes'].to(device)
vis_seg = batch['vis_seg'].to(device)
extended_attention_mask = batch['extended_attention_mask'].to(device)
output_mask = batch['output_mask'].to(device)
reference = batch['reference'].to(device)
scene_seg = batch['scene_segs'].to(device)
dial_idx = batch['dial_idx']
round_idx = batch['round_idx']
if model_name == 'base':
pred = model(input_ids , txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask)
elif model_name == 'KBid':
pred = model(input_ids , txt_seg_ids, vis_feats, KB_ids, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask)
elif model_name == 'sceneseg':
pred = model(input_ids , txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask, scene_seg)
pred = pred.reshape(1,-1)
pred = pred[output_mask==1].reshape(-1,1)
truth = reference.float().reshape(-1,1)
pred_bin = pred > 0
truth_bin = truth > 0.5
n_hit += torch.sum(pred_bin*truth_bin == 1).detach().item()
n_pred_pos += torch.sum((pred.reshape(1,-1) > 0) ).to('cpu').item()
n_real_pos += torch.sum((reference.reshape(1,-1) > 0.1)).to('cpu').item()
logit_out = torch.cat((logit_out, pred.reshape(1,-1)), axis=1)
truth_out = torch.cat((truth_out, reference.reshape(1,-1)), axis=1)
obj_ids -= 1 # -1 for padding. 0 is a valid index
line_out = []
dial_idx = dial_idx[0]
round_idx = round_idx[0]
for idx, prediction in enumerate(pred):
if prediction > 0:
line_out.append(obj_ids[0][idx].item())
try:
out_obj_logit[dial_idx][round_idx][obj_ids[0][idx].item()] = prediction.item()
except:
try:
out_obj_logit[dial_idx][round_idx] = {obj_ids[0][idx].item(): prediction.item()}
except:
out_obj_logit[dial_idx] = {round_idx: {obj_ids[0][idx].item(): prediction.item()}}
try:
out[dial_idx][round_idx] = line_out
except:
out[dial_idx] = {round_idx: line_out}
try:
with open(f'../data/simmc2_dials_dstc10_{test_set}.json', 'r') as data_file:
data = json.load(data_file)
except:
with open(f'../data/simmc2_dials_dstc10_{test_set}_public.json', 'r') as data_file:
data = json.load(data_file)
for dial in data['dialogue_data']:
dial_mentions = []
dial_idx = dial['dialogue_idx']
for round_idx, round in enumerate(dial['dialogue']):
try:
round['transcript_annotated']['act_attributes']['objects'] = out[dial_idx][round_idx]
for obj_idx in out[dial_idx][round_idx]:
if obj_idx not in dial_mentions:
dial_mentions.append(obj_idx)
except:
try:
round['transcript_annotated']['act_attributes']['objects'] = []
except:
round['transcript_annotated'] = {'act_attributes': {"objects": []}}
dial['mentioned_object_ids'] = dial_mentions
# uncomment this to output coref predictions for each model
# with open(f'./inference/{checkpoint}_{test_set}.json', 'w', encoding='utf-8') as out_file:
# json.dump(data, out_file)
with open(f'./inference/{checkpoint}_{test_set}_obj_logits.json', 'w', encoding='utf-8') as out_file:
json.dump(out_obj_logit, out_file)
print(test_set)
print(n_hit)
print(n_pred_pos)
print(n_real_pos)
torch.save({'logit': logit_out, 'truth': truth_out}, f'./inference/{checkpoint}_{test_set}_logit_truth.pt')
return
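# An alternative sketch (illustrative only, assuming the same dial_idx/round_idx
# keys and hypothetical names obj_id/logit) to the nested try/except blocks above
# for building the nested output dictionaries: dict.setdefault creates missing
# inner dicts explicitly instead of relying on KeyError handling.
#
#   out.setdefault(dial_idx, {})[round_idx] = line_out
#   out_obj_logit.setdefault(dial_idx, {}).setdefault(round_idx, {})[obj_id] = logit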
if __name__ == '__main__':
inference('base', 'base', test_set='dev')
inference('base', 'base', test_set='devtest')
inference('base', 'base', test_set='teststd')
inference('KBid', 'KBid', test_set='dev')
inference('KBid', 'KBid', test_set='devtest')
inference('KBid', 'KBid', test_set='teststd')
inference('sceneseg', 'sceneseg', test_set='dev')
inference('sceneseg', 'sceneseg', test_set='devtest')
inference('sceneseg', 'sceneseg', test_set='teststd')
| 42.434211
| 157
| 0.597519
|
9b001f46c2262f49679077382a11915a7e5e5e0c
| 2,371
|
py
|
Python
|
distblast/hadoop/distblast_pipes.py
|
bgruening/bcbb
|
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
|
[
"MIT"
] | 339
|
2015-01-04T13:23:04.000Z
|
2022-03-25T23:09:09.000Z
|
distblast/hadoop/distblast_pipes.py
|
bgruening/bcbb
|
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
|
[
"MIT"
] | 39
|
2015-01-14T21:31:09.000Z
|
2021-11-18T15:15:33.000Z
|
distblast/hadoop/distblast_pipes.py
|
bgruening/bcbb
|
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
|
[
"MIT"
] | 176
|
2015-01-10T17:40:44.000Z
|
2022-03-25T05:14:21.000Z
|
#!/usr/bin/env python
"""Process a fasta file through Hadoop one record at a time using pydoop.
"""
import sys
import os
import json
import logging
logging.basicConfig(level=logging.DEBUG)
from pydoop.pipes import Mapper, Reducer, Factory, runTask
from pydoop.pipes import RecordReader, InputSplit, RecordWriter
from pydoop.hdfs import hdfs
from pydoop.utils import split_hdfs_path
from Bio import SeqIO
from bcbio.phylo import blast
class FastaMapper(Mapper):
def map(self, context):
config = context.getJobConf()
tmp_dir = config.get("job.local.dir")
xref_dbs = config.get("fasta.blastdb").split(",")
cur_key, ids, scores = blast.blast_top_hits(context.getInputKey(),
context.getInputValue(), xref_dbs, tmp_dir)
cur_val = dict(ids=ids, scores=scores)
context.emit(cur_key, json.dumps(cur_val))
class FastaReducer(Reducer):
"""Simple reducer that returns a value per input record identifier.
"""
def reduce(self, context):
key = context.getInputKey()
vals = []
while context.nextValue():
vals.append(context.getInputValue())
if len(vals) > 0:
context.emit(key, vals[0])
class FastaReader(RecordReader):
"""Return one text FASTA record at a time using Biopython SeqIO iterators.
"""
def __init__(self, context):
super(FastaReader, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
self.isplit = InputSplit(context.getInputSplit())
self.host, self.port, self.fpath = split_hdfs_path(self.isplit.filename)
self.fs = hdfs(self.host, self.port)
self.file = self.fs.open_file(self.fpath, os.O_RDONLY)
self._iterator = (SeqIO.parse(self.file, "fasta") if
self.isplit.offset == 0 else None)
def __del__(self):
self.file.close()
self.fs.close()
def next(self):
if self._iterator:
try:
record = self._iterator.next()
return (True, record.id, record.format("fasta"))
except StopIteration:
pass
return (False, "", "")
def getProgress(self):
return 0
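# A standalone sketch (illustrative only) of the Biopython calls this reader
# relies on: SeqIO.parse yields one record per FASTA entry, and
# record.format("fasta") re-serializes it as text, which is what next()
# emits as the map value.
#
#   from io import StringIO
#   from Bio import SeqIO
#   handle = StringIO(">seq1\nACGTACGT\n>seq2\nTTTT\n")
#   record = next(SeqIO.parse(handle, "fasta"))
#   record.id                # 'seq1'
#   record.format("fasta")   # '>seq1\nACGTACGT\n'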
def main(argv):
runTask(Factory(FastaMapper, FastaReducer, record_reader_class=FastaReader))
if __name__ == "__main__":
main(sys.argv)
| 32.040541
| 80
| 0.646984
|
5b4fbbeb0661708083f4f75a2a1867005816654e
| 9,389
|
py
|
Python
|
lib/utils/keypoints.py
|
terrychenism/Detectron
|
400687fb105e6a64d75d420e86e843971653409f
|
[
"Apache-2.0"
] | 14
|
2018-03-17T13:11:27.000Z
|
2019-02-17T18:17:54.000Z
|
lib/utils/keypoints.py
|
terrychenism/Detectron
|
400687fb105e6a64d75d420e86e843971653409f
|
[
"Apache-2.0"
] | null | null | null |
lib/utils/keypoints.py
|
terrychenism/Detectron
|
400687fb105e6a64d75d420e86e843971653409f
|
[
"Apache-2.0"
] | 6
|
2018-05-10T09:25:42.000Z
|
2018-10-15T13:18:37.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Keypoint utilities (somewhat specific to COCO keypoints)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
from core.config import cfg
import utils.blob as blob_utils
def get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
return keypoints, keypoint_flip_map
def get_person_class_index():
"""Index of the person class in COCO."""
return 1
def flip_keypoints(keypoints, keypoint_flip_map, keypoint_coords, width):
"""Left/right flip keypoint_coords. keypoints and keypoint_flip_map are
accessible from get_keypoints().
"""
flipped_kps = keypoint_coords.copy()
for lkp, rkp in keypoint_flip_map.items():
lid = keypoints.index(lkp)
rid = keypoints.index(rkp)
flipped_kps[:, :, lid] = keypoint_coords[:, :, rid]
flipped_kps[:, :, rid] = keypoint_coords[:, :, lid]
# Flip x coordinates
flipped_kps[:, 0, :] = width - flipped_kps[:, 0, :] - 1
# Maintain COCO convention that if visibility == 0, then x, y = 0
inds = np.where(flipped_kps[:, 2, :] == 0)
flipped_kps[inds[0], 0, inds[1]] = 0
return flipped_kps
def flip_heatmaps(heatmaps):
"""Flip heatmaps horizontally."""
keypoints, flip_map = get_keypoints()
heatmaps_flipped = heatmaps.copy()
for lkp, rkp in flip_map.items():
lid = keypoints.index(lkp)
rid = keypoints.index(rkp)
heatmaps_flipped[:, rid, :, :] = heatmaps[:, lid, :, :]
heatmaps_flipped[:, lid, :, :] = heatmaps[:, rid, :, :]
heatmaps_flipped = heatmaps_flipped[:, :, :, ::-1]
return heatmaps_flipped
def heatmaps_to_keypoints(maps, rois):
"""Extract predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = cfg.KRCNN.INFERENCE_MIN_SIZE
xy_preds = np.zeros(
(len(rois), 4, cfg.KRCNN.NUM_KEYPOINTS), dtype=np.float32)
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
# roi_map = cv2.resize(
# maps[i], (roi_map_width, roi_map_height),
# interpolation=cv2.INTER_CUBIC)
roi_map = maps[i].copy()
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
for k in range(cfg.KRCNN.NUM_KEYPOINTS):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int*float(roi_map_width/w) + 0.5) * width_correction
y = (y_int*float(roi_map_height/w) + 0.5) * height_correction
xy_preds[i, 0, k] = x + offset_x[i]
xy_preds[i, 1, k] = y + offset_y[i]
xy_preds[i, 2, k] = roi_map[k, y_int, x_int]
xy_preds[i, 3, k] = roi_map_probs[k, y_int, x_int]
return xy_preds
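# A small numeric sketch (illustrative only, with made-up values) of the
# coordinate conversion above. For a roi 112 px wide starting at x = 100,
# a heatmap width w = 56, and roi_map_width ending up as 112:
#
#   width_correction = 112 / 112 = 1.0
#   argmax column x_int = 10
#   x = (10 * (112 / 56) + 0.5) * 1.0 = 20.5   # continuous roi coordinate
#   image x = 20.5 + 100 = 120.5               # plus the roi offset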
def keypoints_to_heatmap_labels(keypoints, rois):
"""Encode keypoint location in the target heatmap for use in
SoftmaxWithLoss.
"""
# Maps keypoints from the half-open interval [x1, x2) on continuous image
# coordinates to the closed interval [0, HEATMAP_SIZE - 1] on discrete image
# coordinates. We use the continuous <-> discrete conversion from Heckbert
# 1990 ("What is the coordinate of a pixel?"): d = floor(c) and c = d + 0.5,
# where d is a discrete coordinate and c is a continuous coordinate.
assert keypoints.shape[2] == cfg.KRCNN.NUM_KEYPOINTS
shape = (len(rois), cfg.KRCNN.NUM_KEYPOINTS)
heatmaps = blob_utils.zeros(shape)
weights = blob_utils.zeros(shape)
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = cfg.KRCNN.HEATMAP_SIZE / (rois[:, 2] - rois[:, 0])
scale_y = cfg.KRCNN.HEATMAP_SIZE / (rois[:, 3] - rois[:, 1])
for kp in range(keypoints.shape[2]):
vis = keypoints[:, 2, kp] > 0
x = keypoints[:, 0, kp].astype(np.float32)
y = keypoints[:, 1, kp].astype(np.float32)
# Since we use floor below, if a keypoint is exactly on the roi's right
# or bottom boundary, we shift it in by eps (conceptually) to keep it in
# the ground truth heatmap.
x_boundary_inds = np.where(x == rois[:, 2])[0]
y_boundary_inds = np.where(y == rois[:, 3])[0]
x = (x - offset_x) * scale_x
x = np.floor(x)
if len(x_boundary_inds) > 0:
x[x_boundary_inds] = cfg.KRCNN.HEATMAP_SIZE - 1
y = (y - offset_y) * scale_y
y = np.floor(y)
if len(y_boundary_inds) > 0:
y[y_boundary_inds] = cfg.KRCNN.HEATMAP_SIZE - 1
valid_loc = np.logical_and(
np.logical_and(x >= 0, y >= 0),
np.logical_and(
x < cfg.KRCNN.HEATMAP_SIZE, y < cfg.KRCNN.HEATMAP_SIZE))
valid = np.logical_and(valid_loc, vis)
valid = valid.astype(np.int32)
lin_ind = y * cfg.KRCNN.HEATMAP_SIZE + x
heatmaps[:, kp] = lin_ind * valid
weights[:, kp] = valid
return heatmaps, weights
def scores_to_probs(scores):
"""Transforms CxHxW of scores to probabilities spatially."""
channels = scores.shape[0]
for c in range(channels):
temp = scores[c, :, :]
max_score = temp.max()
temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))
scores[c, :, :] = temp
return scores
def nms_oks(kp_predictions, rois, thresh):
"""Nms based on kp predictions."""
scores = np.mean(kp_predictions[:, 2, :], axis=1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ovr = compute_oks(
kp_predictions[i], rois[i], kp_predictions[order[1:]],
rois[order[1:]])
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi):
"""Compute OKS for predicted keypoints wrt gt_keypoints.
src_keypoints: 4xK
src_roi: 4x1
dst_keypoints: Nx4xK
dst_roi: Nx4
"""
sigmas = np.array([
.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87,
.87, .89, .89]) / 10.0
vars = (sigmas * 2)**2
# area
src_area = (src_roi[2] - src_roi[0] + 1) * (src_roi[3] - src_roi[1] + 1)
# measure the per-keypoint distance if keypoints visible
dx = dst_keypoints[:, 0, :] - src_keypoints[0, :]
dy = dst_keypoints[:, 1, :] - src_keypoints[1, :]
e = (dx**2 + dy**2) / vars / (src_area + np.spacing(1)) / 2
e = np.sum(np.exp(-e), axis=1) / e.shape[1]
return e
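# Illustrative note (a sketch, not part of the original file): each keypoint
# contributes exp(-(dx^2 + dy^2) / (2 * area * (2*sigma_k)^2)) and compute_oks
# returns the mean over the K keypoints, so a perfectly matching keypoint
# (dx = dy = 0) contributes exp(0) = 1.0, a distant one contributes ~0.0, and
# the result lies in [0, 1].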
| 35.033582
| 80
| 0.607732
|
007efcea58e6fa3c8f9cec6e7e31341aaff2bd14
| 3,401
|
py
|
Python
|
tensorflow/python/training/gradient_descent.py
|
wainshine/tensorflow
|
dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d
|
[
"Apache-2.0"
] | 54
|
2017-06-17T14:07:48.000Z
|
2022-03-29T02:11:20.000Z
|
tensorflow/python/training/gradient_descent.py
|
wainshine/tensorflow
|
dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d
|
[
"Apache-2.0"
] | 19
|
2021-12-28T12:44:55.000Z
|
2022-01-13T08:11:28.000Z
|
tensorflow/python/training/gradient_descent.py
|
wainshine/tensorflow
|
dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d
|
[
"Apache-2.0"
] | 11
|
2018-04-19T22:36:01.000Z
|
2021-08-02T08:44:43.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GradientDescent for TensorFlow."""
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.GradientDescentOptimizer"])
class GradientDescentOptimizer(optimizer.Optimizer):
"""Optimizer that implements the gradient descent algorithm.
"""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
@compatibility(eager)
When eager execution is enabled, `learning_rate` can be a callable that
takes no arguments and returns the actual value to use. This can be useful
for changing these values across different invocations of optimizer
functions.
@end_compatibility
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._learning_rate_tensor = None
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, handle):
return training_ops.resource_apply_gradient_descent(
handle.handle, math_ops.cast(self._learning_rate_tensor,
grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
return resource_variable_ops.resource_scatter_add(
handle.handle,
indices,
-grad * math_ops.cast(self._learning_rate_tensor,
grad.dtype.base_dtype))
def _apply_sparse_duplicate_indices(self, grad, var):
delta = indexed_slices.IndexedSlices(
grad.values *
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def _prepare(self):
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
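# A usage sketch (illustrative only; assumes the TF1-style API exported above
# as tf.compat.v1.train.GradientDescentOptimizer, and that `loss` is defined).
# The callable learning rate mirrors the eager-mode note in the constructor
# docstring.
#
#   import tensorflow as tf
#   lr = lambda: 0.01   # callable, resolved in _prepare()
#   opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=lr)
#   train_op = opt.minimize(loss)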
| 40.975904
| 80
| 0.727433
|
69fb4de71951084d73a21c14b03adf75d5ec4e35
| 305
|
py
|
Python
|
main.py
|
zsewa/app-template
|
8fe7e66d8c09a44846a5501d6a1535bc9d185548
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
zsewa/app-template
|
8fe7e66d8c09a44846a5501d6a1535bc9d185548
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
zsewa/app-template
|
8fe7e66d8c09a44846a5501d6a1535bc9d185548
|
[
"Apache-2.0"
] | null | null | null |
from bottle import Bottle
from api.http_ctrl import api_server
from api.http_web import web_server
from app import app
server = Bottle()
if __name__ == '__main__':
app.register()
server.mount('/api', api_server)
server.mount('/web', web_server)
server.run(host='localhost', port=5000)
| 20.333333
| 43
| 0.721311
|
f4c303e269bc2c5237e68a7481f42ffe6226c62c
| 3,103
|
py
|
Python
|
life_manage/life_manage/settings.py
|
chengfeiZhou/Campus-cloud-life-platform
|
6189e822db150fdeade931886c79a59e001d9ada
|
[
"MIT"
] | null | null | null |
life_manage/life_manage/settings.py
|
chengfeiZhou/Campus-cloud-life-platform
|
6189e822db150fdeade931886c79a59e001d9ada
|
[
"MIT"
] | 1
|
2021-03-10T22:10:08.000Z
|
2021-03-10T22:10:08.000Z
|
life_manage/life_manage/settings.py
|
chengfeiZhou/Campus-cloud-life-platform
|
6189e822db150fdeade931886c79a59e001d9ada
|
[
"MIT"
] | null | null | null |
"""
Django settings for life_manage project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-ck0did0a#rfk$g4r$c_ri5))8*chiy$$!5kk^f83fze4749-3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
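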
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'life_manage.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'life_manage.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
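# A hedged sketch (not part of the generated settings) of how the two
# "SECURITY WARNING" values above are commonly overridden from the environment
# for production deployments; the environment variable names are assumptions.
#
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'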
| 25.644628
| 91
| 0.69739
|
50ceae38e17c80649c10a9d6fd4323f1efc2cdbc
| 7,886
|
py
|
Python
|
RDT_2_1/RDT.py
|
poncem91/rdt2_1-rdt3_0
|
88f37934963d2252a579ccf38765152dc4641860
|
[
"Apache-2.0"
] | 1
|
2020-12-09T09:53:21.000Z
|
2020-12-09T09:53:21.000Z
|
RDT_2_1/RDT.py
|
poncem91/rdt2_1-rdt3_0
|
88f37934963d2252a579ccf38765152dc4641860
|
[
"Apache-2.0"
] | null | null | null |
RDT_2_1/RDT.py
|
poncem91/rdt2_1-rdt3_0
|
88f37934963d2252a579ccf38765152dc4641860
|
[
"Apache-2.0"
] | 1
|
2021-11-15T10:06:21.000Z
|
2021-11-15T10:06:21.000Z
|
import threading
import Network
import argparse
import hashlib
from time import sleep
# keeps print statements from overlapping
def print_lock(statement):
with threading.Lock():
print(statement)
class Packet:
# the number of bytes used to store packet length
seq_num_S_length = 10
length_S_length = 10
# length of md5 checksum in hex
checksum_length = 32
def __init__(self, seq_num, msg_S):
self.seq_num = seq_num
self.msg_S = msg_S
@classmethod
def from_byte_S(self, byte_S):
if Packet.corrupt(byte_S):
raise RuntimeError('Cannot initialize Packet: byte_S is corrupt')
# extract the fields
seq_num = int(byte_S[Packet.length_S_length: Packet.length_S_length + Packet.seq_num_S_length])
msg_S = byte_S[Packet.length_S_length + Packet.seq_num_S_length + Packet.checksum_length:]
return self(seq_num, msg_S)
def get_byte_S(self):
# convert sequence number of a byte field of seq_num_S_length bytes
seq_num_S = str(self.seq_num).zfill(self.seq_num_S_length)
# convert length to a byte field of length_S_length bytes
length_S = str(self.length_S_length + len(seq_num_S) + self.checksum_length + len(self.msg_S)).zfill(
self.length_S_length)
# compute the checksum
checksum = hashlib.md5((length_S + seq_num_S + self.msg_S).encode('utf-8'))
checksum_S = checksum.hexdigest()
# compile into a string
return length_S + seq_num_S + checksum_S + self.msg_S
def isACK(self):
return self.msg_S == "1"
def isNAK(self):
return self.msg_S == "0"
@staticmethod
def corrupt(byte_S):
# extract the fields
length_S = byte_S[0:Packet.length_S_length]
seq_num_S = byte_S[Packet.length_S_length: Packet.length_S_length + Packet.seq_num_S_length]
checksum_S = byte_S[
Packet.length_S_length + Packet.seq_num_S_length: Packet.length_S_length + Packet.seq_num_S_length + Packet.checksum_length]
msg_S = byte_S[Packet.length_S_length + Packet.seq_num_S_length + Packet.checksum_length:]
# compute the checksum locally
checksum = hashlib.md5(str(length_S + seq_num_S + msg_S).encode('utf-8'))
computed_checksum_S = checksum.hexdigest()
# and check if the same
return checksum_S != computed_checksum_S
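# A round-trip sketch (illustrative only) of the wire format produced by
# get_byte_S(): 10 bytes of total length, 10 bytes of sequence number,
# a 32-character hex MD5 checksum, then the message.
#
#   p = Packet(1, "hi")
#   b = p.get_byte_S()           # "0000000054" + "0000000001" + <32 hex chars> + "hi"
#   Packet.corrupt(b)            # False
#   Packet.from_byte_S(b).msg_S  # "hi"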
class RDT:
# latest sequence number used in a packet for each thread (send and receive)
seq_num_snd = 1
seq_num_rcv = 1
# buffer of bytes read from network
byte_buffer = ''
receive_thread = None
def __init__(self, role_S, server_S, port):
# use the passed in port and port+1 to set up unidirectional links between
# RDT send and receive functions
# cross the ports on the client and server to match net_snd to net_rcv
if role_S == 'server':
self.net_snd = Network.NetworkLayer(role_S, server_S, port)
self.net_rcv = Network.NetworkLayer(role_S, server_S, port + 1)
else:
self.net_rcv = Network.NetworkLayer(role_S, server_S, port)
self.net_snd = Network.NetworkLayer(role_S, server_S, port + 1)
self.receive_thread = threading.Thread(target=self.receive_helper)
self.receive_thread.daemon = True
self.receive_thread.start()
def disconnect(self):
self.net_snd.disconnect()
self.net_rcv.disconnect()
if self.receive_thread:
self.receive_thread.join()
def rdt_2_1_send(self, msg_S):
sleep(0.2)
p = Packet(self.seq_num_snd, msg_S)
while True:
self.net_snd.udt_send(p.get_byte_S())
ack_or_nak = self.net_snd.udt_receive()
# wait for an ACK/NAK response
while not ack_or_nak:
ack_or_nak = self.net_snd.udt_receive()
# extract length of packet
length = int(ack_or_nak[:Packet.length_S_length])
# check if ACK/NAK is corrupt
ack_or_nak_bytes = ack_or_nak[0:length]
corrupt = Packet.corrupt(ack_or_nak_bytes)
# If the ACK/NAK is corrupt it loops back and re-sends the packet
if corrupt:
print_lock("SENDER: ACK/NAK corrupt... Re-sending packet")
continue
if not corrupt:
response = Packet.from_byte_S(ack_or_nak_bytes)
# If ACK/NAK is for wrong packet, it loops back and re-sends packet
if self.seq_num_snd != response.seq_num:
print_lock("SENDER: Unexpected numbered packet... Re-sending packet")
continue
# If it was an ACK, advance the sequence number; this send is done
if response.isACK():
self.seq_num_snd = (self.seq_num_snd + 1) % 2
print_lock("SENDER: ACK received... Updating sequence number")
break
# If it was NAK it loops back and re-sends packet
elif response.isNAK():
print_lock("SENDER: NAK received... Re-sending packet")
def receive_helper(self):
while True:
byte_S = self.net_rcv.udt_receive()
# check if we have received enough bytes
if len(byte_S) < Packet.length_S_length:
continue # not enough bytes to read packet length
# extract length of packet
length = int(byte_S[:Packet.length_S_length])
if len(byte_S) < length:
continue # not enough bytes to read the whole packet
# create packet from buffer content and add to return string
corrupt = Packet.corrupt(byte_S)
# If packet is corrupt it sends a NAK
if corrupt:
sndpkt = Packet(self.seq_num_rcv, "0")
self.net_rcv.udt_send(sndpkt.get_byte_S())
print_lock("RECEIVER: Packet corrupt, sending NAK")
elif not corrupt:
p = Packet.from_byte_S(byte_S[0:length])
# If packet is correct packet, it sends back ACK and updates seq num
if self.seq_num_rcv == p.seq_num:
self.byte_buffer += byte_S
sndpkt = Packet(p.seq_num, "1")
self.net_rcv.udt_send(sndpkt.get_byte_S())
self.seq_num_rcv = (self.seq_num_rcv + 1) % 2
print_lock("RECEIVER: Packet received successfully, sending ACK and updating seq num")
# If packet is not the expected numbered packet, it re-sends ACK for previous packet
else:
sndpkt = Packet(p.seq_num, "1")
self.net_rcv.udt_send(sndpkt.get_byte_S())
print_lock("RECEIVER: Unexpected numbered packet, resending ACK")
def rdt_2_1_receive(self):
sleep(0.2)
ret_S = None
if self.byte_buffer:
length = int(self.byte_buffer[:Packet.length_S_length])
ret_S = Packet.from_byte_S(self.byte_buffer[0:length]).msg_S
self.byte_buffer = ""
return ret_S
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='RDT implementation.')
parser.add_argument('role', help='Role is either client or server.', choices=['client', 'server'])
parser.add_argument('server', help='Server.')
parser.add_argument('port', help='Port.', type=int)
args = parser.parse_args()
rdt = RDT(args.role, args.server, args.port)
if args.role == 'client':
rdt.rdt_2_1_send('MSG_FROM_CLIENT')
sleep(2)
print(rdt.rdt_2_1_receive())
rdt.disconnect()
else:
sleep(1)
print(rdt.rdt_2_1_receive())
rdt.rdt_2_1_send('MSG_FROM_SERVER')
rdt.disconnect()
| 38.096618
| 145
| 0.618057
|
743008d7b9995598698133239ccf65b07892a9be
| 3,697
|
py
|
Python
|
examples/run_sakt.py
|
jdxyw/deepKT
|
985c67f257d56e30de89872e2c54d8c7859c9147
|
[
"MIT"
] | 5
|
2021-05-19T05:05:16.000Z
|
2022-03-31T02:41:14.000Z
|
examples/run_sakt.py
|
jdxyw/deepKT
|
985c67f257d56e30de89872e2c54d8c7859c9147
|
[
"MIT"
] | 1
|
2021-09-07T07:10:56.000Z
|
2021-09-07T07:27:23.000Z
|
examples/run_sakt.py
|
jdxyw/deepKT
|
985c67f257d56e30de89872e2c54d8c7859c9147
|
[
"MIT"
] | 1
|
2022-03-08T07:37:53.000Z
|
2022-03-08T07:37:53.000Z
|
import sys
sys.path.insert(0, "..")
import argparse
import torch
import torch.optim
import deepkt.utils
from deepkt.data import SAKTDataset
from deepkt.model import SAKTModel
from deepkt.loss import SAKTLoss
from torch.utils.data import DataLoader
import pandas as pd
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
def run(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_df = pd.read_csv("../data/assist2015_train.csv",
header=None,
sep='\t')
test_df = pd.read_csv("../data/assist2015_test.csv", header=None, sep='\t')
train = SAKTDataset(train_df, args.num_skill, max_len=128)
test = SAKTDataset(test_df, args.num_skill, max_len=128)
train_dataloader = DataLoader(train,
batch_size=args.batch_size,
num_workers=args.num_worker,
shuffle=True)
test_dataloader = DataLoader(test,
batch_size=args.batch_size * 2,
num_workers=args.num_worker,
shuffle=False)
sakt = SAKTModel(args.num_skill, args.embed_dim, args.dropout, args.num_heads, device=device, max_len=128)
optimizer = torch.optim.Adam(sakt.parameters(), lr=args.learning_rate)
loss_func = SAKTLoss()
sakt.to(device)
loss_func.to(device)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
for epoch in range(args.epoch):
deepkt.utils.train_epoch(sakt, train_dataloader, optimizer, loss_func,
device)
deepkt.utils.eval_epoch(sakt, test_dataloader, loss_func, deepkt.utils.sakt_eval, device)
scheduler.step()
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="train SAKT model")
arg_parser.add_argument("--learning_rate",
dest="learning_rate",
default=0.001,
type=float,
required=False)
arg_parser.add_argument("--batch_size",
dest="batch_size",
default=64,
type=int,
required=False)
arg_parser.add_argument("--num_skill",
dest="num_skill",
default=100,
type=int,
required=False)
arg_parser.add_argument("--embed_dim",
dest="embed_dim",
default=200,
type=int,
required=False)
arg_parser.add_argument("--dropout",
dest="dropout",
default=0.2,
type=float,
required=False)
arg_parser.add_argument("--num_heads",
dest="num_heads",
default=5,
type=int,
required=False)
arg_parser.add_argument("--epoch",
dest="epoch",
default=15,
type=int,
required=False)
arg_parser.add_argument("--num_worker",
dest="num_worker",
default=0,
type=int,
required=False)
args = arg_parser.parse_args()
run(args)
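# Example invocation (hypothetical values; the train/test CSVs above are
# expected under ../data/):
#
#   python run_sakt.py --batch_size 64 --num_skill 100 --embed_dim 200 --epoch 15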
| 38.113402
| 110
| 0.503381
|
34548047db0a4742aa01c51a0081a891ca421a93
| 30,366
|
py
|
Python
|
examples/python/cvrptw_plot.py
|
leandromundim/or-tools
|
592e4878caf1f989bb65f21c2270f82d19957370
|
[
"Apache-2.0"
] | 2
|
2018-05-31T09:07:23.000Z
|
2020-01-21T11:36:59.000Z
|
examples/python/cvrptw_plot.py
|
leandromundim/or-tools
|
592e4878caf1f989bb65f21c2270f82d19957370
|
[
"Apache-2.0"
] | null | null | null |
examples/python/cvrptw_plot.py
|
leandromundim/or-tools
|
592e4878caf1f989bb65f21c2270f82d19957370
|
[
"Apache-2.0"
] | 2
|
2018-05-31T12:36:37.000Z
|
2020-01-02T08:21:31.000Z
|
# This Python file uses the following encoding: utf-8
# Copyright 2015 Tin Arm Engineering AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Capacitated Vehicle Routing Problem with Time Windows (and optional orders).
This is a sample using the routing library python wrapper to solve a
CVRPTW problem.
A description of the problem can be found here:
http://en.wikipedia.org/wiki/Vehicle_routing_problem.
The variant which is tackled by this model includes a capacity dimension,
time windows and optional orders, with a penalty cost if orders are not
performed.
To help explore the problem, two classes are provided, Customers() and
Vehicles(), used to randomly locate orders and depots, and to randomly
generate demands, time-window constraints and vehicles.
Distances are computed using the Great Circle distances. Distances are in km
and times in seconds.
A function for the displaying of the vehicle plan
display_vehicle_output
The optimization engine uses local search to improve solutions, first
solutions being generated using a cheapest addition heuristic.
Numpy and Matplotlib are required for the problem creation and display.
"""
import os
import numpy as np
from matplotlib import pyplot as plt
from collections import namedtuple
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
from datetime import datetime, timedelta
class Customers():
"""
A class that generates and holds customers information.
Randomly normally distribute a number of customers and locations within
a region described by a rectangle. Generate a random demand for each
customer. Generate a random time window for each customer.
May either be initialized with the extents, as a dictionary describing
two corners of a rectangle in latitude and longitude OR as a center
point (lat, lon), and box_size in km. The default arguments are for a
10 x 10 km square centered in Sheffield.
Args:
extents (Optional[Dict]): A dictionary describing a rectangle in
latitude and longitude with the keys 'llcrnrlat', 'llcrnrlon' &
'urcrnrlat' & 'urcrnrlon'
center (Optional(Tuple): A tuple of (latitude, longitude)
describing the centre of the rectangle.
box_size (Optional [float]): The length in km of the box's sides.
num_stops (int): The number of customers, including the depots that
are placed normally distributed in the rectangle.
min_demand (int): Lower limit on the randomly generated demand at
each customer.
max_demand (int): Upper limit on the randomly generated demand at
each customer.
min_tw: shortest random time window for a customer, in hours.
max_tw: longest random time window for a customer, in hours.
Examples:
To place 100 customers randomly within 100 km x 100 km rectangle,
centered in the default location, with a random demand of between 5
and 10 units:
>>> customers = Customers(num_stops=100, box_size=100,
... min_demand=5, max_demand=10)
alternatively, to place 75 customers in the same area with default
arguments for demand:
>>> extents = {'urcrnrlon': 0.03403, 'llcrnrlon': -2.98325,
... 'urcrnrlat': 54.28127, 'llcrnrlat': 52.48150}
>>> customers = Customers(num_stops=75, extents=extents)
"""
def __init__(self, extents=None, center=(53.381393, -1.474611),
box_size=10, num_stops=100,
min_demand=0, max_demand=25,
min_tw=1, max_tw=5):
self.number = num_stops #: The number of customers and depots
#: Location, a named tuple for locations.
Location = namedtuple("Location", ['lat', 'lon'])
if extents is not None:
self.extents = extents #: The lower left and upper right points
#: Location[lat,lon]: the centre point of the area.
self.center = Location(extents['urcrnrlat'] -
0.5 * (extents['urcrnrlat'] -
extents['llcrnrlat']),
extents['urcrnrlon'] -
0.5 * (extents['urcrnrlon'] -
extents['llcrnrlon']))
else:
#: Location[lat,lon]: the centre point of the area.
(clat, clon) = self.center = Location(center[0], center[1])
rad_earth = 6367 # km
circ_earth = np.pi * rad_earth
#: The lower left and upper right points
self.extents = {'llcrnrlon': (clon - 180 * box_size /
(circ_earth *
np.cos(np.deg2rad(clat)))),
'llcrnrlat': clat - 180 * box_size / circ_earth,
'urcrnrlon': (clon + 180 * box_size /
(circ_earth *
np.cos(np.deg2rad(clat)))),
'urcrnrlat': clat + 180 * box_size / circ_earth}
# The 'name' of the stop, indexed from 0 to num_stops-1
stops = np.array(range(0, num_stops))
# normally distributed random distribution of stops within the box
stdv = 6 # the number of standard deviations 99.9% will be within +-3
lats = (self.extents['llcrnrlat'] + np.random.randn(num_stops) *
(self.extents['urcrnrlat'] - self.extents['llcrnrlat']) / stdv)
lons = (self.extents['llcrnrlon'] + np.random.randn(num_stops) *
(self.extents['urcrnrlon'] - self.extents['llcrnrlon']) / stdv)
# uniformly distributed integer demands.
demmands = np.random.randint(min_demand, max_demand, num_stops)
self.time_horizon = 24 * 60 ** 2 # A 24 hour period.
# The customers demand a time window of between min_tw and max_tw hours for each
# delivery
time_windows = np.random.random_integers(min_tw * 3600,
max_tw * 3600, num_stops)
# The last time a delivery window can start
latest_time = self.time_horizon - time_windows
start_times = [None for o in time_windows]
stop_times = [None for o in time_windows]
# Make random timedeltas, nominally from the start of the day.
for idx in range(self.number):
stime = int(np.random.random_integers(0, latest_time[idx]))
start_times[idx] = timedelta(seconds=stime)
stop_times[idx] = (start_times[idx] +
timedelta(seconds=int(time_windows[idx])))
# A named tuple for the customer
Customer = namedtuple("Customer", ['index', # the index of the stop
'demand', # the demand for the stop
'lat', # the latitude of the stop
'lon', # the longitude of the stop
'tw_open', # timedelta window open
'tw_close']) # timedelta window cls
self.customers = [Customer(idx, dem, lat, lon, tw_open, tw_close) for
idx, dem, lat, lon, tw_open, tw_close
in zip(stops, demmands, lats, lons,
start_times, stop_times)]
# The number of seconds needed to 'unload' 1 unit of goods.
self.service_time_per_dem = 300 # seconds
def central_start_node(self, invert=False):
"""
Return a random starting node, with probability weighted by distance
from the centre of the extents, so that a central starting node is
likely.
Args:
invert (Optional bool): When True, a peripheral starting node is
most likely.
Returns:
int: a node index.
Examples:
>>> customers.central_start_node(invert=True)
42
"""
num_nodes = len(self.customers)
dist = np.empty((num_nodes, 1))
for idx_to in range(num_nodes):
dist[idx_to] = self._haversine(self.center.lon,
self.center.lat,
self.customers[idx_to].lon,
self.customers[idx_to].lat)
furthest = np.max(dist)
if invert:
prob = dist * 1.0 / sum(dist)
else:
prob = (furthest - dist * 1.0) / sum(furthest - dist)
indexes = np.array([range(num_nodes)])
start_node = np.random.choice(indexes.flatten(),
size=1,
replace=True,
p=prob.flatten())
return start_node[0]
def make_distance_mat(self, method='haversine'):
"""
Return a distance matrix and make it a member of Customers, using the
method given in the call. Currently only Haversine (GC distance) is
implemented, but Manhattan, or using a maps API could be added here.
Raises an AssertionError for all other methods.
Args:
method (Optional[str]): method of distance calculation to use. The
Haversine formula is the only method implemented.
Returns:
Numpy array of node to node distances.
Examples:
>>> dist_mat = customers.make_distance_mat(method='haversine')
>>> dist_mat = customers.make_distance_mat(method='manhattan')
AssertionError
"""
self.distmat = np.zeros((self.number, self.number))
methods = {'haversine': self._haversine}
assert(method in methods)
for frm_idx in range(self.number):
for to_idx in range(self.number):
if frm_idx != to_idx:
frm_c = self.customers[frm_idx]
to_c = self.customers[to_idx]
self.distmat[frm_idx, to_idx] = self._haversine(frm_c.lon,
frm_c.lat,
to_c.lon,
to_c.lat)
return(self.distmat)
def _haversine(self, lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth specified in decimal degrees of latitude and longitude.
https://en.wikipedia.org/wiki/Haversine_formula
Args:
lon1: longitude of pt 1,
lat1: latitude of pt 1,
lon2: longitude of pt 2,
lat2: latitude of pt 2
Returns:
the distance in km between pt1 and pt2
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (np.sin(dlat / 2) ** 2 + np.cos(lat1) *
np.cos(lat2) * np.sin(dlon / 2) ** 2)
c = 2 * np.arcsin(np.sqrt(a))
# 6367 km is the radius of the Earth
km = 6367 * c
return km
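    # A quick sanity-check sketch (illustrative only, assuming a Customers
    # instance named `customers`): the great-circle distance between London
    # (51.5074, -0.1278) and Paris (48.8566, 2.3522) computed with this formula
    # and the 6367 km radius comes out at roughly 343 km, close to the commonly
    # quoted ~344 km. Note the (lon, lat, lon, lat) argument order.
    #
    #   customers._haversine(-0.1278, 51.5074, 2.3522, 48.8566)   # ~343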
def get_total_demand(self):
"""
Return the total demand of all customers.
"""
return(sum([c.demand for c in self.customers]))
def return_dist_callback(self, **kwargs):
"""
Return a callback function for the distance matrix.
Args:
**kwargs: Arbitrary keyword arguments passed on to
make_distance_mat()
Returns:
function: dist_return(a,b) A function that takes the 'from' node
index and the 'to' node index and returns the distance in km.
"""
self.make_distance_mat(**kwargs)
def dist_return(a, b): return(self.distmat[a][b])
return dist_return
def return_dem_callback(self):
"""
Return a callback function that gives the demands.
Returns:
function: dem_return(a,b) A function that takes the 'from' node
index and the 'to' node index and returns the demand of the 'from' node.
"""
def dem_return(a, b): return(self.customers[a].demand)
return dem_return
def zero_depot_demands(self, depot):
"""
Zero out the demands and time windows of depot. The Depots do not have
demands or time windows so this function clears them.
Args:
depot (int): index of the stop to modify into a depot.
Examples:
>>> customers.zero_depot_demands(5)
>>> customers.customers[5].demand == 0
True
"""
start_depot = self.customers[depot]
self.customers[depot] = start_depot._replace(demand=0,
tw_open=None,
tw_close=None)
def make_service_time_call_callback(self):
"""
Return a callback function that provides the time spent servicing the
customer. Here it is proportional to the demand given by
self.service_time_per_dem, default 300 seconds per unit demand.
Returns:
function [service_time_return(a, b)]: A function that takes the from/a node
index and the to/b node index and returns the service time at a
"""
def service_time_return(a, b):
return(self.customers[a].demand * self.service_time_per_dem)
return service_time_return
def make_transit_time_callback(self, speed_kmph=10):
"""
Creates a callback function for transit time. Assuming an average
speed of speed_kmph
Args:
speed_kmph: the average speed in km/h
Returns:
function [tranit_time_return(a, b)]: A function that takes the
from/a node index and the to/b node index and returns the
transit time from a to b.
"""
def tranit_time_return(a, b):
return(self.distmat[a][b] / (speed_kmph * 1.0 / 60 ** 2))
return tranit_time_return
class Vehicles():
"""
A Class to create and hold vehicle information.
The Vehicles in a CVRPTW problem service the customers and belong to a
depot. The class Vehicles creates a list of named tuples describing the
Vehicles. The main characteristics are the vehicle capacity, fixed cost,
and cost per km. The fixed cost of using a certain type of vehicle can be
higher or lower than others. If a vehicle is used, i.e. this vehicle serves
at least one node, then this cost is added to the objective function.
Note:
If numpy arrays are given for capacity and cost, then they must be of
the same length, and the number of vehicles is inferred from them.
If scalars are given, the fleet is homogeneous, and the number of
vehicles is determined by number.
Args:
capacity (scalar or numpy array): The integer capacity of demand units.
cost (scalar or numpy array): The fixed cost of the vehicle.
number (Optional [int]): The number of vehicles in a homogeneous fleet.
"""
def __init__(self, capacity=100, cost=100, number=None):
Vehicle = namedtuple("Vehicle", ['index', 'capacity', 'cost'])
if number is None:
self.number = np.size(capacity)
else:
self.number = number
idxs = np.array(range(0, self.number))
if np.isscalar(capacity):
capacities = capacity * np.ones_like(idxs)
elif np.size(capacity) != self.number:
print('capacity is neither scalar, nor the same size as num!')
else:
capacities = capacity
if np.isscalar(cost):
costs = cost * np.ones_like(idxs)
elif np.size(cost) != self.number:
print(np.size(cost))
print('cost is neither scalar, nor the same size as num!')
else:
costs = cost
self.vehicles = [Vehicle(idx, capacity, cost) for idx, capacity, cost
in zip(idxs, capacities, costs)]
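    # A construction sketch (illustrative only, with made-up numbers): passing
    # arrays makes an inhomogeneous fleet whose size is inferred from the
    # arrays, while scalars plus `number` make a homogeneous one.
    #
    #   v1 = Vehicles(capacity=np.array([50, 100]), cost=np.array([100, 120]))   # 2 vehicles
    #   v2 = Vehicles(capacity=100, cost=100, number=5)                          # 5 identical vehicles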
def get_total_capacity(self):
return(sum([c.capacity for c in self.vehicles]))
def return_starting_callback(self, customers, sameStartFinish=False):
# create a different starting and finishing depot for each vehicle
self.starts = [int(customers.central_start_node()) for o in
range(self.number)]
if sameStartFinish:
self.ends = self.starts
else:
self.ends = [int(customers.central_start_node(invert=True)) for
o in range(self.number)]
# the depots will not have demands, so zero them.
for depot in self.starts:
customers.zero_depot_demands(depot)
for depot in self.ends:
customers.zero_depot_demands(depot)
def start_return(v): return(self.starts[v])
return start_return
def discrete_cmap(N, base_cmap=None):
"""
Create an N-bin discrete colormap from the specified input map
"""
# Note that if base_cmap is a string or None, you can simply do
# return plt.cm.get_cmap(base_cmap, N)
# The following works for string, None, or a colormap instance:
base = plt.cm.get_cmap(base_cmap)
color_list = base(np.linspace(0, 1, N))
cmap_name = base.name + str(N)
return base.from_list(cmap_name, color_list, N)
def vehicle_output_string(routing, plan):
"""
Return a string displaying the output of the routing instance and
assignment (plan).
Args:
routing (ortools.constraint_solver.pywrapcp.RoutingModel): routing.
plan (ortools.constraint_solver.pywrapcp.Assignment): the assignment.
Returns:
(string) plan_output: describing each vehicle's plan.
(List) dropped: list of dropped orders.
"""
dropped = []
for order in range(routing.Size()):
if (plan.Value(routing.NextVar(order)) == order):
dropped.append(str(order))
capacity_dimension = routing.GetDimensionOrDie("Capacity")
time_dimension = routing.GetDimensionOrDie("Time")
plan_output = ''
for route_number in range(routing.vehicles()):
order = routing.Start(route_number)
plan_output += 'Route {0}:'.format(route_number)
if routing.IsEnd(plan.Value(routing.NextVar(order))):
plan_output += ' Empty \n'
else:
while True:
load_var = capacity_dimension.CumulVar(order)
time_var = time_dimension.CumulVar(order)
plan_output += \
" {order} Load({load}) Time({tmin}, {tmax}) -> ".format(
order=order,
load=plan.Value(load_var),
tmin=str(timedelta(seconds=plan.Min(time_var))),
tmax=str(timedelta(seconds=plan.Max(time_var))))
if routing.IsEnd(order):
plan_output += ' EndRoute {0}. \n'.format(route_number)
break
order = plan.Value(routing.NextVar(order))
plan_output += "\n"
return(plan_output, dropped)
def build_vehicle_route(routing, plan, customers, veh_number):
"""
Build a route for a vehicle by starting at the start node and
continuing to the end node.
Args:
routing (ortools.constraint_solver.pywrapcp.RoutingModel): routing.
plan (ortools.constraint_solver.pywrapcp.Assignment): the assignment.
customers (Customers): the customers instance.
veh_number (int): index of the vehicle
Returns:
(List) route: indexes of the customers for vehicle veh_number
"""
veh_used = routing.IsVehicleUsed(plan, veh_number)
print('Vehicle {0} is used {1}'.format(veh_number, veh_used))
if veh_used:
route = []
node = routing.Start(veh_number) # Get the starting node index
route.append(customers.customers[routing.IndexToNode(node)])
while not routing.IsEnd(node):
route.append(customers.customers[routing.IndexToNode(node)])
node = plan.Value(routing.NextVar(node))
route.append(customers.customers[routing.IndexToNode(node)])
return route
else:
return None
def plot_vehicle_routes(veh_route, ax1, customers, vehicles):
"""
Plot the vehicle routes on matplotlib axis ax1.
Args:
veh_route (dict): a dictionary of routes keyed by vehicle idx.
ax1 (matplotlib.axes._subplots.AxesSubplot): Matplotlib axes
customers (Customers): the customers instance.
vehicles (Vehicles): the vehicles instance.
"""
veh_used = [v for v in veh_route if veh_route[v] is not None]
cmap = discrete_cmap(vehicles.number+2, 'nipy_spectral')
for veh_number in veh_used:
lats, lons = zip(*[(c.lat, c.lon) for c in veh_route[veh_number]])
lats = np.array(lats)
lons = np.array(lons)
s_dep = customers.customers[vehicles.starts[veh_number]]
s_fin = customers.customers[vehicles.ends[veh_number]]
ax1.annotate('v({veh}) S @ {node}'.format(
veh=veh_number,
node=vehicles.starts[veh_number]),
xy=(s_dep.lon, s_dep.lat),
xytext=(10, 10),
xycoords='data',
textcoords='offset points',
arrowprops=dict(
arrowstyle="->",
connectionstyle="angle3,angleA=90,angleB=0",
shrinkA=0.05),
)
ax1.annotate('v({veh}) F @ {node}'.format(
veh=veh_number,
node=vehicles.ends[veh_number]),
xy=(s_fin.lon, s_fin.lat),
xytext=(10, -20),
xycoords='data',
textcoords='offset points',
arrowprops=dict(
arrowstyle="->",
connectionstyle="angle3,angleA=-90,angleB=0",
shrinkA=0.05),
)
ax1.plot(lons, lats, 'o', mfc=cmap(veh_number+1))
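        # Draw an arrow between each pair of consecutive stops to show the
        # direction of travel along the route.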
ax1.quiver(lons[:-1], lats[:-1],
lons[1:]-lons[:-1], lats[1:]-lats[:-1],
scale_units='xy', angles='xy', scale=1,
color=cmap(veh_number+1))
def main():
    # Create a set of customer (and depot) stops.
customers = Customers(num_stops=50, min_demand=1,
max_demand=15, box_size=40,
min_tw=3, max_tw=6)
# Create callback fns for distances, demands, service and transit-times.
dist_fn = customers.return_dist_callback()
dem_fn = customers.return_dem_callback()
serv_time_fn = customers.make_service_time_call_callback()
transit_time_fn = customers.make_transit_time_callback()
def tot_time_fn(a, b):
"""
The time function we want is both transit time and service time.
"""
return serv_time_fn(a, b) + transit_time_fn(a, b)
    # Create a list of inhomogeneous vehicle capacities as integer units.
capacity = [50, 75, 100, 125, 150, 175, 200, 250]
    # Create a list of inhomogeneous fixed vehicle costs.
cost = [int(100 + 2 * np.sqrt(c)) for c in capacity]
# Create a set of vehicles, the number set by the length of capacity.
vehicles = Vehicles(capacity=capacity, cost=cost)
    # Check that the problem is feasible: if the fleet does not have enough
    # capacity to cover the total demand, there is no point in going further.
assert(customers.get_total_demand() < vehicles.get_total_capacity())
# Set the starting nodes, and create a callback fn for the starting node.
start_fn = vehicles.return_starting_callback(customers,
sameStartFinish=False)
# Set model parameters
model_parameters = pywrapcp.RoutingModel.DefaultModelParameters()
    # The solver parameters can be accessed from the model parameters. For example:
# model_parameters.solver_parameters.CopyFrom(
# pywrapcp.Solver.DefaultSolverParameters())
# model_parameters.solver_parameters.trace_propagation = True
# Make the routing model instance.
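    # Note: this is the older OR-tools API, where the node count, vehicle count
    # and start/end depot lists are passed straight to RoutingModel; newer
    # releases route these through a RoutingIndexManager instead.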
routing = pywrapcp.RoutingModel(customers.number, # int number
vehicles.number, # int number
vehicles.starts, # List of int start depot
vehicles.ends, # List of int end depot
model_parameters)
parameters = routing.DefaultSearchParameters()
# Setting first solution heuristic (cheapest addition).
parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    # Disable Large Neighborhood Search (this is the default behaviour).
parameters.local_search_operators.use_path_lns = False
parameters.local_search_operators.use_inactive_lns = False
    # Routing: forbid use of the TSPOpt neighborhood.
parameters.local_search_operators.use_tsp_opt = False
parameters.time_limit_ms = 10 * 1000 # 10 seconds
parameters.use_light_propagation = False
# parameters.log_search = True
    # Set the cost function (distance callback) for each arc, homogeneous for
# all vehicles.
routing.SetArcCostEvaluatorOfAllVehicles(dist_fn)
    # Set the fixed cost of each vehicle; these are not homogeneous.
for veh in vehicles.vehicles:
routing.SetFixedCostOfVehicle(veh.cost, int(veh.index))
# Add a dimension for vehicle capacities
null_capacity_slack = 0
routing.AddDimensionWithVehicleCapacity(dem_fn, # demand callback
null_capacity_slack,
capacity, # capacity array
True,
"Capacity")
# Add a dimension for time and a limit on the total time_horizon
routing.AddDimension(tot_time_fn, # total time function callback
customers.time_horizon,
customers.time_horizon,
True,
"Time")
time_dimension = routing.GetDimensionOrDie("Time")
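    # Constrain each customer's arrival time to its time window; nodes without
    # a window (tw_open is None), typically the depots, are left unconstrained.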
for cust in customers.customers:
if cust.tw_open is not None:
time_dimension.CumulVar(routing.NodeToIndex(cust.index)).SetRange(
cust.tw_open.seconds,
cust.tw_close.seconds)
"""
To allow the dropping of orders, we add disjunctions to all the customer
nodes. Each disjunction is a list of 1 index, which allows that customer to
be active or not, with a penalty if not. The penalty should be larger
than the cost of servicing that customer, or it will always be dropped!
"""
# To add disjunctions just to the customers, make a list of non-depots.
non_depot = set(range(customers.number))
non_depot.difference_update(vehicles.starts)
non_depot.difference_update(vehicles.ends)
penalty = 400000 # The cost for dropping a node from the plan.
nodes = [routing.AddDisjunction([int(c)], penalty) for c in non_depot]
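    # Each disjunction wraps a single customer node, so the solver may leave
    # that node out of the solution at a cost of `penalty`; the returned
    # disjunction indices are not needed afterwards.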
# This is how you would implement partial routes if you already knew part
# of a feasible solution for example:
# partial = np.random.choice(list(non_depot), size=(4,5), replace=False)
# routing.CloseModel()
# partial_list = [partial[0,:].tolist(),
# partial[1,:].tolist(),
# partial[2,:].tolist(),
# partial[3,:].tolist(),
# [],[],[],[]]
# print(routing.ApplyLocksToAllVehicles(partial_list, False))
# Solve the problem !
assignment = routing.SolveWithParameters(parameters)
# The rest is all optional for saving, printing or plotting the solution.
if assignment:
# save the assignment, (Google Protobuf format)
save_file_base = os.path.realpath(__file__).split('.')[0]
if routing.WriteAssignment(save_file_base + '_assignment.ass'):
            print('successfully wrote assignment to file ' +
save_file_base + '_assignment.ass')
print('The Objective Value is {0}'.format(assignment.ObjectiveValue()))
plan_output, dropped = vehicle_output_string(routing, assignment)
print(plan_output)
print('dropped nodes: ' + ', '.join(dropped))
# you could print debug information like this:
# print(routing.DebugOutputAssignment(assignment, 'Capacity'))
vehicle_routes = {}
for veh in range(vehicles.number):
vehicle_routes[veh] = build_vehicle_route(routing, assignment,
customers, veh)
# Plotting of the routes in matplotlib.
fig = plt.figure()
ax = fig.add_subplot(111)
# Plot all the nodes as black dots.
clon, clat = zip(*[(c.lon, c.lat) for c in customers.customers])
ax.plot(clon, clat, 'k.')
# plot the routes as arrows
plot_vehicle_routes(vehicle_routes, ax, customers, vehicles)
else:
print('No assignment')
if __name__ == '__main__':
main()
| 40.814516
| 84
| 0.594579
|
51eac4448ea07048508752db882e075b20469407
| 16,564
|
py
|
Python
|
client/env.py
|
FooJiaYin/carla-rl
|
46abf25a57571ce8f64dfd86bd8eb6547281cf49
|
[
"MIT"
] | 80
|
2019-01-30T13:14:11.000Z
|
2022-02-14T08:51:01.000Z
|
client/env.py
|
renweiya/carla-rl
|
e3ea0df450fe9716c6f1d2e6fbaec05009fb7da8
|
[
"MIT"
] | 8
|
2019-02-03T18:21:36.000Z
|
2020-10-23T00:51:30.000Z
|
client/env.py
|
renweiya/carla-rl
|
e3ea0df450fe9716c6f1d2e6fbaec05009fb7da8
|
[
"MIT"
] | 27
|
2019-03-15T08:22:19.000Z
|
2022-03-20T05:37:48.000Z
|
import time
import os
import cv2
import skvideo.io
import numpy as np
import rewards
import experiment_suites
import carla.driving_benchmark.experiment_suites as experiment_suites_benchmark
from carla.client import VehicleControl
from carla.planner.planner import Planner
from carla.settings import CarlaSettings
from carla.client import CarlaClient
from carla.tcp import TCPConnectionError
from observation_utils import CameraException
import gym
from carla_logger import get_carla_logger
# TODO: Remove this before open-sourcing environment
class CarlaEnv(object):
'''
An OpenAI Gym Environment for CARLA.
'''
def __init__(self,
obs_converter,
action_converter,
env_id,
random_seed=0,
exp_suite_name='TrainingSuite',
reward_class_name='RewardCarla',
host='127.0.0.1',
port=2000,
city_name='Town01',
subset=None,
video_every=100,
video_dir='./video/',
distance_for_success=2.0,
benchmark=False):
self.logger = get_carla_logger()
self.logger.info('Environment {} running in port {}'.format(env_id, port))
self.host, self.port = host, port
self.id = env_id
self._obs_converter = obs_converter
self.observation_space = obs_converter.get_observation_space()
self._action_converter = action_converter
self.action_space = self._action_converter.get_action_space()
if benchmark:
self._experiment_suite = getattr(experiment_suites_benchmark, exp_suite_name)(city_name)
else:
self._experiment_suite = getattr(experiment_suites, exp_suite_name)(city_name, subset)
self._reward = getattr(rewards, reward_class_name)()
self._experiments = self._experiment_suite.get_experiments()
self.subset = subset
self._make_carla_client(host, port)
self._distance_for_success = distance_for_success
self._planner = Planner(city_name)
self.done = False
self.last_obs = None
self.last_distance_to_goal = None
self.last_direction = None
self.last_measurements = None
np.random.seed(random_seed)
self.video_every = video_every
self.video_dir = video_dir
self.video_writer = None
self._success = False
self._failure_timeout = False
self._failure_collision = False
self.benchmark = benchmark
self.benchmark_index = [0, 0, 0]
try:
if not os.path.isdir(self.video_dir):
os.makedirs(self.video_dir)
except OSError:
pass
self.steps = 0
self.num_episodes = 0
def step(self, action):
if self.done:
raise ValueError('self.done should always be False when calling step')
while True:
try:
# Send control
control = self._action_converter.action_to_control(action, self.last_measurements)
self._client.send_control(control)
# Gather the observations (including measurements, sensor and directions)
measurements, sensor_data = self._client.read_data()
self.last_measurements = measurements
current_timestamp = measurements.game_timestamp
distance_to_goal = self._get_distance_to_goal(measurements, self._target)
self.last_distance_to_goal = distance_to_goal
directions = self._get_directions(measurements.player_measurements.transform,
self._target)
self.last_direction = directions
obs = self._obs_converter.convert(measurements, sensor_data, directions, self._target, self.id)
if self.video_writer is not None and self.steps % 2 == 0:
self._raster_frame(sensor_data, measurements, directions, obs)
self.last_obs = obs
except CameraException:
self.logger.debug('Camera Exception in step()')
obs = self.last_obs
distance_to_goal = self.last_distance_to_goal
current_timestamp = self.last_measurements.game_timestamp
except TCPConnectionError as e:
self.logger.debug('TCPConnectionError inside step(): {}'.format(e))
self.done = True
return self.last_obs, 0.0, True, {'carla-reward': 0.0}
break
# Check if terminal state
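        # game_timestamp is in milliseconds while the experiment time-out is in
        # seconds, hence the factor of 1000 (an assumption based on the CARLA 0.8 API).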
timeout = (current_timestamp - self._initial_timestamp) > (self._time_out * 1000)
collision, _ = self._is_collision(measurements)
success = distance_to_goal < self._distance_for_success
if timeout:
self.logger.debug('Timeout')
self._failure_timeout = True
if collision:
self.logger.debug('Collision')
self._failure_collision = True
if success:
self.logger.debug('Success')
self.done = timeout or collision or success
# Get the reward
env_state = {'timeout': timeout, 'collision': collision, 'success': success}
reward = self._reward.get_reward(measurements, self._target, self.last_direction, control, env_state)
# Additional information
info = {'carla-reward': reward}
self.steps += 1
return obs, reward, self.done, info
def reset(self):
# Loop forever due to TCPConnectionErrors
while True:
try:
self._reward.reset_reward()
self.done = False
if self.video_writer is not None:
try:
self.video_writer.close()
except Exception as e:
self.logger.debug('Error when closing video writer in reset')
self.logger.error(e)
self.video_writer = None
if self.benchmark:
end_indicator = self._new_episode_benchmark()
if end_indicator is False:
return False
else:
self._new_episode()
# Hack: Try sleeping so that the server is ready. Reduces the number of TCPErrors
time.sleep(4)
# measurements, sensor_data = self._client.read_data()
self._client.send_control(VehicleControl())
measurements, sensor_data = self._client.read_data()
self._initial_timestamp = measurements.game_timestamp
self.last_measurements = measurements
self.last_distance_to_goal = self._get_distance_to_goal(measurements, self._target)
directions = self._get_directions(measurements.player_measurements.transform, self._target)
self.last_direction = directions
obs = self._obs_converter.convert(measurements, sensor_data, directions, self._target, self.id)
self.last_obs = obs
self.done = False
self._success = False
self._failure_timeout = False
self._failure_collision = False
return obs
except CameraException:
self.logger.debug('Camera Exception in reset()')
continue
except TCPConnectionError as e:
self.logger.debug('TCPConnectionError in reset()')
self.logger.error(e)
# Disconnect and reconnect
self.disconnect()
time.sleep(5)
self._make_carla_client(self.host, self.port)
def disconnect(self):
if self.video_writer is not None:
try:
self.video_writer.close()
except Exception as e:
self.logger.debug('Error when closing video writer in disconnect')
self.logger.error(e)
self.video_writer = None
self._client.disconnect()
def _raster_frame(self, sensor_data, measurements, directions, obs):
frame = sensor_data['CameraRGB'].data.copy()
cv2.putText(frame, text='Episode number: {:,}'.format(self.num_episodes-1),
org=(50, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0,
color=[0, 0, 0], thickness=2)
cv2.putText(frame, text='Environment steps: {:,}'.format(self.steps),
org=(50, 80), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0,
color=[0, 0, 0], thickness=2)
REACH_GOAL = 0.0
GO_STRAIGHT = 5.0
TURN_RIGHT = 4.0
TURN_LEFT = 3.0
LANE_FOLLOW = 2.0
if np.isclose(directions, REACH_GOAL):
dir_str = 'REACH GOAL'
elif np.isclose(directions, GO_STRAIGHT):
dir_str = 'GO STRAIGHT'
elif np.isclose(directions, TURN_RIGHT):
dir_str = 'TURN RIGHT'
elif np.isclose(directions, TURN_LEFT):
dir_str = 'TURN LEFT'
elif np.isclose(directions, LANE_FOLLOW):
dir_str = 'LANE FOLLOW'
else:
raise ValueError(directions)
cv2.putText(frame, text='Direction: {}'.format(dir_str),
org=(50, 110), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0,
color=[0, 0, 0], thickness=2)
cv2.putText(frame, text='Speed: {:.02f}'.format( measurements.player_measurements.forward_speed * 3.6),
org=(50, 140), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0,
color=[0, 0, 0], thickness=2)
cv2.putText(frame, text='rel_x: {:.02f}, rel_y: {:.02f}'.format(obs['v'][-2].item(), obs['v'][-1].item()),
org=(50, 170), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0,
color=[0, 0, 0], thickness=2)
self.video_writer.writeFrame(frame)
def _get_distance_to_goal(self, measurements, target):
current_x = measurements.player_measurements.transform.location.x
current_y = measurements.player_measurements.transform.location.y
distance_to_goal = np.linalg.norm(np.array([current_x, current_y]) - \
np.array([target.location.x, target.location.y]))
return distance_to_goal
def _new_episode(self):
experiment_idx = np.random.randint(0, len(self._experiments))
experiment = self._experiments[experiment_idx]
exp_settings = experiment.conditions
exp_settings.set(QualityLevel='Low')
positions = self._client.load_settings(exp_settings).player_start_spots
idx_pose = np.random.randint(0, len(experiment.poses))
pose = experiment.poses[idx_pose]
self.logger.info('Env {} gets experiment {} with pose {}'.format(self.id, experiment_idx, idx_pose))
start_index = pose[0]
end_index = pose[1]
self._client.start_episode(start_index)
self._time_out = self._experiment_suite.calculate_time_out(
self._get_shortest_path(positions[start_index], positions[end_index]))
self._target = positions[end_index]
self._episode_name = str(experiment.Conditions.WeatherId) + '_' \
+ str(experiment.task) + '_' + str(start_index) \
+ '_' + str(end_index)
if ((self.num_episodes % self.video_every) == 0) and (self.id == 0):
video_path = os.path.join(self.video_dir, '{:08d}_'.format(self.num_episodes) + self._episode_name + '.mp4')
self.logger.info('Writing video at {}'.format(video_path))
self.video_writer = skvideo.io.FFmpegWriter(video_path, inputdict={'-r': '30'}, outputdict={'-r': '30'})
else:
self.video_writer = None
self.num_episodes += 1
def _new_episode_benchmark(self):
experiment_idx_past = self.benchmark_index[0]
pose_idx_past = self.benchmark_index[1]
repetition_idx_past = self.benchmark_index[2]
experiment_past = self._experiments[experiment_idx_past]
poses_past = experiment_past.poses[0:]
repetition_past = experiment_past.repetitions
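        # Advance the (experiment, pose, repetition) counters like an odometer:
        # exhaust the repetitions of a pose, then the poses of an experiment,
        # then the experiments; return False once everything has been run.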
if repetition_idx_past == repetition_past:
if pose_idx_past == len(poses_past) - 1:
if experiment_idx_past == len(self._experiments) - 1:
return False
else:
experiment = self._experiments[experiment_idx_past + 1]
pose = experiment.poses[0:][0]
self.benchmark_index = [experiment_idx_past + 1, 0, 1]
else:
experiment = experiment_past
pose = poses_past[pose_idx_past + 1]
self.benchmark_index = [experiment_idx_past, pose_idx_past + 1, 1]
else:
experiment = experiment_past
pose = poses_past[pose_idx_past]
self.benchmark_index = [experiment_idx_past, pose_idx_past, repetition_idx_past + 1]
exp_settings = experiment.Conditions
exp_settings.set(QualityLevel='Low')
positions = self._client.load_settings(exp_settings).player_start_spots
start_index = pose[0]
end_index = pose[1]
self._client.start_episode(start_index)
self._time_out = self._experiment_suite.calculate_time_out(
self._get_shortest_path(positions[start_index], positions[end_index]))
self._target = positions[end_index]
self._episode_name = str(experiment.Conditions.WeatherId) + '_' \
+ str(experiment.task) + '_' + str(start_index) \
+ '_' + str(end_index)
if ((self.num_episodes % self.video_every) == 0) and (self.id == 0):
video_path = os.path.join(self.video_dir, '{:08d}_'.format(self.num_episodes) + self._episode_name + '.mp4')
self.logger.info('Writing video at {}'.format(video_path))
self.video_writer = skvideo.io.FFmpegWriter(video_path, inputdict={'-r': '30'}, outputdict={'-r': '30'})
else:
self.video_writer = None
self.num_episodes += 1
def _get_directions(self, current_point, end_point):
directions = self._planner.get_next_command(
(current_point.location.x,
current_point.location.y, 0.22),
(current_point.orientation.x,
current_point.orientation.y,
current_point.orientation.z),
(end_point.location.x, end_point.location.y, 0.22),
(end_point.orientation.x, end_point.orientation.y, end_point.orientation.z))
return directions
def _get_shortest_path(self, start_point, end_point):
return self._planner.get_shortest_path_distance(
[ start_point.location.x, start_point.location.y, 0.22], [
start_point.orientation.x, start_point.orientation.y, 0.22], [
end_point.location.x, end_point.location.y, end_point.location.z], [
end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])
@staticmethod
def _is_collision(measurements):
c = 0
c += measurements.player_measurements.collision_vehicles
c += measurements.player_measurements.collision_pedestrians
c += measurements.player_measurements.collision_other
sidewalk_intersection = measurements.player_measurements.intersection_offroad
otherlane_intersection = measurements.player_measurements.intersection_otherlane
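        # Treat any recorded collision, more than 1% sidewalk intersection, or
        # more than 90% other-lane intersection as a collision failure.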
return (c > 1e-9) or (sidewalk_intersection > 0.01) or (otherlane_intersection > 0.9), c
def _make_carla_client(self, host, port):
while True:
try:
self.logger.info("Trying to make client on port {}".format(port))
self._client = CarlaClient(host, port, timeout=100)
self._client.connect()
self._client.load_settings(CarlaSettings(QualityLevel='Low'))
self._client.start_episode(0)
self.logger.info("Successfully made client on port {}".format(port))
break
except TCPConnectionError as error:
self.logger.debug('Got TCPConnectionError..sleeping for 1')
self.logger.error(error)
time.sleep(1)
| 42.255102
| 120
| 0.608549
|
c5d538dfa54dd3521805b015c57b23cc35317c92
| 1,901
|
py
|
Python
|
tests/requirements.py
|
maparent/alembic
|
a294f8cc3f2e5fc2cad048bc4ce27c57554e2688
|
[
"MIT"
] | null | null | null |
tests/requirements.py
|
maparent/alembic
|
a294f8cc3f2e5fc2cad048bc4ce27c57554e2688
|
[
"MIT"
] | null | null | null |
tests/requirements.py
|
maparent/alembic
|
a294f8cc3f2e5fc2cad048bc4ce27c57554e2688
|
[
"MIT"
] | null | null | null |
from alembic.testing.requirements import SuiteRequirements
from alembic.testing import exclusions
from alembic import util
class DefaultRequirements(SuiteRequirements):
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.skip_if([
"sqlite",
"firebird"
], "no schema support")
@property
def no_referential_integrity(self):
"""test will fail if referential integrity is enforced"""
return exclusions.fails_on_everything_except("sqlite")
@property
def non_native_boolean(self):
"""test will fail if native boolean is provided"""
return exclusions.fails_if(
exclusions.LambdaPredicate(
lambda config: config.db.dialect.supports_native_boolean
)
)
@property
def no_fk_names(self):
"""foreign key constraints *never* have names in the DB"""
return exclusions.only_if(
lambda config: exclusions.against(config, "sqlite")
and not util.sqla_100
)
@property
def check_constraints_w_enforcement(self):
return exclusions.fails_on("mysql")
@property
def unnamed_constraints(self):
"""constraints without names are supported."""
return exclusions.only_on(['sqlite'])
@property
def fk_names(self):
"""foreign key constraints always have names in the DB"""
return exclusions.fails_on('sqlite')
@property
def reflects_unique_constraints_unambiguously(self):
return exclusions.fails_on("mysql")
@property
def reflects_pk_names(self):
"""Target driver reflects the name of primary key constraints."""
return exclusions.fails_on_everything_except(
'postgresql', 'oracle', 'mssql', 'sybase')
| 28.373134
| 73
| 0.654392
|
e882d02d747ed0bbacacc40d7ee92a58cec46f13
| 3,883
|
py
|
Python
|
metrics/pearsonr/pearsonr.py
|
shirte/datasets
|
16ab1566d963f521a1a3df9c41a20f599078bb2e
|
[
"Apache-2.0"
] | 2
|
2021-08-28T06:48:02.000Z
|
2021-08-28T23:18:34.000Z
|
metrics/pearsonr/pearsonr.py
|
shirte/datasets
|
16ab1566d963f521a1a3df9c41a20f599078bb2e
|
[
"Apache-2.0"
] | null | null | null |
metrics/pearsonr/pearsonr.py
|
shirte/datasets
|
16ab1566d963f521a1a3df9c41a20f599078bb2e
|
[
"Apache-2.0"
] | 4
|
2021-07-25T17:09:39.000Z
|
2022-02-12T03:30:08.000Z
|
# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pearson correlation coefficient metric."""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Positive correlations imply that as x increases, so does y. Negative
correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: Predicted labels, as returned by a model.
references: Ground truth labels.
Returns:
pearsonr: Pearson correlation coefficient.
Examples:
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(references=[0, 1], predictions=[0, 1])
>>> print(results)
{'pearsonr': 1.0}
"""
_CITATION = r"""\
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}
),
reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
)
def _compute(self, predictions, references):
return {
"pearsonr": pearsonr(references, predictions)[0].item(),
}
| 39.222222
| 110
| 0.685295
|
4c93aae46392f14e01c7daedcc4b49bf6b719247
| 4,031
|
py
|
Python
|
cryptocurrencies/script.py
|
szypkiwonsz/Cryptocurrency-Script
|
e9ec7803fd78c49a598bfb3c89c800c347d5eb34
|
[
"MIT"
] | null | null | null |
cryptocurrencies/script.py
|
szypkiwonsz/Cryptocurrency-Script
|
e9ec7803fd78c49a598bfb3c89c800c347d5eb34
|
[
"MIT"
] | null | null | null |
cryptocurrencies/script.py
|
szypkiwonsz/Cryptocurrency-Script
|
e9ec7803fd78c49a598bfb3c89c800c347d5eb34
|
[
"MIT"
] | 1
|
2021-09-29T18:34:11.000Z
|
2021-09-29T18:34:11.000Z
|
import click
from cache_handler import CacheHandler
from data_exporters import JsonExporter, CsvExporter
from data_loaders import DatabaseDataLoader
from query_handler import AveragePriceHandler, ConsecutivePriceIncreaseHandler
from utils import date_with_last_day_of_month, file_with_extension
from validators import validate_start_date, validate_end_date
@click.group()
def cli():
pass
@cli.command(help='Shows average price of currency by month for given period.')
@click.option('--start_date', nargs=1, required=True, type=click.DateTime(formats=['%Y-%m']),
callback=validate_start_date)
@click.option('--end_date', nargs=1, required=True, type=click.DateTime(formats=['%Y-%m']), callback=validate_end_date)
@click.option('--coin', nargs=1, type=str, default='btc-bitcoin')
def average_price_by_month(start_date, end_date, coin):
temp_cache_handler = CacheHandler()
temp_cache_handler.load_data_into_database_if_needed(start_date, date_with_last_day_of_month(end_date), coin)
temp_average_price_handler = AveragePriceHandler()
click.echo(temp_average_price_handler.get_average_price_by_month_from_time_period(start_date, end_date, coin))
@cli.command(help='Finds the longest consecutive period in which price was increasing.')
@click.option('--start_date', nargs=1, required=True, type=click.DateTime(formats=['%Y-%m-%d']),
callback=validate_start_date)
@click.option('--end_date', nargs=1, required=True, type=click.DateTime(formats=['%Y-%m-%d']),
callback=validate_end_date)
@click.option('--coin', nargs=1, type=str, default='btc-bitcoin')
def consecutive_increase(start_date, end_date, coin):
temp_cache_handler = CacheHandler()
temp_cache_handler.load_data_into_database_if_needed(start_date, end_date, coin)
temp_consecutive_price_increase_handler = ConsecutivePriceIncreaseHandler()
# it is possible to return multiple results (the longest not the greatest result)
longest_price_increases = temp_consecutive_price_increase_handler.get_longest_consecutive_price_increases_period(
start_date, end_date, coin).to_dict('records')
for record in longest_price_increases:
click.echo(temp_consecutive_price_increase_handler.get_longest_consecutive_price_increase_as_msg(record))
@cli.command(help='Export data for given period in one of selected format csv or json.')
@click.option('--start_date', nargs=1, required=True, type=click.DateTime(formats=['%Y-%m-%d']),
callback=validate_start_date)
@click.option('--end_date', nargs=1, required=True, type=click.DateTime(formats=['%Y-%m-%d']),
callback=validate_end_date)
@click.option('--coin', nargs=1, type=str, default='btc-bitcoin')
@click.option('--format_type', nargs=1, type=click.Choice(['json', 'csv']), required=True)
@click.option('--file', nargs=1, type=click.Path(), required=True)
def export(start_date, end_date, coin, format_type, file):
temp_cache_handler = CacheHandler()
temp_cache_handler.load_data_into_database_if_needed(start_date, end_date, coin)
temp_database_data_loader = DatabaseDataLoader()
if format_type == 'json':
temp_database_data_loader.load_data_from_database(start_date, end_date, coin)
temp_database_data_loader.modify_data()
temp_json_exporter = JsonExporter()
temp_json_exporter.export_data_into_json(file_with_extension(file, format_type), temp_database_data_loader.data)
click.echo(f'The data was correctly exported to the {file_with_extension(file, format_type)} file')
elif format_type == 'csv':
temp_database_data_loader.load_data_from_database(start_date, end_date, coin)
temp_database_data_loader.modify_data()
temp_csv_exporter = CsvExporter()
temp_csv_exporter.export_data_into_csv(file_with_extension(file, format_type), temp_database_data_loader.data)
click.echo(f'The data was correctly exported to the {file_with_extension(file, format_type)} file')
if __name__ == '__main__':
cli()
| 55.219178
| 120
| 0.767303
|
88548e2a1b32f353bf3685116e96346727b4d4da
| 741
|
py
|
Python
|
homeassistant/components/browser.py
|
hcchu/home-assistant-clone
|
dbc91c1d48c6570764bbaa58467aa4dc87f2186b
|
[
"MIT"
] | 1
|
2019-05-19T01:51:57.000Z
|
2019-05-19T01:51:57.000Z
|
homeassistant/components/browser.py
|
hcchu/home-assistant-clone
|
dbc91c1d48c6570764bbaa58467aa4dc87f2186b
|
[
"MIT"
] | null | null | null |
homeassistant/components/browser.py
|
hcchu/home-assistant-clone
|
dbc91c1d48c6570764bbaa58467aa4dc87f2186b
|
[
"MIT"
] | 1
|
2022-02-12T23:56:40.000Z
|
2022-02-12T23:56:40.000Z
|
"""
homeassistant.components.browser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to launch a webbrowser on the host machine.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/browser/
"""
DOMAIN = "browser"
SERVICE_BROWSE_URL = "browse_url"
def setup(hass, config):
""" Listen for browse_url events and open
the url in the default webbrowser. """
import webbrowser
hass.services.register(DOMAIN, SERVICE_BROWSE_URL,
lambda service:
webbrowser.open(
service.data.get(
'url', 'https://www.google.com')))
return True
| 26.464286
| 75
| 0.588394
|
e6ab9ed706dc3a14beb2728f6cde95abdcfa7885
| 377
|
py
|
Python
|
codewars/8kyu/countSheep.py
|
jglez/code-solutions
|
92e9f8f249e238f94e01db71663eac4c82b1a0ef
|
[
"MIT"
] | null | null | null |
codewars/8kyu/countSheep.py
|
jglez/code-solutions
|
92e9f8f249e238f94e01db71663eac4c82b1a0ef
|
[
"MIT"
] | null | null | null |
codewars/8kyu/countSheep.py
|
jglez/code-solutions
|
92e9f8f249e238f94e01db71663eac4c82b1a0ef
|
[
"MIT"
] | null | null | null |
"""
If you can't sleep, just count sheep!!
Task:
Given a non-negative integer, 3 for example, return a string with a murmur: "1 sheep...2 sheep...3 sheep...".
Input will always be valid, i.e. no negative integers.
"""
def count_sheep(n):
result = ""
for num in range(1, n + 1):
result = result + str(num) + " sheep..."
return result
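# A minimal equivalent one-liner (not part of the original solution):
# "".join("{0} sheep...".format(i) for i in range(1, n + 1))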
| 20.944444
| 109
| 0.591512
|
2b34a91ff45c200666a7d4db6adf98412397b41b
| 1,401
|
py
|
Python
|
python_matplotlib_2/python_matplotlib_2.py
|
Yash0411/SDL-Assignments-TE-Comp
|
95e1d4214d8cc810a244da669bf0e58646bab31d
|
[
"MIT"
] | null | null | null |
python_matplotlib_2/python_matplotlib_2.py
|
Yash0411/SDL-Assignments-TE-Comp
|
95e1d4214d8cc810a244da669bf0e58646bab31d
|
[
"MIT"
] | null | null | null |
python_matplotlib_2/python_matplotlib_2.py
|
Yash0411/SDL-Assignments-TE-Comp
|
95e1d4214d8cc810a244da669bf0e58646bab31d
|
[
"MIT"
] | null | null | null |
# TECOB220
# Yash Morankar
import csv
import matplotlib.pyplot as plt
#============== Write data into csv =================
# for Bar Plot
heading = ["Country_Name","GOLD MEDALS","SILVER MEDALS","BRONZE MEDALS"]
entry1 = ["Aus",120,60,30]
entry2 = ["Ind",110,70,40]
entry3 = ["Eng",90,60,50]
data = [heading, entry1, entry2, entry3]
with open("Game_medal.csv",'w') as f:
writer = csv.writer(f,lineterminator="\n")
writer.writerows(data)
#============== Plotting Graph =================
# Bar Chart
Country = []
value1 = []
value2 = []
value3 = []
with open("Game_medal.csv",'r') as f:
reader = csv.reader(f)
heading = next(reader)
for row in reader:
Country.append(list(row)[0])
value1.append(int(list(row)[1]))
value2.append(int(list(row)[2]))
value3.append(int(list(row)[3]))
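# Offset each country's three bars by 0.2 so the gold, silver and bronze counts
# sit side by side as a grouped bar chart.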
xAxis1 = [i - 0.2 for i, _ in enumerate(Country)]
xAxis2 = [i for i, _ in enumerate(Country)]
xAxis3 = [i + 0.2 for i, _ in enumerate(Country)]
plt.bar(xAxis1, value1, width=0.2, color='skyblue')
plt.bar(xAxis2, value2, width=0.2, color='orange')
plt.bar(xAxis3, value3, width=0.2, color='green')
plt.title('Olympics 2018', fontsize=16, color='magenta')
plt.xlabel('Nations', fontsize=14, color='cyan')
plt.ylabel('Medals', fontsize=14, color='cyan')
plt.xticks([i for i, _ in enumerate(Country)], Country)
plt.legend(heading[1:], loc ="upper right")
plt.show()
| 28.02
| 72
| 0.630978
|
60899455bf8d612cc13a4bf1f03203e8e9261876
| 434
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/densitymapbox/colorbar/_tickvalssrc.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/densitymapbox/colorbar/_tickvalssrc.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/densitymapbox/colorbar/_tickvalssrc.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="tickvalssrc", parent_name="densitymapbox.colorbar", **kwargs
):
super(TickvalssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| 31
| 87
| 0.668203
|
7c22519174c2da49bbaff229d6765cf49a141014
| 3,198
|
py
|
Python
|
tests/test_builder_security_schemes.py
|
tabebqena/flask-open-spec
|
ee1fd9cd349e46e1d8295fc2799898731392af6a
|
[
"MIT"
] | null | null | null |
tests/test_builder_security_schemes.py
|
tabebqena/flask-open-spec
|
ee1fd9cd349e46e1d8295fc2799898731392af6a
|
[
"MIT"
] | null | null | null |
tests/test_builder_security_schemes.py
|
tabebqena/flask-open-spec
|
ee1fd9cd349e46e1d8295fc2799898731392af6a
|
[
"MIT"
] | null | null | null |
from ..open_oas.builder.builder import OasBuilder
from unittest import TestCase
from ..open_oas.decorators import (
Deferred,
api_key_security_schema,
api_basic_security_scheme,
api_bearer_security_scheme,
)
class TestData(TestCase):
basic_auth_data = {
"components": {
"securitySchemes": {
"basicAuth": {"type": "http", "scheme": "basic"}
}
}
}
bearer_auth_data = {
"components": {
"securitySchemes": {
"bearerAuth": {
"type": "http",
"scheme": "bearer",
"bearerFormat": "JWT",
},
}
}
}
api_key_auth_data = {
"components": {
"securitySchemes": {
"ApiKeyAuth": {
"type": "apiKey",
"in": "header",
"name": "X-API-KEY",
},
}
}
}
def test_basic_auth(self):
self.builder = OasBuilder(
self.basic_auth_data,
)
data = self.builder.get_data()
self.assertEqual(
data.get("components", {}).get("securitySchemes"),
self.basic_auth_data.get("components", {}).get("securitySchemes"),
)
def test_basic_auth_decorator(self):
api_basic_security_scheme("basicAuth")
self.builder = OasBuilder()
data = self.builder.get_data()
self.assertEqual(
data.get("components", {}).get("securitySchemes"),
self.basic_auth_data.get("components", {}).get("securitySchemes"),
)
def test_bearer_auth(self):
self.builder = OasBuilder(
self.bearer_auth_data,
)
data = self.builder.get_data()
self.assertEqual(
data.get("components", {}).get("securitySchemes"),
self.bearer_auth_data.get("components", {}).get("securitySchemes"),
)
def test_bearer_auth_decorator(self):
api_bearer_security_scheme("bearerAuth", bearer_format="JWT")
self.builder = OasBuilder()
data = self.builder.get_data()
self.assertEqual(
data.get("components", {}).get("securitySchemes"),
self.bearer_auth_data.get("components", {}).get("securitySchemes"),
)
def test_api_key_auth(self):
self.builder = OasBuilder(
self.api_key_auth_data,
)
data = self.builder.get_data()
self.assertEqual(
data.get("components", {}).get("securitySchemes"),
self.api_key_auth_data.get("components", {}).get("securitySchemes"),
)
def test_api_key_auth_decorator(self):
api_key_security_schema(
"ApiKeyAuth", in_="header", location_name="X-API-KEY"
)
self.builder = OasBuilder()
data = self.builder.get_data()
self.assertEqual(
data.get("components", {}).get("securitySchemes"),
self.api_key_auth_data.get("components", {}).get("securitySchemes"),
)
def tearDown(self) -> None:
Deferred._deferred = []
return super().tearDown()
| 30.75
| 80
| 0.542527
|
66c5269f8cb4c125a570477b312256bd2e2dfdf6
| 320
|
py
|
Python
|
src/kbpo/params/db/local_kbpo.py
|
arunchaganty/kbp-online
|
9f8763d8f4bfb1fb8a01f1f4f506f56625dd38d8
|
[
"MIT"
] | 4
|
2017-08-09T14:05:48.000Z
|
2018-12-25T01:34:23.000Z
|
src/kbpo/params/db/local_kbpo.py
|
arunchaganty/kbp-online
|
9f8763d8f4bfb1fb8a01f1f4f506f56625dd38d8
|
[
"MIT"
] | 12
|
2017-01-19T23:18:18.000Z
|
2018-12-23T18:57:54.000Z
|
src/kbpo/params/db/local_kbpo.py
|
arunchaganty/kbp-online
|
9f8763d8f4bfb1fb8a01f1f4f506f56625dd38d8
|
[
"MIT"
] | 2
|
2017-08-08T09:48:20.000Z
|
2018-07-09T09:12:43.000Z
|
# File wide connection.
"""
Database connection parameters for kbpo database
"""
from psycopg2.extras import NamedTupleCursor
_PARAMS = {
'dbname':'kbpo',
'user':'kbpo',
'password':'kbpo',
'host':'localhost',
'port': 5432,
'cursor_factory': NamedTupleCursor,
'application_name': 'kbpo'
}
| 21.333333
| 48
| 0.653125
|
b981e9f2e8d98f31743174c4121fda4baa9a1d63
| 1,951
|
py
|
Python
|
tools/doc_tester_reader.py
|
loveululu/Serving
|
3a64af45b87f5a8a75ecd20059423d320849295d
|
[
"Apache-2.0"
] | 789
|
2019-04-05T09:20:46.000Z
|
2022-03-31T13:43:54.000Z
|
tools/doc_tester_reader.py
|
loveululu/Serving
|
3a64af45b87f5a8a75ecd20059423d320849295d
|
[
"Apache-2.0"
] | 1,195
|
2019-04-08T10:05:28.000Z
|
2022-03-31T03:43:42.000Z
|
tools/doc_tester_reader.py
|
loveululu/Serving
|
3a64af45b87f5a8a75ecd20059423d320849295d
|
[
"Apache-2.0"
] | 229
|
2019-04-05T09:20:57.000Z
|
2022-03-30T06:21:22.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
def ReadMarkDown(file):
folder = 'test'
os.system('rm -rf ' + folder + ' && mkdir -p ' + folder)
with open(file, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if '[//file]:#' in line:
filename = line[10:].strip()
GetCodeFile(lines, i, os.path.join(folder, filename))
if '<!--' in line:
filename = 'start.sh'
GetTestFile(lines, i, os.path.join(folder, filename))
def GetCodeFile(lines, i, filename):
if '```' not in lines[i + 1]:
raise ValueError(
            'Syntax Error: "[//file]:#" must be immediately followed by a code block'
)
i += 2
code = ''
while True:
if '```' in lines[i]:
break
code += lines[i]
i += 1
with open(filename, 'w+') as f:
f.write(code)
def GetTestFile(lines, i, filename):
i += 1
code = ''
while True:
if '-->' in lines[i]:
break
code += lines[i]
i += 1
with open(filename, 'w+') as f:
f.write(code)
def RunTest():
folder = 'test'
os.system('cd ' + folder + ' && sh start.sh')
os.system('cd .. && rm -rf ' + folder)
if __name__ == '__main__':
ReadMarkDown(os.path.join(sys.argv[1], sys.argv[2]))
RunTest()
| 27.478873
| 82
| 0.587904
|
af7d03373810bdac974f461098682f26a9c20e62
| 11,583
|
py
|
Python
|
tests/test_printer_neid.py
|
jimregan/pygramadan
|
f5ea7771a0bed8559ea35c8d5fc4f3c0786caf2e
|
[
"MIT"
] | null | null | null |
tests/test_printer_neid.py
|
jimregan/pygramadan
|
f5ea7771a0bed8559ea35c8d5fc4f3c0786caf2e
|
[
"MIT"
] | 1
|
2021-08-15T12:43:07.000Z
|
2021-08-15T12:43:07.000Z
|
tests/test_printer_neid.py
|
jimregan/pygramadan
|
f5ea7771a0bed8559ea35c8d5fc4f3c0786caf2e
|
[
"MIT"
] | 1
|
2021-08-19T18:20:46.000Z
|
2021-08-19T18:20:46.000Z
|
from pygramadan.preposition import Preposition
from .test_adjective import make_beag
from .test_noun import make_ainm
from .test_noun_phrase import FEAR_POIST_XML
from .test_preposition import LE_XML, make_le
from .test_verb import AIMSIGH_XML_FULL, Verb
from pygramadan.printer_neid import PrinterNeid
from pygramadan.noun_phrase import NP
from pygramadan.prepositional_phrase import PP
from lxml.doctestcompare import LXMLOutputChecker, PARSE_XML
import io
_NOUN_XML = """
<Lemma lemma="ainm" uid="ainm_masc_4">
<noun gender="masc" declension="4">
<sgNom>
<articleNo>ainm</articleNo>
<articleYes>an t-ainm</articleYes>
</sgNom>
<sgGen>
<articleNo>ainm</articleNo>
<articleYes>an ainm</articleYes>
</sgGen>
<plNom>
<articleNo>ainmneacha</articleNo>
<articleYes>na hainmneacha</articleYes>
</plNom>
<plGen>
<articleNo>ainmneacha</articleNo>
<articleYes>na n-ainmneacha</articleYes>
</plGen>
</noun>
</Lemma>
"""
_NP_XML = """
<Lemma lemma="fear poist" uid="fear_poist_NP">
<nounPhrase gender="masc" forceNominative="1">
<sgNom>
<articleNo>fear poist</articleNo>
<articleYes>an fear poist</articleYes>
</sgNom>
<sgGen>
<articleNo>fir phoist</articleNo>
<articleYes>an fhir phoist</articleYes>
</sgGen>
<plNom>
<articleNo>fir phoist</articleNo>
<articleYes>na fir phoist</articleYes>
</plNom>
<plGen>
<articleNo>fear poist</articleNo>
<articleYes>na bhfear poist</articleYes>
</plGen>
</nounPhrase>
</Lemma>
"""
_PP_XML = """
<Lemma lemma="le hainm beag" uid="le_hainm_beag_PP">
<prepositionalPhrase>
<sg>
<articleNo>le hainm beag</articleNo>
<articleYes var='north'>leis an ainm bheag</articleYes>
<articleYes var='south'>leis an ainm beag</articleYes>
</sg>
<pl>
<articleNo>le hainmneacha beaga</articleNo>
<articleYes>leis na hainmneacha beaga</articleYes>
</pl>
</prepositionalPhrase>
</Lemma>
"""
_PREP_XML = """
<Lemma lemma="le" uid="le_prep">
<preposition>
<persSg1>liom</persSg1>
<persSg2>leat</persSg2>
<persSg3Masc>leis</persSg3Masc>
<persSg3Fem>léi</persSg3Fem>
<persPl1>linn</persPl1>
<persPl2>libh</persPl2>
<persPl3>leo</persPl3>
</preposition>
</Lemma>
"""
_BEAG_XML = """
<Lemma lemma="beag" uid="beag_adj1">
<adjective declension="1">
<sgNomMasc>beag</sgNomMasc>
<sgNomFem>bheag</sgNomFem>
<sgGenMasc>bhig</sgGenMasc>
<sgGenFem>bige</sgGenFem>
<plNom>beaga</plNom>
<plNomSlen>bheaga</plNomSlen>
<plGenStrong>beaga</plGenStrong>
<plGenWeak>beag</plGenWeak>
<comparPres>níos lú</comparPres>
<comparPast>ní ba lú</comparPast>
<superPres>is lú</superPres>
<superPast>ba lú</superPast>
<abstractNoun>laghad</abstractNoun>
<abstractNounExamples>
<example>dá laghad</example>
<example>ag dul i laghad</example>
</abstractNounExamples>
</adjective>
</Lemma>
"""
_AIMSIGH_XML = """
<Lemma lemma='aimsigh' uid='aimsigh_verb'>
<verb>
<vn>aimsiú</vn>
<va>aimsithe</va>
<past>
<sg1>
<pos>d'aimsigh mé</pos>
<quest>ar aimsigh mé?</quest>
<neg>níor aimsigh mé</neg>
</sg1>
<sg2>
<pos>d'aimsigh tú</pos>
<quest>ar aimsigh tú?</quest>
<neg>níor aimsigh tú</neg>
</sg2>
<sg3Masc>
<pos>d'aimsigh sé</pos>
<quest>ar aimsigh sé?</quest>
<neg>níor aimsigh sé</neg>
</sg3Masc>
<sg3Fem>
<pos>d'aimsigh sí</pos>
<quest>ar aimsigh sí?</quest>
<neg>níor aimsigh sí</neg>
</sg3Fem>
<pl1>
<pos>d'aimsíomar</pos>
<pos>d'aimsigh muid</pos>
<quest>ar aimsíomar?</quest>
<quest>ar aimsigh muid?</quest>
<neg>níor aimsíomar</neg>
<neg>níor aimsigh muid</neg>
</pl1>
<pl2>
<pos>d'aimsigh sibh</pos>
<quest>ar aimsigh sibh?</quest>
<neg>níor aimsigh sibh</neg>
</pl2>
<pl3>
<pos>d'aimsigh siad</pos>
<pos>d'aimsíodar</pos>
<quest>ar aimsigh siad?</quest>
<quest>ar aimsíodar?</quest>
<neg>níor aimsigh siad</neg>
<neg>níor aimsíodar</neg>
</pl3>
<auto>
<pos>aimsíodh</pos>
<quest>ar aimsíodh?</quest>
<neg>níor aimsíodh</neg>
</auto>
</past>
<present>
<sg1>
<pos>aimsím</pos>
<quest>an aimsím?</quest>
<neg>ní aimsím</neg>
</sg1>
<sg2>
<pos>aimsíonn tú</pos>
<quest>an aimsíonn tú?</quest>
<neg>ní aimsíonn tú</neg>
</sg2>
<sg3Masc>
<pos>aimsíonn sé</pos>
<quest>an aimsíonn sé?</quest>
<neg>ní aimsíonn sé</neg>
</sg3Masc>
<sg3Fem>
<pos>aimsíonn sí</pos>
<quest>an aimsíonn sí?</quest>
<neg>ní aimsíonn sí</neg>
</sg3Fem>
<pl1>
<pos>aimsímid</pos>
<pos>aimsíonn muid</pos>
<quest>an aimsímid?</quest>
<quest>an aimsíonn muid?</quest>
<neg>ní aimsímid</neg>
<neg>ní aimsíonn muid</neg>
</pl1>
<pl2>
<pos>aimsíonn sibh</pos>
<quest>an aimsíonn sibh?</quest>
<neg>ní aimsíonn sibh</neg>
</pl2>
<pl3>
<pos>aimsíonn siad</pos>
<quest>an aimsíonn siad?</quest>
<neg>ní aimsíonn siad</neg>
</pl3>
<auto>
<pos>aimsítear</pos>
<quest>an aimsítear?</quest>
<neg>ní aimsítear</neg>
</auto>
</present>
<future>
<sg1>
<pos>aimseoidh mé</pos>
<quest>an aimseoidh mé?</quest>
<neg>ní aimseoidh mé</neg>
</sg1>
<sg2>
<pos>aimseoidh tú</pos>
<quest>an aimseoidh tú?</quest>
<neg>ní aimseoidh tú</neg>
</sg2>
<sg3Masc>
<pos>aimseoidh sé</pos>
<quest>an aimseoidh sé?</quest>
<neg>ní aimseoidh sé</neg>
</sg3Masc>
<sg3Fem>
<pos>aimseoidh sí</pos>
<quest>an aimseoidh sí?</quest>
<neg>ní aimseoidh sí</neg>
</sg3Fem>
<pl1>
<pos>aimseoimid</pos>
<pos>aimseoidh muid</pos>
<quest>an aimseoimid?</quest>
<quest>an aimseoidh muid?</quest>
<neg>ní aimseoimid</neg>
<neg>ní aimseoidh muid</neg>
</pl1>
<pl2>
<pos>aimseoidh sibh</pos>
<quest>an aimseoidh sibh?</quest>
<neg>ní aimseoidh sibh</neg>
</pl2>
<pl3>
<pos>aimseoidh siad</pos>
<quest>an aimseoidh siad?</quest>
<neg>ní aimseoidh siad</neg>
</pl3>
<auto>
<pos>aimseofar</pos>
<quest>an aimseofar?</quest>
<neg>ní aimseofar</neg>
</auto>
</future>
<condi>
<sg1>
<pos>d'aimseoinn</pos>
<quest>an aimseoinn?</quest>
<neg>ní aimseoinn</neg>
</sg1>
<sg2>
<pos>d'aimseofá</pos>
<quest>an aimseofá?</quest>
<neg>ní aimseofá</neg>
</sg2>
<sg3Masc>
<pos>d'aimseodh sé</pos>
<quest>an aimseodh sé?</quest>
<neg>ní aimseodh sé</neg>
</sg3Masc>
<sg3Fem>
<pos>d'aimseodh sí</pos>
<quest>an aimseodh sí?</quest>
<neg>ní aimseodh sí</neg>
</sg3Fem>
<pl1>
<pos>d'aimseoimis</pos>
<pos>d'aimseodh muid</pos>
<quest>an aimseoimis?</quest>
<quest>an aimseodh muid?</quest>
<neg>ní aimseoimis</neg>
<neg>ní aimseodh muid</neg>
</pl1>
<pl2>
<pos>d'aimseodh sibh</pos>
<quest>an aimseodh sibh?</quest>
<neg>ní aimseodh sibh</neg>
</pl2>
<pl3>
<pos>d'aimseoidís</pos>
<pos>d'aimseodh siad</pos>
<quest>an aimseoidís?</quest>
<quest>an aimseodh siad?</quest>
<neg>ní aimseoidís</neg>
<neg>ní aimseodh siad</neg>
</pl3>
<auto>
<pos>d'aimseofaí</pos>
<quest>an aimseofaí?</quest>
<neg>ní aimseofaí</neg>
</auto>
</condi>
<pastConti>
<sg1>
<pos>d'aimsínn</pos>
<quest>an aimsínn?</quest>
<neg>ní aimsínn</neg>
</sg1>
<sg2>
<pos>d'aimsíteá</pos>
<quest>an aimsíteá?</quest>
<neg>ní aimsíteá</neg>
</sg2>
<sg3Masc>
<pos>d'aimsíodh sé</pos>
<quest>an aimsíodh sé?</quest>
<neg>ní aimsíodh sé</neg>
</sg3Masc>
<sg3Fem>
<pos>d'aimsíodh sí</pos>
<quest>an aimsíodh sí?</quest>
<neg>ní aimsíodh sí</neg>
</sg3Fem>
<pl1>
<pos>d'aimsímis</pos>
<pos>d'aimsíodh muid</pos>
<quest>an aimsímis?</quest>
<quest>an aimsíodh muid?</quest>
<neg>ní aimsímis</neg>
<neg>ní aimsíodh muid</neg>
</pl1>
<pl2>
<pos>d'aimsíodh sibh</pos>
<quest>an aimsíodh sibh?</quest>
<neg>ní aimsíodh sibh</neg>
</pl2>
<pl3>
<pos>d'aimsídís</pos>
<pos>d'aimsíodh siad</pos>
<quest>an aimsídís?</quest>
<quest>an aimsíodh siad?</quest>
<neg>ní aimsídís</neg>
<neg>ní aimsíodh siad</neg>
</pl3>
<auto>
<pos>d'aimsítí</pos>
<quest>an aimsítí?</quest>
<neg>ní aimsítí</neg>
</auto>
</pastConti>
<imper>
<sg1>
<pos>aimsím!</pos>
<neg>ná haimsím!</neg>
</sg1>
<sg2>
<pos>aimsigh!</pos>
<neg>ná haimsigh!</neg>
</sg2>
<sg3Masc>
<pos>aimsíodh sé!</pos>
<neg>ná haimsíodh sé!</neg>
</sg3Masc>
<sg3Fem>
<pos>aimsíodh sí!</pos>
<neg>ná haimsíodh sí!</neg>
</sg3Fem>
<pl1>
<pos>aimsímis!</pos>
<pos>aimsíodh muid!</pos>
<neg>ná haimsímis!</neg>
<neg>ná haimsíodh muid!</neg>
</pl1>
<pl2>
<pos>aimsígí!</pos>
<neg>ná haimsígí!</neg>
</pl2>
<pl3>
<pos>aimsídís!</pos>
<pos>aimsíodh siad!</pos>
<neg>ná haimsídís!</neg>
<neg>ná haimsíodh siad!</neg>
</pl3>
<auto>
<pos>aimsítear!</pos>
<neg>ná haimsítear!</neg>
</auto>
</imper>
<subj>
<sg1>
<pos>go n-aimsí mé</pos>
<neg>nár aimsí mé</neg>
</sg1>
<sg2>
<pos>go n-aimsí tú</pos>
<neg>nár aimsí tú</neg>
</sg2>
<sg3Masc>
<pos>go n-aimsí sé</pos>
<neg>nár aimsí sé</neg>
</sg3Masc>
<sg3Fem>
<pos>go n-aimsí sí</pos>
<neg>nár aimsí sí</neg>
</sg3Fem>
<pl1>
<pos>go n-aimsímid</pos>
<pos>go n-aimsí muid</pos>
<neg>nár aimsímid</neg>
<neg>nár aimsí muid</neg>
</pl1>
<pl2>
<pos>go n-aimsí sibh</pos>
<neg>nár aimsí sibh</neg>
</pl2>
<pl3>
<pos>go n-aimsí siad</pos>
<neg>nár aimsí siad</neg>
</pl3>
<auto>
<pos>go n-aimsítear</pos>
<neg>nár aimsítear</neg>
</auto>
</subj>
</verb>
</Lemma>
"""
def test_print_noun():
pn = PrinterNeid(with_xml_declarations=True)
out = pn.print_noun_xml(make_ainm())
checker = LXMLOutputChecker()
assert checker.check_output(_NOUN_XML, out, PARSE_XML) is True
assert bytes('xml-stylesheet', encoding='UTF-8') in out
def test_print_noun_no_decl():
pn = PrinterNeid(with_xml_declarations=False)
out = pn.print_noun_xml(make_ainm())
checker = LXMLOutputChecker()
assert checker.check_output(_NOUN_XML, out, PARSE_XML) is True
assert bytes('xml-stylesheet', encoding='UTF-8') not in out
def test_print_np():
pn = PrinterNeid(with_xml_declarations=False)
sio = io.StringIO(FEAR_POIST_XML)
out = pn.print_np_xml(NP(source=sio))
checker = LXMLOutputChecker()
assert checker.check_output(_NP_XML, out, PARSE_XML) is True
def test_print_adj():
pn = PrinterNeid(with_xml_declarations=False)
out = pn.print_adjective_xml(make_beag())
checker = LXMLOutputChecker()
assert checker.check_output(_BEAG_XML, out, PARSE_XML) is True
def test_print_prep():
pn = PrinterNeid(with_xml_declarations=False)
out = pn.print_preposition_xml(make_le())
checker = LXMLOutputChecker()
assert checker.check_output(_PREP_XML, out, PARSE_XML) is True
def test_print_pp():
pn = PrinterNeid(with_xml_declarations=False)
np = NP(noun=make_ainm(), adjective=make_beag())
sio = io.StringIO(LE_XML)
prp = Preposition(source=sio)
pp = PP(preposition=prp, np=np)
out = pn.print_pp_xml(pp)
checker = LXMLOutputChecker()
assert checker.check_output(_PP_XML, out, PARSE_XML) is True
def test_print_verb():
pn = PrinterNeid(with_xml_declarations=False)
sio = io.StringIO(AIMSIGH_XML_FULL)
v = Verb(source=sio)
out = pn.print_verb_xml(v)
checker = LXMLOutputChecker()
assert checker.check_output(_AIMSIGH_XML, out, PARSE_XML) is True
| 23.494929
| 69
| 0.648882
|
32d0713a88139b28d54ed2332f1ac55a353714e3
| 1,148
|
py
|
Python
|
test_scrapy/test_scrapy/test.py
|
lijie28/python_demo
|
bb9ca737eae28ac655a4030f5f110545cf3ed815
|
[
"Apache-2.0"
] | null | null | null |
test_scrapy/test_scrapy/test.py
|
lijie28/python_demo
|
bb9ca737eae28ac655a4030f5f110545cf3ed815
|
[
"Apache-2.0"
] | null | null | null |
test_scrapy/test_scrapy/test.py
|
lijie28/python_demo
|
bb9ca737eae28ac655a4030f5f110545cf3ed815
|
[
"Apache-2.0"
] | null | null | null |
#coding=utf-8
import requests
from lxml import etree
url = 'http://weibo.cn/fishli28' # change this to the target Weibo profile URL
url_login = 'https://login.weibo.cn/login/'
html = requests.get(url_login).content
selector = etree.HTML(html)
password = selector.xpath('//input[@type="password"]/@name')[0]
vk = selector.xpath('//input[@name="vk"]/@value')[0]
action = selector.xpath('//form[@method="post"]/@action')[0]
imgsrc = selector.xpath('/html/body/div[2]/form/div/img[1]/@src')[0]
index = imgsrc.find('cpt=')
capId = imgsrc[index + 4:]
print imgsrc ### captcha image URL
code = raw_input("plz input code:")
print action
print password
print vk
new_url = url_login + action
data = {
    'mobile' : 'fishli28@126.com',  # your Weibo account
    password : 'sosoivan*06',  # your Weibo password
'remember' : 'on',
    'backURL' : 'https://weibo.cn/fishli28',  # fill in the Weibo profile URL here
'backTitle' : u'微博',
'tryCount' : '',
'vk' : vk,
'capId':capId,
'code':code,
'submit' : u'登录'
}
newhtml = requests.post(new_url,data=data).content
new_selector = etree.HTML(newhtml)
content = new_selector.xpath('//span[@class="ctt"]')
for each in content:
text = each.xpath('string(.)')
print text
| 23.428571
| 68
| 0.649826
|
541d87ed5ead46299273a3810a9b1e76c3ef33c8
| 1,210
|
py
|
Python
|
skyportal/services/gcn_service/gcn_service.py
|
bparazin/skyportal
|
c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56
|
[
"BSD-3-Clause"
] | 52
|
2018-11-02T00:53:21.000Z
|
2022-03-08T16:03:52.000Z
|
skyportal/services/gcn_service/gcn_service.py
|
bparazin/skyportal
|
c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56
|
[
"BSD-3-Clause"
] | 1,944
|
2017-04-27T18:51:20.000Z
|
2022-03-31T20:17:44.000Z
|
skyportal/services/gcn_service/gcn_service.py
|
bparazin/skyportal
|
c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56
|
[
"BSD-3-Clause"
] | 63
|
2017-05-13T01:40:47.000Z
|
2022-03-12T11:32:11.000Z
|
import yaml
import gcn
from baselayer.log import make_log
from skyportal.tests import api
def get_token():
try:
token = yaml.load(open('.tokens.yaml'), Loader=yaml.Loader)['INITIAL_ADMIN']
print('Token loaded from `.tokens.yaml`')
return token
except (FileNotFoundError, TypeError, KeyError):
print('Error: no token specified, and no suitable token found in .tokens.yaml')
return None
admin_token = get_token()
@gcn.include_notice_types(
gcn.NoticeType.FERMI_GBM_FLT_POS,
gcn.NoticeType.FERMI_GBM_GND_POS,
gcn.NoticeType.FERMI_GBM_FIN_POS,
gcn.NoticeType.FERMI_GBM_SUBTHRESH,
gcn.NoticeType.LVC_PRELIMINARY,
gcn.NoticeType.LVC_INITIAL,
gcn.NoticeType.LVC_UPDATE,
gcn.NoticeType.LVC_RETRACTION,
gcn.NoticeType.LVC_TEST,
gcn.NoticeType.AMON_ICECUBE_COINC,
gcn.NoticeType.AMON_ICECUBE_HESE,
gcn.NoticeType.ICECUBE_ASTROTRACK_GOLD,
gcn.NoticeType.ICECUBE_ASTROTRACK_BRONZE,
)
def handle(payload, root):
response_status, data = api(
'POST', 'gcn_event', data={'xml': payload}, token=admin_token
)
if __name__ == "__main__":
log = make_log("gcnserver")
gcn.listen(handler=handle)
| 25.208333
| 87
| 0.716529
|
9be3afabc9f3eecc3feb44a51504f9e42ed84f47
| 4,464
|
py
|
Python
|
homeassistant/components/worxlandroid/sensor.py
|
paranoidmonoid/core
|
c4f98a3084f2648749d3e4eeeade9696630d9abd
|
[
"Apache-2.0"
] | 1
|
2021-03-23T07:20:03.000Z
|
2021-03-23T07:20:03.000Z
|
homeassistant/components/worxlandroid/sensor.py
|
paranoidmonoid/core
|
c4f98a3084f2648749d3e4eeeade9696630d9abd
|
[
"Apache-2.0"
] | 19
|
2021-08-18T06:16:06.000Z
|
2022-03-31T06:17:46.000Z
|
homeassistant/components/worxlandroid/sensor.py
|
mkrzywie/core
|
0503b14fbe5a50bc725a22bcaf40167445689dc8
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Worx Landroid mower."""
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_HOST, CONF_PIN, CONF_TIMEOUT, PERCENTAGE
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_ALLOW_UNREACHABLE = "allow_unreachable"
DEFAULT_TIMEOUT = 5
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PIN): vol.All(vol.Coerce(str), vol.Match(r"\d{4}")),
vol.Optional(CONF_ALLOW_UNREACHABLE, default=True): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
ERROR_STATE = [
"blade-blocked",
"repositioning-error",
"wire-bounced",
"blade-blocked",
"outside-wire",
"mower-lifted",
"alarm-6",
"upside-down",
"alarm-8",
"collision-sensor-blocked",
"mower-tilted",
"charge-error",
"battery-error",
]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Worx Landroid sensors."""
for typ in ("battery", "state"):
async_add_entities([WorxLandroidSensor(typ, config)])
class WorxLandroidSensor(SensorEntity):
"""Implementation of a Worx Landroid sensor."""
def __init__(self, sensor, config):
"""Initialize a Worx Landroid sensor."""
self._state = None
self.sensor = sensor
self.host = config.get(CONF_HOST)
self.pin = config.get(CONF_PIN)
self.timeout = config.get(CONF_TIMEOUT)
self.allow_unreachable = config.get(CONF_ALLOW_UNREACHABLE)
self.url = f"http://{self.host}/jsondata.cgi"
@property
def name(self):
"""Return the name of the sensor."""
return f"worxlandroid-{self.sensor}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of the sensor."""
if self.sensor == "battery":
return PERCENTAGE
return None
async def async_update(self):
"""Update the sensor data from the mower."""
connection_error = False
try:
session = async_get_clientsession(self.hass)
with async_timeout.timeout(self.timeout):
auth = aiohttp.helpers.BasicAuth("admin", self.pin)
mower_response = await session.get(self.url, auth=auth)
except (asyncio.TimeoutError, aiohttp.ClientError):
if self.allow_unreachable is False:
_LOGGER.error("Error connecting to mower at %s", self.url)
connection_error = True
# connection error
if connection_error is True and self.allow_unreachable is False:
if self.sensor == "error":
self._state = "yes"
elif self.sensor == "state":
self._state = "connection-error"
# connection success
elif connection_error is False:
# set the expected content type to be text/html
            # since the mower incorrectly returns it...
data = await mower_response.json(content_type="text/html")
# sensor battery
if self.sensor == "battery":
self._state = data["perc_batt"]
# sensor error
elif self.sensor == "error":
self._state = "no" if self.get_error(data) is None else "yes"
# sensor state
elif self.sensor == "state":
self._state = self.get_state(data)
else:
if self.sensor == "error":
self._state = "no"
@staticmethod
def get_error(obj):
"""Get the mower error."""
for i, err in enumerate(obj["allarmi"]):
if i != 2: # ignore wire bounce errors
if err == 1:
return ERROR_STATE[i]
return None
def get_state(self, obj):
"""Get the state of the mower."""
state = self.get_error(obj)
if state is None:
if obj["batteryChargerState"] == "charging":
return obj["batteryChargerState"]
return obj["state"]
return state
| 30.367347
| 86
| 0.615815
|
7f3fe9efb27000d887c33d32379c7cbcdedbbd6e
| 2,146
|
py
|
Python
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/options.py
|
BadDevCode/lumberyard
|
3d688932f919dbf5821f0cb8a210ce24abe39e9e
|
[
"AML"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/options.py
|
olivier-be/lumberyard
|
3d688932f919dbf5821f0cb8a210ce24abe39e9e
|
[
"AML"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/options.py
|
olivier-be/lumberyard
|
3d688932f919dbf5821f0cb8a210ce24abe39e9e
|
[
"AML"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
"""
Target Options
"""
from __future__ import print_function, division, absolute_import
from .. import config
class TargetOptions(object):
OPTIONS = {}
def __init__(self):
self.values = {}
def from_dict(self, dic):
for k, v in dic.items():
try:
ctor = self.OPTIONS[k]
except KeyError:
fmt = "%r does not support option: '%s'"
raise KeyError(fmt % (self.__class__, k))
else:
self.values[k] = ctor(v)
@classmethod
def parse_as_flags(cls, flags, options):
opt = cls()
opt.from_dict(options)
opt.set_flags(flags)
return flags
def set_flags(self, flags):
"""
Provide default flags setting logic.
Subclass can override.
"""
kws = self.values.copy()
        if not kws.pop('nopython', False):
flags.set("enable_pyobject")
if kws.pop("forceobj", False):
flags.set("force_pyobject")
if kws.pop('looplift', True):
flags.set("enable_looplift")
if kws.pop('boundscheck', False):
flags.set("boundscheck")
if kws.pop('_nrt', True):
flags.set("nrt")
if kws.pop('debug', config.DEBUGINFO_DEFAULT):
flags.set("debuginfo")
flags.set("boundscheck")
if kws.pop('nogil', False):
flags.set("release_gil")
if kws.pop('no_rewrites', False):
flags.set('no_rewrites')
if kws.pop('no_cpython_wrapper', False):
flags.set('no_cpython_wrapper')
if 'parallel' in kws:
flags.set('auto_parallel', kws.pop('parallel'))
if 'fastmath' in kws:
flags.set('fastmath', kws.pop('fastmath'))
if 'error_model' in kws:
flags.set('error_model', kws.pop('error_model'))
if 'inline' in kws:
flags.set('inline', kws.pop('inline'))
flags.set("enable_pyobject_looplift")
if kws:
# Unread options?
raise NameError("Unrecognized options: %s" % kws.keys())
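# A minimal sketch (not part of the original module) of how a subclass might
# declare its supported options; 'MyTargetOptions' and the chosen keys are
# illustrative, and 'flags' is assumed to be a Flags instance supplied by the
# caller of parse_as_flags():
#
#     class MyTargetOptions(TargetOptions):
#         OPTIONS = {'nopython': bool, 'nogil': bool, 'boundscheck': bool}
#
#     MyTargetOptions.parse_as_flags(flags, {'nopython': True, 'nogil': True})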
| 25.855422
| 68
| 0.541007
|
f33846450be5828c0cd5eb7219332b6bd8bd9d90
| 75
|
py
|
Python
|
Harvard's CS50/string.py
|
RichelleT/Python
|
87aff2392964ca5630ffa44225f9e13d040cdd91
|
[
"MIT"
] | 1
|
2019-03-04T05:43:35.000Z
|
2019-03-04T05:43:35.000Z
|
Harvard's CS50/string.py
|
RichelleT/Python
|
87aff2392964ca5630ffa44225f9e13d040cdd91
|
[
"MIT"
] | null | null | null |
Harvard's CS50/string.py
|
RichelleT/Python
|
87aff2392964ca5630ffa44225f9e13d040cdd91
|
[
"MIT"
] | null | null | null |
from cs50 import get_string
s = get_string("Name: ")
print(s, ", hello!")
| 18.75
| 28
| 0.666667
|
8f0a33883a04d36a0c10cd16887e58bc91da90a7
| 5,380
|
py
|
Python
|
src/test/tinc/tincrepo/functions/builtin/__init__.py
|
khuddlefish/gpdb
|
2d20bae838c5ed433eecf6ecceca1b8dd5221197
|
[
"PostgreSQL",
"Apache-2.0"
] | 1
|
2017-09-15T06:09:56.000Z
|
2017-09-15T06:09:56.000Z
|
src/test/tinc/tincrepo/functions/builtin/__init__.py
|
guofengrichard/gpdb
|
29bdd6ef38d8d9b9cb04ca31d44e279eb9f640d3
|
[
"PostgreSQL",
"Apache-2.0"
] | 6
|
2018-08-04T07:51:37.000Z
|
2018-11-26T07:09:44.000Z
|
src/test/tinc/tincrepo/functions/builtin/__init__.py
|
guofengrichard/gpdb
|
29bdd6ef38d8d9b9cb04ca31d44e279eb9f640d3
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
import os
import sys
import datetime
import math
import tinctest
import re
from time import gmtime, strftime
import unittest2 as unittest
from fnmatch import fnmatch
from tinctest.runner import TINCTextTestResult
from tinctest.lib import run_shell_command, Gpdiff
from mpp.lib.PSQL import PSQL
from mpp.models import SQLTestCase
class BuiltinFunctionTestCase(SQLTestCase):
def __init__(self, methodName, baseline_result = None, sql_file = None, db_name = None):
super(BuiltinFunctionTestCase, self).__init__(methodName, baseline_result, sql_file, db_name)
self.optimizer_mode = 'on'
def _infer_metadata(self):
super(BuiltinFunctionTestCase, self)._infer_metadata()
        self.executemode = self._metadata.get('executemode', 'ORCA_PLANNER_DIFF')
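    # Summary of the 'executemode' values handled by run_test() below:
    #   ORCA_PLANNER_DIFF - run the SQL with the optimizer on and off and diff
    #                       the two outputs after masking dynamic values
    #   gt1               - pass if a single-row result contains an integer > 1
    #   anything else     - diff the output against the stored .ans file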
def run_test(self):
sql_file = self.sql_file
ans_file = self.ans_file
source_file = sys.modules[self.__class__.__module__].__file__
source_dir = os.path.dirname(source_file)
out_directory = self.get_out_dir()
if (self.executemode == 'ORCA_PLANNER_DIFF'):
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
new_ans_file = os.path.join(self.get_out_dir(), os.path.basename(ans_file).replace('.ans', '_mod.ans'))
guc_sql_file = self._add_gucs_to_sql_file(sql_file)
self.gucs.add('optimizer=off')
self.gucs.add('optimizer_log=off')
guc_off_sql_file = self._add_gucs_to_sql_file(sql_file)
PSQL.run_sql_file(guc_off_sql_file, dbname = self.db_name, out_file = new_ans_file)
PSQL.run_sql_file(guc_sql_file, dbname = self.db_name, out_file = out_file)
pattern = 's/transactionid,,,,,,[[:digit:]]\+,,,,[[:digit:]]\+\/[[:digit:]]\+,[[:digit:]]\+,ExclusiveLock,t,[[:digit:]]\+,/transactionid,,,,,,TRANSACTIONID,,,,VIRTUAL\/XID,PID,ExclusiveLock,t,SESSIONID,/;s/virtualxid,,,,,[[:digit:]]\+\/[[:digit:]]\+,,,,,[[:digit:]]\+\/[[:digit:]]\+,[[:digit:]]\+,ExclusiveLock,t,[[:digit:]]\+,/virtualxid,,,,,VIRTUAL\/XID,,,,,VIRTUAL\/XID,PID,ExclusiveLock,t,SESSIONID,/'
pattern += ';s@relation,\([[:digit:]]\+\),\([[:digit:]]\+\),,,,,,,,[[:digit:]]\+/[[:digit:]]\+,[[:digit:]]\+,AccessShareLock,t,[[:digit:]]\+,t,\([[:digit:]]\+\)@relation,\\1,\\2,,,,,,,,VIRTUAL/XID,PID,AccessShareLock,t,SESSIONID,t,\\3@'
sedcmd = "sed -i '' -e '%(pattern)s' %(answer_file)s" % {"answer_file": new_ans_file, "pattern": pattern}
sedcmd2 = "sed -i '' -e '%(pattern)s' %(answer_file)s" % {"answer_file" :out_file, "pattern": pattern}
sedcmd3 = "sed -i '' -e 's/pg_aoseg_[[:digit:]]\+/pg_aoseg_XXXXX/' " +new_ans_file
sedcmd4 = "sed -i '' -e 's/pg_aoseg_[[:digit:]]\+/pg_aoseg_XXXXX/' " +out_file
run_shell_command(sedcmd, "replace dynamic values in planner output with XXXXX")
run_shell_command(sedcmd2, "replace dynamic values in ORCA output with XXXXX")
run_shell_command(sedcmd3, "replace dynamic values in pg_aoseg.pg_aoseg_")
run_shell_command(sedcmd4, "replace dynamic values in pg_aoseg.pg_aoseg_")
result = Gpdiff.are_files_equal(out_file, new_ans_file)
if result == False:
self.test_artifacts.append(out_file.replace('.out', '.diff'))
return result
elif (self.executemode == 'gt1'):
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
guc_sql_file = self._add_gucs_to_sql_file(sql_file)
PSQL.run_sql_file(guc_sql_file, dbname = self.db_name, out_file = out_file)
f = open(out_file, 'r')
content = f.readlines()
f.close()
linecount = 0
for x in content:
x = x.strip()
if x == "(1 row)":
output = content[linecount - 1].strip()
linecount += 1
if int(output) > 1:
return True
diff_file = os.path.join(out_directory, os.path.basename(sql_file).replace('.sql', '.diff'))
f = open(diff_file, 'w')
f.write("expecting an output which is greater than 1, instead the actual output was " +output)
f.close()
return False
else:
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
guc_sql_file = self._add_gucs_to_sql_file(sql_file)
PSQL.run_sql_file(guc_sql_file, dbname = self.db_name, out_file = out_file)
sedcmd1 = "sed -i -e 's/transactionid,,,,,[[:digit:]]\+,,,,[[:digit:]]\+,[[:digit:]]\+,ExclusiveLock,t,[[:digit:]]\+,/transactionid,,,,,XXXXX,,,,XXXXX,XXXXX,ExclusiveLock,t,XXXXX,/' " +out_file
sedcmd2 = "sed -i -e 's/pg_aoseg_[[:digit:]]\+/pg_aoseg_XXXXX/' " +out_file
run_shell_command(sedcmd1, "replace dynamic values in ORCA output with XXXXX")
run_shell_command(sedcmd2, "replace dynamic values in pg_aoseg.pg_aoseg_")
result = Gpdiff.are_files_equal(out_file, ans_file)
if result == False:
self.test_artifacts.append(out_file.replace('.out', '.diff'))
return result
| 53.267327
| 417
| 0.610409
|
665cdbf781ecf22ec0f72ddd1e1de286ab2572ae
| 10,025
|
py
|
Python
|
erpbg/erpbg/pyimagesearch/__init__.py
|
InspireSoft/erpbg
|
6da33242dc5b6a52e19cd6c17af2262dd33b6b41
|
[
"MIT"
] | null | null | null |
erpbg/erpbg/pyimagesearch/__init__.py
|
InspireSoft/erpbg
|
6da33242dc5b6a52e19cd6c17af2262dd33b6b41
|
[
"MIT"
] | null | null | null |
erpbg/erpbg/pyimagesearch/__init__.py
|
InspireSoft/erpbg
|
6da33242dc5b6a52e19cd6c17af2262dd33b6b41
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import frappe
import sys
# sys.path.append('/home/dev1/opencv/lib/')
sys.path.append('/usr/local/lib/python2.7/site-packages')
# sys.path.append('/home/frappe/frappe-bench-dimela/env/lib/python2.7/site-packages')
import numpy as np
import cv2
import csv
import glob
import os
class Searcher:
def __init__(self, indexPath):
# store our index path
self.indexPath = indexPath
def search(self, queryFeatures, limit=10):
# initialize our dictionary of results
results = {}
# open the index file for reading
with open(self.indexPath) as f:
# initialize the CSV reader
reader = csv.reader(f)
# loop over the rows in the index
for row in reader:
try:
# parse out the image ID and features, then compute the
# chi-squared distance between the features in our index
# and our query features
features = [float(x) for x in row[1:]]
d = self.chi2_distance(features, queryFeatures)
# now that we have the distance between the two feature
                    # vectors, we can update the results dictionary -- the
# key is the current image ID in the index and the
# value is the distance we just computed, representing
# how 'similar' the image in the index is to our query
results[row[0]] = d
except:
pass
# close the reader
f.close()
        # sort our results, so that the smaller distances (i.e. the
        # more relevant images) are at the front of the list
results = sorted([(v, k) for (k, v) in results.items()])
# return our (limited) results
return results[:limit]
def chi2_distance(self, histA, histB, eps=1e-10):
# compute the chi-squared distance
d = 0.5 * np.sum([((a - b) ** 2) / (a + b + eps)
for (a, b) in zip(histA, histB)])
# return the chi-squared distance
return d
class ColorDescriptor:
def __init__(self, bins):
# store the number of bins for the 3D histogram
self.bins = bins
def describe(self, image):
# convert the image to the HSV color space and initialize
# the features used to quantify the image
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
features = []
# grab the dimensions and compute the center of the image
(h, w) = image.shape[:2]
(cX, cY) = (int(w * 0.5), int(h * 0.5))
# divide the image into four rectangles/segments (top-left,
# top-right, bottom-right, bottom-left)
segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h),
(0, cX, cY, h)]
# construct an elliptical mask representing the center of the
# image
(axesX, axesY) = (int(w * 0.75) / 2, int(h * 0.75) / 2)
ellipMask = np.zeros(image.shape[:2], dtype="uint8")
cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)
# loop over the segments
for (startX, endX, startY, endY) in segments:
# construct a mask for each corner of the image, subtracting
# the elliptical center from it
cornerMask = np.zeros(image.shape[:2], dtype="uint8")
cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
cornerMask = cv2.subtract(cornerMask, ellipMask)
# extract a color histogram from the image, then update the
# feature vector
hist = self.histogram(image, cornerMask)
features.extend(hist)
# extract a color histogram from the elliptical region and
# update the feature vector
hist = self.histogram(image, ellipMask)
features.extend(hist)
# return the feature vector
return features
def histogram(self, image, mask):
# extract a 3D color histogram from the masked region of the
# image, using the supplied number of bins per channel; then
# normalize the histogram
hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,
[0, 180, 0, 256, 0, 256])
hist = cv2.normalize(hist, False).flatten()
# return the histogram
return hist
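# Usage sketch for the two classes above (paths and file names here are
# hypothetical, not part of the original module):
#
#     cd = ColorDescriptor((8, 12, 3))
#     query = cv2.imread("/tmp/query.png")
#     features = cd.describe(query)
#     searcher = Searcher("/tmp/dataset.csv")
#     for (score, image_id) in searcher.search(features, limit=5):
#         print image_id, score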
@frappe.whitelist()
def get_data():
pyimagesearch = {}
pyimagesearch["dataset1"] = "/home/frappe/frappe-bench-dimela/sites/erp.dimeladesign.com/public/files"
pyimagesearch["dataset2"] = "/home/frappe/frappe-bench-dimela/sites/erp.dimeladesign.com/private/files"
pyimagesearch["index"] = "/home/frappe/frappe-bench-dimela/sites/erp.dimeladesign.com/dataset.csv"
return pyimagesearch
def loopDir(path, type, output, cd):
# use glob to grab the image paths and loop over them
for imagePath in glob.glob(path + "/*." + str(type)):
try:
# extract the image ID (i.e. the unique filename) from the image
# path and load the image itself
imageID = imagePath[imagePath.rfind("/") + 1:]
image = cv2.imread(imagePath)
# describe the image
features = cd.describe(image)
# write the features to file
features = [str(f) for f in features]
output.write("%s,%s\n" % (imageID, ",".join(features)))
except:
pass
@frappe.whitelist()
def update_dataset():
pyimagesearch = get_data()
# initialize the color descriptor
cd = ColorDescriptor((8, 12, 3))
# open the output index file for writing
output = open(pyimagesearch["index"], "w")
# use glob to grab the image paths and loop over them
loopDir(pyimagesearch["dataset1"], "png", output, cd)
loopDir(pyimagesearch["dataset2"], "png", output, cd)
loopDir(pyimagesearch["dataset1"], "jpg", output, cd)
loopDir(pyimagesearch["dataset2"], "jpg", output, cd)
loopDir(pyimagesearch["dataset1"], "PNG", output, cd)
loopDir(pyimagesearch["dataset2"], "PNG", output, cd)
loopDir(pyimagesearch["dataset1"], "JPG", output, cd)
loopDir(pyimagesearch["dataset2"], "JPG", output, cd)
# close the index file
output.close()
return "done"
@frappe.whitelist()
def search_dataset(attached_imgname, url_addon):
pyimagesearch = get_data()
finalResults = []
try:
# initialize the image descriptor
cd = ColorDescriptor((8, 12, 3))
# load the query image and describe it
filepath = "/home/frappe/frappe-bench-dimela/sites/erp.dimeladesign.com/" + str(url_addon) + "files/" + attached_imgname
if not os.path.isfile(filepath):
raise Exception("invalid path to file: "+filepath)
query = cv2.imread(filepath)
features = cd.describe(query)
# perform the search
searcher = Searcher(pyimagesearch["index"])
results = searcher.search(features)
# loop over the results
for (score, resultID) in results:
attachment_files = frappe.db.sql('''SELECT * FROM `tabFile` WHERE `file_name`=%s AND `attached_to_doctype`<>'Image Search';''', (resultID), as_dict=True)
if len(attachment_files) > 0:
data = {"file_url": attachment_files[0]["file_url"], "docs": {}}
for file in attachment_files:
if file["attached_to_doctype"]:
if not file["attached_to_doctype"] in data["docs"]:
data["docs"][file["attached_to_doctype"]] = []
data["docs"][file["attached_to_doctype"]].append(file["attached_to_name"])
finalResults.append(data)
except Exception as e:
finalResults = [str(e)]
return finalResults
@frappe.whitelist()
def search_result(file_name, url_addon):
results = search_dataset(file_name, url_addon)
if isinstance(results[0], basestring):
return results[0]
finalResults = []
# loop over the results
for data in results:
cell = "<div style='float:left;height'>"
cell += "<img src='" + data["file_url"] + "' alt='' style='width:200px;min-height: 20px;' />"
cell += "</div>"
cell += "<div style='float:left;padding-left: 10px;'>"
if "docs" in data:
first_doctype = True
if len(data["docs"])<=0:
cell += frappe._("Качен на сървъра, но не закачен към документ.");
for doctype in data["docs"]:
if not first_doctype:
cell += "<br/>"
first_doctype = False
cell += frappe._(doctype)+": "
first_docname = True
for docname in data["docs"][doctype]:
if not first_docname:
cell += ", "
first_docname = False
cell += "<a style='text-decoration: underline;' href='/desk#Form/"+doctype+"/"+docname+"' target='_blank'>"+docname+"</a>"
cell += "</div>"
finalResults.append(cell)
return finalResults
@frappe.whitelist()
def search_test_result():
file_name = "Bella_M.JPG"
url_addon = "private/"
search = "<div style='float:left'>"
search += "<img src='/"+url_addon+"files/"+file_name+"' alt='' style='width:200px' />"
search += "</div>"
search += "<div style='float:left;padding-left: 10px;'>Searching Image</div>"
from werkzeug.wrappers import Response
response = Response()
response.mimetype = 'text/html'
response.charset = 'utf-8'
response.data = "<html><head><title>Test</title></head><body><table><tr><td>" + \
search + "</td></tr><tr><td>" + \
('</td></tr><tr><td>'.join(search_result(file_name, url_addon))) + \
"</td></tr></table></body></html>"
return response
| 37.406716
| 165
| 0.580948
|
15e2a82209083e979b5cdee3f1b80d24da21d6a9
| 2,509
|
py
|
Python
|
PLM/api/__init__.py
|
vtta2008/pipelineTool
|
2431d2fc987e3b31f2a6a63427fee456fa0765a0
|
[
"Apache-2.0"
] | 7
|
2020-10-11T21:21:50.000Z
|
2022-03-07T03:37:51.000Z
|
PLM/api/__init__.py
|
vtta2008/pipelineTool
|
2431d2fc987e3b31f2a6a63427fee456fa0765a0
|
[
"Apache-2.0"
] | null | null | null |
PLM/api/__init__.py
|
vtta2008/pipelineTool
|
2431d2fc987e3b31f2a6a63427fee456fa0765a0
|
[
"Apache-2.0"
] | 3
|
2019-03-11T21:54:52.000Z
|
2019-11-25T11:23:17.000Z
|
# -*- coding: utf-8 -*-
"""
Script Name: api.__init__
Author: Do Trinh/Jimmy - 3D artist.
Description:
API is the acronym for Application Programming Interface, which is a software intermediary that allows two
applications to talk to each other. Each time you use an app like Facebook, send an instant message, or check the
weather on your phone, you’re using an API.
EXAMPLE OF AN API
When you use an application on your mobile phone, the application connects to the Internet and sends data to a server.
The server then retrieves that data, interprets it, performs the necessary actions and sends it back to your phone.
The application then interprets that data and presents you with the information you wanted in a readable way. This is
what an API is - all of this happens via API.
To explain this better, let us take a familiar example.
Imagine you’re sitting at a table in a restaurant with a menu of choices to order from. The kitchen is the part of
the “system” that will prepare your order. What is missing is the critical link to communicate your order to the
kitchen and deliver your food back to your table. That’s where the waiter or API comes in. The waiter is the
messenger – or API – that takes your request or order and tells the kitchen – the system – what to do. Then the
waiter delivers the response back to you; in this case, it is the food.
Here is a real-life API example. You may be familiar with the process of searching flights online. Just like the
restaurant, you have a variety of options to choose from, including different cities, departure and return dates,
    and more. Let us imagine that you're booking your flight on an airline website. You choose a departure city and
date, a return city and date, cabin class, as well as other variables. In order to book your flight, you interact
with the airline’s website to access their database and see if any seats are available on those dates and what the
costs might be.
"""
# -------------------------------------------------------------------------------------------------------------
from tests.version import Version
# This is API version
__version__ = Version('Incremental', 0, 0, 1)
__title__ = 'API'
__all__ = ['__version__']
# -------------------------------------------------------------------------------------------------------------
# Created by Trinh Do on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved
| 48.25
| 122
| 0.676764
|
9b6aecfe60c2293aecdb496397e6b50c7a4c4424
| 377
|
py
|
Python
|
python/samples/coarse/example3.py
|
NVIDIA/cuQuantum
|
0f00494d4639d760228ac002e83e6d2d3dd97eca
|
[
"BSD-3-Clause"
] | 52
|
2021-12-04T20:39:12.000Z
|
2022-03-29T11:52:55.000Z
|
python/samples/coarse/example3.py
|
NVIDIA/cuQuantum
|
0f00494d4639d760228ac002e83e6d2d3dd97eca
|
[
"BSD-3-Clause"
] | 3
|
2022-02-01T22:46:50.000Z
|
2022-03-24T01:52:29.000Z
|
python/samples/coarse/example3.py
|
NVIDIA/cuQuantum
|
0f00494d4639d760228ac002e83e6d2d3dd97eca
|
[
"BSD-3-Clause"
] | 18
|
2021-12-20T17:52:07.000Z
|
2022-03-29T02:27:58.000Z
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays with explicit Einstein summation (Unicode characters).
The contraction result is also a NumPy ndarray.
"""
import numpy as np
from cuquantum import contract
a = np.ones((3,2))
b = np.ones((2,3))
r = contract("αβ,βγ->αγ", a, b)
print(r)
| 17.952381
| 83
| 0.713528
|
68b470a3a7d9adfc5ec48647d6a6e899b63d8b5d
| 139,789
|
py
|
Python
|
tools/buildman/kconfiglib.py
|
shell832/QBoot
|
f5b5169b7463670cb24d223e1c374da33d3d4457
|
[
"Unlicense"
] | 4
|
2018-09-28T04:33:26.000Z
|
2021-03-10T06:29:55.000Z
|
tools/buildman/kconfiglib.py
|
shell832/QBoot
|
f5b5169b7463670cb24d223e1c374da33d3d4457
|
[
"Unlicense"
] | 4
|
2016-08-30T11:30:25.000Z
|
2020-12-27T09:58:07.000Z
|
BBB-firmware/u-boot-v2018.05-rc2/tools/buildman/kconfiglib.py
|
guileschool/BEAGLEBONE-tutorials
|
eecd83e0c14941b05ad38eeb77e5a50602cc29ca
|
[
"MIT"
] | 2
|
2016-12-30T08:02:57.000Z
|
2020-05-16T05:59:30.000Z
|
#
# SPDX-License-Identifier: ISC
#
# Author: Ulf Magnusson
# https://github.com/ulfalizer/Kconfiglib
# This is Kconfiglib, a Python library for scripting, debugging, and extracting
# information from Kconfig-based configuration systems. To view the
# documentation, run
#
# $ pydoc kconfiglib
#
# or, if you prefer HTML,
#
# $ pydoc -w kconfiglib
#
# The examples/ subdirectory contains examples, to be run with e.g.
#
# $ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
#
# Look in testsuite.py for the test suite.
"""
Kconfiglib is a Python library for scripting and extracting information from
Kconfig-based configuration systems. Features include the following:
- Symbol values and properties can be looked up and values assigned
programmatically.
- .config files can be read and written.
- Expressions can be evaluated in the context of a Kconfig configuration.
- Relations between symbols can be quickly determined, such as finding all
symbols that reference a particular symbol.
- Highly compatible with the scripts/kconfig/*conf utilities. The test suite
automatically compares outputs between Kconfiglib and the C implementation
for a large number of cases.
For the Linux kernel, scripts are run using
$ make scriptconfig [ARCH=<arch>] SCRIPT=<path to script> [SCRIPT_ARG=<arg>]
Using the 'scriptconfig' target ensures that required environment variables
(SRCARCH, ARCH, srctree, KERNELVERSION, etc.) are set up correctly.
Scripts receive the name of the Kconfig file to load in sys.argv[1]. As of
Linux 4.1.0-rc5, this is always "Kconfig" from the kernel top-level directory.
If an argument is provided with SCRIPT_ARG, it appears as sys.argv[2].
To get an interactive Python prompt with Kconfiglib preloaded and a Config
object 'c' created, run
$ make iscriptconfig [ARCH=<arch>]
Kconfiglib supports both Python 2 and Python 3. For (i)scriptconfig, the Python
interpreter to use can be passed in PYTHONCMD, which defaults to 'python'. PyPy
works well too, and might give a nice speedup for long-running jobs.
The examples/ directory contains short example scripts, which can be run with
e.g.
$ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
or
$ make scriptconfig SCRIPT=Kconfiglib/examples/help_grep.py SCRIPT_ARG=kernel
testsuite.py contains the test suite. See the top of the script for how to run
it.
Credits: Written by Ulf "Ulfalizer" Magnusson
Send bug reports, suggestions and other feedback to ulfalizer a.t Google's
email service. Don't wrestle with internal APIs. Tell me what you need and I
might add it in a safe way as a client API instead."""
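# Illustrative usage sketch (not part of the original module), based on the
# public API described above; the symbol and file names are placeholders:
#
#     import sys
#     import kconfiglib
#
#     conf = kconfiglib.Config(sys.argv[1])   # e.g. "Kconfig" via 'make scriptconfig'
#     conf.load_config(".config")             # read existing symbol values
#     print(conf["MODULES"].get_value())      # look up a symbol by name
#     conf.write_config(".config.new")        # write the configuration back out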
import os
import platform
import re
import sys
# File layout:
#
# Public classes
# Public functions
# Internal classes
# Internal functions
# Internal global constants
# Line length: 79 columns
#
# Public classes
#
class Config(object):
"""Represents a Kconfig configuration, e.g. for i386 or ARM. This is the
set of symbols and other items appearing in the configuration together with
their values. Creating any number of Config objects -- including for
different architectures -- is safe; Kconfiglib has no global state."""
#
# Public interface
#
def __init__(self, filename="Kconfig", base_dir=None, print_warnings=True,
print_undef_assign=False):
"""Creates a new Config object, representing a Kconfig configuration.
Raises Kconfig_Syntax_Error on syntax errors.
filename (default: "Kconfig"): The base Kconfig file of the
configuration. For the Linux kernel, you'll probably want "Kconfig"
from the top-level directory, as environment variables will make
sure the right Kconfig is included from there
(arch/<architecture>/Kconfig). If you are using Kconfiglib via 'make
        scriptconfig', the filename of the base Kconfig file will be in
sys.argv[1].
base_dir (default: None): The base directory relative to which 'source'
statements within Kconfig files will work. For the Linux kernel this
should be the top-level directory of the kernel tree. $-references
to existing environment variables will be expanded.
If None (the default), the environment variable 'srctree' will be
used if set, and the current directory otherwise. 'srctree' is set
by the Linux makefiles to the top-level kernel directory. A default
of "." would not work with an alternative build directory.
print_warnings (default: True): Set to True if warnings related to this
configuration should be printed to stderr. This can be changed later
with Config.set_print_warnings(). It is provided as a constructor
argument since warnings might be generated during parsing.
print_undef_assign (default: False): Set to True if informational
messages related to assignments to undefined symbols should be
printed to stderr for this configuration. Can be changed later with
Config.set_print_undef_assign()."""
# The set of all symbols, indexed by name (a string)
self.syms = {}
# Python 2/3 compatibility hack. This is the only one needed.
self.syms_iter = self.syms.values if sys.version_info[0] >= 3 else \
self.syms.itervalues
# The set of all defined symbols in the configuration in the order they
# appear in the Kconfig files. This excludes the special symbols n, m,
# and y as well as symbols that are referenced but never defined.
self.kconfig_syms = []
# The set of all named choices (yes, choices can have names), indexed
# by name (a string)
self.named_choices = {}
# Lists containing all choices, menus and comments in the configuration
self.choices = []
self.menus = []
self.comments = []
def register_special_symbol(type_, name, val):
sym = Symbol()
sym.is_special_ = True
sym.is_defined_ = True
sym.config = self
sym.name = name
sym.type = type_
sym.cached_val = val
self.syms[name] = sym
return sym
# The special symbols n, m and y, used as shorthand for "n", "m" and
# "y"
self.n = register_special_symbol(TRISTATE, "n", "n")
self.m = register_special_symbol(TRISTATE, "m", "m")
self.y = register_special_symbol(TRISTATE, "y", "y")
# DEFCONFIG_LIST uses this
register_special_symbol(STRING, "UNAME_RELEASE", platform.uname()[2])
# The symbol with "option defconfig_list" set, containing a list of
# default .config files
self.defconfig_sym = None
# See Symbol.get_(src)arch()
self.arch = os.environ.get("ARCH")
self.srcarch = os.environ.get("SRCARCH")
# If you set CONFIG_ in the environment, Kconfig will prefix all symbols
# with its value when saving the configuration, instead of using the default, "CONFIG_".
self.config_prefix = os.environ.get("CONFIG_")
if self.config_prefix is None:
self.config_prefix = "CONFIG_"
# See Config.__init__(). We need this for get_defconfig_filename().
self.srctree = os.environ.get("srctree")
if self.srctree is None:
self.srctree = "."
self.filename = filename
self.base_dir = self.srctree if base_dir is None else \
os.path.expandvars(base_dir)
# The 'mainmenu' text
self.mainmenu_text = None
# The filename of the most recently loaded .config file
self.config_filename = None
# The textual header of the most recently loaded .config, uncommented
self.config_header = None
self.print_warnings = print_warnings
self.print_undef_assign = print_undef_assign
self._warnings = []
# For parsing routines that stop when finding a line belonging to a
        # different construct, these hold that line and the tokenized version
# of that line. The purpose is to avoid having to re-tokenize the line,
# which is inefficient and causes problems when recording references to
# symbols.
self.end_line = None
self.end_line_tokens = None
# See the comment in _parse_expr().
self._cur_item = None
self._line = None
self._filename = None
self._linenr = None
self._transform_m = None
# Parse the Kconfig files
self.top_block = []
self._parse_file(filename, None, None, None, self.top_block)
# Build Symbol.dep for all symbols
self._build_dep()
def get_arch(self):
"""Returns the value the environment variable ARCH had at the time the
Config instance was created, or None if ARCH was not set. For the
kernel, this corresponds to the architecture being built for, with
values such as "i386" or "mips"."""
return self.arch
def get_srcarch(self):
"""Returns the value the environment variable SRCARCH had at the time
the Config instance was created, or None if SRCARCH was not set. For
the kernel, this corresponds to the particular arch/ subdirectory
containing architecture-specific code."""
return self.srcarch
def get_srctree(self):
"""Returns the value the environment variable srctree had at the time
the Config instance was created, or None if srctree was not defined.
This variable points to the source directory and is used when building
in a separate directory."""
return self.srctree
def get_base_dir(self):
"""Returns the base directory relative to which 'source' statements
will work, passed as an argument to Config.__init__()."""
return self.base_dir
def get_kconfig_filename(self):
"""Returns the name of the (base) kconfig file this configuration was
loaded from."""
return self.filename
def get_config_filename(self):
"""Returns the filename of the most recently loaded configuration file,
or None if no configuration has been loaded."""
return self.config_filename
def get_config_header(self):
"""Returns the (uncommented) textual header of the .config file most
recently loaded with load_config(). Returns None if no .config file has
been loaded or if the most recently loaded .config file has no header.
The header consists of all lines up to but not including the first line
that either
1. Does not start with "#"
2. Has the form "# CONFIG_FOO is not set."
"""
return self.config_header
def get_mainmenu_text(self):
"""Returns the text of the 'mainmenu' statement (with $-references to
symbols replaced by symbol values), or None if the configuration has no
'mainmenu' statement."""
return None if self.mainmenu_text is None else \
self._expand_sym_refs(self.mainmenu_text)
def get_defconfig_filename(self):
"""Returns the name of the defconfig file, which is the first existing
file in the list given in a symbol having 'option defconfig_list' set.
$-references to symbols will be expanded ("$FOO bar" -> "foo bar" if
FOO has the value "foo"). Returns None in case of no defconfig file.
Setting 'option defconfig_list' on multiple symbols currently results
in undefined behavior.
If the environment variable 'srctree' was set when the Config was
created, get_defconfig_filename() will first look relative to that
directory before looking in the current directory; see
Config.__init__().
WARNING: A wart here is that scripts/kconfig/Makefile sometimes uses
the --defconfig=<defconfig> option when calling the C implementation of
e.g. 'make defconfig'. This option overrides the 'option
defconfig_list' symbol, meaning the result from
get_defconfig_filename() might not match what 'make defconfig' would
use. That probably ought to be worked around somehow, so that this
function always gives the "expected" result."""
if self.defconfig_sym is None:
return None
for filename, cond_expr in self.defconfig_sym.def_exprs:
if self._eval_expr(cond_expr) == "y":
filename = self._expand_sym_refs(filename)
# We first look in $srctree. os.path.join() won't work here as
# an absolute path in filename would override $srctree.
srctree_filename = os.path.normpath(self.srctree + "/" +
filename)
if os.path.exists(srctree_filename):
return srctree_filename
if os.path.exists(filename):
return filename
return None
def get_symbol(self, name):
"""Returns the symbol with name 'name', or None if no such symbol
appears in the configuration. An alternative shorthand is conf[name],
where conf is a Config instance, though that will instead raise
KeyError if the symbol does not exist."""
return self.syms.get(name)
def __getitem__(self, name):
"""Returns the symbol with name 'name'. Raises KeyError if the symbol
does not appear in the configuration."""
return self.syms[name]
def get_symbols(self, all_symbols=True):
"""Returns a list of symbols from the configuration. An alternative for
iterating over all defined symbols (in the order of definition) is
for sym in config:
...
which relies on Config implementing __iter__() and is equivalent to
for sym in config.get_symbols(False):
...
all_symbols (default: True): If True, all symbols -- including special
and undefined symbols -- will be included in the result, in an
undefined order. If False, only symbols actually defined and not
merely referred to in the configuration will be included in the
result, and will appear in the order that they are defined within
the Kconfig configuration files."""
return list(self.syms.values()) if all_symbols else self.kconfig_syms
def __iter__(self):
"""Convenience function for iterating over the set of all defined
symbols in the configuration, used like
for sym in conf:
...
The iteration happens in the order of definition within the Kconfig
configuration files. Symbols only referred to but not defined will not
be included, nor will the special symbols n, m, and y. If you want to
include such symbols as well, see config.get_symbols()."""
return iter(self.kconfig_syms)
def get_choices(self):
"""Returns a list containing all choice statements in the
configuration, in the order they appear in the Kconfig files."""
return self.choices
def get_menus(self):
"""Returns a list containing all menus in the configuration, in the
order they appear in the Kconfig files."""
return self.menus
def get_comments(self):
"""Returns a list containing all comments in the configuration, in the
order they appear in the Kconfig files."""
return self.comments
def get_top_level_items(self):
"""Returns a list containing the items (symbols, menus, choices, and
comments) at the top level of the configuration -- that is, all items
that do not appear within a menu or choice. The items appear in the
same order as within the configuration."""
return self.top_block
def load_config(self, filename, replace=True):
"""Loads symbol values from a file in the familiar .config format.
Equivalent to calling Symbol.set_user_value() to set each of the
values.
"# CONFIG_FOO is not set" within a .config file is treated specially
and sets the user value of FOO to 'n'. The C implementation works the
same way.
filename: The .config file to load. $-references to existing
environment variables will be expanded. For scripts to work even when
an alternative build directory is used with the Linux kernel, you
need to refer to the top-level kernel directory with "$srctree".
replace (default: True): True if the configuration should replace the
old configuration; False if it should add to it.
        Returns a list of warnings (hopefully empty)
"""
self._warnings = []
# Regular expressions for parsing .config files
_set_re_match = re.compile(r"{}(\w+)=(.*)".format(self.config_prefix)).match
_unset_re_match = re.compile(r"# {}(\w+) is not set".format(self.config_prefix)).match
# Put this first so that a missing file doesn't screw up our state
filename = os.path.expandvars(filename)
line_feeder = _FileFeed(filename)
self.config_filename = filename
#
# Read header
#
def is_header_line(line):
return line is not None and line.startswith("#") and \
not _unset_re_match(line)
self.config_header = None
line = line_feeder.peek_next()
if is_header_line(line):
self.config_header = ""
while is_header_line(line_feeder.peek_next()):
self.config_header += line_feeder.get_next()[1:]
# Remove trailing newline
if self.config_header.endswith("\n"):
self.config_header = self.config_header[:-1]
#
# Read assignments. Hotspot for some workloads.
#
def warn_override(filename, linenr, name, old_user_val, new_user_val):
self._warn('overriding the value of {0}. '
'Old value: "{1}", new value: "{2}".'
.format(name, old_user_val, new_user_val),
filename, linenr)
# Invalidate everything to keep things simple. It might be possible to
# improve performance for the case where multiple configurations are
# loaded by only invalidating a symbol (and its dependent symbols) if
# the new user value differs from the old. One complication would be
# that symbols not mentioned in the .config must lose their user value
# when replace = True, which is the usual case.
if replace:
self.unset_user_values()
else:
self._invalidate_all()
while 1:
line = line_feeder.get_next()
if line is None:
return self._warnings
line = line.rstrip()
set_match = _set_re_match(line)
if set_match:
name, val = set_match.groups()
if val.startswith('"'):
if len(val) < 2 or val[-1] != '"':
_parse_error(line, "malformed string literal",
line_feeder.filename, line_feeder.linenr)
# Strip quotes and remove escapings. The unescaping
# procedure should be safe since " can only appear as \"
# inside the string.
val = val[1:-1].replace('\\"', '"').replace("\\\\", "\\")
if name in self.syms:
sym = self.syms[name]
if sym.user_val is not None:
warn_override(line_feeder.filename, line_feeder.linenr,
name, sym.user_val, val)
if sym.is_choice_sym:
user_mode = sym.parent.user_mode
if user_mode is not None and user_mode != val:
self._warn("assignment to {0} changes mode of "
'containing choice from "{1}" to "{2}".'
.format(name, val, user_mode),
line_feeder.filename,
line_feeder.linenr)
sym._set_user_value_no_invalidate(val, True)
else:
if self.print_undef_assign:
_stderr_msg('note: attempt to assign the value "{0}" '
"to the undefined symbol {1}."
.format(val, name),
line_feeder.filename, line_feeder.linenr)
else:
unset_match = _unset_re_match(line)
if unset_match:
name = unset_match.group(1)
if name in self.syms:
sym = self.syms[name]
if sym.user_val is not None:
warn_override(line_feeder.filename,
line_feeder.linenr,
name, sym.user_val, "n")
sym._set_user_value_no_invalidate("n", True)
def write_config(self, filename, header=None):
"""Writes out symbol values in the familiar .config format.
Kconfiglib makes sure the format matches what the C implementation
would generate, down to whitespace. This eases testing.
filename: The filename under which to save the configuration.
header (default: None): A textual header that will appear at the
beginning of the file, with each line commented out automatically.
None means no header."""
for sym in self.syms_iter():
sym.already_written = False
with open(filename, "w") as f:
# Write header
if header is not None:
f.write(_comment(header) + "\n")
# Build and write configuration
conf_strings = []
_make_block_conf(self.top_block, conf_strings.append)
f.write("\n".join(conf_strings) + "\n")
def eval(self, s):
"""Returns the value of the expression 's' -- where 's' is represented
as a string -- in the context of the configuration. Raises
Kconfig_Syntax_Error if syntax errors are detected in 's'.
For example, if FOO and BAR are tristate symbols at least one of which
has the value "y", then config.eval("y && (FOO || BAR)") => "y"
This function always yields a tristate value. To get the value of
non-bool, non-tristate symbols, use Symbol.get_value().
The result of this function is consistent with how evaluation works for
conditional expressions in the configuration as well as in the C
implementation. "m" and m are rewritten as '"m" && MODULES' and 'm &&
MODULES', respectively, and a result of "m" will get promoted to "y" if
we're running without modules.
Syntax checking is somewhat lax, partly to be compatible with lax
parsing in the C implementation."""
return self._eval_expr(self._parse_expr(self._tokenize(s, True), # Feed
None, # Current symbol/choice
s)) # line
def unset_user_values(self):
"""Resets the values of all symbols, as if Config.load_config() or
Symbol.set_user_value() had never been called."""
for sym in self.syms_iter():
sym._unset_user_value_no_recursive_invalidate()
def set_print_warnings(self, print_warnings):
"""Determines whether warnings related to this configuration (for
things like attempting to assign illegal values to symbols with
Symbol.set_user_value()) should be printed to stderr.
print_warnings: True if warnings should be printed."""
self.print_warnings = print_warnings
def set_print_undef_assign(self, print_undef_assign):
"""Determines whether informational messages related to assignments to
undefined symbols should be printed to stderr for this configuration.
print_undef_assign: If True, such messages will be printed."""
self.print_undef_assign = print_undef_assign
def __str__(self):
"""Returns a string containing various information about the Config."""
return _lines("Configuration",
"File : " +
self.filename,
"Base directory : " +
self.base_dir,
"Value of $ARCH at creation time : " +
("(not set)" if self.arch is None else self.arch),
"Value of $SRCARCH at creation time : " +
("(not set)" if self.srcarch is None else
self.srcarch),
"Source tree (derived from $srctree;",
"defaults to '.' if $srctree isn't set) : " +
self.srctree,
"Most recently loaded .config : " +
("(no .config loaded)"
if self.config_filename is None else
self.config_filename),
"Print warnings : " +
BOOL_STR[self.print_warnings],
"Print assignments to undefined symbols : " +
BOOL_STR[self.print_undef_assign])
#
# Private methods
#
#
# Kconfig parsing
#
def _parse_file(self, filename, parent, deps, visible_if_deps, block):
"""Parses the Kconfig file 'filename'. Appends the Items in the file
(and any file it sources) to the list passed in the 'block' parameter.
See _parse_block() for the meaning of the parameters."""
self._parse_block(_FileFeed(filename), None, parent, deps,
visible_if_deps, block)
def _parse_block(self, line_feeder, end_marker, parent, deps,
visible_if_deps, block):
"""Parses a block, which is the contents of either a file or an if,
menu, or choice statement. Appends the Items to the list passed in the
'block' parameter.
line_feeder: A _FileFeed instance feeding lines from a file. The
Kconfig language is line-based in practice.
end_marker: The token that ends the block, e.g. T_ENDIF ("endif") for
ifs. None for files.
parent: The enclosing menu or choice, or None if we're at the top
level.
deps: Dependencies from enclosing menus, choices and ifs.
visible_if_deps (default: None): 'visible if' dependencies from
enclosing menus.
block: The list to add items to."""
while 1:
# Do we already have a tokenized line that we determined wasn't
# part of whatever we were parsing earlier? See comment in
# Config.__init__().
if self.end_line is not None:
line = self.end_line
tokens = self.end_line_tokens
tokens.unget_all()
self.end_line = None
self.end_line_tokens = None
else:
line = line_feeder.get_next()
if line is None:
if end_marker is not None:
raise Kconfig_Syntax_Error("Unexpected end of file {0}"
.format(line_feeder.filename))
return
tokens = self._tokenize(line, False, line_feeder.filename,
line_feeder.linenr)
t0 = tokens.get_next()
if t0 is None:
continue
# Cases are ordered roughly by frequency, which speeds things up a
# bit
if t0 == T_CONFIG or t0 == T_MENUCONFIG:
# The tokenizer will automatically allocate a new Symbol object
# for any new names it encounters, so we don't need to worry
# about that here.
sym = tokens.get_next()
# Symbols defined in multiple places get the parent of their
# first definition. However, for symbols whose parents are
# choice statements, the choice statement takes precedence.
if not sym.is_defined_ or isinstance(parent, Choice):
sym.parent = parent
sym.is_defined_ = True
self._parse_properties(line_feeder, sym, deps, visible_if_deps)
self.kconfig_syms.append(sym)
block.append(sym)
elif t0 == T_SOURCE:
kconfig_file = tokens.get_next()
exp_kconfig_file = self._expand_sym_refs(kconfig_file)
f = os.path.join(self.base_dir, exp_kconfig_file)
if not os.path.exists(f):
raise IOError('{0}:{1}: sourced file "{2}" (expands to '
'"{3}") not found. Perhaps base_dir '
'(argument to Config.__init__(), currently '
'"{4}") is set to the wrong value.'
.format(line_feeder.filename,
line_feeder.linenr,
kconfig_file, exp_kconfig_file,
self.base_dir))
# Add items to the same block
self._parse_file(f, parent, deps, visible_if_deps, block)
elif t0 == end_marker:
# We have reached the end of the block
return
elif t0 == T_IF:
# If statements are treated as syntactic sugar for adding
# dependencies to enclosed items and do not have an explicit
# object representation.
dep_expr = self._parse_expr(tokens, None, line,
line_feeder.filename,
line_feeder.linenr)
# Add items to the same block
self._parse_block(line_feeder, T_ENDIF, parent,
_make_and(dep_expr, deps),
visible_if_deps, block)
elif t0 == T_COMMENT:
comment = Comment()
comment.config = self
comment.parent = parent
comment.filename = line_feeder.filename
comment.linenr = line_feeder.linenr
comment.text = tokens.get_next()
self._parse_properties(line_feeder, comment, deps,
visible_if_deps)
self.comments.append(comment)
block.append(comment)
elif t0 == T_MENU:
menu = Menu()
menu.config = self
menu.parent = parent
menu.filename = line_feeder.filename
menu.linenr = line_feeder.linenr
menu.title = tokens.get_next()
self._parse_properties(line_feeder, menu, deps,
visible_if_deps)
# This needs to go before _parse_block() so that we get the
                # proper menu ordering in the case of nested menus
self.menus.append(menu)
# Parse contents and put Items in menu.block
self._parse_block(line_feeder, T_ENDMENU, menu, menu.dep_expr,
_make_and(visible_if_deps,
menu.visible_if_expr),
menu.block)
block.append(menu)
elif t0 == T_CHOICE:
name = tokens.get_next()
if name is None:
choice = Choice()
self.choices.append(choice)
else:
# Named choice
choice = self.named_choices.get(name)
if choice is None:
choice = Choice()
choice.name = name
self.named_choices[name] = choice
self.choices.append(choice)
choice.config = self
choice.parent = parent
choice.def_locations.append((line_feeder.filename,
line_feeder.linenr))
self._parse_properties(line_feeder, choice, deps,
visible_if_deps)
# Parse contents and put Items in choice.block
self._parse_block(line_feeder, T_ENDCHOICE, choice, deps,
visible_if_deps, choice.block)
choice._determine_actual_symbols()
# If no type is specified for the choice, its type is that of
# the first choice item with a specified type
if choice.type == UNKNOWN:
for item in choice.actual_symbols:
if item.type != UNKNOWN:
choice.type = item.type
break
# Each choice item of UNKNOWN type gets the type of the choice
for item in choice.actual_symbols:
if item.type == UNKNOWN:
item.type = choice.type
block.append(choice)
elif t0 == T_MAINMENU:
text = tokens.get_next()
if self.mainmenu_text is not None:
self._warn("overriding 'mainmenu' text. "
'Old value: "{0}", new value: "{1}".'
.format(self.mainmenu_text, text),
line_feeder.filename, line_feeder.linenr)
self.mainmenu_text = text
else:
_parse_error(line, "unrecognized construct",
line_feeder.filename, line_feeder.linenr)
def _parse_properties(self, line_feeder, stmt, deps, visible_if_deps):
"""Parsing of properties for symbols, menus, choices, and comments.
Takes care of propagating dependencies from enclosing menus and ifs."""
def parse_val_and_cond(tokens, line, filename, linenr):
"""Parses '<expr1> if <expr2>' constructs, where the 'if' part is
optional. Returns a tuple containing the parsed expressions, with
None as the second element if the 'if' part is missing."""
return (self._parse_expr(tokens, stmt, line, filename, linenr,
False),
self._parse_expr(tokens, stmt, line, filename, linenr)
if tokens.check(T_IF) else None)
# In case the symbol is defined in multiple locations, we need to
# remember what prompts, defaults, selects, and implies are new for
# this definition, as "depends on" should only apply to the local
# definition.
new_prompt = None
new_def_exprs = []
new_selects = []
new_implies = []
# Dependencies from 'depends on' statements
depends_on_expr = None
while 1:
line = line_feeder.get_next()
if line is None:
break
filename = line_feeder.filename
linenr = line_feeder.linenr
tokens = self._tokenize(line, False, filename, linenr)
t0 = tokens.get_next()
if t0 is None:
continue
# Cases are ordered roughly by frequency, which speeds things up a
# bit
if t0 == T_DEPENDS:
if not tokens.check(T_ON):
_parse_error(line, 'expected "on" after "depends"',
filename, linenr)
parsed_deps = self._parse_expr(tokens, stmt, line, filename,
linenr)
if isinstance(stmt, (Menu, Comment)):
stmt.orig_deps = _make_and(stmt.orig_deps, parsed_deps)
else:
depends_on_expr = _make_and(depends_on_expr, parsed_deps)
elif t0 == T_HELP:
# Find first non-blank (not all-space) line and get its
# indentation
line = line_feeder.next_nonblank()
if line is None:
stmt.help = ""
break
indent = _indentation(line)
if indent == 0:
                    # If the first non-empty line has zero indent, there is no
# help text
stmt.help = ""
line_feeder.unget()
break
# The help text goes on till the first non-empty line with less
# indent
help_lines = [_deindent(line, indent)]
while 1:
line = line_feeder.get_next()
if line is None or \
(not line.isspace() and _indentation(line) < indent):
stmt.help = "".join(help_lines)
break
help_lines.append(_deindent(line, indent))
if line is None:
break
line_feeder.unget()
elif t0 == T_SELECT:
target = tokens.get_next()
stmt.referenced_syms.add(target)
stmt.selected_syms.add(target)
new_selects.append(
(target,
self._parse_expr(tokens, stmt, line, filename, linenr)
if tokens.check(T_IF) else None))
elif t0 == T_IMPLY:
target = tokens.get_next()
stmt.referenced_syms.add(target)
stmt.implied_syms.add(target)
new_implies.append(
(target,
self._parse_expr(tokens, stmt, line, filename, linenr)
if tokens.check(T_IF) else None))
elif t0 in (T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING):
stmt.type = TOKEN_TO_TYPE[t0]
if tokens.peek_next() is not None:
new_prompt = parse_val_and_cond(tokens, line, filename,
linenr)
elif t0 == T_DEFAULT:
new_def_exprs.append(parse_val_and_cond(tokens, line, filename,
linenr))
elif t0 == T_DEF_BOOL:
stmt.type = BOOL
if tokens.peek_next() is not None:
new_def_exprs.append(parse_val_and_cond(tokens, line,
filename, linenr))
elif t0 == T_PROMPT:
# 'prompt' properties override each other within a single
# definition of a symbol, but additional prompts can be added
# by defining the symbol multiple times; hence 'new_prompt'
# instead of 'prompt'.
new_prompt = parse_val_and_cond(tokens, line, filename, linenr)
elif t0 == T_RANGE:
low = tokens.get_next()
high = tokens.get_next()
stmt.referenced_syms.add(low)
stmt.referenced_syms.add(high)
stmt.ranges.append(
(low, high,
self._parse_expr(tokens, stmt, line, filename, linenr)
if tokens.check(T_IF) else None))
elif t0 == T_DEF_TRISTATE:
stmt.type = TRISTATE
if tokens.peek_next() is not None:
new_def_exprs.append(parse_val_and_cond(tokens, line,
filename, linenr))
elif t0 == T_OPTION:
if tokens.check(T_ENV) and tokens.check(T_EQUAL):
env_var = tokens.get_next()
stmt.is_special_ = True
stmt.is_from_env = True
if env_var not in os.environ:
self._warn("The symbol {0} references the "
"non-existent environment variable {1} and "
"will get the empty string as its value. "
"If you're using Kconfiglib via "
"'make (i)scriptconfig', it should have "
"set up the environment correctly for you. "
"If you still got this message, that "
"might be an error, and you should email "
"ulfalizer a.t Google's email service."""
.format(stmt.name, env_var),
filename, linenr)
stmt.cached_val = ""
else:
stmt.cached_val = os.environ[env_var]
elif tokens.check(T_DEFCONFIG_LIST):
self.defconfig_sym = stmt
elif tokens.check(T_MODULES):
# To reduce warning spam, only warn if 'option modules' is
# set on some symbol that isn't MODULES, which should be
                    # safe. I haven't run into any projects that make use of
# modules besides the kernel yet, and there it's likely to
# keep being called "MODULES".
if stmt.name != "MODULES":
self._warn("the 'modules' option is not supported. "
"Let me know if this is a problem for you; "
"it shouldn't be that hard to implement. "
"(Note that modules are still supported -- "
"Kconfiglib just assumes the symbol name "
"MODULES, like older versions of the C "
"implementation did when 'option modules' "
"wasn't used.)",
filename, linenr)
elif tokens.check(T_ALLNOCONFIG_Y):
if not isinstance(stmt, Symbol):
_parse_error(line,
"the 'allnoconfig_y' option is only "
"valid for symbols",
filename, linenr)
stmt.allnoconfig_y = True
else:
_parse_error(line, "unrecognized option", filename, linenr)
elif t0 == T_VISIBLE:
if not tokens.check(T_IF):
_parse_error(line, 'expected "if" after "visible"',
filename, linenr)
if not isinstance(stmt, Menu):
_parse_error(line,
"'visible if' is only valid for menus",
filename, linenr)
parsed_deps = self._parse_expr(tokens, stmt, line, filename,
linenr)
stmt.visible_if_expr = _make_and(stmt.visible_if_expr,
parsed_deps)
elif t0 == T_OPTIONAL:
if not isinstance(stmt, Choice):
_parse_error(line,
'"optional" is only valid for choices',
filename,
linenr)
stmt.optional = True
else:
# See comment in Config.__init__()
self.end_line = line
self.end_line_tokens = tokens
break
# Done parsing properties. Now propagate 'depends on' and enclosing
# menu/if dependencies to expressions.
# The set of symbols referenced directly by the statement plus all
# symbols referenced by enclosing menus and ifs
stmt.all_referenced_syms = stmt.referenced_syms | _get_expr_syms(deps)
# Save original dependencies from enclosing menus and ifs
stmt.deps_from_containing = deps
if isinstance(stmt, (Menu, Comment)):
stmt.dep_expr = _make_and(stmt.orig_deps, deps)
else:
# Symbol or Choice
# See comment for 'menu_dep'
stmt.menu_dep = _make_and(deps, depends_on_expr)
# Propagate dependencies to prompts
if new_prompt is not None:
prompt, cond_expr = new_prompt
# Propagate 'visible if' dependencies from menus and local
# 'depends on' dependencies
cond_expr = _make_and(_make_and(cond_expr, visible_if_deps),
depends_on_expr)
# Save original
stmt.orig_prompts.append((prompt, cond_expr))
# Finalize with dependencies from enclosing menus and ifs
stmt.prompts.append((prompt, _make_and(cond_expr, deps)))
# Propagate dependencies to defaults
# Propagate 'depends on' dependencies
new_def_exprs = [(val_expr, _make_and(cond_expr, depends_on_expr))
for val_expr, cond_expr in new_def_exprs]
# Save original
stmt.orig_def_exprs.extend(new_def_exprs)
# Finalize with dependencies from enclosing menus and ifs
stmt.def_exprs.extend([(val_expr, _make_and(cond_expr, deps))
for val_expr, cond_expr in new_def_exprs])
# Propagate dependencies to selects and implies
# Only symbols can select and imply
if isinstance(stmt, Symbol):
# Propagate 'depends on' dependencies
new_selects = [(target, _make_and(cond_expr, depends_on_expr))
for target, cond_expr in new_selects]
new_implies = [(target, _make_and(cond_expr, depends_on_expr))
for target, cond_expr in new_implies]
# Save original
stmt.orig_selects.extend(new_selects)
stmt.orig_implies.extend(new_implies)
# Finalize with dependencies from enclosing menus and ifs
for target, cond in new_selects:
target.rev_dep = \
_make_or(target.rev_dep,
_make_and(stmt, _make_and(cond, deps)))
for target, cond in new_implies:
target.weak_rev_dep = \
_make_or(target.weak_rev_dep,
_make_and(stmt, _make_and(cond, deps)))
def _parse_expr(self, feed, cur_item, line, filename=None, linenr=None,
transform_m=True):
"""Parses an expression from the tokens in 'feed' using a simple
top-down approach. The result has the form
'(<operator>, [<parsed operands>])', where <operator> is e.g.
kconfiglib.AND. If there is only one operand (i.e., no && or ||), then
the operand is returned directly. This also goes for subexpressions.
feed: _Feed instance containing the tokens for the expression.
cur_item: The item (Symbol, Choice, Menu, or Comment) currently being
parsed, or None if we're not parsing an item. Used for recording
references to symbols.
line: The line containing the expression being parsed.
filename (default: None): The file containing the expression.
linenr (default: None): The line number containing the expression.
transform_m (default: True): Determines if 'm' should be rewritten to
'm && MODULES' -- see parse_val_and_cond().
Expression grammar, in decreasing order of precedence:
<expr> -> <symbol>
<symbol> '=' <symbol>
<symbol> '!=' <symbol>
'(' <expr> ')'
'!' <expr>
<expr> '&&' <expr>
<expr> '||' <expr>"""
# Use instance variables to avoid having to pass these as arguments
# through the top-down parser in _parse_expr_rec(), which is tedious
# and obfuscates the code. A profiler run shows no noticeable
# performance difference.
self._cur_item = cur_item
self._transform_m = transform_m
self._line = line
self._filename = filename
self._linenr = linenr
return self._parse_expr_rec(feed)
def _parse_expr_rec(self, feed):
or_term = self._parse_or_term(feed)
if not feed.check(T_OR):
# Common case -- no need for an OR node since it's just a single
# operand
return or_term
or_terms = [or_term, self._parse_or_term(feed)]
while feed.check(T_OR):
or_terms.append(self._parse_or_term(feed))
return (OR, or_terms)
def _parse_or_term(self, feed):
and_term = self._parse_factor(feed)
if not feed.check(T_AND):
# Common case -- no need for an AND node since it's just a single
# operand
return and_term
and_terms = [and_term, self._parse_factor(feed)]
while feed.check(T_AND):
and_terms.append(self._parse_factor(feed))
return (AND, and_terms)
def _parse_factor(self, feed):
token = feed.get_next()
if isinstance(token, (Symbol, str)):
if self._cur_item is not None and isinstance(token, Symbol):
self._cur_item.referenced_syms.add(token)
next_token = feed.peek_next()
# For conditional expressions ('depends on <expr>',
# '... if <expr>', etc.), "m" and m are rewritten to
# "m" && MODULES.
if next_token != T_EQUAL and next_token != T_UNEQUAL:
if self._transform_m and (token is self.m or token == "m"):
return (AND, ["m", self._sym_lookup("MODULES")])
return token
relation = EQUAL if (feed.get_next() == T_EQUAL) else UNEQUAL
token_2 = feed.get_next()
if self._cur_item is not None and isinstance(token_2, Symbol):
self._cur_item.referenced_syms.add(token_2)
return (relation, token, token_2)
if token == T_NOT:
return (NOT, self._parse_factor(feed))
if token == T_OPEN_PAREN:
expr_parse = self._parse_expr_rec(feed)
if not feed.check(T_CLOSE_PAREN):
_parse_error(self._line, "missing end parenthesis",
self._filename, self._linenr)
return expr_parse
_parse_error(self._line, "malformed expression", self._filename,
self._linenr)
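# For illustration, the tuple forms produced by the expression parser above
# (symbol names are hypothetical and stand for the Symbol instances returned
# by _sym_lookup()):
#
#   "A"               ->  A
#   "!A"              ->  (NOT, A)
#   "A && B && C"     ->  (AND, [A, B, C])
#   "A || (B && C)"   ->  (OR, [A, (AND, [B, C])])
#   "A != B"          ->  (UNEQUAL, A, B)
#   "m" (in a condition, with transform_m)  ->  (AND, ["m", MODULES])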
def _tokenize(self, s, for_eval, filename=None, linenr=None):
"""Returns a _Feed instance containing tokens derived from the string
's'. Registers any new symbols encountered (via _sym_lookup()).
(I experimented with a pure regular expression implementation, but it
came out slower, less readable, and wouldn't have been as flexible.)
for_eval: True when parsing an expression for a call to Config.eval(),
in which case we should not treat the first token specially nor
register new symbols."""
s = s.strip()
if s == "" or s[0] == "#":
return _Feed([])
if for_eval:
previous = None # The previous token seen
tokens = []
i = 0 # The current index in the string being tokenized
else:
# The initial word on a line is parsed specially. Let
# command_chars = [A-Za-z0-9_]. Then
# - leading non-command_chars characters are ignored, and
# - the first token consists of the following one or more
# command_chars characters.
# This is why things like "----help--" are accepted.
initial_token_match = _initial_token_re_match(s)
if initial_token_match is None:
return _Feed([])
keyword = _get_keyword(initial_token_match.group(1))
if keyword == T_HELP:
# Avoid junk after "help", e.g. "---", being registered as a
# symbol
return _Feed([T_HELP])
if keyword is None:
# We expect a keyword as the first token
_tokenization_error(s, filename, linenr)
previous = keyword
tokens = [keyword]
# The current index in the string being tokenized
i = initial_token_match.end()
# _tokenize() is a hotspot during parsing, and this speeds things up a
# bit
strlen = len(s)
append = tokens.append
# Main tokenization loop. (Handles tokens past the first one.)
while i < strlen:
# Test for an identifier/keyword preceded by whitespace first; this
# is the most common case.
id_keyword_match = _id_keyword_re_match(s, i)
if id_keyword_match:
# We have an identifier or keyword. The above also stripped any
# whitespace for us.
name = id_keyword_match.group(1)
# Jump past it
i = id_keyword_match.end()
keyword = _get_keyword(name)
if keyword is not None:
# It's a keyword
append(keyword)
elif previous in STRING_LEX:
# What would ordinarily be considered an identifier is
# treated as a string after certain tokens
append(name)
else:
# It's a symbol name. _sym_lookup() will take care of
# allocating a new Symbol instance if it's the first time
# we see it.
sym = self._sym_lookup(name, for_eval)
if previous == T_CONFIG or previous == T_MENUCONFIG:
# If the previous token is T_(MENU)CONFIG
# ("(menu)config"), we're tokenizing the first line of
# a symbol definition, and should remember this as a
# location where the symbol is defined
sym.def_locations.append((filename, linenr))
else:
# Otherwise, it's a reference to the symbol
sym.ref_locations.append((filename, linenr))
append(sym)
else:
# Not an identifier/keyword
while i < strlen and s[i].isspace():
i += 1
if i == strlen:
break
c = s[i]
i += 1
# String literal (constant symbol)
if c == '"' or c == "'":
if "\\" in s:
# Slow path: This could probably be sped up, but it's a
# very unusual case anyway.
quote = c
val = ""
while 1:
if i >= len(s):
_tokenization_error(s, filename, linenr)
c = s[i]
if c == quote:
break
if c == "\\":
if i + 1 >= len(s):
_tokenization_error(s, filename, linenr)
val += s[i + 1]
i += 2
else:
val += c
i += 1
i += 1
append(val)
else:
# Fast path: If the string contains no backslashes
# (almost always) we can simply look for the matching
# quote.
end = s.find(c, i)
if end == -1:
_tokenization_error(s, filename, linenr)
append(s[i:end])
i = end + 1
elif c == "&":
# Invalid characters are ignored
if i >= len(s) or s[i] != "&": continue
append(T_AND)
i += 1
elif c == "|":
# Invalid characters are ignored
if i >= len(s) or s[i] != "|": continue
append(T_OR)
i += 1
elif c == "!":
if i < len(s) and s[i] == "=":
append(T_UNEQUAL)
i += 1
else:
append(T_NOT)
elif c == "=": append(T_EQUAL)
elif c == "(": append(T_OPEN_PAREN)
elif c == ")": append(T_CLOSE_PAREN)
elif c == "#": break # Comment
else: continue # Invalid characters are ignored
previous = tokens[-1]
return _Feed(tokens)
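# For illustration, a rough sketch of what _tokenize() yields for a few
# Kconfig lines. Symbol names are hypothetical, <FOO> stands for the Symbol
# instance returned by _sym_lookup("FOO"), and T_DEPENDS/T_ON/T_DEFAULT are
# assumed to be among the token constants defined earlier in the module:
#
#   'config FOO'              ->  _Feed([T_CONFIG, <FOO>])
#   'depends on FOO && !BAR'  ->  _Feed([T_DEPENDS, T_ON, <FOO>, T_AND,
#                                        T_NOT, <BAR>])
#   'default "bar" if BAZ'    ->  _Feed([T_DEFAULT, "bar", T_IF, <BAZ>])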
def _sym_lookup(self, name, for_eval=False):
"""Fetches the symbol 'name' from the symbol table, creating and
registering it if it does not exist. If 'for_eval' is True, the symbol
won't be added to the symbol table if it does not exist -- this is for
Config.eval()."""
if name in self.syms:
return self.syms[name]
new_sym = Symbol()
new_sym.config = self
new_sym.name = name
if for_eval:
self._warn("no symbol {0} in configuration".format(name))
else:
self.syms[name] = new_sym
return new_sym
#
# Expression evaluation
#
def _eval_expr(self, expr):
"""Evaluates an expression to "n", "m", or "y"."""
# Handles e.g. an "x if y" condition where the "if y" part is missing.
if expr is None:
return "y"
res = self._eval_expr_rec(expr)
if res == "m":
# Promote "m" to "y" if we're running without modules.
#
# Internally, "m" is often rewritten to "m" && MODULES by both the
# C implementation and Kconfiglib, which takes care of cases where
# "m" should be demoted to "n" instead.
modules_sym = self.syms.get("MODULES")
if modules_sym is None or modules_sym.get_value() != "y":
return "y"
return res
def _eval_expr_rec(self, expr):
if isinstance(expr, Symbol):
# Non-bool/tristate symbols are always "n" in a tristate sense,
# regardless of their value
if expr.type != BOOL and expr.type != TRISTATE:
return "n"
return expr.get_value()
if isinstance(expr, str):
return expr if (expr == "y" or expr == "m") else "n"
# Ordered by frequency
if expr[0] == AND:
res = "y"
for subexpr in expr[1]:
ev = self._eval_expr_rec(subexpr)
# Return immediately upon discovering an "n" term
if ev == "n":
return "n"
if ev == "m":
res = "m"
# 'res' is either "m" or "y" here; we already handled the
# short-circuiting "n" case in the loop.
return res
if expr[0] == NOT:
ev = self._eval_expr_rec(expr[1])
if ev == "y":
return "n"
return "y" if (ev == "n") else "m"
if expr[0] == OR:
res = "n"
for subexpr in expr[1]:
ev = self._eval_expr_rec(subexpr)
# Return immediately upon discovering a "y" term
if ev == "y":
return "y"
if ev == "m":
res = "m"
# 'res' is either "n" or "m" here; we already handled the
# short-circuiting "y" case in the loop.
return res
if expr[0] == EQUAL:
return "y" if (_str_val(expr[1]) == _str_val(expr[2])) else "n"
if expr[0] == UNEQUAL:
return "y" if (_str_val(expr[1]) != _str_val(expr[2])) else "n"
_internal_error("Internal error while evaluating expression: "
"unknown operation {0}.".format(expr[0]))
def _eval_min(self, e1, e2):
"""Returns the minimum value of the two expressions. Equates None with
'y'."""
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_less(e1_eval, e2_eval) else e2_eval
def _eval_max(self, e1, e2):
"""Returns the maximum value of the two expressions. Equates None with
'y'."""
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_greater(e1_eval, e2_eval) else e2_eval
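# For illustration, how the two helpers above behave under the tristate
# ordering "n" < "m" < "y" (None counts as "y", as the docstrings note):
#
#   self._eval_min("m", "y")   ->  "m"
#   self._eval_max("m", "n")   ->  "m"
#   self._eval_min(None, "m")  ->  "m"   (None evaluates to "y")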
#
# Dependency tracking (for caching and invalidation)
#
def _build_dep(self):
"""Populates the Symbol.dep sets, linking the symbol to the symbols
that immediately depend on it in the sense that changing the value of
the symbol might affect the values of those other symbols. This is used
for caching/invalidation purposes. The calculated sets might be larger
than necessary as we don't do any complicated analysis of the
expressions."""
# Adds 'sym' as a directly dependent symbol to all symbols that appear
# in the expression 'e'
def add_expr_deps(e, sym):
for s in _get_expr_syms(e):
s.dep.add(sym)
# The directly dependent symbols of a symbol are:
# - Any symbols whose prompts, default values, rev_dep (select
# condition), weak_rev_dep (imply condition) or ranges depend on the
# symbol
# - Any symbols that belong to the same choice statement as the symbol
# (these won't be included in 'dep' as that makes the dependency
# graph unwieldy, but Symbol._get_dependent() will include them)
# - Any symbols in a choice statement that depends on the symbol
for sym in self.syms_iter():
for _, e in sym.prompts:
add_expr_deps(e, sym)
for v, e in sym.def_exprs:
add_expr_deps(v, sym)
add_expr_deps(e, sym)
add_expr_deps(sym.rev_dep, sym)
add_expr_deps(sym.weak_rev_dep, sym)
for l, u, e in sym.ranges:
add_expr_deps(l, sym)
add_expr_deps(u, sym)
add_expr_deps(e, sym)
if sym.is_choice_sym:
choice = sym.parent
for _, e in choice.prompts:
add_expr_deps(e, sym)
for _, e in choice.def_exprs:
add_expr_deps(e, sym)
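# For illustration (hypothetical symbols): given
#
#   config A
#       bool "A"
#
#   config B
#       bool "B" if A
#       default y if A
#
# _build_dep() adds B to A.dep, since both B's prompt condition and its
# default condition reference A, so a change to A's value may change B's.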
def _eq_to_sym(self, eq):
"""_expr_depends_on() helper. For (in)equalities of the form sym = y/m
or sym != n, returns sym. For other (in)equalities, returns None."""
relation, left, right = eq
def transform_y_m_n(item):
if item is self.y: return "y"
if item is self.m: return "m"
if item is self.n: return "n"
return item
left = transform_y_m_n(left)
right = transform_y_m_n(right)
# Make sure the symbol (if any) appears to the left
if not isinstance(left, Symbol):
left, right = right, left
if not isinstance(left, Symbol):
return None
if (relation == EQUAL and (right == "y" or right == "m")) or \
(relation == UNEQUAL and right == "n"):
return left
return None
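# For illustration, with FOO and BAR hypothetical Symbol instances:
#
#   (EQUAL,   FOO, <y symbol>)  ->  FOO   (FOO = y)
#   (UNEQUAL, FOO, "n")         ->  FOO   (FOO != n)
#   (EQUAL,   "m", FOO)         ->  FOO   (the symbol is moved to the left)
#   (EQUAL,   FOO, BAR)         ->  None  (not of the sym = y/m or sym != n form)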
def _expr_depends_on(self, expr, sym):
"""Reimplementation of expr_depends_symbol() from mconf.c. Used to
determine if a submenu should be implicitly created, which influences
what items inside choice statements are considered choice items."""
if expr is None:
return False
def rec(expr):
if isinstance(expr, str):
return False
if isinstance(expr, Symbol):
return expr is sym
if expr[0] in (EQUAL, UNEQUAL):
return self._eq_to_sym(expr) is sym
if expr[0] == AND:
for and_expr in expr[1]:
if rec(and_expr):
return True
return False
return rec(expr)
def _invalidate_all(self):
for sym in self.syms_iter():
sym._invalidate()
#
# Printing and misc.
#
def _expand_sym_refs(self, s):
"""Expands $-references to symbols in 's' to symbol values, or to the
empty string for undefined symbols."""
while 1:
sym_ref_match = _sym_ref_re_search(s)
if sym_ref_match is None:
return s
sym_name = sym_ref_match.group(0)[1:]
sym = self.syms.get(sym_name)
expansion = "" if sym is None else sym.get_value()
s = s[:sym_ref_match.start()] + \
expansion + \
s[sym_ref_match.end():]
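# For illustration (hypothetical symbols and values): if ARCH evaluates to
# "arm" and UNDEFINED is not in self.syms, then, roughly,
#
#   self._expand_sym_refs("arch/$ARCH/defconfig")  ->  "arch/arm/defconfig"
#   self._expand_sym_refs("foo $UNDEFINED bar")    ->  "foo  bar"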
def _expr_val_str(self, expr, no_value_str="(none)",
get_val_instead_of_eval=False):
"""Printing helper. Returns a string with 'expr' and its value.
no_value_str: String to return when 'expr' is missing (None).
get_val_instead_of_eval: Assume 'expr' is a symbol or string (constant
symbol) and get its value directly instead of evaluating it to a
tristate value."""
if expr is None:
return no_value_str
if get_val_instead_of_eval:
if isinstance(expr, str):
return _expr_to_str(expr)
val = expr.get_value()
else:
val = self._eval_expr(expr)
return "{0} (value: {1})".format(_expr_to_str(expr), _expr_to_str(val))
def _get_sym_or_choice_str(self, sc):
"""Symbols and choices have many properties in common, so we factor out
common __str__() stuff here. "sc" is short for "symbol or choice"."""
# As we deal a lot with string representations here, use some
# convenient shorthand:
s = _expr_to_str
#
# Common symbol/choice properties
#
user_val_str = "(no user value)" if sc.user_val is None else \
s(sc.user_val)
# Build prompts string
if not sc.prompts:
prompts_str = " (no prompts)"
else:
prompts_str_rows = []
for prompt, cond_expr in sc.orig_prompts:
prompts_str_rows.append(
' "{0}"'.format(prompt) if cond_expr is None else
' "{0}" if {1}'.format(prompt,
self._expr_val_str(cond_expr)))
prompts_str = "\n".join(prompts_str_rows)
# Build locations string
locations_str = "(no locations)" if not sc.def_locations else \
" ".join(["{0}:{1}".format(filename, linenr) for
filename, linenr in sc.def_locations])
# Build additional-dependencies-from-menus-and-ifs string
additional_deps_str = " " + \
self._expr_val_str(sc.deps_from_containing,
"(no additional dependencies)")
#
# Symbol-specific stuff
#
if isinstance(sc, Symbol):
# Build ranges string
if isinstance(sc, Symbol):
if not sc.ranges:
ranges_str = " (no ranges)"
else:
ranges_str_rows = []
for l, u, cond_expr in sc.ranges:
ranges_str_rows.append(
" [{0}, {1}]".format(s(l), s(u))
if cond_expr is None else
" [{0}, {1}] if {2}"
.format(s(l), s(u), self._expr_val_str(cond_expr)))
ranges_str = "\n".join(ranges_str_rows)
# Build default values string
if not sc.def_exprs:
defaults_str = " (no default values)"
else:
defaults_str_rows = []
for val_expr, cond_expr in sc.orig_def_exprs:
row_str = " " + self._expr_val_str(val_expr, "(none)",
sc.type == STRING)
defaults_str_rows.append(row_str)
defaults_str_rows.append(" Condition: " +
self._expr_val_str(cond_expr))
defaults_str = "\n".join(defaults_str_rows)
# Build selects string
if not sc.orig_selects:
selects_str = " (no selects)"
else:
selects_str_rows = []
for target, cond_expr in sc.orig_selects:
selects_str_rows.append(
" {0}".format(target.name) if cond_expr is None else
" {0} if {1}".format(target.name,
self._expr_val_str(cond_expr)))
selects_str = "\n".join(selects_str_rows)
# Build implies string
if not sc.orig_implies:
implies_str = " (no implies)"
else:
implies_str_rows = []
for target, cond_expr in sc.orig_implies:
implies_str_rows.append(
" {0}".format(target.name) if cond_expr is None else
" {0} if {1}".format(target.name,
self._expr_val_str(cond_expr)))
implies_str = "\n".join(implies_str_rows)
res = _lines("Symbol " +
("(no name)" if sc.name is None else sc.name),
"Type : " + TYPENAME[sc.type],
"Value : " + s(sc.get_value()),
"User value : " + user_val_str,
"Visibility : " + s(_get_visibility(sc)),
"Is choice item : " + BOOL_STR[sc.is_choice_sym],
"Is defined : " + BOOL_STR[sc.is_defined_],
"Is from env. : " + BOOL_STR[sc.is_from_env],
"Is special : " + BOOL_STR[sc.is_special_] + "\n")
if sc.ranges:
res += _lines("Ranges:", ranges_str + "\n")
res += _lines("Prompts:",
prompts_str,
"Default values:",
defaults_str,
"Selects:",
selects_str,
"Implies:",
implies_str,
"Reverse (select-related) dependencies:",
" (no reverse dependencies)"
if sc.rev_dep == "n"
else " " + self._expr_val_str(sc.rev_dep),
"Weak reverse (imply-related) dependencies:",
" (no weak reverse dependencies)"
if sc.weak_rev_dep == "n"
else " " + self._expr_val_str(sc.weak_rev_dep),
"Additional dependencies from enclosing menus "
"and ifs:",
additional_deps_str,
"Locations: " + locations_str)
return res
#
# Choice-specific stuff
#
# Build selected symbol string
sel = sc.get_selection()
sel_str = "(no selection)" if sel is None else sel.name
# Build default values string
if not sc.def_exprs:
defaults_str = " (no default values)"
else:
defaults_str_rows = []
for sym, cond_expr in sc.orig_def_exprs:
defaults_str_rows.append(
" {0}".format(sym.name) if cond_expr is None else
" {0} if {1}".format(sym.name,
self._expr_val_str(cond_expr)))
defaults_str = "\n".join(defaults_str_rows)
# Build contained symbols string
names = [sym.name for sym in sc.actual_symbols]
syms_string = " ".join(names) if names else "(empty)"
return _lines("Choice",
"Name (for named choices): " +
("(no name)" if sc.name is None else sc.name),
"Type : " + TYPENAME[sc.type],
"Selected symbol : " + sel_str,
"User value : " + user_val_str,
"Mode : " + s(sc.get_mode()),
"Visibility : " + s(_get_visibility(sc)),
"Optional : " + BOOL_STR[sc.optional],
"Prompts:",
prompts_str,
"Defaults:",
defaults_str,
"Choice symbols:",
" " + syms_string,
"Additional dependencies from enclosing menus and "
"ifs:",
additional_deps_str,
"Locations: " + locations_str)
def _warn(self, msg, filename=None, linenr=None):
"""For printing warnings to stderr."""
msg = _build_msg("warning: " + msg, filename, linenr)
if self.print_warnings:
sys.stderr.write(msg + "\n")
self._warnings.append(msg)
class Item(object):
"""Base class for symbols and other Kconfig constructs. Subclasses are
Symbol, Choice, Menu, and Comment."""
def is_symbol(self):
"""Returns True if the item is a symbol. Short for
isinstance(item, kconfiglib.Symbol)."""
return isinstance(self, Symbol)
def is_choice(self):
"""Returns True if the item is a choice. Short for
isinstance(item, kconfiglib.Choice)."""
return isinstance(self, Choice)
def is_menu(self):
"""Returns True if the item is a menu. Short for
isinstance(item, kconfiglib.Menu)."""
return isinstance(self, Menu)
def is_comment(self):
"""Returns True if the item is a comment. Short for
isinstance(item, kconfiglib.Comment)."""
return isinstance(self, Comment)
class Symbol(Item):
"""Represents a configuration symbol - e.g. FOO for
config FOO
..."""
#
# Public interface
#
def get_config(self):
"""Returns the Config instance this symbol is from."""
return self.config
def get_name(self):
"""Returns the name of the symbol."""
return self.name
def get_type(self):
"""Returns the type of the symbol: one of UNKNOWN, BOOL, TRISTATE,
STRING, HEX, or INT. These are defined at the top level of the module,
so you'd do something like
if sym.get_type() == kconfiglib.STRING:
..."""
return self.type
def get_prompts(self):
"""Returns a list of prompts defined for the symbol, in the order they
appear in the configuration files. Returns the empty list for symbols
with no prompt.
This list will have a single entry for the vast majority of symbols
having prompts, but having multiple prompts for a single symbol is
possible through having multiple 'config' entries for it."""
return [prompt for prompt, _ in self.orig_prompts]
def get_help(self):
"""Returns the help text of the symbol, or None if the symbol has no
help text."""
return self.help
def get_parent(self):
"""Returns the menu or choice statement that contains the symbol, or
None if the symbol is at the top level. Note that if statements are
treated as syntactic and do not have an explicit class
representation."""
return self.parent
def get_def_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the symbol is defined. For
the vast majority of symbols this list will only contain one element.
For the following Kconfig, FOO would get two entries: the lines marked
with *.
config FOO *
bool "foo prompt 1"
config FOO *
bool "foo prompt 2"
"""
return self.def_locations
def get_ref_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the symbol is referenced in
the configuration. For example, the lines marked by * would be included
for FOO below:
config A
bool
default BAR || FOO *
config B
tristate
depends on FOO *
default m if FOO *
if FOO *
config A
bool "A"
endif
config FOO (definition not included)
bool
"""
return self.ref_locations
def get_value(self):
"""Calculate and return the value of the symbol. See also
Symbol.set_user_value()."""
if self.cached_val is not None:
return self.cached_val
# As a quirk of Kconfig, undefined symbols get their name as their
# value. This is why things like "FOO = bar" work for seeing if FOO has
# the value "bar".
if self.type == UNKNOWN:
self.cached_val = self.name
return self.name
new_val = DEFAULT_VALUE[self.type]
vis = _get_visibility(self)
# This is easiest to calculate together with the value
self.write_to_conf = False
if self.type == BOOL or self.type == TRISTATE:
# The visibility and mode (modules-only or single-selection) of
# choice items will be taken into account in _get_visibility()
if self.is_choice_sym:
if vis != "n":
choice = self.parent
mode = choice.get_mode()
self.write_to_conf = (mode != "n")
if mode == "y":
new_val = "y" if choice.get_selection() is self \
else "n"
elif mode == "m":
if self.user_val == "m" or self.user_val == "y":
new_val = "m"
else:
# If the symbol is visible and has a user value, use that.
# Otherwise, look at defaults and weak reverse dependencies
# (implies).
use_defaults_and_weak_rev_deps = True
if vis != "n":
self.write_to_conf = True
if self.user_val is not None:
new_val = self.config._eval_min(self.user_val, vis)
use_defaults_and_weak_rev_deps = False
if use_defaults_and_weak_rev_deps:
for val_expr, cond_expr in self.def_exprs:
cond_eval = self.config._eval_expr(cond_expr)
if cond_eval != "n":
self.write_to_conf = True
new_val = self.config._eval_min(val_expr,
cond_eval)
break
weak_rev_dep_val = \
self.config._eval_expr(self.weak_rev_dep)
if weak_rev_dep_val != "n":
self.write_to_conf = True
new_val = self.config._eval_max(new_val,
weak_rev_dep_val)
# Reverse (select-related) dependencies take precedence
rev_dep_val = self.config._eval_expr(self.rev_dep)
if rev_dep_val != "n":
self.write_to_conf = True
new_val = self.config._eval_max(new_val, rev_dep_val)
# We need to promote "m" to "y" in two circumstances:
# 1) If our type is boolean
# 2) If our weak_rev_dep (from IMPLY) is "y"
if new_val == "m" and \
(self.type == BOOL or
self.config._eval_expr(self.weak_rev_dep) == "y"):
new_val = "y"
elif self.type == INT or self.type == HEX:
has_active_range = False
low = None
high = None
use_defaults = True
base = 16 if self.type == HEX else 10
for l, h, cond_expr in self.ranges:
if self.config._eval_expr(cond_expr) != "n":
has_active_range = True
low_str = _str_val(l)
high_str = _str_val(h)
low = int(low_str, base) if \
_is_base_n(low_str, base) else 0
high = int(high_str, base) if \
_is_base_n(high_str, base) else 0
break
if vis != "n":
self.write_to_conf = True
if self.user_val is not None and \
_is_base_n(self.user_val, base) and \
(not has_active_range or
low <= int(self.user_val, base) <= high):
# If the user value is OK, it is stored in exactly the same
# form as specified in the assignment (with or without
# "0x", etc).
use_defaults = False
new_val = self.user_val
if use_defaults:
for val_expr, cond_expr in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
self.write_to_conf = True
# If the default value is OK, it is stored in exactly
# the same form as specified. Otherwise, it is clamped
# to the range, and the output has "0x" as appropriate
# for the type.
new_val = _str_val(val_expr)
if _is_base_n(new_val, base):
new_val_num = int(new_val, base)
if has_active_range:
clamped_val = None
if new_val_num < low:
clamped_val = low
elif new_val_num > high:
clamped_val = high
if clamped_val is not None:
new_val = (hex(clamped_val) if \
self.type == HEX else str(clamped_val))
break
else: # For the for loop
# If no user value or default kicks in but the hex/int has
# an active range, then the low end of the range is used,
# provided it's > 0, with "0x" prepended as appropriate.
if has_active_range and low > 0:
new_val = (hex(low) if self.type == HEX else str(low))
elif self.type == STRING:
use_defaults = True
if vis != "n":
self.write_to_conf = True
if self.user_val is not None:
new_val = self.user_val
use_defaults = False
if use_defaults:
for val_expr, cond_expr in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
self.write_to_conf = True
new_val = _str_val(val_expr)
break
self.cached_val = new_val
return new_val
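# For illustration (hypothetical symbol): given
#
#   config FOO
#       int "foo"
#       range 2 8
#       default 16
#
# and no user value, get_value() picks the default "16", notices that it is
# outside the active range [2, 8], and clamps it to "8". A user value of "5"
# is kept as-is, while a user value of "100" falls outside the range and the
# (clamped) default is used instead.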
def get_user_value(self):
"""Returns the value assigned to the symbol in a .config or via
Symbol.set_user_value() (provided the value was valid for the type of
the symbol). Returns None in case of no user value."""
return self.user_val
def get_upper_bound(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns None.
Otherwise, returns the highest value the symbol can be set to with
Symbol.set_user_value() (that will not be truncated): one of "m" or
"y", arranged from lowest to highest. This corresponds to the highest
value the symbol could be given in e.g. the 'make menuconfig'
interface.
See also the tri_less*() and tri_greater*() functions, which could come
in handy."""
if self.type != BOOL and self.type != TRISTATE:
return None
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y", pinning it
if rev_dep == "m" and self.type == BOOL:
return None
vis = _get_visibility(self)
if TRI_TO_INT[vis] > TRI_TO_INT[rev_dep]:
return vis
return None
def get_lower_bound(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns None.
Otherwise, returns the lowest value the symbol can be set to with
Symbol.set_user_value() (that will not be truncated): one of "n" or
"m", arranged from lowest to highest. This corresponds to the lowest
value the symbol could be given in e.g. the 'make menuconfig'
interface.
See also the tri_less*() and tri_greater*() functions, which could come
in handy."""
if self.type != BOOL and self.type != TRISTATE:
return None
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y", pinning it
if rev_dep == "m" and self.type == BOOL:
return None
if TRI_TO_INT[_get_visibility(self)] > TRI_TO_INT[rev_dep]:
return rev_dep
return None
def get_assignable_values(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns the empty list.
Otherwise, returns a list containing the user values that can be
assigned to the symbol (that won't be truncated). Usage example:
if "m" in sym.get_assignable_values():
sym.set_user_value("m")
This is basically a more convenient interface to
get_lower/upper_bound() when wanting to test if a particular tristate
value can be assigned."""
if self.type != BOOL and self.type != TRISTATE:
return []
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y", pinning it
if rev_dep == "m" and self.type == BOOL:
return []
res = ["n", "m", "y"][TRI_TO_INT[rev_dep] :
TRI_TO_INT[_get_visibility(self)] + 1]
return res if len(res) > 1 else []
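# For illustration (using the usual n=0, m=1, y=2 mapping in TRI_TO_INT): for
# a tristate symbol whose select condition (rev_dep) evaluates to "n" and
# whose visibility is "m", the slice above is ["n", "m", "y"][0:2], i.e.
# ["n", "m"] -- "n" and "m" are assignable but "y" is not. If the symbol is
# instead selected to "m" and visible at "m", the slice is just ["m"] and the
# empty list is returned, since there is nothing left to choose between.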
def get_visibility(self):
"""Returns the visibility of the symbol: one of "n", "m" or "y". For
bool and tristate symbols, this is an upper bound on the value users
can set for the symbol. For other types of symbols, a visibility of "n"
means the user value will be ignored. A visibility of "n" corresponds
to not being visible in the 'make *config' interfaces.
Example (assuming we're running with modules enabled -- i.e., MODULES
set to 'y'):
# Assume this has been assigned 'n'
config N_SYM
tristate "N_SYM"
# Assume this has been assigned 'm'
config M_SYM
tristate "M_SYM"
# Has visibility 'n'
config A
tristate "A"
depends on N_SYM
# Has visibility 'm'
config B
tristate "B"
depends on M_SYM
# Has visibility 'y'
config C
tristate "C"
# Has no prompt, and hence visibility 'n'
config D
tristate
Having visibility be tri-valued ensures that e.g. a symbol cannot be
set to "y" by the user if it depends on a symbol with value "m", which
wouldn't be safe.
You should probably look at get_lower/upper_bound(),
get_assignable_values() and is_modifiable() before using this."""
return _get_visibility(self)
def get_referenced_symbols(self, refs_from_enclosing=False):
"""Returns the set() of all symbols referenced by this symbol. For
example, the symbol defined by
config FOO
bool
prompt "foo" if A && B
default C if D
depends on E
select F if G
references the symbols A through G.
refs_from_enclosing (default: False): If True, the symbols referenced
by enclosing menus and ifs will be included in the result."""
return self.all_referenced_syms if refs_from_enclosing else \
self.referenced_syms
def get_selected_symbols(self):
"""Returns the set() of all symbols X for which this symbol has a
'select X' or 'select X if Y' (regardless of whether Y is satisfied or
not). This is a subset of the symbols returned by
get_referenced_symbols()."""
return self.selected_syms
def get_implied_symbols(self):
"""Returns the set() of all symbols X for which this symbol has an
'imply X' or 'imply X if Y' (regardless of whether Y is satisfied or
not). This is a subset of the symbols returned by
get_referenced_symbols()."""
return self.implied_syms
def set_user_value(self, v):
"""Sets the user value of the symbol.
Equal in effect to assigning the value to the symbol within a .config
file. Use get_lower/upper_bound() or get_assignable_values() to find
the range of currently assignable values for bool and tristate symbols;
setting values outside this range will cause the user value to differ
from the result of Symbol.get_value() (be truncated). Values that are
invalid for the type (such as a_bool.set_user_value("foo")) are
ignored, and a warning is emitted if an attempt is made to assign such
a value.
For any type of symbol, is_modifiable() can be used to check if a user
value will currently have any effect on the symbol, as determined by
its visibility and range of assignable values. Any value that is valid
for the type (bool, tristate, etc.) will end up being reflected in
get_user_value() though, and might have an effect later if conditions
change. To get rid of the user value, use unset_user_value().
Any symbols dependent on the symbol are (recursively) invalidated, so
things will just work with regards to dependencies.
v: The user value to give to the symbol."""
self._set_user_value_no_invalidate(v, False)
# There might be something more efficient you could do here, but play
# it safe.
if self.name == "MODULES":
self.config._invalidate_all()
return
self._invalidate()
self._invalidate_dependent()
def unset_user_value(self):
"""Resets the user value of the symbol, as if the symbol had never
gotten a user value via Config.load_config() or
Symbol.set_user_value()."""
self._unset_user_value_no_recursive_invalidate()
self._invalidate_dependent()
def is_modifiable(self):
"""Returns True if the value of the symbol could be modified by calling
Symbol.set_user_value().
For bools and tristates, this corresponds to the symbol being visible
in the 'make menuconfig' interface and not already being pinned to a
specific value (e.g. because it is selected by another symbol).
For strings and numbers, this corresponds to just being visible. (See
Symbol.get_visibility().)"""
if self.is_special_:
return False
if self.type == BOOL or self.type == TRISTATE:
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y", pinning it
if rev_dep == "m" and self.type == BOOL:
return False
return TRI_TO_INT[_get_visibility(self)] > TRI_TO_INT[rev_dep]
return _get_visibility(self) != "n"
def is_defined(self):
"""Returns False if the symbol is referred to in the Kconfig but never
actually defined."""
return self.is_defined_
def is_special(self):
"""Returns True if the symbol is one of the special symbols n, m, y, or
UNAME_RELEASE, or gets its value from the environment."""
return self.is_special_
def is_from_environment(self):
"""Returns True if the symbol gets its value from the environment."""
return self.is_from_env
def has_ranges(self):
"""Returns True if the symbol is of type INT or HEX and has ranges that
limit what values it can take on."""
return bool(self.ranges)
def is_choice_symbol(self):
"""Returns True if the symbol is in a choice statement and is an actual
choice symbol (see Choice.get_symbols())."""
return self.is_choice_sym
def is_choice_selection(self):
"""Returns True if the symbol is contained in a choice statement and is
the selected item. Equivalent to
sym.is_choice_symbol() and sym.get_parent().get_selection() is sym"""
return self.is_choice_sym and self.parent.get_selection() is self
def is_allnoconfig_y(self):
"""Returns True if the symbol has the 'allnoconfig_y' option set."""
return self.allnoconfig_y
def __str__(self):
"""Returns a string containing various information about the symbol."""
return self.config._get_sym_or_choice_str(self)
#
# Private methods
#
def __init__(self):
"""Symbol constructor -- not intended to be called directly by
Kconfiglib clients."""
self.name = None
self.type = UNKNOWN
self.prompts = []
self.def_exprs = [] # 'default' properties
self.ranges = [] # 'range' properties (for int and hex)
self.help = None # Help text
self.rev_dep = "n" # Reverse (select-related) dependencies
self.weak_rev_dep = "n" # Weak reverse (imply-related) dependencies
self.config = None
self.parent = None
self.user_val = None # Value set by user
# The prompt, default value, select, and imply conditions without any
# dependencies from menus and ifs propagated to them
self.orig_prompts = []
self.orig_def_exprs = []
self.orig_selects = []
self.orig_implies = []
# Dependencies inherited from containing menus and ifs
self.deps_from_containing = None
# The set of symbols referenced by this symbol (see
# get_referenced_symbols())
self.referenced_syms = set()
# The set of symbols selected by this symbol (see
# get_selected_symbols())
self.selected_syms = set()
# The set of symbols implied by this symbol (see get_implied_symbols())
self.implied_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and ifs
self.all_referenced_syms = set()
# This records only dependencies from enclosing ifs and menus together
# with local 'depends on' dependencies. Needed when determining actual
# choice items (hrrrr...). See Choice._determine_actual_symbols().
self.menu_dep = None
# See Symbol.get_ref/def_locations().
self.def_locations = []
self.ref_locations = []
# Populated in Config._build_dep() after parsing. Links the symbol to
# the symbols that immediately depend on it (in a caching/invalidation
# sense). The total set of dependent symbols for the symbol (the
# transitive closure) is calculated on an as-needed basis in
# _get_dependent().
self.dep = set()
# Cached values
# Caches the calculated value
self.cached_val = None
# Caches the visibility, which acts as an upper bound on the value
self.cached_visibility = None
# Caches the total list of dependent symbols. Calculated in
# _get_dependent().
self.cached_deps = None
# Flags
# Does the symbol have an entry in the Kconfig file? The trailing
# underscore avoids a collision with is_defined().
self.is_defined_ = False
# Should the symbol get an entry in .config?
self.write_to_conf = False
# Set to true when _make_conf() is called on a symbol, so that symbols
# defined in multiple locations only get one .config entry. We need to
# reset it prior to writing out a new .config.
self.already_written = False
# This is set to True for "actual" choice symbols; see
# Choice._determine_actual_symbols().
self.is_choice_sym = False
# Does the symbol get its value in some special way, e.g. from the
# environment or by being one of the special symbols n, m, and y? If
# so, the value is stored in self.cached_val, which is never
# invalidated. The trailing underscore avoids a collision with
# is_special().
self.is_special_ = False
# Does the symbol get its value from the environment?
self.is_from_env = False
# Does the symbol have the 'allnoconfig_y' option set?
self.allnoconfig_y = False
def _invalidate(self):
if self.is_special_:
return
if self.is_choice_sym:
self.parent._invalidate()
self.cached_val = None
self.cached_visibility = None
def _invalidate_dependent(self):
for sym in self._get_dependent():
sym._invalidate()
def _set_user_value_no_invalidate(self, v, suppress_load_warnings):
"""Like set_user_value(), but does not invalidate any symbols.
suppress_load_warnings: some warnings are annoying when loading a
.config that can be helpful when manually invoking set_user_value().
This flag is set to True to suppress such warnings.
Perhaps this could be made optional for load_config() instead."""
if self.is_special_:
if self.is_from_env:
self.config._warn('attempt to assign the value "{0}" to the '
'symbol {1}, which gets its value from the '
'environment. Assignment ignored.'
.format(v, self.name))
else:
self.config._warn('attempt to assign the value "{0}" to the '
'special symbol {1}. Assignment ignored.'
.format(v, self.name))
return
if not self.is_defined_:
filename, linenr = self.ref_locations[0]
if self.config.print_undef_assign:
_stderr_msg('note: attempt to assign the value "{0}" to {1}, '
"which is referenced at {2}:{3} but never "
"defined. Assignment ignored."
.format(v, self.name, filename, linenr))
return
# Check if the value is valid for our type
if not ((self.type == BOOL and (v == "y" or v == "n") ) or
(self.type == TRISTATE and (v == "y" or v == "m" or
v == "n") ) or
(self.type == STRING ) or
(self.type == INT and _is_base_n(v, 10) ) or
(self.type == HEX and _is_base_n(v, 16) )):
self.config._warn('the value "{0}" is invalid for {1}, which has '
"type {2}. Assignment ignored."
.format(v, self.name, TYPENAME[self.type]))
return
if not self.prompts and not suppress_load_warnings:
self.config._warn('assigning "{0}" to the symbol {1} which '
'lacks prompts and thus has visibility "n". '
'The assignment will have no effect.'
.format(v, self.name))
self.user_val = v
if self.is_choice_sym and (self.type == BOOL or self.type == TRISTATE):
choice = self.parent
if v == "y":
choice.user_val = self
choice.user_mode = "y"
elif v == "m":
choice.user_val = None
choice.user_mode = "m"
def _unset_user_value_no_recursive_invalidate(self):
self._invalidate()
self.user_val = None
if self.is_choice_sym:
self.parent._unset_user_value()
def _make_conf(self, append_fn):
if self.already_written:
return
self.already_written = True
# Note: write_to_conf is determined in get_value()
val = self.get_value()
if not self.write_to_conf:
return
if self.type == BOOL or self.type == TRISTATE:
append_fn("{0}{1}={2}".format(self.config.config_prefix, self.name, val)
if val == "y" or val == "m" else
"# {0}{1} is not set".format(self.config.config_prefix, self.name))
elif self.type == INT or self.type == HEX:
append_fn("{0}{1}={2}".format(self.config.config_prefix, self.name, val))
elif self.type == STRING:
# Escape \ and "
append_fn('{0}{1}="{2}"'
.format(self.config.config_prefix, self.name,
val.replace("\\", "\\\\").replace('"', '\\"')))
else:
_internal_error("Internal error while creating .config: unknown "
'type "{0}".'.format(self.type))
def _get_dependent(self):
"""Returns the set of symbols that should be invalidated if the value
of the symbol changes, because they might be affected by the change.
Note that this is an internal API -- it's probably of limited
usefulness to clients."""
if self.cached_deps is not None:
return self.cached_deps
res = set(self.dep)
for s in self.dep:
res |= s._get_dependent()
if self.is_choice_sym:
# Choice symbols also depend (recursively) on their siblings. The
# siblings are not included in 'dep' to avoid dependency loops.
for sibling in self.parent.actual_symbols:
if sibling is not self:
res.add(sibling)
res |= sibling.dep
for s in sibling.dep:
res |= s._get_dependent()
self.cached_deps = res
return res
def _has_auto_menu_dep_on(self, on):
"""See Choice._determine_actual_symbols()."""
if not isinstance(self.parent, Choice):
_internal_error("Attempt to determine auto menu dependency for "
"symbol ouside of choice.")
if not self.prompts:
# If we have no prompt, use the menu dependencies instead (what was
# specified with 'depends on')
return self.menu_dep is not None and \
self.config._expr_depends_on(self.menu_dep, on)
for _, cond_expr in self.prompts:
if self.config._expr_depends_on(cond_expr, on):
return True
return False
class Menu(Item):
"""Represents a menu statement."""
#
# Public interface
#
def get_config(self):
"""Return the Config instance this menu is from."""
return self.config
def get_title(self):
"""Returns the title text of the menu."""
return self.title
def get_parent(self):
"""Returns the menu or choice statement that contains the menu, or
None if the menu is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_location(self):
"""Returns the location of the menu as a (filename, linenr) tuple,
where filename is a string and linenr an int."""
return (self.filename, self.linenr)
def get_items(self, recursive=False):
"""Returns a list containing the items (symbols, menus, choice
statements and comments) in the menu, in the same order that the
items appear within the menu.
recursive (default: False): True if items contained in items within the
menu should be included recursively (preorder)."""
if not recursive:
return self.block
res = []
for item in self.block:
res.append(item)
if isinstance(item, Menu):
res.extend(item.get_items(True))
elif isinstance(item, Choice):
res.extend(item.get_items())
return res
def get_symbols(self, recursive=False):
"""Returns a list containing the symbols in the menu, in the same order
that they appear within the menu.
recursive (default: False): True if symbols contained in items within
the menu should be included recursively."""
return [item for item in self.get_items(recursive) if
isinstance(item, Symbol)]
def get_visibility(self):
"""Returns the visibility of the menu. This also affects the visibility
of subitems. See also Symbol.get_visibility()."""
return self.config._eval_expr(self.dep_expr)
def get_visible_if_visibility(self):
"""Returns the visibility the menu gets from its 'visible if'
condition. "y" if the menu has no 'visible if' condition."""
return self.config._eval_expr(self.visible_if_expr)
def get_referenced_symbols(self, refs_from_enclosing=False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else \
self.referenced_syms
def __str__(self):
"""Returns a string containing various information about the menu."""
depends_on_str = self.config._expr_val_str(self.orig_deps,
"(no dependencies)")
visible_if_str = self.config._expr_val_str(self.visible_if_expr,
"(no dependencies)")
additional_deps_str = " " + \
self.config._expr_val_str(self.deps_from_containing,
"(no additional dependencies)")
return _lines("Menu",
"Title : " + self.title,
"'depends on' dependencies : " + depends_on_str,
"'visible if' dependencies : " + visible_if_str,
"Additional dependencies from enclosing menus and "
"ifs:",
additional_deps_str,
"Location: {0}:{1}".format(self.filename, self.linenr))
#
# Private methods
#
def __init__(self):
"""Menu constructor -- not intended to be called directly by
Kconfiglib clients."""
self.title = None
self.dep_expr = None
self.visible_if_expr = None
self.block = [] # List of contained items
self.config = None
self.parent = None
# Dependency expression without dependencies from enclosing menus and
# ifs propagated
self.orig_deps = None
# Dependencies inherited from containing menus and ifs
self.deps_from_containing = None
# The set of symbols referenced by this menu (see
# get_referenced_symbols())
self.referenced_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and ifs
self.all_referenced_syms = None
self.filename = None
self.linenr = None
def _make_conf(self, append_fn):
if self.config._eval_expr(self.dep_expr) != "n" and \
self.config._eval_expr(self.visible_if_expr) != "n":
append_fn("\n#\n# {0}\n#".format(self.title))
_make_block_conf(self.block, append_fn)
class Choice(Item):
"""Represents a choice statement. A choice can be in one of three modes:
"n" - The choice is not visible and no symbols can be selected.
"m" - Any number of symbols can be set to "m". The rest will be "n". This
is safe since potentially conflicting options don't actually get
compiled into the kernel simultaneously with "m".
"y" - One symbol will be "y" while the rest are "n".
Only tristate choices can be in "m" mode, and the visibility of the choice
is an upper bound on the mode, so that e.g. a choice that depends on a
symbol with value "m" will be in "m" mode.
The mode changes automatically when a value is assigned to a symbol within
the choice.
See Symbol.get_visibility() too."""
#
# Public interface
#
def get_config(self):
"""Returns the Config instance this choice is from."""
return self.config
def get_name(self):
"""For named choices, returns the name. Returns None for unnamed
choices. No named choices appear anywhere in the kernel Kconfig files
as of Linux 3.7.0-rc8."""
return self.name
def get_type(self):
"""Returns the type of the choice. See Symbol.get_type()."""
return self.type
def get_prompts(self):
"""Returns a list of prompts defined for the choice, in the order they
appear in the configuration files. Returns the empty list for choices
with no prompt.
This list will have a single entry for the vast majority of choices
having prompts, but having multiple prompts for a single choice is
possible through having multiple 'choice' entries for it (though I'm
not sure if that ever happens in practice)."""
return [prompt for prompt, _ in self.orig_prompts]
def get_help(self):
"""Returns the help text of the choice, or None if the choice has no
help text."""
return self.help
def get_parent(self):
"""Returns the menu or choice statement that contains the choice, or
None if the choice is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_def_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the choice is defined. For
the vast majority of choices (all of them as of Linux 3.7.0-rc8) this
list will only contain one element, but it's possible for named choices
to be defined in multiple locations."""
return self.def_locations
def get_selection(self):
"""Returns the symbol selected (either by the user or through
defaults), or None if either no symbol is selected or the mode is not
"y"."""
if self.cached_selection is not None:
if self.cached_selection == NO_SELECTION:
return None
return self.cached_selection
if self.get_mode() != "y":
return self._cache_ret(None)
# User choice available?
if self.user_val is not None and _get_visibility(self.user_val) == "y":
return self._cache_ret(self.user_val)
if self.optional:
return self._cache_ret(None)
return self._cache_ret(self.get_selection_from_defaults())
def get_selection_from_defaults(self):
"""Like Choice.get_selection(), but acts as if no symbol has been
selected by the user and no 'optional' flag is in effect."""
if not self.actual_symbols:
return None
for symbol, cond_expr in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
chosen_symbol = symbol
break
else:
chosen_symbol = self.actual_symbols[0]
# Is the chosen symbol visible?
if _get_visibility(chosen_symbol) != "n":
return chosen_symbol
# Otherwise, pick the first visible symbol
for sym in self.actual_symbols:
if _get_visibility(sym) != "n":
return sym
return None
def get_user_selection(self):
"""If the choice is in "y" mode and has a user-selected symbol, returns
that symbol. Otherwise, returns None."""
return self.user_val
def get_items(self):
"""Gets all items contained in the choice in the same order as within
the configuration ("items" instead of "symbols" since choices and
comments might appear within choices. This only happens in one place as
of Linux 3.7.0-rc8, in drivers/usb/gadget/Kconfig)."""
return self.block
def get_symbols(self):
"""Returns a list containing the choice's symbols.
A quirk (perhaps a bug) of Kconfig is that you can put items within a
choice that will not be considered members of the choice insofar as
selection is concerned. This happens for example if one symbol within a
choice 'depends on' the symbol preceding it, or if you put non-symbol
items within choices.
As of Linux 3.7.0-rc8, this seems to be used intentionally in one
place: drivers/usb/gadget/Kconfig.
This function returns the "proper" symbols of the choice in the order
they appear in the choice, excluding such items. If you want all items
in the choice, use get_items()."""
return self.actual_symbols
def get_referenced_symbols(self, refs_from_enclosing=False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else \
self.referenced_syms
def get_visibility(self):
"""Returns the visibility of the choice statement: one of "n", "m" or
"y". This acts as an upper limit on the mode of the choice (though bool
choices can only have the mode "y"). See the class documentation for an
explanation of modes."""
return _get_visibility(self)
def get_mode(self):
"""Returns the mode of the choice. See the class documentation for
an explanation of modes."""
minimum_mode = "n" if self.optional else "m"
mode = self.user_mode if self.user_mode is not None else minimum_mode
mode = self.config._eval_min(mode, _get_visibility(self))
# Promote "m" to "y" for boolean choices
if mode == "m" and self.type == BOOL:
return "y"
return mode
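# For illustration: an ordinary (non-optional) tristate choice whose
# visibility evaluates to "m" gets mode "m" -- the minimum mode is "m" and
# the visibility caps it there. The same choice declared 'optional' and
# without a user mode gets "n". For a bool choice, an "m" mode is promoted
# to "y" as above.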
def is_optional(self):
"""Returns True if the choice has the 'optional' flag set (and so will
default to "n" mode)."""
return self.optional
def __str__(self):
"""Returns a string containing various information about the choice
statement."""
return self.config._get_sym_or_choice_str(self)
#
# Private methods
#
def __init__(self):
"""Choice constructor -- not intended to be called directly by
Kconfiglib clients."""
self.name = None # Yes, choices can be named
self.type = UNKNOWN
self.prompts = []
self.def_exprs = [] # 'default' properties
self.help = None # Help text
self.block = [] # List of contained items
self.config = None
self.parent = None
self.user_val = None
self.user_mode = None
# We need to filter out symbols that appear within the choice block but
# are not considered choice items (see
# Choice._determine_actual_symbols()) This list holds the "actual"
# choice items.
self.actual_symbols = []
# The prompts and default values without any dependencies from
# enclosing menus and ifs propagated
self.orig_prompts = []
self.orig_def_exprs = []
# Dependencies inherited from containing menus and ifs
self.deps_from_containing = None
# The set of symbols referenced by this choice (see
# get_referenced_symbols())
self.referenced_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and ifs
self.all_referenced_syms = set()
# See Choice.get_def_locations()
self.def_locations = []
# Cached values
self.cached_selection = None
self.cached_visibility = None
self.optional = False
def _determine_actual_symbols(self):
"""If a symbol's visibility depends on the preceding symbol within a
choice, it is no longer viewed as a choice item. (This is quite
possibly a bug, but some things consciously use it... ugh. It stems
from automatic submenu creation.) In addition, it's possible to have
choices and comments within choices, and those shouldn't be considered
choice items either. Only drivers/usb/gadget/Kconfig seems to depend on
any of this. This method computes the "actual" items in the choice and
sets the is_choice_sym flag on them (retrieved via is_choice_symbol()).
Don't let this scare you: an earlier version simply checked for a
sequence of symbols where all symbols after the first appeared in the
'depends on' expression of the first, and that worked fine. The added
complexity is to be future-proof in the event that
drivers/usb/gadget/Kconfig turns even more sinister. It might very well
be overkilling things (especially if that file is refactored ;)."""
# Items might depend on each other in a tree structure, so we need a
# stack to keep track of the current tentative parent
stack = []
for item in self.block:
if not isinstance(item, Symbol):
stack = []
continue
while stack:
if item._has_auto_menu_dep_on(stack[-1]):
# The item should not be viewed as a choice item, so don't
# set item.is_choice_sym
stack.append(item)
break
else:
stack.pop()
else:
item.is_choice_sym = True
self.actual_symbols.append(item)
stack.append(item)
def _cache_ret(self, selection):
# As None is used to indicate the lack of a cached value we can't use
# that to cache the fact that the choice has no selection. Instead, we
# use the symbolic constant NO_SELECTION.
if selection is None:
self.cached_selection = NO_SELECTION
else:
self.cached_selection = selection
return selection
def _invalidate(self):
self.cached_selection = None
self.cached_visibility = None
def _unset_user_value(self):
self._invalidate()
self.user_val = None
self.user_mode = None
def _make_conf(self, append_fn):
_make_block_conf(self.block, append_fn)
class Comment(Item):
"""Represents a comment statement."""
#
# Public interface
#
def get_config(self):
"""Returns the Config instance this comment is from."""
return self.config
def get_text(self):
"""Returns the text of the comment."""
return self.text
def get_parent(self):
"""Returns the menu or choice statement that contains the comment, or
None if the comment is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_location(self):
"""Returns the location of the comment as a (filename, linenr) tuple,
where filename is a string and linenr an int."""
return (self.filename, self.linenr)
def get_visibility(self):
"""Returns the visibility of the comment. See also
Symbol.get_visibility()."""
return self.config._eval_expr(self.dep_expr)
def get_referenced_symbols(self, refs_from_enclosing=False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else \
self.referenced_syms
def __str__(self):
"""Returns a string containing various information about the
comment."""
dep_str = self.config._expr_val_str(self.orig_deps,
"(no dependencies)")
additional_deps_str = " " + \
self.config._expr_val_str(self.deps_from_containing,
"(no additional dependencies)")
return _lines("Comment",
"Text: " + str(self.text),
"Dependencies: " + dep_str,
"Additional dependencies from enclosing menus and "
"ifs:",
additional_deps_str,
"Location: {0}:{1}".format(self.filename, self.linenr))
#
# Private methods
#
def __init__(self):
"""Comment constructor -- not intended to be called directly by
Kconfiglib clients."""
self.text = None
self.dep_expr = None
self.config = None
self.parent = None
# Dependency expression without dependencies from enclosing menus and
# ifs propagated
self.orig_deps = None
# Dependencies inherited from containing menus and ifs
self.deps_from_containing = None
# The set of symbols referenced by this comment (see
# get_referenced_symbols())
self.referenced_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and ifs
self.all_referenced_syms = None
self.filename = None
self.linenr = None
def _make_conf(self, append_fn):
if self.config._eval_expr(self.dep_expr) != "n":
append_fn("\n#\n# {0}\n#".format(self.text))
class Kconfig_Syntax_Error(Exception):
"""Exception raised for syntax errors."""
pass
class Internal_Error(Exception):
"""Exception raised for internal errors."""
pass
#
# Public functions
#
def tri_less(v1, v2):
"""Returns True if the tristate v1 is less than the tristate v2, where "n",
"m" and "y" are ordered from lowest to highest."""
return TRI_TO_INT[v1] < TRI_TO_INT[v2]
def tri_less_eq(v1, v2):
"""Returns True if the tristate v1 is less than or equal to the tristate
v2, where "n", "m" and "y" are ordered from lowest to highest."""
return TRI_TO_INT[v1] <= TRI_TO_INT[v2]
def tri_greater(v1, v2):
"""Returns True if the tristate v1 is greater than the tristate v2, where
"n", "m" and "y" are ordered from lowest to highest."""
return TRI_TO_INT[v1] > TRI_TO_INT[v2]
def tri_greater_eq(v1, v2):
"""Returns True if the tristate v1 is greater than or equal to the tristate
v2, where "n", "m" and "y" are ordered from lowest to highest."""
return TRI_TO_INT[v1] >= TRI_TO_INT[v2]
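# Illustrative sketch (added for documentation, not part of the original
# module): the four helpers above reduce to integer comparisons via
# TRI_TO_INT, giving the ordering "n" < "m" < "y".
def _demo_tristate_ordering():
    assert tri_less("n", "m") and tri_less("m", "y")
    assert tri_less_eq("y", "y")
    assert tri_greater("y", "n")
    assert not tri_greater_eq("n", "m")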
#
# Internal classes
#
class _Feed(object):
"""Class for working with sequences in a stream-like fashion; handy for
tokens."""
# This would be more helpful on the item classes, but would remove some
# flexibility
__slots__ = ['items', 'length', 'i']
def __init__(self, items):
self.items = items
self.length = len(self.items)
self.i = 0
def get_next(self):
if self.i >= self.length:
return None
item = self.items[self.i]
self.i += 1
return item
def peek_next(self):
return None if self.i >= self.length else self.items[self.i]
def check(self, token):
"""Check if the next token is 'token'. If so, remove it from the token
feed and return True. Otherwise, leave it in and return False."""
if self.i < self.length and self.items[self.i] == token:
self.i += 1
return True
return False
def unget_all(self):
self.i = 0
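# Illustrative usage sketch for _Feed (not part of the original module):
# check() consumes a matching item, peek_next() looks ahead without consuming,
# and unget_all() rewinds to the start.
def _demo_feed():
    feed = _Feed(["config", "FOO", "bool"])
    assert feed.check("config")         # consumed
    assert feed.peek_next() == "FOO"    # not consumed
    assert feed.get_next() == "FOO"
    feed.unget_all()
    assert feed.get_next() == "config"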
class _FileFeed(object):
"""Feeds lines from a file. Keeps track of the filename and current line
number. Joins any line ending in \\ with the following line. We need to be
careful to get the line number right in the presence of continuation
lines."""
__slots__ = ['filename', 'lines', 'length', 'linenr']
def __init__(self, filename):
self.filename = _clean_up_path(filename)
with open(filename, "r") as f:
# No interleaving of I/O and processing yet. Don't know if it would
# help.
self.lines = f.readlines()
self.length = len(self.lines)
self.linenr = 0
def get_next(self):
if self.linenr >= self.length:
return None
line = self.lines[self.linenr]
self.linenr += 1
while line.endswith("\\\n"):
line = line[:-2] + self.lines[self.linenr]
self.linenr += 1
return line
def peek_next(self):
linenr = self.linenr
if linenr >= self.length:
return None
line = self.lines[linenr]
while line.endswith("\\\n"):
linenr += 1
line = line[:-2] + self.lines[linenr]
return line
def unget(self):
self.linenr -= 1
while self.lines[self.linenr].endswith("\\\n"):
self.linenr -= 1
def next_nonblank(self):
"""Removes lines up to and including the next non-blank (not all-space)
line and returns it. Returns None if there are no more non-blank
lines."""
while 1:
line = self.get_next()
if line is None or not line.isspace():
return line
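# Illustrative sketch (not part of the original module; writes a throwaway
# temporary file): a line ending in a backslash is joined with the next one,
# and linenr still counts both physical lines.
def _demo_file_feed():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", delete=False) as f:
        f.write("foo \\\nbar\n")
        path = f.name
    feed = _FileFeed(path)
    assert feed.get_next() == "foo bar\n"
    assert feed.linenr == 2
    os.remove(path)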
#
# Internal functions
#
def _get_visibility(sc):
"""Symbols and Choices have a "visibility" that acts as an upper bound on
the values a user can set for them, corresponding to the visibility in e.g.
'make menuconfig'. This function calculates the visibility for the Symbol
or Choice 'sc' -- the logic is nearly identical."""
if sc.cached_visibility is None:
vis = "n"
for _, cond_expr in sc.prompts:
vis = sc.config._eval_max(vis, cond_expr)
if isinstance(sc, Symbol) and sc.is_choice_sym:
if sc.type == TRISTATE and vis == "m" and \
sc.parent.get_mode() == "y":
# Choice symbols with visibility "m" are not visible if the
# choice has mode "y"
vis = "n"
else:
vis = sc.config._eval_min(vis, _get_visibility(sc.parent))
# Promote "m" to "y" if we're dealing with a non-tristate
if vis == "m" and sc.type != TRISTATE:
vis = "y"
sc.cached_visibility = vis
return sc.cached_visibility
def _make_and(e1, e2):
"""Constructs an AND (&&) expression. Performs trivial simplification.
Nones equate to 'y'.
Note: returns None if e1 == e2 == None."""
if e1 is None or e1 == "y":
return e2
if e2 is None or e2 == "y":
return e1
# Prefer to merge argument lists if possible to reduce the number of nodes
if isinstance(e1, tuple) and e1[0] == AND:
if isinstance(e2, tuple) and e2[0] == AND:
return (AND, e1[1] + e2[1])
return (AND, e1[1] + [e2])
if isinstance(e2, tuple) and e2[0] == AND:
return (AND, e2[1] + [e1])
return (AND, [e1, e2])
def _make_or(e1, e2):
"""Constructs an OR (||) expression. Performs trivial simplification and
avoids Nones. Nones equate to 'y', which is usually what we want, but needs
to be kept in mind."""
# Perform trivial simplification and avoid None's (which
# correspond to y's)
if e1 is None or e2 is None or e1 == "y" or e2 == "y":
return "y"
if e1 == "n":
return e2
# Prefer to merge argument lists if possible to reduce the number of nodes
if isinstance(e1, tuple) and e1[0] == OR:
if isinstance(e2, tuple) and e2[0] == OR:
return (OR, e1[1] + e2[1])
return (OR, e1[1] + [e2])
if isinstance(e2, tuple) and e2[0] == OR:
return (OR, e2[1] + [e1])
return (OR, [e1, e2])
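# Illustrative sketch (not part of the original module; plain strings stand in
# for constant symbols): the 'y'/'n' identities are folded away and nested AND
# nodes are merged into a single argument list.
def _demo_expr_builders():
    assert _make_and("y", "FOO") == "FOO"
    assert _make_or("n", "FOO") == "FOO"
    assert _make_or("FOO", "y") == "y"
    assert _make_and(_make_and("A", "B"), "C") == (AND, ["A", "B", "C"])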
def _get_expr_syms_rec(expr, res):
"""_get_expr_syms() helper. Recurses through expressions."""
if isinstance(expr, Symbol):
res.add(expr)
elif isinstance(expr, str):
return
elif expr[0] == AND or expr[0] == OR:
for term in expr[1]:
_get_expr_syms_rec(term, res)
elif expr[0] == NOT:
_get_expr_syms_rec(expr[1], res)
elif expr[0] == EQUAL or expr[0] == UNEQUAL:
if isinstance(expr[1], Symbol):
res.add(expr[1])
if isinstance(expr[2], Symbol):
res.add(expr[2])
else:
_internal_error("Internal error while fetching symbols from an "
"expression with token stream {0}.".format(expr))
def _get_expr_syms(expr):
"""Returns the set() of symbols appearing in expr."""
res = set()
if expr is not None:
_get_expr_syms_rec(expr, res)
return res
def _str_val(obj):
"""Returns the value of obj as a string. If obj is not a string (constant
symbol), it must be a Symbol."""
return obj if isinstance(obj, str) else obj.get_value()
def _make_block_conf(block, append_fn):
"""Returns a list of .config strings for a block (list) of items."""
# Collect the substrings in a list and later use join() instead of += to
# build the final .config contents. With older Python versions, this yields
# linear instead of quadratic complexity.
for item in block:
item._make_conf(append_fn)
def _sym_str_string(sym_or_str):
if isinstance(sym_or_str, str):
return '"' + sym_or_str + '"'
return sym_or_str.name
def _intersperse(lst, op):
"""_expr_to_str() helper. Gets the string representation of each expression
in lst and produces a list where op has been inserted between the
elements."""
if not lst:
return ""
res = []
def handle_sub_expr(expr):
no_parens = isinstance(expr, (str, Symbol)) or \
expr[0] in (EQUAL, UNEQUAL) or \
PRECEDENCE[op] <= PRECEDENCE[expr[0]]
if not no_parens:
res.append("(")
res.extend(_expr_to_str_rec(expr))
if not no_parens:
res.append(")")
op_str = OP_TO_STR[op]
handle_sub_expr(lst[0])
for expr in lst[1:]:
res.append(op_str)
handle_sub_expr(expr)
return res
def _expr_to_str_rec(expr):
if expr is None:
return [""]
if isinstance(expr, (Symbol, str)):
return [_sym_str_string(expr)]
if expr[0] in (AND, OR):
return _intersperse(expr[1], expr[0])
if expr[0] == NOT:
need_parens = not isinstance(expr[1], (str, Symbol))
res = ["!"]
if need_parens:
res.append("(")
res.extend(_expr_to_str_rec(expr[1]))
if need_parens:
res.append(")")
return res
if expr[0] in (EQUAL, UNEQUAL):
return [_sym_str_string(expr[1]),
OP_TO_STR[expr[0]],
_sym_str_string(expr[2])]
def _expr_to_str(expr):
return "".join(_expr_to_str_rec(expr))
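# Illustrative sketch (not part of the original module; plain strings stand in
# for constant symbols, which is why they are printed quoted): parentheses are
# only emitted where operator precedence requires them.
def _demo_expr_to_str():
    assert _expr_to_str((AND, ["A", (NOT, "B")])) == '"A" && !"B"'
    assert _expr_to_str((OR, [(AND, ["A", "B"]), "C"])) == '"A" && "B" || "C"'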
def _indentation(line):
"""Returns the length of the line's leading whitespace, treating tab stops
as being spaced 8 characters apart."""
line = line.expandtabs()
return len(line) - len(line.lstrip())
def _deindent(line, indent):
"""Deindent 'line' by 'indent' spaces."""
line = line.expandtabs()
if len(line) <= indent:
return line
return line[indent:]
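# Illustrative sketch (not part of the original module): tabs are expanded to
# 8-column stops before the indentation is measured or stripped.
def _demo_indent_helpers():
    assert _indentation("\tfoo") == 8
    assert _deindent("\tfoo", 8) == "foo"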
def _is_base_n(s, n):
try:
int(s, n)
return True
except ValueError:
return False
def _lines(*args):
"""Returns a string consisting of all arguments, with newlines inserted
between them."""
return "\n".join(args)
def _comment(s):
"""Returns a new string with "#" inserted before each line in 's'."""
if not s:
return "#"
res = "".join(["#" + line for line in s.splitlines(True)])
if s.endswith("\n"):
return res + "#"
return res
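# Illustrative sketch (not part of the original module): every line gets a
# leading "#", and a trailing newline yields a closing "#" of its own.
def _demo_comment():
    assert _comment("") == "#"
    assert _comment("a\nb") == "#a\n#b"
    assert _comment("a\n") == "#a\n#"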
def _clean_up_path(path):
"""Strips an initial "./" and any trailing slashes from 'path'."""
if path.startswith("./"):
path = path[2:]
return path.rstrip("/")
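# Illustrative sketch (not part of the original module):
def _demo_clean_up_path():
    assert _clean_up_path("./arch/Kconfig") == "arch/Kconfig"
    assert _clean_up_path("drivers/") == "drivers"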
def _build_msg(msg, filename, linenr):
if filename is not None:
msg = "{0}:{1}: ".format(_clean_up_path(filename), linenr) + msg
return msg
def _stderr_msg(msg, filename, linenr):
sys.stderr.write(_build_msg(msg, filename, linenr) + "\n")
def _tokenization_error(s, filename, linenr):
loc = "" if filename is None else "{0}:{1}: ".format(filename, linenr)
raise Kconfig_Syntax_Error("{0}Couldn't tokenize '{1}'"
.format(loc, s.strip()))
def _parse_error(s, msg, filename, linenr):
loc = "" if filename is None else "{0}:{1}: ".format(filename, linenr)
raise Kconfig_Syntax_Error("{0}Couldn't parse '{1}'{2}"
.format(loc, s.strip(),
"." if msg is None else ": " + msg))
def _internal_error(msg):
raise Internal_Error(msg +
"\nSorry! You may want to send an email to ulfalizer a.t Google's "
"email service to tell me about this. Include the message above and the "
"stack trace and describe what you were doing.")
#
# Internal global constants
#
# Tokens
(T_AND, T_OR, T_NOT,
T_OPEN_PAREN, T_CLOSE_PAREN,
T_EQUAL, T_UNEQUAL,
T_MAINMENU, T_MENU, T_ENDMENU,
T_SOURCE, T_CHOICE, T_ENDCHOICE,
T_COMMENT, T_CONFIG, T_MENUCONFIG,
T_HELP, T_IF, T_ENDIF, T_DEPENDS, T_ON,
T_OPTIONAL, T_PROMPT, T_DEFAULT,
T_BOOL, T_TRISTATE, T_HEX, T_INT, T_STRING,
T_DEF_BOOL, T_DEF_TRISTATE,
T_SELECT, T_IMPLY, T_RANGE, T_OPTION, T_ALLNOCONFIG_Y, T_ENV,
T_DEFCONFIG_LIST, T_MODULES, T_VISIBLE) = range(40)
# The leading underscore before the function assignments below prevents pydoc
# from listing them. The constants could be hidden too, but they're fairly
# obviously internal anyway, so don't bother spamming the code.
# Keyword to token map. Note that the get() method is assigned directly as a
# small optimization.
_get_keyword = \
{"mainmenu": T_MAINMENU, "menu": T_MENU, "endmenu": T_ENDMENU,
"endif": T_ENDIF, "endchoice": T_ENDCHOICE, "source": T_SOURCE,
"choice": T_CHOICE, "config": T_CONFIG, "comment": T_COMMENT,
"menuconfig": T_MENUCONFIG, "help": T_HELP, "if": T_IF,
"depends": T_DEPENDS, "on": T_ON, "optional": T_OPTIONAL,
"prompt": T_PROMPT, "default": T_DEFAULT, "bool": T_BOOL, "boolean": T_BOOL,
"tristate": T_TRISTATE, "int": T_INT, "hex": T_HEX, "def_bool": T_DEF_BOOL,
"def_tristate": T_DEF_TRISTATE, "string": T_STRING, "select": T_SELECT,
"imply" : T_IMPLY, "range": T_RANGE, "option": T_OPTION,
"allnoconfig_y": T_ALLNOCONFIG_Y, "env": T_ENV,
"defconfig_list": T_DEFCONFIG_LIST, "modules": T_MODULES,
"visible": T_VISIBLE}.get
# Strings to use for True and False
BOOL_STR = {False: "false", True: "true"}
# Tokens after which identifier-like lexemes are treated as strings. T_CHOICE
# is included to avoid symbols being registered for named choices.
STRING_LEX = frozenset((T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING, T_CHOICE,
T_PROMPT, T_MENU, T_COMMENT, T_SOURCE, T_MAINMENU))
# Matches the initial token on a line; see _tokenize(). Also eats trailing
# whitespace as an optimization.
_initial_token_re_match = re.compile(r"[^\w]*(\w+)\s*").match
# Matches an identifier/keyword optionally preceded by whitespace. Also eats
# trailing whitespace as an optimization.
_id_keyword_re_match = re.compile(r"\s*([\w./-]+)\s*").match
# Regular expression for finding $-references to symbols in strings
_sym_ref_re_search = re.compile(r"\$[A-Za-z0-9_]+").search
# Integers representing symbol types
UNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(6)
# Strings to use for types
TYPENAME = {UNKNOWN: "unknown", BOOL: "bool", TRISTATE: "tristate",
STRING: "string", HEX: "hex", INT: "int"}
# Token to type mapping
TOKEN_TO_TYPE = {T_BOOL: BOOL, T_TRISTATE: TRISTATE, T_STRING: STRING,
T_INT: INT, T_HEX: HEX}
# Default values for symbols of different types (the value the symbol gets if
# it is not assigned a user value and none of its 'default' clauses kick in)
DEFAULT_VALUE = {BOOL: "n", TRISTATE: "n", STRING: "", INT: "", HEX: ""}
# Indicates that no item is selected in a choice statement
NO_SELECTION = 0
# Integers representing expression types
AND, OR, NOT, EQUAL, UNEQUAL = range(5)
# Map from tristate values to integers
TRI_TO_INT = {"n": 0, "m": 1, "y": 2}
# Printing-related stuff
OP_TO_STR = {AND: " && ", OR: " || ", EQUAL: " = ", UNEQUAL: " != "}
PRECEDENCE = {OR: 0, AND: 1, NOT: 2}
| 39.421602
| 96
| 0.567548
|
9df18046c0a23da812371eee29ff9d9ea313d52b
| 1,205
|
py
|
Python
|
qiskit/algorithms/optimizers/nlopts/direct_l_rand.py
|
ajavadia/qiskit-sdk-py
|
a59e8e6be1793197e19998c1f7dcfc45e6f2f3af
|
[
"Apache-2.0"
] | 504
|
2018-12-15T16:34:03.000Z
|
2022-03-26T11:24:53.000Z
|
qiskit/algorithms/optimizers/nlopts/direct_l_rand.py
|
ajavadia/qiskit-sdk-py
|
a59e8e6be1793197e19998c1f7dcfc45e6f2f3af
|
[
"Apache-2.0"
] | 746
|
2018-12-16T16:44:42.000Z
|
2021-07-10T16:59:43.000Z
|
qiskit/algorithms/optimizers/nlopts/direct_l_rand.py
|
ajavadia/qiskit-sdk-py
|
a59e8e6be1793197e19998c1f7dcfc45e6f2f3af
|
[
"Apache-2.0"
] | 421
|
2018-12-22T14:49:00.000Z
|
2022-03-04T09:47:07.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""DIviding RECTangles Locally-biased Randomized optimizer. """
from .nloptimizer import NLoptOptimizer, NLoptOptimizerType
class DIRECT_L_RAND(NLoptOptimizer): # pylint: disable=invalid-name
"""
DIviding RECTangles Locally-biased Randomized optimizer.
DIRECT-L RAND is the "locally biased" variant with some randomization in near-tie decisions.
See also :class:`DIRECT_L`
NLopt global optimizer, derivative-free.
For further detail, please refer to
http://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#direct-and-direct-l
"""
def get_nlopt_optimizer(self) -> NLoptOptimizerType:
""" Return NLopt optimizer type """
return NLoptOptimizerType.GN_DIRECT_L_RAND
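# Minimal usage sketch (illustrative, not part of the original module). It
# assumes the optional ``nlopt`` package is installed; ``max_evals`` is the
# evaluation budget accepted by the NLoptOptimizer base class.
if __name__ == '__main__':
    optimizer = DIRECT_L_RAND(max_evals=200)
    print(optimizer.get_nlopt_optimizer())  # NLoptOptimizerType.GN_DIRECT_L_RAND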
| 36.515152
| 96
| 0.749378
|
91278701b30f90afbdb3cebbb0929460b7b3107c
| 969
|
py
|
Python
|
13_email/start_13_blue_yellow_app_email/blue_yellow_app/data/dbsession.py
|
g2gcio/course-demo
|
b0d00a6ac7a6a6a17af963cee67cf13dc5941e95
|
[
"MIT"
] | 276
|
2016-04-04T20:57:36.000Z
|
2022-03-12T02:42:46.000Z
|
13_email/start_13_blue_yellow_app_email/blue_yellow_app/data/dbsession.py
|
g2gcio/course-demo
|
b0d00a6ac7a6a6a17af963cee67cf13dc5941e95
|
[
"MIT"
] | 37
|
2016-10-13T12:04:27.000Z
|
2020-11-22T10:36:53.000Z
|
13_email/start_13_blue_yellow_app_email/blue_yellow_app/data/dbsession.py
|
g2gcio/course-demo
|
b0d00a6ac7a6a6a17af963cee67cf13dc5941e95
|
[
"MIT"
] | 163
|
2016-10-03T02:10:00.000Z
|
2022-03-25T03:43:01.000Z
|
import sqlalchemy
import sqlalchemy.orm
from blue_yellow_app.data.modelbase import SqlAlchemyBase
# noinspection PyUnresolvedReferences
import blue_yellow_app.data.album
# noinspection PyUnresolvedReferences
import blue_yellow_app.data.track
# noinspection PyUnresolvedReferences
import blue_yellow_app.data.account
class DbSessionFactory:
factory = None
@staticmethod
def global_init(db_file):
if DbSessionFactory.factory:
return
if not db_file or not db_file.strip():
raise Exception("You must specify a data file.")
conn_str = 'sqlite:///' + db_file
print("Connecting to db with conn string: {}".format(conn_str))
engine = sqlalchemy.create_engine(conn_str, echo=False)
SqlAlchemyBase.metadata.create_all(engine)
DbSessionFactory.factory = sqlalchemy.orm.sessionmaker(bind=engine)
@staticmethod
def create_session():
return DbSessionFactory.factory()
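# Minimal usage sketch (illustrative, not part of the original module; the
# file name is only an example): global_init() must run once before
# create_session() is used.
if __name__ == '__main__':
    DbSessionFactory.global_init('blue_yellow_app.sqlite')
    session = DbSessionFactory.create_session()
    try:
        pass  # query or add ORM objects from the modules imported above
    finally:
        session.close()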
| 29.363636
| 75
| 0.733746
|
6999c7ebe74967fa901c5e77ba269f218d6c8348
| 4,494
|
py
|
Python
|
nova/monkey_patch.py
|
Nexenta/nova
|
ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3
|
[
"Apache-2.0"
] | 1
|
2020-08-14T02:20:59.000Z
|
2020-08-14T02:20:59.000Z
|
nova/monkey_patch.py
|
Nexenta/nova
|
ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3
|
[
"Apache-2.0"
] | 2
|
2021-03-31T20:04:16.000Z
|
2021-12-13T20:45:03.000Z
|
nova/monkey_patch.py
|
Nexenta/nova
|
ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3
|
[
"Apache-2.0"
] | 1
|
2020-07-24T02:31:45.000Z
|
2020-07-24T02:31:45.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Enable eventlet monkey patching."""
import os
def _monkey_patch():
# NOTE(mdbooth): Anything imported here will not be monkey patched. It is
# important to take care not to import anything here which requires monkey
# patching.
import eventlet
import sys
# NOTE(mdbooth): Imports only sys (2019-01-30). Other modules imported at
# runtime on execution of debugger.init().
from nova import debugger
# Note any modules with known monkey-patching issues which have been
# imported before monkey patching.
# urllib3: https://bugs.launchpad.net/nova/+bug/1808951
# oslo_context.context: https://bugs.launchpad.net/nova/+bug/1773102
problems = (set(['urllib3', 'oslo_context.context']) &
set(sys.modules.keys()))
# See https://bugs.launchpad.net/nova/+bug/1164822
# TODO(mdbooth): This feature was deprecated and removed in eventlet at
# some point but brought back in version 0.21.0, presumably because some
# users still required it to work round issues. However, there have been a
# number of greendns fixes in eventlet since then. Specifically, it looks
# as though the originally reported IPv6 issue may have been fixed in
# version 0.24.0. We should remove this when we can confirm that the
# original issue is fixed.
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
if debugger.enabled():
# turn off thread patching to enable the remote debugger
eventlet.monkey_patch(thread=False)
elif os.name == 'nt':
# for nova-compute running on Windows(Hyper-v)
# pipes don't support non-blocking I/O
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
# Monkey patch the original current_thread to use the up-to-date _active
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
# https://github.com/eventlet/eventlet/issues/592
import __original_module_threading as orig_threading
import threading
orig_threading.current_thread.__globals__['_active'] = threading._active
# NOTE(mdbooth): Log here instead of earlier to avoid loading oslo logging
# before monkey patching.
# NOTE(mdbooth): Ideally we would raise an exception here, as this is
# likely to cause problems when executing nova code. However, some non-nova
# tools load nova only to extract metadata and do not execute it. Two
# examples are oslopolicy-policy-generator and sphinx, both of which can
# fail if we assert here. It is not ideal that these utilities are monkey
# patching at all, but we should not break them.
# TODO(mdbooth): If there is any way to reliably determine if we are being
# loaded in that kind of context without breaking existing callers, we
# should do it and bypass monkey patching here entirely.
if problems:
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
LOG.warning("Modules with known eventlet monkey patching issues were "
"imported prior to eventlet monkey patching: %s. This "
"warning can usually be ignored if the caller is only "
"importing and not executing nova code.",
', '.join(problems))
# NOTE(mdbooth): This workaround is required to avoid breaking sphinx. See
# separate comment in doc/source/conf.py. It may also be useful for other
# non-nova utilities. Ideally the requirement for this workaround will be
# removed as soon as possible, so do not rely on, or extend it.
if (os.environ.get('OS_NOVA_DISABLE_EVENTLET_PATCHING', '').lower()
not in ('1', 'true', 'yes')):
_monkey_patch()
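# Illustrative note (not part of the original module): a metadata-only tool
# can opt out before importing this module by setting the variable checked
# above, for example:
#
#     import os
#     os.environ['OS_NOVA_DISABLE_EVENTLET_PATCHING'] = 'true'
#     import nova.monkey_patch  # noqa: F401  (monkey patching is skipped)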
| 45.857143
| 79
| 0.70761
|
dc2261f57c6d7e3c393dc1d171e4ff58a7ef9811
| 34,568
|
py
|
Python
|
ub/modules/person.py
|
parv779/javes-3.0
|
d510717b2756a65b39ff18d9f53d4adc46d8e23f
|
[
"MIT"
] | 15
|
2020-12-13T17:37:05.000Z
|
2021-06-23T00:00:49.000Z
|
ub/modules/person.py
|
parv779/javes-3.0
|
d510717b2756a65b39ff18d9f53d4adc46d8e23f
|
[
"MIT"
] | 2
|
2021-01-11T16:39:31.000Z
|
2021-01-25T22:35:28.000Z
|
ub/modules/person.py
|
parv779/javes-3.0
|
d510717b2756a65b39ff18d9f53d4adc46d8e23f
|
[
"MIT"
] | 78
|
2020-12-13T17:52:51.000Z
|
2022-03-24T03:43:09.000Z
|
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
from telethon.tl.functions.messages import ReportSpamRequest
from telethon.tl.types import User
import datetime, asyncio, os, json, subprocess, time, math, sys, pytz, spamwatch, html
import datetime as datet  # module alias used by the AFK date maths below ('datetime' is rebound to the class further down)
from requests import get  # used by the CAS lookup in the userinfo handler
from asyncio import sleep
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from random import choice, randint
from telethon.events import StopPropagation
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import ChannelParticipantsAdmins, MessageEntityMentionName
from ub import AFKREASON, COUNT_MSG, CMD_HELP, ISAFK, BOTLOG, BOTLOG_CHATID, USERS, PM_AUTO_BAN, bot, TEMP_DOWNLOAD_DIRECTORY, LOGS
from ub.events import javes05, javess, rekcah05
from telethon.errors import rpcbaseerrors
from ub import BOTLOG, BOTLOG_CHATID, CMD_HELP
from ub import CMD_HELP, BOTLOG_CHATID
from telethon.tl.functions.photos import GetUserPhotosRequest
from telethon.utils import get_input_location
from speedtest import Speedtest
from telethon import functions
from os import remove, execle, path, makedirs, getenv, environ
from shutil import rmtree
from pySmartDL import SmartDL
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from telethon.tl.types import DocumentAttributeVideo
#from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from ub import CMD_HELP, bot, HEROKU_APIKEY, HEROKU_APPNAME, UPSTREAM_REPO_URL
from asyncio import create_subprocess_shell as asyncrunapp
from asyncio.subprocess import PIPE as asyncPIPE
from platform import python_version, uname
from shutil import which
from os import remove
from telethon import version
from ub import ALIVE_NAME, PM_MESSAGE, JAVES_NAME, JAVES_MSG, ORI_MSG, AFK_MESSAGE, AFK_MSG, BLOCK_MSG, BLOCK_MESSAGE
W_CHAT = set(int(x) for x in os.environ.get("WHITE_CHATS", "").split())
import time as t
from datetime import datetime
x = math.inf
counter = 0
start=t.time()
from ub.javes_main.heroku_var import config
from sqlalchemy.exc import IntegrityError
from ub import (COUNT_PM, CMD_HELP, BOTLOG, BOTLOG_CHATID, PM_AUTO_BAN,LASTMSG, LOGS)
from ub import CMD_HELP, ALIVE_NAME, PM_MESSAGE, JAVES_NAME, JAVES_MSG, ORI_MSG
JAVES_NNAME = str(JAVES_NAME) if JAVES_NAME else str(JAVES_MSG)
BLOCK_MMSG = str(BLOCK_MESSAGE) if BLOCK_MESSAGE else str(BLOCK_MSG)
AFK_MMSG = str(AFK_MESSAGE) if AFK_MESSAGE else str(AFK_MSG)
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
MAX_FLOOD_IN_P_M_s = config.MAX_FLOOD_IN_P_M_s
PM_MESSAGE = str(PM_MESSAGE) if PM_MESSAGE else str(ORI_MSG)
javes = bot
UNAPPROVED_MSG = (
f"`{JAVES_NNAME}:`**{PM_MESSAGE}**")
swapi = os.environ.get("SPAMWATCH_API_KEY", None)
SPAMWATCH_SHOUT = os.environ.get("SPAMWATCH_SHOUT", None)
if PM_AUTO_BAN:
@javess(incoming=True, disable_edited=True, disable_errors=True)
async def permitpm(event):
if PM_AUTO_BAN:
self_user = await event.client.get_me()
if event.is_private and event.chat_id != 929138153 and event.chat_id != self_user.id and not (
await event.get_sender()).bot:
try:
from ub.modules.sql_helper.pm_permit_sql import is_approved
from ub.modules.sql_helper.globals import gvarstatus
except AttributeError:
return
apprv = is_approved(event.chat_id)
notifsoff = gvarstatus("NOTIF_OFF")
if not apprv and event.text != UNAPPROVED_MSG:
if event.chat_id in LASTMSG:
prevmsg = LASTMSG[event.chat_id]
if event.text != prevmsg:
async for message in event.client.iter_messages(
event.chat_id,
from_user='me',
search=UNAPPROVED_MSG):
await message.delete()
LASTMSG.update({event.chat_id: event.text})
else:
await event.reply(UNAPPROVED_MSG)
LASTMSG.update({event.chat_id: event.text})
if notifsoff:
await event.client.send_read_acknowledge(event.chat_id)
if event.chat_id not in COUNT_PM:
COUNT_PM.update({event.chat_id: 1})
else:
COUNT_PM[event.chat_id] = COUNT_PM[event.chat_id] + 1
if COUNT_PM[event.chat_id] == MAX_FLOOD_IN_P_M_s:
await event.respond(
                            f"`{JAVES_NNAME}`: **Don't spam my master's PM, this is your last warning!!**")
if COUNT_PM[event.chat_id] > MAX_FLOOD_IN_P_M_s:
await event.respond(
f"`{JAVES_NNAME}`: ** {BLOCK_MMSG} **")
try:
del COUNT_PM[event.chat_id]
del LASTMSG[event.chat_id]
except KeyError:
if BOTLOG:
                                await event.client.send_message(
                                    BOTLOG_CHATID,
                                    "The PM flood counter is misbehaving, please restart the bot!",
                                )
return
await event.client(BlockRequest(event.chat_id))
await event.client(ReportSpamRequest(peer=event.chat_id))
if BOTLOG:
name = await event.client.get_entity(event.chat_id)
name0 = str(name.first_name)
await event.client.send_message(
BOTLOG_CHATID,
"[" + name0 + "](tg://user?id=" +
                        str(event.chat_id) + ")" +
                        " blocked for spamming your PM",
)
if PM_AUTO_BAN:
@javess(disable_edited=True, outgoing=True, disable_errors=True)
async def auto_accept(event):
if not PM_AUTO_BAN:
return
self_user = await event.client.get_me()
if event.is_private and event.chat_id != 777000 and event.chat_id != self_user.id and not (
await event.get_sender()).bot:
try:
from ub.modules.sql_helper.pm_permit_sql import is_approved
from ub.modules.sql_helper.pm_permit_sql import approve
except AttributeError:
return
chat = await event.get_chat()
if isinstance(chat, User):
if is_approved(event.chat_id) or chat.bot:
return
async for message in event.client.iter_messages(event.chat_id,
reverse=True,
limit=1):
                    if message.message != UNAPPROVED_MSG and message.sender.id == self_user.id:
try:
approve(event.chat_id)
except IntegrityError:
return
if is_approved(event.chat_id) and BOTLOG:
await event.client.send_message(
BOTLOG_CHATID,
"#AUTO-APPROVED\n" + "User: " +
f"[{chat.first_name}](tg://user?id={chat.id})",
)
async def get_full_user(event):
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.forward:
ruser = await event.client(
GetFullUserRequest(
previous_message.forward.from_id or previous_message.forward.channel_id
)
)
return ruser, None
else:
ruser = await event.client(
GetFullUserRequest(
previous_message.from_id
)
)
return ruser, None
else:
input_str = None
try:
input_str = event.pattern_match.group(1)
except IndexError as e:
return None, e
if event.message.entities :
mention_entity = event.message.entities
probable_user_mention_entity = mention_entity[0]
if isinstance(probable_user_mention_entity, MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
ruser = await event.client(GetFullUserRequest(user_id))
return ruser, None
else:
try:
user_object = await event.client.get_entity(input_str)
user_id = user_object.id
ruser = await event.client(GetFullUserRequest(user_id))
return ruser, None
except Exception as e:
return None, e
elif event.is_private:
try:
user_id = event.chat_id
ruser = await event.client(GetFullUserRequest(user_id))
return ruser, None
except Exception as e:
return None, e
else:
try:
user_object = await event.client.get_entity(int(input_str))
user_id = user_object.id
ruser = await event.client(GetFullUserRequest(user_id))
return ruser, None
except Exception as e:
return None, e
@javes05(outgoing=True, pattern="^\!notifoff$")
async def notifoff(noff_event):
try:
from ub.modules.sql_helper.globals import addgvar
except AttributeError:
await noff_event.edit("`Running on Non-SQL mode!`")
return
addgvar("NOTIF_OFF", True)
await noff_event.edit("`Notifications from unapproved PM's are silenced!`")
@javes05(outgoing=True, pattern="^\!notifon$")
async def notifon(non_event):
try:
from ub.modules.sql_helper.globals import delgvar
except AttributeError:
await non_event.edit("`Running on Non-SQL mode!`")
return
delgvar("NOTIF_OFF")
await non_event.edit("`Notifications from unapproved PM's unmuted!`")
@javes05(outgoing=True, pattern="^\!allow$")
async def approvepm(apprvpm):
try:
from ub.modules.sql_helper.pm_permit_sql import approve
except AttributeError:
await apprvpm.edit("`Running `")
return
if apprvpm.reply_to_msg_id:
reply = await apprvpm.get_reply_message()
ruser = await apprvpm.client.get_entity(reply.from_id)
aname = ruser.id
name0 = str(ruser.first_name)
uid = ruser.id
else:
aname = await apprvpm.client.get_entity(apprvpm.chat_id)
name0 = str(aname.first_name)
uid = apprvpm.chat_id
try:
approve(uid)
except IntegrityError:
        await apprvpm.edit("This user is already approved to PM!")
return
await apprvpm.edit(f"[{name0}](tg://user?id={uid}) `approved to PM!`")
async for message in apprvpm.client.iter_messages(apprvpm.chat_id,
from_user='me',
search=UNAPPROVED_MSG):
await message.delete()
if BOTLOG:
await apprvpm.client.send_message(
BOTLOG_CHATID,
"#APPROVED\n" + "User: " + f"[{name0}](tg://user?id={uid})",
)
@javes05(outgoing=True, pattern="^\!disallow$")
async def disapprovepm(disapprvpm):
try:
from ub.modules.sql_helper.pm_permit_sql import dissprove
except BaseException:
await disapprvpm.edit("`Running on Non-SQL mode!`")
return
if disapprvpm.reply_to_msg_id:
reply = await disapprvpm.get_reply_message()
ruser = await disapprvpm.client.get_entity(reply.from_id)
aname = ruser.id
name0 = str(ruser.first_name)
dissprove(ruser.id)
else:
dissprove(disapprvpm.chat_id)
aname = await disapprvpm.client.get_entity(disapprvpm.chat_id)
name0 = str(aname.first_name)
await disapprvpm.edit(
        f"[{name0}](tg://user?id={disapprvpm.chat_id}) `Disapproved to PM!`")
if BOTLOG:
await disapprvpm.client.send_message(
BOTLOG_CHATID,
f"[{name0}](tg://user?id={disapprvpm.chat_id})"
" was disapproved to PM you.",
)
@javes05(outgoing=True, pattern="^\!block$")
async def blockpm(block):
""" For .block command, block people from PMing you! """
if block.reply_to_msg_id:
reply = await block.get_reply_message()
ruser = await block.client.get_entity(reply.sender.id)
aname = ruser.id
if aname == 929138153:
await block.edit(f"`{JAVES_NNAME}: Why ME wIll BLoCk My DEV`")
return
name0 = str(ruser.first_name)
await block.client(BlockRequest(ruser.id))
await block.edit(f"`{JAVES_NNAME}: You've been blocked!`")
uid = ruser.id
else:
if block.chat_id != 929138153:
await block.client(BlockRequest(block.chat_id))
aname = await block.client.get_entity(block.chat_id)
await block.edit(f"`{JAVES_NNAME}: You've been blocked!`")
name0 = str(aname.first_name)
uid = block.chat_id
else:
await block.edit(f"`{JAVES_NNAME}: Why ME wIll BLoCk My DEV `")
try:
from ub.modules.sql_helper.pm_permit_sql import dissprove
dissprove(uid)
except AttributeError:
pass
if BOTLOG:
await block.client.send_message(
BOTLOG_CHATID,
"#BLOCKED\n" + "User: " + f"[{name0}](tg://user?id={uid})",
)
@javes05(outgoing=True, pattern="^\!unblock$")
async def unblockpm(unblock):
""" For .unblock command, let people PMing you again! """
if unblock.reply_to_msg_id:
reply = await unblock.get_reply_message()
ruser = await unblock.client.get_entity(reply.sender.id)
name0 = str(ruser.first_name)
await unblock.client(UnblockRequest(ruser.id))
await unblock.edit(f"`{JAVES_NNAME}: You have been unblocked.`")
if BOTLOG:
await unblock.client.send_message(
BOTLOG_CHATID,
f"[{name0}](tg://user?id={ruser.id})"
" was unblocked!.",
)
try:
from ub.modules.sql_helper.globals import gvarstatus, addgvar, delgvar
afk_db = True
except AttributeError:
afk_db = False
AFKSTR = [f"`{JAVES_NNAME}:` ** {AFK_MMSG} **"]
global USER_AFK
global afk_time
global afk_start
global afk_end
USER_AFK = {}
afk_time = None
afk_start = {}
@javes05(outgoing=True, pattern="^!afk(?: |$)(.*)", disable_errors=True)
async def set_afk(afk_e):
message = afk_e.text
string = afk_e.pattern_match.group(1)
global ISAFK
global AFKREASON
global USER_AFK
global afk_time
global afk_start
global afk_end
global reason
USER_AFK = {}
afk_time = None
afk_end = {}
start_1 = datetime.now()
afk_start = start_1.replace(microsecond=0)
if string:
AFKREASON = string
await afk_e.edit(f"Going To Afk!\
\nReason: `{string}`")
else:
await afk_e.edit("Going To Afk!")
if BOTLOG:
await afk_e.client.send_message(BOTLOG_CHATID, "#AFK\nYou went AFK!")
ISAFK = True
afk_time = datetime.now()
raise StopPropagation
@javes05(outgoing=True)
async def type_afk_is_not_true(notafk):
global ISAFK
if ISAFK:
global COUNT_MSG
global USERS
global AFKREASON
global USER_AFK
global afk_time
global afk_start
global afk_end
back_alive = datetime.now()
afk_end = back_alive.replace(microsecond=0)
if ISAFK:
ISAFK = False
msg = await notafk.respond("I'm no longer AFK.")
time.sleep(3)
await msg.delete()
if BOTLOG:
await notafk.client.send_message(
                BOTLOG_CHATID,
                "You've received " + str(COUNT_MSG) + " messages from " +
str(len(USERS)) + " chats while you were away",
)
for i in USERS:
name = await notafk.client.get_entity(i)
name0 = str(name.first_name)
await notafk.client.send_message(
BOTLOG_CHATID,
"[" + name0 + "](tg://user?id=" + str(i) + ")" +
" sent you " + "`" + str(USERS[i]) + " messages`",
)
COUNT_MSG = 0
USERS = {}
AFKREASON = None
@javes05(incoming=True, disable_edited=True)
async def mention_afk(mention):
global ISAFK
if ISAFK:
if not mention.chat_id in W_CHAT:
global COUNT_MSG
global USERS
global USER_AFK
global afk_time
global afk_start
global afk_end
back_alivee = datetime.now()
afk_end = back_alivee.replace(microsecond=0)
afk_since = "a while ago"
if mention.message.mentioned and not (await mention.get_sender()).bot:
now = datetime.now()
datime_since_afk = now - afk_time # pylint:disable=E0602
time = float(datime_since_afk.seconds)
days = time // (24 * 3600)
time = time % (24 * 3600)
hours = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
if days == 1:
afk_since = "Yesterday"
elif days > 1:
if days > 6:
date = now + \
datet.timedelta(
days=-days, hours=-hours, minutes=-minutes)
afk_since = date.strftime("%A, %Y %B %m, %H:%I")
else:
wday = now + datet.timedelta(days=-days)
afk_since = wday.strftime('%A')
elif hours > 1:
afk_since = f" {int(hours)}h {int(minutes)}m ago"
elif minutes > 0:
afk_since = f"{int(minutes)}m {int(seconds)}s ago"
else:
afk_since = f"{int(seconds)}s ago"
if mention.sender_id not in USERS:
if AFKREASON:
await mention.reply(f"`{JAVES_NNAME}:`**{AFK_MMSG}**\
\n\n`Reason:` **{AFKREASON}**\n`Since :` **{afk_since}**")
else:
await mention.reply(str(choice(AFKSTR)))
USERS.update({mention.sender_id: 1})
COUNT_MSG = COUNT_MSG + 1
elif mention.sender_id in USERS:
if USERS[mention.sender_id] % randint(2, 4) == 0:
if AFKREASON:
await mention.reply(
                        f"`{JAVES_NNAME}: ` **In case you didn't notice, my master is still offline**\
\n\n`Reason:` **{AFKREASON}**\n`Since :` **{afk_since}**")
else:
await mention.reply(str(choice(AFKSTR)))
USERS[mention.sender_id] = USERS[mention.sender_id] + 1
COUNT_MSG = COUNT_MSG + 1
else:
USERS[mention.sender_id] = USERS[mention.sender_id] + 1
COUNT_MSG = COUNT_MSG + 1
@javes05(incoming=True, disable_errors=True)
async def afk_on_pm(sender):
global ISAFK
if ISAFK:
global USERS
global COUNT_MSG
global COUNT_MSG
global USERS
global USER_AFK
global afk_time
global afk_start
global afk_end
back_alivee = datetime.now()
afk_end = back_alivee.replace(microsecond=0)
afk_since = "a while ago"
if sender.is_private and sender.sender_id != 710844948 and not (
await sender.get_sender()).bot:
if PM_AUTO_BAN:
try:
from ub.modules.sql_helper.pm_permit_sql import is_approved
apprv = is_approved(sender.sender_id)
except AttributeError:
apprv = True
else:
apprv = True
if apprv and ISAFK:
now = datetime.now()
datime_since_afk = now - afk_time # pylint:disable=E0602
time = float(datime_since_afk.seconds)
days = time // (24 * 3600)
time = time % (24 * 3600)
hours = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
if days == 1:
afk_since = "Yesterday"
elif days > 1:
if days > 6:
date = now + \
datet.timedelta(
days=-days, hours=-hours, minutes=-minutes)
afk_since = date.strftime("%A, %Y %B %m, %H:%I")
else:
wday = now + datet.timedelta(days=-days)
afk_since = wday.strftime('%A')
elif hours > 1:
afk_since = f"{int(hours)}h {int(minutes)}m ago"
elif minutes > 0:
afk_since = f"{int(minutes)}m {int(seconds)}s ago"
else:
afk_since = f"{int(seconds)}s ago"
if sender.sender_id not in USERS:
if AFKREASON:
await sender.reply(f"`{JAVES_NNAME}:`**{AFK_MMSG}**\
\n\n`Reason:` **{AFKREASON}**\n`Since :`**{afk_since}**")
else:
await sender.reply(str(choice(AFKSTR)))
USERS.update({sender.sender_id: 1})
COUNT_MSG = COUNT_MSG + 1
elif apprv and sender.sender_id in USERS:
if USERS[sender.sender_id] % randint(2, 4) == 0:
if AFKREASON:
await sender.reply(
                        f"`{JAVES_NNAME}: ` **In case you didn't notice, my master is still offline**\
\n\n`Reason:` **{AFKREASON}**\n`Since :`**{afk_since}**")
else:
await sender.reply(str(choice(AFKSTR)))
USERS[sender.sender_id] = USERS[sender.sender_id] + 1
COUNT_MSG = COUNT_MSG + 1
else:
USERS[sender.sender_id] = USERS[sender.sender_id] + 1
COUNT_MSG = COUNT_MSG + 1
@javes.on(rekcah05(pattern=f"userinfo(?: |$)(.*)", allow_sudo=True))
@javes05(outgoing=True, pattern="^\!userinfo(?: |$)(.*)")
async def _(event):
sender = await event.get_sender() ; me = await event.client.get_me()
if not sender.id == me.id:
rkp = await event.reply("`processing`")
else:
rkp = await event.edit("`processing`")
if event.fwd_from:
return
ruser, rdhs = await get_full_user(event)
if ruser is None:
await rkp.edit("Error please mention user")
return False
ruser_profile_photos = await event.client(GetUserPhotosRequest(
user_id=ruser.user.id,
offset=42,
max_id=0,
limit=80
))
ruser_profile_photos_count = "f"
try:
ruser_profile_photos_count = ruser_profile_photos.count
except AttributeError as e:
pass
user_id = ruser.user.id
    first_name = ruser.user.first_name
    if first_name is not None:
        first_name = html.escape(first_name).replace("\u2060", "")
user_bio = ruser.about
if user_bio is not None:
user_bio = html.escape(ruser.about)
    spamw = "[Add Apikey](https://t.me/javes05/157)"
    sreason = {}
    # Initialise before the request so a failed lookup leaves them unset
    spambot = None
    data = None
    try:
        cas_url = f"https://api.cas.chat/check?user_id={user_id}"
        r = get(cas_url, timeout=3)
        data = r.json()
    except BaseException:
        pass
    if data and data.get('ok'):
        reason = f"[Banned by Combot Anti Spam](https://combot.org/cas/query?u={user_id})"
        spambot = True
    if spambot:
        sbot = "Yes"
        sn = reason
    else:
        sbot = "No"
        sn = {}
if swapi:
sw = spamwatch.Client(swapi)
sswatch = sw.get_ban(user_id)
if sswatch:
spamw = "`Yes`"
sreason = sswatch.reason
else:
spamw = "`No`"
sreason = {}
try:
dc_id, location = get_input_location(ruser.profile_photo)
except Exception as e:
dc_id = "Need a Profile Picture to check **this**"
location = str(e)
caption = """**About** [{}](tg://user?id={})
** User ID:** `{}`
** First Name:** `{}`
** Last Name:** `{}`
** UserName:** `@{}`
** Bio:** `{}`
** Number of Profile Pics:** `{}`
** Scam:** `{}`
** Restricted:** `{}`
**Reason:** `{}`
** Banned in SpamWatch:** {}
** Reason:** `{}`
** Banned in CAS:** {} [?](http://cas.chat)
** Reason:** `{}`
** Verified by Telegram:** `{}`
** Bot:** `{}`
** Contact:** `{}`
** Deleted:** `{}`
""".format(
first_name, user_id,
user_id,
ruser.user.first_name, ruser.user.last_name,
ruser.user.username,
user_bio,
ruser_profile_photos_count,
ruser.user.scam,
ruser.user.restricted,
ruser.user.restriction_reason,
spamw,
sreason,
sbot,
sn,
ruser.user.verified,
ruser.user.bot,
ruser.user.contact,
ruser.user.deleted
)
    await rkp.edit(caption)
@javes05(pattern="^\!whois(?: |$)(.*)", outgoing=True)
@javes.on(rekcah05(pattern=f"whois(?: |$)(.*)", allow_sudo=True))
async def who(event):
sender = await event.get_sender() ; me = await event.client.get_me()
if not sender.id == me.id:
rkp = await event.reply("`processing`")
else:
rkp = await event.edit("`processing`")
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
ruser = await get_user(event)
try:
photo, caption = await fetch_info(ruser, event)
except AttributeError:
        await rkp.edit("`Could not fetch info of that user.`")
return
#r_msg = await event.get_reply_message()
#message_id_to_reply = event.message.reply_to_msg_id
message_id_to_reply = event.reply_to_msg_id
#message_id_to_reply = r_msg.sender.id
if not message_id_to_reply:
message_id_to_reply = None
try:
await event.client.send_file(event.chat_id,
photo,
caption=caption,
link_preview=False,
force_document=False,
reply_to=message_id_to_reply,
parse_mode="html")
if not photo.startswith("http"):
os.remove(photo)
await rkp.delete()
except TypeError:
await event.edit(caption, parse_mode="html")
async def get_user(event):
if event.reply_to_msg_id and not event.pattern_match.group(1):
previous_message = await event.get_reply_message()
ruser = await event.client(
GetFullUserRequest(previous_message.sender.id))
else:
user = event.pattern_match.group(1)
if user.isnumeric():
user = int(user)
if not user:
self_user = await event.client.get_me()
user = self_user.id
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
ruser = await event.client(GetFullUserRequest(user_id))
return ruser
try:
user_object = await event.client.get_entity(user)
ruser = await event.client(
GetFullUserRequest(user_object.id))
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return ruser
async def fetch_info(ruser, event):
ruser_profile_photos = await event.client(
GetUserPhotosRequest(user_id=ruser.user.id,
offset=42,
max_id=0,
limit=80))
ruser_profile_photos_count = "Person needs help with uploading profile picture."
try:
ruser_profile_photos_count = ruser_profile_photos.count
except AttributeError as e:
pass
user_id = ruser.user.id
first_name = ruser.user.first_name
last_name = ruser.user.last_name
try:
dc_id, location = get_input_location(ruser.profile_photo)
except Exception as e:
dc_id = "Couldn't fetch DC ID!"
location = str(e)
common_chat = ruser.common_chats_count
username = ruser.user.username
user_bio = ruser.about
is_bot = ruser.user.bot
restricted = ruser.user.restricted
verified = ruser.user.verified
photo = await event.client.download_profile_photo(user_id,
TEMP_DOWNLOAD_DIRECTORY +
str(user_id) + ".jpg",
download_big=True)
first_name = first_name.replace(
"\u2060", "") if first_name else ("This User has no First Name")
last_name = last_name.replace(
"\u2060", "") if last_name else ("This User has no Last Name")
username = "@{}".format(username) if username else (
"This User has no Username")
user_bio = "This User has no About" if not user_bio else user_bio
caption = "<b>USER INFO:</b>\n\n"
caption += f"First Name: {first_name}\n"
caption += f"Last Name: {last_name}\n"
caption += f"Username: {username}\n"
caption += f"Data Centre ID: {dc_id}\n"
caption += f"Number of Profile Pics: {ruser_profile_photos_count}\n"
caption += f"Is Bot: {is_bot}\n"
caption += f"Is Restricted: {restricted}\n"
caption += f"Is Verified by Telegram: {verified}\n"
caption += f"ID: <code>{user_id}</code>\n\n"
caption += f"Bio: \n<code>{user_bio}</code>\n\n"
caption += f"Common Chats with this user: {common_chat}\n"
caption += f"<a href=\"tg://user?id={user_id}\">{first_name}</a>"
return photo, caption
@javes05(outgoing=True, pattern="^\!purge$")
@javes.on(rekcah05(pattern=f"purge$", allow_sudo=True))
async def fastpurger(purg):
chat = await purg.get_input_chat()
msgs = []
itermsg = purg.client.iter_messages(chat, min_id=purg.reply_to_msg_id)
count = 0
if purg.reply_to_msg_id is not None:
async for msg in itermsg:
msgs.append(msg)
count = count + 1
msgs.append(purg.reply_to_msg_id)
if len(msgs) == 100:
await purg.client.delete_messages(chat, msgs)
msgs = []
else:
        await purg.reply("`I need a message to start purging from.`")
return
if msgs:
await purg.client.delete_messages(chat, msgs)
done = await purg.client.send_message(
purg.chat_id, f"`Fast purge complete!`\
\nPurged {str(count)} messages")
await done.delete()
return
@javes05(outgoing=True, pattern="^\!purgeme")
@javes.on(rekcah05(pattern=f"purgeme", allow_sudo=True))
async def purgeme(delme):
message = delme.text
count = int(message[9:])
i = 1
async for message in delme.client.iter_messages(delme.chat_id,
from_user='me'):
if i > count + 1:
break
i = i + 1
await message.delete()
smsg = await delme.client.send_message(
delme.chat_id,
"`Purge complete!` Purged " + str(count) + " messages.",
)
await smsg.delete()
return
@javes05(outgoing=True, pattern="^\!del$")
@javes.on(rekcah05(pattern=f"del$", allow_sudo=True))
async def delete_it(delme):
msg_src = await delme.get_reply_message()
if delme.reply_to_msg_id:
try:
await msg_src.delete()
await delme.delete()
return
except rpcbaseerrors.BadRequestError:
return
@javes05(outgoing=True, pattern="^\!edit")
async def editer(edit):
message = edit.text
chat = await edit.get_input_chat()
self_id = await edit.client.get_peer_id('me')
string = str(message[6:])
i = 1
async for message in edit.client.iter_messages(chat, self_id):
if i == 2:
await message.edit(string)
await edit.delete()
break
i = i + 1
if BOTLOG:
await edit.client.send_message(BOTLOG_CHATID,
"Edit query was executed successfully")
@javes05(outgoing=True, pattern="^\!sd")
async def selfdestruct(destroy):
message = destroy.text
counter = int(message[4:6])
text = str(destroy.text[6:])
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
await sleep(counter)
await smsg.delete()
return
CMD_HELP.update({
"person":
"** pm protecter**\
\n**Usage:** protect your pm from unknown scammers\
\nFor on !set var PM_PROTECTOR True For off !del var PM_PROTECTOR\
\n\n!allow <reply to a user>\
\n**Usage:** Approves the mentioned/replied person to PM...\
\n\n!disallow <reply to a user>\
\n**Usage:** Disapproves the mentioned/replied person to PM. \
\n\n!block <reply to a user>\
\n**Usage:** Blocks the person. \
\n\n!unblock <reply to a user>\
\n**Usage:** UnBlocks the person.. \
\n\n!notifoff \
\n**Usage:** Clears/Disables any notifications of unapproved PMs. \
\n\n!notifon \
\n**Usage:** Allows notifications for unapproved PMs. \
\n\n **AFK**\
\n\n!afk <reason>\
\n**Usage:** Sets you as afk.\nReplies to anyone who tags/PM's you telling them that you are AFK(reason).\nSwitches off AFK when you type back anything, anywhere.\
\n\n!purge \
\n**Usage:** Purges all messages starting from the reply. \
\n\n!purgeme \
\n**Usage:** Deletes x amount of your latest messages.. \
\n\n!del \
\n**Usage:** Deletes the message you replied to. \
\n\n!edit <newmessage>\
\n**Usage:** Replace your last message with <newmessage>. \
\n\n!sd <x> <message> \
\n**Usage:** Creates a message that selfdestructs in x seconds. \
\n\n!userinfo / !whois <user/reply to a message> \
\n**Usage:** get userinfo. \
\n\n**Sudo Commands purge, purgeme , del, userinfo whois**\
"
})
| 37.533116
| 163
| 0.577123
|
238d41c0c7e607687c868d294cf828491a76d905
| 7,016
|
py
|
Python
|
mockaioredis/pool.py
|
adsko/mockaioredis
|
ebc5fd2f1c56402293bb5bfee50bf3c4a4e2c349
|
[
"Apache-2.0"
] | 13
|
2017-06-22T16:58:51.000Z
|
2022-01-23T08:07:54.000Z
|
mockaioredis/pool.py
|
adsko/mockaioredis
|
ebc5fd2f1c56402293bb5bfee50bf3c4a4e2c349
|
[
"Apache-2.0"
] | 12
|
2018-01-11T16:24:05.000Z
|
2021-08-12T07:42:29.000Z
|
mockaioredis/pool.py
|
adsko/mockaioredis
|
ebc5fd2f1c56402293bb5bfee50bf3c4a4e2c349
|
[
"Apache-2.0"
] | 18
|
2017-06-21T12:27:54.000Z
|
2021-08-16T09:24:48.000Z
|
'Fake aioredis.RedisPool and related functions'
import asyncio
import collections
import sys
import warnings
from .commands import MockRedis, create_redis
from .util import _NOTSET
async def create_pool(address, *, db=0, password=None, ssl=None, encoding=None,
minsize=1, maxsize=10, commands_factory=_NOTSET, loop=None):
if commands_factory == _NOTSET:
commands_factory = MockRedis
pool = MockRedisPool(address, db, password, encoding,
minsize=minsize, maxsize=maxsize,
commands_factory=commands_factory,
ssl=ssl, loop=loop)
try:
await pool._fill_free(override_min=False)
except Exception as ex:
pool.close()
await pool.wait_closed()
raise
return pool
async def create_redis_pool(address, *, db=None, password=None, ssl=None,
encoding=None, commands_factory=MockRedis,
minsize=1, maxsize=10, parser=None,
timeout=None, pool_cls=None,
connection_cls=None, loop=None):
"""Creates high-level Redis interface.
This function is a coroutine.
"""
pool = await create_pool(address, db=db,
password=password,
ssl=ssl,
encoding=encoding,
minsize=minsize,
maxsize=maxsize,
loop=loop)
return commands_factory(pool)
class MockRedisPool:
    '''Imitate an aioredis.RedisPool
Or at least enough of it to create, use and close a pool
'''
def __init__(self, address, db=0, password=0, encoding=None,
*, minsize, maxsize, commands_factory, ssl=None, loop=None):
if loop is not None and sys.version_info >= (3, 8):
warnings.warn("The loop argument is deprecated",
DeprecationWarning)
if loop is None and sys.version_info < (3, 8):
loop = asyncio.get_event_loop()
self._address = address
self._db = db
self._password = password
self._encoding = encoding
self._minsize = minsize
self._maxsize = maxsize
self._factory = commands_factory
self._ssl = ssl
self._loop = loop
# fake it here, we always only have one connection
self._pool = collections.deque(maxlen=1)
self._used = set()
self._acquiring = 0
self._cond = asyncio.Condition(loop=loop)
self._close_state = asyncio.Event(loop=loop)
self._close_waiter = asyncio.ensure_future(self._do_close(), loop=loop)
@property
def minsize(self):
'''always return 1'''
return 1
@property
def maxsize(self):
'''always return 1'''
return 1
@property
def size(self):
return self.freesize + len(self._used) + self._acquiring
@property
def freesize(self):
return len(self._pool)
async def _do_close(self):
await self._close_state.wait()
async with self._cond:
waiters = []
while self._pool:
conn = self._pool.popleft()
# fake connections, so no need to do anything for used connections
def close(self):
if not self._close_state.is_set():
self._close_state.set()
@property
def closed(self):
return self._close_state.is_set()
async def wait_closed(self):
'wait until pool is closed'
await asyncio.shield(self._close_waiter, loop=self._loop)
async def acquire(self):
        '''Pretend to acquire a connection.
In fact, always return the same MockRedis object once free'''
async with self._cond:
while True:
await self._fill_free(override_min=True)
if self.freesize:
conn = self._pool.popleft()
self._used.add(conn)
return conn
else:
await self._cond.wait()
def release(self, conn):
'''Release our single MockRedis connection'''
assert conn in self._used, "Invalid connection, maybe from other pool?"
self._used.remove(conn)
self._pool.append(conn)
asyncio.ensure_future(self._wakeup(), loop=self._loop)
async def _fill_free(self, *, override_min):
while self.size < self.minsize:
self._acquiring += 1
try:
conn = await self._create_new_connection()
self._pool.append(conn)
finally:
self._acquiring -= 1
if self.freesize:
return
if override_min:
while not self._pool and self.size < self.maxsize:
self._acquiring += 1
try:
conn = await self._create_new_connection()
self._pool.append(conn)
finally:
self._acquiring -= 1
def _create_new_connection(self):
return create_redis(self._address,
db=self._db,
password=self._password,
ssl=self._ssl,
encoding=self._encoding,
commands_factory=self._factory,
loop=self._loop)
async def _wakeup(self):
async with self._cond:
self._cond.notify()
def __enter__(self):
raise RuntimeError(
"'await' should be used as a context manager expression")
def __exit__(self, *args):
pass # pragma: nocover
def __await__(self):
# To make `with await pool` work
conn = yield from self.acquire().__await__()
return _ConnectionContextManager(self, conn)
def get(self):
'''Return async context manager for working with the connection
async with pool.get() as conn:
await conn.get(key)
'''
return _AsyncConnectionContextManager(self)
class _ConnectionContextManager:
__slots__ = ('_pool', '_conn')
def __init__(self, pool, conn):
self._pool = pool
self._conn = conn
def __enter__(self):
return self._conn
def __exit__(self, exc_type, exc_value, tb):
try:
self._pool.release(self._conn)
finally:
self._pool = None
self._conn = None
class _AsyncConnectionContextManager:
__slots__ = ('_pool', '_conn')
def __init__(self, pool):
self._pool = pool
self._conn = None
async def __aenter__(self):
self._conn = await self._pool.acquire()
return self._conn
async def __aexit__(self, exc_type, exc_value, tb):
try:
self._pool.release(self._conn)
finally:
self._pool = None
self._conn = None
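# Minimal usage sketch (illustrative, not part of the original module): it
# exercises only the pool-level API defined above; the address is accepted
# but never actually connected to by the mock.
async def _demo_pool_usage():
    pool = await create_pool(('localhost', 6379))
    async with pool.get() as conn:
        pass  # 'conn' is the commands_factory object (MockRedis by default)
    pool.close()
    await pool.wait_closed()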
| 30.504348
| 82
| 0.563712
|
2cfaefaae87aba2a96956f7adc0721bcee84df6d
| 6,066
|
py
|
Python
|
nodux_party/customer.py
|
tatiqm25/nodux_party
|
dcd27e7004dbde7cfb9cddd19f267a727ed18052
|
[
"MIT"
] | null | null | null |
nodux_party/customer.py
|
tatiqm25/nodux_party
|
dcd27e7004dbde7cfb9cddd19f267a727ed18052
|
[
"MIT"
] | null | null | null |
nodux_party/customer.py
|
tatiqm25/nodux_party
|
dcd27e7004dbde7cfb9cddd19f267a727ed18052
|
[
"MIT"
] | null | null | null |
def validate(doc, method):
remove_spaces(doc)
create_supplier(doc)
validate_email(doc)
if doc.tax_id:
tax_id = doc.tax_id.replace(".", "").replace(" ", "")
if doc.type_document == "":
pass
elif doc.type_document == "Pasaporte":
pass
elif doc.type_document == "RUC":
compute_check_digit(doc, tax_id)
elif doc.type_document == "Cedula":
compute_check_digit(doc, tax_id)
elif doc.type_document == "Consumidor Final":
doc.tax_id = "9999999999999"
def create_supplier(doc):
import frappe
is_supplier = doc.is_supplier
if is_supplier == 1:
supplier = frappe.db.sql("""select supplier_name, type_document, country,
address, email, phone, province, tax_id from `tabSupplier`
where tax_id = %s""",
doc.tax_id, as_dict = 1)
if supplier:
supplier = frappe.get_doc("Supplier", doc.tax_id)
if supplier:
supplier.supplier_name = doc.customer_name
supplier.comercial_name = doc.comercial_name
supplier.type_document = doc.type_document
supplier.country = doc.territory
supplier.address = doc.street
supplier.email = doc.email
supplier.phone = doc.phone
supplier.province = doc.province
supplier.tax_id = doc.tax_id
supplier.fecha_de_nacimiento = doc.fecha_de_nacimiento
supplier.fecha_de_registro = doc.fecha_de_registro
supplier.nombre_de_contacto = doc.nombre_de_contacto
supplier.mobile = doc.mobile
supplier.save()
if not supplier:
supplier = frappe.db.sql("""select supplier_name, type_document, country,
address, email, phone, province, tax_id from `tabSupplier`
where supplier_name = %s""",
doc.customer_name, as_dict = 1)
if supplier:
supplier = frappe.get_doc("Supplier", doc.customer_name)
supplier.supplier_name = doc.customer_name
supplier.comercial_name = doc.comercial_name
supplier.type_document = doc.type_document
supplier.country = doc.territory
supplier.address = doc.street
supplier.email = doc.email
supplier.phone = doc.phone
supplier.province = doc.province
supplier.tax_id = doc.tax_id
supplier.fecha_de_nacimiento = doc.fecha_de_nacimiento
supplier.fecha_de_registro = doc.fecha_de_registro
supplier.nombre_de_contacto = doc.nombre_de_contacto
supplier.mobile = doc.mobile
supplier.save()
if not supplier:
supplier = frappe.get_doc({
"doctype":"Supplier",
"supplier_name": doc.customer_name,
"comercial_name": doc.comercial_name,
"type_document": doc.type_document,
"country": doc.country,
"address": doc.street,
"email" : doc.email,
"phone" : doc.phone,
"province" : doc.province,
"tax_id":doc.tax_id,
"nombre_de_contacto": doc.nombre_de_contacto,
"fecha_de_nacimiento": doc.fecha_de_nacimiento,
"fecha_de_registro":doc.fecha_de_registro,
"mobile":doc.mobile
})
supplier.save()
def validate_email(doc):
import re
import frappe
email = doc.email
if email:
if re.match("[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,3})", email):
pass
else:
frappe.throw("Correo electronico no cumple con la estructura: ejemplo@mail.com")
def remove_spaces(doc):
if doc.customer_name:
doc.customer_name = doc.customer_name.strip()
if doc.comercial_name:
doc.comercial_name = doc.comercial_name.strip()
if doc.tax_id:
doc.tax_id = doc.tax_id.strip()
if doc.street:
doc.street = doc.street.strip()
if doc.province:
doc.province = doc.province.strip()
if doc.phone:
doc.phone = doc.phone.strip()
if doc.email:
doc.email = doc.email.strip()
if doc.mobile:
doc.mobile = doc.mobile.strip()
if doc.nombre_de_contacto:
doc.nombre_de_contacto = doc.nombre_de_contacto.strip()
def compute_check_digit(doc, raw_number):
import frappe
factor = 2
x = 0
set_check_digit = None
if doc.type_document == 'RUC':
        if int(raw_number[2]) < 6:
            type_party = 'persona_natural'
        elif int(raw_number[2]) == 6:
            type_party = 'entidad_publica'
        elif int(raw_number[2]) == 9:
            type_party = 'persona_juridica'
        else:
            frappe.throw("Numero RUC no valido")
if type_party == 'persona_natural':
if len(raw_number) != 13 or int(raw_number[2]) > 5 or raw_number[-3:] != '001':
frappe.throw("Numero RUC no valido")
number = raw_number[:9]
set_check_digit = raw_number[9]
for n in number:
y = int(n) * factor
if y >= 10:
y = int(str(y)[0]) + int(str(y)[1])
x += y
if factor == 2:
factor = 1
else:
factor = 2
res = (x % 10)
if res == 0:
value = 0
else:
value = 10 - (x % 10)
if set_check_digit == str(value):
pass
else:
frappe.throw("Numero RUC no valido")
elif type_party == 'entidad_publica':
if not len(raw_number) == 13 or raw_number[2] != '6' \
or raw_number[-3:] != '001':
frappe.throw("Numero RUC no valido")
number = raw_number[:8]
set_check_digit = raw_number[8]
for n in reversed(number):
x += int(n) * factor
factor += 1
if factor == 8:
factor = 2
value = 11 - (x % 11)
if value == 11:
value = 0
if set_check_digit == str(value):
pass
else:
frappe.throw("Numero RUC no valido")
else:
if len(raw_number) != 13 or \
(type_party in ['persona_juridica'] \
and int(raw_number[2]) != 9) or raw_number[-3:] != '001':
frappe.throw("Numero RUC no valido")
number = raw_number[:9]
set_check_digit = raw_number[9]
for n in reversed(number):
x += int(n) * factor
factor += 1
if factor == 8:
factor = 2
value = 11 - (x % 11)
if value == 11:
value = 0
if set_check_digit == str(value):
pass
else:
frappe.throw("Numero RUC no valido")
else:
if len(raw_number) != 10:
frappe.throw("Numero C.I. no valido")
number = raw_number[:9]
set_check_digit = raw_number[9]
for n in number:
y = int(n) * factor
if y >= 10:
y = int(str(y)[0]) + int(str(y)[1])
x += y
if factor == 2:
factor = 1
else:
factor = 2
res = (x % 10)
if res == 0:
value = 0
else:
value = 10 - (x % 10)
if set_check_digit == str(value):
pass
else:
frappe.throw("Numero C.I. no valido")
| 27.953917
| 92
| 0.659578
|
0e7c7d652e85a0ab0247a971dd71bc310ce6e2d2
| 16,272
|
py
|
Python
|
examples/real/variants.py
|
charlesjsun/ReLMM
|
b63cd69f5b4d5fddc5988a9478632a171ceef59a
|
[
"MIT"
] | 3
|
2021-12-04T08:41:46.000Z
|
2022-03-28T11:12:36.000Z
|
examples/real/variants.py
|
charlesjsun/ReLMM
|
b63cd69f5b4d5fddc5988a9478632a171ceef59a
|
[
"MIT"
] | null | null | null |
examples/real/variants.py
|
charlesjsun/ReLMM
|
b63cd69f5b4d5fddc5988a9478632a171ceef59a
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
from ray import tune
import numpy as np
from softlearning.utils.git import get_git_rev
from softlearning.utils.misc import get_host_name
from softlearning.utils.dict import deep_update
DEFAULT_KEY = "__DEFAULT_KEY__"
M = 512
ALGORITHM_PARAMS_BASE = {
'config': {
'train_every_n_steps': 1,
'n_train_repeat': 1,
'eval_render_kwargs': {},
'eval_n_episodes': 0,
'num_warmup_samples': tune.sample_from(lambda spec: (
# 5 * (spec.get('config', spec)
# ['sampler_params']
# ['config']
# ['max_path_length'])
10
)),
}
}
ALGORITHM_PARAMS_ADDITIONAL = {
'SAC': {
'class_name': 'SAC',
'config': {
'policy_lr': 3e-4,
'Q_lr': 3e-4,
'alpha_lr': 3e-4,
'target_update_interval': 1,
'tau': 5e-3,
'target_entropy': 'auto',
'discount': 0.95,
'reward_scale': 1.0,
},
},
'SACMixed': {
'class_name': 'SACMixed',
'config': {
'policy_lr': 3e-4,
'Q_lr': 3e-4,
'alpha_lr': 3e-4,
'target_update_interval': 1,
'tau': 5e-3,
'target_entropy': 'auto',
'discount': 0.95,
'reward_scale': 1.0,
'discrete_entropy_ratio_start': 0.55,
'discrete_entropy_ratio_end': 0.55,
'discrete_entropy_timesteps': 60000,
},
},
'SACDiscrete': {
'class_name': 'SACDiscrete',
'config': {
'policy_lr': 3e-4,
'Q_lr': 3e-4,
'alpha_lr': 3e-4,
'target_update_interval': 1,
'tau': 5e-3,
'discount': 0.95,
'reward_scale': 1.0,
'target_entropy_start': 'auto',
'entropy_ratio_start': 0.9,
'entropy_ratio_end': 0.55,
'entropy_timesteps': 60000,
},
},
'SQL': {
'class_name': 'SQL',
'config': {
'policy_lr': 3e-4,
'target_update_interval': 1,
'discount': 0.99,
'tau': 5e-3,
'reward_scale': tune.sample_from(lambda spec: (
{
'Swimmer': 30,
'Hopper': 30,
'HalfCheetah': 30,
'Walker2d': 10,
'Ant': 300,
'Humanoid': 100,
'Pendulum': 1,
}.get(
spec.get('config', spec)
['environment_params']
['training']
['domain'],
1.0
),
)),
},
},
}
POLICY_PARAMS_BASE = {
'gaussian': {
'class_name': 'FeedforwardGaussianPolicy',
'config': {
'hidden_layer_sizes': (M, M),
'squash': True,
'observation_keys': None,
'preprocessors': None,
},
},
'discrete_gaussian': {
'class_name': 'FeedforwardDiscreteGaussianPolicy',
'config': {
'hidden_layer_sizes': (M, M),
'observation_keys': None,
'preprocessors': None,
},
},
'discrete': {
'class_name': 'FeedforwardDiscretePolicy',
'config': {
'hidden_layer_sizes': (M, M),
'observation_keys': None,
'preprocessors': None,
},
},
}
TOTAL_STEPS_PER_UNIVERSE_DOMAIN_TASK = {
DEFAULT_KEY: int(1e4),
'gym': {
DEFAULT_KEY: int(1e4),
'Locobot': {
DEFAULT_KEY: int(2e5),
'NavigationVacuumRandomPerturbation-v0': int(2e5),
'NavigationVacuumRNDPerturbation-v0': int(2e5),
},
},
}
MAX_PATH_LENGTH_PER_UNIVERSE_DOMAIN_TASK = {
DEFAULT_KEY: 1000,
'gym': {
DEFAULT_KEY: 1000,
'Locobot': {
DEFAULT_KEY: 200,
'NavigationVacuumRandomPerturbation-v0': 200,
'NavigationVacuumRNDPerturbation-v0': 200,
'RealNavigationRND-v0': 50,
},
},
}
EPOCH_LENGTH_PER_UNIVERSE_DOMAIN_TASK = {
DEFAULT_KEY: 1000,
'gym': {
DEFAULT_KEY: 1000,
'Locobot': {
DEFAULT_KEY: 1000,
'NavigationVacuumRandomPerturbation-v0': 1000,
'NavigationVacuumRNDPerturbation-v0': 1000,
'RealNavigationRND-v0': 200,
},
},
}
ENVIRONMENT_PARAMS_PER_UNIVERSE_DOMAIN_TASK = {
'gym': {
'Locobot': {
'NavigationVacuumRandomPerturbation-v0': {
'pixel_wrapper_kwargs': {
'pixels_only': False,
},
'reset_free': True,
'room_name': 'simple',
'room_params': {
'num_objects': 100,
'object_name': "greensquareball",
'no_spawn_radius': 0.55, #0.8,
'wall_size': 5.0
},
'is_training': True,
'max_ep_len': float('inf'),
'image_size': 100,
'steps_per_second': 2,
'max_velocity': 20.0,
'trajectory_log_dir': '/home/externalhardrive/RAIL/mobilemanipulation-tf2/nohup_output/nav_vacuum_random_perturbation_edison_3_traj/',
'trajectory_log_freq': 1000,
'renders': False,
},
'NavigationVacuumRNDPerturbation-v0': {
'pixel_wrapper_kwargs': {
'pixels_only': False,
},
'reset_free': True,
'room_name': 'simple',
'room_params': {
'num_objects': 100,
'object_name': "greensquareball",
'no_spawn_radius': 0.55, #0.8,
'wall_size': 5.0
},
'is_training': True,
'max_ep_len': float('inf'),
'image_size': 100,
'steps_per_second': 2,
'max_velocity': 20.0,
'trajectory_log_dir': '/home/externalhardrive/RAIL/mobilemanipulation/nohup_output/nav_vacuum_rnd_perturbation_edison_2_traj/',
'trajectory_log_freq': 1000,
'renders': False,
},
'RealNavigationRND-v0': {
'pixel_wrapper_kwargs': {
'pixels_only': True,
},
'reset_free': True,
}
},
},
}
EXTRA_EVALUATION_ENVIRONMENT_PARAMS_PER_UNIVERSE_DOMAIN_TASK = {
'gym': {
'Locobot': {
'NavigationVacuumRandomPerturbation-v0': {
'reset_free': False,
'max_ep_len': 200,
'trajectory_log_dir': None,
'trajectory_log_freq': 0,
'is_training': False,
'renders': False,
},
'NavigationVacuumRNDPerturbation-v0': {
'reset_free': False,
'max_ep_len': 200,
'trajectory_log_dir': None,
'trajectory_log_freq': 0,
'is_training': False,
'renders': False,
},
},
},
}
def get_epoch_length(universe, domain, task):
level_result = EPOCH_LENGTH_PER_UNIVERSE_DOMAIN_TASK.copy()
for level_key in (universe, domain, task):
if isinstance(level_result, int):
return level_result
level_result = level_result.get(level_key) or level_result[DEFAULT_KEY]
return level_result
def get_max_path_length(universe, domain, task):
level_result = MAX_PATH_LENGTH_PER_UNIVERSE_DOMAIN_TASK.copy()
for level_key in (universe, domain, task):
if isinstance(level_result, int):
return level_result
level_result = level_result.get(level_key) or level_result[DEFAULT_KEY]
return level_result
def get_checkpoint_frequency(spec):
num_checkpoints = 10
config = spec.get('config', spec)
checkpoint_frequency = (
config
['algorithm_params']
['config']
['n_epochs']
) // num_checkpoints
return checkpoint_frequency
def get_total_timesteps(universe, domain, task):
level_result = TOTAL_STEPS_PER_UNIVERSE_DOMAIN_TASK.copy()
for level_key in (universe, domain, task):
if isinstance(level_result, (int, float)):
return level_result
level_result = (
level_result.get(level_key)
or level_result[DEFAULT_KEY])
return level_result
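# A short illustration of how the nested *_PER_UNIVERSE_DOMAIN_TASK tables
# resolve level by level; the second task name is a hypothetical key used only
# to show the fallback path.
#
#   get_total_timesteps('gym', 'Locobot', 'RealNavigationRND-v0')
#       # 'gym' and 'Locobot' levels exist; the task key is missing, so the
#       # lookup falls back to the Locobot-level DEFAULT_KEY -> 200000
#   get_total_timesteps('gym', 'HalfCheetah', 'v3')
#       # 'HalfCheetah' is missing, so it falls back to the gym-level
#       # DEFAULT_KEY -> 10000, returned as soon as an int is reached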
def get_algorithm_params(universe, domain, task):
total_timesteps = get_total_timesteps(universe, domain, task)
epoch_length = get_epoch_length(universe, domain, task)
n_epochs = total_timesteps / epoch_length
assert n_epochs == int(n_epochs)
algorithm_params = {
'config': {
'n_epochs': int(n_epochs),
'epoch_length': epoch_length,
'min_pool_size': get_max_path_length(universe, domain, task),
'batch_size': 256,
}
}
return algorithm_params
def get_environment_params(universe, domain, task):
environment_params = (
ENVIRONMENT_PARAMS_PER_UNIVERSE_DOMAIN_TASK
.get(universe, {}).get(domain, {}).get(task, {}))
return environment_params
def get_evaluation_environment_params(universe, domain, task):
environment_params = deepcopy(get_environment_params(universe, domain, task))
extra_params = (
EXTRA_EVALUATION_ENVIRONMENT_PARAMS_PER_UNIVERSE_DOMAIN_TASK
.get(universe, {}).get(domain, {}).get(task, {}))
environment_params.update(extra_params)
return environment_params
def get_variant_spec_base(universe, domain, task, policy, algorithm):
algorithm_params = deep_update(
deepcopy(ALGORITHM_PARAMS_BASE),
deepcopy(ALGORITHM_PARAMS_ADDITIONAL.get(algorithm, {})),
deepcopy(get_algorithm_params(universe, domain, task)),
)
perturbation_algorithm_params = deep_update(
deepcopy(ALGORITHM_PARAMS_BASE),
deepcopy(ALGORITHM_PARAMS_ADDITIONAL.get('SACMixed', {})),
deepcopy(get_algorithm_params(universe, domain, task)),
)
policy_params = deepcopy(POLICY_PARAMS_BASE[policy])
perturbation_policy_params = deepcopy(POLICY_PARAMS_BASE['discrete_gaussian'])
variant_spec = {
'git_sha': get_git_rev(__file__),
'environment_params': {
'training': {
'domain': domain,
'task': task,
'universe': universe,
'kwargs': get_environment_params(universe, domain, task),
},
'evaluation': {
'domain': domain,
'task': task,
'universe': universe,
'kwargs': get_evaluation_environment_params(universe, domain, task),
},
},
'policy_params': policy_params,
'perturbation_policy_params': perturbation_policy_params,
'Q_params': {
'class_name': 'double_feedforward_Q_function',
'config': {
'hidden_layer_sizes': (M, M),
'observation_keys': None,
'preprocessors': None,
},
},
'rnd_params': {
'class_name': 'RNDTrainer',
'config': {
'lr': 3e-4,
'output_shape': (512,),
'hidden_layer_sizes': (M, M),
'observation_keys': None,
'preprocessors': None,
},
},
'algorithm_params': algorithm_params,
'perturbation_algorithm_params': perturbation_algorithm_params,
'replay_pool_params': {
'class_name': 'SimpleReplayPool',
'config': {
'max_size': int(1e5),
},
},
'sampler_params': {
'class_name': 'SimpleSampler',
'config': {
'max_path_length': get_max_path_length(universe, domain, task),
}
},
'run_params': {
'host_name': get_host_name(),
'seed': tune.sample_from(lambda spec: np.random.randint(0, 10000)),
'checkpoint_at_end': True,
'checkpoint_frequency': tune.sample_from(get_checkpoint_frequency),
'checkpoint_replay_pool': False,
},
}
return variant_spec
def is_image_env(universe, domain, task, variant_spec):
return 'pixel_wrapper_kwargs' in variant_spec['environment_params']['training']['kwargs']
def get_variant_spec_image(universe,
domain,
task,
policy,
algorithm,
*args,
**kwargs):
variant_spec = get_variant_spec_base(
universe, domain, task, policy, algorithm, *args, **kwargs)
if is_image_env(universe, domain, task, variant_spec):
preprocessor_params = {
'class_name': 'convnet_preprocessor',
'config': {
'conv_filters': (64, 64, 64),
'conv_kernel_sizes': (3, 3, 3),
'conv_strides': (2, 2, 2),
'normalization_type': None,
'downsampling_type': 'conv',
},
}
pixel_keys = variant_spec['environment_params']['training']['kwargs']['pixel_wrapper_kwargs'].get(
'pixel_keys', ('pixels',))
preprocessors = dict()
for key in pixel_keys:
params = deepcopy(preprocessor_params)
params['config']['name'] = 'convnet_preprocessor_' + key
preprocessors[key] = params
# policy
variant_spec['policy_params']['config']['hidden_layer_sizes'] = (M, M)
variant_spec['policy_params']['config']['preprocessors'] = preprocessors
# perturbation policy
variant_spec['perturbation_policy_params']['config']['hidden_layer_sizes'] = (
tune.sample_from(lambda spec: (deepcopy(
spec.get('config', spec)
['policy_params']
['config']
['hidden_layer_sizes']
)))
)
variant_spec['perturbation_policy_params']['config']['preprocessors'] = tune.sample_from(
lambda spec: deepcopy(
spec.get('config', spec)
['policy_params']
['config']
['preprocessors']
))
# Q functions
variant_spec['Q_params']['config']['hidden_layer_sizes'] = (
tune.sample_from(lambda spec: (deepcopy(
spec.get('config', spec)
['policy_params']
['config']
['hidden_layer_sizes']
)))
)
variant_spec['Q_params']['config']['preprocessors'] = tune.sample_from(
lambda spec: (
deepcopy(
spec.get('config', spec)
['policy_params']
['config']
['preprocessors']),
None, # Action preprocessor is None
))
# RND networks
variant_spec['rnd_params']['config']['hidden_layer_sizes'] = (
tune.sample_from(lambda spec: (deepcopy(
spec.get('config', spec)
['policy_params']
['config']
['hidden_layer_sizes']
)))
)
variant_spec['rnd_params']['config']['preprocessors'] = tune.sample_from(
lambda spec: deepcopy(
spec.get('config', spec)
['policy_params']
['config']
['preprocessors']
))
return variant_spec
def get_variant_spec(args):
universe, domain, task = args.universe, args.domain, args.task
variant_spec = get_variant_spec_image(
universe, domain, task, args.policy, args.algorithm)
if args.checkpoint_replay_pool is not None:
variant_spec['run_params']['checkpoint_replay_pool'] = (
args.checkpoint_replay_pool)
return variant_spec
| 30.994286
| 151
| 0.525688
|
0e64314ae7c92d134e0c6cae56fe273e27bb6961
| 819
|
py
|
Python
|
expfactory/views/oci.py
|
YanivD/expfactory
|
a34ba21016ef01a44998764935be20ec99fdd0a8
|
[
"BSD-3-Clause"
] | null | null | null |
expfactory/views/oci.py
|
YanivD/expfactory
|
a34ba21016ef01a44998764935be20ec99fdd0a8
|
[
"BSD-3-Clause"
] | null | null | null |
expfactory/views/oci.py
|
YanivD/expfactory
|
a34ba21016ef01a44998764935be20ec99fdd0a8
|
[
"BSD-3-Clause"
] | null | null | null |
from expfactory.logger import bot
from flask import (
Blueprint,
render_template,
)
from expfactory.views.utils import (
perform_checks,
clear_session
)
from expfactory.server import app, csrf
import os
oci = Blueprint('oci', __name__,
static_url_path='/experiments/oci',
static_folder='/scif/apps/oci',
template_folder='/scif/apps')
@oci.route('/experiments/oci/')
def oci_base():
context = {'experiment': 'oci/index.html'}
return perform_checks('experiments/experiment.html', quiet=True,
context=context,
next="oci")
oci.before_request(csrf.protect)
app.register_blueprint(oci)
| 28.241379
| 73
| 0.554335
|
08f31a312398cd89d16f181bd3ea35840c30a834
| 98
|
py
|
Python
|
backend/pai_messages/apps.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | 3
|
2021-04-17T10:20:26.000Z
|
2022-03-08T07:36:13.000Z
|
backend/pai_messages/apps.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | null | null | null |
backend/pai_messages/apps.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class PaiMessagesConfig(AppConfig):
name = 'pai_messages'
| 16.333333
| 35
| 0.77551
|
1c641ee0ad0f7b818faedde471c34560551f7c6e
| 7,961
|
py
|
Python
|
electrum/plugins/cosigner_pool/proto/cosignerpool_pb2_grpc.py
|
exofoundation/EXOS-Electrum
|
89e00bc4a1c5f5cb48f9aa5ef77dd1a9bcad9da5
|
[
"MIT"
] | 5
|
2019-05-15T16:11:21.000Z
|
2021-02-20T14:12:20.000Z
|
electrum/plugins/cosigner_pool/proto/cosignerpool_pb2_grpc.py
|
exofoundation/EXOS-Electrum
|
89e00bc4a1c5f5cb48f9aa5ef77dd1a9bcad9da5
|
[
"MIT"
] | 38
|
2019-04-29T21:15:22.000Z
|
2021-12-04T18:36:28.000Z
|
electrum/plugins/cosigner_pool/proto/cosignerpool_pb2_grpc.py
|
exofoundation/EXOS-Electrum
|
89e00bc4a1c5f5cb48f9aa5ef77dd1a9bcad9da5
|
[
"MIT"
] | 5
|
2019-04-25T17:35:49.000Z
|
2021-08-12T19:50:41.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import cosignerpool_pb2 as cosignerpool__pb2
class CosignerpoolStub(object):
"""The greeting service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Put = channel.unary_unary(
'/cosignerpool.Cosignerpool/Put',
request_serializer=cosignerpool__pb2.PutRequest.SerializeToString,
response_deserializer=cosignerpool__pb2.PutResponse.FromString,
)
self.Get = channel.unary_unary(
'/cosignerpool.Cosignerpool/Get',
request_serializer=cosignerpool__pb2.GetRequest.SerializeToString,
response_deserializer=cosignerpool__pb2.GetResponse.FromString,
)
self.Delete = channel.unary_unary(
'/cosignerpool.Cosignerpool/Delete',
request_serializer=cosignerpool__pb2.DeleteRequest.SerializeToString,
response_deserializer=cosignerpool__pb2.DeleteResponse.FromString,
)
self.Ping = channel.unary_unary(
'/cosignerpool.Cosignerpool/Ping',
request_serializer=cosignerpool__pb2.Empty.SerializeToString,
response_deserializer=cosignerpool__pb2.Pong.FromString,
)
self.GetTime = channel.unary_unary(
'/cosignerpool.Cosignerpool/GetTime',
request_serializer=cosignerpool__pb2.Empty.SerializeToString,
response_deserializer=cosignerpool__pb2.GetTimeResponse.FromString,
)
class CosignerpoolServicer(object):
"""The greeting service definition.
"""
def Put(self, request, context):
"""Writes a value into a key
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""Reads the value of a key
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Deletes a key
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Ping(self, request, context):
"""Sends a 'ping'
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTime(self, request, context):
"""Gets the time on the server
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CosignerpoolServicer_to_server(servicer, server):
rpc_method_handlers = {
'Put': grpc.unary_unary_rpc_method_handler(
servicer.Put,
request_deserializer=cosignerpool__pb2.PutRequest.FromString,
response_serializer=cosignerpool__pb2.PutResponse.SerializeToString,
),
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=cosignerpool__pb2.GetRequest.FromString,
response_serializer=cosignerpool__pb2.GetResponse.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=cosignerpool__pb2.DeleteRequest.FromString,
response_serializer=cosignerpool__pb2.DeleteResponse.SerializeToString,
),
'Ping': grpc.unary_unary_rpc_method_handler(
servicer.Ping,
request_deserializer=cosignerpool__pb2.Empty.FromString,
response_serializer=cosignerpool__pb2.Pong.SerializeToString,
),
'GetTime': grpc.unary_unary_rpc_method_handler(
servicer.GetTime,
request_deserializer=cosignerpool__pb2.Empty.FromString,
response_serializer=cosignerpool__pb2.GetTimeResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'cosignerpool.Cosignerpool', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Cosignerpool(object):
"""The greeting service definition.
"""
@staticmethod
def Put(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cosignerpool.Cosignerpool/Put',
cosignerpool__pb2.PutRequest.SerializeToString,
cosignerpool__pb2.PutResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cosignerpool.Cosignerpool/Get',
cosignerpool__pb2.GetRequest.SerializeToString,
cosignerpool__pb2.GetResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cosignerpool.Cosignerpool/Delete',
cosignerpool__pb2.DeleteRequest.SerializeToString,
cosignerpool__pb2.DeleteResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Ping(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cosignerpool.Cosignerpool/Ping',
cosignerpool__pb2.Empty.SerializeToString,
cosignerpool__pb2.Pong.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetTime(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cosignerpool.Cosignerpool/GetTime',
cosignerpool__pb2.Empty.SerializeToString,
cosignerpool__pb2.GetTimeResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
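# A minimal client sketch, assuming a reachable Cosignerpool server; the
# address is a placeholder and the request field names are assumptions, since
# the .proto definitions are not shown here.
if __name__ == '__main__':
    channel = grpc.insecure_channel('localhost:50051')  # hypothetical endpoint
    stub = CosignerpoolStub(channel)
    # pong = stub.Ping(cosignerpool__pb2.Empty())
    # stub.Put(cosignerpool__pb2.PutRequest(key='k', value='v'))   # assumed fields
    # print(stub.Get(cosignerpool__pb2.GetRequest(key='k')))       # assumed fields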
| 39.410891
| 99
| 0.636855
|
1dbd0d35dc7480aeb05f1104b77e6764c5d7b933
| 2,632
|
py
|
Python
|
pgoapi/protos/pogoprotos/settings/notification_settings_pb2.py
|
linherest/pgoapi
|
e3bdce71b06c099663e9796c8df166883059edd9
|
[
"MIT"
] | 14
|
2017-03-28T16:32:24.000Z
|
2021-03-13T23:03:57.000Z
|
pgoapi/protos/pogoprotos/settings/notification_settings_pb2.py
|
linherest/pgoapi
|
e3bdce71b06c099663e9796c8df166883059edd9
|
[
"MIT"
] | 8
|
2017-03-01T07:56:09.000Z
|
2017-08-15T07:37:12.000Z
|
pgoapi/protos/pogoprotos/settings/notification_settings_pb2.py
|
linherest/pgoapi
|
e3bdce71b06c099663e9796c8df166883059edd9
|
[
"MIT"
] | 15
|
2017-02-24T01:30:23.000Z
|
2021-06-27T08:46:43.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/notification_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/settings/notification_settings.proto',
package='pogoprotos.settings',
syntax='proto3',
serialized_pb=_b('\n/pogoprotos/settings/notification_settings.proto\x12\x13pogoprotos.settings\"N\n\x14NotificationSettings\x12\x1a\n\x12pull_notifications\x18\x01 \x01(\x08\x12\x1a\n\x12show_notifications\x18\x02 \x01(\x08\x62\x06proto3')
)
_NOTIFICATIONSETTINGS = _descriptor.Descriptor(
name='NotificationSettings',
full_name='pogoprotos.settings.NotificationSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pull_notifications', full_name='pogoprotos.settings.NotificationSettings.pull_notifications', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='show_notifications', full_name='pogoprotos.settings.NotificationSettings.show_notifications', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=72,
serialized_end=150,
)
DESCRIPTOR.message_types_by_name['NotificationSettings'] = _NOTIFICATIONSETTINGS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NotificationSettings = _reflection.GeneratedProtocolMessageType('NotificationSettings', (_message.Message,), dict(
DESCRIPTOR = _NOTIFICATIONSETTINGS,
__module__ = 'pogoprotos.settings.notification_settings_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.settings.NotificationSettings)
))
_sym_db.RegisterMessage(NotificationSettings)
# @@protoc_insertion_point(module_scope)
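# A minimal usage sketch of the generated message class:
#
#     settings = NotificationSettings(pull_notifications=True,
#                                     show_notifications=False)
#     payload = settings.SerializeToString()
#     restored = NotificationSettings.FromString(payload)
#     assert restored.pull_notifications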
| 34.181818
| 242
| 0.783435
|
5ae602c1cdcf5d91895a6b1d48a460908012dfc3
| 2,800
|
py
|
Python
|
cirq-google/cirq_google/json_test_data/spec.py
|
pavoljuhas/Cirq
|
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
|
[
"Apache-2.0"
] | 1
|
2022-02-05T22:17:39.000Z
|
2022-02-05T22:17:39.000Z
|
cirq-google/cirq_google/json_test_data/spec.py
|
pavoljuhas/Cirq
|
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
|
[
"Apache-2.0"
] | null | null | null |
cirq-google/cirq_google/json_test_data/spec.py
|
pavoljuhas/Cirq
|
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=wrong-or-nonexistent-copyright-notice
import pathlib
import cirq_google
from cirq.testing.json import ModuleJsonTestSpec
from cirq_google.json_resolver_cache import _class_resolver_dictionary
TestSpec = ModuleJsonTestSpec(
name="cirq_google",
packages=[cirq_google, cirq_google.experimental],
test_data_path=pathlib.Path(__file__).parent,
not_yet_serializable=[
'FSIM_GATESET',
'SYC_GATESET',
'Sycamore',
'Sycamore23',
'SerializableDevice',
'SerializableGateSet',
'SQRT_ISWAP_GATESET',
'SQRT_ISWAP_INV_PARAMETERS',
'ALL_ANGLES_FLOQUET_PHASED_FSIM_CHARACTERIZATION',
'WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION',
'XmonDevice',
'XMON',
],
should_not_be_serialized=[
'AnnealSequenceSearchStrategy',
'CircuitOpDeserializer',
'CircuitOpSerializer',
'CircuitSerializer',
'CIRCUIT_SERIALIZER',
'CircuitWithCalibration',
'ConvertToSqrtIswapGates',
'ConvertToSycamoreGates',
'ConvertToXmonGates',
'DeserializingArg',
'Engine',
'EngineJob',
'EngineProcessor',
'EngineProgram',
'FSimPhaseCorrections',
'NAMED_GATESETS',
'NoiseModelFromGoogleNoiseProperties',
'ProtoVersion',
'GateOpSerializer',
'GateOpDeserializer',
'GreedySequenceSearchStrategy',
'PhasedFSimCalibrationError',
'PhasedFSimEngineSimulator',
'PerQubitDepolarizingWithDampedReadoutNoiseModel',
'SerializingArg',
'THETA_ZETA_GAMMA_FLOQUET_PHASED_FSIM_CHARACTERIZATION',
'QuantumEngineSampler',
'ProcessorSampler',
'ValidatingSampler',
'CouldNotPlaceError',
# Abstract:
'ExecutableSpec',
],
custom_class_name_to_cirq_type={
k: f'cirq.google.{k}'
for k in [
'BitstringsMeasurement',
'QuantumExecutable',
'QuantumExecutableGroup',
'KeyValueExecutableSpec',
'ExecutableResult',
'ExecutableGroupResult',
'QuantumRuntimeConfiguration',
'RuntimeInfo',
'SharedRuntimeInfo',
'ExecutableGroupResultFilesystemRecord',
'NaiveQubitPlacer',
'RandomDevicePlacer',
'HardcodedQubitPlacer',
'EngineProcessorRecord',
'SimulatedProcessorRecord',
'SimulatedProcessorWithLocalDeviceRecord',
'EngineResult',
]
},
resolver_cache=_class_resolver_dictionary(),
deprecated={
'_NamedConstantXmonDevice': 'v0.15',
'Bristlecone': 'v0.15',
'Foxtail': 'v0.15',
'GateTabulation': 'v0.16',
},
)
| 31.111111
| 70
| 0.628929
|
d3e08add2a7b9634b81871ab5cb3fd920434ec28
| 6,110
|
py
|
Python
|
api/user.py
|
whexy/CS307-12306
|
cd1e4fac92d10b6a7331d6746e6790c06f19ea77
|
[
"MIT"
] | 1
|
2021-03-23T12:58:10.000Z
|
2021-03-23T12:58:10.000Z
|
api/user.py
|
whexy/CS307-12306
|
cd1e4fac92d10b6a7331d6746e6790c06f19ea77
|
[
"MIT"
] | null | null | null |
api/user.py
|
whexy/CS307-12306
|
cd1e4fac92d10b6a7331d6746e6790c06f19ea77
|
[
"MIT"
] | 1
|
2020-05-23T01:19:22.000Z
|
2020-05-23T01:19:22.000Z
|
import datetime
from flask import request, jsonify
from flask_jwt_extended import create_access_token, jwt_required, get_jwt_identity
from flask_restful import Resource
from model.Database import DBSession
from model.models import User
class SignupApi(Resource):
"""
API class for user sign-up
"""
def post(self):
"""
Sign-up API
The body should be a JSON dictionary including the following attribute(s):
- `username`: `str`
- `real_name`: `str`
- `password`: `str`
- `id_card`: `str`
- `phone_number`: `str`
- `email`: `str`
**return**: A JSON dictionary with values:
- `code`: `int`, equals to 0 if sign-up is successful
- `error`: `str`, shown if `code != 0`
"""
session = DBSession()
try:
body = request.get_json()
if session.query(User).filter(User.username == body.get('username')).first() is not None:
return jsonify(error='Username already exists', code=406)
new_user = User(**body)
new_user.hash_password()
session.add(new_user)
session.commit()
return jsonify(code=0)
except:
session.rollback()
return jsonify(code=10, error='Unexpected error when creating user')
finally:
session.close()
class UserInfoApi(Resource):
"""
API class for user information operations
"""
def post(self):
"""
Login API
The body should be a JSON dictionary including the following attribute(s):
- `username`: `str`
- `password`: `str`
**return**: A JSON dictionary with values:
- `code`: `int`, equals to 0 if login is successful
- `token`: `str` representing JWT token, shown if `code == 0`
- `error`: `str`, shown if `code != 0`
"""
session = DBSession()
try:
body = request.get_json()
user = session.query(User).filter(User.username == body.get('username')).first()
if user is None:
return jsonify(error='Username not found', code=401)
authorized = user.check_password(body.get('password'))
if not authorized:
return jsonify(error='Wrong password', code=401)
expires = datetime.timedelta(days=1)
access_token = create_access_token(identity=str(user.user_id), expires_delta=expires)
return jsonify(token=access_token, code=0)
except:
return jsonify(code=10, error='Login error')
finally:
session.close()
@jwt_required
def get(self):
"""
User information query API, **JWT required**
**return**: A JSON dictionary with values:
- `code`: `int`, equals to 0 if query is successful
- `result`: `dict` containing user information, shown if `code == 0`
- `error`: `str`, shown if `code != 0`
"""
session = DBSession()
try:
user_id = get_jwt_identity()
user = session.query(User).filter(User.user_id == user_id).first()
if user is None:
return jsonify(error='User not found', code=404)
return jsonify(result=user.to_dict(), code=0)
except:
return jsonify(code=10, error='User information query error')
finally:
session.close()
@jwt_required
def patch(self):
"""
User information update API, **JWT required**
The body should be a JSON dictionary including the following attribute(s):
- `username`: `str`
- `password`: `str`
- `real_name`: `str`
- `email`: `str`
- `phone_number`: `str`
**return**: A JSON dictionary with values:
- `code`: `int`, equals to 0 if update is successful
- `error`: `str`, shown if `code != 0`
- `result`: `str`, shown if `code == 0`
"""
session = DBSession()
try:
body = request.get_json()
user_id = get_jwt_identity()
user = session.query(User).filter(User.user_id == user_id).first()
if user is None:
return jsonify(error='User not found', code=404)
if user.username != body.get('username'):
new_username = body.get('username')
if session.query(User).filter(User.username == new_username).first() is not None:
return jsonify(error='Username already exists', code=406)
user.username = new_username
user.real_name = body.get('real_name')
user.email = body.get('email')
user.phone_number = body.get('phone_number')
new_password = body.get('password')
if new_password:
if 8 <= len(new_password) <= 30:
user.password = new_password
user.hash_password()
else:
session.rollback()
                    return jsonify(code=1, error='Invalid password length')
session.commit()
            return jsonify(code=0, result='User information updated successfully')
except:
session.rollback()
return jsonify(code=10, error='Update failed')
finally:
session.close()
class UserCheckApi(Resource):
"""
API class for user existence check
"""
def get(self):
"""
User existence check API (check by username)
**argument**:
- `username`: `str`
**return**: A JSON dictionary with values:
        - `code`: `int`, equals to 0 if the query is successful
        - `result`: `boolean`, `True` if the username is still available (no user with that name exists yet)
"""
session = DBSession()
try:
username = request.args.get('username')
user = session.query(User).filter(User.username == username).first()
return jsonify(result=(user is None), code=0)
except:
return jsonify(code=10, error='Query error')
finally:
session.close()
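# A minimal wiring sketch, assuming these resources are registered with a
# flask_restful Api elsewhere in the project; the URL paths are placeholders.
#
#     from flask import Flask
#     from flask_restful import Api
#
#     app = Flask(__name__)
#     api = Api(app)
#     api.add_resource(SignupApi, '/signup')
#     api.add_resource(UserInfoApi, '/user')
#     api.add_resource(UserCheckApi, '/user/check')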
| 34.519774
| 101
| 0.551882
|
421afe949e2801092af06912a1104baaaf71e649
| 785
|
py
|
Python
|
custom_models.py
|
patra/whoops
|
c2dcf379dd20bbb0a46a6b46b59c4ad73e626df9
|
[
"MIT"
] | null | null | null |
custom_models.py
|
patra/whoops
|
c2dcf379dd20bbb0a46a6b46b59c4ad73e626df9
|
[
"MIT"
] | null | null | null |
custom_models.py
|
patra/whoops
|
c2dcf379dd20bbb0a46a6b46b59c4ad73e626df9
|
[
"MIT"
] | null | null | null |
from psiturk.db import Base, db_session, init_db
from sqlalchemy import or_, Column, Integer, String, DateTime, Boolean, Float, Text, ForeignKey
from sqlalchemy.orm import relationship, backref
import shortuuid
class LegitWorker(Base):
"""
DB for tracking workers who need compensation
"""
__tablename__ = 'legit_worker'
index = Column(Integer, primary_key=True, unique=True)
amt_worker_id = Column(String(128))
completion_code = Column(String(128))
status = Column(String(128))
bonus = Column(Float)
def __init__(self, workerid):
self.amt_worker_id = workerid
self.status = 'owed'
self.completion_code = shortuuid.uuid()
def set_bonus(self, bonus):
self.bonus = bonus
def submitted(self):
self.status = 'submitted'
def paid(self):
self.status = 'paid'
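# A minimal usage sketch, assuming psiTurk's db_session is used to persist the
# row; the worker id and bonus amount are placeholders.
#
#     init_db()
#     worker = LegitWorker('A1EXAMPLEWORKER')
#     worker.set_bonus(1.50)
#     db_session.add(worker)
#     db_session.commit()
#     print(worker.completion_code)   # shortuuid assigned in __init__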
| 25.322581
| 95
| 0.743949
|
6ec2861da824cecb3e346776aa4d1b1e93a92526
| 25,330
|
py
|
Python
|
services/email_crm/service.py
|
adelaideX/optimus_ingestor
|
97f46e1fa730bb9b853e17b3b34874b487ba7739
|
[
"MIT"
] | null | null | null |
services/email_crm/service.py
|
adelaideX/optimus_ingestor
|
97f46e1fa730bb9b853e17b3b34874b487ba7739
|
[
"MIT"
] | null | null | null |
services/email_crm/service.py
|
adelaideX/optimus_ingestor
|
97f46e1fa730bb9b853e17b3b34874b487ba7739
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Service for importing the email extract from edx
"""
import glob
import json
import os
import time
import urllib2
import warnings
import zipfile
from datetime import datetime
import MySQLdb
import unicodecsv as csv
import base_service
import config
import utils
class EmailCRM(base_service.BaseService):
"""
Generates the required tables to store the email extract for use with CRM
"""
inst = None
def __init__(self):
self.email_test = None
EmailCRM.inst = self
super(EmailCRM, self).__init__()
# The pretty name of the service
self.pretty_name = "Email Extract For CRM"
# Whether the service is enabled
self.enabled = True
# Whether to run more than once
self.loop = True
# The amount of time to sleep in seconds
self.sleep_time = 60
self.sql_ecrm_conn = None
self.mongo_db = None
self.mongo_dbname = ""
# Variables
self.ecrm_db = 'Email_CRM'
self.ecrm_table = 'emailcrm'
self.cn_table = 'countries_io'
self.cn_url = 'http://country.io/names.json'
self.rn_db = 'Report_Notifier'
self.report_table = 'report'
self.le_table = 'lastexport'
self.basepath = os.path.dirname(__file__)
self.courses = {}
self.initialize()
pass
def setup(self):
"""
Set initial variables before the run loop starts
"""
self.sql_ecrm_conn = self.connect_to_sql(self.sql_ecrm_conn, self.ecrm_db, True)
self.courses = self.get_all_courses()
pass
def run(self):
"""
Runs every X seconds, the main run loop
"""
last_run = self.find_last_run_ingest("EmailCRM")
last_personcourse = self.find_last_run_ingest("PersonCourse")
last_dbstate = self.find_last_run_ingest("DatabaseState")
if self.finished_ingestion("PersonCourse") and \
last_run < last_personcourse and \
self.finished_ingestion("DatabaseState") and \
last_run < last_dbstate:
# Create country name table and import data (if required)
self.create_load_cn_table()
# Create emailcrm table (if required)
self.create_ecrm_table()
# Create 'last export table'
self.create_le_table()
ingests = self.get_ingests()
for ingest in ingests:
if ingest['type'] == 'file':
# print "ingesting " + ingest['meta']
self.start_ingest(ingest['id'])
path = ingest['meta']
# purge the table
self.truncate_ecrm_table()
# Ingest the email file
self.ingest_csv_file(path, self.ecrm_table)
# export the file
self.datadump2csv()
# Load last export file so we can use it for delta
self.load_last_export()
# Archive the file and email
self.zip_mail_last_export()
# clean up the progressive data in the export table
self.cleanup_last_export()
# update the ingest record
self.finish_ingest(ingest['id'])
self.save_run_ingest()
utils.log("EmailCRM completed")
pass
@property
def get_last_csv_file(self):
"""
Get the filename including the full path of the last saved crm_email export file.
"""
backup_path = config.EXPORT_PATH
file_list = glob.glob(os.path.join(backup_path, self.ecrm_table + "*.csv"))
file_list.sort(reverse=True)
        last_file = file_list[0] if file_list else None
return last_file
def zip_mail_last_export(self):
"""
Zip and mail last export file.
"""
try:
import zlib
compression = zipfile.ZIP_DEFLATED
except:
compression = zipfile.ZIP_STORED
last_file = self.get_last_csv_file
if last_file:
zf = zipfile.ZipFile(last_file + ".zip", mode='w')
try:
print 'Adding {0} to zipfile.'.format(last_file)
zf.write(last_file, arcname=os.path.basename(last_file), compress_type=compression)
finally:
zf.close()
else:
return
# send an email with the attachment
attachments = [last_file + ".zip"]
# uses data from the report_notifier service db's
service_reports = self.get_service_report()
if len(service_reports) > 0:
for report in service_reports:
print("Sending Email: " + report['report_name'])
email_data = json.loads(report['email'])
html = "{0} <br/><br/>" \
"The following report (attached) was generated by AdX Analytics on {1}: <br/> " \
"<blockquote><strong>{2}</strong></blockquote> " \
"<blockquote>Attached files:</blockquote> " \
"<blockquote><blockquote>{3}</blockquote></blockquote><br/><br/>" \
"{4}".format(email_data['msg_leader'], str(datetime.now().strftime("%d/%m/%Y")),
report['report_name'], os.path.basename(last_file), email_data['msg_sig'])
# to = email_data['to_email']
if "dev" in last_file:
# send only to from address
to = [email_data['from_email']]
cc = []
else:
to = email_data['to_email']
cc = email_data['cc_email']
# send the email
utils.send_mail(send_from=email_data['from_email'],
send_to=to,
cc_to=cc,
subject=report['report_name'] + ' - ' + str(datetime.now()),
text=html,
files=attachments)
pass
def get_service_report(self):
"""
Retrieves the relevant reports to be processed for this service
:return reports in a dict
"""
sql_rn_conn = None
sql_rn_conn = self.connect_to_sql(sql_rn_conn, self.rn_db, True)
query = "SELECT * FROM " + self.report_table + " WHERE active = 1 AND report_group_code = '" + name() + "'"
cursor = sql_rn_conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(query)
reports = cursor.fetchall()
cursor.close()
return reports
def load_last_export(self):
"""
Truncate the table then load the last export file.
"""
last_file = self.get_last_csv_file
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
query = "SELECT 1 FROM %s WHERE extract_file = '%s' " % (self.le_table, last_file)
cursor = self.sql_ecrm_conn.cursor()
if not cursor.execute(query) and last_file:
self.ingest_csv_file(last_file, self.le_table)
cursor.close()
self.sql_ecrm_conn.commit()
# update the extract_date timestamp with now.
query = "UPDATE %s SET extract_date = '%s', extract_file = '%s' WHERE extract_date is NULL" % (
self.le_table, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())), last_file)
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
cursor.close()
self.sql_ecrm_conn.commit()
warnings.filterwarnings('always', category=MySQLdb.Warning)
pass
def ingest_csv_file(self, ingest_file_path, tablename):
"""
Ingests a csv file of the type defined, may not work for all separated text files
:param ingest_file_path:
:param tablename:
:return:
"""
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
query = "LOAD DATA LOCAL INFILE '" + ingest_file_path + "' INTO TABLE " + tablename + " " \
"CHARACTER SET UTF8 FIELDS TERMINATED BY ',' ENCLOSED BY '\"' ESCAPED BY '\' LINES TERMINATED BY '\\r\\n' IGNORE 1 LINES"
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
warnings.filterwarnings('always', category=MySQLdb.Warning)
cursor.close()
self.sql_ecrm_conn.commit()
print "Ingested " + ingest_file_path + " into " + tablename
pass
def truncate_ecrm_table(self):
"""
Truncate the email table
"""
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
query = "TRUNCATE " + self.ecrm_table
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
warnings.filterwarnings('always', category=MySQLdb.Warning)
self.sql_ecrm_conn.commit()
cursor.close()
print "Truncating " + self.ecrm_table
pass
def create_ecrm_table(self):
"""
Create the emailcrm table
"""
columns = [
{"col_name": "user_id", "col_type": "int(11)"},
{"col_name": "username", "col_type": "varchar(255)"},
{"col_name": "email", "col_type": "varchar(255)"},
{"col_name": "full_name", "col_type": "varchar(255)"},
{"col_name": "course_id", "col_type": "varchar(255)"},
{"col_name": "is_opted_in_for_email", "col_type": "varchar(255)"},
{"col_name": "preference_set_datetime", "col_type": "datetime"},
]
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
query = "CREATE TABLE IF NOT EXISTS " + self.ecrm_table
query += "("
for column in columns:
query += column['col_name'] + " " + column['col_type'] + ', '
query += " KEY idx_user_email (`user_id`, `course_id`)) DEFAULT CHARSET=utf8;"
try:
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
except (MySQLdb.OperationalError, MySQLdb.ProgrammingError), e:
utils.log("Connection FAILED: %s" % (repr(e)))
self.sql_ecrm_conn = self.connect_to_sql(self.sql_ecrm_conn, self.ecrm_db, True)
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
utils.log("Reset connection and executed query")
warnings.filterwarnings('always', category=MySQLdb.Warning)
cursor.close()
pass
def create_le_table(self):
"""
Create the lastexport table
"""
columns = [
{"col_name": "user_id", "col_type": "int(11)"},
{"col_name": "is_staff", "col_type": "varchar(255)"},
{"col_name": "is_active", "col_type": "int(11)"},
{"col_name": "email", "col_type": "varchar(255)"},
{"col_name": "viewed", "col_type": "int(11)"},
{"col_name": "explored", "col_type": "int(11)"},
{"col_name": "certified", "col_type": "int(11)"},
{"col_name": "mode", "col_type": "varchar(255)"},
{"col_name": "first_name", "col_type": "varchar(255)"},
{"col_name": "last_name", "col_type": "varchar(255)"},
{"col_name": "course_id", "col_type": "varchar(255)"},
{"col_name": "course_name", "col_type": "varchar(255)"},
{"col_name": "course_start_date", "col_type": "varchar(255)"},
{"col_name": "enrolled_date", "col_type": "varchar(255)"},
{"col_name": "is_opted_in_for_email", "col_type": "varchar(255)"},
{"col_name": "gender", "col_type": "varchar(255)"},
{"col_name": "year_of_birth", "col_type": "varchar(255)"},
{"col_name": "level_of_education", "col_type": "varchar(255)"},
{"col_name": "levelofEd", "col_type": "varchar(255)"},
{"col_name": "country_name", "col_type": "varchar(255)"},
{"col_name": "extract_date", "col_type": "datetime"},
{"col_name": "extract_file", "col_type": "varchar(255)"},
]
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
query = "CREATE TABLE IF NOT EXISTS " + self.le_table
query += "("
for column in columns:
query += column['col_name'] + " " + column['col_type'] + ', '
query += " KEY `idx_le` (`user_id`,`viewed`,`explored`,`certified`,`is_opted_in_for_email`)) DEFAULT CHARSET=utf8;"
try:
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
except (MySQLdb.OperationalError, MySQLdb.ProgrammingError), e:
utils.log("Connection FAILED: %s" % (repr(e)))
self.sql_ecrm_conn = self.connect_to_sql(self.sql_ecrm_conn, self.ecrm_db, True)
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
utils.log("Reset connection and executed query")
warnings.filterwarnings('always', category=MySQLdb.Warning)
cursor.close()
pass
def create_load_cn_table(self):
"""
Create the country name table
"""
columns = [
{"col_name": "country_code", "col_type": "varchar(255)"},
{"col_name": "country_name", "col_type": "varchar(255)"},
]
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
query = "CREATE TABLE IF NOT EXISTS " + self.cn_table
query += "("
for column in columns:
query += "`" + column['col_name'] + "`" + " " + column['col_type'] + ', '
query += " KEY `idx_2_letter_code` (`country_code`)) CHARSET=utf8;"
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
cursor.close()
query = "SELECT 1 FROM " + self.cn_table
cursor = self.sql_ecrm_conn.cursor()
if not cursor.execute(query):
# import data
try:
countrydatafile = urllib2.urlopen(self.cn_url)
if countrydatafile:
countrydata = json.load(countrydatafile)
sql = '''INSERT INTO ''' + self.cn_table + ''' VALUES(%s, %s)'''
sql_values_list = list()
for key, value in countrydata.iteritems():
sql_values_list.append((key, value))
cursor = self.sql_ecrm_conn.cursor()
cursor.executemany(sql, sql_values_list)
cursor.close()
except Exception, e:
print repr(e)
utils.log("Country import failed: %s" % (repr(e)))
cursor.close()
warnings.filterwarnings('always', category=MySQLdb.Warning)
pass
def get_ingests(self):
"""
Retrieves the relevant ingests for the service
"""
self.setup_ingest_api()
cur = self.api_db.cursor()
query = "SELECT * FROM ingestor" \
" WHERE service_name = '" + str(self.__class__.__name__) + "' " \
" AND completed = 0 ORDER BY created ASC;"
cur.execute(query)
ingests = []
for row in cur.fetchall():
ingest = {
'id': row[0],
'type': row[2],
'meta': row[3]
}
ingests.append(ingest)
cur.close()
return ingests
def datadump2csv(self):
"""
Generates a CSV file for CRM
"""
e_tablename = self.ecrm_table
print "Exporting CSV: " + e_tablename
backup_path = config.EXPORT_PATH
current_time = time.strftime('%Y%m%d-%H%M%S')
# loop through courses -
# write first file with headers then
# each subsequent iteration append to file
backup_prefix = e_tablename + "_" + current_time
backup_file = os.path.join(backup_path, backup_prefix + ".csv")
for idx, course in enumerate(self.courses.items()):
try:
course_id = course[0]
mongoname = course[1]['mongoname']
dbname = course[1]['dbname']
# Get nice course name from course info
json_file = dbname.replace("_", "-") + '.json'
courseinfo = self.loadcourseinfo(json_file)
if courseinfo is None:
utils.log("Can not find course info for ." + str(course_id))
continue
nice_name = courseinfo['display_name']
start = courseinfo['start'].split('T')
start_date = datetime.strptime(start[0].replace('"', ''), "%Y-%m-%d")
start_date = start_date.strftime("%d/%m/%Y")
# au.last_login,
query = "SELECT e.user_id, " \
"CASE au.is_staff " \
"WHEN 1 THEN 'Yes' ELSE 'No' END AS is_staff, " \
"au.is_active, TRIM(TRAILING '.' FROM e.email ) AS email, " \
"pc.viewed, pc.explored, pc.certified, pc.mode, " \
"TRIM(TRAILING '\\\\' FROM REPLACE(REPLACE(substring_index(e.full_name, ' ', 1),'�', ''), ',', '')) AS first_name, " \
"TRIM(TRAILING '\\\\' FROM SUBSTR(TRIM(REPLACE(REPLACE(REPLACE(REPLACE(SUBSTR(e.full_name, LOCATE(' ', e.full_name)), '�', ''), ',', ''), '|', ''), CONVERT(CHAR(127) USING utf8), '')), 1, 30)) AS last_name, " \
"'{2}' AS course_id, " \
"'{3}' AS course_name, " \
"'{5}' AS course_start_date, " \
"DATE_FORMAT(pc.start_time,'%d/%m/%Y') as enrolled, " \
"CASE WHEN DATE_FORMAT(NOW(), '%Y') - up.year_of_birth < 15 THEN 'No' ELSE CASE WHEN e.is_opted_in_for_email = 'True' THEN 'Yes' ELSE 'No' END END AS is_opted_in_for_email, " \
"CASE up.gender WHEN 'm' THEN 'Male' WHEN 'f' THEN 'Female' WHEN 'o' THEN 'Other' ELSE NULL END as gender, " \
"CASE WHEN up.year_of_birth <= 1900 THEN NULL " \
"ELSE up.year_of_birth END AS year_of_birth ," \
"up.level_of_education, " \
"( CASE up.level_of_education " \
"WHEN 'p' THEN 'Doctorate' " \
"WHEN 'a' THEN 'Associate degree' " \
"WHEN 'b' THEN 'Bachelors degree' " \
"WHEN 'm' THEN 'Masters or professional degree' " \
"WHEN 'hs' THEN 'Secondary/high school' " \
"WHEN 'jhs' THEN 'Junior secondary/junior high/middle school' " \
"WHEN 'el' THEN 'Elementary/primary school' " \
"WHEN 'none' THEN 'No Formal Education' " \
"WHEN 'other' THEN 'Other Education' " \
"WHEN '' THEN 'User did not specify level of education' " \
"WHEN 'p_se' THEN 'Doctorate in science or engineering (no longer used)' " \
"WHEN 'p_oth' THEN 'Doctorate in another field (no longer used)' " \
"ELSE 'User did not specify level of education' END ) AS levelofEd, " \
"c.country_name " \
"FROM {4}.emailcrm e " \
"JOIN {0}.auth_user au ON e.user_id = au.id " \
"JOIN Person_Course.personcourse_{2} pc ON e.user_id = pc.user_id " \
"JOIN {0}.auth_userprofile up ON e.user_id = up.user_id " \
"LEFT JOIN {4}.countries_io c ON up.country = c.country_code " \
"LEFT JOIN {4}.lastexport le " \
"ON le.user_id = e.user_id " \
"AND le.viewed = pc.viewed " \
"AND le.explored = pc.explored " \
"AND le.certified = pc.certified " \
"AND le.mode = pc.mode " \
"AND le.course_id = '{2}' " \
"WHERE e.course_id = '{1}' " \
"AND le.user_id is null " \
"AND le.viewed is null " \
"AND le.explored is null " \
"AND le.certified is null " \
"AND le.mode is null " \
"AND le.course_id is null ".format(dbname, mongoname, course_id, nice_name, self.ecrm_db,
start_date)
ec_cursor = self.sql_ecrm_conn.cursor()
ec_cursor.execute(query)
result = ec_cursor.fetchall()
ec_cursor.close()
if idx == 0:
with open(backup_file, "wb") as csv_file:
csv_writer = csv.writer(csv_file, dialect='excel', encoding='utf-8')
csv_writer.writerow([i[0] for i in ec_cursor.description]) # write headers
for row in result:
csv_writer.writerow(row)
else:
with open(backup_file, "ab") as csv_file:
csv_writer = csv.writer(csv_file, dialect='excel', encoding='utf-8')
for row in result:
csv_writer.writerow(row)
utils.log("EmailCRM select written to file: %s" % course_id)
except Exception, e:
print repr(e)
utils.log("EmailCRM FAILED: %s" % (repr(e)))
break
utils.log("The EmailCRM data: %s exported to csv file %s" % (e_tablename, backup_file))
pass
def loadcourseinfo(self, json_file):
"""
Loads the course information from JSON course structure file
:param json_file: the name of the course structure file
:return the course information
"""
# print self
courseurl = config.SERVER_URL + '/datasources/course_structure/' + json_file
print "ATTEMPTING TO LOAD " + courseurl
courseinfofile = urllib2.urlopen(courseurl)
if courseinfofile:
courseinfo = json.load(courseinfofile)
return courseinfo
return None
def cleanup_last_export(self):
"""
Clean up the incremental values from the lastexport table
"""
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
query = "DELETE le FROM {1}.{0} le, " \
"( SELECT y.`user_id`, y.`course_id`, y.`viewed`, y.`explored`, y.`certified`, (y.`viewed` + y.`explored` + y.`certified`) as score " \
"FROM {1}.{0} y " \
"WHERE y.`viewed` + y.`explored` + y.`certified` < ( " \
"SELECT max(x.`viewed` + x.`explored` + x.`certified`) as score " \
"FROM {1}.{0} x " \
"WHERE x.`user_id` = y.`user_id` " \
"AND x.`course_id` = y.`course_id` " \
" ) ) as bad_rows " \
"WHERE le.`user_id` = bad_rows.`user_id` " \
"AND le.`course_id` = bad_rows.`course_id` " \
"AND le.`viewed` = bad_rows.`viewed` " \
"AND le.`explored` = bad_rows.`explored` " \
"AND le.`certified` = bad_rows.`certified` ;".format(self.le_table, self.ecrm_db)
try:
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
except (MySQLdb.OperationalError, MySQLdb.ProgrammingError), e:
utils.log("Connection FAILED: %s" % (repr(e)))
self.sql_ecrm_conn = self.connect_to_sql(self.sql_ecrm_conn, self.ecrm_db, True)
cursor = self.sql_ecrm_conn.cursor()
cursor.execute(query)
utils.log("Reset connection and executed query")
warnings.filterwarnings('always', category=MySQLdb.Warning)
self.sql_ecrm_conn.commit()
cursor.close()
utils.log("The lastexport table has been cleaned.")
pass
def get_files(path):
"""
Returns a list of files that the service will ingest
:param path: The path of the files
:return: An array of file paths
"""
print path
required_files = []
main_path = os.path.realpath(os.path.join(path, 'email-opt-in'))
# patch main_path to use child directory as we can't use symlink
if not config.SYMLINK_ENABLED:
main_path = utils.get_subdir(main_path)
tmp = os.path.dirname(main_path)
# get the list of directory paths
emaildirs = [x[0] for x in os.walk(tmp)]
# latest dir path in list
latestdir = emaildirs[len(emaildirs)-1]
for filename in os.listdir(latestdir):
if '.csv' in filename:
required_files.append(os.path.join(latestdir, filename))
return required_files
def name():
"""
Returns the name of the service class
"""
return "EmailCRM"
def service():
"""
Returns an instance of the service
"""
return EmailCRM()
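# A minimal sketch of a single pass, assuming the base_service framework calls
# these methods in this order; only setup()/run() themselves are taken from
# the class above.
#
#     svc = service()   # constructs EmailCRM (initialize() runs in __init__)
#     svc.setup()       # opens the Email_CRM connection, loads the course list
#     svc.run()         # one pass: ingest CSVs, export the delta, email it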
| 40.142631
| 234
| 0.535334
|
84392746199508392bef8bd22bc64e6a8c8b3a5d
| 3,539
|
py
|
Python
|
salt/utils/slack.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 2
|
2016-11-14T15:08:53.000Z
|
2016-11-20T09:25:30.000Z
|
salt/utils/slack.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 3
|
2021-03-31T19:53:24.000Z
|
2021-12-13T20:46:19.000Z
|
salt/utils/slack.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 2
|
2020-11-04T06:32:02.000Z
|
2020-11-06T11:01:18.000Z
|
# -*- coding: utf-8 -*-
"""
Library for interacting with Slack API
.. versionadded:: 2016.3.0
:configuration: This module can be used by specifying the name of a
configuration profile in the minion config, minion pillar, or master
config.
For example:
.. code-block:: yaml
slack:
api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.ext.six.moves.http_client
# pylint: enable=import-error,no-name-in-module
import salt.utils.http
# Import 3rd-party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext.six.moves.urllib.parse import urljoin as _urljoin
from salt.version import __version__
log = logging.getLogger(__name__)
def query(
function,
api_key=None,
args=None,
method="GET",
header_dict=None,
data=None,
opts=None,
):
"""
Slack object method function to construct and execute on the API URL.
:param api_key: The Slack api key.
:param function: The Slack api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method.
:return: The json response from the API call or False.
"""
ret = {"message": "", "res": True}
slack_functions = {
"rooms": {"request": "channels.list", "response": "channels"},
"users": {"request": "users.list", "response": "members"},
"message": {"request": "chat.postMessage", "response": "channel"},
}
if not api_key:
api_key = __salt__["config.get"]("slack.api_key") or __salt__["config.get"](
"slack:api_key"
)
if not api_key:
log.error("No Slack api key found.")
ret["message"] = "No Slack api key found."
ret["res"] = False
return ret
api_url = "https://slack.com"
base_url = _urljoin(api_url, "/api/")
path = slack_functions.get(function).get("request")
url = _urljoin(base_url, path, False)
if not isinstance(args, dict):
query_params = {}
else:
query_params = args.copy()
query_params["token"] = api_key
if header_dict is None:
header_dict = {}
if method != "POST":
header_dict["Accept"] = "application/json"
result = salt.utils.http.query(
url,
method,
params=query_params,
data=data,
decode=True,
status=True,
header_dict=header_dict,
opts=opts,
)
if result.get("status", None) == salt.ext.six.moves.http_client.OK:
_result = result["dict"]
response = slack_functions.get(function).get("response")
if "error" in _result:
ret["message"] = _result["error"]
ret["res"] = False
return ret
ret["message"] = _result.get(response)
return ret
elif result.get("status", None) == salt.ext.six.moves.http_client.NO_CONTENT:
return True
else:
log.debug(url)
log.debug(query_params)
log.debug(data)
log.debug(result)
if "dict" in result:
_result = result["dict"]
if "error" in _result:
ret["message"] = result["error"]
ret["res"] = False
return ret
ret["message"] = _result.get(response)
else:
ret["message"] = "invalid_auth"
ret["res"] = False
return ret
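# Illustrative usage sketch (not part of the original module). The token and
# channel below are placeholders, and the call assumes a Salt environment in
# which salt.utils.http and the Salt configuration system are available:
#
#     result = query(
#         function="message",
#         api_key="xoxp-REPLACE-ME",  # hypothetical token
#         args={"channel": "#general", "text": "hello from salt"},
#         method="POST",
#     )
#     if result["res"]:
#         print(result["message"])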
| 27.648438
| 84
| 0.594236
|
20bd4c54f9895d1d6f4fd5346354b9be082ad49a
| 94
|
py
|
Python
|
telethon/version.py
|
islam-200555/Telethon
|
85103bcf6de8024c902ede98f0b9bf0f7f47a0aa
|
[
"MIT"
] | 2
|
2021-01-06T12:49:49.000Z
|
2021-04-23T16:32:13.000Z
|
telethon/version.py
|
islam-200555/Telethon
|
85103bcf6de8024c902ede98f0b9bf0f7f47a0aa
|
[
"MIT"
] | null | null | null |
telethon/version.py
|
islam-200555/Telethon
|
85103bcf6de8024c902ede98f0b9bf0f7f47a0aa
|
[
"MIT"
] | null | null | null |
# Versions should comply with PEP440.
# This line is parsed in setup.py:
__version__ = '0.19'
| 23.5
| 37
| 0.734043
|
f42c93663a644edd8df6eea468d42bc17365f5f7
| 373
|
py
|
Python
|
src/posts/migrations/0002_post_featured.py
|
PeterKim321/DjangoFoodBlog
|
7a0a63a8fd8eaf4bb18945b655f3115fa7b4a580
|
[
"Apache-2.0"
] | null | null | null |
src/posts/migrations/0002_post_featured.py
|
PeterKim321/DjangoFoodBlog
|
7a0a63a8fd8eaf4bb18945b655f3115fa7b4a580
|
[
"Apache-2.0"
] | null | null | null |
src/posts/migrations/0002_post_featured.py
|
PeterKim321/DjangoFoodBlog
|
7a0a63a8fd8eaf4bb18945b655f3115fa7b4a580
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.4 on 2020-12-11 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='featured',
field=models.BooleanField(default=True),
),
]
| 19.631579
| 52
| 0.58445
|
556c3f8cd87e609b32203a82e605b3ea0af31a3e
| 359
|
py
|
Python
|
week1/the_real_deal/zero_insert_test.py
|
sevgo/Programming101
|
ac25c4d9695563b449a629c60ec77a739c9f5be3
|
[
"BSD-3-Clause"
] | null | null | null |
week1/the_real_deal/zero_insert_test.py
|
sevgo/Programming101
|
ac25c4d9695563b449a629c60ec77a739c9f5be3
|
[
"BSD-3-Clause"
] | 1
|
2021-09-16T05:44:31.000Z
|
2021-09-16T05:44:31.000Z
|
week1/the_real_deal/zero_insert_test.py
|
sevgo/Programming101
|
ac25c4d9695563b449a629c60ec77a739c9f5be3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import unittest
from zero_insert import zero_insert
class Test_zero_insert(unittest.TestCase):
def test_zero_insert_type(self):
self.assertIsInstance(zero_insert(116457), int)
    def test_zero_insert_result(self):
self.assertTrue(6040406 == zero_insert(6446))
if __name__ == "__main__":
unittest.main()
| 19.944444
| 55
| 0.735376
|
de2a423ba03a55eeefd6a74201b7ea2e168fce57
| 1,073
|
py
|
Python
|
from_python_community/hidecard.py
|
ZaytsevNS/python_practice
|
109e14923a2ddeacc5360fd72947275afd2159e3
|
[
"MIT"
] | null | null | null |
from_python_community/hidecard.py
|
ZaytsevNS/python_practice
|
109e14923a2ddeacc5360fd72947275afd2159e3
|
[
"MIT"
] | null | null | null |
from_python_community/hidecard.py
|
ZaytsevNS/python_practice
|
109e14923a2ddeacc5360fd72947275afd2159e3
|
[
"MIT"
] | null | null | null |
# Task:
# The input is a string with a card number. It may contain spaces, which the function must handle. The result is 12 '*' characters followed by the last 4 characters of the input string.
# Example:
# hidecard("3459 0054 1234 6674") → ************6674
# hidecard("1234567890987654") → ************7654
import unittest
def hidecard(card: str) -> str:
formatted_number = ''.join(card.split())
if len(formatted_number) != 16:
return 'Try again'
return f'{"*" * 12}{formatted_number[-4:]}'
class TestHideCard(unittest.TestCase):
def test_one(self):
""" Should return hide number """
self.assertEqual('************6674', hidecard('3459 0054 1234 6674'))
def test_two(self):
""" Should return hide number """
self.assertEqual('************7654', hidecard('1234567890987654'))
def test_three(self):
""" Should return 'Try again' """
self.assertEqual('Try again', hidecard('4323 2145 3212'))
if __name__ == '__main__':
unittest.main()
| 30.657143
| 180
| 0.598322
|
c05da427fa0dcacdfbd913e6140b89cdfea6fa80
| 1,729
|
py
|
Python
|
tests/directives/test_autocenum.py
|
speedyleion/sphinx-c-doc
|
65721f7164a463742bc2d0b18a92b2d633a5eb3a
|
[
"MIT",
"Unlicense"
] | 7
|
2020-06-06T06:47:31.000Z
|
2021-12-12T14:18:08.000Z
|
tests/directives/test_autocenum.py
|
speedyleion/sphinx-c-doc
|
65721f7164a463742bc2d0b18a92b2d633a5eb3a
|
[
"MIT",
"Unlicense"
] | 15
|
2020-03-12T01:43:24.000Z
|
2021-04-21T06:45:13.000Z
|
tests/directives/test_autocenum.py
|
speedyleion/sphinx-c-doc
|
65721f7164a463742bc2d0b18a92b2d633a5eb3a
|
[
"MIT",
"Unlicense"
] | 1
|
2021-08-05T20:23:01.000Z
|
2021-08-05T20:23:01.000Z
|
"""
Test the parsing of c enum objects
"""
from textwrap import dedent
import pytest
from sphinx.ext.autodoc.directive import AutodocDirective
class TestAutoCEnum:
"""
Testing class for the autocenum directive for use in enums
"""
some_enum = """\
enum some_enum
If you want to document the enumerators with napoleon
then you use the section title Enumerators:.
enumerator THE_FIRST_ENUM
Used for the first item
Documentation in a comment for THE_FIRST_ITEM. Note this is trailing, for some reason clang will apply leading comments to all the enumerators
enumerator THE_SECOND_ENUM
Second verse same as the first.
enumerator THE_THIRD_ENUM
Not once, note twice, but thrice.
enumerator THE_LAST_ENUM
Just to be sure."""
doc_data = [
("example.c::some_enum", some_enum),
]
@pytest.mark.parametrize("enum, expected_doc", doc_data)
def test_doc(self, enum, expected_doc, sphinx_state):
"""
Tests the restructured text output returned by the directive.
"""
directive = AutodocDirective(
"autocenum",
[enum],
{"members": None},
None,
None,
None,
None,
sphinx_state,
None,
)
output = directive.run()
# First item is the index entry
assert 2 == len(output)
body = output[1]
# For whatever reason the as text comes back with double spacing, so we
# knock it down to single spacing to make the expected string smaller.
assert body.astext().replace("\n\n", "\n") == dedent(expected_doc)
| 27.444444
| 150
| 0.61365
|
2522f031908317871ec2d2e4adb091aa00029acc
| 596
|
py
|
Python
|
apps/accounts/urls.py
|
pinkerltm/datacube-ui
|
325d404a994d49c23922e7de10c7ab244b78500b
|
[
"Apache-2.0"
] | 1
|
2019-07-22T05:24:40.000Z
|
2019-07-22T05:24:40.000Z
|
apps/accounts/urls.py
|
SivaramakrishnanKN/NE-GeoCloud
|
affcae49e0ccd7d29360a2771a9517147ed56590
|
[
"Apache-2.0"
] | 1
|
2019-06-06T18:31:29.000Z
|
2019-06-06T18:31:29.000Z
|
apps/accounts/urls.py
|
SivaramakrishnanKN/NE-GeoCloud
|
affcae49e0ccd7d29360a2771a9517147ed56590
|
[
"Apache-2.0"
] | 5
|
2019-06-05T07:26:13.000Z
|
2019-06-08T06:53:11.000Z
|
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from . import views
urlpatterns = [
url(r'^registration', views.registration, name='registration'),
url(r'^(?P<uuid>[^/]+)/activate', views.activate, name='activate'),
url(r'^lost_password', views.lost_password, name='lost_password'),
url(r'^(?P<uuid>[^/]+)/reset', views.reset, name='reset'),
url(r'^password_change', views.password_change, name='password_change'),
url(r'^login', views.login, name='login'),
url(r'^logout', views.logout, name='logout'),
]
| 37.25
| 76
| 0.686242
|
d5e3e96280098d98986b268717c29df85be48ac3
| 8,915
|
py
|
Python
|
pychron/processing/utils/equil.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/processing/utils/equil.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/processing/utils/equil.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.etsconfig.etsconfig import ETSConfig
from six.moves import range
ETSConfig.toolkit = "qt4"
from traits.api import HasTraits, Instance, Float, on_trait_change, Int, Str, Bool
from traitsui.api import View, Item, UItem, HGroup, VGroup
from numpy import linspace, polyfit, polyval, where, hstack, exp, ones_like
# from pylab import *
from pychron.processing.argon_calculations import age_equation, calculate_flux
from pychron.graph.stacked_graph import StackedGraph
# ============= standard library imports ========================
# ============= local library imports ==========================
class Iso(HasTraits):
name = Str
intensity = Float
equil_rate = Float
static_rate = Float
def traits_view(self):
v = View(
HGroup(
UItem("name", style="readonly"),
Item("intensity"),
Item("equil_rate"),
Item("static_rate"),
)
)
return v
class EquilibrationInspector(HasTraits):
graph = Instance(StackedGraph, ())
Ar40 = Instance(
Iso,
{
"name": "Ar40",
"intensity": 3000,
"equil_rate": 0.0001,
"static_rate": 0.0002,
},
)
Ar39 = Instance(
Iso,
{"name": "Ar39", "intensity": 230, "equil_rate": 0.0001, "static_rate": 0.0002},
)
max_time = Int(50)
vary_time_zero = Bool(True)
def traits_view(self):
cntrl_grp = VGroup(
UItem("Ar40", style="custom"),
UItem("Ar39", style="custom"),
# HGroup(Item('Ar40'),
# Item('Ar39'),
# ),
HGroup(Item("max_time"), Item("vary_time_zero"))
# Item('pump_rate')
)
v = View(
VGroup(cntrl_grp, UItem("graph", style="custom")),
title="Equilibration Inspector",
)
return v
def refresh(self):
self._rebuild_graph()
@on_trait_change("Ar+:[], max_time, vary_time_zero")
def _update_graph(self, name, new):
self._rebuild_graph()
def calc_intercept(self, mag, post, eq_rate, static_rate, xma, plotid, time_zero=0):
"""
post: inlet close time in seconds after eq start
"""
def func(t):
I_el = EL_src(mag, t, post)
I = I_el + MS_src(t) - MS_pump(t, I_el, post, eq_rate, static_rate)
return I
ts = linspace(0, xma, xma + 1)
I = func(ts)
g = self.graph
# g.new_series(ts, I, plotid=plotid, color='black')
fidx = where(ts > post)[0]
b = 1
if fidx.shape[0]:
g.new_series(ts[: fidx[0]], I[: fidx[0]], plotid=plotid, color="black")
g.new_series(ts[fidx[0] :], I[fidx[0] :], plotid=plotid, color="red")
# plot(ts[:fidx[0]], I[:fidx[0]], 'black')
fI = I[fidx]
ft = ts[fidx]
m, b = polyfit(ft, fI, 1)
vi = fI[0]
b = vi - m * post
# g.new_series(ts, polyval((m, b), ts), plotid=plotid, color='red',
# line_style='dash')
# plot(ts, polyval((m, b), ts), ls='--', c='r')
return polyval((m, b), time_zero)
def _rebuild_graph(self):
g = self.graph
g.clear()
plot1 = g.new_plot(
ytitle="Delta Age (ka)",
xtitle="Time (s)",
padding_left=70,
padding_right=25,
)
plot2 = g.new_plot(ytitle="Ar39 (fA)", padding_left=70, padding_right=25)
plot3 = g.new_plot(ytitle="Ar40 (fA)", padding_left=70, padding_right=25)
Ar40 = self.Ar40.intensity
Ar39 = self.Ar39.intensity
if not (Ar40 and Ar39):
return
plot2.value_range.low_setting = Ar39 * 0.95
plot2.value_range.high_setting = Ar39 * 1.05
plot3.value_range.low_setting = Ar40 * 0.95
plot3.value_range.high_setting = Ar40 * 1.01
R = Ar40 / Ar39
xma = self.max_time
rs = []
ns = []
ds = []
if self.vary_time_zero:
posts = (15,)
xs = list(range(0, 15))
index = xs
else:
posts = list(range(5, 30))
xs = (0,)
index = posts
for pi in posts:
for ti in xs:
# subplot(311)
n = self.calc_intercept(
Ar40,
pi,
self.Ar40.equil_rate,
self.Ar40.static_rate,
xma,
2,
time_zero=ti,
)
# subplot(312)
d = self.calc_intercept(
Ar39,
pi,
self.Ar39.equil_rate,
self.Ar39.static_rate,
xma,
1,
time_zero=ti,
)
ns.append(n)
ds.append(d)
# rs.append(((n / d) - R) / R * 100)
rs.append((n / d))
# print ns
# g.new_series(xs, ns, plotid=2)
# g.new_series(xs, ds, plotid=1)
# g.new_series(to, rs, plotid=0)
mon_age = 28
j = calculate_flux((Ar40, 0), (Ar39, 0), mon_age)
ages = [(age_equation(j[0], abs(ri)) - mon_age) * 1000 for ri in rs]
g.new_series(index, ages, plotid=0)
def EL_src(mag, t, post, rate=0.8):
pre_t = where(t <= post)[0]
post_t = where(t > post)[0]
pre_v = mag * (1 - exp(-rate * t[pre_t]))
return hstack((pre_v, ones_like(post_t) * pre_v[-1]))
def MS_src(t, rate=0.0125):
return rate * t
def MS_pump(t, mag_t, post, eq_rate, static_rate):
pre_t = where(t <= post)[0]
post_t = where(t > post)[0]
pre_v = eq_rate * t[pre_t] * mag_t[pre_t]
post_v = static_rate * t[post_t] * mag_t[post_t]
return hstack((pre_v, post_v))
# def calc_intercept(intensity, post, pump_rate, xma):
# '''
# post: inlet close time in seconds after eq start
# '''
# def func(t):
# I_el = EL_src(intensity, t, post)
# I = I_el + MS_src(t) - \
# MS_pump(t, I_el, post, rate=pump_rate)
# return I
#
# ts = linspace(0, xma, 500)
# I = func(ts)
#
# fidx = where(ts > post)[0]
#
# plot(ts[:fidx[0]], I[:fidx[0]], 'black')
# fI = I[fidx]
# ft = ts[fidx]
# m, b = polyfit(ft, fI, 1)
# vi = fI[0]
# b = vi - m * post
#
# plot(ts, polyval((m, b), ts), ls='--', c='r')
# return b
#
# def main():
#
# # coctail ratio
# # Ar39 = 250 / 4.
# # Ar40 = 13.8 * Ar39
# Ar40 = 300
# Ar39 = 5
#
#
# xma = 150
# pump_rate = 0.0001
#
# # to = range(5, 20)
# # to = (1, 3, 5, 10, 15, 20, 30, 35)
# to = (1, 5, 10, 15, 20)
# rs = []
# # to = range(1, 200, 10)
# for i, pi in enumerate(to):
# subplot(311)
# n = calc_intercept(Ar40, pi, pump_rate, xma)
# subplot(312)
# d = calc_intercept(Ar39, pi, pump_rate, xma)
# rs.append((n / d))
#
# subplots_adjust(hspace=0.05)
# subplot(311)
# xlim(0, xma)
# xticks(visible=False)
# ylabel('Ar40 fA')
#
# subplot(312)
# xlim(0, xma)
# xticks(visible=False)
# ylabel('Ar39 fA')
#
# mon_age = 28
# j = calculate_flux((Ar40, 0), (Ar39, 0), mon_age)
# ages = [(age_equation(j[0], abs(ri)) - mon_age) * 1000 for ri in rs]
#
# subplot(313)
# plot(to, ages)
# xlim(0, xma)
# ylabel('delta age (ka) ')
# xlabel('t (s)')
#
# show()
if __name__ == "__main__":
eq = EquilibrationInspector()
eq.Ar40.intensity = 300
eq.Ar39.intensity = 25
eq.configure_traits()
# main()
# ============= EOF =============================================
| 28.482428
| 88
| 0.487493
|
deddf00bc4f4fb51cf666def2befe61970118541
| 6,642
|
py
|
Python
|
davis/eval_custom_framewise.py
|
MSiam/segment-any-moving
|
82cb782867d866d2f4eb68230edb75f613e15a02
|
[
"Apache-2.0"
] | 70
|
2019-09-16T17:55:55.000Z
|
2022-03-07T00:26:53.000Z
|
davis/eval_custom_framewise.py
|
MSiam/segment-any-moving
|
82cb782867d866d2f4eb68230edb75f613e15a02
|
[
"Apache-2.0"
] | 9
|
2019-09-30T09:15:11.000Z
|
2021-07-21T11:33:13.000Z
|
davis/eval_custom_framewise.py
|
MSiam/segment-any-moving
|
82cb782867d866d2f4eb68230edb75f613e15a02
|
[
"Apache-2.0"
] | 5
|
2019-09-25T05:14:37.000Z
|
2021-07-08T20:13:47.000Z
|
"""Per-frame version of proposed evaluation for video instance segmentation.
See fbms/eval_custom.py for a video-level evaluation that also works with
DAVIS."""
import argparse
import collections
import logging
import pickle
from pathlib import Path
import numpy as np
import scipy.optimize
import pycocotools.mask as mask_util
from tqdm import tqdm
from pycocotools.coco import COCO
from PIL import Image
import utils.log as log_utils
def compute_f_measure(precision, recall):
return 2 * precision * recall / (max(precision + recall, 1e-10))
def get_unique_objects(groundtruth):
"""Get unique object ids from segmentation mask
Adapted from DAVIS evaluation code.
"""
ids = sorted(np.unique(groundtruth))
if ids[-1] == 255: # Remove unknown-label
ids = ids[:-1]
if ids[0] == 0: # Remove background
ids = ids[1:]
return ids
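# For example (illustrative): a mask containing the values {0, 1, 2, 255}
# yields [1, 2], because 0 (background) and 255 (unknown) are stripped.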
def main():
# Use first line of file docstring as description if it exists.
parser = argparse.ArgumentParser(
description=__doc__.split('\n')[0] if __doc__ else '',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--detections-pickle', type=Path, required=True)
parser.add_argument('--annotations-json', type=Path, required=True)
parser.add_argument('--davis-root', type=Path, required=True)
parser.add_argument('--threshold', type=float, default=0.7)
args = parser.parse_args()
output_log = log_utils.add_time_to_path(
args.detections_pickle.parent / (Path(__file__).name + '.log'))
log_utils.setup_logging(output_log)
logging.info('Args:\n%s', vars(args))
groundtruth = COCO(str(args.annotations_json))
image_ids = sorted(groundtruth.getImgIds())
# Map <sequence_name>/<frame>.png to list of segmentations, sorted in
# ascending order of scores.
results = {}
with open(args.detections_pickle, 'rb') as f:
data = pickle.load(f)
boxes = data['all_boxes']
masks = data['all_segms']
num_classes = len(boxes)
for c in range(num_classes):
assert len(boxes[c]) == len(image_ids), (
f'Expected {len(image_ids)} boxes for class {c}, got '
f'{len(boxes[c])}')
for i, image_id in enumerate(image_ids):
scores = []
segmentations = []
# Merge all classes into one.
for c in range(1, num_classes):
scores.extend(boxes[c][i][:, 4])
segmentations.extend(masks[c][i])
segmentation_scores = sorted(
zip(segmentations, scores), key=lambda x: x[1])
results[groundtruth.imgs[image_id]['file_name']] = [
segmentation for segmentation, score in segmentation_scores
if score > args.threshold
]
sequence_frames = collections.defaultdict(list)
for x in results.keys():
x = Path(x)
sequence_frames[x.parent.name].append(x)
annotations_dir = args.davis_root / 'Annotations' / '480p'
metrics = [] # List of (frame name, precision, recall, f-measure) tuples
for sequence, frames in tqdm(sequence_frames.items()):
frames = sorted(frames, key=lambda x: int(x.stem))
davis_sequence = annotations_dir / sequence
davis_frames = sorted(
davis_sequence.glob('*.png'), key=lambda x: int(x.stem))
assert (
len(davis_frames) == len(frames)
or len(davis_frames) == (len(frames) + 1)
), 'Unexpected number of frames. Expected: %s or %s, saw %s' % (
len(frames), len(frames) + 1, len(davis_frames))
for i, frame_path in enumerate(davis_frames):
frame_name = str(frame_path.relative_to(annotations_dir))
groundtruth = np.array(Image.open(frame_path))
# Some frames in DAVIS 16 have an extra channel, but this code
# should only be used with DAVIS 17.
assert groundtruth.ndim == 2, (
'Groundtruth has multiple channels. This may be because you '
'are passing DAVIS 2016 annotations, which is not supported.')
unique_objects = get_unique_objects(groundtruth)
groundtruth_masks = [
groundtruth == i for i in unique_objects
]
if i == (len(davis_frames) - 1) and frame_name not in results:
previous_frame_name = '%s/%05d.png' % (sequence, i - 1)
results[frame_name] = results[previous_frame_name]
prediction = np.full(groundtruth.shape, fill_value=-1)
for p, predicted_mask in enumerate(results[frame_name]):
prediction[mask_util.decode(predicted_mask) != 0] = p
predicted_masks = [
(prediction == p) for p in np.unique(prediction)
if p != -1
]
num_predicted = [m.sum() for m in predicted_masks]
num_groundtruth = [x.sum() for x in groundtruth_masks]
f_measures = np.zeros((len(groundtruth_masks),
len(predicted_masks)))
intersections = {}
for g, groundtruth_mask in enumerate(groundtruth_masks):
for p, predicted_mask in enumerate(predicted_masks):
intersection = (groundtruth_mask & predicted_mask).sum()
intersections[g, p] = intersection
precision = intersection / num_predicted[p]
recall = intersection / num_groundtruth[g]
f_measures[g, p] = compute_f_measure(precision, recall)
# Tuple of (groundtruth_indices, predicted_indices)
assignment = scipy.optimize.linear_sum_assignment(-f_measures)
assignment = zip(assignment[0].tolist(), assignment[1].tolist())
num_predicted = (prediction != -1).sum()
num_groundtruth = sum(groundtruth_mask.sum()
for groundtruth_mask in groundtruth_masks)
num_correct = sum(intersections[(g, p)] for g, p in assignment)
precision = 100 * num_correct / max(num_predicted, 1e-10)
recall = 100 * num_correct / num_groundtruth
f_measure = compute_f_measure(precision, recall)
metrics.append((frame_name, precision, recall, f_measure))
logging.info('Average precision: %.2f', np.mean([m[1] for m in metrics]))
logging.info('Average recall: %.2f', np.mean([m[2] for m in metrics]))
logging.info('Average f-measure: %.2f', np.mean([m[3] for m in metrics]))
if __name__ == "__main__":
main()
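# Example invocation (all paths are illustrative placeholders):
#   python davis/eval_custom_framewise.py \
#       --detections-pickle /path/to/detections.pkl \
#       --annotations-json /path/to/instances.json \
#       --davis-root /path/to/DAVIS \
#       --threshold 0.7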
| 41.773585
| 78
| 0.618338
|
f3ac5011657ad85593d60998736950d8f1568987
| 1,719
|
py
|
Python
|
blog/models.py
|
zxperts/pyblog
|
4c6e494978005bb782e35729e6f1af4f874cd467
|
[
"MIT"
] | null | null | null |
blog/models.py
|
zxperts/pyblog
|
4c6e494978005bb782e35729e6f1af4f874cd467
|
[
"MIT"
] | null | null | null |
blog/models.py
|
zxperts/pyblog
|
4c6e494978005bb782e35729e6f1af4f874cd467
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.template.defaultfilters import slugify
class PostCategory(models.Model):
name = models.CharField(max_length=50)
def slug(self):
return slugify(self.name)
def __str__(self):
return self.name
class Post(models.Model):
title = models.CharField(max_length=166)
category = models.ForeignKey('PostCategory',
null=True,
blank=True,
on_delete=models.DO_NOTHING)
published = models.BooleanField(default=False)
text = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True,null=True)
def __str__(self):
return self.title
class Comment(models.Model):
STATUS_VISIBLE = 'visible'
STATUS_HIDDEN = 'hidden'
STATUS_MODERATED = 'moderated'
STATUS_CHOICES = (
(STATUS_VISIBLE, 'visible'),
(STATUS_HIDDEN, 'hidden'),
(STATUS_MODERATED, 'moderated')
)
post = models.ForeignKey('Post', on_delete=models.CASCADE,
related_name='comments')
author_name = models.CharField(max_length=100)
text = models.TextField()
status = models.CharField(max_length=20,
default=STATUS_VISIBLE,
choices=STATUS_CHOICES,
)
moderation_text = models.CharField(max_length=200,blank=True)
created_at = models.DateTimeField(auto_now_add=True,null=True)
def __str__(self):
return '{} - {} (status={})'.format(self.author_name, self.text[:20],self.status)
# Then run: python manage.py makemigrations
# Then run: python manage.py migrate
| 28.180328
| 89
| 0.618965
|
2c5eb62f168aba9032e336a0cdff34023f008d7f
| 2,006
|
py
|
Python
|
polire/custom/custom.py
|
patel-zeel/ALdeploy
|
58212209c4b495db471ada0fd695eab38e2acfe8
|
[
"BSD-3-Clause"
] | null | null | null |
polire/custom/custom.py
|
patel-zeel/ALdeploy
|
58212209c4b495db471ada0fd695eab38e2acfe8
|
[
"BSD-3-Clause"
] | null | null | null |
polire/custom/custom.py
|
patel-zeel/ALdeploy
|
58212209c4b495db471ada0fd695eab38e2acfe8
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from ..interpolate.base import Base
class CustomInterpolator(Base):
"""
Class to interpolate by fitting a sklearn type Regressor to
the given data.
Parameters
----------
regressor: class definition,
        This variable is used to pass in the Regressor we would like
        to use for interpolation. The regressor should be a sklearn-type
        regressor. Example from sklearn.ensemble -> RandomForestRegressor
reg_kwargs: dict, optional
This is a dictionary that is passed into the Regressor initialization.
Use this to change the behaviour of the passed regressor. Default = empty dict
Attributes
----------
reg : object
Object of the `regressor` class passed.
"""
def __init__(
self,
regressor,
resolution="standard",
coordinate_type="Euclidean",
reg_kwargs={},
):
super().__init__(resolution, coordinate_type)
self.reg = regressor(**reg_kwargs)
def _fit(self, X, y):
"""Function for fitting.
This function is not supposed to be called directly.
"""
self.reg.fit(X, y)
return self
def _predict_grid(self, x1lim, x2lim):
"""Function for grid interpolation.
This function is not supposed to be called directly.
"""
# getting the boundaries for interpolation
x1min, x1max = x1lim
x2min, x2max = x2lim
# building the grid
x1 = np.linspace(x1min, x1max, self.resolution)
x2 = np.linspace(x2min, x2max, self.resolution)
X1, X2 = np.meshgrid(x1, x2)
return self.reg.predict(np.asarray([X1.ravel(), X2.ravel()]).T)
def _predict(self, X):
"""Function for interpolation on specific points.
This function is not supposed to be called directly.
"""
return self.reg.predict(X)
def __repr__(self):
return self.__class__.__name__ + "." + self.reg.__class__.__name__
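if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: it assumes the
    # public fit()/predict() entry points are exposed by the Base class and
    # uses RandomForestRegressor, the example the docstring itself suggests.
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.RandomState(0)
    X_demo = rng.uniform(0, 10, size=(50, 2))      # 50 random 2-D locations
    y_demo = np.sin(X_demo[:, 0]) + X_demo[:, 1]   # synthetic field values

    interp = CustomInterpolator(RandomForestRegressor,
                                reg_kwargs={"n_estimators": 50})
    interp.fit(X_demo, y_demo)                     # assumed Base API
    print(interp.predict(X_demo[:5]))              # assumed Base API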
| 29.940299
| 86
| 0.625623
|
a34a8c8b2999f843a69bdb69eb9aedbb02d6555c
| 49,279
|
py
|
Python
|
python/generator.py
|
opencor/libxml2
|
5c009c668b5b8ae90e274d75f5cbb658d0901482
|
[
"MIT"
] | null | null | null |
python/generator.py
|
opencor/libxml2
|
5c009c668b5b8ae90e274d75f5cbb658d0901482
|
[
"MIT"
] | null | null | null |
python/generator.py
|
opencor/libxml2
|
5c009c668b5b8ae90e274d75f5cbb658d0901482
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python -u
#
# generate python wrappers from the XML API description
#
functions = {}
enums = {} # { enumType: { enumConstant: enumValue } }
import os
import sys
import string
if __name__ == "__main__":
# launched as a script
srcPref = os.path.dirname(sys.argv[0])
else:
# imported
srcPref = os.path.dirname(__file__)
#######################################################################
#
# That part is purely the API acquisition phase from the
# XML API description
#
#######################################################################
import os
import xml.sax
debug = 0
def getparser():
# Attach parser to an unmarshalling object. return both objects.
target = docParser()
parser = xml.sax.make_parser()
parser.setContentHandler(target)
return parser, target
class docParser(xml.sax.handler.ContentHandler):
def __init__(self):
self._methodname = None
self._data = []
self.in_function = 0
self.startElement = self.start
self.endElement = self.end
self.characters = self.data
def close(self):
if debug:
print("close")
def getmethodname(self):
return self._methodname
def data(self, text):
if debug:
print("data %s" % text)
self._data.append(text)
def start(self, tag, attrs):
if debug:
print("start %s, %s" % (tag, attrs))
if tag == 'function':
self._data = []
self.in_function = 1
self.function = None
self.function_cond = None
self.function_args = []
self.function_descr = None
self.function_return = None
self.function_file = None
if 'name' in attrs.keys():
self.function = attrs['name']
if 'file' in attrs.keys():
self.function_file = attrs['file']
elif tag == 'cond':
self._data = []
elif tag == 'info':
self._data = []
elif tag == 'arg':
if self.in_function == 1:
self.function_arg_name = None
self.function_arg_type = None
self.function_arg_info = None
if 'name' in attrs.keys():
self.function_arg_name = attrs['name']
if 'type' in attrs.keys():
self.function_arg_type = attrs['type']
if 'info' in attrs.keys():
self.function_arg_info = attrs['info']
elif tag == 'return':
if self.in_function == 1:
self.function_return_type = None
self.function_return_info = None
self.function_return_field = None
if 'type' in attrs.keys():
self.function_return_type = attrs['type']
if 'info' in attrs.keys():
self.function_return_info = attrs['info']
if 'field' in attrs.keys():
self.function_return_field = attrs['field']
elif tag == 'enum':
enum(attrs['type'],attrs['name'],attrs['value'])
def end(self, tag):
if debug:
print("end %s" % tag)
if tag == 'function':
if self.function != None:
function(self.function, self.function_descr,
self.function_return, self.function_args,
self.function_file, self.function_cond)
self.in_function = 0
elif tag == 'arg':
if self.in_function == 1:
self.function_args.append([self.function_arg_name,
self.function_arg_type,
self.function_arg_info])
elif tag == 'return':
if self.in_function == 1:
self.function_return = [self.function_return_type,
self.function_return_info,
self.function_return_field]
elif tag == 'info':
str = ''
for c in self._data:
str = str + c
if self.in_function == 1:
self.function_descr = str
elif tag == 'cond':
str = ''
for c in self._data:
str = str + c
if self.in_function == 1:
self.function_cond = str
def function(name, desc, ret, args, file, cond):
functions[name] = (desc, ret, args, file, cond)
def enum(type, name, value):
if type not in enums:
enums[type] = {}
enums[type][name] = value
#######################################################################
#
# Some filtering rules to drop functions/types which should not
# be exposed as-is on the Python interface
#
#######################################################################
skipped_modules = {
'xmlmemory': None,
'DOCBparser': None,
'SAX': None,
'hash': None,
'list': None,
'threads': None,
# 'xpointer': None,
}
skipped_types = {
'int *': "usually a return type",
'xmlSAXHandlerPtr': "not the proper interface for SAX",
'htmlSAXHandlerPtr': "not the proper interface for SAX",
'xmlRMutexPtr': "thread specific, skipped",
'xmlMutexPtr': "thread specific, skipped",
'xmlGlobalStatePtr': "thread specific, skipped",
'xmlListPtr': "internal representation not suitable for python",
'xmlBufferPtr': "internal representation not suitable for python",
'FILE *': None,
}
#######################################################################
#
# Table of remapping to/from the python type or class to the C
# counterpart.
#
#######################################################################
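# Each entry maps a C type to a 4-tuple consumed by print_function_wrapper():
# (PyArg_ParseTuple format char, Python wrapper class used in Py<class>_Get,
#  suffix of the libxml_<name>Wrap converter, C type used for the cast).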
py_types = {
'void': (None, None, None, None),
'int': ('i', None, "int", "int"),
'long': ('l', None, "long", "long"),
'double': ('d', None, "double", "double"),
'unsigned int': ('i', None, "int", "int"),
'xmlChar': ('c', None, "int", "int"),
'unsigned char *': ('z', None, "charPtr", "char *"),
'char *': ('z', None, "charPtr", "char *"),
'const char *': ('z', None, "charPtrConst", "const char *"),
'xmlChar *': ('z', None, "xmlCharPtr", "xmlChar *"),
'const xmlChar *': ('z', None, "xmlCharPtrConst", "const xmlChar *"),
'xmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlDtdPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlDtdPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlDtd *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlDtd *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlAttrPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlAttrPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlAttr *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlAttr *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlEntityPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlEntityPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlEntity *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlEntity *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlElementPtr': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'const xmlElementPtr': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'xmlElement *': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'const xmlElement *': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'xmlAttributePtr': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'const xmlAttributePtr': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'xmlAttribute *': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'const xmlAttribute *': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'xmlNsPtr': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'const xmlNsPtr': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'xmlNs *': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'const xmlNs *': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'xmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const xmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'xmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const xmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const htmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const htmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const htmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'htmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const htmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlXPathContextPtr': ('O', "xmlXPathContext", "xmlXPathContextPtr", "xmlXPathContextPtr"),
'xmlXPathContext *': ('O', "xpathContext", "xmlXPathContextPtr", "xmlXPathContextPtr"),
'xmlXPathParserContextPtr': ('O', "xmlXPathParserContext", "xmlXPathParserContextPtr", "xmlXPathParserContextPtr"),
'xmlParserCtxtPtr': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'xmlParserCtxt *': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'htmlParserCtxtPtr': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'htmlParserCtxt *': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'xmlValidCtxtPtr': ('O', "ValidCtxt", "xmlValidCtxtPtr", "xmlValidCtxtPtr"),
'xmlCatalogPtr': ('O', "catalog", "xmlCatalogPtr", "xmlCatalogPtr"),
'FILE *': ('O', "File", "FILEPtr", "FILE *"),
'xmlURIPtr': ('O', "URI", "xmlURIPtr", "xmlURIPtr"),
'xmlErrorPtr': ('O', "Error", "xmlErrorPtr", "xmlErrorPtr"),
'xmlOutputBufferPtr': ('O', "outputBuffer", "xmlOutputBufferPtr", "xmlOutputBufferPtr"),
'xmlParserInputBufferPtr': ('O', "inputBuffer", "xmlParserInputBufferPtr", "xmlParserInputBufferPtr"),
'xmlRegexpPtr': ('O', "xmlReg", "xmlRegexpPtr", "xmlRegexpPtr"),
'xmlTextReaderLocatorPtr': ('O', "xmlTextReaderLocator", "xmlTextReaderLocatorPtr", "xmlTextReaderLocatorPtr"),
'xmlTextReaderPtr': ('O', "xmlTextReader", "xmlTextReaderPtr", "xmlTextReaderPtr"),
'xmlRelaxNGPtr': ('O', "relaxNgSchema", "xmlRelaxNGPtr", "xmlRelaxNGPtr"),
'xmlRelaxNGParserCtxtPtr': ('O', "relaxNgParserCtxt", "xmlRelaxNGParserCtxtPtr", "xmlRelaxNGParserCtxtPtr"),
'xmlRelaxNGValidCtxtPtr': ('O', "relaxNgValidCtxt", "xmlRelaxNGValidCtxtPtr", "xmlRelaxNGValidCtxtPtr"),
'xmlSchemaPtr': ('O', "Schema", "xmlSchemaPtr", "xmlSchemaPtr"),
'xmlSchemaParserCtxtPtr': ('O', "SchemaParserCtxt", "xmlSchemaParserCtxtPtr", "xmlSchemaParserCtxtPtr"),
'xmlSchemaValidCtxtPtr': ('O', "SchemaValidCtxt", "xmlSchemaValidCtxtPtr", "xmlSchemaValidCtxtPtr"),
}
py_return_types = {
'xmlXPathObjectPtr': ('O', "foo", "xmlXPathObjectPtr", "xmlXPathObjectPtr"),
}
unknown_types = {}
foreign_encoding_args = (
'htmlCreateMemoryParserCtxt',
'htmlCtxtReadMemory',
'htmlParseChunk',
'htmlReadMemory',
'xmlCreateMemoryParserCtxt',
'xmlCtxtReadMemory',
'xmlCtxtResetPush',
'xmlParseChunk',
'xmlParseMemory',
'xmlReadMemory',
'xmlRecoverMemory',
)
#######################################################################
#
# This part writes the C <-> Python stubs libxml2-py.[ch] and
# the table libxml2-export.c to add when registering the Python module
#
#######################################################################
# Class methods which are written by hand in libxml.c but the Python-level
# code is still automatically generated (so they are not in skip_function()).
skip_impl = (
'xmlSaveFileTo',
'xmlSaveFormatFileTo',
)
deprecated_funcs = {
'xmlIsRef': True,
'xmlRemoveRef': True,
}
def skip_function(name):
if name[0:12] == "xmlXPathWrap":
return 1
if name == "xmlFreeParserCtxt":
return 1
if name == "xmlCleanupParser":
return 1
if name == "xmlFreeTextReader":
return 1
# if name[0:11] == "xmlXPathNew":
# return 1
# the next function is defined in libxml.c
if name == "xmlRelaxNGFreeValidCtxt":
return 1
if name == "xmlFreeValidCtxt":
return 1
if name == "xmlSchemaFreeValidCtxt":
return 1
#
    # Those are skipped because the Const version is used by the bindings
# instead.
#
if name == "xmlTextReaderBaseUri":
return 1
if name == "xmlTextReaderLocalName":
return 1
if name == "xmlTextReaderName":
return 1
if name == "xmlTextReaderNamespaceUri":
return 1
if name == "xmlTextReaderPrefix":
return 1
if name == "xmlTextReaderXmlLang":
return 1
if name == "xmlTextReaderValue":
return 1
if name == "xmlOutputBufferClose": # handled by by the superclass
return 1
if name == "xmlOutputBufferFlush": # handled by by the superclass
return 1
if name == "xmlErrMemory":
return 1
if name == "xmlValidBuildContentModel":
return 1
if name == "xmlValidateElementDecl":
return 1
if name == "xmlValidateAttributeDecl":
return 1
if name == "xmlPopInputCallbacks":
return 1
return 0
def print_function_wrapper(name, output, export, include):
global py_types
global unknown_types
global functions
global skipped_modules
try:
(desc, ret, args, file, cond) = functions[name]
except:
print("failed to get function %s infos")
return
if file in skipped_modules:
return 0
if skip_function(name) == 1:
return 0
if name in skip_impl:
# Don't delete the function entry in the caller.
return 1
is_deprecated = name in deprecated_funcs
c_call = ""
format=""
format_args=""
c_args=""
c_return=""
c_convert=""
c_release=""
num_bufs=0
for arg in args:
# This should be correct
if arg[1][0:6] == "const ":
arg[1] = arg[1][6:]
c_args = c_args + " %s %s;\n" % (arg[1], arg[0])
if arg[1] in py_types:
(f, t, n, c) = py_types[arg[1]]
if (f == 'z') and (name in foreign_encoding_args) and (num_bufs == 0):
f = 's#'
if f != None:
format = format + f
if t != None:
format_args = format_args + ", &pyobj_%s" % (arg[0])
c_args = c_args + " PyObject *pyobj_%s;\n" % (arg[0])
c_convert = c_convert + \
" %s = (%s) Py%s_Get(pyobj_%s);\n" % (arg[0],
arg[1], t, arg[0])
else:
format_args = format_args + ", &%s" % (arg[0])
if f == 's#':
format_args = format_args + ", &py_buffsize%d" % num_bufs
c_args = c_args + " Py_ssize_t py_buffsize%d;\n" % num_bufs
num_bufs = num_bufs + 1
if c_call != "":
c_call = c_call + ", "
c_call = c_call + "%s" % (arg[0])
if t == "File":
c_release = c_release + \
" PyFile_Release(%s);\n" % (arg[0])
else:
if arg[1] in skipped_types:
return 0
if arg[1] in unknown_types:
lst = unknown_types[arg[1]]
lst.append(name)
else:
unknown_types[arg[1]] = [name]
return -1
if format != "":
format = format + ":%s" % (name)
if ret[0] == 'void':
if file == "python_accessor":
if args[1][1] == "char *" or args[1][1] == "xmlChar *":
c_call = "\n if (%s->%s != NULL) xmlFree(%s->%s);\n" % (
args[0][0], args[1][0], args[0][0], args[1][0])
c_call = c_call + " %s->%s = (%s)xmlStrdup((const xmlChar *)%s);\n" % (args[0][0],
args[1][0], args[1][1], args[1][0])
else:
c_call = "\n %s->%s = %s;\n" % (args[0][0], args[1][0],
args[1][0])
else:
c_call = "\n %s(%s);\n" % (name, c_call)
ret_convert = " Py_INCREF(Py_None);\n return(Py_None);\n"
elif ret[0] in py_types:
(f, t, n, c) = py_types[ret[0]]
c_return = c_return + " %s c_retval;\n" % (ret[0])
if file == "python_accessor" and ret[2] != None:
c_call = "\n c_retval = %s->%s;\n" % (args[0][0], ret[2])
else:
c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
ret_convert = " py_retval = libxml_%sWrap((%s) c_retval);\n" % (n,c)
ret_convert = ret_convert + " return(py_retval);\n"
elif ret[0] in py_return_types:
(f, t, n, c) = py_return_types[ret[0]]
c_return = c_return + " %s c_retval;\n" % (ret[0])
c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
ret_convert = " py_retval = libxml_%sWrap((%s) c_retval);\n" % (n,c)
ret_convert = ret_convert + " return(py_retval);\n"
else:
if ret[0] in skipped_types:
return 0
if ret[0] in unknown_types:
lst = unknown_types[ret[0]]
lst.append(name)
else:
unknown_types[ret[0]] = [name]
return -1
if cond != None and cond != "":
include.write("#if %s\n" % cond)
export.write("#if %s\n" % cond)
output.write("#if %s\n" % cond)
include.write("PyObject * ")
include.write("libxml_%s(PyObject *self, PyObject *args);\n" % (name))
export.write(" { (char *)\"%s\", libxml_%s, METH_VARARGS, NULL },\n" %
(name, name))
if file == "python":
# Those have been manually generated
if cond != None and cond != "":
include.write("#endif\n")
export.write("#endif\n")
output.write("#endif\n")
return 1
if file == "python_accessor" and ret[0] != "void" and ret[2] is None:
# Those have been manually generated
if cond != None and cond != "":
include.write("#endif\n")
export.write("#endif\n")
output.write("#endif\n")
return 1
if is_deprecated:
output.write("XML_IGNORE_DEPRECATION_WARNINGS\n")
output.write("PyObject *\n")
output.write("libxml_%s(PyObject *self ATTRIBUTE_UNUSED," % (name))
output.write(" PyObject *args")
if format == "":
output.write(" ATTRIBUTE_UNUSED")
output.write(") {\n")
if ret[0] != 'void':
output.write(" PyObject *py_retval;\n")
if c_return != "":
output.write(c_return)
if c_args != "":
output.write(c_args)
if is_deprecated:
output.write("\n if (libxml_deprecationWarning(\"%s\") == -1)\n" %
name)
output.write(" return(NULL);\n")
if format != "":
output.write("\n if (!PyArg_ParseTuple(args, (char *)\"%s\"%s))\n" %
(format, format_args))
output.write(" return(NULL);\n")
if c_convert != "":
output.write(c_convert)
output.write(c_call)
if c_release != "":
output.write(c_release)
output.write(ret_convert)
output.write("}\n")
if is_deprecated:
output.write("XML_POP_WARNINGS\n")
output.write("\n")
if cond != None and cond != "":
include.write("#endif /* %s */\n" % cond)
export.write("#endif /* %s */\n" % cond)
output.write("#endif /* %s */\n" % cond)
return 1
def buildStubs():
global py_types
global py_return_types
global unknown_types
try:
f = open(os.path.join(srcPref,"libxml2-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError as msg:
try:
f = open(os.path.join(srcPref,"..","doc","libxml2-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError as msg:
print(file, ":", msg)
sys.exit(1)
n = len(list(functions.keys()))
print("Found %d functions in libxml2-api.xml" % (n))
py_types['pythonObject'] = ('O', "pythonObject", "pythonObject", "pythonObject")
try:
f = open(os.path.join(srcPref,"libxml2-python-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError as msg:
        print("libxml2-python-api.xml:", msg)
print("Found %d functions in libxml2-python-api.xml" % (
len(list(functions.keys())) - n))
nb_wrap = 0
failed = 0
skipped = 0
include = open("libxml2-py.h", "w")
include.write("/* Generated */\n\n")
export = open("libxml2-export.c", "w")
export.write("/* Generated */\n\n")
wrapper = open("libxml2-py.c", "w")
wrapper.write("/* Generated */\n\n")
wrapper.write("#define PY_SSIZE_T_CLEAN\n")
wrapper.write("#include <Python.h>\n")
wrapper.write("#include <libxml/xmlversion.h>\n")
wrapper.write("#include <libxml/tree.h>\n")
wrapper.write("#include <libxml/xmlschemastypes.h>\n")
wrapper.write("#include \"libxml_wrap.h\"\n")
wrapper.write("#include \"libxml2-py.h\"\n\n")
for function in sorted(functions.keys()):
ret = print_function_wrapper(function, wrapper, export, include)
if ret < 0:
failed = failed + 1
del functions[function]
if ret == 0:
skipped = skipped + 1
del functions[function]
if ret == 1:
nb_wrap = nb_wrap + 1
include.close()
export.close()
wrapper.close()
print("Generated %d wrapper functions, %d failed, %d skipped\n" % (nb_wrap,
failed, skipped))
print("Missing type converters: ")
for type in list(unknown_types.keys()):
print("%s:%d " % (type, len(unknown_types[type])))
print()
#######################################################################
#
# This part writes part of the Python front-end classes based on
# mapping rules between types and classes and also based on function
# renaming to get consistent function names at the Python level
#
#######################################################################
#
# The type automatically remapped to generated classes
#
classes_type = {
"xmlNodePtr": ("._o", "xmlNode(_obj=%s)", "xmlNode"),
"xmlNode *": ("._o", "xmlNode(_obj=%s)", "xmlNode"),
"xmlDocPtr": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"xmlDoc *": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"htmlDocPtr": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"htmlxmlDocPtr *": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"xmlAttrPtr": ("._o", "xmlAttr(_obj=%s)", "xmlAttr"),
"xmlAttr *": ("._o", "xmlAttr(_obj=%s)", "xmlAttr"),
"xmlNsPtr": ("._o", "xmlNs(_obj=%s)", "xmlNs"),
"xmlNs *": ("._o", "xmlNs(_obj=%s)", "xmlNs"),
"xmlDtdPtr": ("._o", "xmlDtd(_obj=%s)", "xmlDtd"),
"xmlDtd *": ("._o", "xmlDtd(_obj=%s)", "xmlDtd"),
"xmlEntityPtr": ("._o", "xmlEntity(_obj=%s)", "xmlEntity"),
"xmlEntity *": ("._o", "xmlEntity(_obj=%s)", "xmlEntity"),
"xmlElementPtr": ("._o", "xmlElement(_obj=%s)", "xmlElement"),
"xmlElement *": ("._o", "xmlElement(_obj=%s)", "xmlElement"),
"xmlAttributePtr": ("._o", "xmlAttribute(_obj=%s)", "xmlAttribute"),
"xmlAttribute *": ("._o", "xmlAttribute(_obj=%s)", "xmlAttribute"),
"xmlXPathContextPtr": ("._o", "xpathContext(_obj=%s)", "xpathContext"),
"xmlXPathContext *": ("._o", "xpathContext(_obj=%s)", "xpathContext"),
"xmlXPathParserContext *": ("._o", "xpathParserContext(_obj=%s)", "xpathParserContext"),
"xmlXPathParserContextPtr": ("._o", "xpathParserContext(_obj=%s)", "xpathParserContext"),
"xmlParserCtxtPtr": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"xmlParserCtxt *": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"htmlParserCtxtPtr": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"htmlParserCtxt *": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"xmlValidCtxtPtr": ("._o", "ValidCtxt(_obj=%s)", "ValidCtxt"),
"xmlCatalogPtr": ("._o", "catalog(_obj=%s)", "catalog"),
"xmlURIPtr": ("._o", "URI(_obj=%s)", "URI"),
"xmlErrorPtr": ("._o", "Error(_obj=%s)", "Error"),
"xmlOutputBufferPtr": ("._o", "outputBuffer(_obj=%s)", "outputBuffer"),
"xmlParserInputBufferPtr": ("._o", "inputBuffer(_obj=%s)", "inputBuffer"),
"xmlRegexpPtr": ("._o", "xmlReg(_obj=%s)", "xmlReg"),
"xmlTextReaderLocatorPtr": ("._o", "xmlTextReaderLocator(_obj=%s)", "xmlTextReaderLocator"),
"xmlTextReaderPtr": ("._o", "xmlTextReader(_obj=%s)", "xmlTextReader"),
'xmlRelaxNGPtr': ('._o', "relaxNgSchema(_obj=%s)", "relaxNgSchema"),
'xmlRelaxNGParserCtxtPtr': ('._o', "relaxNgParserCtxt(_obj=%s)", "relaxNgParserCtxt"),
'xmlRelaxNGValidCtxtPtr': ('._o', "relaxNgValidCtxt(_obj=%s)", "relaxNgValidCtxt"),
'xmlSchemaPtr': ("._o", "Schema(_obj=%s)", "Schema"),
'xmlSchemaParserCtxtPtr': ("._o", "SchemaParserCtxt(_obj=%s)", "SchemaParserCtxt"),
'xmlSchemaValidCtxtPtr': ("._o", "SchemaValidCtxt(_obj=%s)", "SchemaValidCtxt"),
}
converter_type = {
"xmlXPathObjectPtr": "xpathObjectRet(%s)",
}
primary_classes = ["xmlNode", "xmlDoc"]
classes_ancestor = {
"xmlNode" : "xmlCore",
"xmlDtd" : "xmlNode",
"xmlDoc" : "xmlNode",
"xmlAttr" : "xmlNode",
"xmlNs" : "xmlNode",
"xmlEntity" : "xmlNode",
"xmlElement" : "xmlNode",
"xmlAttribute" : "xmlNode",
"outputBuffer": "ioWriteWrapper",
"inputBuffer": "ioReadWrapper",
"parserCtxt": "parserCtxtCore",
"xmlTextReader": "xmlTextReaderCore",
"ValidCtxt": "ValidCtxtCore",
"SchemaValidCtxt": "SchemaValidCtxtCore",
"relaxNgValidCtxt": "relaxNgValidCtxtCore",
}
classes_destructors = {
"parserCtxt": "xmlFreeParserCtxt",
"catalog": "xmlFreeCatalog",
"URI": "xmlFreeURI",
# "outputBuffer": "xmlOutputBufferClose",
"inputBuffer": "xmlFreeParserInputBuffer",
"xmlReg": "xmlRegFreeRegexp",
"xmlTextReader": "xmlFreeTextReader",
"relaxNgSchema": "xmlRelaxNGFree",
"relaxNgParserCtxt": "xmlRelaxNGFreeParserCtxt",
"relaxNgValidCtxt": "xmlRelaxNGFreeValidCtxt",
"Schema": "xmlSchemaFree",
"SchemaParserCtxt": "xmlSchemaFreeParserCtxt",
"SchemaValidCtxt": "xmlSchemaFreeValidCtxt",
"ValidCtxt": "xmlFreeValidCtxt",
}
functions_noexcept = {
"xmlHasProp": 1,
"xmlHasNsProp": 1,
"xmlDocSetRootElement": 1,
"xmlNodeGetNs": 1,
"xmlNodeGetNsDefs": 1,
"xmlNextElementSibling": 1,
"xmlPreviousElementSibling": 1,
"xmlFirstElementChild": 1,
"xmlLastElementChild": 1,
}
reference_keepers = {
"xmlTextReader": [('inputBuffer', 'input')],
"relaxNgValidCtxt": [('relaxNgSchema', 'schema')],
"SchemaValidCtxt": [('Schema', 'schema')],
}
function_classes = {}
function_classes["None"] = []
def nameFixup(name, classe, type, file):
listname = classe + "List"
ll = len(listname)
l = len(classe)
if name[0:l] == listname:
func = name[l:]
func = func[0:1].lower() + func[1:]
elif name[0:12] == "xmlParserGet" and file == "python_accessor":
func = name[12:]
func = func[0:1].lower() + func[1:]
elif name[0:12] == "xmlParserSet" and file == "python_accessor":
func = name[12:]
func = func[0:1].lower() + func[1:]
elif name[0:10] == "xmlNodeGet" and file == "python_accessor":
func = name[10:]
func = func[0:1].lower() + func[1:]
elif name[0:9] == "xmlURIGet" and file == "python_accessor":
func = name[9:]
func = func[0:1].lower() + func[1:]
elif name[0:9] == "xmlURISet" and file == "python_accessor":
func = name[6:]
func = func[0:1].lower() + func[1:]
elif name[0:11] == "xmlErrorGet" and file == "python_accessor":
func = name[11:]
func = func[0:1].lower() + func[1:]
elif name[0:17] == "xmlXPathParserGet" and file == "python_accessor":
func = name[17:]
func = func[0:1].lower() + func[1:]
elif name[0:11] == "xmlXPathGet" and file == "python_accessor":
func = name[11:]
func = func[0:1].lower() + func[1:]
elif name[0:11] == "xmlXPathSet" and file == "python_accessor":
func = name[8:]
func = func[0:1].lower() + func[1:]
elif name[0:15] == "xmlOutputBuffer" and file != "python":
func = name[15:]
func = func[0:1].lower() + func[1:]
elif name[0:20] == "xmlParserInputBuffer" and file != "python":
func = name[20:]
func = func[0:1].lower() + func[1:]
elif name[0:9] == "xmlRegexp" and file == "xmlregexp":
func = "regexp" + name[9:]
elif name[0:6] == "xmlReg" and file == "xmlregexp":
func = "regexp" + name[6:]
elif name[0:20] == "xmlTextReaderLocator" and file == "xmlreader":
func = name[20:]
elif name[0:18] == "xmlTextReaderConst" and file == "xmlreader":
func = name[18:]
elif name[0:13] == "xmlTextReader" and file == "xmlreader":
func = name[13:]
elif name[0:12] == "xmlReaderNew" and file == "xmlreader":
func = name[9:]
elif name[0:11] == "xmlACatalog":
func = name[11:]
func = func[0:1].lower() + func[1:]
elif name[0:l] == classe:
func = name[l:]
func = func[0:1].lower() + func[1:]
elif name[0:7] == "libxml_":
func = name[7:]
func = func[0:1].lower() + func[1:]
elif name[0:6] == "xmlGet":
func = name[6:]
func = func[0:1].lower() + func[1:]
elif name[0:3] == "xml":
func = name[3:]
func = func[0:1].lower() + func[1:]
else:
func = name
if func[0:5] == "xPath":
func = "xpath" + func[5:]
elif func[0:4] == "xPtr":
func = "xpointer" + func[4:]
elif func[0:8] == "xInclude":
func = "xinclude" + func[8:]
elif func[0:2] == "iD":
func = "ID" + func[2:]
elif func[0:3] == "uRI":
func = "URI" + func[3:]
elif func[0:4] == "uTF8":
func = "UTF8" + func[4:]
elif func[0:3] == 'sAX':
func = "SAX" + func[3:]
return func
def functionCompare(info1, info2):
(index1, func1, name1, ret1, args1, file1) = info1
(index2, func2, name2, ret2, args2, file2) = info2
if file1 == file2:
if func1 < func2:
return -1
if func1 > func2:
return 1
if file1 == "python_accessor":
return -1
if file2 == "python_accessor":
return 1
if file1 < file2:
return -1
if file1 > file2:
return 1
return 0
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
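# cmp_to_key is used below as sorted(flist, key=cmp_to_key(functionCompare)),
# which lets the Python-2 style comparison function work with Python 3 sorting.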
def writeDoc(name, args, indent, output):
if functions[name][0] is None or functions[name][0] == "":
return
val = functions[name][0]
val = val.replace("NULL", "None")
output.write(indent)
output.write('"""')
while len(val) > 60:
if val[0] == " ":
val = val[1:]
continue
str = val[0:60]
i = str.rfind(" ")
if i < 0:
i = 60
str = val[0:i]
val = val[i:]
output.write(str)
output.write('\n ')
output.write(indent)
output.write(val)
output.write(' """\n')
def buildWrappers():
global ctypes
global py_types
global py_return_types
global unknown_types
global functions
global function_classes
global classes_type
global classes_list
global converter_type
global primary_classes
global converter_type
global classes_ancestor
global converter_type
global primary_classes
global classes_ancestor
global classes_destructors
global functions_noexcept
for type in classes_type.keys():
function_classes[classes_type[type][2]] = []
#
# Build the list of C types to look for ordered to start
# with primary classes
#
ctypes = []
classes_list = []
ctypes_processed = {}
classes_processed = {}
for classe in primary_classes:
classes_list.append(classe)
classes_processed[classe] = ()
for type in classes_type.keys():
tinfo = classes_type[type]
if tinfo[2] == classe:
ctypes.append(type)
ctypes_processed[type] = ()
for type in sorted(classes_type.keys()):
if type in ctypes_processed:
continue
tinfo = classes_type[type]
if tinfo[2] not in classes_processed:
classes_list.append(tinfo[2])
classes_processed[tinfo[2]] = ()
ctypes.append(type)
ctypes_processed[type] = ()
for name in functions.keys():
found = 0
(desc, ret, args, file, cond) = functions[name]
for type in ctypes:
classe = classes_type[type][2]
if name[0:3] == "xml" and len(args) >= 1 and args[0][1] == type:
found = 1
func = nameFixup(name, classe, type, file)
info = (0, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:3] == "xml" and len(args) >= 2 and args[1][1] == type \
and file != "python_accessor":
found = 1
func = nameFixup(name, classe, type, file)
info = (1, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:4] == "html" and len(args) >= 1 and args[0][1] == type:
found = 1
func = nameFixup(name, classe, type, file)
info = (0, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:4] == "html" and len(args) >= 2 and args[1][1] == type \
and file != "python_accessor":
found = 1
func = nameFixup(name, classe, type, file)
info = (1, func, name, ret, args, file)
function_classes[classe].append(info)
if found == 1:
continue
if name[0:8] == "xmlXPath":
continue
if name[0:6] == "xmlStr":
continue
if name[0:10] == "xmlCharStr":
continue
func = nameFixup(name, "None", file, file)
info = (0, func, name, ret, args, file)
function_classes['None'].append(info)
classes = open("libxml2class.py", "w")
txt = open("libxml2class.txt", "w")
txt.write(" Generated Classes for libxml2-python\n\n")
txt.write("#\n# Global functions of the module\n#\n\n")
if "None" in function_classes:
flist = function_classes["None"]
flist = sorted(flist, key=cmp_to_key(functionCompare))
oldfile = ""
for info in flist:
(index, func, name, ret, args, file) = info
if file != oldfile:
classes.write("#\n# Functions from module %s\n#\n\n" % file)
txt.write("\n# functions from module %s\n" % file)
oldfile = file
classes.write("def %s(" % func)
txt.write("%s()\n" % func)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
classes.write("%s" % arg[0])
n = n + 1
classes.write("):\n")
writeDoc(name, args, ' ', classes)
for arg in args:
if arg[1] in classes_type:
classes.write(" if %s is None: %s__o = None\n" %
(arg[0], arg[0]))
classes.write(" else: %s__o = %s%s\n" %
(arg[0], arg[0], classes_type[arg[1]][0]))
if arg[1] in py_types:
(f, t, n, c) = py_types[arg[1]]
if t == "File":
classes.write(" if %s is not None: %s.flush()\n" % (
arg[0], arg[0]))
if ret[0] != "void":
classes.write(" ret = ")
else:
classes.write(" ")
classes.write("libxml2mod.%s(" % name)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
classes.write("%s" % arg[0])
if arg[1] in classes_type:
classes.write("__o")
n = n + 1
classes.write(")\n")
# This may be needed to reposition the I/O, but likely to cause more harm
# than good. Those changes in Python3 really break the model.
# for arg in args:
# if arg[1] in py_types:
# (f, t, n, c) = py_types[arg[1]]
# if t == "File":
# classes.write(" if %s is not None: %s.seek(0,0)\n"%(
# arg[0], arg[0]))
if ret[0] != "void":
if ret[0] in classes_type:
#
# Raise an exception
#
if name in functions_noexcept:
classes.write(" if ret is None:return None\n")
elif name.find("URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif name.find("XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif name.find("Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
classes.write(" return ")
classes.write(classes_type[ret[0]][1] % ("ret"))
classes.write("\n")
else:
classes.write(" return ret\n")
classes.write("\n")
txt.write("\n\n#\n# Set of classes of the module\n#\n\n")
for classname in classes_list:
if classname == "None":
pass
else:
if classname in classes_ancestor:
txt.write("\n\nClass %s(%s)\n" % (classname,
classes_ancestor[classname]))
classes.write("class %s(%s):\n" % (classname,
classes_ancestor[classname]))
classes.write(" def __init__(self, _obj=None):\n")
if classes_ancestor[classname] == "xmlCore" or \
classes_ancestor[classname] == "xmlNode":
classes.write(" if checkWrapper(_obj) != 0:")
classes.write(" raise TypeError")
classes.write("('%s got a wrong wrapper object type')\n" % \
classname)
if classname in reference_keepers:
rlist = reference_keepers[classname]
for ref in rlist:
classes.write(" self.%s = None\n" % ref[1])
classes.write(" self._o = _obj\n")
classes.write(" %s.__init__(self, _obj=_obj)\n\n" % (
classes_ancestor[classname]))
if classes_ancestor[classname] == "xmlCore" or \
classes_ancestor[classname] == "xmlNode":
classes.write(" def __repr__(self):\n")
format = "<%s (%%s) object at 0x%%x>" % (classname)
classes.write(" return \"%s\" %% (self.name, int(pos_id (self)))\n\n" % (
format))
else:
txt.write("Class %s()\n" % (classname))
classes.write("class %s:\n" % (classname))
classes.write(" def __init__(self, _obj=None):\n")
if classname in reference_keepers:
list = reference_keepers[classname]
for ref in list:
classes.write(" self.%s = None\n" % ref[1])
classes.write(" if _obj != None:self._o = _obj;return\n")
classes.write(" self._o = None\n\n")
destruct=None
if classname in classes_destructors:
classes.write(" def __del__(self):\n")
classes.write(" if self._o != None:\n")
classes.write(" libxml2mod.%s(self._o)\n" %
classes_destructors[classname])
classes.write(" self._o = None\n\n")
destruct=classes_destructors[classname]
flist = function_classes[classname]
flist = sorted(flist, key=cmp_to_key(functionCompare))
oldfile = ""
for info in flist:
(index, func, name, ret, args, file) = info
#
# Do not provide the destructors as methods for the class,
# to avoid double free
#
if name == destruct:
continue
if file != oldfile:
if file == "python_accessor":
classes.write(" # accessors for %s\n" % (classname))
txt.write(" # accessors\n")
else:
classes.write(" #\n")
classes.write(" # %s functions from module %s\n" % (
classname, file))
txt.write("\n # functions from module %s\n" % file)
classes.write(" #\n\n")
oldfile = file
classes.write(" def %s(self" % func)
txt.write(" %s()\n" % func)
n = 0
for arg in args:
if n != index:
classes.write(", %s" % arg[0])
n = n + 1
classes.write("):\n")
writeDoc(name, args, ' ', classes)
n = 0
for arg in args:
if arg[1] in classes_type:
if n != index:
classes.write(" if %s is None: %s__o = None\n" %
(arg[0], arg[0]))
classes.write(" else: %s__o = %s%s\n" %
(arg[0], arg[0], classes_type[arg[1]][0]))
n = n + 1
if ret[0] != "void":
classes.write(" ret = ")
else:
classes.write(" ")
classes.write("libxml2mod.%s(" % name)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
if n != index:
classes.write("%s" % arg[0])
if arg[1] in classes_type:
classes.write("__o")
else:
classes.write("self")
if arg[1] in classes_type:
classes.write(classes_type[arg[1]][0])
n = n + 1
classes.write(")\n")
if ret[0] != "void":
if ret[0] in classes_type:
#
# Raise an exception
#
if name in functions_noexcept:
classes.write(
" if ret is None:return None\n")
elif name.find("URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif name.find("XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif name.find("Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
#
# generate the returned class wrapper for the object
#
classes.write(" __tmp = ")
classes.write(classes_type[ret[0]][1] % ("ret"))
classes.write("\n")
#
# Sometimes one needs to keep references to the source
# class in the returned class object.
# See reference_keepers for the list
#
tclass = classes_type[ret[0]][2]
if tclass in reference_keepers:
list = reference_keepers[tclass]
for pref in list:
if pref[0] == classname:
classes.write(" __tmp.%s = self\n" %
pref[1])
#
# return the class
#
classes.write(" return __tmp\n")
elif ret[0] in converter_type:
#
# Raise an exception
#
if name in functions_noexcept:
classes.write(
" if ret is None:return None")
elif name.find("URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif name.find("XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif name.find("Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
classes.write(" return ")
classes.write(converter_type[ret[0]] % ("ret"))
classes.write("\n")
else:
classes.write(" return ret\n")
classes.write("\n")
#
# Generate enum constants
#
for type,enum in enums.items():
classes.write("# %s\n" % type)
items = enum.items()
items = sorted(items, key=(lambda i: int(i[1])))
for name,value in items:
classes.write("%s = %s\n" % (name,value))
classes.write("\n")
txt.close()
classes.close()
buildStubs()
buildWrappers()
| 39.518043
| 120
| 0.499361
|
8b0f2794465cd27758fe973e58bca8883dd85bb2
| 2,162
|
py
|
Python
|
tools/TF2CaffeModel/itl/files/__init__.py
|
chenaili6/FeatherCNN
|
52cd8c8749ed584461a88b1f04749bb35a48f9a6
|
[
"Apache-2.0"
] | 4
|
2018-05-14T09:00:33.000Z
|
2021-05-14T08:11:54.000Z
|
tools/TF2CaffeModel/itl/files/__init__.py
|
nihui/FeatherCNN
|
2805f371bd8f33ef742cc9523979f29295d926fb
|
[
"Apache-2.0"
] | null | null | null |
tools/TF2CaffeModel/itl/files/__init__.py
|
nihui/FeatherCNN
|
2805f371bd8f33ef742cc9523979f29295d926fb
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
itl provides rich layer implementations tailored for
various benchmarks and domain-specific problems. In addition, we also
support transparent access to native TensorFlow parameters.
For example, we provide not only layers for local response normalization, but also
layers that allow users to apply ``tf.nn.lrn`` on ``network.outputs``.
More functions can be found in `TensorFlow API <https://www.tensorflow.org/versions/master/api_docs/index.html>`__.
"""
from .dataset_loaders.celebA_dataset import *
from .dataset_loaders.cifar10_dataset import *
from .dataset_loaders.cyclegan_dataset import *
from .dataset_loaders.flickr_1M_dataset import *
from .dataset_loaders.flickr_25k_dataset import *
from .dataset_loaders.imdb_dataset import *
from .dataset_loaders.matt_mahoney_dataset import *
from .dataset_loaders.mnist_dataset import *
from .dataset_loaders.mnist_fashion_dataset import *
from .dataset_loaders.mpii_dataset import *
from .dataset_loaders.nietzsche_dataset import *
from .dataset_loaders.ptb_dataset import *
from .dataset_loaders.voc_dataset import *
from .dataset_loaders.wmt_en_fr_dataset import *
from .utils import *
__all__ = [
# Dataset Loaders
'load_celebA_dataset',
'load_cifar10_dataset',
'load_cyclegan_dataset',
'load_fashion_mnist_dataset',
'load_flickr1M_dataset',
'load_flickr25k_dataset',
'load_imdb_dataset',
'load_matt_mahoney_text8_dataset',
'load_mnist_dataset',
'load_mpii_pose_dataset',
'load_nietzsche_dataset',
'load_ptb_dataset',
'load_voc_dataset',
'load_wmt_en_fr_dataset',
# Util Functions
'assign_params',
'del_file',
'del_folder',
'download_file_from_google_drive',
'exists_or_mkdir',
'file_exists',
'folder_exists',
'load_and_assign_npz',
'load_and_assign_npz_dict',
'load_ckpt',
'load_cropped_svhn',
'load_file_list',
'load_folder_list',
'load_npy_to_any',
'load_npz',
'maybe_download_and_extract',
'natural_keys',
'npz_to_W_pdf',
'read_file',
'save_any_to_npy',
'save_ckpt',
'save_npz',
'save_npz_dict',
]
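# Illustrative usage sketch (not part of the original module). The call below
# assumes the TensorLayer-style signature that this package mirrors; the
# `shape` keyword and the six-tuple return value are assumptions to verify
# against the local implementation.
#
#     from itl.files import load_mnist_dataset
#     X_train, y_train, X_val, y_val, X_test, y_test = load_mnist_dataset(shape=(-1, 784))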
| 30.450704
| 115
| 0.748844
|
11dd73f6e1584653ca6ce595317b2e702b52b099
| 90
|
py
|
Python
|
djangocms_versioning/__init__.py
|
jonathan-s/djangocms-versioning
|
2fb6e26dd2f1b39b38d525b1afe9a6a6433bb605
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_versioning/__init__.py
|
jonathan-s/djangocms-versioning
|
2fb6e26dd2f1b39b38d525b1afe9a6a6433bb605
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_versioning/__init__.py
|
jonathan-s/djangocms-versioning
|
2fb6e26dd2f1b39b38d525b1afe9a6a6433bb605
|
[
"BSD-3-Clause"
] | null | null | null |
__version__ = "0.0.26"
default_app_config = "djangocms_versioning.apps.VersioningConfig"
| 22.5
| 65
| 0.811111
|
56daa7d1ed0238d51503ee9da48d95e11b73e8a2
| 3,748
|
py
|
Python
|
mongo_interface.py
|
hkhamm/meetme
|
53beb81f47932090d95a4c29666b020912487f0b
|
[
"Artistic-2.0"
] | null | null | null |
mongo_interface.py
|
hkhamm/meetme
|
53beb81f47932090d95a4c29666b020912487f0b
|
[
"Artistic-2.0"
] | 2
|
2021-02-08T20:17:47.000Z
|
2021-04-30T20:33:38.000Z
|
mongo_interface.py
|
hkhamm/meetme
|
53beb81f47932090d95a4c29666b020912487f0b
|
[
"Artistic-2.0"
] | null | null | null |
import flask
import sys
from pymongo import MongoClient
import CONFIG
try:
dbclient = MongoClient(CONFIG.MONGO_URL)
db = dbclient.meetme
collection = db.busy_times
except:
print("Failure opening database. Is Mongo running? Correct password?")
sys.exit(1)
def add_times_db(new_times):
"""
Inserts a list of busy times into the database with the given key.
:param new_times: is a list of busy times to add to the busy times in
the database.
"""
times = convert_times(new_times)
for time in times:
try:
record = {'type': 'busy_times',
'key': flask.session['key'],
'start': time['start'],
'end': time['end']}
collection.insert(record)
message = 'times added.'
except:
message = 'times not added.'
# print(message)
def get_times_db():
"""
Returns a list of all busy times in the database for the given key.
"""
records = []
records_dict = {}
for record in collection.find({'type': 'busy_times',
'key': flask.session['key']}):
records_dict[record['start']['dateTime']] = {
'start': {'dateTime': record['start']['dateTime']},
'end': {'dateTime': record['end']['dateTime']}
}
for key in sorted(records_dict):
records.append(records_dict[key])
return records
def remove_all_times_db():
"""
Deletes all busy times from the database for the given key.
"""
try:
record = {'type': 'busy_times',
'key': flask.session['key']}
collection.delete_many(record)
message = 'times removed'
result = True
except:
message = 'times not removed'
result = False
# print(message)
return flask.jsonify(message=message, result=result)
def convert_times(times):
"""
Converts from Google Calendar events to busy times that include only
start and end date times.
:param times: is a list of Google Calendar events.
:return: a list of busy time dicts with start and end date times.
"""
new_times = []
for time in times:
new_times.append({'start': {'dateTime': time['start']['dateTime']},
'end': {'dateTime': time['end']['dateTime']}})
return new_times
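# Illustrative sketch for convert_times (hypothetical event data, not from the
# original file): a Google Calendar event carries many fields, but only the
# start and end dateTimes survive the conversion.
#
#     events = [{'summary': 'Standup',
#                'start': {'dateTime': '2015-11-20T09:00:00-08:00'},
#                'end': {'dateTime': '2015-11-20T09:30:00-08:00'}}]
#     convert_times(events)
#     # -> [{'start': {'dateTime': '2015-11-20T09:00:00-08:00'},
#     #      'end': {'dateTime': '2015-11-20T09:30:00-08:00'}}]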
def store_date_range_db(start, end):
"""
Stores a date range in the database with the given key.
:param start: is the start of the date range.
:param end: is the end of the date range.
"""
try:
record = {'type': 'date_range',
'key': flask.session['key'],
'start': start,
'end': end}
collection.insert(record)
message = 'date_range added.'
except:
message = 'date_range not added.'
# print(message)
def get_date_range_db():
"""
Gets the date range from the database for the given key.
:return: the date range as a list of records.
"""
records = []
try:
for record in collection.find({'type': 'date_range',
'key': flask.session['key']}):
records.append(record)
message = 'date_range found'
except:
message = 'date_range not found'
# print(message)
return records[0]
def remove_date_range_db():
"""
Removes the date range from the database for the given key.
"""
try:
collection.delete_one({'type': 'date_range',
'key': flask.session['key']})
message = 'date range removed'
except:
message = 'date range not removed'
# print(message)
| 26.771429
| 75
| 0.568303
|
409b3ae190e0b896349fd2f9eb31f6ba8673a29a
| 891
|
py
|
Python
|
app/apidocs/token.py
|
zsh2008/mini-shop-server
|
adeb8ad522034d8337166e3a199c000921580c21
|
[
"MIT"
] | 2
|
2020-11-10T08:46:15.000Z
|
2020-11-12T01:57:24.000Z
|
app/apidocs/token.py
|
zsh2008/mini-shop-server
|
adeb8ad522034d8337166e3a199c000921580c21
|
[
"MIT"
] | null | null | null |
app/apidocs/token.py
|
zsh2008/mini-shop-server
|
adeb8ad522034d8337166e3a199c000921580c21
|
[
"MIT"
] | 1
|
2020-11-10T08:46:16.000Z
|
2020-11-10T08:46:16.000Z
|
# _*_ coding: utf-8 _*_
"""
Created by Alimazing on 2018/12/4.
"""
__author__ = 'Alimazing'
get_token = {
"parameters": [
{
"name": "body",
"in": "body",
"require": "true",
"schema": {
"id": "Token",
"required": ["account", "secret", "type"],
"properties": {
"account": {
"type": "string",
"description": "用户名",
"enum": ["999@qq.com"],
"default": "999@qq.com"
},
"secret": {
"type": "string",
"description": "密码",
"enum": ["123456"],
"default": "123456"
},
"type": {
"type": "integer",
"description": "登录方式",
"enum": [100],
"default": 100
}
}
}
}
],
"responses": {
"200": {
"description": "登录成功",
"examples": {}
}
}
}
get_token_info = {
"parameters": [],
"responses": {
"200": {
"description": "获取成功",
"examples": {}
}
}
}
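# Illustrative note (assumption, not part of the original file): a request body
# that satisfies the "Token" schema above would look like
#
#     {"account": "999@qq.com", "secret": "123456", "type": 100}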
| 15.910714
| 46
| 0.452301
|
c03ae2ec495d04260c481f465aaaa2f89aa60c08
| 1,204
|
py
|
Python
|
groundwork/util.py
|
amhaske/groundwork
|
abd63a54a34434ebdf527b1619c8bc90d8f97c28
|
[
"MIT"
] | 17
|
2016-07-27T12:32:06.000Z
|
2022-01-24T15:58:04.000Z
|
groundwork/util.py
|
amhaske/groundwork
|
abd63a54a34434ebdf527b1619c8bc90d8f97c28
|
[
"MIT"
] | 31
|
2016-12-16T07:29:54.000Z
|
2019-05-07T07:08:18.000Z
|
groundwork/util.py
|
amhaske/groundwork
|
abd63a54a34434ebdf527b1619c8bc90d8f97c28
|
[
"MIT"
] | 6
|
2018-03-05T13:53:31.000Z
|
2019-06-07T05:33:54.000Z
|
def gw_get(object_dict, name=None, plugin=None):
"""
Getter function to retrieve objects from a given object dictionary.
Used mainly to provide get() inside patterns.
:param object_dict: objects, which must have 'name' and 'plugin' as attribute
:type object_dict: dictionary
:param name: name of the object
:type name: str
:param plugin: plugin name, which registers the object
:return: None, single object or dict of objects
"""
if plugin is not None:
if name is None:
object_list = {}
for key in object_dict.keys():
if object_dict[key].plugin == plugin:
object_list[key] = object_dict[key]
return object_list
else:
if name in object_dict.keys():
if object_dict[name].plugin == plugin:
return object_dict[name]
else:
return None
else:
return None
else:
if name is None:
return object_dict
else:
if name in object_dict.keys():
return object_dict[name]
else:
return None
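# Minimal usage sketch (not part of the original module): the stored objects
# only need `name` and `plugin` attributes, so SimpleNamespace stands in for a
# real groundwork object here.
if __name__ == "__main__":
    from types import SimpleNamespace

    objects = {
        "cmd_a": SimpleNamespace(name="cmd_a", plugin="PluginA"),
        "cmd_b": SimpleNamespace(name="cmd_b", plugin="PluginB"),
    }

    print(gw_get(objects, name="cmd_a"))                    # single object
    print(gw_get(objects, plugin="PluginB"))                 # dict of PluginB objects
    print(gw_get(objects, name="cmd_a", plugin="PluginB"))   # None (wrong plugin)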
| 32.540541
| 81
| 0.554817
|
6cb6aea55b92ececf6a68a81056fccd94de37239
| 6,672
|
py
|
Python
|
flask_stateless_auth/__init__.py
|
omarryhan/flask-stateless-auth
|
c6acefc55050d1a53235ead20cb7d5e9eb4bbf9a
|
[
"MIT"
] | 3
|
2018-09-13T19:55:47.000Z
|
2018-09-15T18:31:22.000Z
|
flask_stateless_auth/__init__.py
|
omarryhan/flask-stateless-auth
|
c6acefc55050d1a53235ead20cb7d5e9eb4bbf9a
|
[
"MIT"
] | null | null | null |
flask_stateless_auth/__init__.py
|
omarryhan/flask-stateless-auth
|
c6acefc55050d1a53235ead20cb7d5e9eb4bbf9a
|
[
"MIT"
] | null | null | null |
from functools import wraps
from werkzeug.local import LocalProxy
from werkzeug.security import safe_str_cmp
from flask import jsonify, request, current_app, _request_ctx_stack, has_request_context
from flask.signals import Namespace
__title__ = "Flask-Stateless-Auth"
__description__ = "Flask stateless authentication with secrets"
__url__ = "https://github.com/omarryhan/flask-stateless-auth"
__version_info__ = ("0", "0", "17")
__version__ = ".".join(__version_info__)
__author__ = "Omar Ryhan"
__author_email__ = "omarryhan@gmail.com"
__maintainer__ = "Omar Ryhan"
__license__ = "MIT"
__copyright__ = "(c) 2018 by Omar Ryhan"
__all__ = [
"current_stateless_user",
"token_required",
"StatelessAuthError",
"StatelessAuthManager",
"UserMixin",
"TokenMixin",
]
# TODO: Unit test
# TODO: Test app_context_processor
# TODO: Test signals
# TODO: Support python 2
DEFAULT_AUTH_TYPE = "Bearer"
AUTH_HEADER = "Authorization"
ADD_CONTEXT_PROCESSOR = True
DEFAULT_TOKEN_TYPE = "access"
_signals = Namespace()
user_authorized = _signals.signal("user-authorized")
user_unauthorized = _signals.signal("user-unauthorized")
def _get_stateless_user():
if has_request_context():
return getattr(_request_ctx_stack.top, "stateless_user", None)
else:
return None
current_stateless_user = LocalProxy(_get_stateless_user)
def token_required(*args, token_type=None, auth_type=None):
""" The args parameter should not be used.
Python will automatically pass your function to this decorator if you don't pass it any args.
It will still work whether you decorate your function with `token_required()` or just `token_required`. """
def inner(f):
@wraps(f)
def innermost(*args, **kwargs):
app = current_app._get_current_object()
try:
app.stateless_auth_manager._set_user(token_type, auth_type)
except StatelessAuthError as e:
user_unauthorized.send(app.stateless_auth_manager)
raise e
except AttributeError as e:
print(
"Provide a token callback, a user callback and a StatelessAuthError handler as shown in StatelessAuthManager's docs"
)
raise e
else:
user_authorized.send(app.stateless_auth_manager)
return f(*args, **kwargs)
return innermost
if token_type is None and auth_type is None and args:
return inner(args[0])
return inner
class StatelessAuthError(Exception):
""" 400: request, 401: token, 403: scope 500: server"""
def __init__(self, msg, code, type_):
self.code = code
self.msg = msg
self.type = type_
self.full_msg = "{} error: {}".format(type_, msg)
super(StatelessAuthError, self).__init__(self.full_msg)
class StatelessAuthManager:
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
app.stateless_auth_manager = self
self._init_configs(app)
if self.add_context_processor:
app.context_processor(self._stateless_user_context_processor)
app.teardown_request(self.teardown)
def _init_configs(self, app):
self.default_auth_type = app.config.get("DEFAULT_AUTH_TYPE", DEFAULT_AUTH_TYPE)
self.auth_header = app.config.get("AUTH_HEADER", AUTH_HEADER)
self.add_context_processor = app.config.get(
"ADD_CONTEXT_PROCESSOR", ADD_CONTEXT_PROCESSOR
)
self.default_token_type = app.config.get(
"DEFAULT_TOKEN_TYPE", DEFAULT_TOKEN_TYPE
)
def teardown(self, exception):
""" TODO: Should there be anything here?"""
pass
def user_loader(self, callback):
self._user_callback = callback
return callback
def token_loader(self, callback):
self._token_callback = callback
return callback
def _load_user_model(self, user_id):
return self._user_callback(user_id)
def _load_token_model(self, token, token_type, auth_type):
return self._token_callback(
token=token, token_type=token_type, auth_type=auth_type
)
def _load_token_from_request(self, auth_type):
token = request.headers.get(self.auth_header)
if token:
token = token.split(" ")
else:
raise StatelessAuthError(msg="No token provided", code=400, type_="Request")
if len(token) == 2 and isinstance(token, list):
if safe_str_cmp(token[0], auth_type):
return token[1]
else:
raise StatelessAuthError(
msg="Invalid token type", code=400, type_="Request"
)
else:
raise StatelessAuthError(
msg="Invalid number of arguments in token header",
code=400,
type_="Request",
)
def _set_user(self, token_type, auth_type):
if auth_type is None:
auth_type = self.default_auth_type
if token_type is None:
token_type = self.default_token_type
token = self._load_token_from_request(auth_type)
token_model = self._load_token_model(
token=token, token_type=token_type, auth_type=auth_type
)
if not token_model:
raise StatelessAuthError(msg="Invalid token", code=401, type_="Token")
self._check_token(token_model, token_type, auth_type)
user = self._load_user_model(token_model)
if not user:
raise StatelessAuthError(
msg="Internal server error", code=500, type_="Server"
)
self._check_user(user)
self._update_request_context_with(user)
def _check_token(self, token_model, token_type, auth_type):
if token_model.token_expired(token_type, auth_type):
raise StatelessAuthError(
msg="{} token expired".format(token_type), code=401, type_="Token"
)
def _check_user(self, user):
if not user or not user.is_active:
raise StatelessAuthError(msg="Invalid User", code=401, type_="Token")
def _stateless_user_context_processor(self):
return dict(current_stateless_user=_get_stateless_user())
def _update_request_context_with(self, user):
ctx = _request_ctx_stack.top
ctx.stateless_user = user
class TokenMixin:
def token_expired(self, token_type, auth_type):
return False
class UserMixin:
@property
def is_active(self):
return True
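# Minimal wiring sketch (assumptions, not part of the original module): the
# lookup helpers and the `username` attribute below are placeholders for
# whatever storage and user model the application uses; only the callback
# decorators, `token_required` and `current_stateless_user` come from this
# module.
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     stateless_auth_manager = StatelessAuthManager()
#
#     @stateless_auth_manager.token_loader
#     def load_token(token, token_type, auth_type):
#         return lookup_token(token)          # hypothetical storage helper
#
#     @stateless_auth_manager.user_loader
#     def load_user(token_model):
#         return lookup_user(token_model)     # hypothetical storage helper
#
#     @app.route('/secret')
#     @token_required
#     def secret():
#         return jsonify(user=current_stateless_user.username)
#
#     stateless_auth_manager.init_app(app)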
| 32.866995
| 136
| 0.654976
|
05820dc821a2252b3730ae11200845bf96f4ed35
| 8,196
|
py
|
Python
|
entity_typing_framework/EntityTypingNetwork_classes/projectors.py
|
NooneBug/entity_typing_framework
|
e4c3cf3a6d9c3a3453ce516de855fc22b49ae5c0
|
[
"MIT"
] | null | null | null |
entity_typing_framework/EntityTypingNetwork_classes/projectors.py
|
NooneBug/entity_typing_framework
|
e4c3cf3a6d9c3a3453ce516de855fc22b49ae5c0
|
[
"MIT"
] | null | null | null |
entity_typing_framework/EntityTypingNetwork_classes/projectors.py
|
NooneBug/entity_typing_framework
|
e4c3cf3a6d9c3a3453ce516de855fc22b49ae5c0
|
[
"MIT"
] | null | null | null |
from pytorch_lightning.core.lightning import LightningModule
from torch.nn import Sigmoid, ModuleDict, ReLU, Linear, Dropout, BatchNorm1d
from torch.nn.modules import activation
class Layer(LightningModule):
'''
Fully Connected Layer with activation function, with parametrization managed through the :code:`yaml` configuration file under the key :code:`model.ET_Network_params.input_projector_params.layer_id`
Each layer has an incremental id specified in the :code:`yaml` configuration file, which works as index in the dictionary (see the example configuration files)
Parameters:
in_features:
dimension of the input features of the fully connected layer
out_features:
dimension of the output features of the fully connected layer
activation:
the activation function to use; supported values are :code:`relu`, :code:`sigmoid` and :code:`none` (no activation)
use_dropout:
whether to use dropout or not
dropout_p:
probability of dropout
use_batch_norm:
whether to use batch normalization or not
'''
def __init__(self, in_features, out_features, activation = 'relu', use_dropout = True, dropout_p = .1, use_batch_norm = False) -> None:
super().__init__()
self.linear = Linear(in_features, out_features)
self.activation = self.instance_activation(activation)
self.use_dropout = use_dropout
self.use_batch_norm = use_batch_norm
if self.use_dropout:
self.dropout = Dropout(p = dropout_p)
if self.use_batch_norm:
self.batch_norm = BatchNorm1d(num_features=out_features)
def forward(self, hidden_representation):
'''
Performs the forward pass for the fully connected layer.
Parameters:
hidden representation:
a tensor with shape :code:`[in_features, batch_size]`
Output:
output of the forward pass and the activation with shape :code:`[out_features, batch_size]`
'''
h = self.linear(hidden_representation)
if self.activation:
h = self.activation(h)
if self.use_batch_norm:
h = self.batch_norm(h)
if self.use_dropout:
h = self.dropout(h)
return h
def instance_activation(self, activation_name):
'''
instantiates the activation function. This procedure is driven by the :code:`yaml` configuration file
parameters:
activation name:
name of the activation function to use, specified in the key: :code:`model.ET_Network_params.input_projector_params.layer_id.activation` of the :code:`yaml` configuration file
supported values: :code:`['relu', 'sigmoid', 'none']`
'''
if activation_name == 'relu':
return ReLU()
elif activation_name == 'sigmoid':
return Sigmoid()
elif activation_name == 'none':
return None
else:
raise Exception('An unknown name (\'{}\') is given for activation, check the yaml or implement an activation that corresponds to that name'.format(activation_name))
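# Minimal usage sketch for Layer (illustrative values, not from the
# configuration files): a single fully connected block going from a
# 768-dimensional encoding to 256 features with ReLU and dropout.
#
#     layer = Layer(in_features=768, out_features=256, activation='relu',
#                   use_dropout=True, dropout_p=0.1, use_batch_norm=False)
#     h = layer(torch.rand(32, 768))   # batch of 32 -> tensor of shape [32, 256]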
class Classifier(LightningModule):
'''
Projector used as classification layer after the :ref:`Encoder<encoder>`. Predicts a vector with shape :code:`(type_number)` with values between 0 and 1.
Parameters:
name:
the name of the submodule, has to be specified in the :code:`yaml` configuration file with key :code:`model.ET_Network_params.input_projector_params.name`
to instance this projector insert the string :code:`Classifier` in the :code:`yaml` configuration file with key :code:`model.ET_Network_params.input_projector_params.name`
this param is used by the :ref:`Entity Typing Network<EntityTypingNetwork>` to instance the correct submodule
type_number:
number of types in the dataset for this run, it is automatically extracted by the :doc:`DatasetManager<dataset_managers>` and automatically given in input to the Classifier by the :ref:`Entity Typing Network<EntityTypingNetwork>`
input_dim:
dimension of the vector inputed for the forward, it is automatically extracted from the :doc:`Encoder<encoders>` by the :ref:`Entity Typing Network<EntityTypingNetwork>`
parameters:
dictionary of parameters to instantiate different :code:`Layer` objects.
the values for this parameter have to be specified in a :code:`yaml` dictionary in the :code:`yaml` configuration file with key :code:`model.ET_Network_params.input_projector_params.layers_parameters`
see the documentation of :code:`Layer` for the format of these parameters
'''
def __init__(self, name, type_number, input_dim, layers_parameters):
super().__init__()
self.type_number = type_number
self.input_dim = input_dim
self.layers_parameters = layers_parameters
self.add_parameters()
self.check_parameters()
self.layers = ModuleDict({layer_name: Layer(**layer_parameters) for layer_name, layer_parameters in self.layers_parameters.items()})
def forward(self, input_representation):
'''
performs the forward pass of this submodule, projecting the encoded input into a vector of confidence values (one for each type in the dataset)
parameters:
input_representation:
output of the :doc:`Input Encoder<encoders>` with shape :code:`[input_dim, batch_size]`
output:
classification vector with shape :code:`[type_number, batch_size]`
'''
for i in range(len(self.layers_parameters)):
if i == 0:
h = self.layers[str(i)](input_representation)
else:
h = self.layers[str(i)](h)
return h
def add_parameters(self):
'''
adds the default parameters if they are not specified in the :code:`yaml` configuration file under the key :code:`model.ET_Network_params.input_projector_params.layers_parameters`
The default values are:
- if the input features of the 0th projection layer are not specified, or are specified as the string :code:`encoder_dim`, the value :code:`input_dim` is inserted by default
- if the output features of the last projection layer are not specified, or are specified as the string :code:`type_number`, the value :code:`type_number` is inserted by default
'''
if 'in_features' not in self.layers_parameters['0']:
self.layers_parameters['0']['in_features'] = self.input_dim
if self.layers_parameters['0']['in_features'] == 'encoder_dim':
self.layers_parameters['0']['in_features'] = self.input_dim
if 'out_features' not in self.layers_parameters[str(len(self.layers_parameters) - 1)]:
self.layers_parameters[str(len(self.layers_parameters) - 1)]['out_features'] = self.type_number
if self.layers_parameters[str(len(self.layers_parameters) - 1)]['out_features'] == 'type_number':
self.layers_parameters[str(len(self.layers_parameters) - 1)]['out_features'] = self.type_number
def check_parameters(self):
'''
Checks the parameter values and raises exceptions, ensuring that a standard classification setup can be obtained.
'''
if self.input_dim != self.layers_parameters['0']['in_features']:
raise Exception('Encoder\'s output dimension ({}) and projector\'s input dimension ({}) has to have the same value ({}). Check the yaml'.format(self.input_dim, self.layers_parameters['0']['in_features'], self.input_dim))
if self.type_number != self.layers_parameters[str(len(self.layers_parameters) - 1)]['out_features']:
raise Exception('Types\' number ({}) and projector\'s last layer output dimension ({}) has to have the same value ({}). Check the yaml'.format(self.type_number, self.layers_parameters[str(len(self.layers_parameters) - 1)]['out_features'], self.type_number))
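# Illustrative sketch (hypothetical configuration, not from the original yaml):
# a two-layer classifier projecting a 768-dimensional encoding onto 9 types.
# The placeholders 'encoder_dim' and 'type_number' are resolved by
# add_parameters() as documented above.
#
#     layers_parameters = {
#         '0': {'in_features': 'encoder_dim', 'out_features': 256,
#               'activation': 'relu'},
#         '1': {'in_features': 256, 'out_features': 'type_number',
#               'activation': 'sigmoid', 'use_dropout': False},
#     }
#     classifier = Classifier(name='Classifier', type_number=9, input_dim=768,
#                             layers_parameters=layers_parameters)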
| 49.077844
| 269
| 0.668253
|
8555c0391d3cd6cb09bde173eb535891cd14b33f
| 11,071
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20160901/get_express_route_circuit.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20160901/get_express_route_circuit.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20160901/get_express_route_circuit.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetExpressRouteCircuitResult',
'AwaitableGetExpressRouteCircuitResult',
'get_express_route_circuit',
]
@pulumi.output_type
class GetExpressRouteCircuitResult:
"""
ExpressRouteCircuit resource
"""
def __init__(__self__, allow_classic_operations=None, authorizations=None, circuit_provisioning_state=None, etag=None, gateway_manager_etag=None, id=None, location=None, name=None, peerings=None, provisioning_state=None, service_key=None, service_provider_notes=None, service_provider_properties=None, service_provider_provisioning_state=None, sku=None, tags=None, type=None):
if allow_classic_operations and not isinstance(allow_classic_operations, bool):
raise TypeError("Expected argument 'allow_classic_operations' to be a bool")
pulumi.set(__self__, "allow_classic_operations", allow_classic_operations)
if authorizations and not isinstance(authorizations, list):
raise TypeError("Expected argument 'authorizations' to be a list")
pulumi.set(__self__, "authorizations", authorizations)
if circuit_provisioning_state and not isinstance(circuit_provisioning_state, str):
raise TypeError("Expected argument 'circuit_provisioning_state' to be a str")
pulumi.set(__self__, "circuit_provisioning_state", circuit_provisioning_state)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if gateway_manager_etag and not isinstance(gateway_manager_etag, str):
raise TypeError("Expected argument 'gateway_manager_etag' to be a str")
pulumi.set(__self__, "gateway_manager_etag", gateway_manager_etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peerings and not isinstance(peerings, list):
raise TypeError("Expected argument 'peerings' to be a list")
pulumi.set(__self__, "peerings", peerings)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if service_key and not isinstance(service_key, str):
raise TypeError("Expected argument 'service_key' to be a str")
pulumi.set(__self__, "service_key", service_key)
if service_provider_notes and not isinstance(service_provider_notes, str):
raise TypeError("Expected argument 'service_provider_notes' to be a str")
pulumi.set(__self__, "service_provider_notes", service_provider_notes)
if service_provider_properties and not isinstance(service_provider_properties, dict):
raise TypeError("Expected argument 'service_provider_properties' to be a dict")
pulumi.set(__self__, "service_provider_properties", service_provider_properties)
if service_provider_provisioning_state and not isinstance(service_provider_provisioning_state, str):
raise TypeError("Expected argument 'service_provider_provisioning_state' to be a str")
pulumi.set(__self__, "service_provider_provisioning_state", service_provider_provisioning_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="allowClassicOperations")
def allow_classic_operations(self) -> Optional[bool]:
"""
Allow classic operations
"""
return pulumi.get(self, "allow_classic_operations")
@property
@pulumi.getter
def authorizations(self) -> Optional[Sequence['outputs.ExpressRouteCircuitAuthorizationResponse']]:
"""
The list of authorizations.
"""
return pulumi.get(self, "authorizations")
@property
@pulumi.getter(name="circuitProvisioningState")
def circuit_provisioning_state(self) -> Optional[str]:
"""
The CircuitProvisioningState state of the resource.
"""
return pulumi.get(self, "circuit_provisioning_state")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> Optional[str]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def peerings(self) -> Optional[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
"""
The list of peerings.
"""
return pulumi.get(self, "peerings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="serviceKey")
def service_key(self) -> Optional[str]:
"""
The ServiceKey.
"""
return pulumi.get(self, "service_key")
@property
@pulumi.getter(name="serviceProviderNotes")
def service_provider_notes(self) -> Optional[str]:
"""
The ServiceProviderNotes.
"""
return pulumi.get(self, "service_provider_notes")
@property
@pulumi.getter(name="serviceProviderProperties")
def service_provider_properties(self) -> Optional['outputs.ExpressRouteCircuitServiceProviderPropertiesResponse']:
"""
The ServiceProviderProperties.
"""
return pulumi.get(self, "service_provider_properties")
@property
@pulumi.getter(name="serviceProviderProvisioningState")
def service_provider_provisioning_state(self) -> Optional[str]:
"""
The ServiceProviderProvisioningState state of the resource. Possible values are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
"""
return pulumi.get(self, "service_provider_provisioning_state")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.ExpressRouteCircuitSkuResponse']:
"""
The SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetExpressRouteCircuitResult(GetExpressRouteCircuitResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetExpressRouteCircuitResult(
allow_classic_operations=self.allow_classic_operations,
authorizations=self.authorizations,
circuit_provisioning_state=self.circuit_provisioning_state,
etag=self.etag,
gateway_manager_etag=self.gateway_manager_etag,
id=self.id,
location=self.location,
name=self.name,
peerings=self.peerings,
provisioning_state=self.provisioning_state,
service_key=self.service_key,
service_provider_notes=self.service_provider_notes,
service_provider_properties=self.service_provider_properties,
service_provider_provisioning_state=self.service_provider_provisioning_state,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_express_route_circuit(circuit_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteCircuitResult:
"""
ExpressRouteCircuit resource
:param str circuit_name: The name of express route circuit.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['circuitName'] = circuit_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20160901:getExpressRouteCircuit', __args__, opts=opts, typ=GetExpressRouteCircuitResult).value
return AwaitableGetExpressRouteCircuitResult(
allow_classic_operations=__ret__.allow_classic_operations,
authorizations=__ret__.authorizations,
circuit_provisioning_state=__ret__.circuit_provisioning_state,
etag=__ret__.etag,
gateway_manager_etag=__ret__.gateway_manager_etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
peerings=__ret__.peerings,
provisioning_state=__ret__.provisioning_state,
service_key=__ret__.service_key,
service_provider_notes=__ret__.service_provider_notes,
service_provider_properties=__ret__.service_provider_properties,
service_provider_provisioning_state=__ret__.service_provider_provisioning_state,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
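# Illustrative usage sketch (hypothetical resource names, to be run inside a
# Pulumi program):
#
#     circuit = get_express_route_circuit(circuit_name="my-circuit",
#                                         resource_group_name="my-rg")
#     pulumi.export("serviceKey", circuit.service_key)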
| 40.258182
| 380
| 0.677084
|
15a50098de04d7c0408dfd27ef7fe6275ec1570f
| 2,025
|
py
|
Python
|
sprinkler/models.py
|
shanisma/plant-keeper
|
3ca92ae2d55544a301e1398496a08a45cca6d15b
|
[
"CC0-1.0"
] | 1
|
2020-04-12T22:00:17.000Z
|
2020-04-12T22:00:17.000Z
|
sprinkler/models.py
|
shanisma/plant-keeper
|
3ca92ae2d55544a301e1398496a08a45cca6d15b
|
[
"CC0-1.0"
] | null | null | null |
sprinkler/models.py
|
shanisma/plant-keeper
|
3ca92ae2d55544a301e1398496a08a45cca6d15b
|
[
"CC0-1.0"
] | null | null | null |
from django.db import models
from water.models import Device as WaterDevice
class Device(models.Model):
tag = models.CharField(unique=True, null=False, blank=False, max_length=200)
def __str__(self):
return f"{self.tag}"
class Sensor(models.Model):
tag = models.OneToOneField(
Device, on_delete=models.CASCADE, related_name="sprinkler_sensor_tag"
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
soil_moisture_raw_adc = models.IntegerField(blank=False, null=False)
soil_moisture = models.FloatField(blank=False, null=False)
def __str__(self):
return f"{self.tag}"
class Config(models.Model):
tag = models.OneToOneField(
Device, on_delete=models.CASCADE, related_name="sprinkler_config_tag"
)
water_tag_link = models.ForeignKey(WaterDevice, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
soil_moisture_min_level = models.FloatField(blank=False, null=False)
soil_moisture_max_level = models.FloatField(blank=False, null=False)
def __str__(self):
return f"{self.tag}"
class Controller(models.Model):
tag = models.OneToOneField(
Device, on_delete=models.CASCADE, related_name="sprinkler_controller_tag"
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
water_valve_signal = models.BooleanField(blank=False, null=False)
def __str__(self):
return f"{self.tag}"
class ForceController(models.Model):
tag = models.OneToOneField(
Device, on_delete=models.CASCADE, related_name="sprinkler_forceController_tag"
)
updated_at = models.DateTimeField(auto_now=True)
force_water_valve_signal = models.BooleanField(blank=False, null=False)
water_valve_signal = models.BooleanField(blank=False, null=False)
def __str__(self):
return f"{self.tag}"
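# Illustrative sketch (hypothetical tags; requires a configured Django project
# and an existing water.models.Device row to link against):
#
#     device = Device.objects.create(tag="sprinkler-01")
#     Config.objects.create(tag=device, water_tag_link=water_device,
#                           soil_moisture_min_level=20.0,
#                           soil_moisture_max_level=60.0)
#     Sensor.objects.create(tag=device, soil_moisture_raw_adc=512,
#                           soil_moisture=42.5)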
| 31.153846
| 86
| 0.732346
|
71fc6f47d1423170343cabb93b8b2762e390fc16
| 1,111
|
py
|
Python
|
clade/extensions/assembler.py
|
kateya/clade
|
f2c091be8055156ab3e6ce6b8f855c4b01d2b6f3
|
[
"Apache-2.0"
] | 11
|
2018-10-15T08:46:00.000Z
|
2022-02-14T14:03:15.000Z
|
clade/extensions/assembler.py
|
kateya/clade
|
f2c091be8055156ab3e6ce6b8f855c4b01d2b6f3
|
[
"Apache-2.0"
] | 136
|
2018-08-07T11:11:29.000Z
|
2022-03-31T19:02:21.000Z
|
clade/extensions/assembler.py
|
kateya/clade
|
f2c091be8055156ab3e6ce6b8f855c4b01d2b6f3
|
[
"Apache-2.0"
] | 6
|
2018-11-09T12:52:39.000Z
|
2022-02-19T20:34:25.000Z
|
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clade.extensions.common import Common
class AS(Common):
__version__ = "1"
def parse(self, cmds_file):
super().parse(cmds_file, self.conf.get("AS.which_list", []))
def parse_cmd(self, cmd):
parsed_cmd = super().parse_cmd(cmd, self.name)
if self.is_bad(parsed_cmd):
self.dump_bad_cmd_id(parsed_cmd["id"])
return
self.dump_cmd_by_id(cmd["id"], parsed_cmd)
| 33.666667
| 79
| 0.710171
|
edda3cb52417d494d1f20de321f85184deb04bd2
| 125
|
py
|
Python
|
PySrc/8day/Re05.py
|
Timmy-Oh/Adorable-Lab
|
c21454d011e6888fd28c41d1624721ea1826be40
|
[
"Apache-2.0"
] | null | null | null |
PySrc/8day/Re05.py
|
Timmy-Oh/Adorable-Lab
|
c21454d011e6888fd28c41d1624721ea1826be40
|
[
"Apache-2.0"
] | null | null | null |
PySrc/8day/Re05.py
|
Timmy-Oh/Adorable-Lab
|
c21454d011e6888fd28c41d1624721ea1826be40
|
[
"Apache-2.0"
] | null | null | null |
import re
r = re.compile("ck?w")
print(r.search("cw"))
print(r.search("ckw"))
print(r.search("ckkw"))
print(r.search("kkkw"))
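# Expected behaviour (comment added for clarity): the pattern "ck?w" matches a
# 'c', an optional 'k', then 'w', so the first two searches return match
# objects ("cw", "ckw") while the last two print None ("ckkw" has two k's
# between c and w, "kkkw" has no 'c' at all).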
| 20.833333
| 23
| 0.656
|
9f26d9925393c4a7300b70d6e70e45c02baf2b65
| 2,755
|
py
|
Python
|
mapping/enable/http_tile_manager.py
|
nmichaud/enable-mapping
|
421aae6c3c700406df0f2438cec190daf5074084
|
[
"BSD-3-Clause"
] | 1
|
2019-04-22T16:36:06.000Z
|
2019-04-22T16:36:06.000Z
|
mapping/enable/http_tile_manager.py
|
pombreda/enable-mapping
|
421aae6c3c700406df0f2438cec190daf5074084
|
[
"BSD-3-Clause"
] | null | null | null |
mapping/enable/http_tile_manager.py
|
pombreda/enable-mapping
|
421aae6c3c700406df0f2438cec190daf5074084
|
[
"BSD-3-Clause"
] | 2
|
2015-04-14T10:06:03.000Z
|
2020-10-03T03:56:47.000Z
|
import logging
# Enthought library imports
from traits.api import Int, Str, implements, on_trait_change
from pyface.gui import GUI
# Local imports
from i_tile_manager import ITileManager
from tile_manager import TileManager
from cacheing_decorators import lru_cache
from asynchttp import AsyncHTTPConnection
from async_loader import async_loader
class HTTPTileManager(TileManager):
implements(ITileManager)
#### ITileManager interface ###########################################
def get_tile_size(self):
return 256
def convert_to_tilenum(self, x, y, zoom):
n = 2 ** zoom
size = self.get_tile_size()
col = (x / size % n)
row = (n - 1 - y / size % n)
return (zoom, col, row)
@lru_cache()
def get_tile(self, zoom, row, col):
# Schedule a request to get the tile
async_loader.put(TileRequest(self._tile_received,
self.server, self.port, self.url,
dict(zoom=zoom, row=row, col=col)))
# return a blank tile for now
return None
#### Public interface #################################################
server = Str
port = Int(80)
url = Str
### Private interface ##################################################
def _tile_received(self, tile_args, data):
zoom, row, col = tile_args['zoom'], tile_args['row'], tile_args['col']
try:
data = self.process_raw(data)
self.get_tile.replace(data, self, zoom, row, col)
self.tile_ready = (zoom, row, col)
except Exception as e:
# Failed to process tile
logging.exception("Failed to process %s%s"%(self.server, self.url%(zoom,row,col)))
@on_trait_change('server, url')
def _reset_cache(self, new):
self.get_tile.clear()
# This is a hack to repaint
self.tile_ready = 0,0,0
class TileRequest(AsyncHTTPConnection):
def __init__(self, handler, host, port, url, tile_args):
AsyncHTTPConnection.__init__(self, host, port)
self.handler = handler
self._url = url
self._tile_args = tile_args
#self.set_debuglevel(1)
def handle_connect(self):
AsyncHTTPConnection.handle_connect(self)
self.putrequest("GET", self._url%self._tile_args)
self.endheaders()
self.getresponse()
def handle_response(self):
if self.response.status == 200:
GUI.invoke_later(self.handler,
self._tile_args,
self.response.body)
self.close()
def __str__(self):
return "TileRequest for %s"%str(self._tile_args)
def __repr__(self):
return str(self)
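# Worked example for convert_to_tilenum (illustrative numbers): with the
# 256 px tiles above and zoom 2 there are n = 4 tiles per axis, so a point at
# x = 512, y = 256 gives col = 512 / 256 % 4 = 2 and
# row = 4 - 1 - 256 / 256 % 4 = 2, i.e. the tile (zoom, col, row) = (2, 2, 2).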
| 30.611111
| 94
| 0.582214
|
7432ed03e6d2fd564d879b2b7ee1e17cc59432f3
| 3,123
|
py
|
Python
|
training.py
|
mirefek/neur-rec-seq
|
6e0ff723b01e290f1da7bb3aabed65c494a875e8
|
[
"MIT"
] | 1
|
2020-06-08T15:24:11.000Z
|
2020-06-08T15:24:11.000Z
|
training.py
|
mirefek/neur-rec-seq
|
6e0ff723b01e290f1da7bb3aabed65c494a875e8
|
[
"MIT"
] | null | null | null |
training.py
|
mirefek/neur-rec-seq
|
6e0ff723b01e290f1da7bb3aabed65c494a875e8
|
[
"MIT"
] | null | null | null |
import os
import sys
import torch
"""
Parameters:
model:
SeqGen instance from model.py
seq:
one-dimensional tensor of symbols
steps:
the number of training steps
optimizer -- default: {'type' : "Adam"}:
dict containing a key "type",
and the other keys corresponding to keyword arguments of the appropriate PyTorch optimizer
clip_norm:
either single number for gradient norm clipping,
or keyword arguments dict for torch.nn.utils.clip_grad_norm_
no clipping if not set (None)
verbose, logf -- default: True, sys.stdout
if verbose is True, continuous loss and mistakes are printed to logf
save_dir, save_each -- default None, 50:
if save_dir is set, then the network and optimizer weights
will be saved to the directory save_dir every "save_each" step
load:
if set to a tuple (load_dir, step), it loads the network and optimizer weights
and continues training from there
"""
def train_seq(model, seq, steps, optimizer = {'type' : "Adam"}, clip_norm = None, verbose = True,
logf = sys.stdout, save_dir = None, save_each = 50, load = None):
if save_dir is not None: os.makedirs(save_dir, exist_ok=True)
model.train()
parameters = tuple(model.parameters())
optimizer_args = dict(optimizer)
optimizer_type = optimizer_args.pop('type')
optimizer = getattr(torch.optim, optimizer_type)(parameters, **optimizer_args)
if load is not None:
load_dir, start_step = load
load_fname = os.path.join(load_dir, "step{}".format(start_step))
model.load_state_dict(torch.load(load_fname))
optimizer.load_state_dict(torch.load(load_fname+"_optimizer"))
else: start_step = 0
out = dict()
def store_out(label, value, step):
if isinstance(value, torch.Tensor):
value = value.detach().item()
if step == 0:
out[label+"_sum"] = value
out[label+"_min"] = value
else:
out[label+"_sum"] += value
out[label+"_min"] = min(value, out[label+"_min"])
out[label+"_last"] = value
def get_out_avg(label):
out[label+"_avg"] = float(out[label+"_sum"]) / steps
del out[label+"_sum"]
for step in range(start_step, start_step + steps):
optimizer.zero_grad()
loss, acc = model.get_loss(seq)
mistakes = len(seq) - acc
store_out('loss', loss, step)
store_out('mistakes', mistakes, step)
if verbose:
logf.write("{} {} {}\n".format(step, loss.item(), mistakes))
logf.flush()
loss.backward()
if clip_norm is not None:
if not isinstance(clip_norm, dict): clip_norm = {'max_norm' : clip_norm}
torch.nn.utils.clip_grad_norm_(parameters, **clip_norm)
optimizer.step()
if save_dir is not None and (step+1) % save_each == 0:
fname = os.path.join(save_dir, "step{}".format(step+1))
torch.save(model.state_dict(), fname)
torch.save(optimizer.state_dict(), fname+"_optimizer")
get_out_avg('loss')
get_out_avg('mistakes')
return out
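# Minimal usage sketch (assumptions: `SeqGen` comes from the model.py mentioned
# in the docstring and its constructor arguments are hypothetical):
#
#     import torch
#     from model import SeqGen
#
#     seq = torch.tensor([0, 1, 0, 2, 0, 1, 0, 3])
#     model = SeqGen(...)                      # constructor args depend on model.py
#     stats = train_seq(model, seq, steps=200,
#                       optimizer={'type': 'Adam', 'lr': 1e-3},
#                       clip_norm=1.0, save_dir='checkpoints')
#     print(stats['loss_avg'], stats['mistakes_min'])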
| 35.896552
| 97
| 0.638809
|
1f265113bfa666c664fe72ca12502f033860de2f
| 486
|
py
|
Python
|
examples/src/dbnd_examples/extensions/custom_output_location.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 224
|
2020-01-02T10:46:37.000Z
|
2022-03-02T13:54:08.000Z
|
examples/src/dbnd_examples/extensions/custom_output_location.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 16
|
2020-03-11T09:37:58.000Z
|
2022-01-26T10:22:08.000Z
|
examples/src/dbnd_examples/extensions/custom_output_location.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 24
|
2020-03-24T13:53:50.000Z
|
2022-03-22T11:55:18.000Z
|
import logging
from dbnd import PythonTask, output, parameter
logger = logging.getLogger()
class GenerateReportToCustomLocation(PythonTask):
_conf__base_output_path_fmt = (
"{root}/{env_label}/reports/{name}/"
"{output_name}{output_ext}/date={task_target_date}"
)
name = parameter.value("report")
report = output[str]
def run(self):
logger.info("Going to write to %s", self.report)
self.report = "Some text in weird location!"
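# Illustrative note (hypothetical values): with the format string above, a run
# in a "prod" environment rooted at s3://bucket/dbnd would place the report
# output at roughly
#     s3://bucket/dbnd/prod/reports/report/report.csv/date=2021-01-01
# root, env_label, task_target_date and the output extension all depend on the
# project configuration.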
| 23.142857
| 59
| 0.674897
|
5040fb49247ae827f97e8c2355d3e53b6d33491f
| 5,287
|
py
|
Python
|
ketiga.py
|
ichsanhizmanhardy/BelajarGIS
|
9672a3fac5bb00fa3e551aa0afb432cdf8c0e6ed
|
[
"MIT"
] | null | null | null |
ketiga.py
|
ichsanhizmanhardy/BelajarGIS
|
9672a3fac5bb00fa3e551aa0afb432cdf8c0e6ed
|
[
"MIT"
] | null | null | null |
ketiga.py
|
ichsanhizmanhardy/BelajarGIS
|
9672a3fac5bb00fa3e551aa0afb432cdf8c0e6ed
|
[
"MIT"
] | null | null | null |
import shapefile
class ketiga:
def __init__(self):
self.ketiga = shapefile.Writer('ketiga', shapeType=shapefile.POLYGON)
self.ketiga.shapeType
self.ketiga.field('nama_ruangan', 'C')
#-------------------- CODING ------------------#
# Ilham Muhammad Ariq 1174087
def tanggaD2(self, nama):
self.ketiga.record(nama)
self.ketiga.poly(
[[[-16, 20], [-19, 20], [-19, 27], [-16, 27], [-16, 20]]])
def r301(self, nama):
self.ketiga.record(nama)
self.ketiga.poly(
[[[-12.4, 20], [-16, 20], [-16, 24], [-12.4, 24], [-12.4, 20]]])
# Alvan Alvanzah 1174077
def r302(self, nama):
self.ketiga.record(nama)
self.ketiga.poly(
[[[-8.8, 20], [-12.4, 20], [-12.4, 24], [-8.8, 24], [-8.8, 20]]])
# Advent Nopele Sihite 1184089
def r304(self, nama):
self.ketiga.record(nama)
self.ketiga.poly(
[[[-1.6, 20], [-5.2, 20], [-5.2, 24], [-1.6, 24], [-1.6, 20]]])
# Difa
def r303(self, nama):
self.ketiga.record(nama)
self.ketiga.poly(
[[[-5.2, 20], [-8.8, 20], [-8.8, 24], [-5.2, 24], [-5.2, 20]]])
# Muhammad Reza Syachrani 1174084
def r307(self, nama):
self.ketiga.record(nama)
self.ketiga.poly(
[[[9.2, 20], [5.6, 20], [5.6, 24], [9.2, 24], [9.2, 20]]])
def r308(self, nama):
self.ketiga.record(nama)
self.ketiga.poly(
[[[12.8, 20], [9.2, 20], [9.2, 24], [12.8, 24], [12.8, 20]]])
# Kaka Kamaludin 1174067
def r305(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[2, 20], [-1.6, 20], [-1.6, 24], [2, 24], [2, 20]]])
def r306(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[5.6, 20], [2, 20], [2, 24], [5.6, 24], [5.6, 20]]])
# Arrizal Furqona Gifary 1174070
def r309(self, nama):
self.ketiga.record(nama)
self.ketiga.poly(
[[[16.4, 20], [12.8, 20], [12.8, 24], [16.4, 24], [16.4, 20]]])
# Fanny Shafira 1174069
def r310(self, nama):
self.ketiga.record(nama)
self.ketiga.poly(
[[[20, 20], [16.4, 20], [16.4, 24], [20, 24], [20, 20]]])
# Chandra Kirana Poetra 1174079
def rwccewek2(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[22, 20], [20, 20], [20, 24], [22, 24], [22, 20]]])
def rwccewek3(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[24, 20], [22, 20], [22, 24], [24, 24], [24, 20]]])
# Mochamad Arifqi Ramadhan 1174074
def tanggaB2(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[27, 20], [24, 20], [24, 27], [27, 27], [27, 20]]])
# Handi Handi Hermawan 1174080
def r311(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[16, 12], [16, 18], [22, 18], [22, 12], [16, 12]]])
# Bakti Qilan Mufid 1174083
def r312(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[16, 6], [16, 12], [22, 12], [22, 6], [16, 6]]])
def tanggaB1(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[27, -3], [24, -3], [24, 4], [27, 4], [27, -3]]])
#Ainul Filiani 1174073
def rwccewek1(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[24, 0], [22, 0], [22, 4], [24, 4], [24, 0]]])
# Aulyardha Anindita 1174054
def rwccowok(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[22, 0], [20, 0], [20, 4], [22, 4], [22, 0]]])
# Nurul Izza Hamka 1174062
def rteknisi(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[20, 0], [14, 0], [14, 4], [20, 4], [20, 0]]])
#Tia Nur Candida 1174086
def r314(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[14, 0], [8, 0], [8, 4], [14, 4], [14, 0]]])
# D.Irga B. Naufal Fakhri 1174066
def r315(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[8, 0], [2, 0], [2, 4], [8, 4], [8, 0]]])
def r316(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[2, 0], [-4, 0], [-4, 4], [2, 4], [2, 0]]])
# Muhammad Abdul Gani Wijaya 1174071
def r319(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[-8, 6], [-13, 6], [-13, 10], [-8, 10], [-8, 6]]])
def r320(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[-8, 10], [-13, 10], [-13, 14], [-8, 14], [-8, 10]]])
    # Alfadian Owen 1174091
def r321(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[-8, 14], [-13, 14], [-13, 18], [-8, 18], [-8, 14]]])
def center(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[12, 7], [12, 17], [-4, 17], [-4, 7], [12, 7]]])
    # Dini Permata Putri 1174053
def r317(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[-4, 0], [-10, 0], [-10, 4], [-4, 4], [-4, 0]]])
def r318(self, nama):
self.ketiga.record(nama)
self.ketiga.poly([[[-10, 0], [-16, 0], [-16, 4], [-10, 4], [-10, 0]]])
    #-------------------- END OF CODE ------------------#
def close(self):
self.ketiga.close()
| 32.042424
| 80
| 0.493475
|
fd34c449ba0fe5a2fc38e4a75672112fa71c82f9
| 7,104
|
py
|
Python
|
results_explorer/models.py
|
open-risk/equinox
|
0503e716b566ff7c776f04a611879f88d86e1cc6
|
[
"Apache-2.0"
] | 10
|
2021-03-21T22:05:33.000Z
|
2022-03-15T18:26:58.000Z
|
results_explorer/models.py
|
open-risk/equinox
|
0503e716b566ff7c776f04a611879f88d86e1cc6
|
[
"Apache-2.0"
] | 2
|
2021-10-30T15:15:41.000Z
|
2021-11-11T12:35:02.000Z
|
results_explorer/models.py
|
open-risk/equinox
|
0503e716b566ff7c776f04a611879f88d86e1cc6
|
[
"Apache-2.0"
] | 1
|
2022-03-16T18:59:36.000Z
|
2022-03-16T18:59:36.000Z
|
# Copyright (c) 2021 Open Risk (https://www.openriskmanagement.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from risk_analysis.Workflows import Workflow
from risk_analysis.Objectives import Playbook
class ResultGroup(models.Model):
"""
Data object holds a group of calculation results
"""
user = models.ForeignKey(User, on_delete=models.CASCADE, default=1)
creation_date = models.DateTimeField(auto_now_add=True)
group_type = models.IntegerField(default=0)
    # The number of results included in the group.
    # Must be manually updated whenever a result is added or deleted.
calculation_count = models.IntegerField(default=0)
    # The playbook that created this result group (if available).
    # NB: result groups can also be formed in ad-hoc ways (e.g. user-defined collections);
    # in that case there is no associated playbook, so standardized reports
    # and visualizations are not available.
playbook = models.ForeignKey(Playbook, on_delete=models.CASCADE, null=True, blank=True,
help_text="Playbook that created this ResultGroup (if any)")
# TODO Does not make strict sense for a collection
calculation_timestamp = models.DateTimeField(default=now)
def __str__(self):
return str(self.pk)
def get_absolute_url(self):
return reverse('admin:results_explorer_result_group_change', kwargs={'pk': self.pk})
class Meta:
verbose_name = "Result Group"
verbose_name_plural = "Result Groups"
class Calculation(models.Model):
"""
    Data object that holds the complete outcome of a workflow calculation as returned by the model
    server. Includes references to the user initiating the calculation and the submitted workflow.
    The logfile holds a log string.
    The result is a JSON object with a flexible structure. Typically:
    'Graph': JSON object (different types)
    'Statistics': JSON object (tabular)
"""
result_group = models.ForeignKey(ResultGroup, on_delete=models.CASCADE, null=True, blank=True,
help_text="Result Group to which this Calculation belong (if any)")
user = models.ForeignKey(User, on_delete=models.CASCADE, default=1)
# The Base Workflow object that was used for the calculation
workflow = models.ForeignKey(Workflow, on_delete=models.CASCADE, default=1)
# The final workflow_data used for the calculation
# In principle starting with the base workflow, performing all the FK embeddings
# and applying the workflow delta should reproduce the workflow data stored here
workflow_data = models.JSONField(null=True, blank=True, help_text="Verbatim storage of the calculation input "
"in JSON format")
# The result object creation time (may differ from the server execution time)
creation_date = models.DateTimeField(auto_now_add=True)
logfile = models.TextField(null=True, blank=True, help_text="Verbatim storage of the calculation logfile")
results_data = models.JSONField(null=True, blank=True, help_text="Verbatim storage of the calculation results "
"in JSON format")
calculation_timestamp = models.DateTimeField(default=now)
def __str__(self):
return str(self.pk)
def get_absolute_url(self):
return reverse('admin:results_explorer_calculation_change', kwargs={'pk': self.pk})
class Meta:
verbose_name = "Result"
verbose_name_plural = "Results"
class Visualization(models.Model):
"""
Data object holds the structural Vega / Vega-Lite specification of a visualization
Includes reference to user creating the Visualization
"""
VISUALIZATION_DATA_CHOICES = [(0, 'Load portfolio data from local JSON files'),
(1, 'Fetch portfolio data via REST API'),
(2, 'Create new portfolio from local JSON configuration'),
(3, 'Fetch portfolio configuration via REST API'),
(4, 'Attached portfolio data in JSON format')]
OBJECTIVE_CHOICE = [(0, 'Portfolio Information'), (1, 'Concentration Risk'), (2, 'Origination'),
(3, 'Risk Appetite'), (4, 'Risk Capital'), (5, 'Other')]
name = models.CharField(max_length=200, help_text="Assigned name to help manage Visualization collections")
user_id = models.ForeignKey(User, on_delete=models.CASCADE, default=1, help_text="The creator of the Visualization")
creation_date = models.DateTimeField(auto_now_add=True)
last_change_date = models.DateTimeField(auto_now=True)
objective = models.IntegerField(default=0, null=True, blank=True, choices=OBJECTIVE_CHOICE,
help_text="Objective fulfilled by the Visualization")
description = models.TextField(null=True, blank=True, help_text="A description of the main purpose and "
"characteristics of the Visualization")
visualization_data_mode = models.IntegerField(default=1, null=True, blank=True, choices=VISUALIZATION_DATA_CHOICES,
help_text="Select the mode for portfolio data inputs")
visualization_data = models.JSONField(null=True, blank=True, help_text="Container for visualization data")
visualization_data_url = models.URLField(null=True, blank=True, help_text="URL for visualization data")
results_url = models.CharField(max_length=200, null=True, blank=True, help_text="Where to store the results")
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('results_explorer:visualization_view', kwargs={'pk': self.pk})
class Meta:
verbose_name = "Visualization"
verbose_name_plural = "Visualizations"
| 47.046358
| 201
| 0.698198
|
037f499114e422513889605df2adf61cde289839
| 2,479
|
py
|
Python
|
bunyip/network/nn.py
|
danhey/Bunyip
|
741d966e85d841d28867f2419e15644969439382
|
[
"MIT"
] | 1
|
2021-11-20T19:02:46.000Z
|
2021-11-20T19:02:46.000Z
|
bunyip/network/nn.py
|
danhey/Bunyip
|
741d966e85d841d28867f2419e15644969439382
|
[
"MIT"
] | null | null | null |
bunyip/network/nn.py
|
danhey/Bunyip
|
741d966e85d841d28867f2419e15644969439382
|
[
"MIT"
] | null | null | null |
# # def initialize_network(self, model_path=None):
# # """ Move this to a class please"""
# # try:
# # from tensorflow.keras.models import load_model
# # from tensorflow.keras.initializers import glorot_uniform
# # from tensorflow.keras.utils import CustomObjectScope
# # except:
# # raise ImportError("You need TensorFlow for this")
# # if model_path is None:
# # import os
# # model_path = os.path.join(os.path.dirname(__file__), "network/RELU_2000_2000_lr=1e-05_norm_insert2000layer-1571628331/NN.h5")
# # with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
# # model = load_model(model_path)
# # return model
# def predict(self, binned_flux, model=None, norm=True):
# """Predict light curve parameters from the neural network
# Parameters
# ----------
# binned_flux : np.ndarray
# Flux values
# model : [type], optional
# [description], by default None
# norm : bool, optional
# [description], by default True
# Returns
# -------
# [type]
# [description]
# """
# if model is None:
# model = self.initialize_network()
# y_hat = model.predict(binned_flux[None,:])
# if norm:
# mu= np.array([
# 0.6205747949371053,
# 0.2374090928468623,
# 0.1891195153173617,
# 1.3006089700783283,
# 69.9643427508551,
# 0.17749621516829056,
# 179.6479435131075,
# ])
# std = np.array([
# 0.22820790194476795,
# 0.08166430725337233,
# 0.05891981424090313,
# 0.4059874833585892,
# 11.465339377838976,
# 0.12821797216376407,
# 103.59690197983575,
# ])
# y_hat = y_hat * std + mu
# return y_hat
# def update_from_network(self, **kwargs):
# """Update light curve parameters from neural network model.
# """
# lc = lk.LightCurve(self.phase, self.flux)
# binned = lc.bin(bins=100).normalize()
# prediction = self.predict(binned.flux, **kwargs)[0]
# self.update_parameters(prediction)
| 35.927536
| 142
| 0.510286
|
518acfbaf83b6f804c80b9db3dcea265eef6a69b
| 3,183
|
py
|
Python
|
src/sage/categories/algebra_modules.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/categories/algebra_modules.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/categories/algebra_modules.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | 1
|
2017-10-08T08:21:23.000Z
|
2017-10-08T08:21:23.000Z
|
r"""
Algebra modules
"""
#*****************************************************************************
# Copyright (C) 2005 David Kohel <kohel@maths.usyd.edu>
# William Stein <wstein@math.ucsd.edu>
# 2008-2009 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.cachefunc import cached_method
from category_types import Category_module
from modules import Modules
class AlgebraModules(Category_module):
"""
The category of modules over a fixed algebra $A$.
EXAMPLES::
sage: AlgebraModules(QQ['a'])
Category of algebra modules over Univariate Polynomial Ring in a over Rational Field
sage: AlgebraModules(QQ['a']).super_categories()
[Category of modules over Univariate Polynomial Ring in a over Rational Field]
Note: as of now, `A` is required to be commutative, ensuring that
the categories of left and right modules are isomorphic. Feedback
    and use cases for potential generalizations to the noncommutative
case are welcome.
"""
def __init__(self, A):
"""
EXAMPLES::
sage: AlgebraModules(QQ['a'])
Category of algebra modules over Univariate Polynomial Ring in a over Rational Field
sage: AlgebraModules(QQ['a,b']) # todo: not implemented (QQ['a,b'] should be in Algebras(QQ))
sage: AlgebraModules(FreeAlgebra(QQ,2,'a,b'))
Traceback (most recent call last):
...
TypeError: A (=Free Algebra on 2 generators (a, b) over Rational Field) must be a commutative algebra
sage: AlgebraModules(QQ)
Traceback (most recent call last):
...
TypeError: A (=Rational Field) must be a commutative algebra
TESTS::
sage: TestSuite(AlgebraModules(QQ['a'])).run()
"""
from sage.categories.commutative_algebras import CommutativeAlgebras
if not hasattr(A, "base_ring") or not A in CommutativeAlgebras(A.base_ring()):
raise TypeError("A (=%s) must be a commutative algebra"%A)
Category_module.__init__(self, A)
@classmethod
def an_instance(cls):
"""
Returns an instance of this class
EXAMPLES::
sage: AlgebraModules.an_instance()
Category of algebra modules over Univariate Polynomial Ring in x over Rational Field
"""
from sage.rings.rational_field import QQ
return cls(QQ['x'])
def algebra(self):
"""
EXAMPLES::
sage: AlgebraModules(QQ[x]).algebra()
Univariate Polynomial Ring in x over Rational Field
"""
return self.base_ring()
def super_categories(self):
"""
EXAMPLES::
sage: AlgebraModules(QQ[x]).super_categories()
[Category of modules over Univariate Polynomial Ring in x over Rational Field]
"""
R = self.algebra()
return [Modules(R)]
| 35.366667
| 113
| 0.591894
|
c741562cabf588d6a1002d7c5a35ae6b2064a558
| 1,766
|
py
|
Python
|
src/pages/models.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/pages/models.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/pages/models.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
from django.db import models
from django.utils import timezone
from xarala.utils import upload_image_path
class Subscribe(models.Model):
email_id = models.EmailField(null=True, blank=True)
timestamp = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.email_id
class Carousel(models.Model):
title = models.CharField(max_length=150)
link_to = models.URLField(max_length=200)
link_text = models.CharField(max_length=50)
image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
active = models.BooleanField(default=False)
def __str__(self):
return self.title
class Contact(models.Model):
full_name = models.CharField(max_length=150)
email = models.EmailField(max_length=150)
phone = models.CharField(max_length=150)
rule = models.CharField(max_length=150, blank=True, null=True)
enterprise = models.CharField(max_length=150, blank=True, null=True)
message = models.TextField(blank=True)
def __str__(self):
return f"{self.full_name} : {self.email} - {self.phone}"
class Team(models.Model):
first_name = models.CharField(max_length=150)
last_name = models.CharField(max_length=150)
email = models.CharField(max_length=150)
phone = models.CharField(max_length=150)
profession = models.CharField(max_length=150)
profile = models.ImageField(upload_to=upload_image_path, blank=True, null=True)
bio = models.TextField()
website = models.URLField(max_length=150, blank=True, null=True)
facebook = models.URLField(max_length=150, blank=True, null=True)
twitter = models.URLField(max_length=150, blank=True, null=True)
def __str__(self):
return f"{self.first_name} {self.last_name}"
| 34.627451
| 83
| 0.730464
|
ba8823f820b9e3ad83005058bead3a9e5dc97965
| 942
|
py
|
Python
|
backendapp/common/migrations/0021_auto_20200403_1230.py
|
finebrush/takeatripsFB
|
85a5be1a2ee68531f04f2601a3f69ddc608d4d27
|
[
"BSD-3-Clause"
] | null | null | null |
backendapp/common/migrations/0021_auto_20200403_1230.py
|
finebrush/takeatripsFB
|
85a5be1a2ee68531f04f2601a3f69ddc608d4d27
|
[
"BSD-3-Clause"
] | 13
|
2020-02-12T03:05:15.000Z
|
2022-02-10T14:26:50.000Z
|
backendapp/common/migrations/0021_auto_20200403_1230.py
|
finebrush/takeatripsFB
|
85a5be1a2ee68531f04f2601a3f69ddc608d4d27
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2.7 on 2020-04-03 03:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0020_auto_20200325_1716'),
]
operations = [
migrations.AlterModelOptions(
name='pinbuy',
options={'ordering': ('id',), 'verbose_name': 'PinBuy', 'verbose_name_plural': 'PinBuy'},
),
migrations.AlterModelOptions(
name='pindrink',
options={'ordering': ('id',), 'verbose_name': 'PinDrink', 'verbose_name_plural': 'PinDrink'},
),
migrations.AlterModelOptions(
name='pineat',
options={'ordering': ('id',), 'verbose_name': 'PinEat', 'verbose_name_plural': 'PinEat'},
),
migrations.AlterModelOptions(
name='pinfun',
options={'ordering': ('id',), 'verbose_name': 'PinFun', 'verbose_name_plural': 'PinFun'},
),
]
| 31.4
| 105
| 0.573248
|
17da53b904edc8606f6d527cdc08c16e545e323a
| 4,624
|
py
|
Python
|
examples/goulburn-deconvolution/deconvolve-timeseries.py
|
agriff86/rd-deconvolve
|
6d7772674886fe7391f66d6f89c03aca5e73d226
|
[
"MIT"
] | null | null | null |
examples/goulburn-deconvolution/deconvolve-timeseries.py
|
agriff86/rd-deconvolve
|
6d7772674886fe7391f66d6f89c03aca5e73d226
|
[
"MIT"
] | null | null | null |
examples/goulburn-deconvolution/deconvolve-timeseries.py
|
agriff86/rd-deconvolve
|
6d7772674886fe7391f66d6f89c03aca5e73d226
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
"""
Deconvolve radon observations from a Goulburn field campaign (700L detector)
"""
import sys
import os
EXAMPLE_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('PDF') # prevent graphics from being displayed
import matplotlib as mpl
import matplotlib.pyplot as plt
import datetime
try:
import rddeconv
except ImportError:
# assume we're running from within source tree but don't want to install
sys.path.append(PROJECT_DIR)
import rddeconv
from rddeconv.emcee_deconvolve_tm import emcee_deconvolve_tm, lamrn
def load_glb(fname_glb, missing_value=500):
"""load Goulburn radon data"""
df_glb = pd.read_csv(fname_glb)
df_glb.columns = [itm.strip().lower() for itm in df_glb.columns]
df_glb['time'] = [datetime.datetime.strptime(itm, '%H:%M').time() for itm in df_glb.time]
time = [ datetime.datetime.combine(datetime.date(int(itm[1]['year']),
int(itm[1]['month']),
int(itm[1]['dom'])),
itm[1]['time']) for itm in df_glb.iterrows()]
df_glb.index = time
#clean up negative values
df_glb.loc[df_glb.lld<0, 'lld'] = missing_value
return df_glb.copy()
def test_df_deconvolve_goulburn(nproc, one_night_only=False):
"""
run the deconvolution method
1. load/munge data
2. set instrument parameters and priors
3. run deconvolution
"""
#
# ... load/munge data
#
df = load_glb(fname_glb=os.path.join(EXAMPLE_DIR,'raw-data/Goulburn_Nov_2011_Internal_DB_v01_raw.csv'))
# drop problematic first value (lld=1)
df.lld.iloc[0] = np.NaN
df = df.dropna(subset=['lld'])
# we want the air temperature to be *at* the report time, rather than an
# average over each half hour
atv = df.airt.values.copy()
df.airt = np.r_[(atv[1:] + atv[:-1])/2.0, atv[-1]]
# and convert to K
df.airt += 273.15
# drop the calibration period
df = df.loc[datetime.datetime(2011, 11, 2, 18):]
# drop the bad data at the end of the record
df = df.loc[:datetime.datetime(2011, 11, 10, 12)]
#
# ... set instrument parameters
#
parameters = dict()
parameters.update(rddeconv.util.standard_parameters)
parameters.update(dict(
Q = 0.0122,
rs = 0.9,
lamp = 1/180.0,
eff = 0.14539,
Q_external = 40.0 / 60.0 / 1000.0,
V_delay = 200.0 / 1000.0,
V_tank = 750.0 / 1000.0,
recoil_prob = 0.02,
t_delay = 60.0,
interpolation_mode = 1,
expected_change_std = 1.25,
transform_radon_timeseries = True))
parameters['recoil_prob'] = 0.5*(1-parameters['rs'])
parameters['t_delay'] = 30.0
# place a constraint on the net efficiency
parameters['total_efficiency'] = 0.154 # from Scott's cal
parameters['total_efficiency_frac_error'] = 0.05
parameters['expected_change_std'] = 1.05 # for TESTING
parameters['expected_change_std'] = 1.25
# note: using default priors, defined in emcee_deconvolve_tm
if one_night_only:
df = df.head(48 * nproc)
chunksize = 43
overlap = 12
dfobs = df.copy()
#
# ... run deconvolution
#
df = emcee_deconvolve_tm(df,
iterations=3000, #3000, # try e.g. 3000
thin=100, #100, # also big, e.g. 100
chunksize=chunksize,
overlap=overlap,
model_parameters=parameters,
nproc=nproc,
nthreads=1,
stop_on_error=True)
df = df.join(dfobs)
return df
if __name__ == "__main__":
#
# ... check on emcee version
#
import emcee
print("EMCEE sampler, version: {}".format(emcee.__version__))
os.chdir(EXAMPLE_DIR)
df = test_df_deconvolve_goulburn(nproc=20, one_night_only=True)
df.to_csv('tm_deconvolution_glb.csv')
# save a picture comparing raw (lld_scaled) with deconvolved (lld_mean) obs
fig, ax = plt.subplots()
# plot, with conversion to Bq/m3 from atoms/m3
(df[['lld_scaled','lld_mean']]*lamrn).plot(ax=ax)
ax.set_ylabel('Radon concentration inside detector (Bq/m3)')
fig.savefig('tm_deconvolution_glb.png')
| 30.622517
| 107
| 0.598832
|
03df714fd4e2d47b0c7d5d7045b4588104dc83cd
| 20,926
|
py
|
Python
|
localstack/services/cloudformation/models/ec2.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
localstack/services/cloudformation/models/ec2.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
localstack/services/cloudformation/models/ec2.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
import json
from typing import Callable
from moto.ec2.utils import generate_route_id
from localstack.services.cloudformation.deployment_utils import generate_default_name
from localstack.services.cloudformation.service_models import REF_ID_ATTRS, GenericBaseModel
from localstack.utils.aws import aws_stack
from localstack.utils.strings import str_to_bool
class EC2RouteTable(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::RouteTable"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
tags_filters = map(
lambda tag: {"Name": f"tag:{tag.get('Key')}", "Values": [tag.get("Value")]},
self.props.get("Tags") or [],
)
filters = [
{"Name": "vpc-id", "Values": [self.props["VpcId"]]},
{"Name": "association.main", "Values": ["false"]},
]
filters.extend(tags_filters)
route_tables = client.describe_route_tables(Filters=filters)["RouteTables"]
return (route_tables or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.physical_resource_id or self.props.get("RouteTableId")
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_route_table",
"parameters": {
"VpcId": "VpcId",
"TagSpecifications": get_tags_param("route-table"),
},
},
"delete": {
"function": "delete_route_table",
"parameters": {"RouteTableId": "RouteTableId"},
},
}
class EC2Route(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::Route"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
dst_cidr = self.resolve_refs_recursively(
stack_name, props.get("DestinationCidrBlock"), resources
)
dst_cidr6 = self.resolve_refs_recursively(
stack_name, props.get("DestinationIpv6CidrBlock"), resources
)
table_id = self.resolve_refs_recursively(stack_name, props.get("RouteTableId"), resources)
route_tables = client.describe_route_tables()["RouteTables"]
route_table = ([t for t in route_tables if t["RouteTableId"] == table_id] or [None])[0]
if route_table:
routes = route_table.get("Routes", [])
route = [
r
for r in routes
if r.get("DestinationCidrBlock") == (dst_cidr or "_not_set_")
or r.get("DestinationIpv6CidrBlock") == (dst_cidr6 or "_not_set_")
]
return (route or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
props = self.props
return generate_route_id(
props.get("RouteTableId"),
props.get("DestinationCidrBlock"),
props.get("DestinationIpv6CidrBlock"),
)
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_route",
"parameters": ["DestinationCidrBlock", "DestinationIpv6CidrBlock", "RouteTableId"],
},
"delete": {
"function": "delete_route",
"parameters": ["DestinationCidrBlock", "DestinationIpv6CidrBlock", "RouteTableId"],
},
}
class EC2InternetGateway(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::InternetGateway"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
gateways = client.describe_internet_gateways()["InternetGateways"]
tags = self.props.get("Tags")
gateway = [g for g in gateways if (g.get("Tags") or []) == (tags or [])]
return (gateway or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get("InternetGatewayId")
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_internet_gateway",
"parameters": {"TagSpecifications": get_tags_param("internet-gateway")},
}
}
class EC2SubnetRouteTableAssociation(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::SubnetRouteTableAssociation"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
table_id = self.resolve_refs_recursively(stack_name, props.get("RouteTableId"), resources)
gw_id = self.resolve_refs_recursively(stack_name, props.get("GatewayId"), resources)
route_tables = client.describe_route_tables()["RouteTables"]
route_table = ([t for t in route_tables if t["RouteTableId"] == table_id] or [None])[0]
subnet_id = self.resolve_refs_recursively(stack_name, props.get("SubnetId"), resources)
if route_table:
associations = route_table.get("Associations", [])
association = [a for a in associations if a.get("GatewayId") == gw_id]
if subnet_id:
association = [a for a in associations if a.get("SubnetId") == subnet_id]
return (association or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get("RouteTableAssociationId")
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "associate_route_table",
"parameters": {
"GatewayId": "GatewayId",
"RouteTableId": "RouteTableId",
"SubnetId": "SubnetId",
},
},
"delete": {
"function": "disassociate_route_table",
"parameters": {"AssociationId": "RouteTableAssociationId"},
},
}
class EC2VPCGatewayAttachment(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::VPCGatewayAttachment"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
igw_id = self.resolve_refs_recursively(
stack_name, props.get("InternetGatewayId"), resources
)
vpngw_id = self.resolve_refs_recursively(stack_name, props.get("VpnGatewayId"), resources)
gateways = []
if igw_id:
gateways = client.describe_internet_gateways()["InternetGateways"]
gateways = [g for g in gateways if g["InternetGatewayId"] == igw_id]
elif vpngw_id:
gateways = client.describe_vpn_gateways()["VpnGateways"]
gateways = [g for g in gateways if g["VpnGatewayId"] == vpngw_id]
gateway = (gateways or [{}])[0]
attachments = gateway.get("Attachments") or gateway.get("VpcAttachments") or []
result = [a for a in attachments if a.get("State") in ("attached", "available")]
if result:
return gateway
def get_physical_resource_id(self, attribute=None, **kwargs):
props = self.props
gw_id = props.get("VpnGatewayId") or props.get("InternetGatewayId")
attachment = (props.get("Attachments") or props.get("VpcAttachments") or [{}])[0]
if attachment:
result = "%s-%s" % (gw_id, attachment.get("VpcId"))
return result
@classmethod
def get_deploy_templates(cls):
def _attach_gateway(resource_id, resources, *args, **kwargs):
client = aws_stack.connect_to_service("ec2")
resource = cls(resources[resource_id])
props = resource.props
igw_id = props.get("InternetGatewayId")
vpngw_id = props.get("VpnGatewayId")
vpc_id = props.get("VpcId")
if igw_id:
client.attach_internet_gateway(VpcId=vpc_id, InternetGatewayId=igw_id)
elif vpngw_id:
client.attach_vpn_gateway(VpcId=vpc_id, VpnGatewayId=vpngw_id)
return {"create": {"function": _attach_gateway}}
class SecurityGroup(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::SecurityGroup"
def fetch_state(self, stack_name, resources):
props = self.props
group_id = props.get("GroupId")
group_name = props.get("GroupName")
client = aws_stack.connect_to_service("ec2")
if group_id:
resp = client.describe_security_groups(GroupIds=[group_id])
else:
resp = client.describe_security_groups(GroupNames=[group_name])
return (resp["SecurityGroups"] or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
if self.physical_resource_id:
return self.physical_resource_id
if attribute in REF_ID_ATTRS:
props = self.props
return props.get("GroupId") or props.get("GroupName")
@staticmethod
def add_defaults(resource, stack_name: str):
role_name = resource.get("Properties", {}).get("GroupName")
if not role_name:
resource["Properties"]["GroupName"] = generate_default_name(
stack_name, resource["LogicalResourceId"]
)
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_security_group",
"parameters": {
"GroupName": "GroupName",
"VpcId": "VpcId",
"Description": "GroupDescription",
},
},
"delete": {
"function": "delete_security_group",
"parameters": {"GroupId": "PhysicalResourceId"},
},
}
class EC2Subnet(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::Subnet"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
filters = [
{"Name": "cidr-block", "Values": [props["CidrBlock"]]},
{"Name": "vpc-id", "Values": [props["VpcId"]]},
]
subnets = client.describe_subnets(Filters=filters)["Subnets"]
return (subnets or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get("SubnetId")
@classmethod
def get_deploy_templates(cls):
def _post_create(resource_id, resources, resource_type, func, stack_name):
client = aws_stack.connect_to_service("ec2")
resource = cls(resources[resource_id])
props = resource.props
bool_attrs = [
"AssignIpv6AddressOnCreation",
"EnableDns64",
"MapPublicIpOnLaunch",
]
custom_attrs = bool_attrs + ["PrivateDnsNameOptionsOnLaunch"]
if not any(attr in props for attr in custom_attrs):
return
state = resource.fetch_state(stack_name, resources)
subnet_id = state.get("SubnetId")
# update boolean attributes
for attr in bool_attrs:
if attr in props:
kwargs = {attr: {"Value": str_to_bool(props[attr])}}
client.modify_subnet_attribute(SubnetId=subnet_id, **kwargs)
# determine DNS hostname type on launch
dns_options = props.get("PrivateDnsNameOptionsOnLaunch")
if dns_options:
if isinstance(dns_options, str):
dns_options = json.loads(dns_options)
if dns_options.get("HostnameType"):
client.modify_subnet_attribute(
SubnetId=subnet_id,
PrivateDnsHostnameTypeOnLaunch=dns_options.get("HostnameType"),
)
return {
"create": [
{
"function": "create_subnet",
"parameters": [
"AvailabilityZone",
"AvailabilityZoneId",
"CidrBlock",
"Ipv6CidrBlock",
"Ipv6Native",
"OutpostArn",
{"TagSpecifications": get_tags_param("subnet")},
"VpcId",
],
},
{"function": _post_create},
],
"delete": {
"function": "delete_subnet",
"parameters": {"SubnetId": "PhysicalResourceId"},
},
}
class EC2VPC(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::VPC"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
resp = client.describe_vpcs(Filters=[{"Name": "cidr", "Values": [self.props["CidrBlock"]]}])
return (resp["Vpcs"] or [None])[0]
def get_cfn_attribute(self, attribute_name):
ec2_client = aws_stack.connect_to_service("ec2")
vpc_id = self.state["VpcId"]
if attribute_name == "DefaultSecurityGroup":
sgs = ec2_client.describe_security_groups(
Filters=[
{"Name": "group-name", "Values": ["default"]},
{"Name": "vpc-id", "Values": [vpc_id]},
]
)["SecurityGroups"]
if len(sgs) != 1:
raise Exception(f"There should only be one default group for this VPC ({vpc_id=})")
return sgs[0]["GroupId"]
elif attribute_name == "DefaultNetworkAcl":
acls = ec2_client.describe_network_acls(
Filters=[
{"Name": "default", "Values": ["true"]},
{"Name": "vpc-id", "Values": [vpc_id]},
]
)["NetworkAcls"]
if len(acls) != 1:
raise Exception(
f"There should only be one default network ACL for this VPC ({vpc_id=})"
)
return acls[0]["NetworkAclId"]
else:
return super(EC2VPC, self).get_cfn_attribute(attribute_name)
@classmethod
def get_deploy_templates(cls):
def _pre_delete(resource_id, resources, *args, **kwargs):
res = cls(resources[resource_id])
vpc_id = res.state.get("VpcId")
if vpc_id:
ec2_client = aws_stack.connect_to_service("ec2")
resp = ec2_client.describe_route_tables(
Filters=[
{"Name": "vpc-id", "Values": [vpc_id]},
{"Name": "association.main", "Values": ["false"]},
]
)
for rt in resp["RouteTables"]:
for assoc in rt.get("Associations", []):
# skipping Main association (upstream moto includes default association that cannot be deleted)
if assoc.get("Main"):
continue
ec2_client.disassociate_route_table(
AssociationId=assoc["RouteTableAssociationId"]
)
ec2_client.delete_route_table(RouteTableId=rt["RouteTableId"])
return {
"create": {
"function": "create_vpc",
"parameters": {
"CidrBlock": "CidrBlock",
"InstanceTenancy": "InstanceTenancy",
"TagSpecifications": get_tags_param("vpc"),
},
},
"delete": [
{"function": _pre_delete},
{
"function": "delete_vpc",
"parameters": {"VpcId": "PhysicalResourceId"},
},
],
}
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.physical_resource_id or self.props.get("VpcId")
class EC2NatGateway(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::NatGateway"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
subnet_id = self.resolve_refs_recursively(stack_name, props.get("SubnetId"), resources)
assoc_id = self.resolve_refs_recursively(stack_name, props.get("AllocationId"), resources)
result = client.describe_nat_gateways(
Filters=[{"Name": "subnet-id", "Values": [subnet_id]}]
)
result = result["NatGateways"]
result = [
gw
for gw in result
if assoc_id in [ga["AllocationId"] for ga in gw["NatGatewayAddresses"]]
]
return (result or [None])[0]
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_nat_gateway",
"parameters": {
"SubnetId": "SubnetId",
"AllocationId": "AllocationId",
"TagSpecifications": get_tags_param("natgateway"),
},
},
"delete": {
"function": "delete_nat_gateway",
"parameters": {"NatGatewayId": "PhysicalResourceId"},
},
}
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.physical_resource_id or self.props.get("NatGatewayId")
class EC2Instance(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::Instance"
def fetch_state(self, stack_name, resources):
instance_id = self.get_physical_resource_id()
if not instance_id:
return
return self._get_state()
def update_resource(self, new_resource, stack_name, resources):
instance_id = self.get_physical_resource_id()
props = new_resource["Properties"]
groups = props.get("SecurityGroups", props.get("SecurityGroupIds"))
client = aws_stack.connect_to_service("ec2")
kwargs = {}
if groups:
kwargs["Groups"] = groups
client.modify_instance_attribute(
InstanceId=instance_id,
InstanceType={"Value": props["InstanceType"]},
**kwargs,
)
return self._get_state(client)
def _get_state(self, client=None):
instance_id = self.get_physical_resource_id()
client = client or aws_stack.connect_to_service("ec2")
resp = client.describe_instances(InstanceIds=[instance_id])
reservation = (resp.get("Reservations") or [{}])[0]
result = (reservation.get("Instances") or [None])[0]
return result
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.physical_resource_id or self.props.get("InstanceId")
def get_cfn_attribute(self, attribute_name):
if attribute_name in REF_ID_ATTRS:
return self.props.get("InstanceId")
if attribute_name == "PublicIp":
return self.props.get("PublicIpAddress") or "127.0.0.1"
if attribute_name == "PublicDnsName":
return self.props.get("PublicDnsName")
if attribute_name == "AvailabilityZone":
return (
self.props.get("Placement", {}).get("AvailabilityZone")
or f"{aws_stack.get_region()}a"
)
return super(EC2Instance, self).get_cfn_attribute(attribute_name)
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_instances",
"parameters": {
"InstanceType": "InstanceType",
"SecurityGroups": "SecurityGroups",
"KeyName": "KeyName",
"ImageId": "ImageId",
},
"defaults": {"MinCount": 1, "MaxCount": 1},
},
"delete": {
"function": "terminate_instances",
"parameters": {
"InstanceIds": lambda params, **kw: [
kw["resources"][kw["resource_id"]]["PhysicalResourceId"]
]
},
},
}
def get_tags_param(resource_type: str) -> Callable:
"""Return a tag parameters creation function for the given resource type"""
def _param(params, **kwargs):
tags = params.get("Tags")
if not tags:
return None
return [{"ResourceType": resource_type, "Tags": tags}]
return _param
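A short illustration of what the tag-parameter factory above produces (an editor-added sketch with made-up values; inside LocalStack the returned callable is invoked by the CloudFormation deployment machinery rather than called directly):

    vpc_tags = get_tags_param("vpc")
    print(vpc_tags({"Tags": [{"Key": "Name", "Value": "my-vpc"}]}))
    # [{'ResourceType': 'vpc', 'Tags': [{'Key': 'Name', 'Value': 'my-vpc'}]}]
    print(vpc_tags({}))
    # None, so no TagSpecifications argument is sent when a resource has no Tags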
| 37.636691
| 119
| 0.565469
|
e657d4fdad62aeb0a94d824adb5e00481f3d9c85
| 679
|
py
|
Python
|
bot/settings.py
|
mcurranseijo/Friendo_Bot
|
2b05fc20002f85702e908712b364b5f41653aaae
|
[
"MIT"
] | 1
|
2020-11-12T02:34:19.000Z
|
2020-11-12T02:34:19.000Z
|
bot/settings.py
|
mcurranseijo/Friendo_Bot
|
2b05fc20002f85702e908712b364b5f41653aaae
|
[
"MIT"
] | null | null | null |
bot/settings.py
|
mcurranseijo/Friendo_Bot
|
2b05fc20002f85702e908712b364b5f41653aaae
|
[
"MIT"
] | null | null | null |
"""Constants for the bot."""
import os
from pathlib import Path
TOKEN = os.environ.get("FRIENDO_TOKEN")
MEME_USERNAME = os.environ.get("MEME_USERNAME")
MEME_PASSWORD = os.environ.get("MEME_PASSWORD")
# event api key
EVENT_API_KEY = os.environ.get("EVENT_API_KEY")
WEATHER_TOKEN = os.environ.get("WEATHER_TOKEN")
COMMAND_PREFIX = "."
VERSION = "1.2."
NAME = "Friendo"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
IMG_CACHE = Path(BASE_DIR, "image_cache")
BASE_GITHUB_REPO = "https://github.com/fisher60/Friendo_Bot"
LOG_FILE_NAME = "friendo.log"
LOG_FILE_PATH = Path(BASE_DIR, "logs")
API_COGS = ["events", "memes"]
| 18.351351
| 60
| 0.709867
|
b9c04c635bab1c0cbdc4ebe260f2adc7f9a5ce28
| 24,247
|
py
|
Python
|
flask1/.venv/Lib/site-packages/pip/_vendor/distlib/version.py
|
kerwin-yang-yang/Bigplatform
|
575ebb3c494ce28e820ab5ad0a91f194451eec76
|
[
"MIT"
] | 32
|
2021-05-03T09:03:57.000Z
|
2022-03-17T09:18:59.000Z
|
flask1/.venv/Lib/site-packages/pip/_vendor/distlib/version.py
|
kerwin-yang-yang/Bigplatform
|
575ebb3c494ce28e820ab5ad0a91f194451eec76
|
[
"MIT"
] | 4
|
2021-05-29T20:42:52.000Z
|
2022-03-16T03:01:12.000Z
|
flask1/.venv/Lib/site-packages/pip/_vendor/distlib/version.py
|
kerwin-yang-yang/Bigplatform
|
575ebb3c494ce28e820ab5ad0a91f194451eec76
|
[
"MIT"
] | 27
|
2021-11-10T08:44:10.000Z
|
2022-03-30T08:19:46.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-440,
setuptools-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
from .util import parse_requirement
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
# this is a method only to support alternative implementations
# via overriding
def parse_requirement(self, s):
return parse_requirement(s)
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
r = self.parse_requirement(s)
if not r:
raise ValueError('Not valid: %r' % s)
self.name = r.name
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if r.constraints:
# import pdb; pdb.set_trace()
for op, s in r.constraints:
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
:type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
1 # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
(re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
those version during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
# version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
# by zero-filling numeric values so simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
# See issue #140. Be tolerant of a single trailing comma.
if s.endswith(','):
s = s[:-1]
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
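# Usage sketch (illustrative only, based on the API defined above): get_scheme() returns a
# VersionScheme whose key/matcher/suggester implement the chosen rules.
if __name__ == '__main__':  # pragma: no cover
    scheme = get_scheme('normalized')
    print(scheme.is_valid_version('1.0.post1'))             # True: a normalized post-release
    print(scheme.is_valid_matcher('pkg (>= 1.0, < 2.0)'))   # True: constraint list in parentheses
    print(scheme.suggest('1.0preview123'))                  # '1.0c123', per the rewrites commented above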
| 32.766216
| 79
| 0.526416
|
6be590537723bb01f208468970da40e8750b86f8
| 696
|
py
|
Python
|
src/pythonmysequel/Table.py
|
jasonli0616/PythonMySequel
|
692abc4517f060683fb34bc304e1bc3b06d2dc7f
|
[
"MIT"
] | null | null | null |
src/pythonmysequel/Table.py
|
jasonli0616/PythonMySequel
|
692abc4517f060683fb34bc304e1bc3b06d2dc7f
|
[
"MIT"
] | null | null | null |
src/pythonmysequel/Table.py
|
jasonli0616/PythonMySequel
|
692abc4517f060683fb34bc304e1bc3b06d2dc7f
|
[
"MIT"
] | null | null | null |
'''
This class represents a table in the database
It is mostly used as a parameter for pythonmysequel.Connection methods
'''
from pythonmysequel.values import _ValueType
class Table:
def __init__(self,
table_name:str, **values:_ValueType) -> None:
self.table_name = table_name
self.values = values
self.primary_key = None
self._has_primary_key()
def _has_primary_key(self) -> bool:
'''Returns whether or not there is a primary key column (bool)'''
for key, value in self.values.items():
if 'PRIMARY KEY' in value.options:
self.primary_key = key
return True
return False
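# Usage sketch (illustrative; the value-type names and constructor arguments below are
# assumptions, the real classes in pythonmysequel.values may differ):
#
#   from pythonmysequel.values import Int, String    # hypothetical imports
#   users = Table('users',
#                 id=Int(options=['PRIMARY KEY']),    # any value whose .options contains 'PRIMARY KEY'
#                 email=String())
#   users.primary_key   # -> 'id', set by _has_primary_key() in __init__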
| 31.636364
| 73
| 0.635057
|
85264a0d7e9ec06536acb5da5591ce3a39b5ca9f
| 1,630
|
py
|
Python
|
app/models.py
|
Danish15/RamenBowlBlog
|
a8c86ea2627a9b323feae651a7565768b6666a97
|
[
"MIT"
] | null | null | null |
app/models.py
|
Danish15/RamenBowlBlog
|
a8c86ea2627a9b323feae651a7565768b6666a97
|
[
"MIT"
] | null | null | null |
app/models.py
|
Danish15/RamenBowlBlog
|
a8c86ea2627a9b323feae651a7565768b6666a97
|
[
"MIT"
] | null | null | null |
from werkzeug.security import generate_password_hash, check_password_hash
from . import db
from flask_login import UserMixin
from . import login_manager
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
password_hash = db.Column(db.String(128))
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<Role %r>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<User %r>' % self.username
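# Usage sketch (illustrative; assumes an application and database session are configured elsewhere):
#
#   u = User(email='alice@example.com', username='alice')
#   u.password = 'cat'          # stored only as a hash via the write-only property above
#   u.verify_password('cat')    # True
#   u.verify_password('dog')    # False
#   u.password                  # raises AttributeError('password is not a readable attribute')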
| 30.754717
| 73
| 0.710429
|
c06e053bad901ac22724019c413dbb60356201cf
| 655
|
py
|
Python
|
src/realtweetornotbot/utils/urlutils.py
|
giulionf/realtweetornotbot
|
f9107fe0cb32849b8d0827c49797b9353468ebb3
|
[
"MIT"
] | 84
|
2019-02-11T03:06:58.000Z
|
2022-02-10T08:44:41.000Z
|
src/realtweetornotbot/utils/urlutils.py
|
giulionf/realtweetornotbot
|
f9107fe0cb32849b8d0827c49797b9353468ebb3
|
[
"MIT"
] | 42
|
2018-12-27T12:55:08.000Z
|
2022-01-25T11:40:10.000Z
|
src/realtweetornotbot/utils/urlutils.py
|
giulionf/realtweetornotbot
|
f9107fe0cb32849b8d0827c49797b9353468ebb3
|
[
"MIT"
] | 7
|
2019-07-02T07:46:20.000Z
|
2021-12-22T22:01:39.000Z
|
import requests
IMAGE_FORMATS = ("image/png", "image/jpeg", "image/jpg", "image/webp")
class UrlUtils:
""" Helper class for URLs """
@staticmethod
def is_imgur_url(url):
""" Returns true, if an image url is an IMGUR image or album """
return "imgur.com" in url
@staticmethod
def is_image_url(url):
""" Returns true if the url is to an image file """
try:
r = requests.head(url)
if r.headers.get("content-type") in IMAGE_FORMATS:
return True
except requests.exceptions.MissingSchema:
print("Missing Schema Exception")
return False
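# Usage sketch (illustrative; is_image_url() issues a real HEAD request, so the second call
# needs network access and a server that reports an image content-type):
if __name__ == "__main__":
    print(UrlUtils.is_imgur_url("https://imgur.com/gallery/abc123"))  # True: hostname check only
    print(UrlUtils.is_image_url("https://example.com/picture.png"))   # depends on the reported content-type
    print(UrlUtils.is_image_url("not-a-url"))                         # False: MissingSchema is caught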
| 27.291667
| 72
| 0.598473
|
b3191b03ae5358cd34952cc197eb2784f6d620d8
| 1,921
|
py
|
Python
|
pythainlp/cli/tagging.py
|
Subarna578/pythainlp
|
9650a40396719284add17bb09f50e948dea41053
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/cli/tagging.py
|
Subarna578/pythainlp
|
9650a40396719284add17bb09f50e948dea41053
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/cli/tagging.py
|
Subarna578/pythainlp
|
9650a40396719284add17bb09f50e948dea41053
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from pythainlp import cli
from pythainlp.tag import pos_tag
class SubAppBase:
def __init__(self, name, argv):
parser = argparse.ArgumentParser(name)
parser.add_argument(
"--text",
type=str,
help="input text",
)
parser.add_argument(
"--engine",
type=str,
help="default: %s" % self.default_engine,
default=self.default_engine
)
        parser.add_argument(
            "--corpus",
            type=str,
            help="default: %s" % self.default_corpus,
            default=self.default_corpus
        )
parser.add_argument(
'--sep',
type=str,
help="default: %s" % self.default_sep,
default=self.default_sep
)
args = parser.parse_args(argv)
print(f"Using engine={args.engine}")
self.args = args
result = self.run(
args.text.split(args.sep), engine=args.engine, corpus=args.corpus
)
result_str = map(lambda x: "%s/%s" % x, result)
print(" ".join(result_str))
class POSTaggingApp(SubAppBase):
def __init__(self, *args, **kwargs):
self.default_engine = "perceptron"
self.default_corpus = "orchid"
self.default_sep = "|"
self.run = pos_tag
super().__init__(*args, **kwargs)
class App:
def __init__(self, argv):
parser = argparse.ArgumentParser(**cli.make_usage("tagging"))
parser.add_argument(
"command",
type=str,
nargs="?",
help="[pos]"
)
args = parser.parse_args(argv[2:3])
command = args.command
cli.exit_if_empty(args.command, parser)
argv = argv[3:]
if command == "pos":
POSTaggingApp("Part-of-Speech tagging", argv)
else:
raise ValueError(f"no command:{subcommand}")
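# Usage sketch (illustrative; the console-script name 'thainlp' is an assumption):
# App() reads the sub-command from argv[2] and forwards argv[3:] to POSTaggingApp, so a
# direct call looks like:
#
#   App(['thainlp', 'tagging', 'pos', '--text', 'ผม|กิน|ข้าว', '--sep', '|'])
#
# which splits the text on '|' and prints word/TAG pairs produced by pos_tag.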
| 23.144578
| 77
| 0.534097
|
45cd4f80446ea5ff6b72328032b7dc9f328f9d24
| 3,811
|
py
|
Python
|
deepctr/estimator/models/wdl.py
|
dzzxjl/DeepCTR
|
ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e
|
[
"Apache-2.0"
] | 6,192
|
2017-12-05T03:02:35.000Z
|
2022-03-31T20:59:30.000Z
|
deepctr/estimator/models/wdl.py
|
dzzxjl/DeepCTR
|
ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e
|
[
"Apache-2.0"
] | 362
|
2018-04-15T06:53:20.000Z
|
2022-03-21T15:03:02.000Z
|
deepctr/estimator/models/wdl.py
|
dzzxjl/DeepCTR
|
ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e
|
[
"Apache-2.0"
] | 1,960
|
2017-12-05T03:16:04.000Z
|
2022-03-31T06:37:00.000Z
|
# -*- coding:utf-8 -*-
"""
Author:
Weichen Shen, weichenswc@163.com
Reference:
[1] Cheng H T, Koc L, Harmsen J, et al. Wide & deep learning for recommender systems[C]//Proceedings of the 1st Workshop on Deep Learning for Recommender Systems. ACM, 2016: 7-10.(https://arxiv.org/pdf/1606.07792.pdf)
"""
import tensorflow as tf
from tensorflow.python.keras.layers import Dense
from ..feature_column import get_linear_logit, input_from_feature_columns
from ..utils import deepctr_model_fn, DNN_SCOPE_NAME, variable_scope
from ...layers import DNN, combined_dnn_input
def WDLEstimator(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 128, 64), l2_reg_linear=1e-5,
l2_reg_embedding=1e-5, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu',
task='binary', model_dir=None, config=None, linear_optimizer='Ftrl',
dnn_optimizer='Adagrad', training_chief_hooks=None):
"""Instantiates the Wide&Deep Learning architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param dnn_hidden_units: list, list of positive integers or an empty list, the layer number and units in each layer of DNN
:param l2_reg_linear: float. L2 regularizer strength applied to wide part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param seed: integer ,to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :param model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
:param config: tf.RunConfig object to configure the runtime settings.
:param linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Defaults to FTRL optimizer.
:param dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Defaults to Adagrad optimizer.
:param training_chief_hooks: Iterable of `tf.train.SessionRunHook` objects to
run on the chief worker during training.
:return: A Tensorflow Estimator instance.
"""
def _model_fn(features, labels, mode, config):
train_flag = (mode == tf.estimator.ModeKeys.TRAIN)
linear_logits = get_linear_logit(features, linear_feature_columns, l2_reg_linear=l2_reg_linear)
with variable_scope(DNN_SCOPE_NAME):
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding=l2_reg_embedding)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(dnn_input, training=train_flag)
dnn_logits = Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_out)
logits = linear_logits + dnn_logits
return deepctr_model_fn(features, mode, logits, labels, task, linear_optimizer, dnn_optimizer,
training_chief_hooks=training_chief_hooks)
return tf.estimator.Estimator(_model_fn, model_dir=model_dir, config=config)
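# Usage sketch (illustrative; `linear_cols`, `dnn_cols` and the input functions are assumed to be
# built elsewhere with this package's feature-column helpers):
#
#   model = WDLEstimator(linear_cols, dnn_cols,
#                        dnn_hidden_units=(128, 64),
#                        task='binary',
#                        model_dir='/tmp/wdl_model')   # hypothetical path
#   model.train(input_fn=train_input_fn)
#   model.evaluate(input_fn=eval_input_fn)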
| 56.880597
| 221
| 0.722907
|
e9e847057f4b3a285e73e46acb6c6ef696d45ece
| 356
|
py
|
Python
|
website/migrations/0003_alter_contact_options.py
|
majeedkarimi/siteino
|
03cfef5f1f5e52ea5ac0bac56820d20bac4d92e5
|
[
"MIT"
] | null | null | null |
website/migrations/0003_alter_contact_options.py
|
majeedkarimi/siteino
|
03cfef5f1f5e52ea5ac0bac56820d20bac4d92e5
|
[
"MIT"
] | null | null | null |
website/migrations/0003_alter_contact_options.py
|
majeedkarimi/siteino
|
03cfef5f1f5e52ea5ac0bac56820d20bac4d92e5
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.12 on 2022-03-06 16:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0002_auto_20220228_1125'),
]
operations = [
migrations.AlterModelOptions(
name='contact',
options={'ordering': ['-created_date']},
),
]
| 19.777778
| 52
| 0.598315
|
80c5d7b6579c02754497b8490c0fb622c26f653a
| 2,504
|
py
|
Python
|
viglb/spec.py
|
pmdm56/vigor
|
0a65733a2b7bf48fc7d6071ea89c1af36f1cba80
|
[
"MIT"
] | 36
|
2019-09-06T15:33:31.000Z
|
2022-02-02T21:11:36.000Z
|
viglb/spec.py
|
pmdm56/vigor
|
0a65733a2b7bf48fc7d6071ea89c1af36f1cba80
|
[
"MIT"
] | 3
|
2019-10-03T10:33:19.000Z
|
2020-08-10T13:06:01.000Z
|
viglb/spec.py
|
pmdm56/vigor
|
0a65733a2b7bf48fc7d6071ea89c1af36f1cba80
|
[
"MIT"
] | 9
|
2019-09-18T15:12:29.000Z
|
2021-01-20T12:45:01.000Z
|
from state import flow_emap, flow_id_to_backend_id, backends, backend_ip_emap, cht
EXP_TIME = 10 * 1000
BACKEND_EXP_TIME = 3600000000 * 1000
EXT_PORT = 2
if a_packet_received:
flow_emap.expire_all(now - EXP_TIME)
backend_ip_emap.expire_all(now - BACKEND_EXP_TIME)
h3 = pop_header(tcpudp, on_mismatch=([],[]))
h2 = pop_header(ipv4, on_mismatch=([],[]))
h1 = pop_header(ether, on_mismatch=([],[]))
assert a_packet_received
assert h1.type == 8 # 0x0800 == IPv4 in big endian
assert h2.npid == 6 or h2.npid == 17 # 6/17 -> TCP/UDP
if received_on_port == EXT_PORT: # Packet from the external network - client
packet_flow = LoadBalancedFlowc(h2.saddr, h2.daddr, h3.src_port, h3.dst_port, h2.npid)
    alloc_flow_and_process_packet = False
if flow_emap.has(packet_flow):
flow_id = flow_emap.get(packet_flow)
backend_id = flow_id_to_backend_id.get(flow_id)
if backend_ip_emap.has_idx(backend_id):
flow_emap.refresh_idx(flow_emap.get(packet_flow), now)
backend = backends.get(backend_id)
return ([backend.nic],
[ether(h1, saddr=..., daddr=backend.mac),
ipv4(h2, cksum=..., daddr=backend.ip),
tcpudp(h3)])
else:
flow_emap.erase(packet_flow)
alloc_flow_and_process_packet = True
else:
alloc_flow_and_process_packet = True
if alloc_flow_and_process_packet:
if backend_ip_emap.exists_with_cht(cht, _LoadBalancedFlow_hash(packet_flow)):
bknd = backend_ip_emap.choose_with_cht(cht, _LoadBalancedFlow_hash(packet_flow))
if not flow_emap.full():
idx = the_index_allocated
flow_emap.add(packet_flow, idx, now)
flow_id_to_backend_id.set(idx, bknd)
backend = backends.get(bknd)
return ([backend.nic],
[ether(h1, saddr=..., daddr=backend.mac),
ipv4(h2, cksum=..., daddr=backend.ip),
tcpudp(h3)])
else:
return ([],[])
else: # A heartbeat from a backend
bknd_addr = ip_addrc(h2.saddr)
if backend_ip_emap.has(bknd_addr):
backend_ip_emap.refresh_idx(backend_ip_emap.get(bknd_addr), now)
else:
if not backend_ip_emap.full():
idx = the_index_allocated
backend_ip_emap.add(bknd_addr, idx, now)
backends.set(idx, LoadBalancedBackendc(received_on_port, h1.saddr, h2.saddr))
return ([],[])
| 41.733333
| 92
| 0.63738
|
3eb500d29ff979a46995e5f848e71031c10155c5
| 18,400
|
py
|
Python
|
samples/pacman.py
|
JPTIZ/asciimatics
|
4ae88c6684599e07766e8bfddce9cdb8f44f9e33
|
[
"Apache-2.0"
] | 3,197
|
2015-05-08T22:08:17.000Z
|
2022-03-31T04:43:27.000Z
|
samples/pacman.py
|
hakxcore/asciimatics
|
613a4ddc67cb808b45dc4b90f1b93f0b78bfc9df
|
[
"Apache-2.0"
] | 339
|
2015-05-12T17:51:35.000Z
|
2022-03-28T11:52:07.000Z
|
samples/pacman.py
|
hakxcore/asciimatics
|
613a4ddc67cb808b45dc4b90f1b93f0b78bfc9df
|
[
"Apache-2.0"
] | 314
|
2015-10-27T23:02:28.000Z
|
2022-03-28T11:43:30.000Z
|
#!/usr/bin/env python3
from copy import deepcopy
import sys
from asciimatics.exceptions import ResizeScreenError
from asciimatics.paths import Path
from asciimatics.renderers import StaticRenderer, ColourImageFile, FigletText
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.effects import Print, Sprite, BannerText
namco = """
88888888b. 8888888b. 8888888888b. .d88888888 .d888888b.
88 88 88 88 88 88 88 88 88
88 88 .d88888888 88 88 88 88 88 88
88 88 88 88 88 88 88 88 88 88
88 88 `888888888 88 88 88 `888888888 `8888888P'
"""
dot = """${7,2,7}####
${7,2,7}####
"""
pac_man = """
{0}##########
{0}##################
{0}############${{7,2,7}} {0}######
{0}############${{4,2,0}} ${{7,2,7}} {0}######
{0}##########################
{0}##########################
{0}##########################
{0}##########################
{0}##########################
{0}######################
{0}######################
{0}##################
{0}##########
""", """
{0}##########
{0}##################
{0}############${{7,2,7}} {0}######
{0}############${{4,2,0}} ${{7,2,7}} {0}######
{0}##########################
{0}##########################
{0}############
{0}##########################
{0}##########################
{0}######################
{0}######################
{0}##################
{0}##########
""", """
{0}##########
{0}##################
{0}############${{7,2,7}} {0}######
{0}############${{4,2,0}} ${{7,2,7}} {0}######
{0}##########################
{0}####################
{0}############
{0}####################
{0}##########################
{0}######################
{0}######################
{0}##################
{0}##########
""", """
{0}##########
{0}##################
{0}############${{7,2,7}} {0}######
{0}############${{4,2,0}} ${{7,2,7}} {0}######
{0}####################
{0}################
{0}############
{0}################
{0}####################
{0}######################
{0}######################
{0}##################
{0}##########
""", """
{0}##########
{0}##################
{0}############${{7,2,7}} {0}######
{0}##########${{4,2,0}} ${{7,2,7}} {0}######
{0}##################
{0}##############
{0}############
{0}##############
{0}##################
{0}####################
{0}######################
{0}##################
{0}##########
"""
pac_man_right = """
{0}##########
{0}##################
{0}######${{7,2,7}} {0}############
{0}######${{7,2,7}} ${{4,2,0}} {0}############
{0}##########################
{0}##########################
{0}##########################
{0}##########################
{0}##########################
{0}######################
{0}######################
{0}##################
{0}##########
""", """
{0}##########
{0}##################
{0}######${{7,2,7}} {0}############
{0}######${{7,2,7}} ${{4,2,0}} {0}############
{0}##########################
{0}##########################
{0}############
{0}##########################
{0}##########################
{0}######################
{0}######################
{0}##################
{0}##########
""", """
{0}##########
{0}##################
{0}######${{7,2,7}} {0}############
{0}######${{7,2,7}} ${{4,2,0}} {0}############
{0}##########################
{0}####################
{0}############
{0}####################
{0}##########################
{0}######################
{0}######################
{0}##################
{0}##########
""", """
{0}##########
{0}##################
{0}######${{7,2,7}} {0}############
{0}######${{7,2,7}} ${{4,2,0}} {0}############
{0}####################
{0}################
{0}############
{0}################
{0}#####################
{0}######################
{0}######################
{0}##################
{0}##########
""", """
{0}##########
{0}##################
{0}######${{7,2,7}} {0}############
{0}######${{7,2,7}} ${{4,2,0}} {0}##########
{0}##################
{0}##############
{0}############
{0}##############
{0}##################
{0}####################
{0}######################
{0}##################
{0}##########
"""
ghost = """
{0}########
{0}################
{0}####################
{0}##${{7,2,7}}....{0}########${{7,2,7}}....{0}######
${{7,2,7}}........{0}####${{7,2,7}}........{0}####
${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}####
{0}##${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}######
{0}####${{7,2,7}}....{0}########${{7,2,7}}....{0}########
{0}############################
{0}############################
{0}##########################
{0}####${{7,2,0}} {0}########${{7,2,0}} {0}########
{0}##${{7,2,0}} {0}####${{7,2,0}} {0}####
""", """
{0}########
{0}################
{0}####################
{0}##${{7,2,7}}....{0}########${{7,2,7}}....{0}######
${{7,2,7}}........{0}####${{7,2,7}}........{0}####
${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}####
{0}##${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}######
{0}####${{7,2,7}}....{0}########${{7,2,7}}....{0}########
{0}############################
{0}############################
{0}############################
{0}######${{7,2,0}} {0}########${{7,2,0}} {0}########
{0}####${{7,2,0}} {0}####${{7,2,0}} {0}####
""", """
{0}########
{0}################
{0}####################
{0}##${{7,2,7}}....{0}########${{7,2,7}}....{0}######
${{7,2,7}}........{0}####${{7,2,7}}........{0}####
${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}####
{0}##${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}######
{0}####${{7,2,7}}....{0}########${{7,2,7}}....{0}########
{0}############################
{0}############################
{0}############################
{0}########${{7,2,0}} {0}########${{7,2,0}} {0}########
{0}####${{7,2,0}} {0}####${{7,2,0}} {0}####
""", """
{0}########
{0}################
{0}####################
{0}##${{7,2,7}}....{0}########${{7,2,7}}....{0}######
${{7,2,7}}........{0}####${{7,2,7}}........{0}####
${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}####
{0}##${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}######
{0}####${{7,2,7}}....{0}########${{7,2,7}}....{0}########
{0}############################
{0}############################
{0}############################
{0}########${{7,2,0}} {0}########${{7,2,0}} {0}######
{0}####${{7,2,0}} {0}####${{7,2,0}} {0}####
""", """
{0}########
{0}################
{0}####################
{0}##${{7,2,7}}....{0}########${{7,2,7}}....{0}######
${{7,2,7}}........{0}####${{7,2,7}}........{0}####
${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}####
{0}##${{4,2,4}} ${{7,2,7}}....{0}####${{4,2,4}} ${{7,2,7}}....{0}######
{0}####${{7,2,7}}....{0}########${{7,2,7}}....{0}########
{0}############################
{0}############################
{0}############################
{0}##${{7,2,0}} {0}########${{7,2,0}} {0}########${{7,2,0}} {0}####
{0}####${{7,2,0}} {0}####${{7,2,0}} {0}##
"""
scared_ghost = """
${4,2,4}########
${4,2,4}################
${4,2,4}####################
${4,2,4}########################
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}############################
${4,2,4}############################
${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####
${4,2,4}##${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}##
${4,2,4}############################
${4,2,4}####${7,2,0} ${4,2,4}########${7,2,0} ${4,2,4}########${7,2,0} ${4,2,4}##
${4,2,4}##${7,2,0} ${4,2,4}####${7,2,0} ${4,2,4}####
""", """
${4,2,4}########
${4,2,4}################
${4,2,4}####################
${4,2,4}########################
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}############################
${4,2,4}############################
${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####
${4,2,4}##${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}##
${4,2,4}############################
${4,2,4}##${7,2,0} ${4,2,4}########${7,2,0} ${4,2,4}########${7,2,0} ${4,2,4}####
${4,2,4}####${7,2,0} ${4,2,4}####${7,2,0} ${4,2,4}##
""", """
${4,2,4}########
${4,2,4}################
${4,2,4}####################
${4,2,4}########################
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}############################
${4,2,4}############################
${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####
${4,2,4}##${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}##
${4,2,4}############################
${4,2,4}########${7,2,0} ${4,2,4}########${7,2,0} ${4,2,4}######
${4,2,4}####${7,2,0} ${4,2,4}####${7,2,0} ${4,2,4}####
""", """
${4,2,4}########
${4,2,4}################
${4,2,4}####################
${4,2,4}########################
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}############################
${4,2,4}############################
${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####
${4,2,4}##${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}##
${4,2,4}############################
${4,2,4}########${7,2,0} ${4,2,4}########${7,2,0} ${4,2,4}########
${4,2,4}####${7,2,0} ${4,2,4}####${7,2,0} ${4,2,4}####
""", """
${4,2,4}########
${4,2,4}################
${4,2,4}####################
${4,2,4}########################
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}####${7,2,7} ${4,2,4}########${7,2,7} ${4,2,4}####
${4,2,4}############################
${4,2,4}############################
${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####
${4,2,4}##${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}####${7,2,7} ${4,2,4}##
${4,2,4}############################
${4,2,4}######${7,2,0} ${4,2,4}########${7,2,0} ${4,2,4}########
${4,2,4}####${7,2,0} ${4,2,4}####${7,2,0} ${4,2,4}####
"""
eyes = """
${4,2,4}####${4,2,0} ${4,2,4}####
${7,2,7}..${4,2,4}####${7,2,7}..${7,2,0} ${7,2,7}..${4,2,4}####${7,2,7}..
${7,2,7}........${7,2,0} ${7,2,7}........
${7,2,7}........${7,2,0} ${7,2,7}........
${7,2,7}....${7,2,0} ${7,2,7}....
"""
# Globals used for pacman animation
direction = 1
value = 0
def cycle():
global value, direction
value += direction
if value <= 0 or value >= 4:
direction = -direction
return value
class PacMan(Sprite):
def __init__(self, screen, path, start_frame=0, stop_frame=0):
images = []
images_right = []
colour = Screen.COLOUR_YELLOW if screen.colours <= 16 else 11
for image in pac_man:
images.append(image.format("${%d,2,%d}" % (colour, colour)))
for image in pac_man_right:
images_right.append(image.format("${%d,2,%d}" % (colour, colour)))
super(PacMan, self).__init__(
screen,
renderer_dict={
"default": StaticRenderer(images=images, animation=cycle),
"left": StaticRenderer(images=images, animation=cycle),
"right": StaticRenderer(images=images_right, animation=cycle),
},
path=path,
start_frame=start_frame,
stop_frame=stop_frame)
def _update(self, frame_no):
super(PacMan, self)._update(frame_no)
for effect in self._scene.effects:
if isinstance(effect, ScaredGhost) and self.overlaps(effect):
effect.eaten()
class Ghost(Sprite):
def __init__(self, screen, path, colour=1, start_frame=0, stop_frame=0):
images = []
for image in ghost:
images.append(image.format("${%d,2,%d}" % (colour, colour)))
super(Ghost, self).__init__(
screen,
renderer_dict={
"default": StaticRenderer(images=images),
},
colour=colour,
path=path,
start_frame=start_frame,
stop_frame=stop_frame)
class ScaredGhost(Sprite):
def __init__(self, screen, path, start_frame=0, stop_frame=0):
super(ScaredGhost, self).__init__(
screen,
renderer_dict={
"default": StaticRenderer(images=scared_ghost),
},
colour=Screen.COLOUR_BLUE,
path=path,
start_frame=start_frame,
stop_frame=stop_frame)
self._eaten = False
def eaten(self):
# Already eaten - just ignore
if self._eaten:
return
# Allow one more iteration for this Sprite to clear itself up.
self._eaten = True
self._delete_count = 2
# Spawn the eyes to run away
path = Path()
path.jump_to(self._old_x + 12, self._old_y + 4)
path.move_straight_to(
self._old_x + 12, -8, (self._old_y + 12) // 2)
path.wait(10000)
self._scene.add_effect(Eyes(self._screen, path))
class Eyes(Sprite):
def __init__(self, screen, path, start_frame=0, stop_frame=0):
super(Eyes, self).__init__(
screen,
renderer_dict={
"default": StaticRenderer(images=[eyes]),
},
colour=Screen.COLOUR_BLUE,
path=path,
start_frame=start_frame,
stop_frame=stop_frame)
class EatingScene(Scene):
def __init__(self, screen):
super(EatingScene, self).__init__([], 240 + screen.width)
self._screen = screen
self._reset_count = 0
def reset(self, old_scene=None, screen=None):
super(EatingScene, self).reset(old_scene, screen)
# Recreate all the elements.
centre = (self._screen.width // 2, self._screen.height // 2)
path = Path()
path.jump_to(-16, centre[1])
path.move_straight_to(
self._screen.width + 16, centre[1], (self._screen.width + 16) // 3)
path.wait(100)
path2 = Path()
path2.jump_to(-16, centre[1])
path2.move_straight_to(
self._screen.width + 16, centre[1], self._screen.width + 16)
path2.wait(100)
# Take a copy of the list before using it to remove all effects.
for effect in self.effects[:]:
self.remove_effect(effect)
self.add_effect(
ScaredGhost(self._screen, deepcopy(path2)))
self.add_effect(
ScaredGhost(self._screen, deepcopy(path2), start_frame=60))
self.add_effect(
ScaredGhost(self._screen, deepcopy(path2), start_frame=120))
self.add_effect(
ScaredGhost(self._screen, deepcopy(path2), start_frame=180))
self.add_effect(PacMan(self._screen, path, start_frame=240))
def demo(screen):
scenes = []
centre = (screen.width // 2, screen.height // 2)
# Title
effects = [
BannerText(screen,
ColourImageFile(screen, "pacman.png", 16, 0, True),
(screen.height - 16) // 2,
Screen.COLOUR_WHITE),
Print(screen,
StaticRenderer(images=["A tribute to the classic 80's "
"video game by Namco."]),
screen.height - 1)
]
scenes.append(Scene(effects, 0))
# Scene 1 - run away, eating dots
path = Path()
path.jump_to(screen.width + 16, centre[1])
path.move_straight_to(-16, centre[1], (screen.width + 16) // 3)
path.wait(100)
if screen.colours <= 16:
inky = 6
pinky = 5
blinky = 1
clyde = 2
else:
inky = 14
pinky = 201
blinky = 9
clyde = 208
effects = [
PacMan(screen, path),
Ghost(screen, deepcopy(path), inky, start_frame=40),
Ghost(screen, deepcopy(path), pinky, start_frame=60),
Ghost(screen, deepcopy(path), blinky, start_frame=80),
Ghost(screen, deepcopy(path), clyde, start_frame=100),
]
for x in range(5, screen.width, 16):
effects.insert(0,
Print(screen,
StaticRenderer(images=[dot]),
screen.height // 2,
x=x,
speed=1,
stop_frame=4))
scenes.append(Scene(effects, 100 + screen.width))
# Scene 2 - Chase ghosts after a power pill
scenes.append(EatingScene(screen))
# Scene 3 - Thanks...
effects = [
Print(screen, FigletText("Thank you,"), screen.height // 3 - 3,
colour=Screen.COLOUR_RED),
Print(screen,
StaticRenderer(images=[namco]),
screen.height * 2 // 3 - 2,
colour=Screen.COLOUR_RED),
Print(screen,
StaticRenderer(images=["< Press X to exit. >"]),
screen.height - 1)
]
scenes.append(Scene(effects, 0))
screen.play(scenes, stop_on_resize=True, repeat=False)
if __name__ == "__main__":
while True:
try:
Screen.wrapper(demo)
sys.exit(0)
except ResizeScreenError:
pass
| 34.137291
| 100
| 0.308587
|